Index: projects/nand/MAINTAINERS =================================================================== --- projects/nand/MAINTAINERS (revision 235262) +++ projects/nand/MAINTAINERS (revision 235263) Property changes on: projects/nand/MAINTAINERS ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/MAINTAINERS:r235157-235262 Index: projects/nand/Makefile.inc1 =================================================================== --- projects/nand/Makefile.inc1 (revision 235262) +++ projects/nand/Makefile.inc1 (revision 235263) @@ -1,1687 +1,1665 @@ # # $FreeBSD$ # # Make command line options: # -DNO_CLEANDIR run ${MAKE} clean, instead of ${MAKE} cleandir # -DNO_CLEAN do not clean at all # -DNO_SHARE do not go into share subdir # -DKERNFAST define NO_KERNEL{CONFIG,CLEAN,DEPEND,OBJ} # -DNO_KERNELCONFIG do not run config in ${MAKE} buildkernel # -DNO_KERNELCLEAN do not run ${MAKE} clean in ${MAKE} buildkernel # -DNO_KERNELDEPEND do not run ${MAKE} depend in ${MAKE} buildkernel # -DNO_KERNELOBJ do not run ${MAKE} obj in ${MAKE} buildkernel # -DNO_PORTSUPDATE do not update ports in ${MAKE} update # -DNO_DOCUPDATE do not update doc in ${MAKE} update # -DNO_WWWUPDATE do not update www in ${MAKE} update # -DNO_CTF do not run the DTrace CTF conversion tools on built objects # LOCAL_DIRS="list of dirs" to add additional dirs to the SUBDIR list # LOCAL_TOOL_DIRS="list of dirs" to add additional dirs to the build-tools # list # TARGET="machine" to crossbuild world for a different machine type # TARGET_ARCH= may be required when a TARGET supports multiple endians # BUILDENV_SHELL= shell to launch for the buildenv target (def:/bin/sh) # # The intended user-driven targets are: # buildworld - rebuild *everything*, including glue to help do upgrades # installworld - install everything built by "buildworld" # doxygen - build API documentation of the kernel # update - convenient way to update your source tree (eg: cvsup/cvs) # # Standard targets (not defined here) are documented in the makefiles in # /usr/share/mk. These include: # obj depend all install clean cleandepend cleanobj # You are supposed to define both of these when calling Makefile.inc1 # directly. However, some old scripts don't. Cope for the moment, but # issue a warning for a transition period. .if defined(TARGET) && !defined(TARGET_ARCH) .warning "You must pass both TARGET and TARGET_ARCH to Makefile.inc1. Setting TARGET_ARCH=${TARGET}." TARGET_ARCH=${TARGET} .endif .if !defined(TARGET) || !defined(TARGET_ARCH) .error "Both TARGET and TARGET_ARCH must be defined." .endif .include <bsd.own.mk> .include <bsd.arch.inc.mk> # We must do share/info early so that installation of info `dir' # entries works correctly. Do it first since it is less likely to # grow dependencies on include and lib than vice versa. # # We must do lib/ and libexec/ before bin/, because if installworld # installs a new /bin/sh, the 'make' command will *immediately* # use that new version. And the new (dynamically-linked) /bin/sh # will expect to find appropriate libraries in /lib and /libexec.
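#
# For example (illustrative; see UPDATING for the full upgrade procedure,
# which also rebuilds and installs a kernel before installworld):
#
#	make -j4 buildworld
#	make installworld
#
# A cross-build for another machine type passes the two knobs checked
# above, e.g.:
#
#	make buildworld TARGET=mips TARGET_ARCH=mipsel
#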
# SUBDIR= share/info lib libexec SUBDIR+=bin .if ${MK_GAMES} != "no" SUBDIR+=games .endif .if ${MK_CDDL} != "no" SUBDIR+=cddl .endif SUBDIR+=gnu include .if ${MK_KERBEROS} != "no" SUBDIR+=kerberos5 .endif .if ${MK_RESCUE} != "no" SUBDIR+=rescue .endif SUBDIR+=sbin .if ${MK_CRYPT} != "no" SUBDIR+=secure .endif .if !defined(NO_SHARE) SUBDIR+=share .endif SUBDIR+=sys usr.bin usr.sbin .if ${MK_OFED} != "no" SUBDIR+=contrib/ofed .endif # # We must do etc/ last for install/distribute to work. # SUBDIR+=etc # These are last, since it is nice to at least get the base system rebuilt before you do them. .for _DIR in ${LOCAL_DIRS} .if exists(${.CURDIR}/${_DIR}/Makefile) SUBDIR+= ${_DIR} .endif .endfor .if defined(SUBDIR_OVERRIDE) SUBDIR= ${SUBDIR_OVERRIDE} .endif .if defined(NOCLEAN) NO_CLEAN= ${NOCLEAN} .endif .if defined(NO_CLEANDIR) CLEANDIR= clean cleandepend .else CLEANDIR= cleandir .endif LOCAL_TOOL_DIRS?= BUILDENV_SHELL?=/bin/sh CVS?= cvs CVSFLAGS?= -A -P -d -I! SVN?= svn SVNFLAGS?= -r HEAD SUP?= /usr/bin/csup SUPFLAGS?= -g -L 2 .if defined(SUPHOST) SUPFLAGS+= -h ${SUPHOST} .endif MAKEOBJDIRPREFIX?= /usr/obj .if !defined(OSRELDATE) .if exists(/usr/include/osreldate.h) OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \ /usr/include/osreldate.h .else OSRELDATE= 0 .endif .endif .if !defined(VERSION) VERSION!= uname -srp VERSION+= ${OSRELDATE} .endif KNOWN_ARCHES?= amd64 arm armeb/arm i386 i386/pc98 ia64 mips mipsel/mips mips64el/mips mips64/mips mipsn32el/mips mipsn32/mips powerpc powerpc64/powerpc sparc64 .if ${TARGET} == ${TARGET_ARCH} _t= ${TARGET} .else _t= ${TARGET_ARCH}/${TARGET} .endif .for _t in ${_t} .if empty(KNOWN_ARCHES:M${_t}) .error Unknown target ${TARGET_ARCH}:${TARGET}. .endif .endfor .if ${TARGET} == ${MACHINE} TARGET_CPUTYPE?=${CPUTYPE} .else TARGET_CPUTYPE?= .endif .if !empty(TARGET_CPUTYPE) _TARGET_CPUTYPE=${TARGET_CPUTYPE} .else _TARGET_CPUTYPE=dummy .endif _CPUTYPE!= MAKEFLAGS= CPUTYPE=${_TARGET_CPUTYPE} ${MAKE} \ -f /dev/null -m ${.CURDIR}/share/mk -V CPUTYPE .if ${_CPUTYPE} != ${_TARGET_CPUTYPE} .error CPUTYPE global should be set with ?=. .endif .if make(buildworld) BUILD_ARCH!= uname -p .if ${MACHINE_ARCH} != ${BUILD_ARCH} .error To cross-build, set TARGET_ARCH. .endif .endif .if ${MACHINE} == ${TARGET} && ${MACHINE_ARCH} == ${TARGET_ARCH} && !defined(CROSS_BUILD_TESTING) OBJTREE= ${MAKEOBJDIRPREFIX} .else OBJTREE= ${MAKEOBJDIRPREFIX}/${TARGET}.${TARGET_ARCH} .endif WORLDTMP= ${OBJTREE}${.CURDIR}/tmp # /usr/games added for fortune, which depends on strfile BPATH= ${WORLDTMP}/legacy/usr/sbin:${WORLDTMP}/legacy/usr/bin:${WORLDTMP}/legacy/usr/games XPATH= ${WORLDTMP}/usr/sbin:${WORLDTMP}/usr/bin:${WORLDTMP}/usr/games STRICTTMPPATH= ${BPATH}:${XPATH} TMPPATH= ${STRICTTMPPATH}:${PATH} # # Avoid running mktemp(1) unless actually needed. # It may not be functional, e.g., due to new ABI # when in the middle of installing over this system. # .if make(distributeworld) || make(installworld) INSTALLTMP!= /usr/bin/mktemp -d -u -t install .endif # # Building a world goes through the following stages # # 1. legacy stage [BMAKE] # This stage is responsible for creating compatibility # shims that are needed by the bootstrap-tools, # build-tools and cross-tools stages. # 1. bootstrap-tools stage [BMAKE] # This stage is responsible for creating programs that # are needed for backward compatibility reasons. They # are not built as cross-tools. # 2.
build-tools stage [TMAKE] # This stage is responsible for creating the object # tree and building any tools that are needed during # the build process. # 3. cross-tools stage [XMAKE] # This stage is responsible for creating any tools that # are needed for cross-builds. A cross-compiler is one # of them. # 4. world stage [WMAKE] # This stage actually builds the world. # 5. install stage (optional) [IMAKE] # This stage installs a previously built world. # BOOTSTRAPPING?= 0 # Common environment for world related stages CROSSENV= MAKEOBJDIRPREFIX=${OBJTREE} \ MACHINE_ARCH=${TARGET_ARCH} \ MACHINE=${TARGET} \ CPUTYPE=${TARGET_CPUTYPE} -.if ${OSRELDATE} < 700044 -CROSSENV+= AR=gnu-ar RANLIB=gnu-ranlib -.endif .if ${MK_GROFF} != "no" CROSSENV+= GROFF_BIN_PATH=${WORLDTMP}/legacy/usr/bin \ GROFF_FONT_PATH=${WORLDTMP}/legacy/usr/share/groff_font \ GROFF_TMAC_PATH=${WORLDTMP}/legacy/usr/share/tmac .endif # bootstrap-tools stage BMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${BPATH}:${PATH} \ WORLDTMP=${WORLDTMP} \ VERSION="${VERSION}" \ MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}" BMAKE= MAKEOBJDIRPREFIX=${WORLDTMP} \ ${BMAKEENV} ${MAKE} -f Makefile.inc1 \ DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ SSP_CFLAGS= \ -DWITHOUT_HTML -DWITHOUT_INFO -DNO_LINT -DWITHOUT_MAN \ -DNO_PIC -DNO_PROFILE -DNO_SHARED \ -DNO_CPU_CFLAGS -DNO_WARNS -DNO_CTF # build-tools stage TMAKE= MAKEOBJDIRPREFIX=${OBJTREE} \ ${BMAKEENV} ${MAKE} -f Makefile.inc1 \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ DESTDIR= \ BOOTSTRAPPING=${OSRELDATE} \ SSP_CFLAGS= \ -DNO_LINT \ -DNO_CPU_CFLAGS -DNO_WARNS -DNO_CTF # cross-tools stage XMAKE= TOOLS_PREFIX=${WORLDTMP} ${BMAKE} \ TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \ -DWITHOUT_GDB # world stage WMAKEENV= ${CROSSENV} \ _SHLIBDIRPREFIX=${WORLDTMP} \ _LDSCRIPTROOT= \ VERSION="${VERSION}" \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${TMPPATH} .if ${MK_CDDL} == "no" WMAKEENV+= NO_CTF=1 .endif WMAKE= ${WMAKEENV} ${MAKE} -f Makefile.inc1 DESTDIR=${WORLDTMP} .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" # 32 bit world LIB32TMP= ${OBJTREE}${.CURDIR}/lib32 .if ${TARGET_ARCH} == "amd64" .if empty(TARGET_CPUTYPE) LIB32CPUFLAGS= -march=i686 -mmmx -msse -msse2 .else LIB32CPUFLAGS= -march=${TARGET_CPUTYPE} .endif LIB32WMAKEENV= MACHINE=i386 MACHINE_ARCH=i386 \ MACHINE_CPU="i686 mmx sse sse2" \ LD="${LD} -m elf_i386_fbsd -Y P,${LIB32TMP}/usr/lib32" \ AS="${AS} --32" .elif ${TARGET_ARCH} == "powerpc64" .if empty(TARGET_CPUTYPE) LIB32CPUFLAGS= -mcpu=powerpc .else LIB32CPUFLAGS= -mcpu=${TARGET_CPUTYPE} .endif LIB32WMAKEENV= MACHINE=powerpc MACHINE_ARCH=powerpc \ LD="${LD} -m elf32ppc_fbsd" .endif LIB32FLAGS= -m32 ${LIB32CPUFLAGS} -DCOMPAT_32BIT \ -isystem ${LIB32TMP}/usr/include/ \ -L${LIB32TMP}/usr/lib32 \ -B${LIB32TMP}/usr/lib32 # Yes, the flags are redundant. 
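#
# For illustration: with TARGET_ARCH=amd64 and no TARGET_CPUTYPE set, the
# definitions above mean the 32-bit compatibility libraries are compiled
# roughly as if by (a sketch only; LIB32FLAGS is prepended to CC/CXX in
# LIB32WMAKEENV below):
#
#	cc -m32 -march=i686 -mmmx -msse -msse2 -DCOMPAT_32BIT \
#	    -isystem ${LIB32TMP}/usr/include/ \
#	    -L${LIB32TMP}/usr/lib32 -B${LIB32TMP}/usr/lib32 ...
#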
LIB32WMAKEENV+= MAKEOBJDIRPREFIX=${OBJTREE}/lib32 \ _SHLIBDIRPREFIX=${LIB32TMP} \ _LDSCRIPTROOT=${LIB32TMP} \ VERSION="${VERSION}" \ INSTALL="sh ${.CURDIR}/tools/install.sh" \ PATH=${TMPPATH} \ CC="${CC} ${LIB32FLAGS}" \ CXX="${CXX} ${LIB32FLAGS}" \ LIBDIR=/usr/lib32 \ SHLIBDIR=/usr/lib32 LIB32WMAKE= ${LIB32WMAKEENV} ${MAKE} -DNO_CPU_CFLAGS -DCOMPAT_32BIT \ -DWITHOUT_BIND -DWITHOUT_MAN -DWITHOUT_INFO \ -DWITHOUT_HTML -DNO_CTF -DNO_LINT -ECC -ECXX -EAS -ELD \ DESTDIR=${LIB32TMP} LIB32IMAKE= ${LIB32WMAKE:NINSTALL=*:NDESTDIR=*:N_LDSCRIPTROOT=*} -DNO_INCS .endif # install stage IMAKEENV= ${CROSSENV:N_LDSCRIPTROOT=*} IMAKE= ${IMAKEENV} ${MAKE} -f Makefile.inc1 .if empty(.MAKEFLAGS:M-n) IMAKEENV+= PATH=${STRICTTMPPATH}:${INSTALLTMP} \ LD_LIBRARY_PATH=${INSTALLTMP} \ PATH_LOCALE=${INSTALLTMP}/locale IMAKE+= __MAKE_SHELL=${INSTALLTMP}/sh .else IMAKEENV+= PATH=${TMPPATH}:${INSTALLTMP} .endif # kernel stage KMAKEENV= ${WMAKEENV} KMAKE= ${KMAKEENV} ${MAKE} KERNEL=${INSTKERNNAME} # # buildworld # # Attempt to rebuild the entire system, with reasonable chance of # success, regardless of how old your existing system is. # _worldtmp: .if ${.CURDIR:C/[^,]//g} != "" # The m4 build of sendmail files doesn't like it if ',' is used # anywhere in the path of its files. @echo @echo "*** Error: path to source tree contains a comma ','" @echo false .endif @echo @echo "--------------------------------------------------------------" @echo ">>> Rebuilding the temporary build tree" @echo "--------------------------------------------------------------" .if !defined(NO_CLEAN) rm -rf ${WORLDTMP} .if defined(LIB32TMP) rm -rf ${LIB32TMP} .endif .else rm -rf ${WORLDTMP}/legacy/usr/include # XXX - These three can depend on any header file. rm -f ${OBJTREE}${.CURDIR}/usr.bin/kdump/ioctl.c rm -f ${OBJTREE}${.CURDIR}/usr.bin/kdump/kdump_subr.c rm -f ${OBJTREE}${.CURDIR}/usr.bin/truss/ioctl.c .endif .for _dir in \ lib usr legacy/usr mkdir -p ${WORLDTMP}/${_dir} .endfor mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/legacy/usr >/dev/null .if ${MK_GROFF} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BSD.groff.dist \ -p ${WORLDTMP}/legacy/usr >/dev/null .endif mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${WORLDTMP}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${WORLDTMP}/usr/include >/dev/null ln -sf ${.CURDIR}/sys ${WORLDTMP} .if ${MK_BIND_LIBS} != "no" mtree -deU -f ${.CURDIR}/etc/mtree/BIND.include.dist \ -p ${WORLDTMP}/usr/include >/dev/null .endif _legacy: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.1: legacy release compatibility shims" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} legacy _bootstrap-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1.2: bootstrap tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${BMAKE} bootstrap-tools _cleanobj: .if !defined(NO_CLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} ${CLEANDIR:S/^/par-/} .if defined(LIB32TMP) ${_+_}cd ${.CURDIR}; ${LIB32WMAKE} -f Makefile.inc1 ${CLEANDIR:S/^/par-/} .endif .endif _obj: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo
"--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} par-obj _build-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${TMAKE} build-tools _cross-tools: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3: cross tools" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${XMAKE} cross-tools _includes: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.1: building includes" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} SHARED=symlinks par-includes _libraries: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.2: building libraries" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; \ ${WMAKE} -DNO_FSCHG -DWITHOUT_HTML -DWITHOUT_INFO -DNO_LINT \ -DWITHOUT_MAN -DNO_PROFILE libraries _depend: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.3: make dependencies" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} par-depend everything: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 4.4: building everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${WMAKE} par-all .if defined(LIB32TMP) build32: @echo @echo "--------------------------------------------------------------" @echo ">>> stage 5.1: building 32 bit shim libraries" @echo "--------------------------------------------------------------" mkdir -p ${LIB32TMP}/usr/include mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${LIB32TMP}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${LIB32TMP}/usr/include >/dev/null mkdir -p ${WORLDTMP} ln -sf ${.CURDIR}/sys ${WORLDTMP} .for _t in obj includes cd ${.CURDIR}/include; ${LIB32WMAKE} DIRPRFX=include/ ${_t} cd ${.CURDIR}/lib; ${LIB32WMAKE} DIRPRFX=lib/ ${_t} .if ${MK_CDDL} != "no" cd ${.CURDIR}/cddl/lib; ${LIB32WMAKE} DIRPRFX=cddl/lib/ ${_t} .endif cd ${.CURDIR}/gnu/lib; ${LIB32WMAKE} DIRPRFX=gnu/lib/ ${_t} .if ${MK_CRYPT} != "no" cd ${.CURDIR}/secure/lib; ${LIB32WMAKE} DIRPRFX=secure/lib/ ${_t} .endif .if ${MK_KERBEROS} != "no" cd ${.CURDIR}/kerberos5/lib; ${LIB32WMAKE} DIRPRFX=kerberos5/lib ${_t} .endif .endfor .for _dir in usr.bin/lex/lib cd ${.CURDIR}/${_dir}; ${LIB32WMAKE} DIRPRFX=${_dir}/ obj .endfor .for _dir in lib/ncurses/ncurses lib/ncurses/ncursesw lib/libmagic cd ${.CURDIR}/${_dir}; \ MAKEOBJDIRPREFIX=${OBJTREE}/lib32 ${MAKE} SSP_CFLAGS= DESTDIR= \ DIRPRFX=${_dir}/ build-tools .endfor cd ${.CURDIR}; \ ${LIB32WMAKE} -f Makefile.inc1 libraries .for _t in obj depend all cd ${.CURDIR}/libexec/rtld-elf; PROG=ld-elf32.so.1 ${LIB32WMAKE} \ DIRPRFX=libexec/rtld-elf/ ${_t} cd ${.CURDIR}/usr.bin/ldd; PROG=ldd32 ${LIB32WMAKE} \ DIRPRFX=usr.bin/ldd ${_t} .endfor distribute32 install32: cd ${.CURDIR}/lib; ${LIB32IMAKE} ${.TARGET:S/32$//} .if ${MK_CDDL} != "no" cd ${.CURDIR}/cddl/lib; ${LIB32IMAKE} ${.TARGET:S/32$//} .endif cd ${.CURDIR}/gnu/lib; ${LIB32IMAKE} ${.TARGET:S/32$//} .if ${MK_CRYPT} != "no" cd ${.CURDIR}/secure/lib; ${LIB32IMAKE} ${.TARGET:S/32$//} .endif .if ${MK_KERBEROS} != "no" cd 
${.CURDIR}/kerberos5/lib; ${LIB32IMAKE} ${.TARGET:S/32$//} .endif cd ${.CURDIR}/libexec/rtld-elf; \ PROG=ld-elf32.so.1 ${LIB32IMAKE} ${.TARGET:S/32$//} cd ${.CURDIR}/usr.bin/ldd; PROG=ldd32 ${LIB32IMAKE} ${.TARGET:S/32$//} .endif WMAKE_TGTS= .if !defined(SUBDIR_OVERRIDE) WMAKE_TGTS+= _worldtmp _legacy _bootstrap-tools .endif WMAKE_TGTS+= _cleanobj _obj _build-tools .if !defined(SUBDIR_OVERRIDE) WMAKE_TGTS+= _cross-tools .endif WMAKE_TGTS+= _includes _libraries _depend everything .if defined(LIB32TMP) && ${MK_LIB32} != "no" WMAKE_TGTS+= build32 .endif buildworld: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue .ORDER: buildworld_prologue ${WMAKE_TGTS} buildworld_epilogue buildworld_prologue: @echo "--------------------------------------------------------------" @echo ">>> World build started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" buildworld_epilogue: @echo @echo "--------------------------------------------------------------" @echo ">>> World build completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" # # We need to have this as a target because the indirection between Makefile # and Makefile.inc1 causes the correct PATH to be used, rather than a # modification of the current environment's PATH. In addition, we need # to quote multiword values. # buildenvvars: @echo ${WMAKEENV:Q} buildenv: @echo Entering world for ${TARGET_ARCH}:${TARGET} @cd ${.CURDIR} && env ${WMAKEENV} ${BUILDENV_SHELL} || true TOOLCHAIN_TGTS= ${WMAKE_TGTS:N_depend:Neverything:Nbuild32} toolchain: ${TOOLCHAIN_TGTS} kernel-toolchain: ${TOOLCHAIN_TGTS:N_includes:N_libraries} # # installcheck # # Checks to be sure system is ready for installworld/installkernel. # installcheck: # # Require DESTDIR to be set if installing for a different architecture. # .if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${TARGET} != ${MACHINE} .if !make(distributeworld) installcheck: installcheck_DESTDIR installcheck_DESTDIR: .if !defined(DESTDIR) || empty(DESTDIR) @echo "ERROR: Please set DESTDIR!"; \ false .endif .endif .endif # # Check for missing UIDs/GIDs. # CHECK_UIDS= CHECK_GIDS= audit .if ${MK_SENDMAIL} != "no" CHECK_UIDS+= smmsp CHECK_GIDS+= smmsp .endif .if ${MK_PF} != "no" CHECK_UIDS+= proxy CHECK_GIDS+= proxy authpf .endif installcheck: installcheck_UGID installcheck_UGID: .for uid in ${CHECK_UIDS} @if ! `id -u ${uid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${uid} user is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor .for gid in ${CHECK_GIDS} @if ! `find / -prune -group ${gid} >/dev/null 2>&1`; then \ echo "ERROR: Required ${gid} group is missing, see /usr/src/UPDATING."; \ false; \ fi .endfor # # Required install tools to be saved in a scratch dir for safety. # .if ${MK_INFO} != "no" _install-info= install-info .endif .if ${MK_ZONEINFO} != "no" _zoneinfo= zic tzsetup .endif ITOOLS= [ awk cap_mkdb cat chflags chmod chown \ date echo egrep find grep ${_install-info} \ ln lockf make mkdir mtree mv pwd_mkdb rm sed sh sysctl \ test true uname wc ${_zoneinfo} # # distributeworld # # Distributes everything compiled by a `buildworld'. # # installworld # # Installs everything compiled by a 'buildworld'. 
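#
# For example (illustrative path): installing a world that was cross-built
# for another architecture requires DESTDIR, which installcheck enforces
# below:
#
#	make installworld TARGET=mips TARGET_ARCH=mipsel \
#	    DESTDIR=/export/mipsel-root
#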
# # Non-base distributions produced by the base system EXTRA_DISTRIBUTIONS= doc games .if defined(LIB32TMP) && ${MK_LIB32} != "no" EXTRA_DISTRIBUTIONS+= lib32 .endif distributeworld installworld: installcheck mkdir -p ${INSTALLTMP} progs=$$(for prog in ${ITOOLS}; do \ if progpath=`which $$prog`; then \ echo $$progpath; \ else \ echo "Required tool $$prog not found in PATH." >&2; \ exit 1; \ fi; \ done); \ libs=$$(ldd -f "%o %p\n" -f "%o %p\n" $$progs 2>/dev/null | sort -u | \ while read line; do \ set -- $$line; \ if [ "$$2 $$3" != "not found" ]; then \ echo $$2; \ else \ echo "Required library $$1 not found." >&2; \ exit 1; \ fi; \ done); \ cp $$libs $$progs ${INSTALLTMP} cp -R $${PATH_LOCALE:-"/usr/share/locale"} ${INSTALLTMP}/locale .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} -mkdir ${DESTDIR}/${DISTDIR}/${dist} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${DESTDIR}/${DISTDIR}/${dist} >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${DESTDIR}/${DISTDIR}/${dist}/usr/include >/dev/null .endfor -mkdir ${DESTDIR}/${DISTDIR}/base ${_+_}cd ${.CURDIR}; ${IMAKE} distrib-dirs \ DESTDIR=${DESTDIR}/${DISTDIR}/base .endif ${_+_}cd ${.CURDIR}; ${IMAKE} re${.TARGET:S/world$//}; \ ${IMAKEENV} rm -rf ${INSTALLTMP} .if make(distributeworld) .for dist in ${EXTRA_DISTRIBUTIONS} find ${DESTDIR}/${DISTDIR}/${dist} -empty -delete .endfor .endif packageworld: .for dist in base ${EXTRA_DISTRIBUTIONS} ${_+_}cd ${DESTDIR}/${DISTDIR}/${dist}; \ tar cvJf ${DESTDIR}/${DISTDIR}/${dist}.txz . .endfor # # reinstall # # If you have a build server, you can NFS mount the source and obj directories # and do a 'make reinstall' on the *client* to install new binaries from the # most recent server build. # reinstall: @echo "--------------------------------------------------------------" @echo ">>> Making hierarchy" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 hierarchy @echo @echo "--------------------------------------------------------------" @echo ">>> Installing everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install .if defined(LIB32TMP) && ${MK_LIB32} != "no" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 install32 .endif redistribute: @echo "--------------------------------------------------------------" @echo ">>> Distributing everything" @echo "--------------------------------------------------------------" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute .if defined(LIB32TMP) && ${MK_LIB32} != "no" ${_+_}cd ${.CURDIR}; ${MAKE} -f Makefile.inc1 distribute32 \ DISTRIBUTION=lib32 .endif distrib-dirs distribution: cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} ${.TARGET} # # buildkernel and installkernel # # Which kernels to build and/or install is specified by setting # KERNCONF. If not defined, a GENERIC kernel is built/installed. # Only the existing (depending on TARGET) config files are used # for building kernels and only the first of these is designated # as the one being installed. # # Note that we have to use TARGET instead of TARGET_ARCH when # we're in kernel-land. Since only TARGET_ARCH is (expected) to # be set to cross-build, we have to make sure TARGET is set # properly.
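#
# For example (MYKERNEL is a hypothetical config in sys/${TARGET}/conf):
#
#	make buildkernel KERNCONF="MYKERNEL GENERIC"
#	make installkernel KERNCONF="MYKERNEL GENERIC"	# installs MYKERNEL
#	make buildkernel KERNFAST=MYKERNEL		# quick partial rebuild
#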
.if defined(KERNFAST) NO_KERNELCLEAN= t NO_KERNELCONFIG= t NO_KERNELDEPEND= t NO_KERNELOBJ= t # Shortcut for KERNCONF=Blah -DKERNFAST is now KERNFAST=Blah .if !defined(KERNCONF) && ${KERNFAST} != "1" KERNCONF=${KERNFAST} .endif .endif .if !defined(KERNCONF) && defined(KERNEL) KERNCONF= ${KERNEL} KERNWARN= .else .if ${TARGET_ARCH} == "powerpc64" KERNCONF?= GENERIC64 .else KERNCONF?= GENERIC .endif .endif INSTKERNNAME?= kernel KERNSRCDIR?= ${.CURDIR}/sys KRNLCONFDIR= ${KERNSRCDIR}/${TARGET}/conf KRNLOBJDIR= ${OBJTREE}${KERNSRCDIR} KERNCONFDIR?= ${KRNLCONFDIR} BUILDKERNELS= INSTALLKERNEL= .for _kernel in ${KERNCONF} .if exists(${KERNCONFDIR}/${_kernel}) BUILDKERNELS+= ${_kernel} .if empty(INSTALLKERNEL) INSTALLKERNEL= ${_kernel} .endif .endif .endfor # # buildkernel # # Builds all kernels defined by BUILDKERNELS. # buildkernel: .if empty(BUILDKERNELS) @echo "ERROR: Missing kernel configuration file(s) (${KERNCONF})."; \ false .endif .if defined(KERNWARN) @echo "--------------------------------------------------------------" @echo ">>> WARNING: KERNEL= setting should be changed to KERNCONF=" @echo "--------------------------------------------------------------" @sleep 3 .endif @echo .for _kernel in ${BUILDKERNELS} @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} started on `LC_ALL=C date`" @echo "--------------------------------------------------------------" @echo "===> ${_kernel}" mkdir -p ${KRNLOBJDIR} .if !defined(NO_KERNELCONFIG) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 1: configuring the kernel" @echo "--------------------------------------------------------------" cd ${KRNLCONFDIR}; \ PATH=${TMPPATH} \ config ${CONFIGARGS} -d ${KRNLOBJDIR}/${_kernel} \ ${KERNCONFDIR}/${_kernel} .endif .if !defined(NO_CLEAN) && !defined(NO_KERNELCLEAN) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.1: cleaning up the object tree" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} ${CLEANDIR} .endif .if !defined(NO_KERNELOBJ) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.2: rebuilding the object tree" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} obj .endif @echo @echo "--------------------------------------------------------------" @echo ">>> stage 2.3: build tools" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; \ PATH=${BPATH}:${PATH} \ MAKESRCPATH=${KERNSRCDIR}/dev/aic7xxx/aicasm \ ${MAKE} SSP_CFLAGS= -DNO_CPU_CFLAGS -DNO_CTF \ -f ${KERNSRCDIR}/dev/aic7xxx/aicasm/Makefile # XXX - Gratuitously builds aicasm in the ``makeoptions NO_MODULES'' case. 
.if !defined(MODULES_WITH_WORLD) && !defined(NO_MODULES) && exists(${KERNSRCDIR}/modules) .for target in obj depend all cd ${KERNSRCDIR}/modules/aic7xxx/aicasm; \ PATH=${BPATH}:${PATH} \ MAKEOBJDIRPREFIX=${KRNLOBJDIR}/${_kernel}/modules \ ${MAKE} SSP_CFLAGS= -DNO_CPU_CFLAGS -DNO_CTF ${target} .endfor .endif .if !defined(NO_KERNELDEPEND) @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.1: making dependencies" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} depend -DNO_MODULES_OBJ .endif @echo @echo "--------------------------------------------------------------" @echo ">>> stage 3.2: building everything" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${_kernel}; ${KMAKE} all -DNO_MODULES_OBJ @echo "--------------------------------------------------------------" @echo ">>> Kernel build for ${_kernel} completed on `LC_ALL=C date`" @echo "--------------------------------------------------------------" .endfor # # installkernel, etc. # # Install the kernel defined by INSTALLKERNEL # installkernel installkernel.debug \ reinstallkernel reinstallkernel.debug: installcheck .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif @echo "--------------------------------------------------------------" @echo ">>> Installing kernel ${INSTALLKERNEL}" @echo "--------------------------------------------------------------" cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} KERNEL=${INSTKERNNAME} ${.TARGET:S/kernel//} distributekernel distributekernel.debug: .if empty(INSTALLKERNEL) @echo "ERROR: No kernel \"${KERNCONF}\" to install."; \ false .endif cd ${KRNLOBJDIR}/${INSTALLKERNEL}; \ ${CROSSENV} PATH=${TMPPATH} ${MAKE} KERNEL=${INSTKERNNAME} \ DESTDIR=${DESTDIR}/${DISTDIR}/kernel \ ${.TARGET:S/distributekernel/install/} .for _kernel in ${BUILDKERNELS:S/${INSTALLKERNEL}//} cd ${KRNLOBJDIR}/${_kernel}; \ ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ KERNEL=${INSTKERNNAME}.${_kernel} \ DESTDIR=${DESTDIR}/${DISTDIR}/kernel.${_kernel} \ ${.TARGET:S/distributekernel/install/} .endfor packagekernel: cd ${DESTDIR}/${DISTDIR}/kernel; \ tar cvJf ${DESTDIR}/${DISTDIR}/kernel.txz . .for _kernel in ${BUILDKERNELS:S/${INSTALLKERNEL}//} cd ${DESTDIR}/${DISTDIR}/kernel.${_kernel}; \ tar cvJf ${DESTDIR}/${DISTDIR}/kernel.${_kernel}.txz . .endfor # # doxygen # # Build the API documentation with doxygen # doxygen: @if [ ! -x `/usr/bin/which doxygen` ]; then \ echo "You need doxygen (devel/doxygen) to generate the API documentation of the kernel." | /usr/bin/fmt; \ exit 1; \ fi cd ${.CURDIR}/tools/kerneldoc/subsys && ${MAKE} obj all # # update # # Update the source tree(s), by running cvsup/cvs/svn to update to the # latest copy. 
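#
# For example, with a Subversion working copy of the source tree
# (SVN_UPDATE, CVS_UPDATE and SUP_UPDATE are user-supplied knobs; the
# csup and CVS variants work analogously):
#
#	make update SVN_UPDATE=yes
#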
# update: .if defined(SUP_UPDATE) @echo "--------------------------------------------------------------" @echo ">>> Running ${SUP}" @echo "--------------------------------------------------------------" .if defined(SUPFILE) @${SUP} ${SUPFLAGS} ${SUPFILE} .endif .if defined(SUPFILE1) @${SUP} ${SUPFLAGS} ${SUPFILE1} .endif .if defined(SUPFILE2) @${SUP} ${SUPFLAGS} ${SUPFILE2} .endif .if defined(PORTSSUPFILE) && !defined(NO_PORTSUPDATE) @${SUP} ${SUPFLAGS} ${PORTSSUPFILE} .endif .if defined(DOCSUPFILE) && !defined(NO_DOCUPDATE) @${SUP} ${SUPFLAGS} ${DOCSUPFILE} .endif .if defined(WWWSUPFILE) && !defined(NO_WWWUPDATE) @${SUP} ${SUPFLAGS} ${WWWSUPFILE} .endif .endif .if defined(CVS_UPDATE) @cd ${.CURDIR} ; \ if [ -d CVS ] ; then \ echo "--------------------------------------------------------------" ; \ echo ">>> Updating ${.CURDIR} from CVS repository" ${CVSROOT} ; \ echo "--------------------------------------------------------------" ; \ echo ${CVS} -R -q update ${CVSFLAGS} ; \ ${CVS} -R -q update ${CVSFLAGS} ; \ fi .endif .if defined(SVN_UPDATE) @cd ${.CURDIR} ; \ if [ -d .svn ] ; then \ echo "--------------------------------------------------------------" ; \ echo ">>> Updating ${.CURDIR} using Subversion" ; \ echo "--------------------------------------------------------------" ; \ echo ${SVN} update ${SVNFLAGS} ; \ ${SVN} update ${SVNFLAGS} ; \ fi .endif # # ------------------------------------------------------------------------ # # From here onwards are utility targets used by the 'make world' and # related targets. If your 'world' breaks, you may like to try to fix # the problem and manually run the following targets to attempt to # complete the build. Beware, this is *not* guaranteed to work, you # need to have a pretty good grip on the current state of the system # to attempt to manually finish it. If in doubt, 'make world' again. 
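#
# For example, if a build stopped during stage 4.2, one might attempt to
# finish the remaining stages by hand (no guarantees, as noted above;
# TARGET and TARGET_ARCH must be set as for the original build):
#
#	make -f Makefile.inc1 _libraries
#	make -f Makefile.inc1 _depend everything
#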
# # # legacy: Build compatibility shims for the next three targets # legacy: -.if ${BOOTSTRAPPING} < 600034 && ${BOOTSTRAPPING} != 0 - @echo "ERROR: Source upgrades from versions prior to 6.0 not supported."; \ +.if ${BOOTSTRAPPING} < 800107 && ${BOOTSTRAPPING} != 0 + @echo "ERROR: Source upgrades from versions prior to 8.0 not supported."; \ false .endif .for _tool in tools/build ${_+_}@${ECHODIR} "===> ${_tool} (obj,includes,depend,all,install)"; \ cd ${.CURDIR}/${_tool}; \ ${MAKE} DIRPRFX=${_tool}/ obj; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy includes; \ ${MAKE} DIRPRFX=${_tool}/ depend; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy install .endfor # # bootstrap-tools: Build tools needed for compatibility # .if ${MK_GAMES} != "no" _strfile= games/fortune/strfile .endif .if ${MK_CXX} != "no" _gperf= gnu/usr.bin/gperf .endif .if ${MK_GROFF} != "no" _groff= gnu/usr.bin/groff .endif -.if ${BOOTSTRAPPING} >= 700044 && ${BOOTSTRAPPING} < 800022 -_ar= usr.bin/ar -.endif - -.if ${BOOTSTRAPPING} < 800013 -_mklocale= usr.bin/mklocale -.endif - .if ${BOOTSTRAPPING} < 900002 _sed= usr.bin/sed .endif .if ${BOOTSTRAPPING} < 900006 _lex= usr.bin/lex _yacc= usr.bin/yacc .endif .if ${BOOTSTRAPPING} >= 900040 && ${BOOTSTRAPPING} < 900041 _awk= usr.bin/awk .endif -.if ${MK_BSNMP} != "no" && \ - (${BOOTSTRAPPING} < 700018 || !exists(/usr/sbin/gensnmptree)) +.if ${MK_BSNMP} != "no" && !exists(/usr/sbin/gensnmptree) _gensnmptree= usr.sbin/bsnmpd/gensnmptree .endif -.if ${MK_RESCUE} != "no" && \ - ${BOOTSTRAPPING} < 700026 -_crunchgen= usr.sbin/crunch/crunchgen -.endif - .if ${MK_CLANG} != "no" _clang_tblgen= \ lib/clang/libllvmsupport \ lib/clang/libllvmtablegen \ usr.bin/clang/tblgen \ usr.bin/clang/clang-tblgen .endif # dtrace tools are required for older bootstrap env and cross-build .if ${MK_CDDL} != "no" && \ - ((${BOOTSTRAPPING} < 800038 && \ - !(${BOOTSTRAPPING} >= 700112 && ${BOOTSTRAPPING} < 799999)) \ - || (${MACHINE} != ${TARGET} || ${MACHINE_ARCH} != ${TARGET_ARCH})) + (${MACHINE} != ${TARGET} || ${MACHINE_ARCH} != ${TARGET_ARCH}) _dtrace_tools= cddl/usr.bin/sgsmsg cddl/lib/libctf lib/libelf \ lib/libdwarf cddl/usr.bin/ctfconvert cddl/usr.bin/ctfmerge .endif .if ${MK_FDT} != "no" _dtc= gnu/usr.bin/dtc .endif .if ${MK_KERBEROS} != "no" _kerberos5_bootstrap_tools= \ kerberos5/tools/make-roken \ kerberos5/lib/libroken \ kerberos5/lib/libvers \ kerberos5/tools/asn1_compile \ kerberos5/tools/slc .endif # Please document (add comment) why something is in 'bootstrap-tools'. # Try to bound the building of the bootstrap-tool to just the # FreeBSD versions that need the tool built at this stage of the build. 
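#
# Illustrative pattern for bounding a new bootstrap tool (the version
# number and tool path below are placeholders, not real entries):
#
#.if ${BOOTSTRAPPING} < 900999
#_mytool=	usr.bin/mytool
#.endif
#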
bootstrap-tools: .for _tool in \ ${_clang_tblgen} \ ${_kerberos5_bootstrap_tools} \ ${_dtrace_tools} \ ${_strfile} \ ${_gperf} \ ${_groff} \ - ${_ar} \ ${_dtc} \ ${_awk} \ usr.bin/lorder \ usr.bin/makewhatis \ - ${_mklocale} \ usr.bin/rpcgen \ ${_sed} \ ${_lex} \ ${_yacc} \ usr.bin/xinstall \ ${_gensnmptree} \ - usr.sbin/config \ - ${_crunchgen} + usr.sbin/config ${_+_}@${ECHODIR} "===> ${_tool} (obj,depend,all,install)"; \ cd ${.CURDIR}/${_tool}; \ ${MAKE} DIRPRFX=${_tool}/ obj; \ ${MAKE} DIRPRFX=${_tool}/ depend; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX}/legacy install .endfor # # build-tools: Build special purpose build tools # .if defined(MODULES_WITH_WORLD) && exists(${KERNSRCDIR}/modules) _aicasm= sys/modules/aic7xxx/aicasm .endif .if !defined(NO_SHARE) _share= share/syscons/scrnmaps .endif .if ${MK_GCC} != "no" && ${MK_CLANG_IS_CC} == "no" _gcc_tools= gnu/usr.bin/cc/cc_tools .endif .if ${MK_RESCUE} != "no" _rescue= rescue/rescue .endif build-tools: .for _tool in \ bin/csh \ bin/sh \ ${_rescue} \ ${LOCAL_TOOL_DIRS} \ lib/ncurses/ncurses \ lib/ncurses/ncursesw \ ${_share} \ ${_aicasm} \ usr.bin/awk \ lib/libmagic \ usr.bin/mkesdb_static \ usr.bin/mkcsmapper_static ${_+_}@${ECHODIR} "===> ${_tool} (obj,build-tools)"; \ cd ${.CURDIR}/${_tool}; \ ${MAKE} DIRPRFX=${_tool}/ obj; \ ${MAKE} DIRPRFX=${_tool}/ build-tools .endfor .for _tool in \ ${_gcc_tools} ${_+_}@${ECHODIR} "===> ${_tool} (obj,depend,all)"; \ cd ${.CURDIR}/${_tool}; \ ${MAKE} DIRPRFX=${_tool}/ obj; \ ${MAKE} DIRPRFX=${_tool}/ depend; \ ${MAKE} DIRPRFX=${_tool}/ all .endfor # # cross-tools: Build cross-building tools # -.if ${TARGET_ARCH} != ${MACHINE_ARCH} || ${BOOTSTRAPPING} < 800035 +.if ${TARGET_ARCH} != ${MACHINE_ARCH} .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" _btxld= usr.sbin/btxld .endif .endif .if ${TARGET_ARCH} != ${MACHINE_ARCH} .if ${MK_RESCUE} != "no" || defined(RELEASEDIR) _crunchide= usr.sbin/crunch/crunchide .endif .if ${TARGET_ARCH} == "i386" && defined(RELEASEDIR) _kgzip= usr.sbin/kgzip .endif .endif .if ${MK_BINUTILS} != "no" _binutils= gnu/usr.bin/binutils .endif .if ${MK_CLANG} != "no" && (${MK_CLANG_IS_CC} != "no" || ${CC:T:Mclang} == "clang") _clang= usr.bin/clang _clang_libs= lib/clang .endif .if ${MK_GCC} != "no" && ${MK_CLANG_IS_CC} == "no" _cc= gnu/usr.bin/cc .endif cross-tools: .for _tool in \ ${_clang_libs} \ ${_clang} \ ${_binutils} \ ${_cc} \ usr.bin/xlint/lint1 usr.bin/xlint/lint2 usr.bin/xlint/xlint \ ${_btxld} \ ${_crunchide} \ ${_kgzip} ${_+_}@${ECHODIR} "===> ${_tool} (obj,depend,all,install)"; \ cd ${.CURDIR}/${_tool}; \ ${MAKE} DIRPRFX=${_tool}/ obj; \ ${MAKE} DIRPRFX=${_tool}/ depend; \ ${MAKE} DIRPRFX=${_tool}/ all; \ ${MAKE} DIRPRFX=${_tool}/ DESTDIR=${MAKEOBJDIRPREFIX} install .endfor # # hierarchy - ensure that all the needed directories are present # hierarchy: cd ${.CURDIR}/etc; ${MAKE} distrib-dirs # # libraries - build all libraries, and install them under ${DESTDIR}. # # The list of libraries with dependents (${_prebuild_libs}) and their # interdependencies (__L) are built automatically by the # ${.CURDIR}/tools/make_libdeps.sh script. 
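#
# For example, the interdependency list can be regenerated from a built
# source tree with something like the following (illustrative; see the
# script itself for its exact usage):
#
#	sh ${.CURDIR}/tools/make_libdeps.sh
#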
# libraries: cd ${.CURDIR}; \ ${MAKE} -f Makefile.inc1 _prereq_libs; \ ${MAKE} -f Makefile.inc1 _startup_libs; \ ${MAKE} -f Makefile.inc1 _prebuild_libs; \ ${MAKE} -f Makefile.inc1 _generic_libs; # # static libgcc.a prerequisite for shared libc # _prereq_libs= gnu/lib/libssp/libssp_nonshared gnu/lib/libgcc lib/libcompiler_rt # These dependencies are not automatically generated: # # gnu/lib/csu, gnu/lib/libgcc, lib/csu and lib/libc must be built before # all shared libraries for ELF. # _startup_libs= gnu/lib/csu .if exists(${.CURDIR}/lib/csu/${MACHINE_ARCH}-elf) _startup_libs+= lib/csu/${MACHINE_ARCH}-elf .elif exists(${.CURDIR}/lib/csu/${MACHINE_ARCH}) _startup_libs+= lib/csu/${MACHINE_ARCH} .else _startup_libs+= lib/csu/${MACHINE_CPUARCH} .endif _startup_libs+= gnu/lib/libgcc _startup_libs+= lib/libcompiler_rt _startup_libs+= lib/libc .if ${MK_LIBCPLUSPLUS} != "no" _startup_libs+= lib/libcxxrt .endif gnu/lib/libgcc__L: lib/libc__L .if ${MK_LIBCPLUSPLUS} != "no" lib/libcxxrt__L: gnu/lib/libgcc__L .endif _prebuild_libs= ${_kerberos5_lib_libasn1} \ ${_kerberos5_lib_libhdb} \ ${_kerberos5_lib_libheimbase} \ ${_kerberos5_lib_libheimntlm} \ ${_kerberos5_lib_libheimsqlite} \ ${_kerberos5_lib_libheimipcc} \ ${_kerberos5_lib_libhx509} ${_kerberos5_lib_libkrb5} \ ${_kerberos5_lib_libroken} \ ${_kerberos5_lib_libwind} \ lib/libbz2 ${_libcom_err} lib/libcrypt \ lib/libexpat \ ${_lib_libgssapi} ${_lib_libipx} \ lib/libkiconv lib/libkvm lib/liblzma lib/libmd \ lib/ncurses/ncurses lib/ncurses/ncursesw \ lib/libopie lib/libpam ${_lib_libthr} \ lib/libradius lib/libsbuf lib/libtacplus \ ${_cddl_lib_libumem} \ lib/libutil ${_lib_libypclnt} lib/libz lib/msun \ ${_secure_lib_libcrypto} ${_secure_lib_libssh} \ ${_secure_lib_libssl} .if ${MK_LIBTHR} != "no" _lib_libthr= lib/libthr .endif .if ${MK_OFED} != "no" _ofed_lib= contrib/ofed/usr.lib/ .endif _generic_libs= ${_cddl_lib} gnu/lib ${_kerberos5_lib} lib ${_secure_lib} usr.bin/lex/lib ${_ofed_lib} lib/libopie__L lib/libtacplus__L: lib/libmd__L .if ${MK_CDDL} != "no" _cddl_lib_libumem= cddl/lib/libumem _cddl_lib= cddl/lib .endif .if ${MK_CRYPT} != "no" .if ${MK_OPENSSL} != "no" _secure_lib_libcrypto= secure/lib/libcrypto _secure_lib_libssl= secure/lib/libssl lib/libradius__L secure/lib/libssl__L: secure/lib/libcrypto__L .if ${MK_OPENSSH} != "no" _secure_lib_libssh= secure/lib/libssh secure/lib/libssh__L: lib/libz__L secure/lib/libcrypto__L lib/libcrypt__L .if ${MK_KERBEROS_SUPPORT} != "no" secure/lib/libssh__L: lib/libgssapi__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libhx509__L kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libmd__L kerberos5/lib/libroken__L .endif .endif .endif _secure_lib= secure/lib .endif .if ${MK_KERBEROS} != "no" kerberos5/lib/libasn1__L: lib/libcom_err__L kerberos5/lib/libroken__L kerberos5/lib/libhdb__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ kerberos5/lib/libkrb5__L kerberos5/lib/libroken__L \ kerberos5/lib/libwind__L kerberos5/lib/libheimsqlite__L kerberos5/lib/libheimntlm__L: secure/lib/libcrypto__L kerberos5/lib/libkrb5__L \ kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libhx509__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ secure/lib/libcrypto__L kerberos5/lib/libroken__L kerberos5/lib/libwind__L kerberos5/lib/libkrb5__L: kerberos5/lib/libasn1__L lib/libcom_err__L \ lib/libcrypt__L secure/lib/libcrypto__L kerberos5/lib/libhx509__L \ kerberos5/lib/libroken__L kerberos5/lib/libwind__L \ kerberos5/lib/libheimbase__L kerberos5/lib/libheimipcc__L kerberos5/lib/libroken__L: lib/libcrypt__L 
kerberos5/lib/libwind__L: kerberos5/lib/libroken__L lib/libcom_err__L kerberos5/lib/libheimbase__L: lib/libthr__L kerberos5/lib/libheimipcc__L: kerberos5/lib/libroken__L kerberos5/lib/libheimbase__L lib/libthr__L kerberos5/lib/libheimsqlite__L: lib/libthr__L .endif .if ${MK_GSSAPI} != "no" _lib_libgssapi= lib/libgssapi .endif .if ${MK_IPX} != "no" _lib_libipx= lib/libipx .endif .if ${MK_KERBEROS} != "no" _kerberos5_lib= kerberos5/lib _kerberos5_lib_libasn1= kerberos5/lib/libasn1 _kerberos5_lib_libhdb= kerberos5/lib/libhdb _kerberos5_lib_libheimbase= kerberos5/lib/libheimbase _kerberos5_lib_libkrb5= kerberos5/lib/libkrb5 _kerberos5_lib_libhx509= kerberos5/lib/libhx509 _kerberos5_lib_libroken= kerberos5/lib/libroken _kerberos5_lib_libheimntlm= kerberos5/lib/libheimntlm _kerberos5_lib_libheimsqlite= kerberos5/lib/libheimsqlite _kerberos5_lib_libheimipcc= kerberos5/lib/libheimipcc _kerberos5_lib_libwind= kerberos5/lib/libwind _libcom_err= lib/libcom_err .endif .if ${MK_NIS} != "no" _lib_libypclnt= lib/libypclnt .endif .if ${MK_OPENSSL} == "no" lib/libradius__L: lib/libmd__L .endif .for _lib in ${_prereq_libs} ${_lib}__PL: .PHONY .if exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,depend,all,install)"; \ cd ${.CURDIR}/${_lib}; \ ${MAKE} DIRPRFX=${_lib}/ obj; \ ${MAKE} DIRPRFX=${_lib}/ depend; \ ${MAKE} -DNO_PROFILE -DNO_PIC DIRPRFX=${_lib}/ all; \ ${MAKE} -DNO_PROFILE -DNO_PIC DIRPRFX=${_lib}/ install .endif .endfor .for _lib in ${_startup_libs} ${_prebuild_libs:Nlib/libpam} ${_generic_libs} ${_lib}__L: .PHONY .if exists(${.CURDIR}/${_lib}) ${_+_}@${ECHODIR} "===> ${_lib} (obj,depend,all,install)"; \ cd ${.CURDIR}/${_lib}; \ ${MAKE} DIRPRFX=${_lib}/ obj; \ ${MAKE} DIRPRFX=${_lib}/ depend; \ ${MAKE} DIRPRFX=${_lib}/ all; \ ${MAKE} DIRPRFX=${_lib}/ install .endif .endfor # libpam is special: we need to build static PAM modules before # static PAM library, and dynamic PAM library before dynamic PAM # modules. lib/libpam__L: .PHONY ${_+_}@${ECHODIR} "===> lib/libpam (obj,depend,all,install)"; \ cd ${.CURDIR}/lib/libpam; \ ${MAKE} DIRPRFX=lib/libpam/ obj; \ ${MAKE} DIRPRFX=lib/libpam/ depend; \ ${MAKE} DIRPRFX=lib/libpam/ -D_NO_LIBPAM_SO_YET all; \ ${MAKE} DIRPRFX=lib/libpam/ -D_NO_LIBPAM_SO_YET install _prereq_libs: ${_prereq_libs:S/$/__PL/} _startup_libs: ${_startup_libs:S/$/__L/} _prebuild_libs: ${_prebuild_libs:S/$/__L/} _generic_libs: ${_generic_libs:S/$/__L/} .for __target in all clean cleandepend cleandir depend includes obj .for entry in ${SUBDIR} ${entry}.${__target}__D: .PHONY ${_+_}@if test -d ${.CURDIR}/${entry}.${MACHINE_ARCH}; then \ ${ECHODIR} "===> ${DIRPRFX}${entry}.${MACHINE_ARCH} (${__target})"; \ edir=${entry}.${MACHINE_ARCH}; \ cd ${.CURDIR}/$${edir}; \ else \ ${ECHODIR} "===> ${DIRPRFX}${entry} (${__target})"; \ edir=${entry}; \ cd ${.CURDIR}/$${edir}; \ fi; \ ${MAKE} ${__target} DIRPRFX=${DIRPRFX}$${edir}/ .endfor par-${__target}: ${SUBDIR:S/$/.${__target}__D/} .endfor .include <bsd.subdir.mk> .if make(check-old) || make(check-old-dirs) || \ make(check-old-files) || make(check-old-libs) || \ make(delete-old) || make(delete-old-dirs) || \ make(delete-old-files) || make(delete-old-libs) # # check for / delete old files section # .include "ObsoleteFiles.inc" OLD_LIBS_MESSAGE="Please be sure no application still uses those libraries, \ else you can not start such an application. Consult UPDATING for more \ information regarding how to cope with the removal/revision bump of a \ specific library."
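#
# For example, a typical cleanup after an installworld (illustrative;
# defining BATCH_DELETE_OLD_FILES suppresses the per-file prompt used
# below):
#
#	make delete-old
#	make delete-old-libs BATCH_DELETE_OLD_FILES=yes
#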
.if !defined(BATCH_DELETE_OLD_FILES) RM_I=-i .else RM_I=-v .endif delete-old-files: @echo ">>> Removing old files (only deletes safe to delete libs)" # Ask for every old file if the user really wants to remove it. # It's annoying, but better safe than sorry. # NB: We cannot pass the list of OLD_FILES as a parameter because the # argument list will get too long. Using .for/.endfor make "loops" will make # the Makefile parser segfault. @exec 3<&0; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ done # Remove catpages without corresponding manpages. @exec 3<&0; \ find ${DESTDIR}/usr/share/man/cat* ! -type d | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ rm ${RM_I} $${catpage} <&3; \ fi; \ done @echo ">>> Old files removed" check-old-files: @echo ">>> Checking for old files" @${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_FILES -V "OLD_FILES:Musr/share/*.gz:R" | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ done # Check for catpages without corresponding manpages. @find ${DESTDIR}/usr/share/man/cat* ! -type d | \ sed -ep -e's:${DESTDIR}/usr/share/man/cat:${DESTDIR}/usr/share/man/man:' | \ while read catpage; do \ read manpage; \ if [ ! -e "$${manpage}" ]; then \ echo $${catpage}; \ fi; \ done delete-old-libs: @echo ">>> Removing old libraries" @echo "${OLD_LIBS_MESSAGE}" | fmt @exec 3<&0; \ ${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ chflags noschg "${DESTDIR}/$${file}" 2>/dev/null || true; \ rm ${RM_I} "${DESTDIR}/$${file}" <&3; \ fi; \ done @echo ">>> Old libraries removed" check-old-libs: @echo ">>> Checking for old libraries" @${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_LIBS | xargs -n1 | \ while read file; do \ if [ -f "${DESTDIR}/$${file}" -o -L "${DESTDIR}/$${file}" ]; then \ echo "${DESTDIR}/$${file}"; \ fi; \ done delete-old-dirs: @echo ">>> Removing old directories" @${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ rmdir -v "${DESTDIR}/$${dir}" || true; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done @echo ">>> Old directories removed" check-old-dirs: @echo ">>> Checking for old directories" @${MAKE} -f ${.CURDIR}/Makefile.inc1 ${.MAKEFLAGS} ${.TARGET} \ -V OLD_DIRS | xargs -n1 | \ while read dir; do \ if [ -d "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir}"; \ elif [ -L "${DESTDIR}/$${dir}" ]; then \ echo "${DESTDIR}/$${dir} is a link, please remove everything manually."; \ fi; \ done delete-old: delete-old-files delete-old-dirs @echo "To remove old libraries run '${MAKE} delete-old-libs'." check-old: check-old-files check-old-libs check-old-dirs @echo "To remove old files and directories run '${MAKE} delete-old'." @echo "To remove old libraries run '${MAKE} delete-old-libs'." .endif # # showconfig - show build configuration. 
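#
# For example, to check how one knob is currently set (illustrative):
#
#	make showconfig | grep MK_CLANG
#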
# showconfig: @${MAKE} -n -f bsd.own.mk -V dummy -dg1 | grep ^MK_ | sort .if !empty(KRNLOBJDIR) && !empty(KERNCONF) DTBOUTPUTPATH= ${KRNLOBJDIR}/${KERNCONF}/ .if !defined(FDT_DTS_FILE) || empty(FDT_DTS_FILE) .if exists(${KERNCONFDIR}/${KERNCONF}) FDT_DTS_FILE!= awk 'BEGIN {FS="="} /^makeoptions[[:space:]]+FDT_DTS_FILE/ {print $$2}' \ ${KERNCONFDIR}/${KERNCONF} .endif .endif .endif .if !defined(DTBOUTPUTPATH) || !exists(${DTBOUTPUTPATH}) DTBOUTPUTPATH= ${.CURDIR} .endif # # Build 'standalone' Device Tree Blob # builddtb: @if [ "${FDT_DTS_FILE}" = "" ]; then \ echo "ERROR: FDT_DTS_FILE must be specified!"; \ exit 1; \ fi; \ if [ ! -f ${.CURDIR}/sys/boot/fdt/dts/${FDT_DTS_FILE} ]; then \ echo "ERROR: Specified DTS file (${FDT_DTS_FILE}) does not \ exist!"; \ exit 1; \ fi; \ if [ "${DTBOUTPUTPATH}" = "${.CURDIR}" ]; then \ echo "WARNING: DTB will be placed in the current working \ directory"; \ fi @PATH=${TMPPATH} \ dtc -O dtb -o \ ${DTBOUTPUTPATH}/`echo ${FDT_DTS_FILE} | cut -d. -f1`.dtb -b 0 \ -p 1024 ${.CURDIR}/sys/boot/fdt/dts/${FDT_DTS_FILE} ############### .if defined(XDEV) && defined(XDEV_ARCH) .if ${XDEV} == ${MACHINE} && ${XDEV_ARCH} == ${MACHINE_ARCH} XDEV_CPUTYPE?=${CPUTYPE} .else XDEV_CPUTYPE?=${TARGET_CPUTYPE} .endif NOFUN=-DNO_FSCHG -DWITHOUT_HTML -DWITHOUT_INFO -DNO_LINT \ -DWITHOUT_MAN -DWITHOUT_NLS -DNO_PROFILE \ -DWITHOUT_KERBEROS -DWITHOUT_RESCUE -DNO_WARNS \ TARGET=${XDEV} TARGET_ARCH=${XDEV_ARCH} \ CPUTYPE=${XDEV_CPUTYPE} XDDIR=${XDEV_ARCH}-freebsd XDTP=/usr/${XDDIR} CDBENV=MAKEOBJDIRPREFIX=${MAKEOBJDIRPREFIX}/${XDDIR} CDENV= ${CDBENV} \ _SHLIBDIRPREFIX=${XDTP} \ TOOLS_PREFIX=${XDTP} CD2ENV=${CDENV} \ MACHINE=${XDEV} MACHINE_ARCH=${XDEV_ARCH} CDTMP= ${MAKEOBJDIRPREFIX}/${XDEV}/${.CURDIR}/tmp CDMAKE=${CDENV} ${MAKE} ${NOFUN} CD2MAKE=${CD2ENV} PATH=${CDTMP}/usr/bin:${XDTP}/usr/bin:${PATH} ${MAKE} ${NOFUN} XDDESTDIR=${DESTDIR}${XDTP} .if !defined(OSREL) OSREL!= uname -r | sed -e 's/[-(].*//' .endif .ORDER: xdev-build xdev-install xdev: xdev-build xdev-install .ORDER: _xb-build-tools _xb-cross-tools xdev-build: _xb-build-tools _xb-cross-tools _xb-build-tools: ${_+_}@cd ${.CURDIR}; \ ${CDBENV} ${MAKE} -f Makefile.inc1 ${NOFUN} build-tools _xb-cross-tools: .for _tool in \ gnu/usr.bin/binutils \ gnu/usr.bin/cc \ usr.bin/ar ${_+_}@${ECHODIR} "===> xdev ${_tool} (obj,depend,all)"; \ cd ${.CURDIR}/${_tool}; \ ${CDMAKE} DIRPRFX=${_tool}/ obj; \ ${CDMAKE} DIRPRFX=${_tool}/ depend; \ ${CDMAKE} DIRPRFX=${_tool}/ all .endfor _xi-mtree: ${_+_}@${ECHODIR} "mtree populating ${XDDESTDIR}" mkdir -p ${XDDESTDIR} mtree -deU -f ${.CURDIR}/etc/mtree/BSD.root.dist \ -p ${XDDESTDIR} >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.usr.dist \ -p ${XDDESTDIR}/usr >/dev/null mtree -deU -f ${.CURDIR}/etc/mtree/BSD.include.dist \ -p ${XDDESTDIR}/usr/include >/dev/null .ORDER: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries _xi-links xdev-install: xdev-build _xi-mtree _xi-cross-tools _xi-includes _xi-libraries _xi-links _xi-cross-tools: @echo "_xi-cross-tools" .for _tool in \ gnu/usr.bin/binutils \ gnu/usr.bin/cc \ usr.bin/ar ${_+_}@${ECHODIR} "===> xdev ${_tool} (install)"; \ cd ${.CURDIR}/${_tool}; \ ${CDMAKE} DIRPRFX=${_tool}/ install DESTDIR=${XDDESTDIR} .endfor _xi-includes: ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 par-includes \ DESTDIR=${XDDESTDIR} _xi-libraries: ${_+_}cd ${.CURDIR}; ${CD2MAKE} -f Makefile.inc1 libraries \ DESTDIR=${XDDESTDIR} _xi-links: ${_+_}cd ${XDDESTDIR}/usr/bin; \ for i in *; do \ ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}-$$i; \ 
ln -sf ../../${XDTP}/usr/bin/$$i \ ../../../../usr/bin/${XDDIR}${OSREL}-$$i; \ done .else xdev xdev-build xdev-install: @echo "*** Error: Both XDEV and XDEV_ARCH must be defined for \"${.TARGET}\" target" .endif Index: projects/nand/cddl/contrib/opensolaris/cmd/zdb/zdb.8 =================================================================== --- projects/nand/cddl/contrib/opensolaris/cmd/zdb/zdb.8 (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/cmd/zdb/zdb.8 (revision 235263) @@ -1,79 +1,306 @@ '\" te -.\" Copyright (c) 2011, Martin Matuska . +.\" Copyright (c) 2012, Martin Matuska . .\" All Rights Reserved. .\" -.\" The contents of this file are subject to the terms of the -.\" Common Development and Distribution License (the "License"). -.\" You may not use this file except in compliance with the License. +.\" This file and its contents are supplied under the terms of the +.\" Common Development and Distribution License ("CDDL"), version 1.0. +.\" You may only use this file in accordance with the terms of version +.\" 1.0 of the CDDL. .\" -.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE -.\" or http://www.opensolaris.org/os/licensing. -.\" See the License for the specific language governing permissions -.\" and limitations under the License. +.\" A full copy of the text of the CDDL should have accompanied this +.\" source. A copy of the CDDL is also available via the Internet at +.\" http://www.illumos.org/license/CDDL. .\" -.\" When distributing Covered Code, include this CDDL HEADER in each -.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. -.\" If applicable, add the following below this CDDL HEADER, with the -.\" fields enclosed by brackets "[]" replaced with your own identifying -.\" information: Portions Copyright [yyyy] [name of copyright owner] .\" -.\" Copyright (c) 2004, Sun Microsystems, Inc. All Rights Reserved. +.\" Copyright 2012, Richard Lowe. +.\" Copyright (c) 2012, Marcelo Araujo . +.\" All Rights Reserved. .\" .\" $FreeBSD$ .\" -.Dd November 26, 2011 +.Dd May 10, 2012 .Dt ZDB 8 .Os .Sh NAME .Nm zdb -.Nd ZFS debugger +.Nd Display zpool debugging and consistency information .Sh SYNOPSIS .Nm -.Ar pool -.Sh DESCRIPTION -The +.Op Fl CumdibcsDvhLXFPA +.Op Fl e Op Fl p Ar path... +.Op Fl t Ar txg +.Ar poolname +.Op Ar object ... .Nm -command is used by support engineers to diagnose failures and -gather statistics. Since the -.Tn ZFS -file system is always consistent on disk and is self-repairing, +.Op Fl divPA +.Op Fl e Op Fl p Ar path... +.Ar dataset +.Op Ar object ... .Nm -should only be run under the direction by a support engineer. -.Pp -If no arguments are specified, +.Fl m Op Fl LXFPA +.Op Fl t Ar txg +.Op Fl e Op Fl p Ar path... +.Ar poolname .Nm -performs basic consistency checks on the pool and associated datasets, and -report any problems detected. +.Fl R Op Fl A +.Op Fl e Op Fl p Ar path... +.Ar poolname +.Ar vdev Ns : Ns Ar offset Ns : Ns Ar size Ns Op Ns : Ns Ar flags .Nm -Any options supported by this command are internal to Sun and subject to change -at any time. -.Sh EXIT STATUS -The following exit values are returned: -.Bl -tag -offset 2n -width 2n -.It 0 -The pool is consistent. -.It 1 -An error was detected. -.It 2 -Invalid command line options were specified. +.Fl S +.Op Fl AP +.Op Fl e Op Fl p Ar path...
+.Ar poolname +.Nm +.Fl l +.Op Fl uA +.Ar device +.Nm +.Fl C +.Op Fl A +.Op Fl U Ar cache +.Sh DESCRIPTION +The +.Nm +utility displays information about a ZFS pool useful for debugging and +performs some amount of consistency checking. +It is not a general purpose tool and options (and facilities) may change. +This is neither a +.Xr fsck 8 +nor a +.Xr fsdb 8 +utility. +.Pp +The output of this command in general reflects the on-disk structure of a ZFS +pool, and is inherently unstable. +The precise output of most invocations is not documented; a knowledge of ZFS +internals is assumed. +.Pp +When operating on an imported and active pool it is possible, though unlikely, +that zdb may interpret inconsistent pool data and behave erratically. +.Sh OPTIONS +Display options: +.Bl -tag -width indent +.It Fl b +Display statistics regarding the number, size (logical, physical and +allocated) and deduplication of blocks. +.It Fl c +Verify the checksum of all metadata blocks while printing block statistics +(see +.Fl b Ns ). +.Pp +If specified multiple times, verify the checksums of all blocks. +.It Fl C +Display information about the configuration. If specified with no other +options, instead display information about the cache file +.Ns ( Pa /etc/zfs/zpool.cache Ns ). +To specify the cache file to display, see +.Fl U +.Pp +If specified multiple times, and a pool name is also specified, display both +the cached configuration and the on-disk configuration. +If specified multiple times with +.Fl e +also display the configuration that would be used were the pool to be +imported. +.It Fl d +Display information about datasets. Specified once, displays basic dataset +information: ID, create transaction, size, and object count. +.Pp +If specified multiple times, provides greater and greater verbosity. +.Pp +If object IDs are specified, display information about those specific objects only. +.It Fl D +Display deduplication statistics, including the deduplication ratio (dedup), +compression ratio (compress), inflation due to the zfs copies property +(copies), and an overall effective ratio (dedup * compress / copies). +.Pp +If specified twice, display a histogram of deduplication statistics, showing +the allocated (physically present on disk) and referenced (logically +referenced in the pool) block counts and sizes by reference count. +.It Fl h +Display pool history similar to +.Cm zpool history , +but include internal changes, transaction, and dataset information. +.It Fl i +Display information about intent log (ZIL) entries relating to each +dataset. +If specified multiple times, display counts of each intent log transaction +type. +.It Fl l Ar device +Display the vdev labels from the specified device. +If the +.Fl u +option is also specified, also display the uberblocks on this device. +.It Fl L +Disable leak tracing and the loading of space maps. +By default, +.Nm +verifies that all non-free blocks are referenced, which can be very expensive. +.It Fl m +Display the offset, spacemap, and free space of each metaslab. +When specified twice, also display information about the maximum contiguous +free space and the percentage of free space in each space map. +When specified three times, display every spacemap record. +.It Xo +.Fl R Ar poolname +.Ar vdev Ns : Ns Ar offset Ns : Ns Ar size Ns Op Ns : Ns Ar flags +.Xc +Read and display a block from the specified device. By default the block is +displayed as a hex dump, but see the description of the +.Fl r +flag, below.
+.Pp +The block is specified in terms of a colon-separated tuple +.Ar vdev +(an integer vdev identifier) +.Ar offset +(the offset within the vdev) +.Ar size +(the size of the block to read) and, optionally, +.Ar flags +(a set of flags, described below). +.Bl -tag -width indent +.It Sy b +Print block pointer +.It Sy d +Decompress the block +.It Sy e +Byte swap the block +.It Sy g +Dump gang block header +.It Sy i +Dump indirect block +.It Sy r +Dump raw uninterpreted block data .El +.It Fl s +Report statistics on +.Nm Ns 's +I/O. +Display operation counts, bandwidth, and error counts of I/O to the pool from +.Nm . +.It Fl S +Simulate the effects of deduplication, constructing a DDT and then displaying +that DDT as with \fB-DD\fR. +.It Fl u +Display the current uberblock. +.El +.Pp +Other options: +.Bl -tag -width indent +.It Fl A +Do not abort should any assertion fail. +.It Fl AA +Enable panic recovery; certain errors which would otherwise be fatal are +demoted to warnings. +.It Fl AAA +Do not abort if asserts fail and also enable panic recovery. +.It Fl e Op Fl p Ar path... +Operate on an exported pool, not present in +.Pa /etc/zfs/zpool.cache . +The +.Fl p +flag specifies the path under which devices are to be searched. +.It Fl F +Attempt to make an unreadable pool readable by trying progressively older +transactions. +.It Fl P +Print numbers in an unscaled form more amenable to parsing, e.g. 1000000 rather +than 1M. +.It Fl t Ar transaction +Specify the highest transaction to use when searching for uberblocks. +See also the +.Fl u +and +.Fl l +options for a means to see the available uberblocks and their associated +transaction numbers. +.It Fl U Ar cachefile +Use a cache file other than +.Pa /etc/zfs/zpool.cache . +This option is only valid with +.Fl C +.It Fl v +Enable verbosity. +Specify multiple times for increased verbosity. +.It Fl X +Attempt +.Ql extreme +transaction rewind, that is, attempt the same recovery as +.Fl F +but read transactions otherwise deemed too old. +.El +.Pp +Specifying a display option more than once enables verbosity for only that +option, with more occurrences enabling more verbosity. +.Pp +If no options are specified, all information about the named pool will be +displayed at default verbosity. +.Sh EXAMPLES +.Bl -tag -width 0n +.It Sy Example 1 Display the configuration of imported pool 'rpool' +.Bd -literal -offset 2n +.Li # Ic zdb -C rpool + +MOS Configuration: + version: 28 + name: 'rpool' + ... +.Ed +.It Sy Example 2 Display basic dataset information about 'rpool' +.Bd -literal -offset 2n +.Li # Ic zdb -d rpool +Dataset mos [META], ID 0, cr_txg 4, 26.9M, 1051 objects +Dataset rpool/swap [ZVOL], ID 59, cr_txg 356, 486M, 2 objects +... +.Ed +.It Xo Sy Example 3 Display basic information about object 0 in +.Sy 'rpool/export/home' +.Xc +.Bd -literal -offset 2n +.Li # Ic zdb -d rpool/export/home 0 +Dataset rpool/export/home [ZPL], ID 137, cr_txg 1546, 32K, 8 objects + + Object lvl iblk dblk dsize lsize %full type + 0 7 16K 16K 15.0K 16K 25.00 DMU dnode +.Ed +.It Xo Sy Example 4 Display the predicted effect of enabling deduplication on +.Sy 'rpool' +.Xc +.Bd -literal -offset 2n +.Li # Ic zdb -S rpool +Simulated DDT histogram: + +bucket allocated referenced +______ ______________________________ ______________________________ +refcnt blocks LSIZE PSIZE DSIZE blocks LSIZE PSIZE DSIZE +------ ------ ----- ----- ----- ------ ----- ----- ----- + 1 694K 27.1G 15.0G 15.0G 694K 27.1G 15.0G 15.0G + 2 35.0K 1.33G 699M 699M 74.7K 2.79G 1.45G 1.45G + ...
+dedup = 1.11, compress = 1.80, copies = 1.00, dedup * compress / copies = 2.00 +.Ed +.El .Sh SEE ALSO .Xr zfs 8 , .Xr zpool 8 .Sh AUTHORS This manual page is a .Xr mdoc 7 reimplementation of the -.Tn OpenSolaris +.Tn illumos manual page .Em zdb(1M) , modified and customized for .Fx and licensed under the -.Tn Common Development and Distribution License +Common Development and Distribution License .Pq Tn CDDL . .Pp The .Xr mdoc 7 implementation of this manual page was initially written by -.An Martin Matuska Aq mm@FreeBSD.org . +.An Martin Matuska Aq mm@FreeBSD.org +and +.An Marcelo Araujo Aq araujo@FreeBSD.org . Index: projects/nand/cddl/contrib/opensolaris/cmd/zdb/zdb.c =================================================================== --- projects/nand/cddl/contrib/opensolaris/cmd/zdb/zdb.c (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/cmd/zdb/zdb.c (revision 235263) @@ -1,3160 +1,3163 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #undef ZFS_MAXNAMELEN #undef verify #include #define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \ zio_compress_table[(idx)].ci_name : "UNKNOWN") #define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \ zio_checksum_table[(idx)].ci_name : "UNKNOWN") #define ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ? \ dmu_ot[(idx)].ot_name : "UNKNOWN") #define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : DMU_OT_NUMTYPES) #ifndef lint extern int zfs_recover; #else int zfs_recover; #endif const char cmdname[] = "zdb"; uint8_t dump_opt[256]; typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size); extern void dump_intent_log(zilog_t *); uint64_t *zopt_object = NULL; int zopt_objects = 0; libzfs_handle_t *g_zfs; /* * These libumem hooks provide a reasonable set of defaults for the allocator's * debugging facilities. 
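 *
 * The dedup, compress and copies figures that the -D and -S options
 * report (cf. Example 4 in the manual page above) are plain ratios
 * over the referenced and allocated sizes; dump_dedup_ratio() below
 * computes them exactly this way. A minimal sketch of that arithmetic
 * follows; the function and parameter names are illustrative only and
 * not part of the original source:
 */
static double
example_dedup_ratios(double ref_lsize, double ref_psize, double ref_dsize,
    double dsize)
{
	double dedup = ref_dsize / dsize;	/* referenced / allocated */
	double compress = ref_lsize / ref_psize;	/* logical / physical */
	double copies = ref_dsize / ref_psize;	/* zfs "copies" inflation */

	/* reported as "dedup * compress / copies" */
	return (dedup * compress / copies);
}

/*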
*/ const char * _umem_debug_init() { return ("default,verbose"); /* $UMEM_DEBUG setting */ } const char * _umem_logging_init(void) { return ("fail,contents"); /* $UMEM_LOGGING setting */ } static void usage(void) { (void) fprintf(stderr, - "Usage: %s [-CumdibcsDvhL] poolname [object...]\n" - " %s [-div] dataset [object...]\n" - " %s -m [-L] poolname [vdev [metaslab...]]\n" - " %s -R poolname vdev:offset:size[:flags]\n" - " %s -S poolname\n" - " %s -l [-u] device\n" - " %s -C\n\n", + "Usage: %s [-CumdibcsDvhLXFPA] [-t txg] [-e [-p path...]] " + "poolname [object...]\n" + " %s [-divPA] [-e -p path...] dataset [object...]\n" + " %s -m [-LXFPA] [-t txg] [-e [-p path...]] " + "poolname [vdev [metaslab...]]\n" + " %s -R [-A] [-e [-p path...]] poolname " + "vdev:offset:size[:flags]\n" + " %s -S [-PA] [-e [-p path...]] poolname\n" + " %s -l [-uA] device\n" + " %s -C [-A] [-U config]\n\n", cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname); (void) fprintf(stderr, " Dataset name must include at least one " "separator character '/' or '@'\n"); (void) fprintf(stderr, " If dataset name is specified, only that " "dataset is dumped\n"); (void) fprintf(stderr, " If object numbers are specified, only " "those objects are dumped\n\n"); (void) fprintf(stderr, " Options to control amount of output:\n"); (void) fprintf(stderr, " -u uberblock\n"); (void) fprintf(stderr, " -d dataset(s)\n"); (void) fprintf(stderr, " -i intent logs\n"); (void) fprintf(stderr, " -C config (or cachefile if alone)\n"); (void) fprintf(stderr, " -h pool history\n"); (void) fprintf(stderr, " -b block statistics\n"); (void) fprintf(stderr, " -m metaslabs\n"); (void) fprintf(stderr, " -c checksum all metadata (twice for " "all data) blocks\n"); (void) fprintf(stderr, " -s report stats on zdb's I/O\n"); (void) fprintf(stderr, " -D dedup statistics\n"); (void) fprintf(stderr, " -S simulate dedup to measure effect\n"); (void) fprintf(stderr, " -v verbose (applies to all others)\n"); (void) fprintf(stderr, " -l dump label contents\n"); (void) fprintf(stderr, " -L disable leak tracking (do not " "load spacemaps)\n"); (void) fprintf(stderr, " -R read and display block from a " "device\n\n"); (void) fprintf(stderr, " Below options are intended for use " "with other options (except -l):\n"); (void) fprintf(stderr, " -A ignore assertions (-A), enable " "panic recovery (-AA) or both (-AAA)\n"); (void) fprintf(stderr, " -F attempt automatic rewind within " "safe range of transaction groups\n"); (void) fprintf(stderr, " -U <cachefile_path> -- use alternate " "cachefile\n"); (void) fprintf(stderr, " -X attempt extreme rewind (does not " "work with dataset)\n"); (void) fprintf(stderr, " -e pool is exported/destroyed/" "has altroot/not in a cachefile\n"); (void) fprintf(stderr, " -p <path> -- use one or more with " "-e to specify path to vdev dir\n"); - (void) fprintf(stderr, " -P print numbers parsable\n"); + (void) fprintf(stderr, " -P print numbers in parseable form\n"); (void) fprintf(stderr, " -t <txg> -- highest txg to use when " "searching for uberblocks\n"); (void) fprintf(stderr, "Specify an option more than once (e.g. -bb) " "to make only that option verbose\n"); (void) fprintf(stderr, "Default is to dump everything non-verbosely\n"); exit(1); } /* * Called for usage errors that are discovered after a call to spa_open(), * dmu_bonus_hold(), or pool_match(). abort() is called for other errors. */ static void fatal(const char *fmt, ...)
{ va_list ap; va_start(ap, fmt); (void) fprintf(stderr, "%s: ", cmdname); (void) vfprintf(stderr, fmt, ap); va_end(ap); (void) fprintf(stderr, "\n"); exit(1); } /* ARGSUSED */ static void dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size) { nvlist_t *nv; size_t nvsize = *(uint64_t *)data; char *packed = umem_alloc(nvsize, UMEM_NOFAIL); VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH)); VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0); umem_free(packed, nvsize); dump_nvlist(nv, 8); nvlist_free(nv); } static void zdb_nicenum(uint64_t num, char *buf) { if (dump_opt['P']) (void) sprintf(buf, "%llu", (longlong_t)num); else nicenum(num, buf); } const char dump_zap_stars[] = "****************************************"; const int dump_zap_width = sizeof (dump_zap_stars) - 1; static void dump_zap_histogram(uint64_t histo[ZAP_HISTOGRAM_SIZE]) { int i; int minidx = ZAP_HISTOGRAM_SIZE - 1; int maxidx = 0; uint64_t max = 0; for (i = 0; i < ZAP_HISTOGRAM_SIZE; i++) { if (histo[i] > max) max = histo[i]; if (histo[i] > 0 && i > maxidx) maxidx = i; if (histo[i] > 0 && i < minidx) minidx = i; } if (max < dump_zap_width) max = dump_zap_width; for (i = minidx; i <= maxidx; i++) (void) printf("\t\t\t%u: %6llu %s\n", i, (u_longlong_t)histo[i], &dump_zap_stars[(max - histo[i]) * dump_zap_width / max]); } static void dump_zap_stats(objset_t *os, uint64_t object) { int error; zap_stats_t zs; error = zap_get_stats(os, object, &zs); if (error) return; if (zs.zs_ptrtbl_len == 0) { ASSERT(zs.zs_num_blocks == 1); (void) printf("\tmicrozap: %llu bytes, %llu entries\n", (u_longlong_t)zs.zs_blocksize, (u_longlong_t)zs.zs_num_entries); return; } (void) printf("\tFat ZAP stats:\n"); (void) printf("\t\tPointer table:\n"); (void) printf("\t\t\t%llu elements\n", (u_longlong_t)zs.zs_ptrtbl_len); (void) printf("\t\t\tzt_blk: %llu\n", (u_longlong_t)zs.zs_ptrtbl_zt_blk); (void) printf("\t\t\tzt_numblks: %llu\n", (u_longlong_t)zs.zs_ptrtbl_zt_numblks); (void) printf("\t\t\tzt_shift: %llu\n", (u_longlong_t)zs.zs_ptrtbl_zt_shift); (void) printf("\t\t\tzt_blks_copied: %llu\n", (u_longlong_t)zs.zs_ptrtbl_blks_copied); (void) printf("\t\t\tzt_nextblk: %llu\n", (u_longlong_t)zs.zs_ptrtbl_nextblk); (void) printf("\t\tZAP entries: %llu\n", (u_longlong_t)zs.zs_num_entries); (void) printf("\t\tLeaf blocks: %llu\n", (u_longlong_t)zs.zs_num_leafs); (void) printf("\t\tTotal blocks: %llu\n", (u_longlong_t)zs.zs_num_blocks); (void) printf("\t\tzap_block_type: 0x%llx\n", (u_longlong_t)zs.zs_block_type); (void) printf("\t\tzap_magic: 0x%llx\n", (u_longlong_t)zs.zs_magic); (void) printf("\t\tzap_salt: 0x%llx\n", (u_longlong_t)zs.zs_salt); (void) printf("\t\tLeafs with 2^n pointers:\n"); dump_zap_histogram(zs.zs_leafs_with_2n_pointers); (void) printf("\t\tBlocks with n*5 entries:\n"); dump_zap_histogram(zs.zs_blocks_with_n5_entries); (void) printf("\t\tBlocks n/10 full:\n"); dump_zap_histogram(zs.zs_blocks_n_tenths_full); (void) printf("\t\tEntries with n chunks:\n"); dump_zap_histogram(zs.zs_entries_using_n_chunks); (void) printf("\t\tBuckets with n entries:\n"); dump_zap_histogram(zs.zs_buckets_with_n_entries); } /*ARGSUSED*/ static void dump_none(objset_t *os, uint64_t object, void *data, size_t size) { } /*ARGSUSED*/ static void dump_unknown(objset_t *os, uint64_t object, void *data, size_t size) { (void) printf("\tUNKNOWN OBJECT TYPE\n"); } /*ARGSUSED*/ void dump_uint8(objset_t *os, uint64_t object, void *data, size_t size) { } /*ARGSUSED*/ static void dump_uint64(objset_t *os, uint64_t object, void 
*data, size_t size) { } /*ARGSUSED*/ static void dump_zap(objset_t *os, uint64_t object, void *data, size_t size) { zap_cursor_t zc; zap_attribute_t attr; void *prop; int i; dump_zap_stats(os, object); (void) printf("\n"); for (zap_cursor_init(&zc, os, object); zap_cursor_retrieve(&zc, &attr) == 0; zap_cursor_advance(&zc)) { (void) printf("\t\t%s = ", attr.za_name); if (attr.za_num_integers == 0) { (void) printf("\n"); continue; } prop = umem_zalloc(attr.za_num_integers * attr.za_integer_length, UMEM_NOFAIL); (void) zap_lookup(os, object, attr.za_name, attr.za_integer_length, attr.za_num_integers, prop); if (attr.za_integer_length == 1) { (void) printf("%s", (char *)prop); } else { for (i = 0; i < attr.za_num_integers; i++) { switch (attr.za_integer_length) { case 2: (void) printf("%u ", ((uint16_t *)prop)[i]); break; case 4: (void) printf("%u ", ((uint32_t *)prop)[i]); break; case 8: (void) printf("%lld ", (u_longlong_t)((int64_t *)prop)[i]); break; } } } (void) printf("\n"); umem_free(prop, attr.za_num_integers * attr.za_integer_length); } zap_cursor_fini(&zc); } /*ARGSUSED*/ static void dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size) { dump_zap_stats(os, object); /* contents are printed elsewhere, properly decoded */ } /*ARGSUSED*/ static void dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size) { zap_cursor_t zc; zap_attribute_t attr; dump_zap_stats(os, object); (void) printf("\n"); for (zap_cursor_init(&zc, os, object); zap_cursor_retrieve(&zc, &attr) == 0; zap_cursor_advance(&zc)) { (void) printf("\t\t%s = ", attr.za_name); if (attr.za_num_integers == 0) { (void) printf("\n"); continue; } (void) printf(" %llx : [%d:%d:%d]\n", (u_longlong_t)attr.za_first_integer, (int)ATTR_LENGTH(attr.za_first_integer), (int)ATTR_BSWAP(attr.za_first_integer), (int)ATTR_NUM(attr.za_first_integer)); } zap_cursor_fini(&zc); } /*ARGSUSED*/ static void dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size) { zap_cursor_t zc; zap_attribute_t attr; uint16_t *layout_attrs; int i; dump_zap_stats(os, object); (void) printf("\n"); for (zap_cursor_init(&zc, os, object); zap_cursor_retrieve(&zc, &attr) == 0; zap_cursor_advance(&zc)) { (void) printf("\t\t%s = [", attr.za_name); if (attr.za_num_integers == 0) { (void) printf("\n"); continue; } VERIFY(attr.za_integer_length == 2); layout_attrs = umem_zalloc(attr.za_num_integers * attr.za_integer_length, UMEM_NOFAIL); VERIFY(zap_lookup(os, object, attr.za_name, attr.za_integer_length, attr.za_num_integers, layout_attrs) == 0); for (i = 0; i != attr.za_num_integers; i++) (void) printf(" %d ", (int)layout_attrs[i]); (void) printf("]\n"); umem_free(layout_attrs, attr.za_num_integers * attr.za_integer_length); } zap_cursor_fini(&zc); } /*ARGSUSED*/ static void dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size) { zap_cursor_t zc; zap_attribute_t attr; const char *typenames[] = { /* 0 */ "not specified", /* 1 */ "FIFO", /* 2 */ "Character Device", /* 3 */ "3 (invalid)", /* 4 */ "Directory", /* 5 */ "5 (invalid)", /* 6 */ "Block Device", /* 7 */ "7 (invalid)", /* 8 */ "Regular File", /* 9 */ "9 (invalid)", /* 10 */ "Symbolic Link", /* 11 */ "11 (invalid)", /* 12 */ "Socket", /* 13 */ "Door", /* 14 */ "Event Port", /* 15 */ "15 (invalid)", }; dump_zap_stats(os, object); (void) printf("\n"); for (zap_cursor_init(&zc, os, object); zap_cursor_retrieve(&zc, &attr) == 0; zap_cursor_advance(&zc)) { (void) printf("\t\t%s = %lld (type: %s)\n", attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer), 
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]); } zap_cursor_fini(&zc); } static void dump_spacemap(objset_t *os, space_map_obj_t *smo, space_map_t *sm) { uint64_t alloc, offset, entry; uint8_t mapshift = sm->sm_shift; uint64_t mapstart = sm->sm_start; char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID", "INVALID", "INVALID", "INVALID", "INVALID" }; if (smo->smo_object == 0) return; /* * Print out the freelist entries in both encoded and decoded form. */ alloc = 0; for (offset = 0; offset < smo->smo_objsize; offset += sizeof (entry)) { VERIFY3U(0, ==, dmu_read(os, smo->smo_object, offset, sizeof (entry), &entry, DMU_READ_PREFETCH)); if (SM_DEBUG_DECODE(entry)) { (void) printf("\t [%6llu] %s: txg %llu, pass %llu\n", (u_longlong_t)(offset / sizeof (entry)), ddata[SM_DEBUG_ACTION_DECODE(entry)], (u_longlong_t)SM_DEBUG_TXG_DECODE(entry), (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry)); } else { (void) printf("\t [%6llu] %c range:" " %010llx-%010llx size: %06llx\n", (u_longlong_t)(offset / sizeof (entry)), SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F', (u_longlong_t)((SM_OFFSET_DECODE(entry) << mapshift) + mapstart), (u_longlong_t)((SM_OFFSET_DECODE(entry) << mapshift) + mapstart + (SM_RUN_DECODE(entry) << mapshift)), (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift)); if (SM_TYPE_DECODE(entry) == SM_ALLOC) alloc += SM_RUN_DECODE(entry) << mapshift; else alloc -= SM_RUN_DECODE(entry) << mapshift; } } if (alloc != smo->smo_alloc) { (void) printf("space_map_object alloc (%llu) INCONSISTENT " "with space map summary (%llu)\n", (u_longlong_t)smo->smo_alloc, (u_longlong_t)alloc); } } static void dump_metaslab_stats(metaslab_t *msp) { char maxbuf[32]; space_map_t *sm = &msp->ms_map; avl_tree_t *t = sm->sm_pp_root; int free_pct = sm->sm_space * 100 / sm->sm_size; zdb_nicenum(space_map_maxsize(sm), maxbuf); (void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n", "segments", avl_numnodes(t), "maxsize", maxbuf, "freepct", free_pct); } static void dump_metaslab(metaslab_t *msp) { vdev_t *vd = msp->ms_group->mg_vd; spa_t *spa = vd->vdev_spa; space_map_t *sm = &msp->ms_map; space_map_obj_t *smo = &msp->ms_smo; char freebuf[32]; zdb_nicenum(sm->sm_size - smo->smo_alloc, freebuf); (void) printf( "\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n", (u_longlong_t)(sm->sm_start / sm->sm_size), (u_longlong_t)sm->sm_start, (u_longlong_t)smo->smo_object, freebuf); if (dump_opt['m'] > 1 && !dump_opt['L']) { mutex_enter(&msp->ms_lock); space_map_load_wait(sm); if (!sm->sm_loaded) VERIFY(space_map_load(sm, zfs_metaslab_ops, SM_FREE, smo, spa->spa_meta_objset) == 0); dump_metaslab_stats(msp); space_map_unload(sm); mutex_exit(&msp->ms_lock); } if (dump_opt['d'] > 5 || dump_opt['m'] > 2) { ASSERT(sm->sm_size == (1ULL << vd->vdev_ms_shift)); mutex_enter(&msp->ms_lock); dump_spacemap(spa->spa_meta_objset, smo, sm); mutex_exit(&msp->ms_lock); } } static void print_vdev_metaslab_header(vdev_t *vd) { (void) printf("\tvdev %10llu\n\t%-10s%5llu %-19s %-15s %-10s\n", (u_longlong_t)vd->vdev_id, "metaslabs", (u_longlong_t)vd->vdev_ms_count, "offset", "spacemap", "free"); (void) printf("\t%15s %19s %15s %10s\n", "---------------", "-------------------", "---------------", "-------------"); } static void dump_metaslabs(spa_t *spa) { vdev_t *vd, *rvd = spa->spa_root_vdev; uint64_t m, c = 0, children = rvd->vdev_children; (void) printf("\nMetaslabs:\n"); if (!dump_opt['d'] && zopt_objects > 0) { c = zopt_object[0]; if (c >= children) (void) fatal("bad vdev id: %llu", (u_longlong_t)c); if (zopt_objects > 1) { vd = 
rvd->vdev_child[c]; print_vdev_metaslab_header(vd); for (m = 1; m < zopt_objects; m++) { if (zopt_object[m] < vd->vdev_ms_count) dump_metaslab( vd->vdev_ms[zopt_object[m]]); else (void) fprintf(stderr, "bad metaslab " "number %llu\n", (u_longlong_t)zopt_object[m]); } (void) printf("\n"); return; } children = c + 1; } for (; c < children; c++) { vd = rvd->vdev_child[c]; print_vdev_metaslab_header(vd); for (m = 0; m < vd->vdev_ms_count; m++) dump_metaslab(vd->vdev_ms[m]); (void) printf("\n"); } } static void dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index) { const ddt_phys_t *ddp = dde->dde_phys; const ddt_key_t *ddk = &dde->dde_key; char *types[4] = { "ditto", "single", "double", "triple" }; char blkbuf[BP_SPRINTF_LEN]; blkptr_t blk; for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { if (ddp->ddp_phys_birth == 0) continue; ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk); sprintf_blkptr(blkbuf, &blk); (void) printf("index %llx refcnt %llu %s %s\n", (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt, types[p], blkbuf); } } static void dump_dedup_ratio(const ddt_stat_t *dds) { double rL, rP, rD, D, dedup, compress, copies; if (dds->dds_blocks == 0) return; rL = (double)dds->dds_ref_lsize; rP = (double)dds->dds_ref_psize; rD = (double)dds->dds_ref_dsize; D = (double)dds->dds_dsize; dedup = rD / D; compress = rL / rP; copies = rD / rP; (void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, " "dedup * compress / copies = %.2f\n\n", dedup, compress, copies, dedup * compress / copies); } static void dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class) { char name[DDT_NAMELEN]; ddt_entry_t dde; uint64_t walk = 0; dmu_object_info_t doi; uint64_t count, dspace, mspace; int error; error = ddt_object_info(ddt, type, class, &doi); if (error == ENOENT) return; ASSERT(error == 0); if ((count = ddt_object_count(ddt, type, class)) == 0) return; dspace = doi.doi_physical_blocks_512 << 9; mspace = doi.doi_fill_count * doi.doi_data_block_size; ddt_object_name(ddt, type, class, name); (void) printf("%s: %llu entries, size %llu on disk, %llu in core\n", name, (u_longlong_t)count, (u_longlong_t)(dspace / count), (u_longlong_t)(mspace / count)); if (dump_opt['D'] < 3) return; zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]); if (dump_opt['D'] < 4) return; if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE) return; (void) printf("%s contents:\n\n", name); while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0) dump_dde(ddt, &dde, walk); ASSERT(error == ENOENT); (void) printf("\n"); } static void dump_all_ddts(spa_t *spa) { ddt_histogram_t ddh_total = { 0 }; ddt_stat_t dds_total = { 0 }; for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { ddt_t *ddt = spa->spa_ddt[c]; for (enum ddt_type type = 0; type < DDT_TYPES; type++) { for (enum ddt_class class = 0; class < DDT_CLASSES; class++) { dump_ddt(ddt, type, class); } } } ddt_get_dedup_stats(spa, &dds_total); if (dds_total.dds_blocks == 0) { (void) printf("All DDTs are empty\n"); return; } (void) printf("\n"); if (dump_opt['D'] > 1) { (void) printf("DDT histogram (aggregated over all DDTs):\n"); ddt_get_dedup_histogram(spa, &ddh_total); zpool_dump_ddt(&dds_total, &ddh_total); } dump_dedup_ratio(&dds_total); } static void dump_dtl_seg(space_map_t *sm, uint64_t start, uint64_t size) { char *prefix = (void *)sm; (void) printf("%s [%llu,%llu) length %llu\n", prefix, (u_longlong_t)start, (u_longlong_t)(start + size), (u_longlong_t)(size)); } static void dump_dtl(vdev_t *vd, int indent) { spa_t *spa = 
vd->vdev_spa; boolean_t required; char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" }; char prefix[256]; spa_vdev_state_enter(spa, SCL_NONE); required = vdev_dtl_required(vd); (void) spa_vdev_state_exit(spa, NULL, 0); if (indent == 0) (void) printf("\nDirty time logs:\n\n"); (void) printf("\t%*s%s [%s]\n", indent, "", vd->vdev_path ? vd->vdev_path : vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa), required ? "DTL-required" : "DTL-expendable"); for (int t = 0; t < DTL_TYPES; t++) { space_map_t *sm = &vd->vdev_dtl[t]; if (sm->sm_space == 0) continue; (void) snprintf(prefix, sizeof (prefix), "\t%*s%s", indent + 2, "", name[t]); mutex_enter(sm->sm_lock); space_map_walk(sm, dump_dtl_seg, (void *)prefix); mutex_exit(sm->sm_lock); if (dump_opt['d'] > 5 && vd->vdev_children == 0) dump_spacemap(spa->spa_meta_objset, &vd->vdev_dtl_smo, sm); } for (int c = 0; c < vd->vdev_children; c++) dump_dtl(vd->vdev_child[c], indent + 4); } static void dump_history(spa_t *spa) { nvlist_t **events = NULL; char buf[SPA_MAXBLOCKSIZE]; uint64_t resid, len, off = 0; uint_t num = 0; int error; time_t tsec; struct tm t; char tbuf[30]; char internalstr[MAXPATHLEN]; do { len = sizeof (buf); if ((error = spa_history_get(spa, &off, &len, buf)) != 0) { (void) fprintf(stderr, "Unable to read history: " "error %d\n", error); return; } if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0) break; off -= resid; } while (len != 0); (void) printf("\nHistory:\n"); for (int i = 0; i < num; i++) { uint64_t time, txg, ievent; char *cmd, *intstr; if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME, &time) != 0) continue; if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD, &cmd) != 0) { if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_INT_EVENT, &ievent) != 0) continue; verify(nvlist_lookup_uint64(events[i], ZPOOL_HIST_TXG, &txg) == 0); verify(nvlist_lookup_string(events[i], ZPOOL_HIST_INT_STR, &intstr) == 0); if (ievent >= LOG_END) continue; (void) snprintf(internalstr, sizeof (internalstr), "[internal %s txg:%lld] %s", zfs_history_event_names[ievent], txg, intstr); cmd = internalstr; } tsec = time; (void) localtime_r(&tsec, &t); (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); (void) printf("%s %s\n", tbuf, cmd); } } /*ARGSUSED*/ static void dump_dnode(objset_t *os, uint64_t object, void *data, size_t size) { } static uint64_t blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, const zbookmark_t *zb) { if (dnp == NULL) { ASSERT(zb->zb_level < 0); if (zb->zb_object == 0) return (zb->zb_blkid); return (zb->zb_blkid * BP_GET_LSIZE(bp)); } ASSERT(zb->zb_level >= 0); return ((zb->zb_blkid << (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) * dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT); } static void sprintf_blkptr_compact(char *blkbuf, const blkptr_t *bp) { const dva_t *dva = bp->blk_dva; int ndvas = dump_opt['d'] > 5 ? 
BP_GET_NDVAS(bp) : 1; if (dump_opt['b'] >= 5) { sprintf_blkptr(blkbuf, bp); return; } blkbuf[0] = '\0'; for (int i = 0; i < ndvas; i++) (void) sprintf(blkbuf + strlen(blkbuf), "%llu:%llx:%llx ", (u_longlong_t)DVA_GET_VDEV(&dva[i]), (u_longlong_t)DVA_GET_OFFSET(&dva[i]), (u_longlong_t)DVA_GET_ASIZE(&dva[i])); (void) sprintf(blkbuf + strlen(blkbuf), "%llxL/%llxP F=%llu B=%llu/%llu", (u_longlong_t)BP_GET_LSIZE(bp), (u_longlong_t)BP_GET_PSIZE(bp), (u_longlong_t)bp->blk_fill, (u_longlong_t)bp->blk_birth, (u_longlong_t)BP_PHYSICAL_BIRTH(bp)); } static void print_indirect(blkptr_t *bp, const zbookmark_t *zb, const dnode_phys_t *dnp) { char blkbuf[BP_SPRINTF_LEN]; int l; ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type); ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level); (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb)); ASSERT(zb->zb_level >= 0); for (l = dnp->dn_nlevels - 1; l >= -1; l--) { if (l == zb->zb_level) { (void) printf("L%llx", (u_longlong_t)zb->zb_level); } else { (void) printf(" "); } } sprintf_blkptr_compact(blkbuf, bp); (void) printf("%s\n", blkbuf); } static int visit_indirect(spa_t *spa, const dnode_phys_t *dnp, blkptr_t *bp, const zbookmark_t *zb) { int err = 0; if (bp->blk_birth == 0) return (0); print_indirect(bp, zb, dnp); if (BP_GET_LEVEL(bp) > 0) { uint32_t flags = ARC_WAIT; int i; blkptr_t *cbp; int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; arc_buf_t *buf; uint64_t fill = 0; err = arc_read_nolock(NULL, spa, bp, arc_getbuf_func, &buf, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb); if (err) return (err); ASSERT(buf->b_data); /* recursively visit blocks below this */ cbp = buf->b_data; for (i = 0; i < epb; i++, cbp++) { zbookmark_t czb; SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, zb->zb_level - 1, zb->zb_blkid * epb + i); err = visit_indirect(spa, dnp, cbp, &czb); if (err) break; fill += cbp->blk_fill; } if (!err) ASSERT3U(fill, ==, bp->blk_fill); (void) arc_buf_remove_ref(buf, &buf); } return (err); } /*ARGSUSED*/ static void dump_indirect(dnode_t *dn) { dnode_phys_t *dnp = dn->dn_phys; int j; zbookmark_t czb; (void) printf("Indirect blocks:\n"); SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset), dn->dn_object, dnp->dn_nlevels - 1, 0); for (j = 0; j < dnp->dn_nblkptr; j++) { czb.zb_blkid = j; (void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp, &dnp->dn_blkptr[j], &czb); } (void) printf("\n"); } /*ARGSUSED*/ static void dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size) { dsl_dir_phys_t *dd = data; time_t crtime; char nice[32]; if (dd == NULL) return; ASSERT3U(size, >=, sizeof (dsl_dir_phys_t)); crtime = dd->dd_creation_time; (void) printf("\t\tcreation_time = %s", ctime(&crtime)); (void) printf("\t\thead_dataset_obj = %llu\n", (u_longlong_t)dd->dd_head_dataset_obj); (void) printf("\t\tparent_dir_obj = %llu\n", (u_longlong_t)dd->dd_parent_obj); (void) printf("\t\torigin_obj = %llu\n", (u_longlong_t)dd->dd_origin_obj); (void) printf("\t\tchild_dir_zapobj = %llu\n", (u_longlong_t)dd->dd_child_dir_zapobj); zdb_nicenum(dd->dd_used_bytes, nice); (void) printf("\t\tused_bytes = %s\n", nice); zdb_nicenum(dd->dd_compressed_bytes, nice); (void) printf("\t\tcompressed_bytes = %s\n", nice); zdb_nicenum(dd->dd_uncompressed_bytes, nice); (void) printf("\t\tuncompressed_bytes = %s\n", nice); zdb_nicenum(dd->dd_quota, nice); (void) printf("\t\tquota = %s\n", nice); zdb_nicenum(dd->dd_reserved, nice); (void) printf("\t\treserved = %s\n", nice); (void) printf("\t\tprops_zapobj = %llu\n", (u_longlong_t)dd->dd_props_zapobj); (void) 
printf("\t\tdeleg_zapobj = %llu\n", (u_longlong_t)dd->dd_deleg_zapobj); (void) printf("\t\tflags = %llx\n", (u_longlong_t)dd->dd_flags); #define DO(which) \ zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \ (void) printf("\t\tused_breakdown[" #which "] = %s\n", nice) DO(HEAD); DO(SNAP); DO(CHILD); DO(CHILD_RSRV); DO(REFRSRV); #undef DO } /*ARGSUSED*/ static void dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size) { dsl_dataset_phys_t *ds = data; time_t crtime; char used[32], compressed[32], uncompressed[32], unique[32]; char blkbuf[BP_SPRINTF_LEN]; if (ds == NULL) return; ASSERT(size == sizeof (*ds)); crtime = ds->ds_creation_time; zdb_nicenum(ds->ds_used_bytes, used); zdb_nicenum(ds->ds_compressed_bytes, compressed); zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed); zdb_nicenum(ds->ds_unique_bytes, unique); sprintf_blkptr(blkbuf, &ds->ds_bp); (void) printf("\t\tdir_obj = %llu\n", (u_longlong_t)ds->ds_dir_obj); (void) printf("\t\tprev_snap_obj = %llu\n", (u_longlong_t)ds->ds_prev_snap_obj); (void) printf("\t\tprev_snap_txg = %llu\n", (u_longlong_t)ds->ds_prev_snap_txg); (void) printf("\t\tnext_snap_obj = %llu\n", (u_longlong_t)ds->ds_next_snap_obj); (void) printf("\t\tsnapnames_zapobj = %llu\n", (u_longlong_t)ds->ds_snapnames_zapobj); (void) printf("\t\tnum_children = %llu\n", (u_longlong_t)ds->ds_num_children); (void) printf("\t\tuserrefs_obj = %llu\n", (u_longlong_t)ds->ds_userrefs_obj); (void) printf("\t\tcreation_time = %s", ctime(&crtime)); (void) printf("\t\tcreation_txg = %llu\n", (u_longlong_t)ds->ds_creation_txg); (void) printf("\t\tdeadlist_obj = %llu\n", (u_longlong_t)ds->ds_deadlist_obj); (void) printf("\t\tused_bytes = %s\n", used); (void) printf("\t\tcompressed_bytes = %s\n", compressed); (void) printf("\t\tuncompressed_bytes = %s\n", uncompressed); (void) printf("\t\tunique = %s\n", unique); (void) printf("\t\tfsid_guid = %llu\n", (u_longlong_t)ds->ds_fsid_guid); (void) printf("\t\tguid = %llu\n", (u_longlong_t)ds->ds_guid); (void) printf("\t\tflags = %llx\n", (u_longlong_t)ds->ds_flags); (void) printf("\t\tnext_clones_obj = %llu\n", (u_longlong_t)ds->ds_next_clones_obj); (void) printf("\t\tprops_obj = %llu\n", (u_longlong_t)ds->ds_props_obj); (void) printf("\t\tbp = %s\n", blkbuf); } /* ARGSUSED */ static int dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) { char blkbuf[BP_SPRINTF_LEN]; ASSERT(bp->blk_birth != 0); sprintf_blkptr_compact(blkbuf, bp); (void) printf("\t%s\n", blkbuf); return (0); } static void dump_bpobj(bpobj_t *bpo, char *name) { char bytes[32]; char comp[32]; char uncomp[32]; if (dump_opt['d'] < 3) return; zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes); if (bpo->bpo_havesubobj) { zdb_nicenum(bpo->bpo_phys->bpo_comp, comp); zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp); (void) printf("\n %s: %llu local blkptrs, %llu subobjs, " "%s (%s/%s comp)\n", name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs, bytes, comp, uncomp); } else { (void) printf("\n %s: %llu blkptrs, %s\n", name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, bytes); } if (dump_opt['d'] < 5) return; (void) printf("\n"); (void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL); } static void dump_deadlist(dsl_deadlist_t *dl) { dsl_deadlist_entry_t *dle; char bytes[32]; char comp[32]; char uncomp[32]; if (dump_opt['d'] < 3) return; zdb_nicenum(dl->dl_phys->dl_used, bytes); zdb_nicenum(dl->dl_phys->dl_comp, comp); zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp); (void) printf("\n Deadlist: %s (%s/%s 
comp)\n", bytes, comp, uncomp); if (dump_opt['d'] < 4) return; (void) printf("\n"); for (dle = avl_first(&dl->dl_tree); dle; dle = AVL_NEXT(&dl->dl_tree, dle)) { (void) printf(" mintxg %llu -> obj %llu\n", (longlong_t)dle->dle_mintxg, (longlong_t)dle->dle_bpobj.bpo_object); if (dump_opt['d'] >= 5) dump_bpobj(&dle->dle_bpobj, ""); } } static avl_tree_t idx_tree; static avl_tree_t domain_tree; static boolean_t fuid_table_loaded; static boolean_t sa_loaded; sa_attr_type_t *sa_attr_table; static void fuid_table_destroy() { if (fuid_table_loaded) { zfs_fuid_table_destroy(&idx_tree, &domain_tree); fuid_table_loaded = B_FALSE; } } /* * print uid or gid information. * For normal POSIX id just the id is printed in decimal format. * For CIFS files with FUID the fuid is printed in hex followed by * the domain-rid string. */ static void print_idstr(uint64_t id, const char *id_type) { if (FUID_INDEX(id)) { char *domain; domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id)); (void) printf("\t%s %llx [%s-%d]\n", id_type, (u_longlong_t)id, domain, (int)FUID_RID(id)); } else { (void) printf("\t%s %llu\n", id_type, (u_longlong_t)id); } } static void dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid) { uint32_t uid_idx, gid_idx; uid_idx = FUID_INDEX(uid); gid_idx = FUID_INDEX(gid); /* Load domain table, if not already loaded */ if (!fuid_table_loaded && (uid_idx || gid_idx)) { uint64_t fuid_obj; /* first find the fuid object. It lives in the master node */ VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1, &fuid_obj) == 0); zfs_fuid_avl_tree_create(&idx_tree, &domain_tree); (void) zfs_fuid_table_load(os, fuid_obj, &idx_tree, &domain_tree); fuid_table_loaded = B_TRUE; } print_idstr(uid, "uid"); print_idstr(gid, "gid"); } /*ARGSUSED*/ static void dump_znode(objset_t *os, uint64_t object, void *data, size_t size) { char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */ sa_handle_t *hdl; uint64_t xattr, rdev, gen; uint64_t uid, gid, mode, fsize, parent, links; uint64_t pflags; uint64_t acctm[2], modtm[2], chgtm[2], crtm[2]; time_t z_crtime, z_atime, z_mtime, z_ctime; sa_bulk_attr_t bulk[12]; int idx = 0; int error; if (!sa_loaded) { uint64_t sa_attrs = 0; uint64_t version; VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1, &version) == 0); if (version >= ZPL_VERSION_SA) { VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_attrs) == 0); } if ((error = sa_setup(os, sa_attrs, zfs_attr_table, ZPL_END, &sa_attr_table)) != 0) { (void) printf("sa_setup failed errno %d, can't " "display znode contents\n", error); return; } sa_loaded = B_TRUE; } if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) { (void) printf("Failed to get handle for SA znode\n"); return; } SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL, &links, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL, &mode, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT], NULL, &parent, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL, &fsize, 8); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL, acctm, 16); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL, modtm, 16); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL, crtm, 16); SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL, chgtm, 16); SA_ADD_BULK_ATTR(bulk, idx,
sa_attr_table[ZPL_FLAGS], NULL, &pflags, 8); if (sa_bulk_lookup(hdl, bulk, idx)) { (void) sa_handle_destroy(hdl); return; } error = zfs_obj_to_path(os, object, path, sizeof (path)); if (error != 0) { (void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>", (u_longlong_t)object); } if (dump_opt['d'] < 3) { (void) printf("\t%s\n", path); (void) sa_handle_destroy(hdl); return; } z_crtime = (time_t)crtm[0]; z_atime = (time_t)acctm[0]; z_mtime = (time_t)modtm[0]; z_ctime = (time_t)chgtm[0]; (void) printf("\tpath %s\n", path); dump_uidgid(os, uid, gid); (void) printf("\tatime %s", ctime(&z_atime)); (void) printf("\tmtime %s", ctime(&z_mtime)); (void) printf("\tctime %s", ctime(&z_ctime)); (void) printf("\tcrtime %s", ctime(&z_crtime)); (void) printf("\tgen %llu\n", (u_longlong_t)gen); (void) printf("\tmode %llo\n", (u_longlong_t)mode); (void) printf("\tsize %llu\n", (u_longlong_t)fsize); (void) printf("\tparent %llu\n", (u_longlong_t)parent); (void) printf("\tlinks %llu\n", (u_longlong_t)links); (void) printf("\tpflags %llx\n", (u_longlong_t)pflags); if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr, sizeof (uint64_t)) == 0) (void) printf("\txattr %llu\n", (u_longlong_t)xattr); if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev, sizeof (uint64_t)) == 0) (void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev); sa_handle_destroy(hdl); } /*ARGSUSED*/ static void dump_acl(objset_t *os, uint64_t object, void *data, size_t size) { } /*ARGSUSED*/ static void dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size) { } static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = { dump_none, /* unallocated */ dump_zap, /* object directory */ dump_uint64, /* object array */ dump_none, /* packed nvlist */ dump_packed_nvlist, /* packed nvlist size */ dump_none, /* bplist */ dump_none, /* bplist header */ dump_none, /* SPA space map header */ dump_none, /* SPA space map */ dump_none, /* ZIL intent log */ dump_dnode, /* DMU dnode */ dump_dmu_objset, /* DMU objset */ dump_dsl_dir, /* DSL directory */ dump_zap, /* DSL directory child map */ dump_zap, /* DSL dataset snap map */ dump_zap, /* DSL props */ dump_dsl_dataset, /* DSL dataset */ dump_znode, /* ZFS znode */ dump_acl, /* ZFS V0 ACL */ dump_uint8, /* ZFS plain file */ dump_zpldir, /* ZFS directory */ dump_zap, /* ZFS master node */ dump_zap, /* ZFS delete queue */ dump_uint8, /* zvol object */ dump_zap, /* zvol prop */ dump_uint8, /* other uint8[] */ dump_uint64, /* other uint64[] */ dump_zap, /* other ZAP */ dump_zap, /* persistent error log */ dump_uint8, /* SPA history */ dump_uint64, /* SPA history offsets */ dump_zap, /* Pool properties */ dump_zap, /* DSL permissions */ dump_acl, /* ZFS ACL */ dump_uint8, /* ZFS SYSACL */ dump_none, /* FUID nvlist */ dump_packed_nvlist, /* FUID nvlist size */ dump_zap, /* DSL dataset next clones */ dump_zap, /* DSL scrub queue */ dump_zap, /* ZFS user/group used */ dump_zap, /* ZFS user/group quota */ dump_zap, /* snapshot refcount tags */ dump_ddt_zap, /* DDT ZAP object */ dump_zap, /* DDT statistics */ dump_znode, /* SA object */ dump_zap, /* SA Master Node */ dump_sa_attrs, /* SA attribute registration */ dump_sa_layouts, /* SA attribute layouts */ dump_zap, /* DSL scrub translations */ dump_none, /* fake dedup BP */ dump_zap, /* deadlist */ dump_none, /* deadlist hdr */ dump_zap, /* dsl clones */ dump_none, /* bpobj subobjs */ dump_unknown, /* Unknown type, must be last */ }; static void dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header) { dmu_buf_t *db = NULL; dmu_object_info_t
doi; dnode_t *dn; void *bonus = NULL; size_t bsize = 0; char iblk[32], dblk[32], lsize[32], asize[32], fill[32]; char bonus_size[32]; char aux[50]; int error; if (*print_header) { (void) printf("\n%10s %3s %5s %5s %5s %5s %6s %s\n", "Object", "lvl", "iblk", "dblk", "dsize", "lsize", "%full", "type"); *print_header = 0; } if (object == 0) { dn = DMU_META_DNODE(os); } else { error = dmu_bonus_hold(os, object, FTAG, &db); if (error) fatal("dmu_bonus_hold(%llu) failed, errno %u", object, error); bonus = db->db_data; bsize = db->db_size; dn = DB_DNODE((dmu_buf_impl_t *)db); } dmu_object_info_from_dnode(dn, &doi); zdb_nicenum(doi.doi_metadata_block_size, iblk); zdb_nicenum(doi.doi_data_block_size, dblk); zdb_nicenum(doi.doi_max_offset, lsize); zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize); zdb_nicenum(doi.doi_bonus_size, bonus_size); (void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count * doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) / doi.doi_max_offset); aux[0] = '\0'; if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) { (void) snprintf(aux + strlen(aux), sizeof (aux), " (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum)); } if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) { (void) snprintf(aux + strlen(aux), sizeof (aux), " (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress)); } (void) printf("%10lld %3u %5s %5s %5s %5s %6s %s%s\n", (u_longlong_t)object, doi.doi_indirection, iblk, dblk, asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux); if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) { (void) printf("%10s %3s %5s %5s %5s %5s %6s %s\n", "", "", "", "", "", bonus_size, "bonus", ZDB_OT_NAME(doi.doi_bonus_type)); } if (verbosity >= 4) { (void) printf("\tdnode flags: %s%s%s\n", (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ? "USED_BYTES " : "", (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ? "USERUSED_ACCOUNTED " : "", (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? "SPILL_BLKPTR" : ""); (void) printf("\tdnode maxblkid: %llu\n", (longlong_t)dn->dn_phys->dn_maxblkid); object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object, bonus, bsize); object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0); *print_header = 1; } if (verbosity >= 5) dump_indirect(dn); if (verbosity >= 5) { /* * Report the list of segments that comprise the object. 
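 * Each pass asks dnode_next_offset() for the start of the next
 * allocated region, then (with DNODE_FIND_HOLE) for the offset of the
 * hole that terminates it, and prints the pair as one [start, end)
 * segment. For the meta-dnode (DMU_OT_DNODE) the search descends to
 * level 0 with DNODES_PER_BLOCK dnodes per block, so that holes
 * between individual dnodes are visible.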
*/ uint64_t start = 0; uint64_t end; uint64_t blkfill = 1; int minlvl = 1; if (dn->dn_type == DMU_OT_DNODE) { minlvl = 0; blkfill = DNODES_PER_BLOCK; } for (;;) { char segsize[32]; error = dnode_next_offset(dn, 0, &start, minlvl, blkfill, 0); if (error) break; end = start; error = dnode_next_offset(dn, DNODE_FIND_HOLE, &end, minlvl, blkfill, 0); zdb_nicenum(end - start, segsize); (void) printf("\t\tsegment [%016llx, %016llx)" " size %5s\n", (u_longlong_t)start, (u_longlong_t)end, segsize); if (error) break; start = end; } } if (db != NULL) dmu_buf_rele(db, FTAG); } static char *objset_types[DMU_OST_NUMTYPES] = { "NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" }; static void dump_dir(objset_t *os) { dmu_objset_stats_t dds; uint64_t object, object_count; uint64_t refdbytes, usedobjs, scratch; char numbuf[32]; char blkbuf[BP_SPRINTF_LEN + 20]; char osname[MAXNAMELEN]; char *type = "UNKNOWN"; int verbosity = dump_opt['d']; int print_header = 1; int i, error; dmu_objset_fast_stat(os, &dds); if (dds.dds_type < DMU_OST_NUMTYPES) type = objset_types[dds.dds_type]; if (dds.dds_type == DMU_OST_META) { dds.dds_creation_txg = TXG_INITIAL; usedobjs = os->os_rootbp->blk_fill; refdbytes = os->os_spa->spa_dsl_pool-> dp_mos_dir->dd_phys->dd_used_bytes; } else { dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch); } ASSERT3U(usedobjs, ==, os->os_rootbp->blk_fill); zdb_nicenum(refdbytes, numbuf); if (verbosity >= 4) { (void) sprintf(blkbuf, ", rootbp "); (void) sprintf_blkptr(blkbuf + strlen(blkbuf), os->os_rootbp); } else { blkbuf[0] = '\0'; } dmu_objset_name(os, osname); (void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, " "%s, %llu objects%s\n", osname, type, (u_longlong_t)dmu_objset_id(os), (u_longlong_t)dds.dds_creation_txg, numbuf, (u_longlong_t)usedobjs, blkbuf); if (zopt_objects != 0) { for (i = 0; i < zopt_objects; i++) dump_object(os, zopt_object[i], verbosity, &print_header); (void) printf("\n"); return; } if (dump_opt['i'] != 0 || verbosity >= 2) dump_intent_log(dmu_objset_zil(os)); if (dmu_objset_ds(os) != NULL) dump_deadlist(&dmu_objset_ds(os)->ds_deadlist); if (verbosity < 2) return; if (os->os_rootbp->blk_birth == 0) return; dump_object(os, 0, verbosity, &print_header); object_count = 0; if (DMU_USERUSED_DNODE(os) != NULL && DMU_USERUSED_DNODE(os)->dn_type != 0) { dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header); dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header); } object = 0; while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) { dump_object(os, object, verbosity, &print_header); object_count++; } ASSERT3U(object_count, ==, usedobjs); (void) printf("\n"); if (error != ESRCH) { (void) fprintf(stderr, "dmu_object_next() = %d\n", error); abort(); } } static void dump_uberblock(uberblock_t *ub, const char *header, const char *footer) { time_t timestamp = ub->ub_timestamp; (void) printf(header ? header : ""); (void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic); (void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version); (void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg); (void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum); (void) printf("\ttimestamp = %llu UTC = %s", (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp))); if (dump_opt['u'] >= 3) { char blkbuf[BP_SPRINTF_LEN]; sprintf_blkptr(blkbuf, &ub->ub_rootbp); (void) printf("\trootbp = %s\n", blkbuf); } (void) printf(footer ?
footer : ""); } static void dump_config(spa_t *spa) { dmu_buf_t *db; size_t nvsize = 0; int error = 0; error = dmu_bonus_hold(spa->spa_meta_objset, spa->spa_config_object, FTAG, &db); if (error == 0) { nvsize = *(uint64_t *)db->db_data; dmu_buf_rele(db, FTAG); (void) printf("\nMOS Configuration:\n"); dump_packed_nvlist(spa->spa_meta_objset, spa->spa_config_object, (void *)&nvsize, 1); } else { (void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d", (u_longlong_t)spa->spa_config_object, error); } } static void dump_cachefile(const char *cachefile) { int fd; struct stat64 statbuf; char *buf; nvlist_t *config; if ((fd = open64(cachefile, O_RDONLY)) < 0) { (void) printf("cannot open '%s': %s\n", cachefile, strerror(errno)); exit(1); } if (fstat64(fd, &statbuf) != 0) { (void) printf("failed to stat '%s': %s\n", cachefile, strerror(errno)); exit(1); } if ((buf = malloc(statbuf.st_size)) == NULL) { (void) fprintf(stderr, "failed to allocate %llu bytes\n", (u_longlong_t)statbuf.st_size); exit(1); } if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { (void) fprintf(stderr, "failed to read %llu bytes\n", (u_longlong_t)statbuf.st_size); exit(1); } (void) close(fd); if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) { (void) fprintf(stderr, "failed to unpack nvlist\n"); exit(1); } free(buf); dump_nvlist(config, 0); nvlist_free(config); } #define ZDB_MAX_UB_HEADER_SIZE 32 static void dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift) { vdev_t vd; vdev_t *vdp = &vd; char header[ZDB_MAX_UB_HEADER_SIZE]; vd.vdev_ashift = ashift; vdp->vdev_top = vdp; for (int i = 0; i < VDEV_UBERBLOCK_COUNT(vdp); i++) { uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i); uberblock_t *ub = (void *)((char *)lbl + uoff); if (uberblock_verify(ub)) continue; (void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE, "Uberblock[%d]\n", i); dump_uberblock(ub, header, ""); } } static void dump_label(const char *dev) { int fd; vdev_label_t label; char *path, *buf = label.vl_vdev_phys.vp_nvlist; size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist); struct stat64 statbuf; uint64_t psize, ashift; int len = strlen(dev) + 1; if (strncmp(dev, "/dev/dsk/", 9) == 0) { len++; path = malloc(len); (void) snprintf(path, len, "%s%s", "/dev/rdsk/", dev + 9); } else { path = strdup(dev); } if ((fd = open64(path, O_RDONLY)) < 0) { (void) printf("cannot open '%s': %s\n", path, strerror(errno)); free(path); exit(1); } if (fstat64(fd, &statbuf) != 0) { (void) printf("failed to stat '%s': %s\n", path, strerror(errno)); free(path); (void) close(fd); exit(1); } if (S_ISBLK(statbuf.st_mode)) { (void) printf("cannot use '%s': character device required\n", path); free(path); (void) close(fd); exit(1); } psize = statbuf.st_size; psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t)); for (int l = 0; l < VDEV_LABELS; l++) { nvlist_t *config = NULL; (void) printf("--------------------------------------------\n"); (void) printf("LABEL %d\n", l); (void) printf("--------------------------------------------\n"); if (pread64(fd, &label, sizeof (label), vdev_label_offset(psize, l, 0)) != sizeof (label)) { (void) printf("failed to read label %d\n", l); continue; } if (nvlist_unpack(buf, buflen, &config, 0) != 0) { (void) printf("failed to unpack label %d\n", l); ashift = SPA_MINBLOCKSHIFT; } else { nvlist_t *vdev_tree = NULL; dump_nvlist(config, 4); if ((nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) || (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ASHIFT, &ashift) != 0)) ashift = SPA_MINBLOCKSHIFT; nvlist_free(config); } if 
(dump_opt['u']) dump_label_uberblocks(&label, ashift); } free(path); (void) close(fd); } /*ARGSUSED*/ static int dump_one_dir(const char *dsname, void *arg) { int error; objset_t *os; error = dmu_objset_own(dsname, DMU_OST_ANY, B_TRUE, FTAG, &os); if (error) { (void) printf("Could not open %s, error %d\n", dsname, error); return (0); } dump_dir(os); dmu_objset_disown(os, FTAG); fuid_table_destroy(); sa_loaded = B_FALSE; return (0); } /* * Block statistics. */ typedef struct zdb_blkstats { uint64_t zb_asize; uint64_t zb_lsize; uint64_t zb_psize; uint64_t zb_count; } zdb_blkstats_t; /* * Extended object types to report deferred frees and dedup auto-ditto blocks. */ #define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0) #define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1) #define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 2) static char *zdb_ot_extname[] = { "deferred free", "dedup ditto", "Total", }; #define ZB_TOTAL DN_MAX_LEVELS typedef struct zdb_cb { zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1]; uint64_t zcb_dedup_asize; uint64_t zcb_dedup_blocks; uint64_t zcb_errors[256]; int zcb_readfails; int zcb_haderrors; spa_t *zcb_spa; } zdb_cb_t; static void zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp, dmu_object_type_t type) { uint64_t refcnt = 0; ASSERT(type < ZDB_OT_TOTAL); if (zilog && zil_bp_tree_add(zilog, bp) != 0) return; for (int i = 0; i < 4; i++) { int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL; int t = (i & 1) ? type : ZDB_OT_TOTAL; zdb_blkstats_t *zb = &zcb->zcb_type[l][t]; zb->zb_asize += BP_GET_ASIZE(bp); zb->zb_lsize += BP_GET_LSIZE(bp); zb->zb_psize += BP_GET_PSIZE(bp); zb->zb_count++; } if (dump_opt['L']) return; if (BP_GET_DEDUP(bp)) { ddt_t *ddt; ddt_entry_t *dde; ddt = ddt_select(zcb->zcb_spa, bp); ddt_enter(ddt); dde = ddt_lookup(ddt, bp, B_FALSE); if (dde == NULL) { refcnt = 0; } else { ddt_phys_t *ddp = ddt_phys_select(dde, bp); ddt_phys_decref(ddp); refcnt = ddp->ddp_refcnt; if (ddt_phys_total_refcnt(dde) == 0) ddt_remove(ddt, dde); } ddt_exit(ddt); } VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa, refcnt ? 0 : spa_first_txg(zcb->zcb_spa), bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0); } /* ARGSUSED */ static int zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) { zdb_cb_t *zcb = arg; char blkbuf[BP_SPRINTF_LEN]; dmu_object_type_t type; boolean_t is_metadata; if (bp == NULL) return (0); type = BP_GET_TYPE(bp); zdb_count_block(zcb, zilog, bp, type); is_metadata = (BP_GET_LEVEL(bp) != 0 || dmu_ot[type].ot_metadata); if (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata)) { int ioerr; size_t size = BP_GET_PSIZE(bp); void *data = malloc(size); int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW; /* If it's an intent log block, failure is expected. 
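 * ZIL blocks may have been allocated and claimed but never written,
 * so such reads are issued with ZIO_FLAG_SPECULATIVE and an I/O
 * error on them is not added to zcb_errors below.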
*/ if (zb->zb_level == ZB_ZIL_LEVEL) flags |= ZIO_FLAG_SPECULATIVE; ioerr = zio_wait(zio_read(NULL, spa, bp, data, size, NULL, NULL, ZIO_PRIORITY_ASYNC_READ, flags, zb)); free(data); if (ioerr && !(flags & ZIO_FLAG_SPECULATIVE)) { zcb->zcb_haderrors = 1; zcb->zcb_errors[ioerr]++; if (dump_opt['b'] >= 2) sprintf_blkptr(blkbuf, bp); else blkbuf[0] = '\0'; (void) printf("zdb_blkptr_cb: " "Got error %d reading " "<%llu, %llu, %lld, %llx> %s -- skipping\n", ioerr, (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid, blkbuf); } } zcb->zcb_readfails = 0; if (dump_opt['b'] >= 4) { sprintf_blkptr(blkbuf, bp); (void) printf("objset %llu object %llu " "level %lld offset 0x%llx %s\n", (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object, (longlong_t)zb->zb_level, (u_longlong_t)blkid2offset(dnp, bp, zb), blkbuf); } return (0); } static void zdb_leak(space_map_t *sm, uint64_t start, uint64_t size) { vdev_t *vd = sm->sm_ppd; (void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n", (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size); } /* ARGSUSED */ static void zdb_space_map_load(space_map_t *sm) { } static void zdb_space_map_unload(space_map_t *sm) { space_map_vacate(sm, zdb_leak, sm); } /* ARGSUSED */ static void zdb_space_map_claim(space_map_t *sm, uint64_t start, uint64_t size) { } static space_map_ops_t zdb_space_map_ops = { zdb_space_map_load, zdb_space_map_unload, NULL, /* alloc */ zdb_space_map_claim, NULL, /* free */ NULL /* maxsize */ }; static void zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb) { ddt_bookmark_t ddb = { 0 }; ddt_entry_t dde; int error; while ((error = ddt_walk(spa, &ddb, &dde)) == 0) { blkptr_t blk; ddt_phys_t *ddp = dde.dde_phys; if (ddb.ddb_class == DDT_CLASS_UNIQUE) return; ASSERT(ddt_phys_total_refcnt(&dde) > 1); for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { if (ddp->ddp_phys_birth == 0) continue; ddt_bp_create(ddb.ddb_checksum, &dde.dde_key, ddp, &blk); if (p == DDT_PHYS_DITTO) { zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO); } else { zcb->zcb_dedup_asize += BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1); zcb->zcb_dedup_blocks++; } } if (!dump_opt['L']) { ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum]; ddt_enter(ddt); VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL); ddt_exit(ddt); } } ASSERT(error == ENOENT); } static void zdb_leak_init(spa_t *spa, zdb_cb_t *zcb) { zcb->zcb_spa = spa; if (!dump_opt['L']) { vdev_t *rvd = spa->spa_root_vdev; for (int c = 0; c < rvd->vdev_children; c++) { vdev_t *vd = rvd->vdev_child[c]; for (int m = 0; m < vd->vdev_ms_count; m++) { metaslab_t *msp = vd->vdev_ms[m]; mutex_enter(&msp->ms_lock); space_map_unload(&msp->ms_map); VERIFY(space_map_load(&msp->ms_map, &zdb_space_map_ops, SM_ALLOC, &msp->ms_smo, spa->spa_meta_objset) == 0); msp->ms_map.sm_ppd = vd; mutex_exit(&msp->ms_lock); } } } spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); zdb_ddt_leak_init(spa, zcb); spa_config_exit(spa, SCL_CONFIG, FTAG); } static void zdb_leak_fini(spa_t *spa) { if (!dump_opt['L']) { vdev_t *rvd = spa->spa_root_vdev; for (int c = 0; c < rvd->vdev_children; c++) { vdev_t *vd = rvd->vdev_child[c]; for (int m = 0; m < vd->vdev_ms_count; m++) { metaslab_t *msp = vd->vdev_ms[m]; mutex_enter(&msp->ms_lock); space_map_unload(&msp->ms_map); mutex_exit(&msp->ms_lock); } } } } /* ARGSUSED */ static int count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) { zdb_cb_t *zcb = arg; if (dump_opt['b'] >= 4) { char blkbuf[BP_SPRINTF_LEN]; sprintf_blkptr(blkbuf, bp); (void) 
printf("[%s] %s\n", "deferred free", blkbuf); } zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED); return (0); } static int dump_block_stats(spa_t *spa) { zdb_cb_t zcb = { 0 }; zdb_blkstats_t *zb, *tzb; uint64_t norm_alloc, norm_space, total_alloc, total_found; int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD; int leaks = 0; (void) printf("\nTraversing all blocks %s%s%s%s%s...\n", (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "", (dump_opt['c'] == 1) ? "metadata " : "", dump_opt['c'] ? "checksums " : "", (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "", !dump_opt['L'] ? "nothing leaked " : ""); /* * Load all space maps as SM_ALLOC maps, then traverse the pool * claiming each block we discover. If the pool is perfectly * consistent, the space maps will be empty when we're done. * Anything left over is a leak; any block we can't claim (because * it's not part of any space map) is a double allocation, * reference to a freed block, or an unclaimed log block. */ zdb_leak_init(spa, &zcb); /* * If there's a deferred-free bplist, process that first. */ (void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj, count_block_cb, &zcb, NULL); if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { (void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj, count_block_cb, &zcb, NULL); } if (dump_opt['c'] > 1) flags |= TRAVERSE_PREFETCH_DATA; zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb); if (zcb.zcb_haderrors) { (void) printf("\nError counts:\n\n"); (void) printf("\t%5s %s\n", "errno", "count"); for (int e = 0; e < 256; e++) { if (zcb.zcb_errors[e] != 0) { (void) printf("\t%5d %llu\n", e, (u_longlong_t)zcb.zcb_errors[e]); } } } /* * Report any leaked segments. */ zdb_leak_fini(spa); tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL]; norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa)); norm_space = metaslab_class_get_space(spa_normal_class(spa)); total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa)); total_found = tzb->zb_asize - zcb.zcb_dedup_asize; if (total_found == total_alloc) { if (!dump_opt['L']) (void) printf("\n\tNo leaks (block sum matches space" " maps exactly)\n"); } else { (void) printf("block traversal size %llu != alloc %llu " "(%s %lld)\n", (u_longlong_t)total_found, (u_longlong_t)total_alloc, (dump_opt['L']) ? 
"unreachable" : "leaked", (longlong_t)(total_alloc - total_found)); leaks = 1; } if (tzb->zb_count == 0) return (2); (void) printf("\n"); (void) printf("\tbp count: %10llu\n", (u_longlong_t)tzb->zb_count); (void) printf("\tbp logical: %10llu avg: %6llu\n", (u_longlong_t)tzb->zb_lsize, (u_longlong_t)(tzb->zb_lsize / tzb->zb_count)); (void) printf("\tbp physical: %10llu avg:" " %6llu compression: %6.2f\n", (u_longlong_t)tzb->zb_psize, (u_longlong_t)(tzb->zb_psize / tzb->zb_count), (double)tzb->zb_lsize / tzb->zb_psize); (void) printf("\tbp allocated: %10llu avg:" " %6llu compression: %6.2f\n", (u_longlong_t)tzb->zb_asize, (u_longlong_t)(tzb->zb_asize / tzb->zb_count), (double)tzb->zb_lsize / tzb->zb_asize); (void) printf("\tbp deduped: %10llu ref>1:" " %6llu deduplication: %6.2f\n", (u_longlong_t)zcb.zcb_dedup_asize, (u_longlong_t)zcb.zcb_dedup_blocks, (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0); (void) printf("\tSPA allocated: %10llu used: %5.2f%%\n", (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space); if (dump_opt['b'] >= 2) { int l, t, level; (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE" "\t avg\t comp\t%%Total\tType\n"); for (t = 0; t <= ZDB_OT_TOTAL; t++) { char csize[32], lsize[32], psize[32], asize[32]; char avg[32]; char *typename; if (t < DMU_OT_NUMTYPES) typename = dmu_ot[t].ot_name; else typename = zdb_ot_extname[t - DMU_OT_NUMTYPES]; if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) { (void) printf("%6s\t%5s\t%5s\t%5s" "\t%5s\t%5s\t%6s\t%s\n", "-", "-", "-", "-", "-", "-", "-", typename); continue; } for (l = ZB_TOTAL - 1; l >= -1; l--) { level = (l == -1 ? ZB_TOTAL : l); zb = &zcb.zcb_type[level][t]; if (zb->zb_asize == 0) continue; if (dump_opt['b'] < 3 && level != ZB_TOTAL) continue; if (level == 0 && zb->zb_asize == zcb.zcb_type[ZB_TOTAL][t].zb_asize) continue; zdb_nicenum(zb->zb_count, csize); zdb_nicenum(zb->zb_lsize, lsize); zdb_nicenum(zb->zb_psize, psize); zdb_nicenum(zb->zb_asize, asize); zdb_nicenum(zb->zb_asize / zb->zb_count, avg); (void) printf("%6s\t%5s\t%5s\t%5s\t%5s" "\t%5.2f\t%6.2f\t", csize, lsize, psize, asize, avg, (double)zb->zb_lsize / zb->zb_psize, 100.0 * zb->zb_asize / tzb->zb_asize); if (level == ZB_TOTAL) (void) printf("%s\n", typename); else (void) printf(" L%d %s\n", level, typename); } } } (void) printf("\n"); if (leaks) return (2); if (zcb.zcb_haderrors) return (3); return (0); } typedef struct zdb_ddt_entry { ddt_key_t zdde_key; uint64_t zdde_ref_blocks; uint64_t zdde_ref_lsize; uint64_t zdde_ref_psize; uint64_t zdde_ref_dsize; avl_node_t zdde_node; } zdb_ddt_entry_t; /* ARGSUSED */ static int zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) { avl_tree_t *t = arg; avl_index_t where; zdb_ddt_entry_t *zdde, zdde_search; if (bp == NULL) return (0); if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) { (void) printf("traversing objset %llu, %llu objects, " "%lu blocks so far\n", (u_longlong_t)zb->zb_objset, (u_longlong_t)bp->blk_fill, avl_numnodes(t)); } if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF || BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) return (0); ddt_key_fill(&zdde_search.zdde_key, bp); zdde = avl_find(t, &zdde_search, &where); if (zdde == NULL) { zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL); zdde->zdde_key = zdde_search.zdde_key; avl_insert(t, zdde, where); } zdde->zdde_ref_blocks += 1; zdde->zdde_ref_lsize += BP_GET_LSIZE(bp); zdde->zdde_ref_psize += BP_GET_PSIZE(bp); zdde->zdde_ref_dsize += 
bp_get_dsize_sync(spa, bp); return (0); } static void dump_simulated_ddt(spa_t *spa) { avl_tree_t t; void *cookie = NULL; zdb_ddt_entry_t *zdde; ddt_histogram_t ddh_total = { 0 }; ddt_stat_t dds_total = { 0 }; avl_create(&t, ddt_entry_compare, sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node)); spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); (void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zdb_ddt_add_cb, &t); spa_config_exit(spa, SCL_CONFIG, FTAG); while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) { ddt_stat_t dds; uint64_t refcnt = zdde->zdde_ref_blocks; ASSERT(refcnt != 0); dds.dds_blocks = zdde->zdde_ref_blocks / refcnt; dds.dds_lsize = zdde->zdde_ref_lsize / refcnt; dds.dds_psize = zdde->zdde_ref_psize / refcnt; dds.dds_dsize = zdde->zdde_ref_dsize / refcnt; dds.dds_ref_blocks = zdde->zdde_ref_blocks; dds.dds_ref_lsize = zdde->zdde_ref_lsize; dds.dds_ref_psize = zdde->zdde_ref_psize; dds.dds_ref_dsize = zdde->zdde_ref_dsize; ddt_stat_add(&ddh_total.ddh_stat[highbit(refcnt) - 1], &dds, 0); umem_free(zdde, sizeof (*zdde)); } avl_destroy(&t); ddt_histogram_stat(&dds_total, &ddh_total); (void) printf("Simulated DDT histogram:\n"); zpool_dump_ddt(&dds_total, &ddh_total); dump_dedup_ratio(&dds_total); } static void dump_zpool(spa_t *spa) { dsl_pool_t *dp = spa_get_dsl(spa); int rc = 0; if (dump_opt['S']) { dump_simulated_ddt(spa); return; } if (!dump_opt['e'] && dump_opt['C'] > 1) { (void) printf("\nCached configuration:\n"); dump_nvlist(spa->spa_config, 8); } if (dump_opt['C']) dump_config(spa); if (dump_opt['u']) dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n"); if (dump_opt['D']) dump_all_ddts(spa); if (dump_opt['d'] > 2 || dump_opt['m']) dump_metaslabs(spa); if (dump_opt['d'] || dump_opt['i']) { dump_dir(dp->dp_meta_objset); if (dump_opt['d'] >= 3) { dump_bpobj(&spa->spa_deferred_bpobj, "Deferred frees"); if (spa_version(spa) >= SPA_VERSION_DEADLISTS) { dump_bpobj(&spa->spa_dsl_pool->dp_free_bpobj, "Pool frees"); } dump_dtl(spa->spa_root_vdev, 0); } (void) dmu_objset_find(spa_name(spa), dump_one_dir, NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN); } if (dump_opt['b'] || dump_opt['c']) rc = dump_block_stats(spa); if (dump_opt['s']) show_pool_stats(spa); if (dump_opt['h']) dump_history(spa); if (rc != 0) exit(rc); } #define ZDB_FLAG_CHECKSUM 0x0001 #define ZDB_FLAG_DECOMPRESS 0x0002 #define ZDB_FLAG_BSWAP 0x0004 #define ZDB_FLAG_GBH 0x0008 #define ZDB_FLAG_INDIRECT 0x0010 #define ZDB_FLAG_PHYS 0x0020 #define ZDB_FLAG_RAW 0x0040 #define ZDB_FLAG_PRINT_BLKPTR 0x0080 int flagbits[256]; static void zdb_print_blkptr(blkptr_t *bp, int flags) { char blkbuf[BP_SPRINTF_LEN]; if (flags & ZDB_FLAG_BSWAP) byteswap_uint64_array((void *)bp, sizeof (blkptr_t)); sprintf_blkptr(blkbuf, bp); (void) printf("%s\n", blkbuf); } static void zdb_dump_indirect(blkptr_t *bp, int nbps, int flags) { int i; for (i = 0; i < nbps; i++) zdb_print_blkptr(&bp[i], flags); } static void zdb_dump_gbh(void *buf, int flags) { zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags); } static void zdb_dump_block_raw(void *buf, uint64_t size, int flags) { if (flags & ZDB_FLAG_BSWAP) byteswap_uint64_array(buf, size); (void) write(1, buf, size); } static void zdb_dump_block(char *label, void *buf, uint64_t size, int flags) { uint64_t *d = (uint64_t *)buf; int nwords = size / sizeof (uint64_t); int do_bswap = !!(flags & ZDB_FLAG_BSWAP); int i, j; char *hdr, *c; if (do_bswap) hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8"; else hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f"; (void) 
printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr); for (i = 0; i < nwords; i += 2) { (void) printf("%06llx: %016llx %016llx ", (u_longlong_t)(i * sizeof (uint64_t)), (u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]), (u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1])); c = (char *)&d[i]; for (j = 0; j < 2 * sizeof (uint64_t); j++) (void) printf("%c", isprint(c[j]) ? c[j] : '.'); (void) printf("\n"); } } /* * There are two acceptable formats: * leaf_name - For example: c1t0d0 or /tmp/ztest.0a * child[.child]* - For example: 0.1.1 * * The second form can be used to specify arbitrary vdevs anywhere * in the heirarchy. For example, in a pool with a mirror of * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 . */ static vdev_t * zdb_vdev_lookup(vdev_t *vdev, char *path) { char *s, *p, *q; int i; if (vdev == NULL) return (NULL); /* First, assume the x.x.x.x format */ i = (int)strtoul(path, &s, 10); if (s == path || (s && *s != '.' && *s != '\0')) goto name; if (i < 0 || i >= vdev->vdev_children) return (NULL); vdev = vdev->vdev_child[i]; if (*s == '\0') return (vdev); return (zdb_vdev_lookup(vdev, s+1)); name: for (i = 0; i < vdev->vdev_children; i++) { vdev_t *vc = vdev->vdev_child[i]; if (vc->vdev_path == NULL) { vc = zdb_vdev_lookup(vc, path); if (vc == NULL) continue; else return (vc); } p = strrchr(vc->vdev_path, '/'); p = p ? p + 1 : vc->vdev_path; q = &vc->vdev_path[strlen(vc->vdev_path) - 2]; if (strcmp(vc->vdev_path, path) == 0) return (vc); if (strcmp(p, path) == 0) return (vc); if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0) return (vc); } return (NULL); } /* * Read a block from a pool and print it out. The syntax of the * block descriptor is: * * pool:vdev_specifier:offset:size[:flags] * * pool - The name of the pool you wish to read from * vdev_specifier - Which vdev (see comment for zdb_vdev_lookup) * offset - offset, in hex, in bytes * size - Amount of data to read, in hex, in bytes * flags - A string of characters specifying options * b: Decode a blkptr at given offset within block * *c: Calculate and display checksums * d: Decompress data before dumping * e: Byteswap data before dumping * g: Display data as a gang block header * i: Display as an indirect block * p: Do I/O to physical offset * r: Dump raw data to stdout * * * = not yet implemented */ static void zdb_read_block(char *thing, spa_t *spa) { blkptr_t blk, *bp = &blk; dva_t *dva = bp->blk_dva; int flags = 0; uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0; zio_t *zio; vdev_t *vd; void *pbuf, *lbuf, *buf; char *s, *p, *dup, *vdev, *flagstr; int i, error; dup = strdup(thing); s = strtok(dup, ":"); vdev = s ? s : ""; s = strtok(NULL, ":"); offset = strtoull(s ? s : "", NULL, 16); s = strtok(NULL, ":"); size = strtoull(s ? s : "", NULL, 16); s = strtok(NULL, ":"); flagstr = s ? 
s : ""; s = NULL; if (size == 0) s = "size must not be zero"; if (!IS_P2ALIGNED(size, DEV_BSIZE)) s = "size must be a multiple of sector size"; if (!IS_P2ALIGNED(offset, DEV_BSIZE)) s = "offset must be a multiple of sector size"; if (s) { (void) printf("Invalid block specifier: %s - %s\n", thing, s); free(dup); return; } for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) { for (i = 0; flagstr[i]; i++) { int bit = flagbits[(uchar_t)flagstr[i]]; if (bit == 0) { (void) printf("***Invalid flag: %c\n", flagstr[i]); continue; } flags |= bit; /* If it's not something with an argument, keep going */ if ((bit & (ZDB_FLAG_CHECKSUM | ZDB_FLAG_PRINT_BLKPTR)) == 0) continue; p = &flagstr[i + 1]; if (bit == ZDB_FLAG_PRINT_BLKPTR) blkptr_offset = strtoull(p, &p, 16); if (*p != ':' && *p != '\0') { (void) printf("***Invalid flag arg: '%s'\n", s); free(dup); return; } } } vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev); if (vd == NULL) { (void) printf("***Invalid vdev: %s\n", vdev); free(dup); return; } else { if (vd->vdev_path) (void) fprintf(stderr, "Found vdev: %s\n", vd->vdev_path); else (void) fprintf(stderr, "Found vdev type: %s\n", vd->vdev_ops->vdev_op_type); } psize = size; lsize = size; pbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); BP_ZERO(bp); DVA_SET_VDEV(&dva[0], vd->vdev_id); DVA_SET_OFFSET(&dva[0], offset); DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH)); DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize)); BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); BP_SET_LSIZE(bp, lsize); BP_SET_PSIZE(bp, psize); BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); BP_SET_TYPE(bp, DMU_OT_NONE); BP_SET_LEVEL(bp, 0); BP_SET_DEDUP(bp, 0); BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); zio = zio_root(spa, NULL, NULL, 0); if (vd == vd->vdev_top) { /* * Treat this as a normal block read. */ zio_nowait(zio_read(zio, spa, bp, pbuf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL)); } else { /* * Treat this as a vdev child I/O. */ zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pbuf, psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL)); } error = zio_wait(zio); spa_config_exit(spa, SCL_STATE, FTAG); if (error) { (void) printf("Read of %s failed, error: %d\n", thing, error); goto out; } if (flags & ZDB_FLAG_DECOMPRESS) { /* * We don't know how the data was compressed, so just try * every decompress function at every inflated blocksize. 
*/ enum zio_compress c; void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL); bcopy(pbuf, pbuf2, psize); VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf + psize, SPA_MAXBLOCKSIZE - psize) == 0); VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize, SPA_MAXBLOCKSIZE - psize) == 0); for (lsize = SPA_MAXBLOCKSIZE; lsize > psize; lsize -= SPA_MINBLOCKSIZE) { for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) { if (zio_decompress_data(c, pbuf, lbuf, psize, lsize) == 0 && zio_decompress_data(c, pbuf2, lbuf2, psize, lsize) == 0 && bcmp(lbuf, lbuf2, lsize) == 0) break; } if (c != ZIO_COMPRESS_FUNCTIONS) break; lsize -= SPA_MINBLOCKSIZE; } umem_free(pbuf2, SPA_MAXBLOCKSIZE); umem_free(lbuf2, SPA_MAXBLOCKSIZE); if (lsize <= psize) { (void) printf("Decompress of %s failed\n", thing); goto out; } buf = lbuf; size = lsize; } else { buf = pbuf; size = psize; } if (flags & ZDB_FLAG_PRINT_BLKPTR) zdb_print_blkptr((blkptr_t *)(void *) ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags); else if (flags & ZDB_FLAG_RAW) zdb_dump_block_raw(buf, size, flags); else if (flags & ZDB_FLAG_INDIRECT) zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t), flags); else if (flags & ZDB_FLAG_GBH) zdb_dump_gbh(buf, flags); else zdb_dump_block(thing, buf, size, flags); out: umem_free(pbuf, SPA_MAXBLOCKSIZE); umem_free(lbuf, SPA_MAXBLOCKSIZE); free(dup); } static boolean_t pool_match(nvlist_t *cfg, char *tgt) { uint64_t v, guid = strtoull(tgt, NULL, 0); char *s; if (guid != 0) { if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0) return (v == guid); } else { if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0) return (strcmp(s, tgt) == 0); } return (B_FALSE); } static char * find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv) { nvlist_t *pools; nvlist_t *match = NULL; char *name = NULL; char *sepp = NULL; char sep; int count = 0; importargs_t args = { 0 }; args.paths = dirc; args.path = dirv; args.can_be_active = B_TRUE; if ((sepp = strpbrk(*target, "/@")) != NULL) { sep = *sepp; *sepp = '\0'; } pools = zpool_search_import(g_zfs, &args); if (pools != NULL) { nvpair_t *elem = NULL; while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { verify(nvpair_value_nvlist(elem, configp) == 0); if (pool_match(*configp, *target)) { count++; if (match != NULL) { /* print previously found config */ if (name != NULL) { (void) printf("%s\n", name); dump_nvlist(match, 8); name = NULL; } (void) printf("%s\n", nvpair_name(elem)); dump_nvlist(*configp, 8); } else { match = *configp; name = nvpair_name(elem); } } } } if (count > 1) (void) fatal("\tMatched %d pools - use pool GUID " "instead of pool name or \n" "\tpool name part of a dataset name to select pool", count); if (sepp) *sepp = sep; /* * If pool GUID was specified for pool id, replace it with pool name */ if (name && (strstr(*target, name) != *target)) { int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0); *target = umem_alloc(sz, UMEM_NOFAIL); (void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : ""); } *configp = name ? 
match : NULL; return (name); } int main(int argc, char **argv) { int i, c; struct rlimit rl = { 1024, 1024 }; spa_t *spa = NULL; objset_t *os = NULL; int dump_all = 1; int verbose = 0; int error = 0; char **searchdirs = NULL; int nsearch = 0; char *target; nvlist_t *policy = NULL; uint64_t max_txg = UINT64_MAX; int rewind = ZPOOL_NEVER_REWIND; (void) setrlimit(RLIMIT_NOFILE, &rl); (void) enable_extended_FILE_stdio(-1, -1); dprintf_setup(&argc, argv); while ((c = getopt(argc, argv, "bcdhilmsuCDRSAFLXevp:t:U:P")) != -1) { switch (c) { case 'b': case 'c': case 'd': case 'h': case 'i': case 'l': case 'm': case 's': case 'u': case 'C': case 'D': case 'R': case 'S': dump_opt[c]++; dump_all = 0; break; case 'A': case 'F': case 'L': case 'X': case 'e': case 'P': dump_opt[c]++; break; case 'v': verbose++; break; case 'p': if (searchdirs == NULL) { searchdirs = umem_alloc(sizeof (char *), UMEM_NOFAIL); } else { char **tmp = umem_alloc((nsearch + 1) * sizeof (char *), UMEM_NOFAIL); bcopy(searchdirs, tmp, nsearch * sizeof (char *)); umem_free(searchdirs, nsearch * sizeof (char *)); searchdirs = tmp; } searchdirs[nsearch++] = optarg; break; case 't': max_txg = strtoull(optarg, NULL, 0); if (max_txg < TXG_INITIAL) { (void) fprintf(stderr, "incorrect txg " "specified: %s\n", optarg); usage(); } break; case 'U': spa_config_path = optarg; break; default: usage(); break; } } if (!dump_opt['e'] && searchdirs != NULL) { (void) fprintf(stderr, "-p option requires use of -e\n"); usage(); } kernel_init(FREAD); g_zfs = libzfs_init(); ASSERT(g_zfs != NULL); if (dump_all) verbose = MAX(verbose, 1); for (c = 0; c < 256; c++) { if (dump_all && !strchr("elAFLRSXP", c)) dump_opt[c] = 1; if (dump_opt[c]) dump_opt[c] += verbose; } aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2); zfs_recover = (dump_opt['A'] > 1); argc -= optind; argv += optind; if (argc < 2 && dump_opt['R']) usage(); if (argc < 1) { if (!dump_opt['e'] && dump_opt['C']) { dump_cachefile(spa_config_path); return (0); } usage(); } if (dump_opt['l']) { dump_label(argv[0]); return (0); } if (dump_opt['X'] || dump_opt['F']) rewind = ZPOOL_DO_REWIND | (dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0); if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 || nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 || nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0) fatal("internal error: %s", strerror(ENOMEM)); error = 0; target = argv[0]; if (dump_opt['e']) { nvlist_t *cfg = NULL; char *name = find_zpool(&target, &cfg, nsearch, searchdirs); error = ENOENT; if (name) { if (dump_opt['C'] > 1) { (void) printf("\nConfiguration for import:\n"); dump_nvlist(cfg, 8); } if (nvlist_add_nvlist(cfg, ZPOOL_REWIND_POLICY, policy) != 0) { fatal("can't open '%s': %s", target, strerror(ENOMEM)); } if ((error = spa_import(name, cfg, NULL, ZFS_IMPORT_MISSING_LOG)) != 0) { error = spa_import(name, cfg, NULL, ZFS_IMPORT_VERBATIM); } } } if (error == 0) { if (strpbrk(target, "/@") == NULL || dump_opt['R']) { error = spa_open_rewind(target, &spa, FTAG, policy, NULL); if (error) { /* * If we're missing the log device then * try opening the pool after clearing the * log state. 
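 * (spa_lookup() must be called with spa_namespace_lock held;
 * resetting spa_log_state from SPA_LOG_MISSING to SPA_LOG_CLEAR
 * allows the retried spa_open_rewind() below to proceed without
 * the missing log device.)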
*/ mutex_enter(&spa_namespace_lock); if ((spa = spa_lookup(target)) != NULL && spa->spa_log_state == SPA_LOG_MISSING) { spa->spa_log_state = SPA_LOG_CLEAR; error = 0; } mutex_exit(&spa_namespace_lock); if (!error) { error = spa_open_rewind(target, &spa, FTAG, policy, NULL); } } } else { error = dmu_objset_own(target, DMU_OST_ANY, B_TRUE, FTAG, &os); } } nvlist_free(policy); if (error) fatal("can't open '%s': %s", target, strerror(error)); argv++; argc--; if (!dump_opt['R']) { if (argc > 0) { zopt_objects = argc; zopt_object = calloc(zopt_objects, sizeof (uint64_t)); for (i = 0; i < zopt_objects; i++) { errno = 0; zopt_object[i] = strtoull(argv[i], NULL, 0); if (zopt_object[i] == 0 && errno != 0) fatal("bad number %s: %s", argv[i], strerror(errno)); } } (os != NULL) ? dump_dir(os) : dump_zpool(spa); } else { flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR; flagbits['c'] = ZDB_FLAG_CHECKSUM; flagbits['d'] = ZDB_FLAG_DECOMPRESS; flagbits['e'] = ZDB_FLAG_BSWAP; flagbits['g'] = ZDB_FLAG_GBH; flagbits['i'] = ZDB_FLAG_INDIRECT; flagbits['p'] = ZDB_FLAG_PHYS; flagbits['r'] = ZDB_FLAG_RAW; for (i = 0; i < argc; i++) zdb_read_block(argv[i], spa); } (os != NULL) ? dmu_objset_disown(os, FTAG) : spa_close(spa, FTAG); fuid_table_destroy(); sa_loaded = B_FALSE; libzfs_fini(g_zfs); kernel_fini(); return (0); } Index: projects/nand/cddl/contrib/opensolaris/cmd/zfs/zfs.8 =================================================================== --- projects/nand/cddl/contrib/opensolaris/cmd/zfs/zfs.8 (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/cmd/zfs/zfs.8 (revision 235263) @@ -1,3200 +1,3211 @@ '\" te .\" Copyright (c) 2012, Martin Matuska . .\" All Rights Reserved. .\" .\" The contents of this file are subject to the terms of the .\" Common Development and Distribution License (the "License"). .\" You may not use this file except in compliance with the License. .\" .\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE .\" or http://www.opensolaris.org/os/licensing. .\" See the License for the specific language governing permissions .\" and limitations under the License. .\" .\" When distributing Covered Code, include this CDDL HEADER in each .\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. .\" If applicable, add the following below this CDDL HEADER, with the .\" fields enclosed by brackets "[]" replaced with your own identifying .\" information: Portions Copyright [yyyy] [name of copyright owner] .\" .\" Copyright (c) 2010, Sun Microsystems, Inc. All Rights Reserved. -.\" Copyright (c) 2011 by Delphix. All rights reserved. +.\" Copyright (c) 2012 by Delphix. All rights reserved. .\" Copyright (c) 2012 Nexenta Systems, Inc. All Rights Reserved. +.\" Copyright (c) 2012, Joyent, Inc. All rights reserved. .\" Copyright (c) 2011, Pawel Jakub Dawidek .\" .\" $FreeBSD$ .\" .Dd April 22, 2012 .Dt ZFS 8 .Os .Sh NAME .Nm zfs .Nd configures ZFS file systems .Sh SYNOPSIS .Nm .Op Fl \&? .Nm .Cm create .Op Fl pu .Op Fl o Ar property Ns = Ns Ar value .Ar ... filesystem .Nm .Cm create .Op Fl ps .Op Fl b Ar blocksize .Op Fl o Ar property Ns = Ns Ar value .Ar ... .Fl V .Ar size volume .Nm .Cm destroy .Op Fl fnpRrv .Ar filesystem Ns | Ns Ar volume .Nm .Cm destroy .Op Fl dnpRrv .Sm off .Ar snapshot .Ns Op % Ns Ar snapname .Ns Op , Ns Ar ... .Sm on .Nm .Cm snapshot .Op Fl r .Op Fl o Ar property Ns = Ns Ar value .Ar ... filesystem@snapname Ns | Ns Ar volume@snapname .Nm .Cm rollback .Op Fl rRf .Ar snapshot .Nm .Cm clone .Op Fl p .Op Fl o Ar property Ns = Ns Ar value .Ar ... 
snapshot filesystem Ns | Ns Ar volume .Nm .Cm promote .Ar clone-filesystem .Nm .Cm rename +.Op Fl f .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Nm .Cm rename +.Op Fl f .Fl p .Ar filesystem Ns | Ns Ar volume .Ar filesystem Ns | Ns Ar volume .Nm .Cm rename .Fl r .Ar snapshot snapshot .Nm .Cm rename .Fl u .Op Fl p .Ar filesystem filesystem .Nm .Cm list .Op Fl r Ns | Ns Fl d Ar depth .Op Fl H .Op Fl o Ar property Ns Op , Ns Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Op Fl s Ar property .Ar ... .Op Fl S Ar property .Ar ... .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Nm .Cm set .Ar property Ns = Ns Ar value .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Nm .Cm get .Op Fl r Ns | Ns Fl d Ar depth .Op Fl Hp .Op Fl o Ar all | field Ns Op , Ns Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Op Fl s Ar source Ns Op , Ns Ar ... .Ar all | property Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Nm .Cm inherit .Op Fl rS .Ar property .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Nm .Cm upgrade .Op Fl v .Nm .Cm upgrade .Op Fl r .Op Fl V Ar version .Fl a | Ar filesystem .Nm .Cm userspace .Op Fl niHp .Op Fl o Ar field Ns Op , Ns Ar ... .Op Fl sS Ar field .Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar snapshot .Nm .Cm groupspace .Op Fl niHp .Op Fl o Ar field Ns Op , Ns Ar ... .Op Fl sS Ar field .Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar snapshot .Nm .Cm mount .Nm .Cm mount .Op Fl vO .Op Fl o Ar property Ns Op , Ns Ar ... .Fl a | Ar filesystem .Nm .Cm unmount .Op Fl f .Fl a | Ar filesystem Ns | Ns Ar mountpoint .Nm .Cm share .Fl a | Ar filesystem .Nm .Cm unshare .Fl a | Ar filesystem Ns | Ns Ar mountpoint .Nm .Cm send .Op Fl DnPpRrv .Op Fl i Ar snapshot | Fl I Ar snapshot .Ar snapshot .Nm .Cm receive .Op Fl vnFu .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Nm .Cm receive .Op Fl vnFu .Op Fl d | e .Ar filesystem .Nm .Cm allow .Ar filesystem Ns | Ns Ar volume .Nm .Cm allow .Op Fl ldug .Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ... .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm allow .Op Fl ld .Fl e .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm allow .Fl c .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm allow .Fl s .Ar @setname .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm unallow .Op Fl rldug .Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ... .Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm unallow .Op Fl rld .Fl e .Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm unallow .Op Fl r .Fl c .Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm unallow .Op Fl r .Fl s .Ar @setname .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Nm .Cm hold .Op Fl r .Ar tag snapshot ... .Nm .Cm holds .Op Fl r .Ar snapshot ... .Nm .Cm release .Op Fl r .Ar tag snapshot ... .Nm .Cm diff .Op Fl FHt .Ar snapshot .Op Ar snapshot Ns | Ns Ar filesystem .Nm .Cm jail .Ar jailid filesystem .Nm .Cm unjail .Ar jailid filesystem .Sh DESCRIPTION The .Nm command configures .Tn ZFS datasets within a .Tn ZFS storage pool, as described in .Xr zpool 8 . A dataset is identified by a unique path within the .Tn ZFS namespace. 
For example: .Bd -ragged -offset 4n .No pool/ Ns Brq filesystem,volume,snapshot .Ed .Pp where the maximum length of a dataset name is .Dv MAXNAMELEN (256 bytes). .Pp A dataset can be one of the following: .Bl -hang -width 12n .It Sy file system A .Tn ZFS dataset of type .Em filesystem can be mounted within the standard system namespace and behaves like other file systems. While .Tn ZFS file systems are designed to be .Tn POSIX compliant, known issues exist that prevent compliance in some cases. Applications that depend on standards conformance might fail due to nonstandard behavior when checking file system free space. .It Sy volume A logical volume exported as a raw or block device. This type of dataset should only be used under special circumstances. File systems are typically used in most environments. .It Sy snapshot A read-only version of a file system or volume at a given point in time. It is specified as .Em filesystem@name or .Em volume@name . .El .Ss ZFS File System Hierarchy A .Tn ZFS storage pool is a logical collection of devices that provide space for datasets. A storage pool is also the root of the .Tn ZFS file system hierarchy. .Pp The root of the pool can be accessed as a file system; for example, it can be mounted and unmounted, snapshots can be taken, and properties can be set. The physical storage characteristics, however, are managed by the .Xr zpool 8 command. .Pp See .Xr zpool 8 for more information on creating and administering pools. .Ss Snapshots A snapshot is a read-only copy of a file system or volume. Snapshots can be created extremely quickly, and initially consume no additional space within the pool. As data within the active dataset changes, the snapshot consumes more space, because it retains references to data that is no longer shared with the active dataset. .Pp Snapshots can have arbitrary names. Snapshots of volumes can be cloned or rolled back, but cannot be accessed independently. .Pp File system snapshots can be accessed under the .Pa \&.zfs/snapshot directory in the root of the file system. Snapshots are automatically mounted on demand and may be unmounted at regular intervals. The visibility of the .Pa \&.zfs directory can be controlled by the .Sy snapdir property. .Ss Clones A clone is a writable volume or file system whose initial contents are the same as another dataset. As with snapshots, creating a clone is nearly instantaneous, and initially consumes no additional space. .Pp Clones can only be created from a snapshot. When a snapshot is cloned, it creates an implicit dependency between the parent and child. Even though the clone is created somewhere else in the dataset hierarchy, the original snapshot cannot be destroyed as long as a clone exists. The .Sy origin property exposes this dependency, and the .Cm destroy command lists any such dependencies, if they exist. .Pp The clone parent-child dependency relationship can be reversed by using the .Cm promote subcommand. This causes the "origin" file system to become a clone of the specified file system, which makes it possible to destroy the file system that the clone was created from. .Ss Mount Points Creating a .Tn ZFS file system is a simple operation, so the number of file systems per system is likely to be large. To cope with this, .Tn ZFS automatically manages mounting and unmounting file systems without the need to edit the .Pa /etc/fstab file. All automatically managed file systems are mounted by .Tn ZFS at boot time.
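.Pp
For example, assuming an existing pool named
.Em tank
(the name is illustrative only), a newly created file system is
mounted immediately, without any
.Pa /etc/fstab
entry:
.Bd -literal -offset 4n
# zfs create tank/home
# zfs create tank/home/user
.Ed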
.Pp By default, file systems are mounted under .Pa /path , where .Ar path is the name of the file system in the .Tn ZFS namespace. Directories are created and destroyed as needed. .Pp A file system can also have a mount point set in the .Sy mountpoint property. This directory is created as needed, and .Tn ZFS automatically mounts the file system when the .Qq Nm Cm mount Fl a command is invoked (without editing .Pa /etc/fstab ) . The .Sy mountpoint property can be inherited, so if .Em pool/home has a mount point of .Pa /home , then .Em pool/home/user automatically inherits a mount point of .Pa /home/user . .Pp A file system .Sy mountpoint property of .Cm none prevents the file system from being mounted. .Pp If needed, .Tn ZFS file systems can also be managed with traditional tools .Pq Xr mount 8 , Xr umount 8 , Xr fstab 5 . If a file system's mount point is set to .Cm legacy , .Tn ZFS makes no attempt to manage the file system, and the administrator is responsible for mounting and unmounting the file system. .Ss Jails .No A Tn ZFS dataset can be attached to a jail by using the .Qq Nm Cm jail subcommand. You cannot attach a dataset to one jail and the children of the same dataset to another jail. To allow management of the dataset from within a jail, the .Sy jailed property has to be set and the jail needs access to the .Pa /dev/zfs device. The .Sy quota property cannot be changed from within a jail. See .Xr jail 8 for information on how to allow mounting .Tn ZFS datasets from within a jail. .Pp .No A Tn ZFS dataset can be detached from a jail using the .Qq Nm Cm unjail subcommand. .Pp After a dataset is attached to a jail and the jailed property is set, a jailed file system cannot be mounted outside the jail, since the jail administrator might have set the mount point to an unacceptable value. .Ss Deduplication Deduplication is the process of removing redundant data at the block level, reducing the total amount of data stored. If a file system has the .Cm dedup property enabled, duplicate data blocks are removed synchronously. The result is that only unique data is stored and common components are shared among files. .Ss Native Properties Properties are divided into two types: native properties and user-defined (or "user") properties. Native properties either export internal statistics or control .Tn ZFS behavior. In addition, native properties are either editable or read-only. User properties have no effect on .Tn ZFS behavior, but you can use them to annotate datasets in a way that is meaningful in your environment. For more information about user properties, see the .Qq Sx User Properties section, below. .Pp Every dataset has a set of properties that export statistics about the dataset as well as control various behaviors. Properties are inherited from the parent unless overridden by the child. Some properties apply only to certain types of datasets (file systems, volumes, or snapshots). .Pp The values of numeric properties can be specified using human-readable suffixes (for example, .Sy k , KB , M , Gb , and so forth, up to .Sy Z for zettabyte). The following are all valid (and equal) specifications: .Bd -ragged -offset 4n 1536M, 1.5g, 1.50GB .Ed .Pp The values of non-numeric properties are case sensitive and must be lowercase, except for .Sy mountpoint , sharenfs , No and Sy sharesmb . .Pp The following native properties consist of read-only statistics about the dataset. These properties can be neither set nor inherited.
Native properties apply to all dataset types unless otherwise noted. .Bl -tag -width 2n .It Sy available The amount of space available to the dataset and all its children, assuming that there is no other activity in the pool. Because space is shared within a pool, availability can be limited by any number of factors, including physical pool size, quotas, reservations, or other datasets within the pool. .Pp This property can also be referred to by its shortened column name, .Sy avail . .It Sy compressratio For non-snapshots, the compression ratio achieved for the .Sy used space of this dataset, expressed as a multiplier. The .Sy used property includes descendant datasets, and, for clones, does not include the space shared with the origin snapshot. For snapshots, the .Sy compressratio is the same as the .Sy refcompressratio property. Compression can be turned on by running: .Qq Nm Cm set compression=on Ar dataset The default value is .Cm off . .It Sy creation The time this dataset was created. .It Sy clones For snapshots, this property is a comma-separated list of filesystems or volumes which are clones of this snapshot. The clones' .Sy origin property is this snapshot. If the .Sy clones property is not empty, then this snapshot cannot be destroyed (even with the .Fl r or .Fl f options). .It Sy defer_destroy This property is .Cm on if the snapshot has been marked for deferred destroy by using the .Qq Nm Cm destroy -d command. Otherwise, the property is .Cm off . .It Sy mounted For file systems, indicates whether the file system is currently mounted. This property can be either .Cm yes or .Cm no . .It Sy origin For cloned file systems or volumes, the snapshot from which the clone was created. See also the .Sy clones property. .It Sy referenced The amount of data that is accessible by this dataset, which may or may not be shared with other datasets in the pool. When a snapshot or clone is created, it initially references the same amount of space as the file system or snapshot it was created from, since its contents are identical. .Pp This property can also be referred to by its shortened column name, .Sy refer . .It Sy refcompressratio The compression ratio achieved for the .Sy referenced space of this dataset, expressed as a multiplier. See also the .Sy compressratio property. .It Sy type The type of dataset: .Sy filesystem , volume , No or Sy snapshot . .It Sy used The amount of space consumed by this dataset and all its descendents. This is the value that is checked against this dataset's quota and reservation. The space used does not include this dataset's reservation, but does take into account the reservations of any descendent datasets. The amount of space that a dataset consumes from its parent, as well as the amount of space that is freed if this dataset is recursively destroyed, is the greater of its space used and its reservation. .Pp When snapshots (see the .Qq Sx Snapshots section) are created, their space is initially shared between the snapshot and the file system, and possibly with previous snapshots. As the file system changes, space that was previously shared becomes unique to the snapshot, and is counted in the snapshot's space used. Additionally, deleting snapshots can increase the amount of space unique to (and used by) other snapshots. .Pp The amount of space used, available, or referenced does not take into account pending changes. Pending changes are generally accounted for within a few seconds.
Committing a change to a disk using .Xr fsync 2 or .Sy O_SYNC does not necessarily guarantee that the space usage information is updated immediately. .It Sy usedby* The .Sy usedby* properties decompose the .Sy used properties into the various reasons that space is used. Specifically, .Sy used No = .Sy usedbysnapshots + usedbydataset + usedbychildren + usedbyrefreservation . These properties are only available for datasets created with .Tn ZFS pool version 13 pools and higher. .It Sy usedbysnapshots The amount of space consumed by snapshots of this dataset. In particular, it is the amount of space that would be freed if all of this dataset's snapshots were destroyed. Note that this is not simply the sum of the snapshots' .Sy used properties because space can be shared by multiple snapshots. .It Sy usedbydataset The amount of space used by this dataset itself, which would be freed if the dataset were destroyed (after first removing any .Sy refreservation and destroying any necessary snapshots or descendents). .It Sy usedbychildren The amount of space used by children of this dataset, which would be freed if all the dataset's children were destroyed. .It Sy usedbyrefreservation The amount of space used by a .Sy refreservation set on this dataset, which would be freed if the .Sy refreservation was removed. .It Sy userused@ Ns Ar user The amount of space consumed by the specified user in this dataset. Space is charged to the owner of each file, as displayed by .Qq Nm ls Fl l . The amount of space charged is displayed by .Qq Nm du and .Qq Nm ls Fl s . See the .Qq Nm Cm userspace subcommand for more information. .Pp Unprivileged users can access only their own space usage. The root user, or a user who has been granted the .Sy userused privilege with .Qq Nm Cm allow , can access everyone's usage. .Pp The .Sy userused@ Ns ... properties are not displayed by .Qq Nm Cm get all . The user's name must be appended after the .Sy @ symbol, using one of the following forms: .Bl -bullet -offset 2n .It POSIX name (for example, .Em joe ) .It POSIX numeric ID (for example, .Em 1001 ) .El .It Sy userrefs This property is set to the number of user holds on this snapshot. User holds are set by using the .Qq Nm Cm hold command. .It Sy groupused@ Ns Ar group The amount of space consumed by the specified group in this dataset. Space is charged to the group of each file, as displayed by .Nm ls Fl l . See the .Sy userused@ Ns Ar user property for more information. .Pp Unprivileged users can only access their own groups' space usage. The root user, or a user who has been granted the .Sy groupused privilege with .Qq Nm Cm allow , can access all groups' usage. .It Sy volblocksize Ns = Ns Ar blocksize For volumes, specifies the block size of the volume. The .Ar blocksize cannot be changed once the volume has been written, so it should be set at volume creation time. The default .Ar blocksize for volumes is 8 Kbytes. Any power of 2 from 512 bytes to 128 Kbytes is valid. .Pp This property can also be referred to by its shortened column name, .Sy volblock . .It Sy written The amount of .Sy referenced space written to this dataset since the previous snapshot. .It Sy written@ Ns Ar snapshot The amount of .Sy referenced space written to this dataset since the specified snapshot. This is the space that is referenced by this dataset but was not referenced by the specified snapshot. 
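.Pp
For example, assuming a hypothetical file system
.Em tank/home
with a snapshot named
.Em monday ,
the space written since that snapshot could be queried with:
.Bd -literal -offset 4n
# zfs get written@monday tank/home
.Ed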
.Pp The .Ar snapshot may be specified as a short snapshot name (just the part after the .Sy @ ) , in which case it will be interpreted as a snapshot in the same filesystem as this dataset. The .Ar snapshot may be a full snapshot name .Pq Em filesystem@snapshot , which for clones may be a snapshot in the origin's filesystem (or the origin of the origin's filesystem, etc.). .El .Pp The following native properties can be used to change the behavior of a .Tn ZFS dataset. .Bl -tag -width 2n .It Xo .Sy aclinherit Ns = Ns Cm discard | .Cm noallow | .Cm restricted | .Cm passthrough | .Cm passthrough-x .Xc Controls how .Tn ACL entries are inherited when files and directories are created. A file system with an .Sy aclinherit property of .Cm discard does not inherit any .Tn ACL entries. A file system with an .Sy aclinherit property value of .Cm noallow only inherits inheritable .Tn ACL entries that specify "deny" permissions. The property value .Cm restricted (the default) removes the .Em write_acl and .Em write_owner permissions when the .Tn ACL entry is inherited. A file system with an .Sy aclinherit property value of .Cm passthrough inherits all inheritable .Tn ACL entries without any modifications made to the .Tn ACL entries when they are inherited. A file system with an .Sy aclinherit property value of .Cm passthrough-x has the same meaning as .Cm passthrough , except that the .Em owner@ , group@ , No and Em everyone@ Tn ACE Ns s inherit the execute permission only if the file creation mode also requests the execute bit. .Pp When the property value is set to .Cm passthrough , files are created with a mode determined by the inheritable .Tn ACE Ns s. If no inheritable .Tn ACE Ns s exist that affect the mode, then the mode is set in accordance with the requested mode from the application. .It Sy aclmode Ns = Ns Cm discard | groupmask | passthrough Controls how an .Tn ACL is modified during .Xr chmod 2 . A file system with an .Sy aclmode property of .Cm discard (the default) deletes all .Tn ACL entries that do not represent the mode of the file. An .Sy aclmode property of .Cm groupmask reduces permissions granted in all .Em ALLOW entries found in the .Tn ACL such that they are no greater than the group permissions specified by .Xr chmod 2 . A file system with an .Sy aclmode property of .Cm passthrough indicates that no changes are made to the .Tn ACL other than creating or updating the necessary .Tn ACL entries to represent the new mode of the file or directory. .It Sy atime Ns = Ns Cm on | off Controls whether the access time for files is updated when they are read. Turning this property off avoids producing write traffic when reading files and can result in significant performance gains, though it might confuse mailers and other similar utilities. The default value is .Cm on . .It Sy canmount Ns = Ns Cm on | off | noauto If this property is set to .Cm off , the file system cannot be mounted, and is ignored by .Qq Nm Cm mount Fl a . Setting this property to .Cm off is similar to setting the .Sy mountpoint property to .Cm none , except that the dataset still has a normal .Sy mountpoint property, which can be inherited. Setting this property to .Cm off allows datasets to be used solely as a mechanism to inherit properties. One example of setting .Sy canmount Ns = Ns Cm off is to have two datasets with the same .Sy mountpoint , so that the children of both datasets appear in the same directory, but might have different inherited characteristics.
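.Pp
A minimal sketch of this arrangement, assuming two hypothetical pools
named
.Em pool1
and
.Em pool2 :
.Bd -literal -offset 4n
# zfs create -o canmount=off -o mountpoint=/home pool1/home
# zfs create -o canmount=off -o mountpoint=/home pool2/home
# zfs create pool1/home/alice
# zfs create pool2/home/bob
.Ed
.Pp
Here
.Pa /home/alice
and
.Pa /home/bob
come from different pools but share the same parent directory.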
.Pp When the .Cm noauto value is set, a dataset can only be mounted and unmounted explicitly. The dataset is not mounted automatically when the dataset is created or imported, nor is it mounted by the .Qq Nm Cm mount Fl a command or unmounted by the .Qq Nm Cm umount Fl a command. .Pp This property is not inherited. .It Sy checksum Ns = Ns Cm on | off | fletcher2 | fletcher4 Controls the checksum used to verify data integrity. The default value is .Cm on , which automatically selects an appropriate algorithm (currently, .Cm fletcher4 , but this may change in future releases). The value .Cm off disables integrity checking on user data. Disabling checksums is .Em NOT a recommended practice. .It Sy compression Ns = Ns Cm on | off | lzjb | gzip | gzip- Ns Ar N | Cm zle Controls the compression algorithm used for this dataset. The .Cm lzjb compression algorithm is optimized for performance while providing decent data compression. Setting compression to .Cm on uses the .Cm lzjb compression algorithm. The .Cm gzip compression algorithm uses the same compression as the .Xr gzip 1 command. You can specify the .Cm gzip level by using the value .Cm gzip- Ns Ar N where .Ar N is an integer from 1 (fastest) to 9 (best compression ratio). Currently, .Cm gzip is equivalent to .Cm gzip-6 (which is also the default for .Xr gzip 1 ) . The .Cm zle compression algorithm compresses runs of zeros. .Pp This property can also be referred to by its shortened column name .Cm compress . Changing this property affects only newly-written data. .It Sy copies Ns = Ns Cm 1 | 2 | 3 Controls the number of copies of data stored for this dataset. These copies are in addition to any redundancy provided by the pool, for example, mirroring or RAID-Z. The copies are stored on different disks, if possible. The space used by multiple copies is charged to the associated file and dataset, changing the .Sy used property and counting against quotas and reservations. .Pp Changing this property only affects newly-written data. Therefore, set this property at file system creation time by using the .Fl o Cm copies= Ns Ar N option. .It Sy dedup Ns = Ns Cm on | off | verify | sha256 Ns Op Cm ,verify Configures deduplication for a dataset. The default value is .Cm off . The default deduplication checksum is .Cm sha256 (this may change in the future). When .Sy dedup is enabled, the checksum defined here overrides the .Sy checksum property. Setting the value to .Cm verify has the same effect as setting .Cm sha256,verify . .Pp If set to .Cm verify , .Tn ZFS will do a byte-by-byte comparison when two blocks have the same signature, to make sure the block contents are identical. .It Sy devices Ns = Ns Cm on | off The .Sy devices property is currently not supported on .Fx . .It Sy exec Ns = Ns Cm on | off Controls whether processes can be executed from within this file system. The default value is .Cm on . .It Sy mlslabel Ns = Ns Ar label | Cm none The .Sy mlslabel property is currently not supported on .Fx . .It Sy mountpoint Ns = Ns Ar path | Cm none | legacy Controls the mount point used for this file system. See the .Qq Sx Mount Points section for more information on how this property is used. .Pp When the .Sy mountpoint property is changed for a file system, the file system and any children that inherit the mount point are unmounted. If the new value is .Cm legacy , then they remain unmounted.
Otherwise, they are automatically remounted in the new location if the property was previously .Cm legacy or .Cm none , or if they were mounted before the property was changed. In addition, any shared file systems are unshared and shared in the new location. .It Sy nbmand Ns = Ns Cm on | off The .Sy nbmand property is currently not supported on .Fx . .It Sy primarycache Ns = Ns Cm all | none | metadata Controls what is cached in the primary cache (ARC). If this property is set to .Cm all , then both user data and metadata are cached. If this property is set to .Cm none , then neither user data nor metadata is cached. If this property is set to .Cm metadata , then only metadata is cached. The default value is .Cm all . .It Sy quota Ns = Ns Ar size | Cm none Limits the amount of space a dataset and its descendents can consume. This property enforces a hard limit on the amount of space used. This includes all space consumed by descendents, including file systems and snapshots. Setting a quota on a descendent of a dataset that already has a quota does not override the ancestor's quota, but rather imposes an additional limit. .Pp Quotas cannot be set on volumes, as the .Sy volsize property acts as an implicit quota. .It Sy userquota@ Ns Ar user Ns = Ns Ar size | Cm none Limits the amount of space consumed by the specified user. Similar to the .Sy refquota property, the .Sy userquota space calculation does not include space that is used by descendent datasets, such as snapshots and clones. User space consumption is identified by the .Sy userused@ Ns Ar user property. .Pp Enforcement of user quotas may be delayed by several seconds. This delay means that a user might exceed their quota before the system notices that they are over quota and begins to refuse additional writes with the .Em EDQUOT error message. See the .Cm userspace subcommand for more information. .Pp Unprivileged users can only access their own space usage. The root user, or a user who has been granted the .Sy userquota privilege with .Qq Nm Cm allow , can get and set everyone's quota. .Pp This property is not available on volumes, on file systems before version 4, or on pools before version 15. The .Sy userquota@ Ns ... properties are not displayed by .Qq Nm Cm get all . The user's name must be appended after the .Sy @ symbol, using one of the following forms: .Bl -bullet -offset 2n .It POSIX name (for example, .Em joe ) .It POSIX numeric ID (for example, .Em 1001 ) .El .It Sy groupquota@ Ns Ar group Ns = Ns Ar size | Cm none Limits the amount of space consumed by the specified group. Group space consumption is identified by the .Sy groupused@ Ns Ar group property. .Pp Unprivileged users can access only their own groups' space usage. The root user, or a user who has been granted the .Sy groupquota privilege with .Qq Nm Cm allow , can get and set all groups' quotas. .It Sy readonly Ns = Ns Cm on | off Controls whether this dataset can be modified. The default value is .Cm off . .It Sy recordsize Ns = Ns Ar size Specifies a suggested block size for files in the file system. This property is designed solely for use with database workloads that access files in fixed-size records. .Tn ZFS automatically tunes block sizes according to internal algorithms optimized for typical access patterns. .Pp For databases that create very large files but access them in small random chunks, these algorithms may be suboptimal.
Specifying a .Sy recordsize greater than or equal to the record size of the database can result in significant performance gains. Use of this property for general purpose file systems is strongly discouraged, and may adversely affect performance. .Pp The size specified must be a power of two greater than or equal to 512 and less than or equal to 128 Kbytes. .Pp Changing the file system's .Sy recordsize affects only files created afterward; existing files are unaffected. .Pp This property can also be referred to by its shortened column name, .Sy recsize . .It Sy refquota Ns = Ns Ar size | Cm none Limits the amount of space a dataset can consume. This property enforces a hard limit on the amount of space used. This hard limit does not include space used by descendents, including file systems and snapshots. .It Sy refreservation Ns = Ns Ar size | Cm none The minimum amount of space guaranteed to a dataset, not including its descendents. When the amount of space used is below this value, the dataset is treated as if it were taking up the amount of space specified by .Sy refreservation . The .Sy refreservation reservation is accounted for in the parent datasets' space used, and counts against the parent datasets' quotas and reservations. .Pp If .Sy refreservation is set, a snapshot is only allowed if there is enough free pool space outside of this reservation to accommodate the current number of "referenced" bytes in the dataset. .Pp This property can also be referred to by its shortened column name, .Sy refreserv . .It Sy reservation Ns = Ns Ar size | Cm none The minimum amount of space guaranteed to a dataset and its descendents. When the amount of space used is below this value, the dataset is treated as if it were taking up the amount of space specified by its reservation. Reservations are accounted for in the parent datasets' space used, and count against the parent datasets' quotas and reservations. .Pp This property can also be referred to by its shortened column name, .Sy reserv . .It Sy secondarycache Ns = Ns Cm all | none | metadata Controls what is cached in the secondary cache (L2ARC). If this property is set to .Cm all , then both user data and metadata are cached. If this property is set to .Cm none , then neither user data nor metadata is cached. If this property is set to .Cm metadata , then only metadata is cached. The default value is .Cm all . .It Sy setuid Ns = Ns Cm on | off Controls whether the .No set- Ns Tn UID bit is respected for the file system. The default value is .Cm on . .It Sy sharesmb Ns = Ns Cm on | off | Ar opts The .Sy sharesmb property currently has no effect on .Fx . .It Sy sharenfs Ns = Ns Cm on | off | Ar opts Controls whether the file system is shared via .Tn NFS , and what options are used. A file system with a .Sy sharenfs property of .Cm off is managed the traditional way via .Xr exports 5 . Otherwise, the file system is automatically shared and unshared with the .Qq Nm Cm share and .Qq Nm Cm unshare commands. If the property is set to .Cm on , no .Tn NFS export options are used. Otherwise, .Tn NFS export options are equivalent to the contents of this property. The export options may be comma-separated. See .Xr exports 5 for a list of valid options. .Pp When the .Sy sharenfs property is changed for a dataset, the .Xr mountd 8 daemon is reloaded. .It Sy logbias Ns = Ns Cm latency | throughput Provides a hint to .Tn ZFS about handling of synchronous requests in this dataset.
If .Sy logbias is set to .Cm latency (the default), .Tn ZFS will use pool log devices (if configured) to handle the requests at low latency. If .Sy logbias is set to .Cm throughput , .Tn ZFS will not use configured pool log devices. .Tn ZFS will instead optimize synchronous operations for global pool throughput and efficient use of resources. .It Sy snapdir Ns = Ns Cm hidden | visible Controls whether the .Pa \&.zfs directory is hidden or visible in the root of the file system as discussed in the .Qq Sx Snapshots section. The default value is .Cm hidden . .It Sy sync Ns = Ns Cm standard | always | disabled Controls the behavior of synchronous requests (e.g. .Xr fsync 2 , O_DSYNC). This property accepts the following values: .Bl -tag -offset 4n -width 8n .It Sy standard This is the POSIX-specified behavior of ensuring all synchronous requests are written to stable storage and all devices are flushed to ensure data is not cached by device controllers (this is the default). .It Sy always All file system transactions are written and flushed before their system calls return. This has a large performance penalty. .It Sy disabled Disables synchronous requests. File system transactions are only committed to stable storage periodically. This option will give the highest performance. However, it is very dangerous as .Tn ZFS would be ignoring the synchronous transaction demands of applications such as databases or .Tn NFS . Administrators should only use this option when the risks are understood. .El .It Sy volsize Ns = Ns Ar size For volumes, specifies the logical size of the volume. By default, creating a volume establishes a reservation of equal size. For storage pools with a version number of 9 or higher, a .Sy refreservation is set instead. Any changes to .Sy volsize are reflected in an equivalent change to the reservation (or .Sy refreservation ) . The .Sy volsize can only be set to a multiple of .Cm volblocksize , and cannot be zero. .Pp The reservation is kept equal to the volume's logical size to prevent unexpected behavior for consumers. Without the reservation, the volume could run out of space, resulting in undefined behavior or data corruption, depending on how the volume is used. These effects can also occur when the volume size is changed while it is in use (particularly when shrinking the size). Extreme care should be used when adjusting the volume size. .Pp Though not recommended, a "sparse volume" (also known as "thin provisioning") can be created by specifying the .Fl s option to the .Qq Nm Cm create Fl V command, or by changing the reservation after the volume has been created. A "sparse volume" is a volume where the reservation is less than the volume size. Consequently, writes to a sparse volume can fail with .Sy ENOSPC when the pool is low on space. For a sparse volume, changes to .Sy volsize are not reflected in the reservation. .It Sy vscan Ns = Ns Cm off | on The .Sy vscan property is currently not supported on .Fx . .It Sy xattr Ns = Ns Cm off | on The .Sy xattr property is currently not supported on .Fx . .It Sy jailed Ns = Ns Cm off | on Controls whether the dataset is managed from a jail. See the .Qq Sx Jails section for more information. The default value is .Cm off . .El .Pp The following three properties cannot be changed after the file system is created, and therefore should be set when the file system is created. If the properties are not set with the .Qq Nm Cm create or .Nm zpool Cm create commands, these properties are inherited from the parent dataset.
If the parent dataset lacks these properties due to having been created prior to these features being supported, the new file system will have the default values for these properties. .Bl -tag -width 4n .It Sy casesensitivity Ns = Ns Cm sensitive | insensitive | mixed The .Sy casesensitivity property is currently not supported on .Fx . .It Sy normalization Ns = Ns Cm none | formC | formD | formKC | formKD Indicates whether the file system should perform a .Sy unicode normalization of file names whenever two file names are compared, and which normalization algorithm should be used. File names are always stored unmodified; names are normalized as part of any comparison process. If this property is set to a legal value other than .Cm none , and the .Sy utf8only property was left unspecified, the .Sy utf8only property is automatically set to .Cm on . The default value of the .Sy normalization property is .Cm none . This property cannot be changed after the file system is created. .It Sy utf8only Ns = Ns Cm on | off Indicates whether the file system should reject file names that include characters that are not present in the .Sy UTF-8 character code set. If this property is explicitly set to .Cm off , the normalization property must either not be explicitly set or be set to .Cm none . The default value for the .Sy utf8only property is .Cm off . This property cannot be changed after the file system is created. .El .Pp The .Sy casesensitivity , normalization , No and Sy utf8only properties are also new permissions that can be assigned to non-privileged users by using the .Tn ZFS delegated administration feature. .Ss Temporary Mount Point Properties When a file system is mounted, either through .Xr mount 8 for legacy mounts or the .Qq Nm Cm mount command for normal file systems, its mount options are set according to its properties. The correlation between properties and mount options is as follows: .Bl -column -offset 4n "PROPERTY" "MOUNT OPTION" .It "PROPERTY MOUNT OPTION" .It "atime atime/noatime" .It "exec exec/noexec" .It "readonly ro/rw" .It "setuid suid/nosuid" .El .Pp In addition, these options can be set on a per-mount basis using the .Fl o option, without affecting the property that is stored on disk. The values specified on the command line override the values stored in the dataset. These properties are reported as "temporary" by the .Qq Nm Cm get command. If the properties are changed while the dataset is mounted, the new setting overrides any temporary settings. .Ss User Properties In addition to the standard native properties, .Tn ZFS supports arbitrary user properties. User properties have no effect on .Tn ZFS behavior, but applications or administrators can use them to annotate datasets (file systems, volumes, and snapshots). .Pp User property names must contain a colon .Pq Sy \&: character to distinguish them from native properties. They may contain lowercase letters, numbers, and the following punctuation characters: colon .Pq Sy \&: , dash .Pq Sy \&- , period .Pq Sy \&. and underscore .Pq Sy \&_ . The expected convention is that the property name is divided into two portions such as .Em module Ns Sy \&: Ns Em property , but this namespace is not enforced by .Tn ZFS . User property names can be at most 256 characters, and cannot begin with a dash .Pq Sy \&- .
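.Pp As a brief illustrative sketch (the property name .Em org.example:owner and the dataset .Em tank/home are hypothetical, not taken from this manual), a user property following the .Em module Ns Sy \&: Ns Em property convention could be set, and later cleared with .Qq Nm Cm inherit , as follows: .Bd -literal -offset 2n .Li # Ic zfs set org.example:owner=alice tank/home .Li # Ic zfs inherit org.example:owner tank/home .Ed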
.Pp When making programmatic use of user properties, it is strongly suggested to use a reversed .Tn DNS domain name for the .Ar module component of property names to reduce the chance that two independently-developed packages use the same property name for different purposes. Property names beginning with .Em com.sun are reserved for use by Sun Microsystems. .Pp The values of user properties are arbitrary strings, are always inherited, and are never validated. All of the commands that operate on properties .Po .Qq Nm Cm list , .Qq Nm Cm get , .Qq Nm Cm set and so forth .Pc can be used to manipulate both native properties and user properties. Use the .Qq Nm Cm inherit command to clear a user property. If the property is not defined in any parent dataset, it is removed entirely. Property values are limited to 1024 characters. .Sh SUBCOMMANDS All subcommands that modify state are logged persistently to the pool in their original form. .Bl -tag -width 2n .It Xo .Nm .Op Fl \&? .Xc .Pp Displays a help message. .It Xo .Nm .Cm create .Op Fl pu .Op Fl o Ar property Ns = Ns Ar value .Ar ... filesystem .Xc .Pp Creates a new .Tn ZFS file system. The file system is automatically mounted according to the .Sy mountpoint property inherited from the parent. .Bl -tag -width indent .It Fl p Creates all the non-existing parent datasets. Datasets created in this manner are automatically mounted according to the .Sy mountpoint property inherited from their parent. Any property specified on the command line using the .Fl o option is ignored. If the target filesystem already exists, the operation completes successfully. .It Fl u The newly created file system is not mounted. .It Fl o Ar property Ns = Ns Ar value Sets the specified property as if the command .Qq Nm Cm set Ar property Ns = Ns Ar value was invoked at the same time the dataset was created. Any editable .Tn ZFS property can also be set at creation time. Multiple .Fl o options can be specified. An error results if the same property is specified in multiple .Fl o options. .El .It Xo .Nm .Cm create .Op Fl ps .Op Fl b Ar blocksize .Op Fl o Ar property Ns = Ns Ar value .Ar ... .Fl V .Ar size volume .Xc .Pp Creates a volume of the given size. The volume is exported as a block device in .Pa /dev/zvol/path , where .Ar path is the name of the volume in the .Tn ZFS namespace. The size represents the logical size as exported by the device. By default, a reservation of equal size is created. .Pp .Ar size is automatically rounded up to the nearest 128 Kbytes to ensure that the volume has an integral number of blocks regardless of .Ar blocksize . .Bl -tag -width indent .It Fl p Creates all the non-existing parent datasets. Datasets created in this manner are automatically mounted according to the .Sy mountpoint property inherited from their parent. Any property specified on the command line using the .Fl o option is ignored. If the target filesystem already exists, the operation completes successfully. .It Fl s Creates a sparse volume with no reservation. See .Sy volsize in the .Qq Sx Native Properties section for more information about sparse volumes. .It Fl b Ar blocksize Equivalent to .Fl o Cm volblocksize Ns = Ns Ar blocksize . If this option is specified in conjunction with .Fl o Cm volblocksize , the resulting behavior is undefined. .It Fl o Ar property Ns = Ns Ar value Sets the specified property as if the .Qq Nm Cm set Ar property Ns = Ns Ar value command was invoked at the same time the dataset was created.
Any editable .Tn ZFS property can also be set at creation time. Multiple .Fl o options can be specified. An error results if the same property is specified in multiple .Fl o options. .El .It Xo .Nm .Cm destroy .Op Fl fnpRrv .Ar filesystem Ns | Ns Ar volume .Xc .Pp Destroys the given dataset. By default, the command unshares any file systems that are currently shared, unmounts any file systems that are currently mounted, and refuses to destroy a dataset that has active dependents (children or clones). .Bl -tag -width indent .It Fl r Recursively destroy all children. .It Fl R Recursively destroy all dependents, including cloned file systems outside the target hierarchy. .It Fl f Force an unmount of any file systems using the .Qq Nm Cm unmount Fl f command. This option has no effect on non-file systems or unmounted file systems. .It Fl n Do a dry-run ("No-op") deletion. No data will be deleted. This is useful in conjunction with the .Fl v or .Fl p flags to determine what data would be deleted. .It Fl p Print machine-parsable verbose information about the deleted data. .It Fl v Print verbose information about the deleted data. .El .Pp Extreme care should be taken when applying either the .Fl r or the .Fl R options, as they can destroy large portions of a pool and cause unexpected behavior for mounted file systems in use. .It Xo .Nm .Cm destroy .Op Fl dnpRrv .Sm off .Ar snapshot .Ns Op % Ns Ar snapname .Ns Op , Ns Ar ... .Sm on .Xc .Pp The given snapshots are destroyed immediately if and only if the .Qq Nm Cm destroy command without the .Fl d option would have destroyed them. Such immediate destruction would occur, for example, if a snapshot had no clones and the user-initiated reference count were zero. .Pp If a snapshot does not qualify for immediate destruction, it is marked for deferred deletion. In this state, it exists as a usable, visible snapshot until both of the preconditions listed above are met, at which point it is destroyed. .Pp An inclusive range of snapshots may be specified by separating the first and last snapshots with a percent sign .Pq Sy % . The first and/or last snapshots may be left blank, in which case the filesystem's oldest or newest snapshot will be implied. .Pp Multiple snapshots (or ranges of snapshots) of the same filesystem or volume may be specified in a comma-separated list of snapshots. Only the snapshot's short name (the part after the .Sy @ ) should be specified when using a range or comma-separated list to identify multiple snapshots. .Bl -tag -width indent .It Fl r Destroy (or mark for deferred deletion) all snapshots with this name in descendent file systems. .It Fl R Recursively destroy all dependents. .It Fl n Do a dry-run ("No-op") deletion. No data will be deleted. This is useful in conjunction with the .Fl v or .Fl p flags to determine what data would be deleted. .It Fl p Print machine-parsable verbose information about the deleted data. .It Fl v Print verbose information about the deleted data. .It Fl d Defer snapshot deletion. .El .Pp Extreme care should be taken when applying either the .Fl r or the .Fl R options, as they can destroy large portions of a pool and cause unexpected behavior for mounted file systems in use. .It Xo .Nm .Cm snapshot .Op Fl r .Op Fl o Ar property Ns = Ns Ar value .Ar ... .Ar filesystem@snapname Ns | Ns volume@snapname .Xc .Pp Creates a snapshot with the given name. All previous modifications by successful system calls to the file system are part of the snapshot. See the .Qq Sx Snapshots section for details.
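.Pp As a minimal sketch (the dataset and snapshot names are illustrative only), a snapshot of a single file system, and an atomic recursive snapshot of a whole tree using the .Fl r option described below, might be created as follows: .Bd -literal -offset 2n .Li # Ic zfs snapshot tank/home@friday .Li # Ic zfs snapshot -r tank@friday .Ed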
.Bl -tag -width indent .It Fl r Recursively create snapshots of all descendent datasets. Snapshots are taken atomically, so that all recursive snapshots correspond to the same moment in time. .It Fl o Ar property Ns = Ns Ar value Sets the specified property; see .Qq Nm Cm create for details. .El .It Xo .Nm .Cm rollback .Op Fl rRf .Ar snapshot .Xc .Pp Roll back the given dataset to a previous snapshot. When a dataset is rolled back, all data that has changed since the snapshot is discarded, and the dataset reverts to the state at the time of the snapshot. By default, the command refuses to roll back to a snapshot other than the most recent one. In order to do so, all intermediate snapshots must be destroyed by specifying the .Fl r option. .Bl -tag -width indent .It Fl r Recursively destroy any snapshots more recent than the one specified. .It Fl R Recursively destroy any more recent snapshots, as well as any clones of those snapshots. .It Fl f Used with the .Fl R option to force an unmount of any clone file systems that are to be destroyed. .El .It Xo .Nm .Cm clone .Op Fl p .Op Fl o Ar property Ns = Ns Ar value .Ar ... snapshot filesystem Ns | Ns Ar volume .Xc .Pp Creates a clone of the given snapshot. See the .Qq Sx Clones section for details. The target dataset can be located anywhere in the .Tn ZFS hierarchy, and is created as the same type as the original. .Bl -tag -width indent .It Fl p Creates all the non-existing parent datasets. Datasets created in this manner are automatically mounted according to the .Sy mountpoint property inherited from their parent. If the target filesystem or volume already exists, the operation completes successfully. .It Fl o Ar property Ns = Ns Ar value Sets the specified property; see .Qq Nm Cm create for details. .El .It Xo .Nm .Cm promote .Ar clone-filesystem .Xc .Pp Promotes a clone file system to no longer be dependent on its "origin" snapshot. This makes it possible to destroy the file system that the clone was created from. The clone parent-child dependency relationship is reversed, so that the origin file system becomes a clone of the specified file system. .Pp The snapshot that was cloned, and any snapshots previous to this snapshot, are now owned by the promoted clone. The space they use moves from the origin file system to the promoted clone, so enough space must be available to accommodate these snapshots. No new space is consumed by this operation, but the space accounting is adjusted. The promoted clone must not have any conflicting snapshot names of its own. The .Cm rename subcommand can be used to rename any conflicting snapshots. .It Xo .Nm .Cm rename +.Op Fl f .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Xc .It Xo .Nm .Cm rename +.Op Fl f .Fl p .Ar filesystem Ns | Ns Ar volume .Ar filesystem Ns | Ns Ar volume .Xc .It Xo .Nm .Cm rename .Fl u .Op Fl p .Ar filesystem filesystem .Xc .Pp Renames the given dataset. The new target can be located anywhere in the .Tn ZFS hierarchy, with the exception of snapshots. Snapshots can only be renamed within the parent file system or volume. When renaming a snapshot, the parent file system of the snapshot does not need to be specified as part of the second argument. Renamed file systems can inherit new mount points, in which case they are unmounted and remounted at the new mount point. .Bl -tag -width indent .It Fl p Creates all the nonexistent parent datasets. 
Datasets created in this manner are automatically mounted according to the .Sy mountpoint property inherited from their parent. .It Fl u Do not remount file systems during rename. If a file system's .Sy mountpoint property is set to .Cm legacy or .Cm none , the file system is not unmounted even if this option is not given. +.It Fl f +Force unmount any filesystems that need to be unmounted in the process. +This flag has no effect if used together with the +.Fl u +flag. .El .It Xo .Nm .Cm rename .Fl r .Ar snapshot snapshot .Xc .Pp Recursively rename the snapshots of all descendent datasets. Snapshots are the only type of dataset that can be renamed recursively. .It Xo .Nm .Cm list .Op Fl r Ns | Ns Fl d Ar depth .Op Fl H .Op Fl o Ar property Ns Op , Ns Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Op Fl s Ar property .Ar ... .Op Fl S Ar property .Ar ... .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Xc .Pp Lists the property information for the given datasets in tabular form. If specified, you can list property information by the absolute pathname or the relative pathname. By default, all file systems and volumes are displayed. Snapshots are displayed if the .Sy listsnaps property is .Cm on (the default is .Cm off ) . The following fields are displayed: .Sy name , used , available , referenced , mountpoint . .Bl -tag -width indent .It Fl r Recursively display any children of the dataset on the command line. .It Fl d Ar depth Recursively display any children of the dataset, limiting the recursion to .Ar depth . A depth of .Sy 1 will display only the dataset and its direct children. .It Fl H Used for scripting mode; do not print headers, and separate fields by a single tab instead of arbitrary white space. .It Fl o Ar property Ns Op , Ns Ar ... A comma-separated list of properties to display. The property must be: .Bl -bullet -offset 2n .It One of the properties described in the .Qq Sx Native Properties section .It A user property .It The value .Cm name to display the dataset name .It The value .Cm space to display space usage properties on file systems and volumes. This is a shortcut for specifying .Fl o .Sy name,avail,used,usedsnap,usedds,usedrefreserv,usedchild .Fl t .Sy filesystem,volume syntax. .El .It Fl t Ar type Ns Op , Ns Ar ... A comma-separated list of types to display, where .Ar type is one of .Sy filesystem , snapshot , volume , No or Sy all . For example, specifying .Fl t Cm snapshot displays only snapshots. .It Fl s Ar property A property for sorting the output by column in ascending order based on the value of the property. The property must be one of the properties described in the .Qq Sx Properties section, or the special value .Cm name to sort by the dataset name. Multiple properties can be specified at one time using multiple .Fl s property options. Multiple .Fl s options are evaluated from left to right in decreasing order of importance. .Pp The following is a list of sorting criteria: .Bl -bullet -offset 2n .It Numeric types sort in numeric order. .It String types sort in alphabetical order. .It Types inappropriate for a row sort that row to the literal bottom, regardless of the specified ordering. .It If no sorting options are specified, the existing behavior of .Qq Nm Cm list is preserved. .El .It Fl S Ar property Same as the .Fl s option, but sorts by property in descending order. .El .It Xo .Nm .Cm set .Ar property Ns = Ns Ar value .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Xc .Pp Sets the property to the given value for each dataset.
Only some properties can be edited. See the "Properties" section for more information on what properties can be set and acceptable values. Numeric values can be specified as exact values, or in a human-readable form with a suffix of .Sy B , K , M , G , T , P , E , Z (for bytes, kilobytes, megabytes, gigabytes, terabytes, petabytes, exabytes, or zettabytes, respectively). User properties can be set on snapshots. For more information, see the .Qq Sx User Properties section. .It Xo .Nm .Cm get .Op Fl r Ns | Ns Fl d Ar depth .Op Fl Hp .Op Fl o Ar all | field Ns Op , Ns Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Op Fl s Ar source Ns Op , Ns Ar ... .Ar all | property Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Xc .Pp Displays properties for the given datasets. If no datasets are specified, then the command displays properties for all datasets on the system. For each property, the following columns are displayed: .Pp .Bl -hang -width "property" -offset indent -compact .It name Dataset name .It property Property name .It value Property value .It source Property source. Can either be local, default, temporary, inherited, or none (\&-). .El .Pp All columns except the .Sy RECEIVED column are displayed by default. The columns to display can be specified by using the .Fl o option. This command takes a comma-separated list of properties as described in the .Qq Sx Native Properties and .Qq Sx User Properties sections. .Pp The special value .Cm all can be used to display all properties that apply to the given dataset's type (filesystem, volume, or snapshot). .Bl -tag -width indent .It Fl r Recursively display properties for any children. .It Fl d Ar depth Recursively display any children of the dataset, limiting the recursion to .Ar depth . A depth of .Sy 1 will display only the dataset and its direct children. .It Fl H Display output in a form more easily parsed by scripts. Any headers are omitted, and fields are explicitly separated by a single tab instead of an arbitrary amount of space. .It Fl p Display numbers in parseable (exact) values. .It Fl o Cm all | Ar field Ns Op , Ns Ar ... A comma-separated list of columns to display. Supported values are .Sy name,property,value,received,source . Default values are .Sy name,property,value,source . The keyword .Cm all specifies all columns. .It Fl t Ar type Ns Op , Ns Ar ... A comma-separated list of types to display, where .Ar type is one of .Sy filesystem , snapshot , volume , No or Sy all . For example, specifying .Fl t Cm snapshot displays only snapshots. .It Fl s Ar source Ns Op , Ns Ar ... A comma-separated list of sources to display. Those properties coming from a source other than those in this list are ignored. Each source must be one of the following: .Sy local,default,inherited,temporary,received,none . The default value is all sources. .El .It Xo .Nm .Cm inherit .Op Fl rS .Ar property .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Xc .Pp Clears the specified property, causing it to be inherited from an ancestor. If no ancestor has the property set, then the default value is used. See the .Qq Sx Properties section for a listing of default values, and details on which properties can be inherited. .Bl -tag -width indent .It Fl r Recursively inherit the given property for all children. .It Fl S For properties with a received value, revert to this value. This flag has no effect on properties that do not have a received value. 
.El .It Xo .Nm .Cm upgrade .Op Fl v .Xc .Pp Displays a list of file systems that are not the most recent version. .Bl -tag -width indent .It Fl v Displays .Tn ZFS filesystem versions supported by the current software. The current .Tn ZFS filesystem version and all previous supported versions are displayed, along with an explanation of the features provided with each version. .El .It Xo .Nm .Cm upgrade .Op Fl r .Op Fl V Ar version .Fl a | Ar filesystem .Xc .Pp Upgrades file systems to a new on-disk version. Once this is done, the file systems will no longer be accessible on systems running older versions of the software. .Qq Nm Cm send streams generated from new snapshots of these file systems cannot be accessed on systems running older versions of the software. .Pp In general, the file system version is independent of the pool version. See .Xr zpool 8 for information on the .Nm zpool Cm upgrade command. .Pp In some cases, the file system version and the pool version are interrelated and the pool version must be upgraded before the file system version can be upgraded. .Bl -tag -width indent .It Fl r Upgrade the specified file system and all descendent file systems. .It Fl V Ar version Upgrade to the specified .Ar version . If the .Fl V flag is not specified, this command upgrades to the most recent version. This option can only be used to increase the version number, and only up to the most recent version supported by this software. .It Fl a Upgrade all file systems on all imported pools. .It Ar filesystem Upgrade the specified file system. .El .It Xo .Nm .Cm userspace .Op Fl niHp .Op Fl o Ar field Ns Op , Ns Ar ... .Op Fl sS Ar field .Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar snapshot .Xc .Pp Displays space consumed by, and quotas on, each user in the specified filesystem or snapshot. This corresponds to the .Sy userused@ Ns Ar user and .Sy userquota@ Ns Ar user properties. .Bl -tag -width indent .It Fl n Print numeric ID instead of user/group name. .It Fl H Do not print headers; use tab-delimited output. .It Fl p Use exact (parseable) numeric output. .It Fl o Ar field Ns Op , Ns Ar ... Display only the specified fields from the following set, .Sy type,name,used,quota . The default is to display all fields. .It Fl s Ar field Sort output by this field. The .Fl s and .Fl S flags may be specified multiple times to sort first by one field, then by another. The default is .Fl s Cm type Fl s Cm name . .It Fl S Ar field Sort by this field in reverse order. See .Fl s . .It Fl t Ar type Ns Op , Ns Ar ... Print only the specified types from the following set, .Sy all,posixuser,smbuser,posixgroup,smbgroup . .Pp The default is .Fl t Cm posixuser,smbuser . .Pp The default can be changed to include group types. .It Fl i Translate SID to POSIX ID. This flag currently has no effect on .Fx . .El .It Xo .Nm .Cm groupspace .Op Fl niHp .Op Fl o Ar field Ns Op , Ns Ar ... .Op Fl sS Ar field .Ar ... .Op Fl t Ar type Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar snapshot .Xc .Pp Displays space consumed by, and quotas on, each group in the specified filesystem or snapshot. This subcommand is identical to .Qq Nm Cm userspace , except that the default types to display are .Fl t Sy posixgroup,smbgroup . .It Xo .Nm .Cm mount .Xc .Pp Displays all .Tn ZFS file systems currently mounted. .It Xo .Nm .Cm mount .Op Fl vO .Op Fl o Ar property Ns Op , Ns Ar ... .Fl a | Ar filesystem .Xc .Pp Mounts .Tn ZFS file systems.
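.Pp As an illustrative sketch (the file system name .Em tank/home is hypothetical), a file system might be mounted read-only for the duration of the mount, using the .Fl o option and a mount option from the .Qq Sx Temporary Mount Point Properties section: .Bd -literal -offset 2n .Li # Ic zfs mount -o ro tank/home .Ed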
.Bl -tag -width indent .It Fl v Report mount progress. .It Fl O Perform an overlay mount. Overlay mounts are not supported on .Fx . .It Fl o Ar property Ns Op , Ns Ar ... An optional, comma-separated list of mount options to use temporarily for the duration of the mount. See the .Qq Sx Temporary Mount Point Properties section for details. .It Fl a Mount all available .Tn ZFS file systems. This command may be executed on .Fx system startup by .Pa /etc/rc.d/zfs . For more information, see variable .Va zfs_enable in .Xr rc.conf 5 . .It Ar filesystem Mount the specified filesystem. .El .It Xo .Nm .Cm unmount .Op Fl f .Fl a | Ar filesystem Ns | Ns Ar mountpoint .Xc .Pp Unmounts currently mounted .Tn ZFS file systems. .Bl -tag -width indent .It Fl f Forcefully unmount the file system, even if it is currently in use. .It Fl a Unmount all available .Tn ZFS file systems. .It Ar filesystem | mountpoint Unmount the specified filesystem. The command can also be given a path to a .Tn ZFS file system mount point on the system. .El .It Xo .Nm .Cm share .Fl a | Ar filesystem .Xc .Pp Shares .Tn ZFS file systems that have the .Sy sharenfs property set. .Bl -tag -width indent .It Fl a Share all .Tn ZFS file systems that have the .Sy sharenfs property set. This command may be executed on .Fx system startup by .Pa /etc/rc.d/zfs . For more information, see variable .Va zfs_enable in .Xr rc.conf 5 . .It Ar filesystem Share the specified filesystem according to the .Sy sharenfs property. File systems are shared when the .Sy sharenfs property is set. .El .It Xo .Nm .Cm unshare .Fl a | Ar filesystem Ns | Ns Ar mountpoint .Xc .Pp Unshares .Tn ZFS file systems that have the .Sy sharenfs property set. .Bl -tag -width indent .It Fl a Unshares .Tn ZFS file systems that have the .Sy sharenfs property set. This command may be executed on .Fx system shutdown by .Pa /etc/rc.d/zfs . For more information, see variable .Va zfs_enable in .Xr rc.conf 5 . .It Ar filesystem | mountpoint Unshare the specified filesystem. The command can also be given a path to a .Tn ZFS file system shared on the system. .El .It Xo .Nm .Cm send .Op Fl DnPpRrv .Op Fl i Ar snapshot | Fl I Ar snapshot .Ar snapshot .Xc .Pp Creates a stream representation of the last .Ar snapshot argument (not part of .Fl i or .Fl I ) which is written to standard output. The output can be redirected to a file or to a different system (for example, using .Xr ssh 1 ) . By default, a full stream is generated. .Bl -tag -width indent .It Fl i Ar snapshot Generate an incremental stream from the .Fl i Ar snapshot to the last .Ar snapshot . The incremental source (the .Fl i Ar snapshot ) can be specified as the last component of the snapshot name (for example, the part after the .Sy @ ) , and it is assumed to be from the same file system as the last .Ar snapshot . .Pp If the destination is a clone, the source may be the origin snapshot, which must be fully specified (for example, .Cm pool/fs@origin , not just .Cm @origin ) . .It Fl I Ar snapshot Generate a stream package that sends all intermediary snapshots from the .Fl I Ar snapshot to the last .Ar snapshot . For example, .Ic -I @a fs@d is similar to .Ic -i @a fs@b; -i @b fs@c; -i @c fs@d . The incremental source snapshot may be specified as with the .Fl i option. .It Fl R Generate a replication stream package, which will replicate the specified filesystem, and all descendent file systems, up to the named snapshot. When received, all properties, snapshots, descendent file systems, and clones are preserved.
.Pp If the .Fl i or .Fl I flags are used in conjunction with the .Fl R flag, an incremental replication stream is generated. The current values of properties, and current snapshot and file system names are set when the stream is received. If the .Fl F flag is specified when this stream is received, snapshots and file systems that do not exist on the sending side are destroyed. .It Fl D Generate a deduplicated stream. Blocks which would have been sent multiple times in the send stream will only be sent once. The receiving system must also support this feature to receive a deduplicated stream. This flag can be used regardless of the dataset's .Sy dedup property, but performance will be much better if the filesystem uses a dedup-capable checksum (e.g. .Sy sha256 ) . .It Fl r Recursively send all descendant snapshots. This is similar to the .Fl R flag, but information about deleted and renamed datasets is not included, and property information is only included if the .Fl p flag is specified. .It Fl p Include the dataset's properties in the stream. This flag is implicit when .Fl R is specified. The receiving system must also support this feature. .It Fl n Do a dry-run ("No-op") send. Do not generate any actual send data. This is useful in conjunction with the .Fl v or .Fl P flags to determine what data will be sent. .It Fl P Print machine-parsable verbose information about the stream package generated. .It Fl v Print verbose information about the stream package generated. +This information includes a per-second report of how much data has been sent. .El .Pp The format of the stream is committed. You will be able to receive your streams on future versions of .Tn ZFS . .It Xo .Nm .Cm receive .Op Fl vnFu .Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot .Xc .It Xo .Nm .Cm receive .Op Fl vnFu .Op Fl d | e .Ar filesystem .Xc .Pp Creates a snapshot whose contents are as specified in the stream provided on standard input. If a full stream is received, then a new file system is created as well. Streams are created using the .Qq Nm Cm send subcommand, which by default creates a full stream. .Qq Nm Cm recv can be used as an alias for .Qq Nm Cm receive . .Pp If an incremental stream is received, then the destination file system must already exist, and its most recent snapshot must match the incremental stream's source. For .Sy zvol Ns s, the destination device link is destroyed and recreated, which means the .Sy zvol cannot be accessed during the .Sy receive operation. .Pp When a snapshot replication package stream that is generated by using the .Qq Nm Cm send Fl R command is received, any snapshots that do not exist on the sending location are destroyed by using the .Qq Nm Cm destroy Fl d command. .Pp The name of the snapshot (and file system, if a full stream is received) that this subcommand creates depends on the argument type and the .Fl d or .Fl e option. .Pp If the argument is a snapshot name, the specified .Ar snapshot is created. If the argument is a file system or volume name, a snapshot with the same name as the sent snapshot is created within the specified .Ar filesystem or .Ar volume . If the .Fl d or .Fl e option is specified, the snapshot name is determined by appending the sent snapshot's name to the specified .Ar filesystem .
If the .Fl d option is specified, all but the pool name of the sent snapshot path is appended (for example, .Sy b/c@1 appended from sent snapshot .Sy a/b/c@1 ) , and if the .Fl e option is specified, only the tail of the sent snapshot path is appended (for example, .Sy c@1 appended from sent snapshot .Sy a/b/c@1 ) . In the case of .Fl d , any file systems needed to replicate the path of the sent snapshot are created within the specified file system. .Bl -tag -width indent .It Fl d Use the full sent snapshot path without the first element (without pool name) to determine the name of the new snapshot as described in the paragraph above. .It Fl e Use only the last element of the sent snapshot path to determine the name of the new snapshot as described in the paragraph above. .It Fl u The file system associated with the received stream is not mounted. .It Fl v Print verbose information about the stream and the time required to perform the receive operation. .It Fl n Do not actually receive the stream. This can be useful in conjunction with the .Fl v option to verify the name the receive operation would use. .It Fl F Force a rollback of the file system to the most recent snapshot before performing the receive operation. If receiving an incremental replication stream (for example, one generated by .Qq Nm Cm send Fl R Fl iI ) , destroy snapshots and file systems that do not exist on the sending side. .El .It Xo .Nm .Cm allow .Ar filesystem Ns | Ns Ar volume .Xc .Pp Displays permissions that have been delegated on the specified filesystem or volume. See the other forms of .Qq Nm Cm allow for more information. .It Xo .Nm .Cm allow .Op Fl ldug .Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ... .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .It Xo .Nm .Cm allow .Op Fl ld .Fl e .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .Pp Delegates .Tn ZFS administration permission for the file systems to non-privileged users. .Bl -tag -width indent .It Xo .Op Fl ug .Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ... .Xc Specifies to whom the permissions are delegated. Multiple entities can be specified as a comma-separated list. If neither of the .Fl ug options are specified, then the argument is interpreted preferentially as the keyword "everyone", then as a user name, and lastly as a group name. To specify a user or group named "everyone", use the .Fl u or .Fl g options. To specify a group with the same name as a user, use the .Fl g option. .It Xo .Op Fl e .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Xc Specifies that the permissions be delegated to "everyone". Multiple permissions may be specified as a comma-separated list. Permission names are the same as .Tn ZFS subcommand and property names. See the property list below. Property set names, which begin with an at sign .Pq Sy @ , may be specified. See the .Fl s form below for details. .It Xo .Op Fl ld .Ar filesystem Ns | Ns Ar volume .Xc Specifies where the permissions are delegated. If neither of the .Fl ld options are specified, or both are, then the permissions are allowed for the file system or volume, and all of its descendents. If only the .Fl l option is used, then the permissions are allowed "locally" only for the specified file system. If only the .Fl d option is used, then the permissions are allowed only for the descendent file systems. .El .Pp Permissions are generally the ability to use a .Tn ZFS subcommand or change a .Tn ZFS property.
The following permissions are available: .Bl -column -offset 4n "secondarycache" "subcommand" .It NAME Ta TYPE Ta NOTES .It Xo allow Ta subcommand Ta Must also have the permission that is being allowed .Xc .It Xo clone Ta subcommand Ta Must also have the 'create' ability and 'mount' ability in the origin file system .Xc .It create Ta subcommand Ta Must also have the 'mount' ability .It destroy Ta subcommand Ta Must also have the 'mount' ability .It hold Ta subcommand Ta Allows adding a user hold to a snapshot .It mount Ta subcommand Ta Allows mount/umount of Tn ZFS No datasets .It Xo promote Ta subcommand Ta Must also have the 'mount' and 'promote' ability in the origin file system .Xc .It receive Ta subcommand Ta Must also have the 'mount' and 'create' ability .It Xo release Ta subcommand Ta Allows releasing a user hold which might destroy the snapshot .Xc .It Xo rename Ta subcommand Ta Must also have the 'mount' and 'create' ability in the new parent .Xc .It rollback Ta subcommand Ta Must also have the 'mount' ability .It send Ta subcommand .It share Ta subcommand Ta Allows Xo sharing file systems over the .Tn NFS protocol .Xc .It snapshot Ta subcommand Ta Must also have the 'mount' ability .It groupquota Ta other Ta Allows accessing any groupquota@... property .It groupused Ta other Ta Allows reading any groupused@... property .It userprop Ta other Ta Allows changing any user property .It userquota Ta other Ta Allows accessing any userquota@... property .It userused Ta other Ta Allows reading any userused@... property .It Ta .It aclinherit Ta property .It aclmode Ta property .It atime Ta property .It canmount Ta property .It casesensitivity Ta property .It checksum Ta property .It compression Ta property .It copies Ta property .It dedup Ta property .It devices Ta property .It exec Ta property .It logbias Ta property .It jailed Ta property .It mlslabel Ta property .It mountpoint Ta property .It nbmand Ta property .It normalization Ta property .It primarycache Ta property .It quota Ta property .It readonly Ta property .It recordsize Ta property .It refquota Ta property .It refreservation Ta property .It reservation Ta property .It secondarycache Ta property .It setuid Ta property .It sharenfs Ta property .It sharesmb Ta property .It snapdir Ta property .It sync Ta property .It utf8only Ta property .It version Ta property .It volblocksize Ta property .It volsize Ta property .It vscan Ta property .It xattr Ta property .El .It Xo .Nm .Cm allow .Fl c .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .Pp Sets "create time" permissions. These permissions are granted (locally) to the creator of any newly-created descendent file system. .It Xo .Nm .Cm allow .Fl s .Ar @setname .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .Pp Defines or adds permissions to a permission set. The set can be used by other .Qq Nm Cm allow commands for the specified file system and its descendents. Sets are evaluated dynamically, so changes to a set are immediately reflected. Permission sets follow the same naming restrictions as ZFS file systems, but the name must begin with an "at sign" .Pq Sy @ , and can be no more than 64 characters long. .It Xo .Nm .Cm unallow .Op Fl rldug .Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ... .Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .It Xo .Nm .Cm unallow .Op Fl rld .Fl e .Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... 
.Ar filesystem Ns | Ns Ar volume .Xc .It Xo .Nm .Cm unallow .Op Fl r .Fl c .Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .Pp Removes permissions that were granted with the .Qq Nm Cm allow command. No permissions are explicitly denied, so other permissions granted are still in effect; for example, a permission granted by an ancestor remains in effect. If no permissions are specified, then all permissions for the specified .Ar user , group , No or Ar everyone are removed. Specifying "everyone" (or using the .Fl e option) only removes the permissions that were granted to "everyone", not all permissions for every user and group. See the .Qq Nm Cm allow command for a description of the .Fl ldugec options. .Bl -tag -width indent .It Fl r Recursively remove the permissions from this file system and all descendents. .El .It Xo .Nm .Cm unallow .Op Fl r .Fl s .Ar @setname .Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ... .Ar filesystem Ns | Ns Ar volume .Xc .Pp Removes permissions from a permission set. If no permissions are specified, then all permissions are removed, thus removing the set entirely. .It Xo .Nm .Cm hold .Op Fl r .Ar tag snapshot ... .Xc .Pp Adds a single reference, named with the .Ar tag argument, to the specified snapshot or snapshots. Each snapshot has its own tag namespace, and tags must be unique within that space. .Pp If a hold exists on a snapshot, attempts to destroy that snapshot by using the .Qq Nm Cm destroy command return .Em EBUSY . .Bl -tag -width indent .It Fl r Specifies that a hold with the given tag is applied recursively to the snapshots of all descendent file systems. .El .It Xo .Nm .Cm holds .Op Fl r .Ar snapshot ... .Xc .Pp Lists all existing user references for the given snapshot or snapshots. .Bl -tag -width indent .It Fl r Lists the holds that are set on the named descendent snapshots, in addition to listing the holds on the named snapshot. .El .It Xo .Nm .Cm release .Op Fl r .Ar tag snapshot ... .Xc .Pp Removes a single reference, named with the .Ar tag argument, from the specified snapshot or snapshots. The tag must already exist for each snapshot. .Bl -tag -width indent .It Fl r Recursively releases a hold with the given tag on the snapshots of all descendent file systems. .El .It Xo .Nm .Cm diff .Op Fl FHt .Ar snapshot .Op Ar snapshot Ns | Ns Ar filesystem .Xc .Pp Describes differences between a snapshot and a successor dataset. The successor dataset can be a later snapshot or the current filesystem. .Pp The changed files are displayed, including the change type. The change type is displayed using a single character. If a file or directory was renamed, the old and the new names are displayed. .Pp The following change types can be displayed: .Pp .Bl -column -offset indent "CHARACTER" "CHANGE TYPE" .It CHARACTER Ta CHANGE TYPE .It \&+ Ta file was added .It \&- Ta file was removed .It \&M Ta file was modified .It \&R Ta file was renamed .El .Bl -tag -width indent .It Fl F Display a single letter for the file type in the second-to-last column. .Pp The following file types can be displayed: .Pp .Bl -column -offset indent "CHARACTER" "FILE TYPE" .It CHARACTER Ta FILE TYPE .It \&F Ta file .It \&/ Ta directory .It \&B Ta block device .It \&@ Ta symbolic link .It \&= Ta socket .It \&> Ta door (not supported on Fx ) .It \&| Ta FIFO (not supported on Fx ) .It \&P Ta event portal (not supported on Fx ) .El .It Fl H Machine-parseable output, with fields separated by a tab character. .It Fl t Display a change timestamp in the first column.
.El .It Xo .Nm .Cm jail .Ar jailid filesystem .Xc .Pp Attaches the specified .Ar filesystem to the jail identified by JID .Ar jailid . From now on this file system tree can be managed from within a jail if the .Sy jailed property has been set. To use this functionality, the jail needs the .Va allow.mount and .Va allow.mount.zfs parameters set to 1 and the .Va enforce_statfs parameter set to a value lower than 2. .Pp See .Xr jail 8 for more information on managing jails and configuring the parameters above. .It Xo .Nm .Cm unjail .Ar jailid filesystem .Xc .Pp Detaches the specified .Ar filesystem from the jail identified by JID .Ar jailid . .El .Sh EXAMPLES .Bl -tag -width 0n .It Sy Example 1 No Creating a Tn ZFS No File System Hierarchy .Pp The following commands create a file system named .Em pool/home and a file system named .Em pool/home/bob . The mount point .Pa /home is set for the parent file system, and is automatically inherited by the child file system. .Bd -literal -offset 2n .Li # Ic zfs create pool/home .Li # Ic zfs set mountpoint=/home pool/home .Li # Ic zfs create pool/home/bob .Ed .It Sy Example 2 No Creating a Tn ZFS No Snapshot .Pp The following command creates a snapshot named .Sy yesterday . This snapshot is mounted on demand in the .Pa \&.zfs/snapshot directory at the root of the .Em pool/home/bob file system. .Bd -literal -offset 2n .Li # Ic zfs snapshot pool/home/bob@yesterday .Ed .It Sy Example 3 No Creating and Destroying Multiple Snapshots .Pp The following command creates snapshots named .Em yesterday of .Em pool/home and all of its descendent file systems. Each snapshot is mounted on demand in the .Pa \&.zfs/snapshot directory at the root of its file system. The second command destroys the newly created snapshots. .Bd -literal -offset 2n .Li # Ic zfs snapshot -r pool/home@yesterday .Li # Ic zfs destroy -r pool/home@yesterday .Ed .It Sy Example 4 No Disabling and Enabling File System Compression .Pp The following command disables the .Sy compression property for all file systems under .Em pool/home . The next command explicitly enables .Sy compression for .Em pool/home/anne . .Bd -literal -offset 2n .Li # Ic zfs set compression=off pool/home .Li # Ic zfs set compression=on pool/home/anne .Ed .It Sy Example 5 No Listing Tn ZFS No Datasets .Pp The following command lists all active file systems and volumes in the system. Snapshots are displayed if the .Sy listsnaps property is .Cm on . The default is .Cm off . See .Xr zpool 8 for more information on pool properties. .Bd -literal -offset 2n .Li # Ic zfs list NAME USED AVAIL REFER MOUNTPOINT pool 450K 457G 18K /pool pool/home 315K 457G 21K /home pool/home/anne 18K 457G 18K /home/anne pool/home/bob 276K 457G 276K /home/bob .Ed .It Sy Example 6 No Setting a Quota on a Tn ZFS No File System .Pp The following command sets a quota of 50 Gbytes for .Em pool/home/bob . .Bd -literal -offset 2n .Li # Ic zfs set quota=50G pool/home/bob .Ed .It Sy Example 7 No Listing Tn ZFS No Properties .Pp The following command lists all properties for .Em pool/home/bob . 
.Bd -literal -offset 2n .Li # Ic zfs get all pool/home/bob NAME PROPERTY VALUE SOURCE pool/home/bob type filesystem - pool/home/bob creation Tue Jul 21 15:53 2009 - pool/home/bob used 21K - pool/home/bob available 20.0G - pool/home/bob referenced 21K - pool/home/bob compressratio 1.00x - pool/home/bob mounted yes - pool/home/bob quota 20G local pool/home/bob reservation none default pool/home/bob recordsize 128K default pool/home/bob mountpoint /home/bob default pool/home/bob sharenfs off default pool/home/bob checksum on default pool/home/bob compression on local pool/home/bob atime on default pool/home/bob devices on default pool/home/bob exec on default pool/home/bob setuid on default pool/home/bob readonly off default pool/home/bob jailed off default pool/home/bob snapdir hidden default pool/home/bob aclmode discard default pool/home/bob aclinherit restricted default pool/home/bob canmount on default pool/home/bob xattr on default pool/home/bob copies 1 default pool/home/bob version 5 - pool/home/bob utf8only off - pool/home/bob normalization none - pool/home/bob casesensitivity sensitive - pool/home/bob vscan off default pool/home/bob nbmand off default pool/home/bob sharesmb off default pool/home/bob refquota none default pool/home/bob refreservation none default pool/home/bob primarycache all default pool/home/bob secondarycache all default pool/home/bob usedbysnapshots 0 - pool/home/bob usedbydataset 21K - pool/home/bob usedbychildren 0 - pool/home/bob usedbyrefreservation 0 - pool/home/bob logbias latency default pool/home/bob dedup off default pool/home/bob mlslabel - pool/home/bob sync standard default pool/home/bob refcompressratio 1.00x - .Ed .Pp The following command gets a single property value. .Bd -literal -offset 2n .Li # Ic zfs get -H -o value compression pool/home/bob on .Ed .Pp The following command lists all properties with local settings for .Em pool/home/bob . .Bd -literal -offset 2n .Li # Ic zfs get -s local -o name,property,value all pool/home/bob NAME PROPERTY VALUE pool/home/bob quota 20G pool/home/bob compression on .Ed .It Sy Example 8 No Rolling Back a Tn ZFS No File System .Pp The following command reverts the contents of .Em pool/home/anne to the snapshot named .Em yesterday , deleting all intermediate snapshots. .Bd -literal -offset 2n .Li # Ic zfs rollback -r pool/home/anne@yesterday .Ed .It Sy Example 9 No Creating a Tn ZFS No Clone .Pp The following command creates a writable file system whose initial contents are the same as .Em pool/home/bob@yesterday . .Bd -literal -offset 2n .Li # Ic zfs clone pool/home/bob@yesterday pool/clone .Ed .It Sy Example 10 No Promoting a Tn ZFS No Clone .Pp The following commands illustrate how to test out changes to a file system, and then replace the original file system with the changed one, using clones, clone promotion, and renaming: .Bd -literal -offset 2n .Li # Ic zfs create pool/project/production .Ed .Pp Populate .Pa /pool/project/production with data and continue with the following commands: .Bd -literal -offset 2n .Li # Ic zfs snapshot pool/project/production@today .Li # Ic zfs clone pool/project/production@today pool/project/beta .Ed .Pp Now make changes to .Pa /pool/project/beta and continue with the following commands: .Bd -literal -offset 2n .Li # Ic zfs promote pool/project/beta .Li # Ic zfs rename pool/project/production pool/project/legacy .Li # Ic zfs rename pool/project/beta pool/project/production .Ed .Pp Once the legacy version is no longer needed, it can be destroyed. 
.Bd -literal -offset 2n .Li # Ic zfs destroy pool/project/legacy .Ed .It Sy Example 11 No Inheriting Tn ZFS No Properties .Pp The following command causes .Em pool/home/bob and .Em pool/home/anne to inherit the .Sy checksum property from their parent. .Bd -literal -offset 2n .Li # Ic zfs inherit checksum pool/home/bob pool/home/anne .Ed .It Sy Example 12 No Remotely Replicating Tn ZFS No Data .Pp The following commands send a full stream and then an incremental stream to a remote machine, restoring them into .Sy poolB/received/fs@a and .Sy poolB/received/fs@b , respectively. .Sy poolB must contain the file system .Sy poolB/received , and must not initially contain .Sy poolB/received/fs . .Bd -literal -offset 2n .Li # Ic zfs send pool/fs@a | ssh host zfs receive poolB/received/fs@a .Li # Ic zfs send -i a pool/fs@b | ssh host zfs receive poolB/received/fs .Ed .It Xo .Sy Example 13 Using the .Qq zfs receive -d Option .Xc .Pp The following command sends a full stream of .Sy poolA/fsA/fsB@snap to a remote machine, receiving it into .Sy poolB/received/fsA/fsB@snap . The .Sy fsA/fsB@snap portion of the received snapshot's name is determined from the name of the sent snapshot. .Sy poolB must contain the file system .Sy poolB/received . If .Sy poolB/received/fsA does not exist, it is created as an empty file system. .Bd -literal -offset 2n .Li # Ic zfs send poolA/fsA/fsB@snap | ssh host zfs receive -d poolB/received .Ed .It Sy Example 14 No Setting User Properties .Pp The following example sets the user-defined .Sy com.example:department property for a dataset. .Bd -literal -offset 2n .Li # Ic zfs set com.example:department=12345 tank/accounting .Ed .It Sy Example 15 No Performing a Rolling Snapshot .Pp The following example shows how to maintain a history of snapshots with a consistent naming scheme. To keep a week's worth of snapshots, the user destroys the oldest snapshot, renames the remaining snapshots, and then creates a new snapshot, as follows: .Bd -literal -offset 2n .Li # Ic zfs destroy -r pool/users@7daysago .Li # Ic zfs rename -r pool/users@6daysago @7daysago .Li # Ic zfs rename -r pool/users@5daysago @6daysago .Li # Ic zfs rename -r pool/users@4daysago @5daysago .Li # Ic zfs rename -r pool/users@3daysago @4daysago .Li # Ic zfs rename -r pool/users@2daysago @3daysago .Li # Ic zfs rename -r pool/users@yesterday @2daysago .Li # Ic zfs rename -r pool/users@today @yesterday .Li # Ic zfs snapshot -r pool/users@today .Ed .It Xo .Sy Example 16 Setting .Qq sharenfs Property Options on a ZFS File System .Xc .Pp The following command shows how to set .Sy sharenfs property options to enable root access for a specific network on the .Em tank/home file system. The contents of the .Sy sharenfs property are valid .Xr exports 5 options. .Bd -literal -offset 2n .Li # Ic zfs set sharenfs="maproot=root,network 192.168.0.0/24" tank/home .Ed .Pp Another way to write this command with the same result is: .Bd -literal -offset 2n .Li # Ic zfs set sharenfs="-maproot=root -network 192.168.0.0/24" tank/home .Ed .It Xo .Sy Example 17 Delegating .Tn ZFS Administration Permissions on a .Tn ZFS Dataset .Xc .Pp The following example shows how to set permissions so that user .Em cindys can create, destroy, mount, and take snapshots on .Em tank/cindys . The permissions on .Em tank/cindys are also displayed.
.Bd -literal -offset 2n .Li # Ic zfs allow cindys create,destroy,mount,snapshot tank/cindys .Li # Ic zfs allow tank/cindys ------------------------------------------------------------- Local+Descendent permissions on (tank/cindys) user cindys create,destroy,mount,snapshot ------------------------------------------------------------- .Ed .It Sy Example 18 No Delegating Create Time Permissions on a Tn ZFS No Dataset .Pp The following example shows how to grant anyone in the group .Em staff permission to create file systems in .Em tank/users . This syntax also allows staff members to destroy their own file systems, but not destroy anyone else's file system. The permissions on .Em tank/users are also displayed. .Bd -literal -offset 2n .Li # Ic zfs allow staff create,mount tank/users .Li # Ic zfs allow -c destroy tank/users .Li # Ic zfs allow tank/users ------------------------------------------------------------- Create time permissions on (tank/users) create,destroy Local+Descendent permissions on (tank/users) group staff create,mount ------------------------------------------------------------- .Ed .It Xo .Sy Example 19 Defining and Granting a Permission Set on a .Tn ZFS Dataset .Xc .Pp The following example shows how to define and grant a permission set on the .Em tank/users file system. The permissions on .Em tank/users are also displayed. .Bd -literal -offset 2n .Li # Ic zfs allow -s @pset create,destroy,snapshot,mount tank/users .Li # Ic zfs allow staff @pset tank/users .Li # Ic zfs allow tank/users ------------------------------------------------------------- Permission sets on (tank/users) @pset create,destroy,mount,snapshot Create time permissions on (tank/users) create,destroy Local+Descendent permissions on (tank/users) group staff @pset,create,mount ------------------------------------------------------------- .Ed .It Sy Example 20 No Delegating Property Permissions on a Tn ZFS No Dataset .Pp The following example shows how to grant the ability to set quotas and reservations on the .Sy users/home file system. The permissions on .Sy users/home are also displayed. .Bd -literal -offset 2n .Li # Ic zfs allow cindys quota,reservation users/home .Li # Ic zfs allow users/home ------------------------------------------------------------- Local+Descendent permissions on (users/home) user cindys quota,reservation ------------------------------------------------------------- .Li # Ic su - cindys .Li cindys% Ic zfs set quota=10G users/home/marks .Li cindys% Ic zfs get quota users/home/marks NAME PROPERTY VALUE SOURCE users/home/marks quota 10G local .Ed .It Sy Example 21 No Removing ZFS Delegated Permissions on a Tn ZFS No Dataset .Pp The following example shows how to remove the snapshot permission from the .Em staff group on the .Em tank/users file system. The permissions on .Em tank/users are also displayed. .Bd -literal -offset 2n .Li # Ic zfs unallow staff snapshot tank/users .Li # Ic zfs allow tank/users ------------------------------------------------------------- Permission sets on (tank/users) @pset create,destroy,mount,snapshot Create time permissions on (tank/users) create,destroy Local+Descendent permissions on (tank/users) group staff @pset,create,mount ------------------------------------------------------------- .Ed .El .Sh EXIT STATUS The following exit values are returned: .Bl -tag -offset 2n -width 2n .It 0 Successful completion. .It 1 An error occurred. .It 2 Invalid command line options were specified.
.El .Sh SEE ALSO .Xr chmod 2 , .Xr fsync 2 , .Xr exports 5 , .Xr fstab 5 , .Xr rc.conf 5 , .Xr jail 8 , .Xr mount 8 , .Xr umount 8 , .Xr zpool 8 .Sh AUTHORS This manual page is a .Xr mdoc 7 reimplementation of the .Tn OpenSolaris manual page .Em zfs(1M) , modified and customized for .Fx and licensed under the Common Development and Distribution License .Pq Tn CDDL . .Pp The .Xr mdoc 7 implementation of this manual page was initially written by .An Martin Matuska Aq mm@FreeBSD.org . Index: projects/nand/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c =================================================================== --- projects/nand/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c (revision 235263) @@ -1,6698 +1,6704 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright 2012 Nexenta Systems, Inc. All rights reserved. - * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright (c) 2012 by Delphix. All rights reserved. + * Copyright 2012 Milan Jurik. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. * Copyright (c) 2011-2012 Pawel Jakub Dawidek . * All rights reserved. - * Copyright (c) 2011 Martin Matuska . All rights reserved. + * Copyright (c) 2012 Martin Matuska . All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef sun #include #include #endif #include "zfs_iter.h" #include "zfs_util.h" #include "zfs_comutil.h" libzfs_handle_t *g_zfs; static FILE *mnttab_file; static char history_str[HIS_MAX_RECORD_LEN]; static int zfs_do_clone(int argc, char **argv); static int zfs_do_create(int argc, char **argv); static int zfs_do_destroy(int argc, char **argv); static int zfs_do_get(int argc, char **argv); static int zfs_do_inherit(int argc, char **argv); static int zfs_do_list(int argc, char **argv); static int zfs_do_mount(int argc, char **argv); static int zfs_do_rename(int argc, char **argv); static int zfs_do_rollback(int argc, char **argv); static int zfs_do_set(int argc, char **argv); static int zfs_do_upgrade(int argc, char **argv); static int zfs_do_snapshot(int argc, char **argv); static int zfs_do_unmount(int argc, char **argv); static int zfs_do_share(int argc, char **argv); static int zfs_do_unshare(int argc, char **argv); static int zfs_do_send(int argc, char **argv); static int zfs_do_receive(int argc, char **argv); static int zfs_do_promote(int argc, char **argv); static int zfs_do_userspace(int argc, char **argv); static int zfs_do_allow(int argc, char **argv); static int zfs_do_unallow(int argc, char **argv); static int zfs_do_hold(int argc, char **argv); static int zfs_do_holds(int argc, char **argv); static int zfs_do_release(int argc, char **argv); static int zfs_do_diff(int argc, char **argv); static int zfs_do_jail(int argc, char **argv); static int zfs_do_unjail(int argc, char **argv); /* * Enable a reasonable set of defaults for libumem debugging on DEBUG builds. */ #ifdef DEBUG const char * _umem_debug_init(void) { return ("default,verbose"); /* $UMEM_DEBUG setting */ } const char * _umem_logging_init(void) { return ("fail,contents"); /* $UMEM_LOGGING setting */ } #endif typedef enum { HELP_CLONE, HELP_CREATE, HELP_DESTROY, HELP_GET, HELP_INHERIT, HELP_UPGRADE, HELP_JAIL, HELP_UNJAIL, HELP_LIST, HELP_MOUNT, HELP_PROMOTE, HELP_RECEIVE, HELP_RENAME, HELP_ROLLBACK, HELP_SEND, HELP_SET, HELP_SHARE, HELP_SNAPSHOT, HELP_UNMOUNT, HELP_UNSHARE, HELP_ALLOW, HELP_UNALLOW, HELP_USERSPACE, HELP_GROUPSPACE, HELP_HOLD, HELP_HOLDS, HELP_RELEASE, HELP_DIFF, } zfs_help_t; typedef struct zfs_command { const char *name; int (*func)(int argc, char **argv); zfs_help_t usage; } zfs_command_t; /* * Master command table. Each ZFS command has a name, associated function, and * usage message. The usage messages need to be internationalized, so we have * to have a function to return the usage message based on a command index. * * These commands are organized according to how they are displayed in the usage * message. An empty command (one with a NULL name) indicates an empty line in * the generic usage message. 
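 *
 * To make the dispatch concrete, here is a hedged sketch of how main()
 * consumes this table (main() is outside this hunk; 'cmdname' below
 * stands for the command word taken from argv): the entry is found by
 * name and its handler is invoked with the remaining arguments.
 *
 *	for (i = 0; i < NCOMMAND; i++) {
 *		if (command_table[i].name != NULL &&
 *		    strcmp(cmdname, command_table[i].name) == 0) {
 *			current_command = &command_table[i];
 *			ret = command_table[i].func(argc - 1, argv + 1);
 *			break;
 *		}
 *	}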
*/ static zfs_command_t command_table[] = { { "create", zfs_do_create, HELP_CREATE }, { "destroy", zfs_do_destroy, HELP_DESTROY }, { NULL }, { "snapshot", zfs_do_snapshot, HELP_SNAPSHOT }, { "rollback", zfs_do_rollback, HELP_ROLLBACK }, { "clone", zfs_do_clone, HELP_CLONE }, { "promote", zfs_do_promote, HELP_PROMOTE }, { "rename", zfs_do_rename, HELP_RENAME }, { NULL }, { "list", zfs_do_list, HELP_LIST }, { NULL }, { "set", zfs_do_set, HELP_SET }, { "get", zfs_do_get, HELP_GET }, { "inherit", zfs_do_inherit, HELP_INHERIT }, { "upgrade", zfs_do_upgrade, HELP_UPGRADE }, { "userspace", zfs_do_userspace, HELP_USERSPACE }, { "groupspace", zfs_do_userspace, HELP_GROUPSPACE }, { NULL }, { "mount", zfs_do_mount, HELP_MOUNT }, { "unmount", zfs_do_unmount, HELP_UNMOUNT }, { "share", zfs_do_share, HELP_SHARE }, { "unshare", zfs_do_unshare, HELP_UNSHARE }, { NULL }, { "send", zfs_do_send, HELP_SEND }, { "receive", zfs_do_receive, HELP_RECEIVE }, { NULL }, { "allow", zfs_do_allow, HELP_ALLOW }, { NULL }, { "unallow", zfs_do_unallow, HELP_UNALLOW }, { NULL }, { "hold", zfs_do_hold, HELP_HOLD }, { "holds", zfs_do_holds, HELP_HOLDS }, { "release", zfs_do_release, HELP_RELEASE }, { "diff", zfs_do_diff, HELP_DIFF }, { NULL }, { "jail", zfs_do_jail, HELP_JAIL }, { "unjail", zfs_do_unjail, HELP_UNJAIL }, }; #define NCOMMAND (sizeof (command_table) / sizeof (command_table[0])) zfs_command_t *current_command; static const char * get_usage(zfs_help_t idx) { switch (idx) { case HELP_CLONE: return (gettext("\tclone [-p] [-o property=value] ... " " \n")); case HELP_CREATE: return (gettext("\tcreate [-pu] [-o property=value] ... " "\n" "\tcreate [-ps] [-b blocksize] [-o property=value] ... " "-V \n")); case HELP_DESTROY: return (gettext("\tdestroy [-fnpRrv] \n" "\tdestroy [-dnpRrv] " "[%][,...]\n")); case HELP_GET: return (gettext("\tget [-rHp] [-d max] " "[-o \"all\" | field[,...]] [-t type[,...]] " "[-s source[,...]]\n" "\t <\"all\" | property[,...]> " "[filesystem|volume|snapshot] ...\n")); case HELP_INHERIT: return (gettext("\tinherit [-rS] " " ...\n")); case HELP_UPGRADE: return (gettext("\tupgrade [-v]\n" "\tupgrade [-r] [-V version] <-a | filesystem ...>\n")); case HELP_JAIL: return (gettext("\tjail \n")); case HELP_UNJAIL: return (gettext("\tunjail \n")); case HELP_LIST: return (gettext("\tlist [-rH][-d max] " "[-o property[,...]] [-t type[,...]] [-s property] ...\n" "\t [-S property] ... " "[filesystem|volume|snapshot] ...\n")); case HELP_MOUNT: return (gettext("\tmount\n" "\tmount [-vO] [-o opts] <-a | filesystem>\n")); case HELP_PROMOTE: return (gettext("\tpromote \n")); case HELP_RECEIVE: return (gettext("\treceive [-vnFu] \n" "\treceive [-vnFu] [-d | -e] \n")); case HELP_RENAME: - return (gettext("\trename " + return (gettext("\trename [-f] " "\n" - "\trename -p \n" + "\trename [-f] -p " + "\n" "\trename -r \n" "\trename -u [-p] ")); case HELP_ROLLBACK: return (gettext("\trollback [-rRf] \n")); case HELP_SEND: return (gettext("\tsend [-DnPpRrv] " "[-i snapshot | -I snapshot] \n")); case HELP_SET: return (gettext("\tset " " ...\n")); case HELP_SHARE: return (gettext("\tshare <-a | filesystem>\n")); case HELP_SNAPSHOT: return (gettext("\tsnapshot [-r] [-o property=value] ... " "\n")); case HELP_UNMOUNT: return (gettext("\tunmount [-f] " "<-a | filesystem|mountpoint>\n")); case HELP_UNSHARE: return (gettext("\tunshare " "<-a | filesystem|mountpoint>\n")); case HELP_ALLOW: return (gettext("\tallow \n" "\tallow [-ldug] " "<\"everyone\"|user|group>[,...] [,...]\n" "\t \n" "\tallow [-ld] -e [,...] 
" "\n" "\tallow -c [,...] \n" "\tallow -s @setname [,...] " "\n")); case HELP_UNALLOW: return (gettext("\tunallow [-rldug] " "<\"everyone\"|user|group>[,...]\n" "\t [[,...]] \n" "\tunallow [-rld] -e [[,...]] " "\n" "\tunallow [-r] -c [[,...]] " "\n" "\tunallow [-r] -s @setname [[,...]] " "\n")); case HELP_USERSPACE: return (gettext("\tuserspace [-niHp] [-o field[,...]] " "[-sS field] ... [-t type[,...]]\n" "\t \n")); case HELP_GROUPSPACE: return (gettext("\tgroupspace [-niHp] [-o field[,...]] " "[-sS field] ... [-t type[,...]]\n" "\t \n")); case HELP_HOLD: return (gettext("\thold [-r] ...\n")); case HELP_HOLDS: return (gettext("\tholds [-r] ...\n")); case HELP_RELEASE: return (gettext("\trelease [-r] ...\n")); case HELP_DIFF: return (gettext("\tdiff [-FHt] " "[snapshot|filesystem]\n")); } abort(); /* NOTREACHED */ } void nomem(void) { (void) fprintf(stderr, gettext("internal error: out of memory\n")); exit(1); } /* * Utility function to guarantee malloc() success. */ void * safe_malloc(size_t size) { void *data; if ((data = calloc(1, size)) == NULL) nomem(); return (data); } static char * safe_strdup(char *str) { char *dupstr = strdup(str); if (dupstr == NULL) nomem(); return (dupstr); } /* * Callback routine that will print out information for each of * the properties. */ static int usage_prop_cb(int prop, void *cb) { FILE *fp = cb; (void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop)); if (zfs_prop_readonly(prop)) (void) fprintf(fp, " NO "); else (void) fprintf(fp, "YES "); if (zfs_prop_inheritable(prop)) (void) fprintf(fp, " YES "); else (void) fprintf(fp, " NO "); if (zfs_prop_values(prop) == NULL) (void) fprintf(fp, "-\n"); else (void) fprintf(fp, "%s\n", zfs_prop_values(prop)); return (ZPROP_CONT); } /* * Display usage message. If we're inside a command, display only the usage for * that command. Otherwise, iterate over the entire command table and display * a complete usage message. */ static void usage(boolean_t requested) { int i; boolean_t show_properties = B_FALSE; FILE *fp = requested ? 
stdout : stderr; if (current_command == NULL) { (void) fprintf(fp, gettext("usage: zfs command args ...\n")); (void) fprintf(fp, gettext("where 'command' is one of the following:\n\n")); for (i = 0; i < NCOMMAND; i++) { if (command_table[i].name == NULL) (void) fprintf(fp, "\n"); else (void) fprintf(fp, "%s", get_usage(command_table[i].usage)); } (void) fprintf(fp, gettext("\nEach dataset is of the form: " "pool/[dataset/]*dataset[@name]\n")); } else { (void) fprintf(fp, gettext("usage:\n")); (void) fprintf(fp, "%s", get_usage(current_command->usage)); } if (current_command != NULL && (strcmp(current_command->name, "set") == 0 || strcmp(current_command->name, "get") == 0 || strcmp(current_command->name, "inherit") == 0 || strcmp(current_command->name, "list") == 0)) show_properties = B_TRUE; if (show_properties) { (void) fprintf(fp, gettext("\nThe following properties are supported:\n")); (void) fprintf(fp, "\n\t%-14s %s %s %s\n\n", "PROPERTY", "EDIT", "INHERIT", "VALUES"); /* Iterate over all properties */ (void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE, ZFS_TYPE_DATASET); (void) fprintf(fp, "\t%-15s ", "userused@..."); (void) fprintf(fp, " NO NO \n"); (void) fprintf(fp, "\t%-15s ", "groupused@..."); (void) fprintf(fp, " NO NO \n"); (void) fprintf(fp, "\t%-15s ", "userquota@..."); (void) fprintf(fp, "YES NO | none\n"); (void) fprintf(fp, "\t%-15s ", "groupquota@..."); (void) fprintf(fp, "YES NO | none\n"); (void) fprintf(fp, "\t%-15s ", "written@"); (void) fprintf(fp, " NO NO \n"); (void) fprintf(fp, gettext("\nSizes are specified in bytes " "with standard units such as K, M, G, etc.\n")); (void) fprintf(fp, gettext("\nUser-defined properties can " "be specified by using a name containing a colon (:).\n")); (void) fprintf(fp, gettext("\nThe {user|group}{used|quota}@ " "properties must be appended with\n" "a user or group specifier of one of these forms:\n" " POSIX name (eg: \"matt\")\n" " POSIX id (eg: \"126829\")\n" " SMB name@domain (eg: \"matt@sun\")\n" " SMB SID (eg: \"S-1-234-567-89\")\n")); } else { (void) fprintf(fp, gettext("\nFor the property list, run: %s\n"), "zfs set|get"); (void) fprintf(fp, gettext("\nFor the delegated permission list, run: %s\n"), "zfs allow|unallow"); } /* * See comments at end of main(). */ if (getenv("ZFS_ABORT") != NULL) { (void) printf("dumping core by request\n"); abort(); } exit(requested ? 
0 : 2); } static int parseprop(nvlist_t *props) { char *propname = optarg; char *propval, *strval; if ((propval = strchr(propname, '=')) == NULL) { (void) fprintf(stderr, gettext("missing " "'=' for -o option\n")); return (-1); } *propval = '\0'; propval++; if (nvlist_lookup_string(props, propname, &strval) == 0) { (void) fprintf(stderr, gettext("property '%s' " "specified multiple times\n"), propname); return (-1); } if (nvlist_add_string(props, propname, propval) != 0) nomem(); return (0); } static int parse_depth(char *opt, int *flags) { char *tmp; int depth; depth = (int)strtol(opt, &tmp, 0); if (*tmp) { (void) fprintf(stderr, gettext("%s is not an integer\n"), optarg); usage(B_FALSE); } if (depth < 0) { (void) fprintf(stderr, gettext("Depth can not be negative.\n")); usage(B_FALSE); } *flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE); return (depth); } #define PROGRESS_DELAY 2 /* seconds */ static char *pt_reverse = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"; static time_t pt_begin; static char *pt_header = NULL; static boolean_t pt_shown; static void start_progress_timer(void) { pt_begin = time(NULL) + PROGRESS_DELAY; pt_shown = B_FALSE; } static void set_progress_header(char *header) { assert(pt_header == NULL); pt_header = safe_strdup(header); if (pt_shown) { (void) printf("%s: ", header); (void) fflush(stdout); } } static void update_progress(char *update) { if (!pt_shown && time(NULL) > pt_begin) { int len = strlen(update); (void) printf("%s: %s%*.*s", pt_header, update, len, len, pt_reverse); (void) fflush(stdout); pt_shown = B_TRUE; } else if (pt_shown) { int len = strlen(update); (void) printf("%s%*.*s", update, len, len, pt_reverse); (void) fflush(stdout); } } static void finish_progress(char *done) { if (pt_shown) { (void) printf("%s\n", done); (void) fflush(stdout); } free(pt_header); pt_header = NULL; } /* * zfs clone [-p] [-o prop=value] ... * * Given an existing dataset, create a writable copy whose initial contents * are the same as the source. The newly created dataset maintains a * dependency on the original; the original cannot be destroyed so long as * the clone exists. * * The '-p' flag creates all the non-existing ancestors of the target first. */ static int zfs_do_clone(int argc, char **argv) { zfs_handle_t *zhp = NULL; boolean_t parents = B_FALSE; nvlist_t *props; int ret = 0; int c; if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) nomem(); /* check options */ while ((c = getopt(argc, argv, "o:p")) != -1) { switch (c) { case 'o': if (parseprop(props)) return (1); break; case 'p': parents = B_TRUE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); goto usage; } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing source dataset " "argument\n")); goto usage; } if (argc < 2) { (void) fprintf(stderr, gettext("missing target dataset " "argument\n")); goto usage; } if (argc > 2) { (void) fprintf(stderr, gettext("too many arguments\n")); goto usage; } /* open the source dataset */ if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) return (1); if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) { /* * Now create the ancestors of the target dataset. If the * target already exists and '-p' option was used we should not * complain. 
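 * For example (hypothetical names), 'zfs clone -p tank/fs@snap
 * tank/a/b/c' first creates the missing ancestors tank/a and tank/a/b
 * via zfs_create_ancestors() below, and simply returns success if
 * tank/a/b/c already exists.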
*/ if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) return (0); if (zfs_create_ancestors(g_zfs, argv[1]) != 0) return (1); } /* pass to libzfs */ ret = zfs_clone(zhp, argv[1], props); /* create the mountpoint if necessary */ if (ret == 0) { zfs_handle_t *clone; clone = zfs_open(g_zfs, argv[1], ZFS_TYPE_DATASET); if (clone != NULL) { if (zfs_get_type(clone) != ZFS_TYPE_VOLUME) if ((ret = zfs_mount(clone, NULL, 0)) == 0) ret = zfs_share(clone); zfs_close(clone); } } zfs_close(zhp); nvlist_free(props); return (!!ret); usage: if (zhp) zfs_close(zhp); nvlist_free(props); usage(B_FALSE); return (-1); } /* * zfs create [-pu] [-o prop=value] ... fs * zfs create [-ps] [-b blocksize] [-o prop=value] ... -V vol size * * Create a new dataset. This command can be used to create filesystems * and volumes. Snapshot creation is handled by 'zfs snapshot'. * For volumes, the user must specify a size to be used. * * The '-s' flag applies only to volumes, and indicates that we should not try * to set the reservation for this volume. By default we set a reservation * equal to the size for any volume. For pools with SPA_VERSION >= * SPA_VERSION_REFRESERVATION, we set a refreservation instead. * * The '-p' flag creates all the non-existing ancestors of the target first. * * The '-u' flag prevents mounting of newly created file system. */ static int zfs_do_create(int argc, char **argv) { zfs_type_t type = ZFS_TYPE_FILESYSTEM; zfs_handle_t *zhp = NULL; uint64_t volsize; int c; boolean_t noreserve = B_FALSE; boolean_t bflag = B_FALSE; boolean_t parents = B_FALSE; boolean_t nomount = B_FALSE; int ret = 1; nvlist_t *props; uint64_t intval; int canmount = ZFS_CANMOUNT_OFF; if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) nomem(); /* check options */ while ((c = getopt(argc, argv, ":V:b:so:pu")) != -1) { switch (c) { case 'V': type = ZFS_TYPE_VOLUME; if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) { (void) fprintf(stderr, gettext("bad volume " "size '%s': %s\n"), optarg, libzfs_error_description(g_zfs)); goto error; } if (nvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0) nomem(); volsize = intval; break; case 'p': parents = B_TRUE; break; case 'b': bflag = B_TRUE; if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) { (void) fprintf(stderr, gettext("bad volume " "block size '%s': %s\n"), optarg, libzfs_error_description(g_zfs)); goto error; } if (nvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), intval) != 0) nomem(); break; case 'o': if (parseprop(props)) goto error; break; case 's': noreserve = B_TRUE; break; case 'u': nomount = B_TRUE; break; case ':': (void) fprintf(stderr, gettext("missing size " "argument\n")); goto badusage; - break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); goto badusage; } } if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) { (void) fprintf(stderr, gettext("'-s' and '-b' can only be " "used when creating a volume\n")); goto badusage; } if (nomount && type != ZFS_TYPE_FILESYSTEM) { (void) fprintf(stderr, gettext("'-u' can only be " "used when creating a file system\n")); goto badusage; } argc -= optind; argv += optind; /* check number of arguments */ if (argc == 0) { (void) fprintf(stderr, gettext("missing %s argument\n"), zfs_type_to_name(type)); goto badusage; } if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); goto badusage; } if (type == ZFS_TYPE_VOLUME && !noreserve) { zpool_handle_t *zpool_handle; uint64_t spa_version; char *p; zfs_prop_t resv_prop; char 
*strval; if (p = strchr(argv[0], '/')) *p = '\0'; zpool_handle = zpool_open(g_zfs, argv[0]); if (p != NULL) *p = '/'; if (zpool_handle == NULL) goto error; spa_version = zpool_get_prop_int(zpool_handle, ZPOOL_PROP_VERSION, NULL); zpool_close(zpool_handle); if (spa_version >= SPA_VERSION_REFRESERVATION) resv_prop = ZFS_PROP_REFRESERVATION; else resv_prop = ZFS_PROP_RESERVATION; volsize = zvol_volsize_to_reservation(volsize, props); if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop), &strval) != 0) { if (nvlist_add_uint64(props, zfs_prop_to_name(resv_prop), volsize) != 0) { nvlist_free(props); nomem(); } } } if (parents && zfs_name_valid(argv[0], type)) { /* * Now create the ancestors of target dataset. If the target * already exists and '-p' option was used we should not * complain. */ if (zfs_dataset_exists(g_zfs, argv[0], type)) { ret = 0; goto error; } if (zfs_create_ancestors(g_zfs, argv[0]) != 0) goto error; } /* pass to libzfs */ if (zfs_create(g_zfs, argv[0], type, props) != 0) goto error; if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL) goto error; ret = 0; /* * if the user doesn't want the dataset automatically mounted, * then skip the mount/share step */ if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type)) canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT); /* * Mount and/or share the new filesystem as appropriate. We provide a * verbose error message to let the user know that their filesystem was * in fact created, even if we failed to mount or share it. */ if (!nomount && canmount == ZFS_CANMOUNT_ON) { if (zfs_mount(zhp, NULL, 0) != 0) { (void) fprintf(stderr, gettext("filesystem " "successfully created, but not mounted\n")); ret = 1; } else if (zfs_share(zhp) != 0) { (void) fprintf(stderr, gettext("filesystem " "successfully created, but not shared\n")); ret = 1; } } error: if (zhp) zfs_close(zhp); nvlist_free(props); return (ret); badusage: nvlist_free(props); usage(B_FALSE); return (2); } /* * zfs destroy [-rRf] * zfs destroy [-rRd] * * -r Recursively destroy all children * -R Recursively destroy all dependents, including clones * -f Force unmounting of any dependents * -d If we can't destroy now, mark for deferred destruction * * Destroys the given dataset. By default, it will unmount any filesystems, * and refuse to destroy a dataset that has any dependents. A dependent can * either be a child, or a clone of a child. */ typedef struct destroy_cbdata { boolean_t cb_first; boolean_t cb_force; boolean_t cb_recurse; boolean_t cb_error; boolean_t cb_doclones; zfs_handle_t *cb_target; boolean_t cb_defer_destroy; boolean_t cb_verbose; boolean_t cb_parsable; boolean_t cb_dryrun; nvlist_t *cb_nvl; /* first snap in contiguous run */ zfs_handle_t *cb_firstsnap; /* previous snap in contiguous run */ zfs_handle_t *cb_prevsnap; int64_t cb_snapused; char *cb_snapspec; } destroy_cbdata_t; /* * Check for any dependents based on the '-r' or '-R' flags. */ static int destroy_check_dependent(zfs_handle_t *zhp, void *data) { destroy_cbdata_t *cbp = data; const char *tname = zfs_get_name(cbp->cb_target); const char *name = zfs_get_name(zhp); if (strncmp(tname, name, strlen(tname)) == 0 && (name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) { /* * This is a direct descendant, not a clone somewhere else in * the hierarchy. 
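 * For example, when destroying tank/fs (hypothetical names), both
 * tank/fs/child and tank/fs@snap satisfy the prefix test above, while
 * a clone such as tank/copy created from tank/fs@snap falls through
 * to the clone branch below.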
*/ if (cbp->cb_recurse) goto out; if (cbp->cb_first) { (void) fprintf(stderr, gettext("cannot destroy '%s': " "%s has children\n"), zfs_get_name(cbp->cb_target), zfs_type_to_name(zfs_get_type(cbp->cb_target))); (void) fprintf(stderr, gettext("use '-r' to destroy " "the following datasets:\n")); cbp->cb_first = B_FALSE; cbp->cb_error = B_TRUE; } (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); } else { /* * This is a clone. We only want to report this if the '-r' * wasn't specified, or the target is a snapshot. */ if (!cbp->cb_recurse && zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT) goto out; if (cbp->cb_first) { (void) fprintf(stderr, gettext("cannot destroy '%s': " "%s has dependent clones\n"), zfs_get_name(cbp->cb_target), zfs_type_to_name(zfs_get_type(cbp->cb_target))); (void) fprintf(stderr, gettext("use '-R' to destroy " "the following datasets:\n")); cbp->cb_first = B_FALSE; cbp->cb_error = B_TRUE; cbp->cb_dryrun = B_TRUE; } (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); } out: zfs_close(zhp); return (0); } static int destroy_callback(zfs_handle_t *zhp, void *data) { destroy_cbdata_t *cb = data; const char *name = zfs_get_name(zhp); if (cb->cb_verbose) { if (cb->cb_parsable) { (void) printf("destroy\t%s\n", name); } else if (cb->cb_dryrun) { (void) printf(gettext("would destroy %s\n"), name); } else { (void) printf(gettext("will destroy %s\n"), name); } } /* * Ignore pools (which we've already flagged as an error before getting * here). */ if (strchr(zfs_get_name(zhp), '/') == NULL && zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) { zfs_close(zhp); return (0); } if (!cb->cb_dryrun) { if (zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 || zfs_destroy(zhp, cb->cb_defer_destroy) != 0) { zfs_close(zhp); return (-1); } } zfs_close(zhp); return (0); } static int destroy_print_cb(zfs_handle_t *zhp, void *arg) { destroy_cbdata_t *cb = arg; const char *name = zfs_get_name(zhp); int err = 0; if (nvlist_exists(cb->cb_nvl, name)) { if (cb->cb_firstsnap == NULL) cb->cb_firstsnap = zfs_handle_dup(zhp); if (cb->cb_prevsnap != NULL) zfs_close(cb->cb_prevsnap); /* this snap continues the current range */ cb->cb_prevsnap = zfs_handle_dup(zhp); if (cb->cb_verbose) { if (cb->cb_parsable) { (void) printf("destroy\t%s\n", name); } else if (cb->cb_dryrun) { (void) printf(gettext("would destroy %s\n"), name); } else { (void) printf(gettext("will destroy %s\n"), name); } } } else if (cb->cb_firstsnap != NULL) { /* end of this range */ uint64_t used = 0; err = zfs_get_snapused_int(cb->cb_firstsnap, cb->cb_prevsnap, &used); cb->cb_snapused += used; zfs_close(cb->cb_firstsnap); cb->cb_firstsnap = NULL; zfs_close(cb->cb_prevsnap); cb->cb_prevsnap = NULL; } zfs_close(zhp); return (err); } static int destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb) { int err = 0; assert(cb->cb_firstsnap == NULL); assert(cb->cb_prevsnap == NULL); err = zfs_iter_snapshots_sorted(fs_zhp, destroy_print_cb, cb); if (cb->cb_firstsnap != NULL) { uint64_t used = 0; if (err == 0) { err = zfs_get_snapused_int(cb->cb_firstsnap, cb->cb_prevsnap, &used); } cb->cb_snapused += used; zfs_close(cb->cb_firstsnap); cb->cb_firstsnap = NULL; zfs_close(cb->cb_prevsnap); cb->cb_prevsnap = NULL; } return (err); } static int snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg) { destroy_cbdata_t *cb = arg; int err = 0; /* Check for clones. 
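 * Each snapshot that passes this check is recorded by name in cb_nvl;
 * nothing is destroyed here. The gathered batch is destroyed later,
 * in zfs_do_destroy(), via zfs_destroy_snaps_nvl().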
*/ if (!cb->cb_doclones) { cb->cb_target = zhp; cb->cb_first = B_TRUE; err = zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent, cb); } if (err == 0) { if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp))) nomem(); } zfs_close(zhp); return (err); } static int gather_snapshots(zfs_handle_t *zhp, void *arg) { destroy_cbdata_t *cb = arg; int err = 0; err = zfs_iter_snapspec(zhp, cb->cb_snapspec, snapshot_to_nvl_cb, cb); if (err == ENOENT) err = 0; if (err != 0) goto out; if (cb->cb_verbose) { err = destroy_print_snapshots(zhp, cb); if (err != 0) goto out; } if (cb->cb_recurse) err = zfs_iter_filesystems(zhp, gather_snapshots, cb); out: zfs_close(zhp); return (err); } static int destroy_clones(destroy_cbdata_t *cb) { nvpair_t *pair; for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL); pair != NULL; pair = nvlist_next_nvpair(cb->cb_nvl, pair)) { zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair), ZFS_TYPE_SNAPSHOT); if (zhp != NULL) { boolean_t defer = cb->cb_defer_destroy; int err = 0; /* * We can't defer destroy non-snapshots, so set it to * false while destroying the clones. */ cb->cb_defer_destroy = B_FALSE; err = zfs_iter_dependents(zhp, B_FALSE, destroy_callback, cb); cb->cb_defer_destroy = defer; zfs_close(zhp); if (err != 0) return (err); } } return (0); } static int zfs_do_destroy(int argc, char **argv) { destroy_cbdata_t cb = { 0 }; int c; zfs_handle_t *zhp; char *at; zfs_type_t type = ZFS_TYPE_DATASET; /* check options */ while ((c = getopt(argc, argv, "vpndfrR")) != -1) { switch (c) { case 'v': cb.cb_verbose = B_TRUE; break; case 'p': cb.cb_verbose = B_TRUE; cb.cb_parsable = B_TRUE; break; case 'n': cb.cb_dryrun = B_TRUE; break; case 'd': cb.cb_defer_destroy = B_TRUE; type = ZFS_TYPE_SNAPSHOT; break; case 'f': cb.cb_force = B_TRUE; break; case 'r': cb.cb_recurse = B_TRUE; break; case 'R': cb.cb_recurse = B_TRUE; cb.cb_doclones = B_TRUE; break; case '?': default: (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc == 0) { (void) fprintf(stderr, gettext("missing dataset argument\n")); usage(B_FALSE); } if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } at = strchr(argv[0], '@'); if (at != NULL) { int err = 0; /* Build the list of snaps to destroy in cb_nvl. 
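 * The text after '@' may list several snapshots and may use '%' for
 * creation-order ranges; e.g. (hypothetical names) 'tank/fs@a%c,e'
 * covers snapshots a through c plus e. zfs_iter_snapspec() expands
 * the specification and hands each match to snapshot_to_nvl_cb().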
*/ if (nvlist_alloc(&cb.cb_nvl, NV_UNIQUE_NAME, 0) != 0) nomem(); *at = '\0'; zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) return (1); cb.cb_snapspec = at + 1; if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 || cb.cb_error) { zfs_close(zhp); nvlist_free(cb.cb_nvl); return (1); } if (nvlist_empty(cb.cb_nvl)) { (void) fprintf(stderr, gettext("could not find any " "snapshots to destroy; check snapshot names.\n")); zfs_close(zhp); nvlist_free(cb.cb_nvl); return (1); } if (cb.cb_verbose) { char buf[16]; zfs_nicenum(cb.cb_snapused, buf, sizeof (buf)); if (cb.cb_parsable) { (void) printf("reclaim\t%llu\n", cb.cb_snapused); } else if (cb.cb_dryrun) { (void) printf(gettext("would reclaim %s\n"), buf); } else { (void) printf(gettext("will reclaim %s\n"), buf); } } if (!cb.cb_dryrun) { if (cb.cb_doclones) err = destroy_clones(&cb); if (err == 0) { err = zfs_destroy_snaps_nvl(zhp, cb.cb_nvl, cb.cb_defer_destroy); } } zfs_close(zhp); nvlist_free(cb.cb_nvl); if (err != 0) return (1); } else { /* Open the given dataset */ if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL) return (1); cb.cb_target = zhp; /* * Perform an explicit check for pools before going any further. */ if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL && zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) { (void) fprintf(stderr, gettext("cannot destroy '%s': " "operation does not apply to pools\n"), zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use 'zfs destroy -r " "%s' to destroy all datasets in the pool\n"), zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use 'zpool destroy %s' " "to destroy the pool itself\n"), zfs_get_name(zhp)); zfs_close(zhp); return (1); } /* * Check for any dependents and/or clones. */ cb.cb_first = B_TRUE; if (!cb.cb_doclones && zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent, &cb) != 0) { zfs_close(zhp); return (1); } if (cb.cb_error) { zfs_close(zhp); return (1); } if (zfs_iter_dependents(zhp, B_FALSE, destroy_callback, &cb) != 0) { zfs_close(zhp); return (1); } /* * Do the real thing. The callback will close the * handle regardless of whether it succeeds or not. */ if (destroy_callback(zhp, &cb) != 0) return (1); } return (0); } static boolean_t is_recvd_column(zprop_get_cbdata_t *cbp) { int i; zfs_get_column_t col; for (i = 0; i < ZFS_GET_NCOLS && (col = cbp->cb_columns[i]) != GET_COL_NONE; i++) if (col == GET_COL_RECVD) return (B_TRUE); return (B_FALSE); } /* * zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...] * < all | property[,property]... > < fs | snap | vol > ... * * -r recurse over any child datasets * -H scripted mode. Headers are stripped, and fields are separated * by tabs instead of spaces. * -o Set of fields to display. One of "name,property,value, * received,source". Default is "name,property,value,source". * "all" is an alias for all five. * -s Set of sources to allow. One of * "local,default,inherited,received,temporary,none". Default is * all six. * -p Display values in parsable (literal) format. * * Prints properties for the given datasets. The user can control which * columns to display as well as which property types to allow. */ /* * Invoked to display the properties for a single dataset. 
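 * For example, 'zfs get -H -o name,value used tank' prints one
 * tab-separated name/value line per dataset, with headers elided by
 * the scripted (-H) mode described above.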
*/ static int get_callback(zfs_handle_t *zhp, void *data) { char buf[ZFS_MAXPROPLEN]; char rbuf[ZFS_MAXPROPLEN]; zprop_source_t sourcetype; char source[ZFS_MAXNAMELEN]; zprop_get_cbdata_t *cbp = data; nvlist_t *user_props = zfs_get_user_props(zhp); zprop_list_t *pl = cbp->cb_proplist; nvlist_t *propval; char *strval; char *sourceval; boolean_t received = is_recvd_column(cbp); for (; pl != NULL; pl = pl->pl_next) { char *recvdval = NULL; /* * Skip the special fake placeholder. This will also skip over * the name property when 'all' is specified. */ if (pl->pl_prop == ZFS_PROP_NAME && pl == cbp->cb_proplist) continue; if (pl->pl_prop != ZPROP_INVAL) { if (zfs_prop_get(zhp, pl->pl_prop, buf, sizeof (buf), &sourcetype, source, sizeof (source), cbp->cb_literal) != 0) { if (pl->pl_all) continue; if (!zfs_prop_valid_for_type(pl->pl_prop, ZFS_TYPE_DATASET)) { (void) fprintf(stderr, gettext("No such property '%s'\n"), zfs_prop_to_name(pl->pl_prop)); continue; } sourcetype = ZPROP_SRC_NONE; (void) strlcpy(buf, "-", sizeof (buf)); } if (received && (zfs_prop_get_recvd(zhp, zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf), cbp->cb_literal) == 0)) recvdval = rbuf; zprop_print_one_property(zfs_get_name(zhp), cbp, zfs_prop_to_name(pl->pl_prop), buf, sourcetype, source, recvdval); } else if (zfs_prop_userquota(pl->pl_user_prop)) { sourcetype = ZPROP_SRC_LOCAL; if (zfs_prop_get_userquota(zhp, pl->pl_user_prop, buf, sizeof (buf), cbp->cb_literal) != 0) { sourcetype = ZPROP_SRC_NONE; (void) strlcpy(buf, "-", sizeof (buf)); } zprop_print_one_property(zfs_get_name(zhp), cbp, pl->pl_user_prop, buf, sourcetype, source, NULL); } else if (zfs_prop_written(pl->pl_user_prop)) { sourcetype = ZPROP_SRC_LOCAL; if (zfs_prop_get_written(zhp, pl->pl_user_prop, buf, sizeof (buf), cbp->cb_literal) != 0) { sourcetype = ZPROP_SRC_NONE; (void) strlcpy(buf, "-", sizeof (buf)); } zprop_print_one_property(zfs_get_name(zhp), cbp, pl->pl_user_prop, buf, sourcetype, source, NULL); } else { if (nvlist_lookup_nvlist(user_props, pl->pl_user_prop, &propval) != 0) { if (pl->pl_all) continue; sourcetype = ZPROP_SRC_NONE; strval = "-"; } else { verify(nvlist_lookup_string(propval, ZPROP_VALUE, &strval) == 0); verify(nvlist_lookup_string(propval, ZPROP_SOURCE, &sourceval) == 0); if (strcmp(sourceval, zfs_get_name(zhp)) == 0) { sourcetype = ZPROP_SRC_LOCAL; } else if (strcmp(sourceval, ZPROP_SOURCE_VAL_RECVD) == 0) { sourcetype = ZPROP_SRC_RECEIVED; } else { sourcetype = ZPROP_SRC_INHERITED; (void) strlcpy(source, sourceval, sizeof (source)); } } if (received && (zfs_prop_get_recvd(zhp, pl->pl_user_prop, rbuf, sizeof (rbuf), cbp->cb_literal) == 0)) recvdval = rbuf; zprop_print_one_property(zfs_get_name(zhp), cbp, pl->pl_user_prop, strval, sourcetype, source, recvdval); } } return (0); } static int zfs_do_get(int argc, char **argv) { zprop_get_cbdata_t cb = { 0 }; int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS; int types = ZFS_TYPE_DATASET; char *value, *fields; int ret = 0; int limit = 0; zprop_list_t fake_name = { 0 }; /* * Set up default columns and sources. 
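 * The defaults below are equivalent to passing
 * '-o name,property,value,source' with every property source
 * (ZPROP_SRC_ALL) allowed.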
*/ cb.cb_sources = ZPROP_SRC_ALL; cb.cb_columns[0] = GET_COL_NAME; cb.cb_columns[1] = GET_COL_PROPERTY; cb.cb_columns[2] = GET_COL_VALUE; cb.cb_columns[3] = GET_COL_SOURCE; cb.cb_type = ZFS_TYPE_DATASET; /* check options */ while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) { switch (c) { case 'p': cb.cb_literal = B_TRUE; break; case 'd': limit = parse_depth(optarg, &flags); break; case 'r': flags |= ZFS_ITER_RECURSE; break; case 'H': cb.cb_scripted = B_TRUE; break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case 'o': /* * Process the set of columns to display. We zero out * the structure to give us a blank slate. */ bzero(&cb.cb_columns, sizeof (cb.cb_columns)); i = 0; while (*optarg != '\0') { static char *col_subopts[] = { "name", "property", "value", "received", "source", "all", NULL }; if (i == ZFS_GET_NCOLS) { (void) fprintf(stderr, gettext("too " "many fields given to -o " "option\n")); usage(B_FALSE); } switch (getsubopt(&optarg, col_subopts, &value)) { case 0: cb.cb_columns[i++] = GET_COL_NAME; break; case 1: cb.cb_columns[i++] = GET_COL_PROPERTY; break; case 2: cb.cb_columns[i++] = GET_COL_VALUE; break; case 3: cb.cb_columns[i++] = GET_COL_RECVD; flags |= ZFS_ITER_RECVD_PROPS; break; case 4: cb.cb_columns[i++] = GET_COL_SOURCE; break; case 5: if (i > 0) { (void) fprintf(stderr, gettext("\"all\" conflicts " "with specific fields " "given to -o option\n")); usage(B_FALSE); } cb.cb_columns[0] = GET_COL_NAME; cb.cb_columns[1] = GET_COL_PROPERTY; cb.cb_columns[2] = GET_COL_VALUE; cb.cb_columns[3] = GET_COL_RECVD; cb.cb_columns[4] = GET_COL_SOURCE; flags |= ZFS_ITER_RECVD_PROPS; i = ZFS_GET_NCOLS; break; default: (void) fprintf(stderr, gettext("invalid column name " "'%s'\n"), value); usage(B_FALSE); } } break; case 's': cb.cb_sources = 0; while (*optarg != '\0') { static char *source_subopts[] = { "local", "default", "inherited", "received", "temporary", "none", NULL }; switch (getsubopt(&optarg, source_subopts, &value)) { case 0: cb.cb_sources |= ZPROP_SRC_LOCAL; break; case 1: cb.cb_sources |= ZPROP_SRC_DEFAULT; break; case 2: cb.cb_sources |= ZPROP_SRC_INHERITED; break; case 3: cb.cb_sources |= ZPROP_SRC_RECEIVED; break; case 4: cb.cb_sources |= ZPROP_SRC_TEMPORARY; break; case 5: cb.cb_sources |= ZPROP_SRC_NONE; break; default: (void) fprintf(stderr, gettext("invalid source " "'%s'\n"), value); usage(B_FALSE); } } break; case 't': types = 0; flags &= ~ZFS_ITER_PROP_LISTSNAPS; while (*optarg != '\0') { static char *type_subopts[] = { "filesystem", "volume", "snapshot", "all", NULL }; switch (getsubopt(&optarg, type_subopts, &value)) { case 0: types |= ZFS_TYPE_FILESYSTEM; break; case 1: types |= ZFS_TYPE_VOLUME; break; case 2: types |= ZFS_TYPE_SNAPSHOT; break; case 3: types = ZFS_TYPE_DATASET; break; default: (void) fprintf(stderr, gettext("invalid type '%s'\n"), value); usage(B_FALSE); } } break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; if (argc < 1) { (void) fprintf(stderr, gettext("missing property " "argument\n")); usage(B_FALSE); } fields = argv[0]; if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET) != 0) usage(B_FALSE); argc--; argv++; /* * As part of zfs_expand_proplist(), we keep track of the maximum column * width for each property. For the 'NAME' (and 'SOURCE') columns, we * need to know the maximum name length. 
However, the user likely did * not specify 'name' as one of the properties to fetch, so we need to * make sure we always include at least this property for * print_get_headers() to work properly. */ if (cb.cb_proplist != NULL) { fake_name.pl_prop = ZFS_PROP_NAME; fake_name.pl_width = strlen(gettext("NAME")); fake_name.pl_next = cb.cb_proplist; cb.cb_proplist = &fake_name; } cb.cb_first = B_TRUE; /* run for each object */ ret = zfs_for_each(argc, argv, flags, types, NULL, &cb.cb_proplist, limit, get_callback, &cb); if (cb.cb_proplist == &fake_name) zprop_free_list(fake_name.pl_next); else zprop_free_list(cb.cb_proplist); return (ret); } /* * inherit [-rS] ... * * -r Recurse over all children * -S Revert to received value, if any * * For each dataset specified on the command line, inherit the given property * from its parent. Inheriting a property at the pool level will cause it to * use the default value. The '-r' flag will recurse over all children, and is * useful for setting a property on a hierarchy-wide basis, regardless of any * local modifications for each dataset. */ typedef struct inherit_cbdata { const char *cb_propname; boolean_t cb_received; } inherit_cbdata_t; static int inherit_recurse_cb(zfs_handle_t *zhp, void *data) { inherit_cbdata_t *cb = data; zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname); /* * If we're doing it recursively, then ignore properties that * are not valid for this type of dataset. */ if (prop != ZPROP_INVAL && !zfs_prop_valid_for_type(prop, zfs_get_type(zhp))) return (0); return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0); } static int inherit_cb(zfs_handle_t *zhp, void *data) { inherit_cbdata_t *cb = data; return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0); } static int zfs_do_inherit(int argc, char **argv) { int c; zfs_prop_t prop; inherit_cbdata_t cb = { 0 }; char *propname; int ret = 0; int flags = 0; boolean_t received = B_FALSE; /* check options */ while ((c = getopt(argc, argv, "rS")) != -1) { switch (c) { case 'r': flags |= ZFS_ITER_RECURSE; break; case 'S': received = B_TRUE; break; case '?': default: (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing property argument\n")); usage(B_FALSE); } if (argc < 2) { (void) fprintf(stderr, gettext("missing dataset argument\n")); usage(B_FALSE); } propname = argv[0]; argc--; argv++; if ((prop = zfs_name_to_prop(propname)) != ZPROP_INVAL) { if (zfs_prop_readonly(prop)) { (void) fprintf(stderr, gettext( "%s property is read-only\n"), propname); return (1); } if (!zfs_prop_inheritable(prop) && !received) { (void) fprintf(stderr, gettext("'%s' property cannot " "be inherited\n"), propname); if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION || prop == ZFS_PROP_REFQUOTA || prop == ZFS_PROP_REFRESERVATION) (void) fprintf(stderr, gettext("use 'zfs set " "%s=none' to clear\n"), propname); return (1); } if (received && (prop == ZFS_PROP_VOLSIZE || prop == ZFS_PROP_VERSION)) { (void) fprintf(stderr, gettext("'%s' property cannot " "be reverted to a received value\n"), propname); return (1); } } else if (!zfs_prop_user(propname)) { (void) fprintf(stderr, gettext("invalid property '%s'\n"), propname); usage(B_FALSE); } cb.cb_propname = propname; cb.cb_received = received; if (flags & ZFS_ITER_RECURSE) { ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET, NULL, NULL, 0, inherit_recurse_cb, &cb); } else { ret = 
zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET, NULL, NULL, 0, inherit_cb, &cb); } return (ret); } typedef struct upgrade_cbdata { uint64_t cb_numupgraded; uint64_t cb_numsamegraded; uint64_t cb_numfailed; uint64_t cb_version; boolean_t cb_newer; boolean_t cb_foundone; char cb_lastfs[ZFS_MAXNAMELEN]; } upgrade_cbdata_t; static int same_pool(zfs_handle_t *zhp, const char *name) { int len1 = strcspn(name, "/@"); const char *zhname = zfs_get_name(zhp); int len2 = strcspn(zhname, "/@"); if (len1 != len2) return (B_FALSE); return (strncmp(name, zhname, len1) == 0); } static int upgrade_list_callback(zfs_handle_t *zhp, void *data) { upgrade_cbdata_t *cb = data; int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); /* list if it's old/new */ if ((!cb->cb_newer && version < ZPL_VERSION) || (cb->cb_newer && version > ZPL_VERSION)) { char *str; if (cb->cb_newer) { str = gettext("The following filesystems are " "formatted using a newer software version and\n" "cannot be accessed on the current system.\n\n"); } else { str = gettext("The following filesystems are " "out of date, and can be upgraded. After being\n" "upgraded, these filesystems (and any 'zfs send' " "streams generated from\n" "subsequent snapshots) will no longer be " "accessible by older software versions.\n\n"); } if (!cb->cb_foundone) { (void) puts(str); (void) printf(gettext("VER FILESYSTEM\n")); (void) printf(gettext("--- ------------\n")); cb->cb_foundone = B_TRUE; } (void) printf("%2u %s\n", version, zfs_get_name(zhp)); } return (0); } static int upgrade_set_callback(zfs_handle_t *zhp, void *data) { upgrade_cbdata_t *cb = data; int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); int needed_spa_version; int spa_version; if (zfs_spa_version(zhp, &spa_version) < 0) return (-1); needed_spa_version = zfs_spa_version_map(cb->cb_version); if (needed_spa_version < 0) return (-1); if (spa_version < needed_spa_version) { /* can't upgrade */ (void) printf(gettext("%s: can not be " "upgraded; the pool version needs to first " "be upgraded\nto version %d\n\n"), zfs_get_name(zhp), needed_spa_version); cb->cb_numfailed++; return (0); } /* upgrade */ if (version < cb->cb_version) { char verstr[16]; (void) snprintf(verstr, sizeof (verstr), "%llu", cb->cb_version); if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) { /* * If they did "zfs upgrade -a", then we could * be doing ioctls to different pools. We need * to log this history once to each pool. 
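 * (Roughly: zpool_stage_history() re-arms the logged command string so
 * that the next ioctl issued against the other pool records the
 * command in that pool's 'zpool history' as well.)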
*/ verify(zpool_stage_history(g_zfs, history_str) == 0); } if (zfs_prop_set(zhp, "version", verstr) == 0) cb->cb_numupgraded++; else cb->cb_numfailed++; (void) strcpy(cb->cb_lastfs, zfs_get_name(zhp)); } else if (version > cb->cb_version) { /* can't downgrade */ (void) printf(gettext("%s: cannot be downgraded; " "it is already at version %u\n"), zfs_get_name(zhp), version); cb->cb_numfailed++; } else { cb->cb_numsamegraded++; } return (0); } /* * zfs upgrade * zfs upgrade -v * zfs upgrade [-r] [-V ] <-a | filesystem> */ static int zfs_do_upgrade(int argc, char **argv) { boolean_t all = B_FALSE; boolean_t showversions = B_FALSE; int ret = 0; upgrade_cbdata_t cb = { 0 }; char c; int flags = ZFS_ITER_ARGS_CAN_BE_PATHS; /* check options */ while ((c = getopt(argc, argv, "rvV:a")) != -1) { switch (c) { case 'r': flags |= ZFS_ITER_RECURSE; break; case 'v': showversions = B_TRUE; break; case 'V': if (zfs_prop_string_to_index(ZFS_PROP_VERSION, optarg, &cb.cb_version) != 0) { (void) fprintf(stderr, gettext("invalid version %s\n"), optarg); usage(B_FALSE); } break; case 'a': all = B_TRUE; break; case '?': default: (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version)) usage(B_FALSE); if (showversions && (flags & ZFS_ITER_RECURSE || all || cb.cb_version || argc)) usage(B_FALSE); if ((all || argc) && (showversions)) usage(B_FALSE); if (all && argc) usage(B_FALSE); if (showversions) { /* Show info on available versions. */ (void) printf(gettext("The following filesystem versions are " "supported:\n\n")); (void) printf(gettext("VER DESCRIPTION\n")); (void) printf("--- -----------------------------------------" "---------------\n"); (void) printf(gettext(" 1 Initial ZFS filesystem version\n")); (void) printf(gettext(" 2 Enhanced directory entries\n")); (void) printf(gettext(" 3 Case insensitive and filesystem " "user identifier (FUID)\n")); (void) printf(gettext(" 4 userquota, groupquota " "properties\n")); (void) printf(gettext(" 5 System attributes\n")); (void) printf(gettext("\nFor more information on a particular " "version, including supported releases,\n")); (void) printf("see the ZFS Administration Guide.\n\n"); ret = 0; } else if (argc || all) { /* Upgrade filesystems */ if (cb.cb_version == 0) cb.cb_version = ZPL_VERSION; ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM, NULL, NULL, 0, upgrade_set_callback, &cb); (void) printf(gettext("%llu filesystems upgraded\n"), cb.cb_numupgraded); if (cb.cb_numsamegraded) { (void) printf(gettext("%llu filesystems already at " "this version\n"), cb.cb_numsamegraded); } if (cb.cb_numfailed != 0) ret = 1; } else { /* List old-version filesystems */ boolean_t found; (void) printf(gettext("This system is currently running " "ZFS filesystem version %llu.\n\n"), ZPL_VERSION); flags |= ZFS_ITER_RECURSE; ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM, NULL, NULL, 0, upgrade_list_callback, &cb); found = cb.cb_foundone; cb.cb_foundone = B_FALSE; cb.cb_newer = B_TRUE; ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM, NULL, NULL, 0, upgrade_list_callback, &cb); if (!cb.cb_foundone && !found) { (void) printf(gettext("All filesystems are " "formatted with the current version.\n")); } } return (ret); } #define USTYPE_USR_BIT (0) #define USTYPE_GRP_BIT (1) #define USTYPE_PSX_BIT (2) #define USTYPE_SMB_BIT (3) #define USTYPE_USR (1 << USTYPE_USR_BIT) #define USTYPE_GRP (1 << USTYPE_GRP_BIT) #define USTYPE_PSX (1 <<
USTYPE_PSX_BIT) #define USTYPE_SMB (1 << USTYPE_SMB_BIT) #define USTYPE_PSX_USR (USTYPE_PSX | USTYPE_USR) #define USTYPE_SMB_USR (USTYPE_SMB | USTYPE_USR) #define USTYPE_PSX_GRP (USTYPE_PSX | USTYPE_GRP) #define USTYPE_SMB_GRP (USTYPE_SMB | USTYPE_GRP) #define USTYPE_ALL (USTYPE_PSX_USR | USTYPE_SMB_USR \ | USTYPE_PSX_GRP | USTYPE_SMB_GRP) #define USPROP_USED_BIT (0) #define USPROP_QUOTA_BIT (1) #define USPROP_USED (1 << USPROP_USED_BIT) #define USPROP_QUOTA (1 << USPROP_QUOTA_BIT) typedef struct us_node { nvlist_t *usn_nvl; uu_avl_node_t usn_avlnode; uu_list_node_t usn_listnode; } us_node_t; typedef struct us_cbdata { nvlist_t **cb_nvlp; uu_avl_pool_t *cb_avl_pool; uu_avl_t *cb_avl; boolean_t cb_numname; boolean_t cb_nicenum; boolean_t cb_sid2posix; zfs_userquota_prop_t cb_prop; zfs_sort_column_t *cb_sortcol; size_t cb_max_typelen; size_t cb_max_namelen; size_t cb_max_usedlen; size_t cb_max_quotalen; } us_cbdata_t; typedef struct { zfs_sort_column_t *si_sortcol; boolean_t si_num_name; boolean_t si_parsable; } us_sort_info_t; static int us_compare(const void *larg, const void *rarg, void *unused) { const us_node_t *l = larg; const us_node_t *r = rarg; int rc = 0; us_sort_info_t *si = (us_sort_info_t *)unused; zfs_sort_column_t *sortcol = si->si_sortcol; boolean_t num_name = si->si_num_name; nvlist_t *lnvl = l->usn_nvl; nvlist_t *rnvl = r->usn_nvl; for (; sortcol != NULL; sortcol = sortcol->sc_next) { char *lvstr = ""; char *rvstr = ""; uint32_t lv32 = 0; uint32_t rv32 = 0; uint64_t lv64 = 0; uint64_t rv64 = 0; zfs_prop_t prop = sortcol->sc_prop; const char *propname = NULL; boolean_t reverse = sortcol->sc_reverse; switch (prop) { case ZFS_PROP_TYPE: propname = "type"; (void) nvlist_lookup_uint32(lnvl, propname, &lv32); (void) nvlist_lookup_uint32(rnvl, propname, &rv32); if (rv32 != lv32) rc = (rv32 > lv32) ? 1 : -1; break; case ZFS_PROP_NAME: propname = "name"; if (num_name) { (void) nvlist_lookup_uint32(lnvl, propname, &lv32); (void) nvlist_lookup_uint32(rnvl, propname, &rv32); if (rv32 != lv32) rc = (rv32 > lv32) ? 1 : -1; } else { (void) nvlist_lookup_string(lnvl, propname, &lvstr); (void) nvlist_lookup_string(rnvl, propname, &rvstr); rc = strcmp(lvstr, rvstr); } break; case ZFS_PROP_USED: case ZFS_PROP_QUOTA: if (ZFS_PROP_USED == prop) propname = "used"; else propname = "quota"; (void) nvlist_lookup_uint64(lnvl, propname, &lv64); (void) nvlist_lookup_uint64(rnvl, propname, &rv64); if (rv64 != lv64) rc = (rv64 > lv64) ? 1 : -1; } if (rc) if (rc < 0) return (reverse ? 1 : -1); else return (reverse ? 
-1 : 1); } return (rc); } static inline const char * us_type2str(unsigned field_type) { switch (field_type) { case USTYPE_PSX_USR: return ("POSIX User"); case USTYPE_PSX_GRP: return ("POSIX Group"); case USTYPE_SMB_USR: return ("SMB User"); case USTYPE_SMB_GRP: return ("SMB Group"); default: return ("Undefined"); } } /* * zfs userspace */ static int userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space) { us_cbdata_t *cb = (us_cbdata_t *)arg; zfs_userquota_prop_t prop = cb->cb_prop; char *name = NULL; char *propname; char namebuf[32]; char sizebuf[32]; us_node_t *node; uu_avl_pool_t *avl_pool = cb->cb_avl_pool; uu_avl_t *avl = cb->cb_avl; uu_avl_index_t idx; nvlist_t *props; us_node_t *n; zfs_sort_column_t *sortcol = cb->cb_sortcol; unsigned type; const char *typestr; size_t namelen; size_t typelen; size_t sizelen; us_sort_info_t sortinfo = { sortcol, cb->cb_numname }; if (domain == NULL || domain[0] == '\0') { /* POSIX */ if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) { type = USTYPE_PSX_GRP; struct group *g = getgrgid(rid); if (g) name = g->gr_name; } else { type = USTYPE_PSX_USR; struct passwd *p = getpwuid(rid); if (p) name = p->pw_name; } } else { char sid[ZFS_MAXNAMELEN+32]; uid_t id; uint64_t classes; #ifdef sun int err = 0; directory_error_t e; #endif (void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid); /* SMB */ if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) { type = USTYPE_SMB_GRP; #ifdef sun err = sid_to_id(sid, B_FALSE, &id); #endif } else { type = USTYPE_SMB_USR; #ifdef sun err = sid_to_id(sid, B_TRUE, &id); #endif } #ifdef sun if (err == 0) { rid = id; e = directory_name_from_sid(NULL, sid, &name, &classes); if (e != NULL) { directory_error_free(e); return (NULL); } if (name == NULL) name = sid; } #endif } /* * if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) * ug = "group"; * else * ug = "user"; */ if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED) propname = "used"; else propname = "quota"; (void) snprintf(namebuf, sizeof (namebuf), "%u", rid); if (name == NULL) name = namebuf; if (cb->cb_nicenum) zfs_nicenum(space, sizebuf, sizeof (sizebuf)); else (void) sprintf(sizebuf, "%llu", space); node = safe_malloc(sizeof (us_node_t)); uu_avl_node_init(node, &node->usn_avlnode, avl_pool); if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { free(node); return (-1); } if (nvlist_add_uint32(props, "type", type) != 0) nomem(); if (cb->cb_numname) { if (nvlist_add_uint32(props, "name", rid) != 0) nomem(); namelen = strlen(namebuf); } else { if (nvlist_add_string(props, "name", name) != 0) nomem(); namelen = strlen(name); } typestr = us_type2str(type); typelen = strlen(gettext(typestr)); if (typelen > cb->cb_max_typelen) cb->cb_max_typelen = typelen; if (namelen > cb->cb_max_namelen) cb->cb_max_namelen = namelen; sizelen = strlen(sizebuf); if (0 == strcmp(propname, "used")) { if (sizelen > cb->cb_max_usedlen) cb->cb_max_usedlen = sizelen; } else { if (sizelen > cb->cb_max_quotalen) cb->cb_max_quotalen = sizelen; } node->usn_nvl = props; n = uu_avl_find(avl, node, &sortinfo, &idx); if (n == NULL) uu_avl_insert(avl, node, idx); else { nvlist_free(props); free(node); node = n; props = node->usn_nvl; } if (nvlist_add_uint64(props, propname, space) != 0) nomem(); return (0); } static inline boolean_t usprop_check(zfs_userquota_prop_t p, unsigned types, unsigned props) { unsigned type; unsigned prop; switch (p) { case ZFS_PROP_USERUSED: type = USTYPE_USR; prop = USPROP_USED; break; case ZFS_PROP_USERQUOTA: type = 
USTYPE_USR; prop = USPROP_QUOTA; break; case ZFS_PROP_GROUPUSED: type = USTYPE_GRP; prop = USPROP_USED; break; case ZFS_PROP_GROUPQUOTA: type = USTYPE_GRP; prop = USPROP_QUOTA; break; default: /* ALL */ return (B_TRUE); }; return (type & types && prop & props); } #define USFIELD_TYPE (1 << 0) #define USFIELD_NAME (1 << 1) #define USFIELD_USED (1 << 2) #define USFIELD_QUOTA (1 << 3) #define USFIELD_ALL (USFIELD_TYPE | USFIELD_NAME | USFIELD_USED | USFIELD_QUOTA) static int parsefields(unsigned *fieldsp, char **names, unsigned *bits, size_t len) { char *field = optarg; char *delim; do { int i; boolean_t found = B_FALSE; delim = strchr(field, ','); if (delim != NULL) *delim = '\0'; for (i = 0; i < len; i++) if (0 == strcmp(field, names[i])) { found = B_TRUE; *fieldsp |= bits[i]; break; } if (!found) { (void) fprintf(stderr, gettext("invalid type '%s'" "for -t option\n"), field); return (-1); } field = delim + 1; } while (delim); return (0); } static char *type_names[] = { "posixuser", "smbuser", "posixgroup", "smbgroup", "all" }; static unsigned type_bits[] = { USTYPE_PSX_USR, USTYPE_SMB_USR, USTYPE_PSX_GRP, USTYPE_SMB_GRP, USTYPE_ALL }; static char *us_field_names[] = { "type", "name", "used", "quota" }; static unsigned us_field_bits[] = { USFIELD_TYPE, USFIELD_NAME, USFIELD_USED, USFIELD_QUOTA }; static void print_us_node(boolean_t scripted, boolean_t parseable, unsigned fields, size_t type_width, size_t name_width, size_t used_width, size_t quota_width, us_node_t *node) { nvlist_t *nvl = node->usn_nvl; nvpair_t *nvp = NULL; char valstr[ZFS_MAXNAMELEN]; boolean_t first = B_TRUE; boolean_t quota_found = B_FALSE; if (fields & USFIELD_QUOTA && !nvlist_exists(nvl, "quota")) if (nvlist_add_string(nvl, "quota", "none") != 0) nomem(); while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { char *pname = nvpair_name(nvp); data_type_t type = nvpair_type(nvp); uint32_t val32 = 0; uint64_t val64 = 0; char *strval = NULL; unsigned field = 0; unsigned width = 0; int i; for (i = 0; i < 4; i++) { if (0 == strcmp(pname, us_field_names[i])) { field = us_field_bits[i]; break; } } if (!(field & fields)) continue; switch (type) { case DATA_TYPE_UINT32: (void) nvpair_value_uint32(nvp, &val32); break; case DATA_TYPE_UINT64: (void) nvpair_value_uint64(nvp, &val64); break; case DATA_TYPE_STRING: (void) nvpair_value_string(nvp, &strval); break; default: (void) fprintf(stderr, "Invalid data type\n"); } if (!first) if (scripted) (void) printf("\t"); else (void) printf(" "); switch (field) { case USFIELD_TYPE: strval = (char *)us_type2str(val32); width = type_width; break; case USFIELD_NAME: if (type == DATA_TYPE_UINT64) { (void) sprintf(valstr, "%llu", val64); strval = valstr; } width = name_width; break; case USFIELD_USED: case USFIELD_QUOTA: if (type == DATA_TYPE_UINT64) { (void) nvpair_value_uint64(nvp, &val64); if (parseable) (void) sprintf(valstr, "%llu", val64); else zfs_nicenum(val64, valstr, sizeof (valstr)); strval = valstr; } if (field == USFIELD_USED) width = used_width; else { quota_found = B_FALSE; width = quota_width; } break; } if (field == USFIELD_QUOTA && !quota_found) (void) printf("%*s", width, strval); else { if (type == DATA_TYPE_STRING) (void) printf("%-*s", width, strval); else (void) printf("%*s", width, strval); } first = B_FALSE; } (void) printf("\n"); } static void print_us(boolean_t scripted, boolean_t parsable, unsigned fields, unsigned type_width, unsigned name_width, unsigned used_width, unsigned quota_width, boolean_t rmnode, uu_avl_t *avl) { static char *us_field_hdr[] = { "TYPE", 
"NAME", "USED", "QUOTA" }; us_node_t *node; const char *col; int i; size_t width[4] = { type_width, name_width, used_width, quota_width }; if (!scripted) { boolean_t first = B_TRUE; for (i = 0; i < 4; i++) { unsigned field = us_field_bits[i]; if (!(field & fields)) continue; col = gettext(us_field_hdr[i]); if (field == USFIELD_TYPE || field == USFIELD_NAME) (void) printf(first?"%-*s":" %-*s", width[i], col); else (void) printf(first?"%*s":" %*s", width[i], col); first = B_FALSE; } (void) printf("\n"); } for (node = uu_avl_first(avl); node != NULL; node = uu_avl_next(avl, node)) { print_us_node(scripted, parsable, fields, type_width, name_width, used_width, used_width, node); if (rmnode) nvlist_free(node->usn_nvl); } } static int zfs_do_userspace(int argc, char **argv) { zfs_handle_t *zhp; zfs_userquota_prop_t p; uu_avl_pool_t *avl_pool; uu_avl_t *avl_tree; uu_avl_walk_t *walk; char *cmd; boolean_t scripted = B_FALSE; boolean_t prtnum = B_FALSE; boolean_t parseable = B_FALSE; boolean_t sid2posix = B_FALSE; int error = 0; int c; zfs_sort_column_t *default_sortcol = NULL; zfs_sort_column_t *sortcol = NULL; unsigned types = USTYPE_PSX_USR | USTYPE_SMB_USR; unsigned fields = 0; unsigned props = USPROP_USED | USPROP_QUOTA; us_cbdata_t cb; us_node_t *node; boolean_t resort_avl = B_FALSE; if (argc < 2) usage(B_FALSE); cmd = argv[0]; if (0 == strcmp(cmd, "groupspace")) /* toggle default group types */ types = USTYPE_PSX_GRP | USTYPE_SMB_GRP; /* check options */ while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) { switch (c) { case 'n': prtnum = B_TRUE; break; case 'H': scripted = B_TRUE; break; case 'p': parseable = B_TRUE; break; case 'o': if (parsefields(&fields, us_field_names, us_field_bits, 4) != 0) return (1); break; case 's': if (zfs_add_sort_column(&sortcol, optarg, B_FALSE) != 0) { (void) fprintf(stderr, gettext("invalid property '%s'\n"), optarg); usage(B_FALSE); } break; case 'S': if (zfs_add_sort_column(&sortcol, optarg, B_TRUE) != 0) { (void) fprintf(stderr, gettext("invalid property '%s'\n"), optarg); usage(B_FALSE); } break; case 't': if (parsefields(&types, type_names, type_bits, 5)) return (1); break; case 'i': sid2posix = B_TRUE; break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* ok, now we have sorted by default colums (type,name) avl tree */ if (sortcol) { zfs_sort_column_t *sc; for (sc = sortcol; sc; sc = sc->sc_next) { if (sc->sc_prop == ZFS_PROP_QUOTA) { resort_avl = B_TRUE; break; } } } if (!fields) fields = USFIELD_ALL; if ((zhp = zfs_open(g_zfs, argv[argc-1], ZFS_TYPE_DATASET)) == NULL) return (1); if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t), offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL) nomem(); if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL) nomem(); if (sortcol && !resort_avl) cb.cb_sortcol = sortcol; else { (void) zfs_add_sort_column(&default_sortcol, "type", B_FALSE); (void) zfs_add_sort_column(&default_sortcol, "name", B_FALSE); cb.cb_sortcol = default_sortcol; } cb.cb_numname = prtnum; cb.cb_nicenum = !parseable; cb.cb_avl_pool = avl_pool; cb.cb_avl = avl_tree; cb.cb_sid2posix = sid2posix; cb.cb_max_typelen = strlen(gettext("TYPE")); cb.cb_max_namelen = strlen(gettext("NAME")); cb.cb_max_usedlen = strlen(gettext("USED")); cb.cb_max_quotalen = strlen(gettext("QUOTA")); for (p = 0; p < 
ZFS_NUM_USERQUOTA_PROPS; p++) { if (!usprop_check(p, types, props)) continue; cb.cb_prop = p; error = zfs_userspace(zhp, p, userspace_cb, &cb); if (error) break; } if (resort_avl) { us_node_t *node; us_node_t *rmnode; uu_list_pool_t *listpool; uu_list_t *list; uu_avl_index_t idx = 0; uu_list_index_t idx2 = 0; listpool = uu_list_pool_create("tmplist", sizeof (us_node_t), offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT); list = uu_list_create(listpool, NULL, UU_DEFAULT); node = uu_avl_first(avl_tree); uu_list_node_init(node, &node->usn_listnode, listpool); while (node != NULL) { rmnode = node; node = uu_avl_next(avl_tree, node); uu_avl_remove(avl_tree, rmnode); if (uu_list_find(list, rmnode, NULL, &idx2) == NULL) { uu_list_insert(list, rmnode, idx2); } } for (node = uu_list_first(list); node != NULL; node = uu_list_next(list, node)) { us_sort_info_t sortinfo = { sortcol, cb.cb_numname }; if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL) uu_avl_insert(avl_tree, node, idx); } uu_list_destroy(list); } /* print & free node's nvlist memory */ print_us(scripted, parseable, fields, cb.cb_max_typelen, cb.cb_max_namelen, cb.cb_max_usedlen, cb.cb_max_quotalen, B_TRUE, cb.cb_avl); if (sortcol) zfs_free_sort_columns(sortcol); zfs_free_sort_columns(default_sortcol); /* * Finally, clean up the AVL tree. */ if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL) nomem(); while ((node = uu_avl_walk_next(walk)) != NULL) { uu_avl_remove(cb.cb_avl, node); free(node); } uu_avl_walk_end(walk); uu_avl_destroy(avl_tree); uu_avl_pool_destroy(avl_pool); return (error); } /* * list [-r][-d max] [-H] [-o property[,property]...] [-t type[,type]...] * [-s property [-s property]...] [-S property [-S property]...] * ... * * -r Recurse over all children * -d Limit recursion by depth. * -H Scripted mode; elide headers and separate columns by tabs * -o Control which fields to display. * -t Control which object types to display. * -s Specify sort columns, ascending order. * -S Specify sort columns, descending order. * * When given no arguments, lists all filesystems in the system. * Otherwise, list the specified datasets, optionally recursing down them if * '-r' is specified. */ typedef struct list_cbdata { boolean_t cb_first; boolean_t cb_scripted; zprop_list_t *cb_proplist; } list_cbdata_t; /* * Given a list of columns to display, output appropriate headers for each one. */ static void print_header(zprop_list_t *pl) { char headerbuf[ZFS_MAXPROPLEN]; const char *header; int i; boolean_t first = B_TRUE; boolean_t right_justify; for (; pl != NULL; pl = pl->pl_next) { if (!first) { (void) printf("  "); } else { first = B_FALSE; } right_justify = B_FALSE; if (pl->pl_prop != ZPROP_INVAL) { header = zfs_prop_column_name(pl->pl_prop); right_justify = zfs_prop_align_right(pl->pl_prop); } else { for (i = 0; pl->pl_user_prop[i] != '\0'; i++) headerbuf[i] = toupper(pl->pl_user_prop[i]); headerbuf[i] = '\0'; header = headerbuf; } if (pl->pl_next == NULL && !right_justify) (void) printf("%s", header); else if (right_justify) (void) printf("%*s", pl->pl_width, header); else (void) printf("%-*s", pl->pl_width, header); } (void) printf("\n"); } /* * Given a dataset and a list of fields, print out all the properties according * to the described layout.
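* * For example (hypothetical pool name, a sketch of typical usage), 'zfs list -o name,used,mountpoint tank' prints one such row per dataset using the widths computed for this property list.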
*/ static void print_dataset(zfs_handle_t *zhp, zprop_list_t *pl, boolean_t scripted) { boolean_t first = B_TRUE; char property[ZFS_MAXPROPLEN]; nvlist_t *userprops = zfs_get_user_props(zhp); nvlist_t *propval; char *propstr; boolean_t right_justify; int width; for (; pl != NULL; pl = pl->pl_next) { if (!first) { if (scripted) (void) printf("\t"); else (void) printf(" "); } else { first = B_FALSE; } if (pl->pl_prop == ZFS_PROP_NAME) { (void) strlcpy(property, zfs_get_name(zhp), sizeof(property)); propstr = property; right_justify = zfs_prop_align_right(pl->pl_prop); } else if (pl->pl_prop != ZPROP_INVAL) { if (zfs_prop_get(zhp, pl->pl_prop, property, sizeof (property), NULL, NULL, 0, B_FALSE) != 0) propstr = "-"; else propstr = property; right_justify = zfs_prop_align_right(pl->pl_prop); } else if (zfs_prop_userquota(pl->pl_user_prop)) { if (zfs_prop_get_userquota(zhp, pl->pl_user_prop, property, sizeof (property), B_FALSE) != 0) propstr = "-"; else propstr = property; right_justify = B_TRUE; } else if (zfs_prop_written(pl->pl_user_prop)) { if (zfs_prop_get_written(zhp, pl->pl_user_prop, property, sizeof (property), B_FALSE) != 0) propstr = "-"; else propstr = property; right_justify = B_TRUE; } else { if (nvlist_lookup_nvlist(userprops, pl->pl_user_prop, &propval) != 0) propstr = "-"; else verify(nvlist_lookup_string(propval, ZPROP_VALUE, &propstr) == 0); right_justify = B_FALSE; } width = pl->pl_width; /* * If this is being called in scripted mode, or if this is the * last column and it is left-justified, don't include a width * format specifier. */ if (scripted || (pl->pl_next == NULL && !right_justify)) (void) printf("%s", propstr); else if (right_justify) (void) printf("%*s", width, propstr); else (void) printf("%-*s", width, propstr); } (void) printf("\n"); } /* * Generic callback function to list a dataset or snapshot. 
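* * For example (hypothetical pool name), 'zfs list -r tank' drives this callback once per dataset or snapshot under tank, emitting the column header before the first row.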
*/ static int list_callback(zfs_handle_t *zhp, void *data) { list_cbdata_t *cbp = data; if (cbp->cb_first) { if (!cbp->cb_scripted) print_header(cbp->cb_proplist); cbp->cb_first = B_FALSE; } print_dataset(zhp, cbp->cb_proplist, cbp->cb_scripted); return (0); } static int zfs_do_list(int argc, char **argv) { int c; boolean_t scripted = B_FALSE; static char default_fields[] = "name,used,available,referenced,mountpoint"; int types = ZFS_TYPE_DATASET; boolean_t types_specified = B_FALSE; char *fields = NULL; list_cbdata_t cb = { 0 }; char *value; int limit = 0; int ret = 0; zfs_sort_column_t *sortcol = NULL; int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS; /* check options */ while ((c = getopt(argc, argv, ":d:o:rt:Hs:S:")) != -1) { switch (c) { case 'o': fields = optarg; break; case 'd': limit = parse_depth(optarg, &flags); break; case 'r': flags |= ZFS_ITER_RECURSE; break; case 'H': scripted = B_TRUE; break; case 's': if (zfs_add_sort_column(&sortcol, optarg, B_FALSE) != 0) { (void) fprintf(stderr, gettext("invalid property '%s'\n"), optarg); usage(B_FALSE); } break; case 'S': if (zfs_add_sort_column(&sortcol, optarg, B_TRUE) != 0) { (void) fprintf(stderr, gettext("invalid property '%s'\n"), optarg); usage(B_FALSE); } break; case 't': types = 0; types_specified = B_TRUE; flags &= ~ZFS_ITER_PROP_LISTSNAPS; while (*optarg != '\0') { static char *type_subopts[] = { "filesystem", "volume", "snapshot", "all", NULL }; switch (getsubopt(&optarg, type_subopts, &value)) { case 0: types |= ZFS_TYPE_FILESYSTEM; break; case 1: types |= ZFS_TYPE_VOLUME; break; case 2: types |= ZFS_TYPE_SNAPSHOT; break; case 3: types = ZFS_TYPE_DATASET; break; default: (void) fprintf(stderr, gettext("invalid type '%s'\n"), value); usage(B_FALSE); } } break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; if (fields == NULL) fields = default_fields; /* * If we are only going to list snapshot names and sort by name, * then we can use the faster version. */ if (strcmp(fields, "name") == 0 && zfs_sort_only_by_name(sortcol)) flags |= ZFS_ITER_SIMPLE; /* * If "-o space" and no types were specified, don't display snapshots. */ if (strcmp(fields, "space") == 0 && types_specified == B_FALSE) types &= ~ZFS_TYPE_SNAPSHOT; /* * If the user specifies '-o all', the zprop_get_list() doesn't * normally include the name of the dataset. For 'zfs list', we always * want this property to be first. */ if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET) != 0) usage(B_FALSE); cb.cb_scripted = scripted; cb.cb_first = B_TRUE; ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist, limit, list_callback, &cb); zprop_free_list(cb.cb_proplist); zfs_free_sort_columns(sortcol); if (ret == 0 && cb.cb_first && !cb.cb_scripted) (void) printf(gettext("no datasets available\n")); return (ret); } /* - * zfs rename - * zfs rename -p + * zfs rename [-f] + * zfs rename [-f] -p * zfs rename -r * zfs rename -u [-p] * * Renames the given dataset to another of the same type. * * The '-p' flag creates all the non-existing ancestors of the target first.
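* * Examples of typical usage (hypothetical dataset names, a sketch only): * zfs rename -p tank/a/b tank/x/y/b (creates tank/x and tank/x/y first) * zfs rename -f tank/home tank/users (force-unmounts as needed before renaming)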
*/ /* ARGSUSED */ static int zfs_do_rename(int argc, char **argv) { zfs_handle_t *zhp; renameflags_t flags = { 0 }; int c; int ret = 0; int types; boolean_t parents = B_FALSE; /* check options */ - while ((c = getopt(argc, argv, "pru")) != -1) { + while ((c = getopt(argc, argv, "fpru")) != -1) { switch (c) { case 'p': parents = B_TRUE; break; case 'r': flags.recurse = B_TRUE; break; case 'u': flags.nounmount = B_TRUE; break; + case 'f': + flags.forceunmount = B_TRUE; + break; case '?': default: (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing source dataset " "argument\n")); usage(B_FALSE); } if (argc < 2) { (void) fprintf(stderr, gettext("missing target dataset " "argument\n")); usage(B_FALSE); } if (argc > 2) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } if (flags.recurse && parents) { (void) fprintf(stderr, gettext("-p and -r options are mutually " "exclusive\n")); usage(B_FALSE); } if (flags.recurse && strchr(argv[0], '@') == NULL) { (void) fprintf(stderr, gettext("source dataset for recursive " "rename must be a snapshot\n")); usage(B_FALSE); } if (flags.nounmount && parents) { (void) fprintf(stderr, gettext("-u and -p options are mutually " "exclusive\n")); usage(B_FALSE); } if (flags.nounmount) types = ZFS_TYPE_FILESYSTEM; else if (parents) types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME; else types = ZFS_TYPE_DATASET; if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL) return (1); /* If we were asked and the name looks good, try to create ancestors. */ if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) && zfs_create_ancestors(g_zfs, argv[1]) != 0) { zfs_close(zhp); return (1); } ret = (zfs_rename(zhp, argv[1], flags) != 0); zfs_close(zhp); return (ret); } /* * zfs promote * * Promotes the given clone fs to be the parent. */ /* ARGSUSED */ static int zfs_do_promote(int argc, char **argv) { zfs_handle_t *zhp; int ret = 0; /* check options */ if (argc > 1 && argv[1][0] == '-') { (void) fprintf(stderr, gettext("invalid option '%c'\n"), argv[1][1]); usage(B_FALSE); } /* check number of arguments */ if (argc < 2) { (void) fprintf(stderr, gettext("missing clone filesystem" " argument\n")); usage(B_FALSE); } if (argc > 2) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) return (1); ret = (zfs_promote(zhp) != 0); zfs_close(zhp); return (ret); } /* * zfs rollback [-rRf] * * -r Delete any intervening snapshots before doing rollback * -R Delete any snapshots and their clones * -f ignored for backwards compatibility * * Given a filesystem, rollback to a specific snapshot, discarding any changes * since then and making it the active dataset. If more recent snapshots exist, * the command will complain unless the '-r' flag is given. */ typedef struct rollback_cbdata { uint64_t cb_create; boolean_t cb_first; int cb_doclones; char *cb_target; int cb_error; boolean_t cb_recurse; boolean_t cb_dependent; } rollback_cbdata_t; /* * Report any snapshots more recent than the one specified. Used when '-r' is * not specified. We reuse this same callback for the snapshot dependents - if * 'cb_dependent' is set, then this is a dependent and we should report it * without checking the transaction group.
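* * For example (hypothetical names), rolling tank/home back to @monday while a later @tuesday exists reports @tuesday here and fails unless '-r' is given.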
*/ static int rollback_check(zfs_handle_t *zhp, void *data) { rollback_cbdata_t *cbp = data; if (cbp->cb_doclones) { zfs_close(zhp); return (0); } if (!cbp->cb_dependent) { if (strcmp(zfs_get_name(zhp), cbp->cb_target) != 0 && zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT && zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) { if (cbp->cb_first && !cbp->cb_recurse) { (void) fprintf(stderr, gettext("cannot " "rollback to '%s': more recent snapshots " "exist\n"), cbp->cb_target); (void) fprintf(stderr, gettext("use '-r' to " "force deletion of the following " "snapshots:\n")); cbp->cb_first = 0; cbp->cb_error = 1; } if (cbp->cb_recurse) { cbp->cb_dependent = B_TRUE; if (zfs_iter_dependents(zhp, B_TRUE, rollback_check, cbp) != 0) { zfs_close(zhp); return (-1); } cbp->cb_dependent = B_FALSE; } else { (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); } } } else { if (cbp->cb_first && cbp->cb_recurse) { (void) fprintf(stderr, gettext("cannot rollback to " "'%s': clones of previous snapshots exist\n"), cbp->cb_target); (void) fprintf(stderr, gettext("use '-R' to " "force deletion of the following clones and " "dependents:\n")); cbp->cb_first = 0; cbp->cb_error = 1; } (void) fprintf(stderr, "%s\n", zfs_get_name(zhp)); } zfs_close(zhp); return (0); } static int zfs_do_rollback(int argc, char **argv) { int ret = 0; int c; boolean_t force = B_FALSE; rollback_cbdata_t cb = { 0 }; zfs_handle_t *zhp, *snap; char parentname[ZFS_MAXNAMELEN]; char *delim; /* check options */ while ((c = getopt(argc, argv, "rRf")) != -1) { switch (c) { case 'r': cb.cb_recurse = 1; break; case 'R': cb.cb_recurse = 1; cb.cb_doclones = 1; break; case 'f': force = B_TRUE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing dataset argument\n")); usage(B_FALSE); } if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } /* open the snapshot */ if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) return (1); /* open the parent dataset */ (void) strlcpy(parentname, argv[0], sizeof (parentname)); verify((delim = strrchr(parentname, '@')) != NULL); *delim = '\0'; if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) { zfs_close(snap); return (1); } /* * Check for more recent snapshots and/or clones based on the presence * of '-r' and '-R'. */ cb.cb_target = argv[0]; cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG); cb.cb_first = B_TRUE; cb.cb_error = 0; if ((ret = zfs_iter_children(zhp, rollback_check, &cb)) != 0) goto out; if ((ret = cb.cb_error) != 0) goto out; /* * Rollback parent to the given snapshot. */ ret = zfs_rollback(zhp, snap, force); out: zfs_close(snap); zfs_close(zhp); if (ret == 0) return (0); else return (1); } /* * zfs set property=value { fs | snap | vol } ... * * Sets the given property for all datasets specified on the command line. 
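* * Example of typical usage (hypothetical dataset names): * zfs set compression=on tank/home tank/ports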
*/ typedef struct set_cbdata { char *cb_propname; char *cb_value; } set_cbdata_t; static int set_callback(zfs_handle_t *zhp, void *data) { set_cbdata_t *cbp = data; if (zfs_prop_set(zhp, cbp->cb_propname, cbp->cb_value) != 0) { switch (libzfs_errno(g_zfs)) { case EZFS_MOUNTFAILED: (void) fprintf(stderr, gettext("property may be set " "but unable to remount filesystem\n")); break; case EZFS_SHARENFSFAILED: (void) fprintf(stderr, gettext("property may be set " "but unable to reshare filesystem\n")); break; } return (1); } return (0); } static int zfs_do_set(int argc, char **argv) { set_cbdata_t cb; int ret = 0; /* check for options */ if (argc > 1 && argv[1][0] == '-') { (void) fprintf(stderr, gettext("invalid option '%c'\n"), argv[1][1]); usage(B_FALSE); } /* check number of arguments */ if (argc < 2) { (void) fprintf(stderr, gettext("missing property=value " "argument\n")); usage(B_FALSE); } if (argc < 3) { (void) fprintf(stderr, gettext("missing dataset name\n")); usage(B_FALSE); } /* validate property=value argument */ cb.cb_propname = argv[1]; if (((cb.cb_value = strchr(cb.cb_propname, '=')) == NULL) || (cb.cb_value[1] == '\0')) { (void) fprintf(stderr, gettext("missing value in " "property=value argument\n")); usage(B_FALSE); } *cb.cb_value = '\0'; cb.cb_value++; if (*cb.cb_propname == '\0') { (void) fprintf(stderr, gettext("missing property in property=value argument\n")); usage(B_FALSE); } ret = zfs_for_each(argc - 2, argv + 2, 0, ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, &cb); return (ret); } /* * zfs snapshot [-r] [-o prop=value] ... * * Creates a snapshot with the given name. While functionally equivalent to * 'zfs create', it is a separate command to differentiate intent. */ static int zfs_do_snapshot(int argc, char **argv) { boolean_t recursive = B_FALSE; int ret = 0; int c; nvlist_t *props; if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) nomem(); /* check options */ while ((c = getopt(argc, argv, "ro:")) != -1) { switch (c) { case 'o': if (parseprop(props)) return (1); break; case 'r': recursive = B_TRUE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); goto usage; } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing snapshot argument\n")); goto usage; } if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); goto usage; } ret = zfs_snapshot(g_zfs, argv[0], recursive, props); nvlist_free(props); if (ret && recursive) (void) fprintf(stderr, gettext("no snapshots were created\n")); return (ret != 0); usage: nvlist_free(props); usage(B_FALSE); return (-1); } /* * Send a backup stream to stdout.
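* * Examples of typical usage (hypothetical names, a sketch only): * zfs send tank/home@monday > /backup/monday.stream * zfs send -i @monday tank/home@tuesday | ssh host zfs receive tank/home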
*/ static int zfs_do_send(int argc, char **argv) { char *fromname = NULL; char *toname = NULL; char *cp; zfs_handle_t *zhp; sendflags_t flags = { 0 }; int c, err; nvlist_t *dbgnv = NULL; boolean_t extraverbose = B_FALSE; /* check options */ while ((c = getopt(argc, argv, ":i:I:RDpvnP")) != -1) { switch (c) { case 'i': if (fromname) usage(B_FALSE); fromname = optarg; break; case 'I': if (fromname) usage(B_FALSE); fromname = optarg; flags.doall = B_TRUE; break; case 'R': flags.replicate = B_TRUE; break; case 'p': flags.props = B_TRUE; break; case 'P': flags.parsable = B_TRUE; flags.verbose = B_TRUE; break; case 'v': if (flags.verbose) extraverbose = B_TRUE; flags.verbose = B_TRUE; + flags.progress = B_TRUE; break; case 'D': flags.dedup = B_TRUE; break; case 'n': flags.dryrun = B_TRUE; break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing snapshot argument\n")); usage(B_FALSE); } if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } if (!flags.dryrun && isatty(STDOUT_FILENO)) { (void) fprintf(stderr, gettext("Error: Stream can not be written to a terminal.\n" "You must redirect standard output.\n")); return (1); } cp = strchr(argv[0], '@'); if (cp == NULL) { (void) fprintf(stderr, gettext("argument must be a snapshot\n")); usage(B_FALSE); } *cp = '\0'; toname = cp + 1; zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) return (1); /* * If they specified the full path to the snapshot, chop off * everything except the short name of the snapshot, but special * case if they specify the origin. */ if (fromname && (cp = strchr(fromname, '@')) != NULL) { char origin[ZFS_MAXNAMELEN]; zprop_source_t src; (void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN, origin, sizeof (origin), &src, NULL, 0, B_FALSE); if (strcmp(origin, fromname) == 0) { fromname = NULL; flags.fromorigin = B_TRUE; } else { *cp = '\0'; if (cp != fromname && strcmp(argv[0], fromname)) { (void) fprintf(stderr, gettext("incremental source must be " "in same filesystem\n")); usage(B_FALSE); } fromname = cp + 1; if (strchr(fromname, '@') || strchr(fromname, '/')) { (void) fprintf(stderr, gettext("invalid incremental source\n")); usage(B_FALSE); } } } if (flags.replicate && fromname == NULL) flags.doall = B_TRUE; err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO, NULL, 0, extraverbose ? &dbgnv : NULL); if (extraverbose && dbgnv != NULL) { /* * dump_nvlist prints to stdout, but that's been * redirected to a file. Make it print to stderr * instead. */ (void) dup2(STDERR_FILENO, STDOUT_FILENO); dump_nvlist(dbgnv, 0); nvlist_free(dbgnv); } zfs_close(zhp); return (err != 0); } /* * zfs receive [-vnFu] [-d | -e] * * Restore a backup stream from stdin. 
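* * Example of typical usage (hypothetical names): * zfs receive -d tank < /backup/monday.stream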
*/ static int zfs_do_receive(int argc, char **argv) { int c, err; recvflags_t flags = { 0 }; /* check options */ while ((c = getopt(argc, argv, ":denuvF")) != -1) { switch (c) { case 'd': flags.isprefix = B_TRUE; break; case 'e': flags.isprefix = B_TRUE; flags.istail = B_TRUE; break; case 'n': flags.dryrun = B_TRUE; break; case 'u': flags.nomount = B_TRUE; break; case 'v': flags.verbose = B_TRUE; break; case 'F': flags.force = B_TRUE; break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) { (void) fprintf(stderr, gettext("missing snapshot argument\n")); usage(B_FALSE); } if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } if (isatty(STDIN_FILENO)) { (void) fprintf(stderr, gettext("Error: Backup stream can not be read " "from a terminal.\n" "You must redirect standard input.\n")); return (1); } err = zfs_receive(g_zfs, argv[0], &flags, STDIN_FILENO, NULL); return (err != 0); } /* * allow/unallow stuff */ /* copied from zfs/sys/dsl_deleg.h */ #define ZFS_DELEG_PERM_CREATE "create" #define ZFS_DELEG_PERM_DESTROY "destroy" #define ZFS_DELEG_PERM_SNAPSHOT "snapshot" #define ZFS_DELEG_PERM_ROLLBACK "rollback" #define ZFS_DELEG_PERM_CLONE "clone" #define ZFS_DELEG_PERM_PROMOTE "promote" #define ZFS_DELEG_PERM_RENAME "rename" #define ZFS_DELEG_PERM_MOUNT "mount" #define ZFS_DELEG_PERM_SHARE "share" #define ZFS_DELEG_PERM_SEND "send" #define ZFS_DELEG_PERM_RECEIVE "receive" #define ZFS_DELEG_PERM_ALLOW "allow" #define ZFS_DELEG_PERM_USERPROP "userprop" #define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? 
*/ #define ZFS_DELEG_PERM_USERQUOTA "userquota" #define ZFS_DELEG_PERM_GROUPQUOTA "groupquota" #define ZFS_DELEG_PERM_USERUSED "userused" #define ZFS_DELEG_PERM_GROUPUSED "groupused" #define ZFS_DELEG_PERM_HOLD "hold" #define ZFS_DELEG_PERM_RELEASE "release" #define ZFS_DELEG_PERM_DIFF "diff" #define ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = { { ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW }, { ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE }, { ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE }, { ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY }, { ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF}, { ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD }, { ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT }, { ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE }, { ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE }, { ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE }, { ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME }, { ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK }, { ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND }, { ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE }, { ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT }, { ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA }, { ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED }, { ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP }, { ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA }, { ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED }, { NULL, ZFS_DELEG_NOTE_NONE } }; /* permission structure */ typedef struct deleg_perm { zfs_deleg_who_type_t dp_who_type; const char *dp_name; boolean_t dp_local; boolean_t dp_descend; } deleg_perm_t; /* */ typedef struct deleg_perm_node { deleg_perm_t dpn_perm; uu_avl_node_t dpn_avl_node; } deleg_perm_node_t; typedef struct fs_perm fs_perm_t; /* permissions set */ typedef struct who_perm { zfs_deleg_who_type_t who_type; const char *who_name; /* id */ char who_ug_name[256]; /* user/group name */ fs_perm_t *who_fsperm; /* uplink */ uu_avl_t *who_deleg_perm_avl; /* permissions */ } who_perm_t; /* */ typedef struct who_perm_node { who_perm_t who_perm; uu_avl_node_t who_avl_node; } who_perm_node_t; typedef struct fs_perm_set fs_perm_set_t; /* fs permissions */ struct fs_perm { const char *fsp_name; uu_avl_t *fsp_sc_avl; /* sets,create */ uu_avl_t *fsp_uge_avl; /* user,group,everyone */ fs_perm_set_t *fsp_set; /* uplink */ }; /* */ typedef struct fs_perm_node { fs_perm_t fspn_fsperm; uu_avl_t *fspn_avl; uu_list_node_t fspn_list_node; } fs_perm_node_t; /* top level structure */ struct fs_perm_set { uu_list_pool_t *fsps_list_pool; uu_list_t *fsps_list; /* list of fs_perms */ uu_avl_pool_t *fsps_named_set_avl_pool; uu_avl_pool_t *fsps_who_perm_avl_pool; uu_avl_pool_t *fsps_deleg_perm_avl_pool; }; static inline const char * deleg_perm_type(zfs_deleg_note_t note) { /* subcommands */ switch (note) { /* SUBCOMMANDS */ /* OTHER */ case ZFS_DELEG_NOTE_GROUPQUOTA: case ZFS_DELEG_NOTE_GROUPUSED: case ZFS_DELEG_NOTE_USERPROP: case ZFS_DELEG_NOTE_USERQUOTA: case ZFS_DELEG_NOTE_USERUSED: /* other */ return (gettext("other")); default: return (gettext("subcommand")); } } static int inline who_type2weight(zfs_deleg_who_type_t who_type) { int res; switch (who_type) { case ZFS_DELEG_NAMED_SET_SETS: case ZFS_DELEG_NAMED_SET: res = 0; break; case ZFS_DELEG_CREATE_SETS: case ZFS_DELEG_CREATE: res = 1; break; case ZFS_DELEG_USER_SETS: case ZFS_DELEG_USER: res = 2; break; case ZFS_DELEG_GROUP_SETS: case ZFS_DELEG_GROUP: res = 3; break; case ZFS_DELEG_EVERYONE_SETS: case ZFS_DELEG_EVERYONE: res = 4; break; default: res = -1; } 
return (res); } /* ARGSUSED */ static int who_perm_compare(const void *larg, const void *rarg, void *unused) { const who_perm_node_t *l = larg; const who_perm_node_t *r = rarg; zfs_deleg_who_type_t ltype = l->who_perm.who_type; zfs_deleg_who_type_t rtype = r->who_perm.who_type; int lweight = who_type2weight(ltype); int rweight = who_type2weight(rtype); int res = lweight - rweight; if (res == 0) res = strncmp(l->who_perm.who_name, r->who_perm.who_name, ZFS_MAX_DELEG_NAME-1); if (res == 0) return (0); if (res > 0) return (1); else return (-1); } /* ARGSUSED */ static int deleg_perm_compare(const void *larg, const void *rarg, void *unused) { const deleg_perm_node_t *l = larg; const deleg_perm_node_t *r = rarg; int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name, ZFS_MAX_DELEG_NAME-1); if (res == 0) return (0); if (res > 0) return (1); else return (-1); } static inline void fs_perm_set_init(fs_perm_set_t *fspset) { bzero(fspset, sizeof (fs_perm_set_t)); if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool", sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node), NULL, UU_DEFAULT)) == NULL) nomem(); if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL, UU_DEFAULT)) == NULL) nomem(); if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create( "named_set_avl_pool", sizeof (who_perm_node_t), offsetof( who_perm_node_t, who_avl_node), who_perm_compare, UU_DEFAULT)) == NULL) nomem(); if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create( "who_perm_avl_pool", sizeof (who_perm_node_t), offsetof( who_perm_node_t, who_avl_node), who_perm_compare, UU_DEFAULT)) == NULL) nomem(); if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create( "deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof( deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT)) == NULL) nomem(); } static inline void fs_perm_fini(fs_perm_t *); static inline void who_perm_fini(who_perm_t *); static inline void fs_perm_set_fini(fs_perm_set_t *fspset) { fs_perm_node_t *node = uu_list_first(fspset->fsps_list); while (node != NULL) { fs_perm_node_t *next_node = uu_list_next(fspset->fsps_list, node); fs_perm_t *fsperm = &node->fspn_fsperm; fs_perm_fini(fsperm); uu_list_remove(fspset->fsps_list, node); free(node); node = next_node; } uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool); uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool); uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool); } static inline void deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type, const char *name) { deleg_perm->dp_who_type = type; deleg_perm->dp_name = name; } static inline void who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm, zfs_deleg_who_type_t type, const char *name) { uu_avl_pool_t *pool; pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool; bzero(who_perm, sizeof (who_perm_t)); if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL) nomem(); who_perm->who_type = type; who_perm->who_name = name; who_perm->who_fsperm = fsperm; } static inline void who_perm_fini(who_perm_t *who_perm) { deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl); while (node != NULL) { deleg_perm_node_t *next_node = uu_avl_next(who_perm->who_deleg_perm_avl, node); uu_avl_remove(who_perm->who_deleg_perm_avl, node); free(node); node = next_node; } uu_avl_destroy(who_perm->who_deleg_perm_avl); } static inline void fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname) { uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool; 
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool; bzero(fsperm, sizeof (fs_perm_t)); if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT)) == NULL) nomem(); if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT)) == NULL) nomem(); fsperm->fsp_set = fspset; fsperm->fsp_name = fsname; } static inline void fs_perm_fini(fs_perm_t *fsperm) { who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl); while (node != NULL) { who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl, node); who_perm_t *who_perm = &node->who_perm; who_perm_fini(who_perm); uu_avl_remove(fsperm->fsp_sc_avl, node); free(node); node = next_node; } node = uu_avl_first(fsperm->fsp_uge_avl); while (node != NULL) { who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl, node); who_perm_t *who_perm = &node->who_perm; who_perm_fini(who_perm); uu_avl_remove(fsperm->fsp_uge_avl, node); free(node); node = next_node; } uu_avl_destroy(fsperm->fsp_sc_avl); uu_avl_destroy(fsperm->fsp_uge_avl); } static void inline set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node, zfs_deleg_who_type_t who_type, const char *name, char locality) { uu_avl_index_t idx = 0; deleg_perm_node_t *found_node = NULL; deleg_perm_t *deleg_perm = &node->dpn_perm; deleg_perm_init(deleg_perm, who_type, name); if ((found_node = uu_avl_find(avl, node, NULL, &idx)) == NULL) uu_avl_insert(avl, node, idx); else { node = found_node; deleg_perm = &node->dpn_perm; } switch (locality) { case ZFS_DELEG_LOCAL: deleg_perm->dp_local = B_TRUE; break; case ZFS_DELEG_DESCENDENT: deleg_perm->dp_descend = B_TRUE; break; case ZFS_DELEG_NA: break; default: assert(B_FALSE); /* invalid locality */ } } static inline int parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality) { nvpair_t *nvp = NULL; fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set; uu_avl_t *avl = who_perm->who_deleg_perm_avl; zfs_deleg_who_type_t who_type = who_perm->who_type; while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { const char *name = nvpair_name(nvp); data_type_t type = nvpair_type(nvp); uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool; deleg_perm_node_t *node = safe_malloc(sizeof (deleg_perm_node_t)); assert(type == DATA_TYPE_BOOLEAN); uu_avl_node_init(node, &node->dpn_avl_node, avl_pool); set_deleg_perm_node(avl, node, who_type, name, locality); } return (0); } static inline int parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl) { nvpair_t *nvp = NULL; fs_perm_set_t *fspset = fsperm->fsp_set; while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { nvlist_t *nvl2 = NULL; const char *name = nvpair_name(nvp); uu_avl_t *avl = NULL; uu_avl_pool_t *avl_pool; zfs_deleg_who_type_t perm_type = name[0]; char perm_locality = name[1]; const char *perm_name = name + 3; boolean_t is_set = B_TRUE; who_perm_t *who_perm = NULL; assert('$' == name[2]); if (nvpair_value_nvlist(nvp, &nvl2) != 0) return (-1); switch (perm_type) { case ZFS_DELEG_CREATE: case ZFS_DELEG_CREATE_SETS: case ZFS_DELEG_NAMED_SET: case ZFS_DELEG_NAMED_SET_SETS: avl_pool = fspset->fsps_named_set_avl_pool; avl = fsperm->fsp_sc_avl; break; case ZFS_DELEG_USER: case ZFS_DELEG_USER_SETS: case ZFS_DELEG_GROUP: case ZFS_DELEG_GROUP_SETS: case ZFS_DELEG_EVERYONE: case ZFS_DELEG_EVERYONE_SETS: avl_pool = fspset->fsps_who_perm_avl_pool; avl = fsperm->fsp_uge_avl; break; } if (is_set) { who_perm_node_t *found_node = NULL; who_perm_node_t *node = safe_malloc( sizeof (who_perm_node_t)); who_perm = &node->who_perm; uu_avl_index_t idx = 0; uu_avl_node_init(node, &node->who_avl_node, 
avl_pool); who_perm_init(who_perm, fsperm, perm_type, perm_name); if ((found_node = uu_avl_find(avl, node, NULL, &idx)) == NULL) { if (avl == fsperm->fsp_uge_avl) { uid_t rid = 0; struct passwd *p = NULL; struct group *g = NULL; const char *nice_name = NULL; switch (perm_type) { case ZFS_DELEG_USER_SETS: case ZFS_DELEG_USER: rid = atoi(perm_name); p = getpwuid(rid); if (p) nice_name = p->pw_name; break; case ZFS_DELEG_GROUP_SETS: case ZFS_DELEG_GROUP: rid = atoi(perm_name); g = getgrgid(rid); if (g) nice_name = g->gr_name; break; } if (nice_name != NULL) (void) strlcpy( node->who_perm.who_ug_name, nice_name, 256); } uu_avl_insert(avl, node, idx); } else { node = found_node; who_perm = &node->who_perm; } } (void) parse_who_perm(who_perm, nvl2, perm_locality); } return (0); } static inline int parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl) { nvpair_t *nvp = NULL; uu_avl_index_t idx = 0; while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { nvlist_t *nvl2 = NULL; const char *fsname = nvpair_name(nvp); data_type_t type = nvpair_type(nvp); fs_perm_t *fsperm = NULL; fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t)); if (node == NULL) nomem(); fsperm = &node->fspn_fsperm; assert(DATA_TYPE_NVLIST == type); uu_list_node_init(node, &node->fspn_list_node, fspset->fsps_list_pool); idx = uu_list_numnodes(fspset->fsps_list); fs_perm_init(fsperm, fspset, fsname); if (nvpair_value_nvlist(nvp, &nvl2) != 0) return (-1); (void) parse_fs_perm(fsperm, nvl2); uu_list_insert(fspset->fsps_list, node, idx); } return (0); } static inline const char * deleg_perm_comment(zfs_deleg_note_t note) { const char *str = ""; /* subcommands */ switch (note) { /* SUBCOMMANDS */ case ZFS_DELEG_NOTE_ALLOW: str = gettext("Must also have the permission that is being" "\n\t\t\t\tallowed"); break; case ZFS_DELEG_NOTE_CLONE: str = gettext("Must also have the 'create' ability and 'mount'" "\n\t\t\t\tability in the origin file system"); break; case ZFS_DELEG_NOTE_CREATE: str = gettext("Must also have the 'mount' ability"); break; case ZFS_DELEG_NOTE_DESTROY: str = gettext("Must also have the 'mount' ability"); break; case ZFS_DELEG_NOTE_DIFF: str = gettext("Allows lookup of paths within a dataset;" "\n\t\t\t\tgiven an object number. Ordinary users need this" "\n\t\t\t\tin order to use zfs diff"); break; case ZFS_DELEG_NOTE_HOLD: str = gettext("Allows adding a user hold to a snapshot"); break; case ZFS_DELEG_NOTE_MOUNT: str = gettext("Allows mount/umount of ZFS datasets"); break; case ZFS_DELEG_NOTE_PROMOTE: str = gettext("Must also have the 'mount'\n\t\t\t\tand" " 'promote' ability in the origin file system"); break; case ZFS_DELEG_NOTE_RECEIVE: str = gettext("Must also have the 'mount' and 'create'" " ability"); break; case ZFS_DELEG_NOTE_RELEASE: str = gettext("Allows releasing a user hold which\n\t\t\t\t" "might destroy the snapshot"); break; case ZFS_DELEG_NOTE_RENAME: str = gettext("Must also have the 'mount' and 'create'" "\n\t\t\t\tability in the new parent"); break; case ZFS_DELEG_NOTE_ROLLBACK: str = gettext(""); break; case ZFS_DELEG_NOTE_SEND: str = gettext(""); break; case ZFS_DELEG_NOTE_SHARE: str = gettext("Allows sharing file systems over NFS or SMB" "\n\t\t\t\tprotocols"); break; case ZFS_DELEG_NOTE_SNAPSHOT: str = gettext(""); break; /* * case ZFS_DELEG_NOTE_VSCAN: * str = gettext(""); * break; */ /* OTHER */ case ZFS_DELEG_NOTE_GROUPQUOTA: str = gettext("Allows accessing any groupquota@... property"); break; case ZFS_DELEG_NOTE_GROUPUSED: str = gettext("Allows reading any groupused@... 
property"); break; case ZFS_DELEG_NOTE_USERPROP: str = gettext("Allows changing any user property"); break; case ZFS_DELEG_NOTE_USERQUOTA: str = gettext("Allows accessing any userquota@... property"); break; case ZFS_DELEG_NOTE_USERUSED: str = gettext("Allows reading any userused@... property"); break; /* other */ default: str = ""; } return (str); } struct allow_opts { boolean_t local; boolean_t descend; boolean_t user; boolean_t group; boolean_t everyone; boolean_t create; boolean_t set; boolean_t recursive; /* unallow only */ boolean_t prt_usage; boolean_t prt_perms; char *who; char *perms; const char *dataset; }; static inline int prop_cmp(const void *a, const void *b) { const char *str1 = *(const char **)a; const char *str2 = *(const char **)b; return (strcmp(str1, str2)); } static void allow_usage(boolean_t un, boolean_t requested, const char *msg) { const char *opt_desc[] = { "-h", gettext("show this help message and exit"), "-l", gettext("set permission locally"), "-d", gettext("set permission for descents"), "-u", gettext("set permission for user"), "-g", gettext("set permission for group"), "-e", gettext("set permission for everyone"), "-c", gettext("set create time permission"), "-s", gettext("define permission set"), /* unallow only */ "-r", gettext("remove permissions recursively"), }; size_t unallow_size = sizeof (opt_desc) / sizeof (char *); size_t allow_size = unallow_size - 2; const char *props[ZFS_NUM_PROPS]; int i; size_t count = 0; FILE *fp = requested ? stdout : stderr; zprop_desc_t *pdtbl = zfs_prop_get_table(); const char *fmt = gettext("%-16s %-14s\t%s\n"); (void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW : HELP_ALLOW)); (void) fprintf(fp, gettext("Options:\n")); for (i = 0; i < (un ? unallow_size : allow_size); i++) { const char *opt = opt_desc[i++]; const char *optdsc = opt_desc[i]; (void) fprintf(fp, gettext(" %-10s %s\n"), opt, optdsc); } (void) fprintf(fp, gettext("\nThe following permissions are " "supported:\n\n")); (void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"), gettext("NOTES")); for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) { const char *perm_name = zfs_deleg_perm_tbl[i].z_perm; zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note; const char *perm_type = deleg_perm_type(perm_note); const char *perm_comment = deleg_perm_comment(perm_note); (void) fprintf(fp, fmt, perm_name, perm_type, perm_comment); } for (i = 0; i < ZFS_NUM_PROPS; i++) { zprop_desc_t *pd = &pdtbl[i]; if (pd->pd_visible != B_TRUE) continue; if (pd->pd_attr == PROP_READONLY) continue; props[count++] = pd->pd_name; } props[count] = NULL; qsort(props, count, sizeof (char *), prop_cmp); for (i = 0; i < count; i++) (void) fprintf(fp, fmt, props[i], gettext("property"), ""); if (msg != NULL) (void) fprintf(fp, gettext("\nzfs: error: %s"), msg); exit(requested ? 0 : 2); } static inline const char * munge_args(int argc, char **argv, boolean_t un, size_t expected_argc, char **permsp) { if (un && argc == expected_argc - 1) *permsp = NULL; else if (argc == expected_argc) *permsp = argv[argc - 2]; else allow_usage(un, B_FALSE, gettext("wrong number of parameters\n")); return (argv[argc - 1]); } static void parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts) { int uge_sum = opts->user + opts->group + opts->everyone; int csuge_sum = opts->create + opts->set + uge_sum; int ldcsuge_sum = csuge_sum + opts->local + opts->descend; int all_sum = un ? 
ldcsuge_sum + opts->recursive : ldcsuge_sum; if (uge_sum > 1) allow_usage(un, B_FALSE, gettext("-u, -g, and -e are mutually exclusive\n")); if (opts->prt_usage) if (argc == 0 && all_sum == 0) allow_usage(un, B_TRUE, NULL); else usage(B_FALSE); if (opts->set) { if (csuge_sum > 1) allow_usage(un, B_FALSE, gettext("invalid options combined with -s\n")); opts->dataset = munge_args(argc, argv, un, 3, &opts->perms); if (argv[0][0] != '@') allow_usage(un, B_FALSE, gettext("invalid set name: missing '@' prefix\n")); opts->who = argv[0]; } else if (opts->create) { if (ldcsuge_sum > 1) allow_usage(un, B_FALSE, gettext("invalid options combined with -c\n")); opts->dataset = munge_args(argc, argv, un, 2, &opts->perms); } else if (opts->everyone) { if (csuge_sum > 1) allow_usage(un, B_FALSE, gettext("invalid options combined with -e\n")); opts->dataset = munge_args(argc, argv, un, 2, &opts->perms); } else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone") == 0) { opts->everyone = B_TRUE; argc--; argv++; opts->dataset = munge_args(argc, argv, un, 2, &opts->perms); } else if (argc == 1 && !un) { opts->prt_perms = B_TRUE; opts->dataset = argv[argc-1]; } else { opts->dataset = munge_args(argc, argv, un, 3, &opts->perms); opts->who = argv[0]; } if (!opts->local && !opts->descend) { opts->local = B_TRUE; opts->descend = B_TRUE; } } static void store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend, const char *who, char *perms, nvlist_t *top_nvl) { int i; char ld[2] = { '\0', '\0' }; char who_buf[ZFS_MAXNAMELEN+32]; char base_type; char set_type; nvlist_t *base_nvl = NULL; nvlist_t *set_nvl = NULL; nvlist_t *nvl; if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0) nomem(); if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) != 0) nomem(); switch (type) { case ZFS_DELEG_NAMED_SET_SETS: case ZFS_DELEG_NAMED_SET: set_type = ZFS_DELEG_NAMED_SET_SETS; base_type = ZFS_DELEG_NAMED_SET; ld[0] = ZFS_DELEG_NA; break; case ZFS_DELEG_CREATE_SETS: case ZFS_DELEG_CREATE: set_type = ZFS_DELEG_CREATE_SETS; base_type = ZFS_DELEG_CREATE; ld[0] = ZFS_DELEG_NA; break; case ZFS_DELEG_USER_SETS: case ZFS_DELEG_USER: set_type = ZFS_DELEG_USER_SETS; base_type = ZFS_DELEG_USER; if (local) ld[0] = ZFS_DELEG_LOCAL; if (descend) ld[1] = ZFS_DELEG_DESCENDENT; break; case ZFS_DELEG_GROUP_SETS: case ZFS_DELEG_GROUP: set_type = ZFS_DELEG_GROUP_SETS; base_type = ZFS_DELEG_GROUP; if (local) ld[0] = ZFS_DELEG_LOCAL; if (descend) ld[1] = ZFS_DELEG_DESCENDENT; break; case ZFS_DELEG_EVERYONE_SETS: case ZFS_DELEG_EVERYONE: set_type = ZFS_DELEG_EVERYONE_SETS; base_type = ZFS_DELEG_EVERYONE; if (local) ld[0] = ZFS_DELEG_LOCAL; if (descend) ld[1] = ZFS_DELEG_DESCENDENT; } if (perms != NULL) { char *curr = perms; char *end = curr + strlen(perms); while (curr < end) { char *delim = strchr(curr, ','); if (delim == NULL) delim = end; else *delim = '\0'; if (curr[0] == '@') nvl = set_nvl; else nvl = base_nvl; (void) nvlist_add_boolean(nvl, curr); if (delim != end) *delim = ','; curr = delim + 1; } for (i = 0; i < 2; i++) { char locality = ld[i]; if (locality == 0) continue; if (!nvlist_empty(base_nvl)) { if (who != NULL) (void) snprintf(who_buf, sizeof (who_buf), "%c%c$%s", base_type, locality, who); else (void) snprintf(who_buf, sizeof (who_buf), "%c%c$", base_type, locality); (void) nvlist_add_nvlist(top_nvl, who_buf, base_nvl); } if (!nvlist_empty(set_nvl)) { if (who != NULL) (void) snprintf(who_buf, sizeof (who_buf), "%c%c$%s", set_type, locality, who); else (void) snprintf(who_buf, sizeof (who_buf), "%c%c$", set_type, 
locality); (void) nvlist_add_nvlist(top_nvl, who_buf, set_nvl); } } } else { for (i = 0; i < 2; i++) { char locality = ld[i]; if (locality == 0) continue; if (who != NULL) (void) snprintf(who_buf, sizeof (who_buf), "%c%c$%s", base_type, locality, who); else (void) snprintf(who_buf, sizeof (who_buf), "%c%c$", base_type, locality); (void) nvlist_add_boolean(top_nvl, who_buf); if (who != NULL) (void) snprintf(who_buf, sizeof (who_buf), "%c%c$%s", set_type, locality, who); else (void) snprintf(who_buf, sizeof (who_buf), "%c%c$", set_type, locality); (void) nvlist_add_boolean(top_nvl, who_buf); } } } static int construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp) { if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0) nomem(); if (opts->set) { store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local, opts->descend, opts->who, opts->perms, *nvlp); } else if (opts->create) { store_allow_perm(ZFS_DELEG_CREATE, opts->local, opts->descend, NULL, opts->perms, *nvlp); } else if (opts->everyone) { store_allow_perm(ZFS_DELEG_EVERYONE, opts->local, opts->descend, NULL, opts->perms, *nvlp); } else { char *curr = opts->who; char *end = curr + strlen(curr); while (curr < end) { const char *who; zfs_deleg_who_type_t who_type; char *endch; char *delim = strchr(curr, ','); char errbuf[256]; char id[64]; struct passwd *p = NULL; struct group *g = NULL; uid_t rid; if (delim == NULL) delim = end; else *delim = '\0'; rid = (uid_t)strtol(curr, &endch, 0); if (opts->user) { who_type = ZFS_DELEG_USER; if (*endch != '\0') p = getpwnam(curr); else p = getpwuid(rid); if (p != NULL) rid = p->pw_uid; else { (void) snprintf(errbuf, 256, gettext( "invalid user %s"), curr); allow_usage(un, B_TRUE, errbuf); } } else if (opts->group) { who_type = ZFS_DELEG_GROUP; if (*endch != '\0') g = getgrnam(curr); else g = getgrgid(rid); if (g != NULL) rid = g->gr_gid; else { (void) snprintf(errbuf, 256, gettext( "invalid group %s"), curr); allow_usage(un, B_TRUE, errbuf); } } else { if (*endch != '\0') { p = getpwnam(curr); } else { p = getpwuid(rid); } if (p == NULL) if (*endch != '\0') { g = getgrnam(curr); } else { g = getgrgid(rid); } if (p != NULL) { who_type = ZFS_DELEG_USER; rid = p->pw_uid; } else if (g != NULL) { who_type = ZFS_DELEG_GROUP; rid = g->gr_gid; } else { (void) snprintf(errbuf, 256, gettext( "invalid user/group %s"), curr); allow_usage(un, B_TRUE, errbuf); } } (void) sprintf(id, "%u", rid); who = id; store_allow_perm(who_type, opts->local, opts->descend, who, opts->perms, *nvlp); curr = delim + 1; } } return (0); } static void print_set_creat_perms(uu_avl_t *who_avl) { const char *sc_title[] = { gettext("Permission sets:\n"), gettext("Create time permissions:\n"), NULL }; const char **title_ptr = sc_title; who_perm_node_t *who_node = NULL; int prev_weight = -1; for (who_node = uu_avl_first(who_avl); who_node != NULL; who_node = uu_avl_next(who_avl, who_node)) { uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl; zfs_deleg_who_type_t who_type = who_node->who_perm.who_type; const char *who_name = who_node->who_perm.who_name; int weight = who_type2weight(who_type); boolean_t first = B_TRUE; deleg_perm_node_t *deleg_node; if (prev_weight != weight) { (void) printf(*title_ptr++); prev_weight = weight; } if (who_name == NULL || strnlen(who_name, 1) == 0) (void) printf("\t"); else (void) printf("\t%s ", who_name); for (deleg_node = uu_avl_first(avl); deleg_node != NULL; deleg_node = uu_avl_next(avl, deleg_node)) { if (first) { (void) printf("%s", deleg_node->dpn_perm.dp_name); first = B_FALSE; } else (void) 
printf(",%s", deleg_node->dpn_perm.dp_name); } (void) printf("\n"); } } static void inline print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend, const char *title) { who_perm_node_t *who_node = NULL; boolean_t prt_title = B_TRUE; uu_avl_walk_t *walk; if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL) nomem(); while ((who_node = uu_avl_walk_next(walk)) != NULL) { const char *who_name = who_node->who_perm.who_name; const char *nice_who_name = who_node->who_perm.who_ug_name; uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl; zfs_deleg_who_type_t who_type = who_node->who_perm.who_type; char delim = ' '; deleg_perm_node_t *deleg_node; boolean_t prt_who = B_TRUE; for (deleg_node = uu_avl_first(avl); deleg_node != NULL; deleg_node = uu_avl_next(avl, deleg_node)) { if (local != deleg_node->dpn_perm.dp_local || descend != deleg_node->dpn_perm.dp_descend) continue; if (prt_who) { const char *who = NULL; if (prt_title) { prt_title = B_FALSE; (void) printf(title); } switch (who_type) { case ZFS_DELEG_USER_SETS: case ZFS_DELEG_USER: who = gettext("user"); if (nice_who_name) who_name = nice_who_name; break; case ZFS_DELEG_GROUP_SETS: case ZFS_DELEG_GROUP: who = gettext("group"); if (nice_who_name) who_name = nice_who_name; break; case ZFS_DELEG_EVERYONE_SETS: case ZFS_DELEG_EVERYONE: who = gettext("everyone"); who_name = NULL; } prt_who = B_FALSE; if (who_name == NULL) (void) printf("\t%s", who); else (void) printf("\t%s %s", who, who_name); } (void) printf("%c%s", delim, deleg_node->dpn_perm.dp_name); delim = ','; } if (!prt_who) (void) printf("\n"); } uu_avl_walk_end(walk); } static void print_fs_perms(fs_perm_set_t *fspset) { fs_perm_node_t *node = NULL; char buf[ZFS_MAXNAMELEN+32]; const char *dsname = buf; for (node = uu_list_first(fspset->fsps_list); node != NULL; node = uu_list_next(fspset->fsps_list, node)) { uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl; uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl; int left = 0; (void) snprintf(buf, ZFS_MAXNAMELEN+32, gettext("---- Permissions on %s "), node->fspn_fsperm.fsp_name); (void) printf(dsname); left = 70 - strlen(buf); while (left-- > 0) (void) printf("-"); (void) printf("\n"); print_set_creat_perms(sc_avl); print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE, gettext("Local permissions:\n")); print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE, gettext("Descendent permissions:\n")); print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE, gettext("Local+Descendent permissions:\n")); } } static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL }; struct deleg_perms { boolean_t un; nvlist_t *nvl; }; static int set_deleg_perms(zfs_handle_t *zhp, void *data) { struct deleg_perms *perms = (struct deleg_perms *)data; zfs_type_t zfs_type = zfs_get_type(zhp); if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME) return (0); return (zfs_set_fsacl(zhp, perms->un, perms->nvl)); } static int zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un) { zfs_handle_t *zhp; nvlist_t *perm_nvl = NULL; nvlist_t *update_perm_nvl = NULL; int error = 1; int c; struct allow_opts opts = { 0 }; const char *optstr = un ? 
"ldugecsrh" : "ldugecsh"; /* check opts */ while ((c = getopt(argc, argv, optstr)) != -1) { switch (c) { case 'l': opts.local = B_TRUE; break; case 'd': opts.descend = B_TRUE; break; case 'u': opts.user = B_TRUE; break; case 'g': opts.group = B_TRUE; break; case 'e': opts.everyone = B_TRUE; break; case 's': opts.set = B_TRUE; break; case 'c': opts.create = B_TRUE; break; case 'r': opts.recursive = B_TRUE; break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case 'h': opts.prt_usage = B_TRUE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check arguments */ parse_allow_args(argc, argv, un, &opts); /* try to open the dataset */ if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) { (void) fprintf(stderr, "Failed to open dataset: %s\n", opts.dataset); return (-1); } if (zfs_get_fsacl(zhp, &perm_nvl) != 0) goto cleanup2; fs_perm_set_init(&fs_perm_set); if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) { (void) fprintf(stderr, "Failed to parse fsacl permissions\n"); goto cleanup1; } if (opts.prt_perms) print_fs_perms(&fs_perm_set); else { (void) construct_fsacl_list(un, &opts, &update_perm_nvl); if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0) goto cleanup0; if (un && opts.recursive) { struct deleg_perms data = { un, update_perm_nvl }; if (zfs_iter_filesystems(zhp, set_deleg_perms, &data) != 0) goto cleanup0; } } error = 0; cleanup0: nvlist_free(perm_nvl); if (update_perm_nvl != NULL) nvlist_free(update_perm_nvl); cleanup1: fs_perm_set_fini(&fs_perm_set); cleanup2: zfs_close(zhp); return (error); } /* * zfs allow [-r] [-t] ... * * -r Recursively hold * -t Temporary hold (hidden option) * * Apply a user-hold with the given tag to the list of snapshots. */ static int zfs_do_allow(int argc, char **argv) { return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE)); } /* * zfs unallow [-r] [-t] ... * * -r Recursively hold * -t Temporary hold (hidden option) * * Apply a user-hold with the given tag to the list of snapshots. */ static int zfs_do_unallow(int argc, char **argv) { return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE)); } static int zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding) { int errors = 0; int i; const char *tag; boolean_t recursive = B_FALSE; boolean_t temphold = B_FALSE; const char *opts = holding ? "rt" : "r"; int c; /* check options */ while ((c = getopt(argc, argv, opts)) != -1) { switch (c) { case 'r': recursive = B_TRUE; break; case 't': temphold = B_TRUE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 2) usage(B_FALSE); tag = argv[0]; --argc; ++argv; if (holding && tag[0] == '.') { /* tags starting with '.' 
are reserved for libzfs */ (void) fprintf(stderr, gettext("tag may not start with '.'\n")); usage(B_FALSE); } for (i = 0; i < argc; ++i) { zfs_handle_t *zhp; char parent[ZFS_MAXNAMELEN]; const char *delim; char *path = argv[i]; delim = strchr(path, '@'); if (delim == NULL) { (void) fprintf(stderr, gettext("'%s' is not a snapshot\n"), path); ++errors; continue; } (void) strncpy(parent, path, delim - path); parent[delim - path] = '\0'; zhp = zfs_open(g_zfs, parent, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) { ++errors; continue; } if (holding) { if (zfs_hold(zhp, delim+1, tag, recursive, temphold, B_FALSE, -1, 0, 0) != 0) ++errors; } else { if (zfs_release(zhp, delim+1, tag, recursive) != 0) ++errors; } zfs_close(zhp); } return (errors != 0); } /* * zfs hold [-r] [-t] ... * * -r Recursively hold * -t Temporary hold (hidden option) * * Apply a user-hold with the given tag to the list of snapshots. */ static int zfs_do_hold(int argc, char **argv) { return (zfs_do_hold_rele_impl(argc, argv, B_TRUE)); } /* * zfs release [-r] ... * * -r Recursively release * * Release a user-hold with the given tag from the list of snapshots. */ static int zfs_do_release(int argc, char **argv) { return (zfs_do_hold_rele_impl(argc, argv, B_FALSE)); } typedef struct holds_cbdata { boolean_t cb_recursive; const char *cb_snapname; nvlist_t **cb_nvlp; size_t cb_max_namelen; size_t cb_max_taglen; } holds_cbdata_t; #define STRFTIME_FMT_STR "%a %b %e %k:%M %Y" #define DATETIME_BUF_LEN (32) /* * Print a table of holds, one line per (snapshot, tag) pair. */ static void print_holds(boolean_t scripted, size_t nwidth, size_t tagwidth, nvlist_t *nvl) { int i; nvpair_t *nvp = NULL; char *hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" }; const char *col; if (!scripted) { for (i = 0; i < 3; i++) { col = gettext(hdr_cols[i]); if (i < 2) (void) printf("%-*s  ", i ? tagwidth : nwidth, col); else (void) printf("%s\n", col); } } while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { char *zname = nvpair_name(nvp); nvlist_t *nvl2; nvpair_t *nvp2 = NULL; (void) nvpair_value_nvlist(nvp, &nvl2); while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) { char tsbuf[DATETIME_BUF_LEN]; char *tagname = nvpair_name(nvp2); uint64_t val = 0; time_t time; struct tm t; char sep = scripted ? '\t' : ' '; size_t sepnum = scripted ? 1 : 2; (void) nvpair_value_uint64(nvp2, &val); time = (time_t)val; (void) localtime_r(&time, &t); (void) strftime(tsbuf, DATETIME_BUF_LEN, gettext(STRFTIME_FMT_STR), &t); (void) printf("%-*s%*c%-*s%*c%s\n", nwidth, zname, sepnum, sep, tagwidth, tagname, sepnum, sep, tsbuf); } } } /* * Callback to collect the holds for a single dataset or snapshot. */ static int holds_callback(zfs_handle_t *zhp, void *data) { holds_cbdata_t *cbp = data; nvlist_t *top_nvl = *cbp->cb_nvlp; nvlist_t *nvl = NULL; nvpair_t *nvp = NULL; const char *zname = zfs_get_name(zhp); size_t znamelen = strnlen(zname, ZFS_MAXNAMELEN); if (cbp->cb_recursive) { const char *snapname; char *delim = strchr(zname, '@'); if (delim == NULL) return (0); snapname = delim + 1; if (strcmp(cbp->cb_snapname, snapname)) return (0); } if (zfs_get_holds(zhp, &nvl) != 0) return (-1); if (znamelen > cbp->cb_max_namelen) cbp->cb_max_namelen = znamelen; while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) { const char *tag = nvpair_name(nvp); size_t taglen = strnlen(tag, MAXNAMELEN); if (taglen > cbp->cb_max_taglen) cbp->cb_max_taglen = taglen; } return (nvlist_add_nvlist(top_nvl, zname, nvl)); } /* * zfs holds [-r] ...
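* * Example (hypothetical snapshot name): 'zfs holds -r tank/home@monday' lists every user hold on that snapshot and on same-named snapshots of descendent datasets.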
* * -r List holds recursively */ static int zfs_do_holds(int argc, char **argv) { int errors = 0; int c; int i; boolean_t scripted = B_FALSE; boolean_t recursive = B_FALSE; const char *opts = "rH"; nvlist_t *nvl; int types = ZFS_TYPE_SNAPSHOT; holds_cbdata_t cb = { 0 }; int limit = 0; int ret = 0; int flags = 0; /* check options */ while ((c = getopt(argc, argv, opts)) != -1) { switch (c) { case 'r': recursive = B_TRUE; break; case 'H': scripted = B_TRUE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } if (recursive) { types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME; flags |= ZFS_ITER_RECURSE; } argc -= optind; argv += optind; /* check number of arguments */ if (argc < 1) usage(B_FALSE); if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) nomem(); for (i = 0; i < argc; ++i) { char *snapshot = argv[i]; const char *delim; const char *snapname; delim = strchr(snapshot, '@'); if (delim == NULL) { (void) fprintf(stderr, gettext("'%s' is not a snapshot\n"), snapshot); ++errors; continue; } snapname = delim + 1; if (recursive) snapshot[delim - snapshot] = '\0'; cb.cb_recursive = recursive; cb.cb_snapname = snapname; cb.cb_nvlp = &nvl; /* * 1. collect holds data, set format options */ ret = zfs_for_each(argc, argv, flags, types, NULL, NULL, limit, holds_callback, &cb); if (ret != 0) ++errors; } /* * 2. print holds data */ print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl); if (nvlist_empty(nvl)) (void) printf(gettext("no datasets available\n")); nvlist_free(nvl); return (0 != errors); } #define CHECK_SPINNER 30 #define SPINNER_TIME 3 /* seconds */ #define MOUNT_TIME 5 /* seconds */ static int get_one_dataset(zfs_handle_t *zhp, void *data) { static char *spin[] = { "-", "\\", "|", "/" }; static int spinval = 0; static int spincheck = 0; static time_t last_spin_time = (time_t)0; get_all_cb_t *cbp = data; zfs_type_t type = zfs_get_type(zhp); if (cbp->cb_verbose) { if (--spincheck < 0) { time_t now = time(NULL); if (last_spin_time + SPINNER_TIME < now) { update_progress(spin[spinval++ % 4]); last_spin_time = now; } spincheck = CHECK_SPINNER; } } /* * Iterate over any nested datasets. */ if (zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) { zfs_close(zhp); return (1); } /* * Skip any datasets whose type does not match. */ if ((type & ZFS_TYPE_FILESYSTEM) == 0) { zfs_close(zhp); return (0); } libzfs_add_handle(cbp, zhp); assert(cbp->cb_used <= cbp->cb_alloc); return (0); } static void get_all_datasets(zfs_handle_t ***dslist, size_t *count, boolean_t verbose) { get_all_cb_t cb = { 0 }; cb.cb_verbose = verbose; cb.cb_getone = get_one_dataset; if (verbose) set_progress_header(gettext("Reading ZFS config")); (void) zfs_iter_root(g_zfs, get_one_dataset, &cb); *dslist = cb.cb_handles; *count = cb.cb_used; if (verbose) finish_progress(gettext("done.")); } /* * Generic callback for sharing or mounting filesystems. Because the code is so * similar, we have a common function with an extra parameter to determine which * mode we are using. */ #define OP_SHARE 0x1 #define OP_MOUNT 0x2 /* * Share or mount a dataset. */ static int share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol, boolean_t explicit, const char *options) { char mountpoint[ZFS_MAXPROPLEN]; char shareopts[ZFS_MAXPROPLEN]; char smbshareopts[ZFS_MAXPROPLEN]; const char *cmdname = op == OP_SHARE ?
"share" : "mount"; struct mnttab mnt; uint64_t zoned, canmount; boolean_t shared_nfs, shared_smb; assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM); /* * Check to make sure we can mount/share this dataset. If we * are in the global zone and the filesystem is exported to a * local zone, or if we are in a local zone and the * filesystem is not exported, then it is an error. */ zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); if (zoned && getzoneid() == GLOBAL_ZONEID) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot %s '%s': " "dataset is exported to a local zone\n"), cmdname, zfs_get_name(zhp)); return (1); } else if (!zoned && getzoneid() != GLOBAL_ZONEID) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot %s '%s': " "permission denied\n"), cmdname, zfs_get_name(zhp)); return (1); } /* * Ignore any filesystems which don't apply to us. This * includes those with a legacy mountpoint, or those with * legacy share options. */ verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint, sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0); verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts, sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0); verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts, sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0); if (op == OP_SHARE && strcmp(shareopts, "off") == 0 && strcmp(smbshareopts, "off") == 0) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot share '%s': " "legacy share\n"), zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use share(1M) to " "share this filesystem, or set " "sharenfs property on\n")); return (1); } /* * We cannot share or mount legacy filesystems. If the * shareopts is non-legacy but the mountpoint is legacy, we * treat it as a legacy share. */ if (strcmp(mountpoint, "legacy") == 0) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot %s '%s': " "legacy mountpoint\n"), cmdname, zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use %s(1M) to " "%s this filesystem\n"), cmdname, cmdname); return (1); } if (strcmp(mountpoint, "none") == 0) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot %s '%s': no " "mountpoint set\n"), cmdname, zfs_get_name(zhp)); return (1); } /* * canmount explicit outcome * on no pass through * on yes pass through * off no return 0 * off yes display error, return 1 * noauto no return 0 * noauto yes pass through */ canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT); if (canmount == ZFS_CANMOUNT_OFF) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot %s '%s': " "'canmount' property is set to 'off'\n"), cmdname, zfs_get_name(zhp)); return (1); } else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) { return (0); } /* * At this point, we have verified that the mountpoint and/or * shareopts are appropriate for auto management. If the * filesystem is already mounted or shared, return (failing * for explicit requests); otherwise mount or share the * filesystem. 
*/ switch (op) { case OP_SHARE: shared_nfs = zfs_is_shared_nfs(zhp, NULL); shared_smb = zfs_is_shared_smb(zhp, NULL); if ((shared_nfs && shared_smb) || (shared_nfs && strcmp(shareopts, "on") == 0 && strcmp(smbshareopts, "off") == 0) || (shared_smb && strcmp(smbshareopts, "on") == 0 && strcmp(shareopts, "off") == 0)) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot share " "'%s': filesystem already shared\n"), zfs_get_name(zhp)); return (1); } if (!zfs_is_mounted(zhp, NULL) && zfs_mount(zhp, NULL, 0) != 0) return (1); if (protocol == NULL) { if (zfs_shareall(zhp) != 0) return (1); } else if (strcmp(protocol, "nfs") == 0) { if (zfs_share_nfs(zhp)) return (1); } else if (strcmp(protocol, "smb") == 0) { if (zfs_share_smb(zhp)) return (1); } else { (void) fprintf(stderr, gettext("cannot share " "'%s': invalid share type '%s' " "specified\n"), zfs_get_name(zhp), protocol); return (1); } break; case OP_MOUNT: if (options == NULL) mnt.mnt_mntopts = ""; else mnt.mnt_mntopts = (char *)options; if (!hasmntopt(&mnt, MNTOPT_REMOUNT) && zfs_is_mounted(zhp, NULL)) { if (!explicit) return (0); (void) fprintf(stderr, gettext("cannot mount " "'%s': filesystem already mounted\n"), zfs_get_name(zhp)); return (1); } if (zfs_mount(zhp, options, flags) != 0) return (1); break; } return (0); } /* * Reports progress in the form "(current/total)". Not thread-safe. */ static void report_mount_progress(int current, int total) { static time_t last_progress_time = 0; time_t now = time(NULL); char info[32]; /* report 1..n instead of 0..n-1 */ ++current; /* display header if we're here for the first time */ if (current == 1) { set_progress_header(gettext("Mounting ZFS filesystems")); } else if (current != total && last_progress_time + MOUNT_TIME >= now) { /* too soon to report again */ return; } last_progress_time = now; (void) sprintf(info, "(%d/%d)", current, total); if (current == total) finish_progress(info); else update_progress(info); } static void append_options(char *mntopts, char *newopts) { int len = strlen(mntopts); /* original length plus new string to append plus 1 for the comma */ if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) { (void) fprintf(stderr, gettext("the opts argument for " "'%s' option is too long (more than %d chars)\n"), "-o", MNT_LINE_MAX); usage(B_FALSE); } if (*mntopts) mntopts[len++] = ','; (void) strcpy(&mntopts[len], newopts); } static int share_mount(int op, int argc, char **argv) { int do_all = 0; boolean_t verbose = B_FALSE; int c, ret = 0; char *options = NULL; int flags = 0; /* check options */ while ((c = getopt(argc, argv, op == OP_MOUNT ? 
":avo:O" : "a")) != -1) { switch (c) { case 'a': do_all = 1; break; case 'v': verbose = B_TRUE; break; case 'o': if (*optarg == '\0') { (void) fprintf(stderr, gettext("empty mount " "options (-o) specified\n")); usage(B_FALSE); } if (options == NULL) options = safe_malloc(MNT_LINE_MAX + 1); /* option validation is done later */ append_options(options, optarg); break; case 'O': warnx("no overlay mounts support on FreeBSD, ignoring"); break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; /* check number of arguments */ if (do_all) { zfs_handle_t **dslist = NULL; size_t i, count = 0; char *protocol = NULL; if (op == OP_SHARE && argc > 0) { if (strcmp(argv[0], "nfs") != 0 && strcmp(argv[0], "smb") != 0) { (void) fprintf(stderr, gettext("share type " "must be 'nfs' or 'smb'\n")); usage(B_FALSE); } protocol = argv[0]; argc--; argv++; } if (argc != 0) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } start_progress_timer(); get_all_datasets(&dslist, &count, verbose); if (count == 0) return (0); qsort(dslist, count, sizeof (void *), libzfs_dataset_cmp); for (i = 0; i < count; i++) { if (verbose) report_mount_progress(i, count); if (share_mount_one(dslist[i], op, flags, protocol, B_FALSE, options) != 0) ret = 1; zfs_close(dslist[i]); } free(dslist); } else if (argc == 0) { struct mnttab entry; if ((op == OP_SHARE) || (options != NULL)) { (void) fprintf(stderr, gettext("missing filesystem " "argument (specify -a for all)\n")); usage(B_FALSE); } /* * When mount is given no arguments, go through /etc/mnttab and * display any active ZFS mounts. We hide any snapshots, since * they are controlled automatically. */ rewind(mnttab_file); while (getmntent(mnttab_file, &entry) == 0) { if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 || strchr(entry.mnt_special, '@') != NULL) continue; (void) printf("%-30s %s\n", entry.mnt_special, entry.mnt_mountp); } } else { zfs_handle_t *zhp; if (argc > 1) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM)) == NULL) { ret = 1; } else { ret = share_mount_one(zhp, op, flags, NULL, B_TRUE, options); zfs_close(zhp); } } return (ret); } /* * zfs mount -a [nfs] * zfs mount filesystem * * Mount all filesystems, or mount the given filesystem. */ static int zfs_do_mount(int argc, char **argv) { return (share_mount(OP_MOUNT, argc, argv)); } /* * zfs share -a [nfs | smb] * zfs share filesystem * * Share all filesystems, or share the given filesystem. */ static int zfs_do_share(int argc, char **argv) { return (share_mount(OP_SHARE, argc, argv)); } typedef struct unshare_unmount_node { zfs_handle_t *un_zhp; char *un_mountp; uu_avl_node_t un_avlnode; } unshare_unmount_node_t; /* ARGSUSED */ static int unshare_unmount_compare(const void *larg, const void *rarg, void *unused) { const unshare_unmount_node_t *l = larg; const unshare_unmount_node_t *r = rarg; return (strcmp(l->un_mountp, r->un_mountp)); } /* * Convenience routine used by zfs_do_umount() and manual_unmount(). Given an * absolute path, find the entry /etc/mnttab, verify that its a ZFS filesystem, * and unmount it appropriately. 
*/ static int unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual) { zfs_handle_t *zhp; int ret = 0; struct stat64 statbuf; struct extmnttab entry; const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount"; ino_t path_inode; /* * Search for the path in /etc/mnttab. Rather than looking for the * specific path, which can be fooled by non-standard paths (i.e. ".." * or "//"), we stat() the path and search for the corresponding * (major,minor) device pair. */ if (stat64(path, &statbuf) != 0) { (void) fprintf(stderr, gettext("cannot %s '%s': %s\n"), cmdname, path, strerror(errno)); return (1); } path_inode = statbuf.st_ino; /* * Search for the given (major,minor) pair in the mount table. */ #ifdef sun rewind(mnttab_file); while ((ret = getextmntent(mnttab_file, &entry, 0)) == 0) { if (entry.mnt_major == major(statbuf.st_dev) && entry.mnt_minor == minor(statbuf.st_dev)) break; } #else { struct statfs sfs; if (statfs(path, &sfs) != 0) { (void) fprintf(stderr, "%s: %s\n", path, strerror(errno)); ret = -1; } statfs2mnttab(&sfs, &entry); } #endif if (ret != 0) { if (op == OP_SHARE) { (void) fprintf(stderr, gettext("cannot %s '%s': not " "currently mounted\n"), cmdname, path); return (1); } (void) fprintf(stderr, gettext("warning: %s not in mnttab\n"), path); if ((ret = umount2(path, flags)) != 0) (void) fprintf(stderr, gettext("%s: %s\n"), path, strerror(errno)); return (ret != 0); } if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) { (void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS " "filesystem\n"), cmdname, path); return (1); } if ((zhp = zfs_open(g_zfs, entry.mnt_special, ZFS_TYPE_FILESYSTEM)) == NULL) return (1); ret = 1; if (stat64(entry.mnt_mountp, &statbuf) != 0) { (void) fprintf(stderr, gettext("cannot %s '%s': %s\n"), cmdname, path, strerror(errno)); goto out; } else if (statbuf.st_ino != path_inode) { (void) fprintf(stderr, gettext("cannot " "%s '%s': not a mountpoint\n"), cmdname, path); goto out; } if (op == OP_SHARE) { char nfs_mnt_prop[ZFS_MAXPROPLEN]; char smbshare_prop[ZFS_MAXPROPLEN]; verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop, sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0); if (strcmp(nfs_mnt_prop, "off") == 0 && strcmp(smbshare_prop, "off") == 0) { (void) fprintf(stderr, gettext("cannot unshare " "'%s': legacy share\n"), path); (void) fprintf(stderr, gettext("use " "unshare(1M) to unshare this filesystem\n")); } else if (!zfs_is_shared(zhp)) { (void) fprintf(stderr, gettext("cannot unshare '%s': " "not currently shared\n"), path); } else { ret = zfs_unshareall_bypath(zhp, path); } } else { char mtpt_prop[ZFS_MAXPROPLEN]; verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop, sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0); if (is_manual) { ret = zfs_unmount(zhp, NULL, flags); } else if (strcmp(mtpt_prop, "legacy") == 0) { (void) fprintf(stderr, gettext("cannot unmount " "'%s': legacy mountpoint\n"), zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use umount(1M) " "to unmount this filesystem\n")); } else { ret = zfs_unmountall(zhp, flags); } } out: zfs_close(zhp); return (ret != 0); } /* * Generic callback for unsharing or unmounting a filesystem. */ static int unshare_unmount(int op, int argc, char **argv) { int do_all = 0; int flags = 0; int ret = 0; int c; zfs_handle_t *zhp; char nfs_mnt_prop[ZFS_MAXPROPLEN]; char sharesmb[ZFS_MAXPROPLEN]; /* check options */ while ((c = getopt(argc, argv, op == OP_SHARE ? 
"a" : "af")) != -1) { switch (c) { case 'a': do_all = 1; break; case 'f': flags = MS_FORCE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; if (do_all) { /* * We could make use of zfs_for_each() to walk all datasets in * the system, but this would be very inefficient, especially * since we would have to linearly search /etc/mnttab for each * one. Instead, do one pass through /etc/mnttab looking for * zfs entries and call zfs_unmount() for each one. * * Things get a little tricky if the administrator has created * mountpoints beneath other ZFS filesystems. In this case, we * have to unmount the deepest filesystems first. To accomplish * this, we place all the mountpoints in an AVL tree sorted by * the special type (dataset name), and walk the result in * reverse to make sure to get any snapshots first. */ struct mnttab entry; uu_avl_pool_t *pool; uu_avl_t *tree; unshare_unmount_node_t *node; uu_avl_index_t idx; uu_avl_walk_t *walk; if (argc != 0) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } if (((pool = uu_avl_pool_create("unmount_pool", sizeof (unshare_unmount_node_t), offsetof(unshare_unmount_node_t, un_avlnode), unshare_unmount_compare, UU_DEFAULT)) == NULL) || ((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL)) nomem(); rewind(mnttab_file); while (getmntent(mnttab_file, &entry) == 0) { /* ignore non-ZFS entries */ if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) continue; /* ignore snapshots */ if (strchr(entry.mnt_special, '@') != NULL) continue; if ((zhp = zfs_open(g_zfs, entry.mnt_special, ZFS_TYPE_FILESYSTEM)) == NULL) { ret = 1; continue; } switch (op) { case OP_SHARE: verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); if (strcmp(nfs_mnt_prop, "off") != 0) break; verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); if (strcmp(nfs_mnt_prop, "off") == 0) continue; break; case OP_MOUNT: /* Ignore legacy mounts */ verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); if (strcmp(nfs_mnt_prop, "legacy") == 0) continue; /* Ignore canmount=noauto mounts */ if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) continue; default: break; } node = safe_malloc(sizeof (unshare_unmount_node_t)); node->un_zhp = zhp; node->un_mountp = safe_strdup(entry.mnt_mountp); uu_avl_node_init(node, &node->un_avlnode, pool); if (uu_avl_find(tree, node, NULL, &idx) == NULL) { uu_avl_insert(tree, node, idx); } else { zfs_close(node->un_zhp); free(node->un_mountp); free(node); } } /* * Walk the AVL tree in reverse, unmounting each filesystem and * removing it from the AVL tree in the process. 
*/ if ((walk = uu_avl_walk_start(tree, UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL) nomem(); while ((node = uu_avl_walk_next(walk)) != NULL) { uu_avl_remove(tree, node); switch (op) { case OP_SHARE: if (zfs_unshareall_bypath(node->un_zhp, node->un_mountp) != 0) ret = 1; break; case OP_MOUNT: if (zfs_unmount(node->un_zhp, node->un_mountp, flags) != 0) ret = 1; break; } zfs_close(node->un_zhp); free(node->un_mountp); free(node); } uu_avl_walk_end(walk); uu_avl_destroy(tree); uu_avl_pool_destroy(pool); } else { if (argc != 1) { if (argc == 0) (void) fprintf(stderr, gettext("missing filesystem argument\n")); else (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } /* * We have an argument, but it may be a full path or a ZFS * filesystem. Pass full paths off to unshare_unmount_path() (shared * with manual_unmount()); otherwise open the filesystem and pass to * zfs_unmount(). */ if (argv[0][0] == '/') return (unshare_unmount_path(op, argv[0], flags, B_FALSE)); if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM)) == NULL) return (1); verify(zfs_prop_get(zhp, op == OP_SHARE ? ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT, nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); switch (op) { case OP_SHARE: verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0); verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, sharesmb, sizeof (sharesmb), NULL, NULL, 0, B_FALSE) == 0); if (strcmp(nfs_mnt_prop, "off") == 0 && strcmp(sharesmb, "off") == 0) { (void) fprintf(stderr, gettext("cannot " "unshare '%s': legacy share\n"), zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use " "unshare(1M) to unshare this " "filesystem\n")); ret = 1; } else if (!zfs_is_shared(zhp)) { (void) fprintf(stderr, gettext("cannot " "unshare '%s': not currently " "shared\n"), zfs_get_name(zhp)); ret = 1; } else if (zfs_unshareall(zhp) != 0) { ret = 1; } break; case OP_MOUNT: if (strcmp(nfs_mnt_prop, "legacy") == 0) { (void) fprintf(stderr, gettext("cannot " "unmount '%s': legacy " "mountpoint\n"), zfs_get_name(zhp)); (void) fprintf(stderr, gettext("use " "umount(1M) to unmount this " "filesystem\n")); ret = 1; } else if (!zfs_is_mounted(zhp, NULL)) { (void) fprintf(stderr, gettext("cannot " "unmount '%s': not currently " "mounted\n"), zfs_get_name(zhp)); ret = 1; } else if (zfs_unmountall(zhp, flags) != 0) { ret = 1; } break; } zfs_close(zhp); } return (ret); } /* * zfs unmount -a * zfs unmount filesystem * * Unmount all filesystems, or a specific ZFS filesystem. */ static int zfs_do_unmount(int argc, char **argv) { return (unshare_unmount(OP_MOUNT, argc, argv)); } /* * zfs unshare -a * zfs unshare filesystem * * Unshare all filesystems, or a specific ZFS filesystem. 
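 * e.g. "zfs unshare tank/export" by dataset name, or
 * "zfs unshare /tank/export" by mountpoint (hypothetical names).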
*/ static int zfs_do_unshare(int argc, char **argv) { return (unshare_unmount(OP_SHARE, argc, argv)); } /* * Attach/detach the given dataset to/from the given jail */ /* ARGSUSED */ static int do_jail(int argc, char **argv, int attach) { zfs_handle_t *zhp; int jailid, ret; /* check number of arguments */ if (argc < 3) { (void) fprintf(stderr, gettext("missing argument(s)\n")); usage(B_FALSE); } if (argc > 3) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } jailid = atoi(argv[1]); if (jailid == 0) { (void) fprintf(stderr, gettext("invalid jailid\n")); usage(B_FALSE); } zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM); if (zhp == NULL) return (1); ret = (zfs_jail(zhp, jailid, attach) != 0); zfs_close(zhp); return (ret); } /* * zfs jail jailid filesystem * * Attach the given dataset to the given jail */ /* ARGSUSED */ static int zfs_do_jail(int argc, char **argv) { return (do_jail(argc, argv, 1)); } /* * zfs unjail jailid filesystem * * Detach the given dataset from the given jail */ /* ARGSUSED */ static int zfs_do_unjail(int argc, char **argv) { return (do_jail(argc, argv, 0)); } /* * Called when invoked as /etc/fs/zfs/mount. Do the mount if the mountpoint is * 'legacy'. Otherwise, complain that the user should be using 'zfs mount'. */ static int manual_mount(int argc, char **argv) { zfs_handle_t *zhp; char mountpoint[ZFS_MAXPROPLEN]; char mntopts[MNT_LINE_MAX] = { '\0' }; int ret = 0; int c; int flags = 0; char *dataset, *path; /* check options */ while ((c = getopt(argc, argv, ":mo:O")) != -1) { switch (c) { case 'o': (void) strlcpy(mntopts, optarg, sizeof (mntopts)); break; case 'O': flags |= MS_OVERLAY; break; case 'm': flags |= MS_NOMNTTAB; break; case ':': (void) fprintf(stderr, gettext("missing argument for " "'%c' option\n"), optopt); usage(B_FALSE); break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); (void) fprintf(stderr, gettext("usage: mount [-o opts] " "<dataset> <path>\n")); return (2); } } argc -= optind; argv += optind; /* check that we only have two arguments */ if (argc != 2) { if (argc == 0) (void) fprintf(stderr, gettext("missing dataset " "argument\n")); else if (argc == 1) (void) fprintf(stderr, gettext("missing mountpoint argument\n")); else (void) fprintf(stderr, gettext("too many arguments\n")); (void) fprintf(stderr, "usage: mount <dataset> <path>\n"); return (2); } dataset = argv[0]; path = argv[1]; /* try to open the dataset */ if ((zhp = zfs_open(g_zfs, dataset, ZFS_TYPE_FILESYSTEM)) == NULL) return (1); (void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint, sizeof (mountpoint), NULL, NULL, 0, B_FALSE); /* check for legacy mountpoint and complain appropriately */ ret = 0; if (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0) { if (zmount(dataset, path, flags, MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) { (void) fprintf(stderr, gettext("mount failed: %s\n"), strerror(errno)); ret = 1; } } else { (void) fprintf(stderr, gettext("filesystem '%s' cannot be " "mounted using 'mount -F zfs'\n"), dataset); (void) fprintf(stderr, gettext("Use 'zfs set mountpoint=%s' " "instead.\n"), path); (void) fprintf(stderr, gettext("If you must use 'mount -F zfs' " "or /etc/vfstab, use 'zfs set mountpoint=legacy'.\n")); (void) fprintf(stderr, gettext("See zfs(1M) for more " "information.\n")); ret = 1; } return (ret); } /* * Called when invoked as /etc/fs/zfs/umount. Unlike a manual mount, we allow * unmounts of non-legacy filesystems, as this is the dominant administrative * interface. 
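 * For instance, "umount /tank/home" (hypothetical mountpoint) is
 * accepted here even when the mountpoint property is not "legacy",
 * whereas the manual mount path above requires mountpoint=legacy,
 * typically paired with an /etc/fstab entry such as
 * "tank/home /home zfs rw 0 0".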
*/ static int manual_unmount(int argc, char **argv) { int flags = 0; int c; /* check options */ while ((c = getopt(argc, argv, "f")) != -1) { switch (c) { case 'f': flags = MS_FORCE; break; case '?': (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); (void) fprintf(stderr, gettext("usage: unmount [-f] " "<path>\n")); return (2); } } argc -= optind; argv += optind; /* check arguments */ if (argc != 1) { if (argc == 0) (void) fprintf(stderr, gettext("missing path " "argument\n")); else (void) fprintf(stderr, gettext("too many arguments\n")); (void) fprintf(stderr, gettext("usage: unmount [-f] <path>\n")); return (2); } return (unshare_unmount_path(OP_MOUNT, argv[0], flags, B_TRUE)); } static int find_command_idx(char *command, int *idx) { int i; for (i = 0; i < NCOMMAND; i++) { if (command_table[i].name == NULL) continue; if (strcmp(command, command_table[i].name) == 0) { *idx = i; return (0); } } return (1); } static int zfs_do_diff(int argc, char **argv) { zfs_handle_t *zhp; int flags = 0; char *tosnap = NULL; char *fromsnap = NULL; char *atp, *copy; int err = 0; int c; while ((c = getopt(argc, argv, "FHt")) != -1) { switch (c) { case 'F': flags |= ZFS_DIFF_CLASSIFY; break; case 'H': flags |= ZFS_DIFF_PARSEABLE; break; case 't': flags |= ZFS_DIFF_TIMESTAMP; break; default: (void) fprintf(stderr, gettext("invalid option '%c'\n"), optopt); usage(B_FALSE); } } argc -= optind; argv += optind; if (argc < 1) { (void) fprintf(stderr, gettext("must provide at least one snapshot name\n")); usage(B_FALSE); } if (argc > 2) { (void) fprintf(stderr, gettext("too many arguments\n")); usage(B_FALSE); } fromsnap = argv[0]; tosnap = (argc == 2) ? argv[1] : NULL; copy = NULL; if (*fromsnap != '@') copy = strdup(fromsnap); else if (tosnap) copy = strdup(tosnap); if (copy == NULL) usage(B_FALSE); if ((atp = strchr(copy, '@')) != NULL) *atp = '\0'; if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) return (1); free(copy); /* * Ignore SIGPIPE so that the library can give us * information on any failure */ (void) sigignore(SIGPIPE); err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags); zfs_close(zhp); return (err != 0); } int main(int argc, char **argv) { int ret = 0; int i; char *progname; char *cmdname; (void) setlocale(LC_ALL, ""); (void) textdomain(TEXT_DOMAIN); opterr = 0; if ((g_zfs = libzfs_init()) == NULL) { (void) fprintf(stderr, gettext("internal error: failed to " "initialize ZFS library\n")); return (1); } zpool_set_history_str("zfs", argc, argv, history_str); verify(zpool_stage_history(g_zfs, history_str) == 0); libzfs_print_on_error(g_zfs, B_TRUE); if ((mnttab_file = fopen(MNTTAB, "r")) == NULL) { (void) fprintf(stderr, gettext("internal error: unable to " "open %s\n"), MNTTAB); return (1); } /* * This command also doubles as the /etc/fs mount and unmount program. * Determine if we should take this behavior based on argv[0]. */ progname = basename(argv[0]); if (strcmp(progname, "mount") == 0) { ret = manual_mount(argc, argv); } else if (strcmp(progname, "umount") == 0) { ret = manual_unmount(argc, argv); } else { /* * Make sure the user has specified some command. */ if (argc < 2) { (void) fprintf(stderr, gettext("missing command\n")); usage(B_FALSE); } cmdname = argv[1]; /* * The 'umount' command is an alias for 'unmount' */ if (strcmp(cmdname, "umount") == 0) cmdname = "unmount"; /* * The 'recv' command is an alias for 'receive' */ if (strcmp(cmdname, "recv") == 0) cmdname = "receive"; /* * Special case '-?' 
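 * i.e. "zfs -?" prints the full usage message rather than being
 * rejected as an unrecognized command.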
*/ if (strcmp(cmdname, "-?") == 0) usage(B_TRUE); /* * Run the appropriate command. */ libzfs_mnttab_cache(g_zfs, B_TRUE); if (find_command_idx(cmdname, &i) == 0) { current_command = &command_table[i]; ret = command_table[i].func(argc - 1, argv + 1); } else if (strchr(cmdname, '=') != NULL) { verify(find_command_idx("set", &i) == 0); current_command = &command_table[i]; ret = command_table[i].func(argc, argv); } else { (void) fprintf(stderr, gettext("unrecognized " "command '%s'\n"), cmdname); usage(B_FALSE); } libzfs_mnttab_cache(g_zfs, B_FALSE); } (void) fclose(mnttab_file); libzfs_fini(g_zfs); /* * The 'ZFS_ABORT' environment variable causes us to dump core on exit * for the purposes of running ::findleaks. */ if (getenv("ZFS_ABORT") != NULL) { (void) printf("dumping core by request\n"); abort(); } return (ret); } Index: projects/nand/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.1 =================================================================== --- projects/nand/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.1 (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.1 (revision 235263) @@ -1,67 +1,67 @@ '\" te .\" Copyright (c) 2011, Martin Matuska . .\" All Rights Reserved. .\" .\" The contents of this file are subject to the terms of the .\" Common Development and Distribution License (the "License"). .\" You may not use this file except in compliance with the License. .\" .\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE .\" or http://www.opensolaris.org/os/licensing. .\" See the License for the specific language governing permissions .\" and limitations under the License. .\" .\" When distributing Covered Code, include this CDDL HEADER in each .\" file and include the License file at usr/src/OPENSOLARIS.LICENSE. .\" If applicable, add the following below this CDDL HEADER, with the .\" fields enclosed by brackets "[]" replaced with your own identifying .\" information: Portions Copyright [yyyy] [name of copyright owner] .\" .\" Copyright (c) 2009, Sun Microsystems, Inc. All Rights Reserved. .\" .\" $FreeBSD$ .\" .Dd November 26, 2011 .Dt ZSTREAMDUMP 8 .Os .Sh NAME -.Nm zdb +.Nm zstreamdump .Nd filter data in zfs send stream .Sh SYNOPSIS .Nm .Op Fl C .Op Fl v .Sh DESCRIPTION The .Nm utility reads from the output of the .Qq Nm zfs Cm send command, then displays headers and some statistics from that output. See .Xr zfs 8 . .Pp The following options are supported: .Bl -tag -width indent .It Fl C Suppress the validation of checksums. .It Fl v Verbose. Dump all headers, not only begin and end headers. .El .Sh SEE ALSO .Xr zfs 8 .Sh AUTHORS This manual page is a .Xr mdoc 7 reimplementation of the .Tn OpenSolaris manual page .Em zstreamdump(1M) , modified and customized for .Fx and licensed under the .Tn Common Development and Distribution License .Pq Tn CDDL . .Pp The .Xr mdoc 7 implementation of this manual page was initially written by .An Martin Matuska Aq mm@FreeBSD.org . Index: projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h =================================================================== --- projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h (revision 235263) @@ -1,753 +1,760 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. 
* * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright 2011 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2011 Pawel Jakub Dawidek . * All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. - * All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. + * Copyright (c) 2012 Martin Matuska . All rights reserved. */ #ifndef _LIBZFS_H #define _LIBZFS_H #include <assert.h> #include <libnvpair.h> #include <sys/mnttab.h> #include <sys/param.h> #include <sys/types.h> #include <sys/varargs.h> #include <sys/fs/zfs.h> #include <sys/avl.h> #include <ucred.h> #ifdef __cplusplus extern "C" { #endif /* * Miscellaneous ZFS constants */ #define ZFS_MAXNAMELEN MAXNAMELEN #define ZPOOL_MAXNAMELEN MAXNAMELEN #define ZFS_MAXPROPLEN MAXPATHLEN #define ZPOOL_MAXPROPLEN MAXPATHLEN /* * libzfs errors */ enum { EZFS_NOMEM = 2000, /* out of memory */ EZFS_BADPROP, /* invalid property value */ EZFS_PROPREADONLY, /* cannot set readonly property */ EZFS_PROPTYPE, /* property does not apply to dataset type */ EZFS_PROPNONINHERIT, /* property is not inheritable */ EZFS_PROPSPACE, /* bad quota or reservation */ EZFS_BADTYPE, /* dataset is not of appropriate type */ EZFS_BUSY, /* pool or dataset is busy */ EZFS_EXISTS, /* pool or dataset already exists */ EZFS_NOENT, /* no such pool or dataset */ EZFS_BADSTREAM, /* bad backup stream */ EZFS_DSREADONLY, /* dataset is readonly */ EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */ EZFS_INVALIDNAME, /* invalid dataset name */ EZFS_BADRESTORE, /* unable to restore to destination */ EZFS_BADBACKUP, /* backup failed */ EZFS_BADTARGET, /* bad attach/detach/replace target */ EZFS_NODEVICE, /* no such device in pool */ EZFS_BADDEV, /* invalid device to add */ EZFS_NOREPLICAS, /* no valid replicas */ EZFS_RESILVERING, /* currently resilvering */ EZFS_BADVERSION, /* unsupported version */ EZFS_POOLUNAVAIL, /* pool is currently unavailable */ EZFS_DEVOVERFLOW, /* too many devices in one vdev */ EZFS_BADPATH, /* must be an absolute path */ EZFS_CROSSTARGET, /* rename or clone across pool or dataset */ EZFS_ZONED, /* used improperly in local zone */ EZFS_MOUNTFAILED, /* failed to mount dataset */ EZFS_UMOUNTFAILED, /* failed to unmount dataset */ EZFS_UNSHARENFSFAILED, /* unshare(1M) failed */ EZFS_SHARENFSFAILED, /* share(1M) failed */ EZFS_PERM, /* permission denied */ EZFS_NOSPC, /* out of space */ EZFS_FAULT, /* bad address */ EZFS_IO, /* I/O error */ EZFS_INTR, /* signal received */ EZFS_ISSPARE, /* device is a hot spare */ EZFS_INVALCONFIG, /* invalid vdev configuration */ EZFS_RECURSIVE, /* recursive dependency */ EZFS_NOHISTORY, /* no history object */ EZFS_POOLPROPS, /* couldn't retrieve pool props */ EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */ EZFS_POOL_INVALARG, /* invalid argument for this pool operation */ EZFS_NAMETOOLONG, /* dataset name is too long */ EZFS_OPENFAILED, /* open of device failed */ EZFS_NOCAP, /* couldn't get capacity */ EZFS_LABELFAILED, /* write of label failed */ EZFS_BADWHO, /* 
invalid permission who */ EZFS_BADPERM, /* invalid permission */ EZFS_BADPERMSET, /* invalid permission set name */ EZFS_NODELEGATION, /* delegated administration is disabled */ EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */ EZFS_SHARESMBFAILED, /* failed to share over smb */ EZFS_BADCACHE, /* bad cache file */ EZFS_ISL2CACHE, /* device is for the level 2 ARC */ EZFS_VDEVNOTSUP, /* unsupported vdev type */ EZFS_NOTSUP, /* ops not supported on this dataset */ EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */ EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */ EZFS_REFTAG_RELE, /* snapshot release: tag not found */ EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */ EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */ EZFS_PIPEFAILED, /* pipe create failed */ EZFS_THREADCREATEFAILED, /* thread create failed */ EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */ EZFS_SCRUBBING, /* currently scrubbing */ EZFS_NO_SCRUB, /* no active scrub */ EZFS_DIFF, /* general failure of zfs diff */ EZFS_DIFFDATA, /* bad zfs diff data */ EZFS_POOLREADONLY, /* pool is in read-only mode */ EZFS_UNKNOWN }; /* * The following data structures are all part of the zfs_allow_t * structure, which is used for printing 'allow' permissions. It is a * linked list of zfs_allow_t's, each of which contains AVL trees for * users, groups, sets, etc.; each entry in those trees in turn holds * an AVL tree of the permissions it carries, along with whether they * are local, descendent or local+descendent permissions. The AVL * trees are used primarily for sorting, but also so that we can * quickly find a given user and/or permission. */ typedef struct zfs_perm_node { avl_node_t z_node; char z_pname[MAXPATHLEN]; } zfs_perm_node_t; typedef struct zfs_allow_node { avl_node_t z_node; char z_key[MAXPATHLEN]; /* name, such as joe */ avl_tree_t z_localdescend; /* local+descendent perms */ avl_tree_t z_local; /* local permissions */ avl_tree_t z_descend; /* descendent permissions */ } zfs_allow_node_t; typedef struct zfs_allow { struct zfs_allow *z_next; char z_setpoint[MAXPATHLEN]; avl_tree_t z_sets; avl_tree_t z_crperms; avl_tree_t z_user; avl_tree_t z_group; avl_tree_t z_everyone; } zfs_allow_t; /* * Basic handle types */ typedef struct zfs_handle zfs_handle_t; typedef struct zpool_handle zpool_handle_t; typedef struct libzfs_handle libzfs_handle_t; /* * Library initialization */ extern libzfs_handle_t *libzfs_init(void); extern void libzfs_fini(libzfs_handle_t *); extern libzfs_handle_t *zpool_get_handle(zpool_handle_t *); extern libzfs_handle_t *zfs_get_handle(zfs_handle_t *); extern void libzfs_print_on_error(libzfs_handle_t *, boolean_t); extern int libzfs_errno(libzfs_handle_t *); extern const char *libzfs_error_action(libzfs_handle_t *); extern const char *libzfs_error_description(libzfs_handle_t *); extern void libzfs_mnttab_init(libzfs_handle_t *); extern void libzfs_mnttab_fini(libzfs_handle_t *); extern void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t); extern int libzfs_mnttab_find(libzfs_handle_t *, const char *, struct mnttab *); extern void libzfs_mnttab_add(libzfs_handle_t *, const char *, const char *, const char *); extern void libzfs_mnttab_remove(libzfs_handle_t *, const char *); /* * Basic handle functions */ extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *); extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *); extern void zpool_close(zpool_handle_t *); extern const char *zpool_get_name(zpool_handle_t *); extern int 
zpool_get_state(zpool_handle_t *); extern const char *zpool_state_to_name(vdev_state_t, vdev_aux_t); extern const char *zpool_pool_state_to_name(pool_state_t); extern void zpool_free_handles(libzfs_handle_t *); /* * Iterate over all active pools in the system. */ typedef int (*zpool_iter_f)(zpool_handle_t *, void *); extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *); /* * Functions to create and destroy pools */ extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *, nvlist_t *, nvlist_t *); extern int zpool_destroy(zpool_handle_t *); extern int zpool_add(zpool_handle_t *, nvlist_t *); typedef struct splitflags { /* do not split, but return the config that would be split off */ int dryrun : 1; /* after splitting, import the pool */ int import : 1; } splitflags_t; /* * Functions to manipulate pool and vdev state */ extern int zpool_scan(zpool_handle_t *, pool_scan_func_t); extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *); extern int zpool_reguid(zpool_handle_t *); extern int zpool_vdev_online(zpool_handle_t *, const char *, int, vdev_state_t *); extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t); extern int zpool_vdev_attach(zpool_handle_t *, const char *, const char *, nvlist_t *, int); extern int zpool_vdev_detach(zpool_handle_t *, const char *); extern int zpool_vdev_remove(zpool_handle_t *, const char *); extern int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **, nvlist_t *, splitflags_t); extern int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t); extern int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t); extern int zpool_vdev_clear(zpool_handle_t *, uint64_t); extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *, boolean_t *, boolean_t *); extern nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *, boolean_t *, boolean_t *, boolean_t *); extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, const char *); /* * Functions to manage pool properties */ extern int zpool_set_prop(zpool_handle_t *, const char *, const char *); extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *, size_t proplen, zprop_source_t *); extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t, zprop_source_t *); extern const char *zpool_prop_to_name(zpool_prop_t); extern const char *zpool_prop_values(zpool_prop_t); /* * Pool health statistics. */ typedef enum { /* * The following correspond to faults as defined in the (fault.fs.zfs.*) * event namespace. Each is associated with a corresponding message ID. */ ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */ ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */ ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */ ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */ ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */ ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */ ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */ ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */ ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */ ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */ ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */ ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */ ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */ ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */ /* * These faults have no corresponding message ID. 
At the time we are * checking the status, the original reason for the FMA fault (I/O or * checksum errors) has been lost. */ ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */ ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */ /* * The following are not faults per se, but still errors possibly * requiring administrative attention. There is no corresponding * message ID. */ ZPOOL_STATUS_VERSION_OLDER, /* older on-disk version */ ZPOOL_STATUS_RESILVERING, /* device being resilvered */ ZPOOL_STATUS_OFFLINE_DEV, /* device taken offline */ ZPOOL_STATUS_REMOVED_DEV, /* removed device */ /* * Finally, the following indicates a healthy pool. */ ZPOOL_STATUS_OK } zpool_status_t; extern zpool_status_t zpool_get_status(zpool_handle_t *, char **); extern zpool_status_t zpool_import_status(nvlist_t *, char **); extern void zpool_dump_ddt(const ddt_stat_t *dds, const ddt_histogram_t *ddh); /* * Statistics and configuration functions. */ extern nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **); extern int zpool_refresh_stats(zpool_handle_t *, boolean_t *); extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **); /* * Import and export functions */ extern int zpool_export(zpool_handle_t *, boolean_t); extern int zpool_export_force(zpool_handle_t *); extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *, char *altroot); extern int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *, nvlist_t *, int); /* * Search for pools to import */ typedef struct importargs { char **path; /* a list of paths to search */ int paths; /* number of paths to search */ char *poolname; /* name of a pool to find */ uint64_t guid; /* guid of a pool to find */ char *cachefile; /* cachefile to use for import */ int can_be_active : 1; /* can the pool be active? */ int unique : 1; /* does 'poolname' already exist? */ int exists : 1; /* set on return if pool already exists */ } importargs_t; extern nvlist_t *zpool_search_import(libzfs_handle_t *, importargs_t *); /* legacy pool search routines */ extern nvlist_t *zpool_find_import(libzfs_handle_t *, int, char **); extern nvlist_t *zpool_find_import_cached(libzfs_handle_t *, const char *, char *, uint64_t); /* * Miscellaneous pool functions */ struct zfs_cmd; extern const char *zfs_history_event_names[LOG_END]; extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *, boolean_t verbose); extern int zpool_upgrade(zpool_handle_t *, uint64_t); extern int zpool_get_history(zpool_handle_t *, nvlist_t **); extern int zpool_history_unpack(char *, uint64_t, uint64_t *, nvlist_t ***, uint_t *); extern void zpool_set_history_str(const char *subcommand, int argc, char **argv, char *history_str); extern int zpool_stage_history(libzfs_handle_t *, const char *); extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *, size_t len); extern int zfs_ioctl(libzfs_handle_t *, unsigned long, struct zfs_cmd *); extern int zpool_get_physpath(zpool_handle_t *, char *, size_t); extern void zpool_explain_recover(libzfs_handle_t *, const char *, int, nvlist_t *); /* * Basic handle manipulations. These functions do not create or destroy the * underlying datasets, only the references to them. 
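 * A typical open/use/close sequence (a sketch; the dataset name and
 * surrounding error handling are hypothetical):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL) {
 *		... work with zfs_get_name(zhp), zfs_get_type(zhp), etc. ...
 *		zfs_close(zhp);
 *	}
 *	libzfs_fini(hdl);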
*/ extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int); extern zfs_handle_t *zfs_handle_dup(zfs_handle_t *); extern void zfs_close(zfs_handle_t *); extern zfs_type_t zfs_get_type(const zfs_handle_t *); extern const char *zfs_get_name(const zfs_handle_t *); extern zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *); /* * Property management functions. Some functions are shared with the kernel, * and are found in sys/fs/zfs.h. */ /* * zfs dataset property management */ extern const char *zfs_prop_default_string(zfs_prop_t); extern uint64_t zfs_prop_default_numeric(zfs_prop_t); extern const char *zfs_prop_column_name(zfs_prop_t); extern boolean_t zfs_prop_align_right(zfs_prop_t); extern nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t, nvlist_t *, uint64_t, zfs_handle_t *, const char *); extern const char *zfs_prop_to_name(zfs_prop_t); extern int zfs_prop_set(zfs_handle_t *, const char *, const char *); extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t, zprop_source_t *, char *, size_t, boolean_t); extern int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t, boolean_t); extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *, zprop_source_t *, char *, size_t); extern int zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue); extern int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal); extern int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue); extern int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal); extern int zfs_get_snapused_int(zfs_handle_t *firstsnap, zfs_handle_t *lastsnap, uint64_t *usedp); extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t); extern int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t); extern const char *zfs_prop_values(zfs_prop_t); extern int zfs_prop_is_string(zfs_prop_t prop); extern nvlist_t *zfs_get_user_props(zfs_handle_t *); extern nvlist_t *zfs_get_recvd_props(zfs_handle_t *); extern nvlist_t *zfs_get_clones_nvl(zfs_handle_t *); typedef struct zprop_list { int pl_prop; char *pl_user_prop; struct zprop_list *pl_next; boolean_t pl_all; size_t pl_width; size_t pl_recvd_width; boolean_t pl_fixed; } zprop_list_t; extern int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t); extern void zfs_prune_proplist(zfs_handle_t *, uint8_t *); #define ZFS_MOUNTPOINT_NONE "none" #define ZFS_MOUNTPOINT_LEGACY "legacy" /* * zpool property management */ extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **); extern const char *zpool_prop_default_string(zpool_prop_t); extern uint64_t zpool_prop_default_numeric(zpool_prop_t); extern const char *zpool_prop_column_name(zpool_prop_t); extern boolean_t zpool_prop_align_right(zpool_prop_t); /* * Functions shared by zfs and zpool property management. 
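 * For example (a sketch; the property list is hypothetical), "get"
 * style commands parse their property argument into a zprop_list_t:
 *
 *	zprop_list_t *pl = NULL;
 *	if (zprop_get_list(hdl, "name,used,available", &pl,
 *	    ZFS_TYPE_DATASET) == 0) {
 *		... walk the pl->pl_next chain ...
 *		zprop_free_list(pl);
 *	}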
*/ extern int zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered, zfs_type_t type); extern int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **, zfs_type_t); extern void zprop_free_list(zprop_list_t *); #define ZFS_GET_NCOLS 5 typedef enum { GET_COL_NONE, GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, GET_COL_RECVD, GET_COL_SOURCE } zfs_get_column_t; /* * Functions for printing zfs or zpool properties */ typedef struct zprop_get_cbdata { int cb_sources; zfs_get_column_t cb_columns[ZFS_GET_NCOLS]; int cb_colwidths[ZFS_GET_NCOLS + 1]; boolean_t cb_scripted; boolean_t cb_literal; boolean_t cb_first; zprop_list_t *cb_proplist; zfs_type_t cb_type; } zprop_get_cbdata_t; void zprop_print_one_property(const char *, zprop_get_cbdata_t *, const char *, const char *, zprop_source_t, const char *, const char *); /* * Iterator functions. */ typedef int (*zfs_iter_f)(zfs_handle_t *, void *); extern int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f, void *); extern int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *); extern int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *); extern int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f, void *); typedef struct get_all_cb { zfs_handle_t **cb_handles; size_t cb_alloc; size_t cb_used; boolean_t cb_verbose; int (*cb_getone)(zfs_handle_t *, void *); } get_all_cb_t; void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *); int libzfs_dataset_cmp(const void *, const void *); /* * Functions to create and destroy datasets. */ extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t, nvlist_t *); extern int zfs_create_ancestors(libzfs_handle_t *, const char *); extern int zfs_destroy(zfs_handle_t *, boolean_t); extern int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t); extern int zfs_destroy_snaps_nvl(zfs_handle_t *, nvlist_t *, boolean_t); extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *); extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t, nvlist_t *); extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t); typedef struct renameflags { /* recursive rename */ int recurse : 1; /* don't unmount file systems */ int nounmount : 1; + + /* force unmount file systems */ + int forceunmount : 1; } renameflags_t; extern int zfs_rename(zfs_handle_t *, const char *, renameflags_t flags); typedef struct sendflags { /* print informational messages (ie, -v was specified) */ boolean_t verbose; /* recursive send (ie, -R) */ boolean_t replicate; /* for incrementals, do all intermediate snapshots */ boolean_t doall; /* if dataset is a clone, do incremental from its origin */ boolean_t fromorigin; /* do deduplication */ boolean_t dedup; /* send properties (ie, -p) */ boolean_t props; /* do not send (no-op, ie. -n) */ boolean_t dryrun; /* parsable verbose output (ie. -P) */ boolean_t parsable; + + /* show progress (ie. 
-v) */ + boolean_t progress; } sendflags_t; typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *); extern int zfs_send(zfs_handle_t *, const char *, const char *, sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **); extern int zfs_promote(zfs_handle_t *); extern int zfs_hold(zfs_handle_t *, const char *, const char *, boolean_t, boolean_t, boolean_t, int, uint64_t, uint64_t); extern int zfs_release(zfs_handle_t *, const char *, const char *, boolean_t); extern int zfs_get_holds(zfs_handle_t *, nvlist_t **); extern uint64_t zvol_volsize_to_reservation(uint64_t, nvlist_t *); typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain, uid_t rid, uint64_t space); extern int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t, zfs_userspace_cb_t, void *); extern int zfs_get_fsacl(zfs_handle_t *, nvlist_t **); extern int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *); typedef struct recvflags { /* print informational messages (ie, -v was specified) */ boolean_t verbose; /* the destination is a prefix, not the exact fs (ie, -d) */ boolean_t isprefix; /* * Only the tail of the sent snapshot path is appended to the * destination to determine the received snapshot name (ie, -e). */ boolean_t istail; /* do not actually do the recv, just check if it would work (ie, -n) */ boolean_t dryrun; /* rollback/destroy filesystems as necessary (eg, -F) */ boolean_t force; /* set "canmount=off" on all modified filesystems */ boolean_t canmountoff; /* byteswap flag is used internally; callers need not specify */ boolean_t byteswap; /* do not mount file systems as they are extracted (private) */ boolean_t nomount; } recvflags_t; extern int zfs_receive(libzfs_handle_t *, const char *, recvflags_t *, int, avl_tree_t *); typedef enum diff_flags { ZFS_DIFF_PARSEABLE = 0x1, ZFS_DIFF_TIMESTAMP = 0x2, ZFS_DIFF_CLASSIFY = 0x4 } diff_flags_t; extern int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *, int); /* * Miscellaneous functions. */ extern const char *zfs_type_to_name(zfs_type_t); extern void zfs_refresh_properties(zfs_handle_t *); extern int zfs_name_valid(const char *, zfs_type_t); extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t); extern boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *, zfs_type_t); extern int zfs_spa_version(zfs_handle_t *, int *); /* * Mount support functions. */ extern boolean_t is_mounted(libzfs_handle_t *, const char *special, char **); extern boolean_t zfs_is_mounted(zfs_handle_t *, char **); extern int zfs_mount(zfs_handle_t *, const char *, int); extern int zfs_unmount(zfs_handle_t *, const char *, int); extern int zfs_unmountall(zfs_handle_t *, int); /* * Share support functions. */ extern boolean_t zfs_is_shared(zfs_handle_t *); extern int zfs_share(zfs_handle_t *); extern int zfs_unshare(zfs_handle_t *); /* * Protocol-specific share support functions. 
*/ extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **); extern boolean_t zfs_is_shared_smb(zfs_handle_t *, char **); extern int zfs_share_nfs(zfs_handle_t *); extern int zfs_share_smb(zfs_handle_t *); extern int zfs_shareall(zfs_handle_t *); extern int zfs_unshare_nfs(zfs_handle_t *, const char *); extern int zfs_unshare_smb(zfs_handle_t *, const char *); extern int zfs_unshareall_nfs(zfs_handle_t *); extern int zfs_unshareall_smb(zfs_handle_t *); extern int zfs_unshareall_bypath(zfs_handle_t *, const char *); extern int zfs_unshareall(zfs_handle_t *); extern int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *, char *, void *, void *, int, zfs_share_op_t); /* * FreeBSD-specific jail support function. */ extern int zfs_jail(zfs_handle_t *, int, int); /* * When dealing with nvlists, verify() is extremely useful */ #ifndef verify #ifdef NDEBUG #define verify(EX) ((void)(EX)) #else #define verify(EX) assert(EX) #endif #endif /* * Utility function to convert a number to a human-readable form. */ extern void zfs_nicenum(uint64_t, char *, size_t); extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *); /* * Given a device or file, determine if it is part of a pool. */ extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **, boolean_t *); /* * Label manipulation. */ extern int zpool_read_label(int, nvlist_t **); extern int zpool_clear_label(int); /* is this zvol valid for use as a dump device? */ extern int zvol_check_dump_config(char *); /* * Management interfaces for SMB ACL files */ int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *); int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *); int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *); int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *, char *); /* * Enable and disable datasets within a pool by mounting/unmounting and * sharing/unsharing them. */ extern int zpool_enable_datasets(zpool_handle_t *, const char *, int); extern int zpool_disable_datasets(zpool_handle_t *, boolean_t); /* * Mappings between vdev and FRU. */ extern void libzfs_fru_refresh(libzfs_handle_t *); extern const char *libzfs_fru_lookup(libzfs_handle_t *, const char *); extern const char *libzfs_fru_devpath(libzfs_handle_t *, const char *); extern boolean_t libzfs_fru_compare(libzfs_handle_t *, const char *, const char *); extern boolean_t libzfs_fru_notself(libzfs_handle_t *, const char *); extern int zpool_fru_set(zpool_handle_t *, uint64_t, const char *); #ifndef sun extern int zmount(const char *, const char *, int, char *, char *, int, char *, int); #endif /* !sun */ #ifdef __cplusplus } #endif #endif /* _LIBZFS_H */ Index: projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c =================================================================== --- projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c (revision 235263) @@ -1,4456 +1,4458 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. 
* * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright 2010 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. * Copyright (c) 2011-2012 Pawel Jakub Dawidek . * All rights reserved. + * Copyright (c) 2012 Martin Matuska . All rights reserved. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_namecheck.h" #include "zfs_prop.h" #include "libzfs_impl.h" #include "zfs_deleg.h" static int userquota_propname_decode(const char *propname, boolean_t zoned, zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp); /* * Given a single type (not a mask of types), return the type in a human * readable form. */ const char * zfs_type_to_name(zfs_type_t type) { switch (type) { case ZFS_TYPE_FILESYSTEM: return (dgettext(TEXT_DOMAIN, "filesystem")); case ZFS_TYPE_SNAPSHOT: return (dgettext(TEXT_DOMAIN, "snapshot")); case ZFS_TYPE_VOLUME: return (dgettext(TEXT_DOMAIN, "volume")); } return (NULL); } /* * Given a path and mask of ZFS types, return a string describing this dataset. * This is used when we fail to open a dataset and we cannot get an exact type. * We guess what the type would have been based on the path and the mask of * acceptable types. */ static const char * path_to_str(const char *path, int types) { /* * When given a single type, always report the exact type. */ if (types == ZFS_TYPE_SNAPSHOT) return (dgettext(TEXT_DOMAIN, "snapshot")); if (types == ZFS_TYPE_FILESYSTEM) return (dgettext(TEXT_DOMAIN, "filesystem")); if (types == ZFS_TYPE_VOLUME) return (dgettext(TEXT_DOMAIN, "volume")); /* * The user is requesting more than one type of dataset. If this is the * case, consult the path itself. If we're looking for a snapshot, and * a '@' is found, then report it as "snapshot". Otherwise, remove the * snapshot attribute and try again. */ if (types & ZFS_TYPE_SNAPSHOT) { if (strchr(path, '@') != NULL) return (dgettext(TEXT_DOMAIN, "snapshot")); return (path_to_str(path, types & ~ZFS_TYPE_SNAPSHOT)); } /* * The user has requested either filesystems or volumes. * We have no way of knowing a priori what type this would be, so always * report it as "filesystem" or "volume", our two primitive types. */ if (types & ZFS_TYPE_FILESYSTEM) return (dgettext(TEXT_DOMAIN, "filesystem")); assert(types & ZFS_TYPE_VOLUME); return (dgettext(TEXT_DOMAIN, "volume")); } /* * Validate a ZFS path. This is used even before trying to open the dataset, to * provide a more meaningful error message. We call zfs_error_aux() to * explain exactly why the name was not valid. 
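 * For example (hypothetical names), "tank/fs!bad" is rejected with
 * "invalid character '!' in name", and "tank/fs@a@b" with
 * "multiple '@' delimiters in name".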
*/ int zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type, boolean_t modifying) { namecheck_err_t why; char what; (void) zfs_prop_get_table(); if (dataset_namecheck(path, &why, &what) != 0) { if (hdl != NULL) { switch (why) { case NAME_ERR_TOOLONG: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is too long")); break; case NAME_ERR_LEADING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "leading slash in name")); break; case NAME_ERR_EMPTY_COMPONENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "empty component in name")); break; case NAME_ERR_TRAILING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "trailing slash in name")); break; case NAME_ERR_INVALCHAR: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid character " "'%c' in name"), what); break; case NAME_ERR_MULTIPLE_AT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "multiple '@' delimiters in name")); break; case NAME_ERR_NOLETTER: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool doesn't begin with a letter")); break; case NAME_ERR_RESERVED: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is reserved")); break; case NAME_ERR_DISKLIKE: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "reserved disk name")); break; } } return (0); } if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshot delimiter '@' in filesystem name")); return (0); } if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing '@' delimiter in snapshot name")); return (0); } if (modifying && strchr(path, '%') != NULL) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid character %c in name"), '%'); return (0); } return (-1); } int zfs_name_valid(const char *name, zfs_type_t type) { if (type == ZFS_TYPE_POOL) return (zpool_name_valid(NULL, B_FALSE, name)); return (zfs_validate_name(NULL, name, type, B_FALSE)); } /* * This function takes the raw DSL properties, and filters out the user-defined * properties into a separate nvlist. */ static nvlist_t * process_user_props(zfs_handle_t *zhp, nvlist_t *props) { libzfs_handle_t *hdl = zhp->zfs_hdl; nvpair_t *elem; nvlist_t *propval; nvlist_t *nvl; if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) { (void) no_memory(hdl); return (NULL); } elem = NULL; while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { if (!zfs_prop_user(nvpair_name(elem))) continue; verify(nvpair_value_nvlist(elem, &propval) == 0); if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) { nvlist_free(nvl); (void) no_memory(hdl); return (NULL); } } return (nvl); } static zpool_handle_t * zpool_add_handle(zfs_handle_t *zhp, const char *pool_name) { libzfs_handle_t *hdl = zhp->zfs_hdl; zpool_handle_t *zph; if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) { if (hdl->libzfs_pool_handles != NULL) zph->zpool_next = hdl->libzfs_pool_handles; hdl->libzfs_pool_handles = zph; } return (zph); } static zpool_handle_t * zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len) { libzfs_handle_t *hdl = zhp->zfs_hdl; zpool_handle_t *zph = hdl->libzfs_pool_handles; while ((zph != NULL) && (strncmp(pool_name, zpool_get_name(zph), len) != 0)) zph = zph->zpool_next; return (zph); } /* * Returns a handle to the pool that contains the provided dataset. * If a handle to that pool already exists then that handle is returned. * Otherwise, a new handle is created and added to the list of handles. 
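/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * zpool_handle() below derives the pool component by cutting the
 * dataset name at the first '/' or '@'.  The same extraction as a
 * minimal standalone function, using only libc:
 */
#include <string.h>

static void
pool_component(const char *dsname, char *buf, size_t buflen)
{
	size_t len = strcspn(dsname, "/@");	/* chars before '/' or '@' */

	if (buflen == 0)
		return;
	if (len >= buflen)
		len = buflen - 1;
	(void) memcpy(buf, dsname, len);
	buf[len] = '\0';
}

/* pool_component("tank/home@snap", buf, sizeof (buf)) yields "tank". */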
*/ static zpool_handle_t * zpool_handle(zfs_handle_t *zhp) { char *pool_name; int len; zpool_handle_t *zph; len = strcspn(zhp->zfs_name, "/@") + 1; pool_name = zfs_alloc(zhp->zfs_hdl, len); (void) strlcpy(pool_name, zhp->zfs_name, len); zph = zpool_find_handle(zhp, pool_name, len); if (zph == NULL) zph = zpool_add_handle(zhp, pool_name); free(pool_name); return (zph); } void zpool_free_handles(libzfs_handle_t *hdl) { zpool_handle_t *next, *zph = hdl->libzfs_pool_handles; while (zph != NULL) { next = zph->zpool_next; zpool_close(zph); zph = next; } hdl->libzfs_pool_handles = NULL; } /* * Utility function to gather stats (objset and zpl) for the given object. */ static int get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc) { libzfs_handle_t *hdl = zhp->zfs_hdl; (void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name)); while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, zc) != 0) { if (errno == ENOMEM) { if (zcmd_expand_dst_nvlist(hdl, zc) != 0) { return (-1); } } else { return (-1); } } return (0); } /* * Utility function to get the received properties of the given object. */ static int get_recvd_props_ioctl(zfs_handle_t *zhp) { libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *recvdprops; zfs_cmd_t zc = { 0 }; int err; if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) return (-1); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) { if (errno == ENOMEM) { if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { return (-1); } } else { zcmd_free_nvlists(&zc); return (-1); } } err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops); zcmd_free_nvlists(&zc); if (err != 0) return (-1); nvlist_free(zhp->zfs_recvd_props); zhp->zfs_recvd_props = recvdprops; return (0); } static int put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc) { nvlist_t *allprops, *userprops; zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */ if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) { return (-1); } /* * XXX Why do we store the user props separately, in addition to * storing them in zfs_props? */ if ((userprops = process_user_props(zhp, allprops)) == NULL) { nvlist_free(allprops); return (-1); } nvlist_free(zhp->zfs_props); nvlist_free(zhp->zfs_user_props); zhp->zfs_props = allprops; zhp->zfs_user_props = userprops; return (0); } static int get_stats(zfs_handle_t *zhp) { int rc = 0; zfs_cmd_t zc = { 0 }; if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0) return (-1); if (get_stats_ioctl(zhp, &zc) != 0) rc = -1; else if (put_stats_zhdl(zhp, &zc) != 0) rc = -1; zcmd_free_nvlists(&zc); return (rc); } /* * Refresh the properties currently stored in the handle. */ void zfs_refresh_properties(zfs_handle_t *zhp) { (void) get_stats(zhp); } /* * Makes a handle from the given dataset name. Used by zfs_open() and * zfs_iter_* to create child handles on the fly. */ static int make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc) { if (put_stats_zhdl(zhp, zc) != 0) return (-1); /* * We've managed to open the dataset and gather statistics. Determine * the high-level type. 
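/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * The stats ioctls above (get_stats_ioctl(), get_recvd_props_ioctl())
 * share a grow-and-retry idiom: ENOMEM from the kernel means the
 * destination nvlist buffer was too small, so the buffer is expanded
 * and the call reissued.  Schematically, with hypothetical stand-ins
 * for the ioctl and the buffer growth:
 */
#include <errno.h>

extern int issue_ioctl(void);		/* hypothetical */
extern int grow_dst_buffer(void);	/* hypothetical */

static int
retry_until_it_fits(void)
{
	while (issue_ioctl() != 0) {
		if (errno != ENOMEM)
			return (-1);	/* a real failure; give up */
		if (grow_dst_buffer() != 0)
			return (-1);	/* could not enlarge the buffer */
		/* buffer enlarged; reissue the ioctl */
	}
	return (0);
}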
*/ if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) zhp->zfs_head_type = ZFS_TYPE_VOLUME; else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM; else abort(); if (zhp->zfs_dmustats.dds_is_snapshot) zhp->zfs_type = ZFS_TYPE_SNAPSHOT; else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL) zhp->zfs_type = ZFS_TYPE_VOLUME; else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS) zhp->zfs_type = ZFS_TYPE_FILESYSTEM; else abort(); /* we should never see any other types */ if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) return (-1); return (0); } zfs_handle_t * make_dataset_handle(libzfs_handle_t *hdl, const char *path) { zfs_cmd_t zc = { 0 }; zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1); if (zhp == NULL) return (NULL); zhp->zfs_hdl = hdl; (void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name)); if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) { free(zhp); return (NULL); } if (get_stats_ioctl(zhp, &zc) == -1) { zcmd_free_nvlists(&zc); free(zhp); return (NULL); } if (make_dataset_handle_common(zhp, &zc) == -1) { free(zhp); zhp = NULL; } zcmd_free_nvlists(&zc); return (zhp); } zfs_handle_t * make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc) { zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1); if (zhp == NULL) return (NULL); zhp->zfs_hdl = hdl; (void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name)); if (make_dataset_handle_common(zhp, zc) == -1) { free(zhp); return (NULL); } return (zhp); } zfs_handle_t * make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc) { zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1); if (zhp == NULL) return (NULL); zhp->zfs_hdl = pzhp->zfs_hdl; (void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name)); zhp->zfs_head_type = pzhp->zfs_type; zhp->zfs_type = ZFS_TYPE_SNAPSHOT; zhp->zpool_hdl = zpool_handle(zhp); return (zhp); } zfs_handle_t * zfs_handle_dup(zfs_handle_t *zhp_orig) { zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1); if (zhp == NULL) return (NULL); zhp->zfs_hdl = zhp_orig->zfs_hdl; zhp->zpool_hdl = zhp_orig->zpool_hdl; (void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name, sizeof (zhp->zfs_name)); zhp->zfs_type = zhp_orig->zfs_type; zhp->zfs_head_type = zhp_orig->zfs_head_type; zhp->zfs_dmustats = zhp_orig->zfs_dmustats; if (zhp_orig->zfs_props != NULL) { if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) { (void) no_memory(zhp->zfs_hdl); zfs_close(zhp); return (NULL); } } if (zhp_orig->zfs_user_props != NULL) { if (nvlist_dup(zhp_orig->zfs_user_props, &zhp->zfs_user_props, 0) != 0) { (void) no_memory(zhp->zfs_hdl); zfs_close(zhp); return (NULL); } } if (zhp_orig->zfs_recvd_props != NULL) { if (nvlist_dup(zhp_orig->zfs_recvd_props, &zhp->zfs_recvd_props, 0)) { (void) no_memory(zhp->zfs_hdl); zfs_close(zhp); return (NULL); } } zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck; if (zhp_orig->zfs_mntopts != NULL) { zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl, zhp_orig->zfs_mntopts); } zhp->zfs_props_table = zhp_orig->zfs_props_table; return (zhp); } /* * Opens the given snapshot, filesystem, or volume. The 'types' * argument is a mask of acceptable types. The function will print an * appropriate error message and return NULL if it can't be opened. */ zfs_handle_t * zfs_open(libzfs_handle_t *hdl, const char *path, int types) { zfs_handle_t *zhp; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot open '%s'"), path); /* * Validate the name before we even try to open it. 
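/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * A minimal consumer of the handle API above: open a dataset by name,
 * accept only filesystems, then release the handle.  The dataset name
 * "tank/home" is a placeholder.
 */
#include <stdio.h>
#include <libzfs.h>

static int
print_if_filesystem(void)
{
	libzfs_handle_t *hdl;
	zfs_handle_t *zhp;

	if ((hdl = libzfs_init()) == NULL)
		return (-1);
	if ((zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM)) == NULL) {
		libzfs_fini(hdl);
		return (-1);	/* zfs_open() already reported the error */
	}
	(void) printf("opened %s\n", zfs_get_name(zhp));
	zfs_close(zhp);
	libzfs_fini(hdl);
	return (0);
}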
*/ if (!zfs_validate_name(hdl, path, ZFS_TYPE_DATASET, B_FALSE)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid dataset name")); (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); return (NULL); } /* * Try to get stats for the dataset, which will tell us if it exists. */ errno = 0; if ((zhp = make_dataset_handle(hdl, path)) == NULL) { (void) zfs_standard_error(hdl, errno, errbuf); return (NULL); } if (!(types & zhp->zfs_type)) { (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); zfs_close(zhp); return (NULL); } return (zhp); } /* * Release a ZFS handle. Nothing to do but free the associated memory. */ void zfs_close(zfs_handle_t *zhp) { if (zhp->zfs_mntopts) free(zhp->zfs_mntopts); nvlist_free(zhp->zfs_props); nvlist_free(zhp->zfs_user_props); nvlist_free(zhp->zfs_recvd_props); free(zhp); } typedef struct mnttab_node { struct mnttab mtn_mt; avl_node_t mtn_node; } mnttab_node_t; static int libzfs_mnttab_cache_compare(const void *arg1, const void *arg2) { const mnttab_node_t *mtn1 = arg1; const mnttab_node_t *mtn2 = arg2; int rv; rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special); if (rv == 0) return (0); return (rv > 0 ? 1 : -1); } void libzfs_mnttab_init(libzfs_handle_t *hdl) { assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0); avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare, sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node)); } void libzfs_mnttab_update(libzfs_handle_t *hdl) { struct mnttab entry; rewind(hdl->libzfs_mnttab); while (getmntent(hdl->libzfs_mnttab, &entry) == 0) { mnttab_node_t *mtn; if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) continue; mtn = zfs_alloc(hdl, sizeof (mnttab_node_t)); mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special); mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp); mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype); mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts); avl_add(&hdl->libzfs_mnttab_cache, mtn); } } void libzfs_mnttab_fini(libzfs_handle_t *hdl) { void *cookie = NULL; mnttab_node_t *mtn; while (mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie)) { free(mtn->mtn_mt.mnt_special); free(mtn->mtn_mt.mnt_mountp); free(mtn->mtn_mt.mnt_fstype); free(mtn->mtn_mt.mnt_mntopts); free(mtn); } avl_destroy(&hdl->libzfs_mnttab_cache); } void libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable) { hdl->libzfs_mnttab_enable = enable; } int libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname, struct mnttab *entry) { mnttab_node_t find; mnttab_node_t *mtn; if (!hdl->libzfs_mnttab_enable) { struct mnttab srch = { 0 }; if (avl_numnodes(&hdl->libzfs_mnttab_cache)) libzfs_mnttab_fini(hdl); rewind(hdl->libzfs_mnttab); srch.mnt_special = (char *)fsname; srch.mnt_fstype = MNTTYPE_ZFS; if (getmntany(hdl->libzfs_mnttab, entry, &srch) == 0) return (0); else return (ENOENT); } if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) libzfs_mnttab_update(hdl); find.mtn_mt.mnt_special = (char *)fsname; mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL); if (mtn) { *entry = mtn->mtn_mt; return (0); } return (ENOENT); } void libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special, const char *mountp, const char *mntopts) { mnttab_node_t *mtn; if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) return; mtn = zfs_alloc(hdl, sizeof (mnttab_node_t)); mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special); mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp); mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS); mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts); 
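/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * avl(3) comparators such as libzfs_mnttab_cache_compare() above must
 * return exactly -1, 0, or 1, so the raw strcmp() result has to be
 * clamped.  The same normalization in isolation:
 */
#include <string.h>

static int
clamped_strcmp(const char *a, const char *b)
{
	int rv = strcmp(a, b);

	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);	/* never return strcmp()'s raw value */
}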
avl_add(&hdl->libzfs_mnttab_cache, mtn); } void libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname) { mnttab_node_t find; mnttab_node_t *ret; find.mtn_mt.mnt_special = (char *)fsname; if (ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL)) { avl_remove(&hdl->libzfs_mnttab_cache, ret); free(ret->mtn_mt.mnt_special); free(ret->mtn_mt.mnt_mountp); free(ret->mtn_mt.mnt_fstype); free(ret->mtn_mt.mnt_mntopts); free(ret); } } int zfs_spa_version(zfs_handle_t *zhp, int *spa_version) { zpool_handle_t *zpool_handle = zhp->zpool_hdl; if (zpool_handle == NULL) return (-1); *spa_version = zpool_get_prop_int(zpool_handle, ZPOOL_PROP_VERSION, NULL); return (0); } /* * The choice of reservation property depends on the SPA version. */ static int zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop) { int spa_version; if (zfs_spa_version(zhp, &spa_version) < 0) return (-1); if (spa_version >= SPA_VERSION_REFRESERVATION) *resv_prop = ZFS_PROP_REFRESERVATION; else *resv_prop = ZFS_PROP_RESERVATION; return (0); } /* * Given an nvlist of properties to set, validates that they are correct, and * parses any numeric properties (index, boolean, etc) if they are specified as * strings. */ nvlist_t * zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl, uint64_t zoned, zfs_handle_t *zhp, const char *errbuf) { nvpair_t *elem; uint64_t intval; char *strval; zfs_prop_t prop; nvlist_t *ret; int chosen_normal = -1; int chosen_utf = -1; if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) { (void) no_memory(hdl); return (NULL); } /* * Make sure this property is valid and applies to this type. */ elem = NULL; while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) { const char *propname = nvpair_name(elem); prop = zfs_name_to_prop(propname); if (prop == ZPROP_INVAL && zfs_prop_user(propname)) { /* * This is a user property: make sure it's a * string, and that it's less than ZAP_MAXNAMELEN. */ if (nvpair_type(elem) != DATA_TYPE_STRING) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a string"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property name '%s' is too long"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } (void) nvpair_value_string(elem, &strval); if (nvlist_add_string(ret, propname, strval) != 0) { (void) no_memory(hdl); goto error; } continue; } /* * Currently, only user properties can be modified on * snapshots. 
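/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * As zfs_valid_proplist() enforces above, user properties must be
 * strings and their names must stay under ZAP_MAXNAMELEN.  A consumer
 * would hand it (indirectly, via zfs_prop_set() or zfs_create()) an
 * nvlist such as this; the property name is a placeholder:
 */
#include <libnvpair.h>

static nvlist_t *
sample_user_props(void)
{
	nvlist_t *nvl;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (NULL);
	/* user property names carry a ':' (module:property) */
	if (nvlist_add_string(nvl, "com.example:backup", "weekly") != 0) {
		nvlist_free(nvl);
		return (NULL);
	}
	return (nvl);
}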
*/ if (type == ZFS_TYPE_SNAPSHOT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "this property can not be modified for snapshots")); (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf); goto error; } if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) { zfs_userquota_prop_t uqtype; char newpropname[128]; char domain[128]; uint64_t rid; uint64_t valary[3]; if (userquota_propname_decode(propname, zoned, &uqtype, domain, sizeof (domain), &rid) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' has an invalid user/group name"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (uqtype != ZFS_PROP_USERQUOTA && uqtype != ZFS_PROP_GROUPQUOTA) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is readonly"), propname); (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); goto error; } if (nvpair_type(elem) == DATA_TYPE_STRING) { (void) nvpair_value_string(elem, &strval); if (strcmp(strval, "none") == 0) { intval = 0; } else if (zfs_nicestrtonum(hdl, strval, &intval) != 0) { (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { (void) nvpair_value_uint64(elem, &intval); if (intval == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "use 'none' to disable " "userquota/groupquota")); goto error; } } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a number"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } /* * Encode the prop name as * userquota@-domain, to make it easy * for the kernel to decode. */ (void) snprintf(newpropname, sizeof (newpropname), "%s%llx-%s", zfs_userquota_prop_prefixes[uqtype], (longlong_t)rid, domain); valary[0] = uqtype; valary[1] = rid; valary[2] = intval; if (nvlist_add_uint64_array(ret, newpropname, valary, 3) != 0) { (void) no_memory(hdl); goto error; } continue; } else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is readonly"), propname); (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); goto error; } if (prop == ZPROP_INVAL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property '%s'"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (!zfs_prop_valid_for_type(prop, type)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' does not " "apply to datasets of this type"), propname); (void) zfs_error(hdl, EZFS_PROPTYPE, errbuf); goto error; } if (zfs_prop_readonly(prop) && (!zfs_prop_setonce(prop) || zhp != NULL)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is readonly"), propname); (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); goto error; } if (zprop_parse_value(hdl, elem, prop, type, ret, &strval, &intval, errbuf) != 0) goto error; /* * Perform some additional checks for specific properties. 
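/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * The kernel-facing encoding built above, in isolation: the textual
 * property name carries the rid in hex plus the domain, while the
 * value travels separately as a three-element uint64 array of
 * { type, rid, value }.
 */
#include <stdio.h>
#include <inttypes.h>

static void
encode_userquota(char *buf, size_t buflen, const char *prefix,
    uint64_t rid, const char *domain)
{
	/* e.g. "userquota@" + "7b" + "-" + "S-1-123-456" */
	(void) snprintf(buf, buflen, "%s%" PRIx64 "-%s",
	    prefix, rid, domain);
}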
*/ switch (prop) { case ZFS_PROP_VERSION: { int version; if (zhp == NULL) break; version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); if (intval < version) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Can not downgrade; already at version %u"), version); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; } case ZFS_PROP_RECORDSIZE: case ZFS_PROP_VOLBLOCKSIZE: /* must be power of two within SPA_{MIN,MAX}BLOCKSIZE */ if (intval < SPA_MINBLOCKSIZE || intval > SPA_MAXBLOCKSIZE || !ISP2(intval)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be power of 2 from %u " "to %uk"), propname, (uint_t)SPA_MINBLOCKSIZE, (uint_t)SPA_MAXBLOCKSIZE >> 10); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; case ZFS_PROP_MLSLABEL: { #ifdef sun /* * Verify the mlslabel string and convert to * internal hex label string. */ m_label_t *new_sl; char *hex = NULL; /* internal label string */ /* Default value is already OK. */ if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0) break; /* Verify the label can be converted to binary form */ if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) || (str_to_label(strval, &new_sl, MAC_LABEL, L_NO_CORRECTION, NULL) == -1)) { goto badlabel; } /* Now translate to hex internal label string */ if (label_to_str(new_sl, &hex, M_INTERNAL, DEF_NAMES) != 0) { if (hex) free(hex); goto badlabel; } m_label_free(new_sl); /* If string is already in internal form, we're done. */ if (strcmp(strval, hex) == 0) { free(hex); break; } /* Replace the label string with the internal form. */ (void) nvlist_remove(ret, zfs_prop_to_name(prop), DATA_TYPE_STRING); verify(nvlist_add_string(ret, zfs_prop_to_name(prop), hex) == 0); free(hex); break; badlabel: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid mlslabel '%s'"), strval); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); m_label_free(new_sl); /* OK if null */ #else /* !sun */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "mlslabel is not supported on FreeBSD")); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); #endif /* !sun */ goto error; } case ZFS_PROP_MOUNTPOINT: { namecheck_err_t why; if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 || strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0) break; if (mountpoint_namecheck(strval, &why)) { switch (why) { case NAME_ERR_LEADING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be an absolute path, " "'none', or 'legacy'"), propname); break; case NAME_ERR_TOOLONG: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "component of '%s' is too long"), propname); break; } (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } } /*FALLTHRU*/ case ZFS_PROP_SHARESMB: case ZFS_PROP_SHARENFS: /* * For the mountpoint and sharenfs or sharesmb * properties, check if it can be set in a * global/non-global zone based on * the zoned property value: * * global zone non-global zone * -------------------------------------------------- * zoned=on mountpoint (no) mountpoint (yes) * sharenfs (no) sharenfs (no) * sharesmb (no) sharesmb (no) * * zoned=off mountpoint (yes) N/A * sharenfs (yes) * sharesmb (yes) */ if (zoned) { if (getzoneid() == GLOBAL_ZONEID) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set on " "dataset in a non-global zone"), propname); (void) zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } else if (prop == ZFS_PROP_SHARENFS || prop == ZFS_PROP_SHARESMB) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set in " "a non-global zone"), propname); (void) zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } } else if (getzoneid() != GLOBAL_ZONEID) { /* * If zoned property is 
'off', this must be in * a global zone. If not, something is wrong. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set while dataset " "'zoned' property is set"), propname); (void) zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } /* * At this point, it is legitimate to set the * property. Now we want to make sure that the * property value is valid if it is sharenfs. */ if ((prop == ZFS_PROP_SHARENFS || prop == ZFS_PROP_SHARESMB) && strcmp(strval, "on") != 0 && strcmp(strval, "off") != 0) { zfs_share_proto_t proto; if (prop == ZFS_PROP_SHARESMB) proto = PROTO_SMB; else proto = PROTO_NFS; /* * Must be an valid sharing protocol * option string so init the libshare * in order to enable the parser and * then parse the options. We use the * control API since we don't care about * the current configuration and don't * want the overhead of loading it * until we actually do something. */ if (zfs_init_libshare(hdl, SA_INIT_CONTROL_API) != SA_OK) { /* * An error occurred so we can't do * anything */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set: problem " "in share initialization"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (zfs_parse_options(strval, proto) != SA_OK) { /* * There was an error in parsing so * deal with it by issuing an error * message and leaving after * uninitializing the the libshare * interface. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be set to invalid " "options"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); zfs_uninit_libshare(hdl); goto error; } zfs_uninit_libshare(hdl); } break; case ZFS_PROP_UTF8ONLY: chosen_utf = (int)intval; break; case ZFS_PROP_NORMALIZE: chosen_normal = (int)intval; break; } /* * For changes to existing volumes, we have some additional * checks to enforce. */ if (type == ZFS_TYPE_VOLUME && zhp != NULL) { uint64_t volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); uint64_t blocksize = zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE); char buf[64]; switch (prop) { case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: if (intval > volsize) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is greater than current " "volume size"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; case ZFS_PROP_VOLSIZE: if (intval % blocksize != 0) { zfs_nicenum(blocksize, buf, sizeof (buf)); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be a multiple of " "volume block size (%s)"), propname, buf); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (intval == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' cannot be zero"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } break; } } } /* * If normalization was chosen, but no UTF8 choice was made, * enforce rejection of non-UTF8 names. * * If normalization was chosen, but rejecting non-UTF8 names * was explicitly not chosen, it is an error. 
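/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * The normalization/utf8only coupling described above, reduced to its
 * decision table: choosing a normalization form implies utf8only=on,
 * and explicitly combining it with utf8only=off is rejected.
 */
static int
check_norm_utf8(int chosen_normal, int chosen_utf, int *force_utf8)
{
	*force_utf8 = 0;
	if (chosen_normal > 0 && chosen_utf < 0)
		*force_utf8 = 1;	/* silently imply utf8only=on */
	else if (chosen_normal > 0 && chosen_utf == 0)
		return (-1);		/* contradictory request */
	return (0);
}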
*/ if (chosen_normal > 0 && chosen_utf < 0) { if (nvlist_add_uint64(ret, zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) { (void) no_memory(hdl); goto error; } } else if (chosen_normal > 0 && chosen_utf == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' must be set 'on' if normalization chosen"), zfs_prop_to_name(ZFS_PROP_UTF8ONLY)); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } return (ret); error: nvlist_free(ret); return (NULL); } int zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl) { uint64_t old_volsize; uint64_t new_volsize; uint64_t old_reservation; uint64_t new_reservation; zfs_prop_t resv_prop; /* * If this is an existing volume, and someone is setting the volsize, * make sure that it matches the reservation, or add it if necessary. */ old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); if (zfs_which_resv_prop(zhp, &resv_prop) < 0) return (-1); old_reservation = zfs_prop_get_int(zhp, resv_prop); if ((zvol_volsize_to_reservation(old_volsize, zhp->zfs_props) != old_reservation) || nvlist_lookup_uint64(nvl, zfs_prop_to_name(resv_prop), &new_reservation) != ENOENT) { return (0); } if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &new_volsize) != 0) return (-1); new_reservation = zvol_volsize_to_reservation(new_volsize, zhp->zfs_props); if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop), new_reservation) != 0) { (void) no_memory(zhp->zfs_hdl); return (-1); } return (1); } void zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err, char *errbuf) { switch (err) { case ENOSPC: /* * For quotas and reservations, ENOSPC indicates * something different; setting a quota or reservation * doesn't use any disk space. */ switch (prop) { case ZFS_PROP_QUOTA: case ZFS_PROP_REFQUOTA: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "size is less than current used or " "reserved space")); (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf); break; case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "size is greater than available space")); (void) zfs_error(hdl, EZFS_PROPSPACE, errbuf); break; default: (void) zfs_standard_error(hdl, err, errbuf); break; } break; case EBUSY: (void) zfs_standard_error(hdl, EBUSY, errbuf); break; case EROFS: (void) zfs_error(hdl, EZFS_DSREADONLY, errbuf); break; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool and or dataset must be upgraded to set this " "property or value")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case ERANGE: if (prop == ZFS_PROP_COMPRESSION) { (void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property setting is not allowed on " "bootable datasets")); (void) zfs_error(hdl, EZFS_NOTSUP, errbuf); } else { (void) zfs_standard_error(hdl, err, errbuf); } break; case EINVAL: if (prop == ZPROP_INVAL) { (void) zfs_error(hdl, EZFS_BADPROP, errbuf); } else { (void) zfs_standard_error(hdl, err, errbuf); } break; case EOVERFLOW: /* * This platform can't address a volume this big. */ #ifdef _ILP32 if (prop == ZFS_PROP_VOLSIZE) { (void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf); break; } #endif /* FALLTHROUGH */ default: (void) zfs_standard_error(hdl, err, errbuf); } } /* * Given a property name and value, set the property for the given dataset. 
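/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * zfs_add_synthetic_resv() above, in miniature: only when the existing
 * reservation still tracks the old volsize, and the caller did not set
 * a reservation explicitly, is a matching reservation synthesized for
 * the new volsize.  size_to_resv() is a hypothetical stand-in for
 * zvol_volsize_to_reservation().
 */
#include <stdint.h>

extern uint64_t size_to_resv(uint64_t volsize);	/* hypothetical */

static int
want_synthetic_resv(uint64_t old_volsize, uint64_t old_resv,
    int resv_set_explicitly, uint64_t new_volsize, uint64_t *new_resv)
{
	if (size_to_resv(old_volsize) != old_resv || resv_set_explicitly)
		return (0);	/* leave the reservation alone */
	*new_resv = size_to_resv(new_volsize);
	return (1);
}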
*/ int zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval) { zfs_cmd_t zc = { 0 }; int ret = -1; prop_changelist_t *cl = NULL; char errbuf[1024]; libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *nvl = NULL, *realprops; zfs_prop_t prop; boolean_t do_prefix; uint64_t idx; int added_resv; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), zhp->zfs_name); if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 || nvlist_add_string(nvl, propname, propval) != 0) { (void) no_memory(hdl); goto error; } if ((realprops = zfs_valid_proplist(hdl, zhp->zfs_type, nvl, zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, errbuf)) == NULL) goto error; nvlist_free(nvl); nvl = realprops; prop = zfs_name_to_prop(propname); /* We don't support those properties on FreeBSD. */ switch (prop) { case ZFS_PROP_DEVICES: case ZFS_PROP_ISCSIOPTIONS: case ZFS_PROP_XATTR: case ZFS_PROP_VSCAN: case ZFS_PROP_NBMAND: case ZFS_PROP_MLSLABEL: (void) snprintf(errbuf, sizeof (errbuf), "property '%s' not supported on FreeBSD", propname); ret = zfs_error(hdl, EZFS_PERM, errbuf); goto error; } if (prop == ZFS_PROP_VOLSIZE) { if ((added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) goto error; } if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL) goto error; if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "child dataset with inherited mountpoint is used " "in a non-global zone")); ret = zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } /* * If the dataset's canmount property is being set to noauto, * then we want to prevent unmounting & remounting it. */ do_prefix = !((prop == ZFS_PROP_CANMOUNT) && (zprop_string_to_index(prop, propval, &idx, ZFS_TYPE_DATASET) == 0) && (idx == ZFS_CANMOUNT_NOAUTO)); if (do_prefix && (ret = changelist_prefix(cl)) != 0) goto error; /* * Execute the corresponding ioctl() to set this property. */ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0) goto error; ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); if (ret != 0) { zfs_setprop_error(hdl, prop, errno, errbuf); if (added_resv && errno == ENOSPC) { /* clean up the volsize property we tried to set */ uint64_t old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); nvlist_free(nvl); zcmd_free_nvlists(&zc); if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) goto error; if (nvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE), old_volsize) != 0) goto error; if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0) goto error; (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); } } else { if (do_prefix) ret = changelist_postfix(cl); /* * Refresh the statistics so the new property value * is reflected. */ if (ret == 0) (void) get_stats(zhp); } error: nvlist_free(nvl); zcmd_free_nvlists(&zc); if (cl) changelist_free(cl); return (ret); } /* * Given a property, inherit the value from the parent dataset, or if received * is TRUE, revert to the received value, if any. */ int zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received) { zfs_cmd_t zc = { 0 }; int ret; prop_changelist_t *cl; libzfs_handle_t *hdl = zhp->zfs_hdl; char errbuf[1024]; zfs_prop_t prop; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot inherit %s for '%s'"), propname, zhp->zfs_name); zc.zc_cookie = received; if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) { /* * For user properties, the amount of work we have to do is very * small, so just do it here. 
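/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * From a consumer's point of view, the whole zfs_prop_set() machinery
 * above reduces to a single call; on failure the error text has
 * already been emitted through the library handle.  The dataset name
 * is a placeholder.
 */
#include <libzfs.h>

static int
set_compression(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp;
	int ret;

	zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
	if (zhp == NULL)
		return (-1);
	/* drives zfs_valid_proplist(), the changelist, and the ioctl */
	ret = zfs_prop_set(zhp, "compression", "on");
	zfs_close(zhp);
	return (ret);
}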
*/ if (!zfs_prop_user(propname)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value)); if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0) return (zfs_standard_error(hdl, errno, errbuf)); return (0); } /* * Verify that this property is inheritable. */ if (zfs_prop_readonly(prop)) return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf)); if (!zfs_prop_inheritable(prop) && !received) return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf)); /* * Check to see if the value applies to this type */ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) return (zfs_error(hdl, EZFS_PROPTYPE, errbuf)); /* * Normalize the name, to get rid of shorthand abbreviations. */ propname = zfs_prop_to_name(prop); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value)); if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID && zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset is used in a non-global zone")); return (zfs_error(hdl, EZFS_ZONED, errbuf)); } /* * Determine datasets which will be affected by this change, if any. */ if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL) return (-1); if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "child dataset with inherited mountpoint is used " "in a non-global zone")); ret = zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } if ((ret = changelist_prefix(cl)) != 0) goto error; if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) { return (zfs_standard_error(hdl, errno, errbuf)); } else { if ((ret = changelist_postfix(cl)) != 0) goto error; /* * Refresh the statistics so the new property is reflected. */ (void) get_stats(zhp); } error: changelist_free(cl); return (ret); } /* * True DSL properties are stored in an nvlist. The following two functions * extract them appropriately. 
*/ static uint64_t getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source) { nvlist_t *nv; uint64_t value; *source = NULL; if (nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(prop), &nv) == 0) { verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source); } else { verify(!zhp->zfs_props_table || zhp->zfs_props_table[prop] == B_TRUE); value = zfs_prop_default_numeric(prop); *source = ""; } return (value); } static char * getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source) { nvlist_t *nv; char *value; *source = NULL; if (nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(prop), &nv) == 0) { verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); (void) nvlist_lookup_string(nv, ZPROP_SOURCE, source); } else { verify(!zhp->zfs_props_table || zhp->zfs_props_table[prop] == B_TRUE); if ((value = (char *)zfs_prop_default_string(prop)) == NULL) value = ""; *source = ""; } return (value); } static boolean_t zfs_is_recvd_props_mode(zfs_handle_t *zhp) { return (zhp->zfs_props == zhp->zfs_recvd_props); } static void zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie) { *cookie = (uint64_t)(uintptr_t)zhp->zfs_props; zhp->zfs_props = zhp->zfs_recvd_props; } static void zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie) { zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie; *cookie = 0; } /* * Internal function for getting a numeric property. Both zfs_prop_get() and * zfs_prop_get_int() are built using this interface. * * Certain properties can be overridden using 'mount -o'. In this case, scan * the contents of the /etc/mnttab entry, searching for the appropriate options. * If they differ from the on-disk values, report the current values and mark * the source "temporary". */ static int get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src, char **source, uint64_t *val) { zfs_cmd_t zc = { 0 }; nvlist_t *zplprops = NULL; struct mnttab mnt; char *mntopt_on = NULL; char *mntopt_off = NULL; boolean_t received = zfs_is_recvd_props_mode(zhp); *source = NULL; switch (prop) { case ZFS_PROP_ATIME: mntopt_on = MNTOPT_ATIME; mntopt_off = MNTOPT_NOATIME; break; case ZFS_PROP_DEVICES: mntopt_on = MNTOPT_DEVICES; mntopt_off = MNTOPT_NODEVICES; break; case ZFS_PROP_EXEC: mntopt_on = MNTOPT_EXEC; mntopt_off = MNTOPT_NOEXEC; break; case ZFS_PROP_READONLY: mntopt_on = MNTOPT_RO; mntopt_off = MNTOPT_RW; break; case ZFS_PROP_SETUID: mntopt_on = MNTOPT_SETUID; mntopt_off = MNTOPT_NOSETUID; break; case ZFS_PROP_XATTR: mntopt_on = MNTOPT_XATTR; mntopt_off = MNTOPT_NOXATTR; break; case ZFS_PROP_NBMAND: mntopt_on = MNTOPT_NBMAND; mntopt_off = MNTOPT_NONBMAND; break; } /* * Because looking up the mount options is potentially expensive * (iterating over all of /etc/mnttab), we defer its calculation until * we're looking up a property which requires its presence. 
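/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * The "mount -o overrides the on-disk value" rule implemented below,
 * reduced to plain C: an option present in the active mount flips the
 * reported value and marks its source temporary.  strstr() stands in
 * here for hasmntopt()'s token-aware lookup.
 */
#include <string.h>

static int
apply_mntopt_override(int diskval, const char *mntopts,
    const char *opt_on, const char *opt_off, int *temporary)
{
	*temporary = 0;
	if (strstr(mntopts, opt_on) != NULL && !diskval) {
		*temporary = 1;
		return (1);	/* e.g. "atime" forced on at mount time */
	}
	if (strstr(mntopts, opt_off) != NULL && diskval) {
		*temporary = 1;
		return (0);	/* e.g. "noatime" forced off */
	}
	return (diskval);
}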
*/ if (!zhp->zfs_mntcheck && (mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) { libzfs_handle_t *hdl = zhp->zfs_hdl; struct mnttab entry; if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) { zhp->zfs_mntopts = zfs_strdup(hdl, entry.mnt_mntopts); if (zhp->zfs_mntopts == NULL) return (-1); } zhp->zfs_mntcheck = B_TRUE; } if (zhp->zfs_mntopts == NULL) mnt.mnt_mntopts = ""; else mnt.mnt_mntopts = zhp->zfs_mntopts; switch (prop) { case ZFS_PROP_ATIME: case ZFS_PROP_DEVICES: case ZFS_PROP_EXEC: case ZFS_PROP_READONLY: case ZFS_PROP_SETUID: case ZFS_PROP_XATTR: case ZFS_PROP_NBMAND: *val = getprop_uint64(zhp, prop, source); if (received) break; if (hasmntopt(&mnt, mntopt_on) && !*val) { *val = B_TRUE; if (src) *src = ZPROP_SRC_TEMPORARY; } else if (hasmntopt(&mnt, mntopt_off) && *val) { *val = B_FALSE; if (src) *src = ZPROP_SRC_TEMPORARY; } break; case ZFS_PROP_CANMOUNT: case ZFS_PROP_VOLSIZE: case ZFS_PROP_QUOTA: case ZFS_PROP_REFQUOTA: case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: *val = getprop_uint64(zhp, prop, source); if (*source == NULL) { /* not default, must be local */ *source = zhp->zfs_name; } break; case ZFS_PROP_MOUNTED: *val = (zhp->zfs_mntopts != NULL); break; case ZFS_PROP_NUMCLONES: *val = zhp->zfs_dmustats.dds_num_clones; break; case ZFS_PROP_VERSION: case ZFS_PROP_NORMALIZE: case ZFS_PROP_UTF8ONLY: case ZFS_PROP_CASE: if (!zfs_prop_valid_for_type(prop, zhp->zfs_head_type) || zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0) return (-1); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) { zcmd_free_nvlists(&zc); return (-1); } if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 || nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop), val) != 0) { zcmd_free_nvlists(&zc); return (-1); } if (zplprops) nvlist_free(zplprops); zcmd_free_nvlists(&zc); break; default: switch (zfs_prop_get_type(prop)) { case PROP_TYPE_NUMBER: case PROP_TYPE_INDEX: *val = getprop_uint64(zhp, prop, source); /* * If we tried to use a default value for a * readonly property, it means that it was not * present. */ if (zfs_prop_readonly(prop) && *source != NULL && (*source)[0] == '\0') { *source = NULL; } break; case PROP_TYPE_STRING: default: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "cannot get non-numeric property")); return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN, "internal error"))); } } return (0); } /* * Calculate the source type, given the raw source string. 
*/ static void get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source, char *statbuf, size_t statlen) { if (statbuf == NULL || *srctype == ZPROP_SRC_TEMPORARY) return; if (source == NULL) { *srctype = ZPROP_SRC_NONE; } else if (source[0] == '\0') { *srctype = ZPROP_SRC_DEFAULT; } else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) { *srctype = ZPROP_SRC_RECEIVED; } else { if (strcmp(source, zhp->zfs_name) == 0) { *srctype = ZPROP_SRC_LOCAL; } else { (void) strlcpy(statbuf, source, statlen); *srctype = ZPROP_SRC_INHERITED; } } } int zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf, size_t proplen, boolean_t literal) { zfs_prop_t prop; int err = 0; if (zhp->zfs_recvd_props == NULL) if (get_recvd_props_ioctl(zhp) != 0) return (-1); prop = zfs_name_to_prop(propname); if (prop != ZPROP_INVAL) { uint64_t cookie; if (!nvlist_exists(zhp->zfs_recvd_props, propname)) return (-1); zfs_set_recvd_props_mode(zhp, &cookie); err = zfs_prop_get(zhp, prop, propbuf, proplen, NULL, NULL, 0, literal); zfs_unset_recvd_props_mode(zhp, &cookie); } else { nvlist_t *propval; char *recvdval; if (nvlist_lookup_nvlist(zhp->zfs_recvd_props, propname, &propval) != 0) return (-1); verify(nvlist_lookup_string(propval, ZPROP_VALUE, &recvdval) == 0); (void) strlcpy(propbuf, recvdval, proplen); } return (err == 0 ? 0 : -1); } static int get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen) { nvlist_t *value; nvpair_t *pair; value = zfs_get_clones_nvl(zhp); if (value == NULL) return (-1); propbuf[0] = '\0'; for (pair = nvlist_next_nvpair(value, NULL); pair != NULL; pair = nvlist_next_nvpair(value, pair)) { if (propbuf[0] != '\0') (void) strlcat(propbuf, ",", proplen); (void) strlcat(propbuf, nvpair_name(pair), proplen); } return (0); } struct get_clones_arg { uint64_t numclones; nvlist_t *value; const char *origin; char buf[ZFS_MAXNAMELEN]; }; int get_clones_cb(zfs_handle_t *zhp, void *arg) { struct get_clones_arg *gca = arg; if (gca->numclones == 0) { zfs_close(zhp); return (0); } if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf), NULL, NULL, 0, B_TRUE) != 0) goto out; if (strcmp(gca->buf, gca->origin) == 0) { if (nvlist_add_boolean(gca->value, zfs_get_name(zhp)) != 0) { zfs_close(zhp); return (no_memory(zhp->zfs_hdl)); } gca->numclones--; } out: (void) zfs_iter_children(zhp, get_clones_cb, gca); zfs_close(zhp); return (0); } nvlist_t * zfs_get_clones_nvl(zfs_handle_t *zhp) { nvlist_t *nv, *value; if (nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) { struct get_clones_arg gca; /* * if this is a snapshot, then the kernel wasn't able * to get the clones. Do it by slowly iterating. 
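/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * get_source()'s classification above, spelled out: NULL means no
 * source, an empty string means the default, the dataset's own name
 * means local, and anything else is either a received value or the
 * inheriting ancestor.  "$recvd" is the literal behind
 * ZPROP_SOURCE_VAL_RECVD.
 */
#include <string.h>

static const char *
classify_source(const char *source, const char *dsname)
{
	if (source == NULL)
		return ("none");
	if (source[0] == '\0')
		return ("default");
	if (strstr(source, "$recvd") != NULL)
		return ("received");
	if (strcmp(source, dsname) == 0)
		return ("local");
	return ("inherited");	/* source names the ancestor */
}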
*/ if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) return (NULL); if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0) return (NULL); if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) { nvlist_free(nv); return (NULL); } gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES); gca.value = value; gca.origin = zhp->zfs_name; if (gca.numclones != 0) { zfs_handle_t *root; char pool[ZFS_MAXNAMELEN]; char *cp = pool; /* get the pool name */ (void) strlcpy(pool, zhp->zfs_name, sizeof (pool)); (void) strsep(&cp, "/@"); root = zfs_open(zhp->zfs_hdl, pool, ZFS_TYPE_FILESYSTEM); (void) get_clones_cb(root, &gca); } if (gca.numclones != 0 || nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 || nvlist_add_nvlist(zhp->zfs_props, zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) { nvlist_free(nv); nvlist_free(value); return (NULL); } nvlist_free(nv); nvlist_free(value); verify(0 == nvlist_lookup_nvlist(zhp->zfs_props, zfs_prop_to_name(ZFS_PROP_CLONES), &nv)); } verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0); return (value); } /* * Retrieve a property from the given object. If 'literal' is specified, then * numbers are left as exact values. Otherwise, numbers are converted to a * human-readable form. * * Returns 0 on success, or -1 on error. */ int zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen, zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal) { char *source = NULL; uint64_t val; char *str; const char *strval; boolean_t received = zfs_is_recvd_props_mode(zhp); /* * Check to see if this property applies to our object */ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) return (-1); if (received && zfs_prop_readonly(prop)) return (-1); if (src) *src = ZPROP_SRC_NONE; switch (prop) { case ZFS_PROP_CREATION: /* * 'creation' is a time_t stored in the statistics. We convert * this into a string unless 'literal' is specified. */ { val = getprop_uint64(zhp, prop, &source); time_t time = (time_t)val; struct tm t; if (literal || localtime_r(&time, &t) == NULL || strftime(propbuf, proplen, "%a %b %e %k:%M %Y", &t) == 0) (void) snprintf(propbuf, proplen, "%llu", val); } break; case ZFS_PROP_MOUNTPOINT: /* * Getting the precise mountpoint can be tricky. * * - for 'none' or 'legacy', return those values. * - for inherited mountpoints, we want to take everything * after our ancestor and append it to the inherited value. * * If the pool has an alternate root, we want to prepend that * root to any values we return. */ str = getprop_string(zhp, prop, &source); if (str[0] == '/') { char buf[MAXPATHLEN]; char *root = buf; const char *relpath; /* * If we inherit the mountpoint, even from a dataset * with a received value, the source will be the path of * the dataset we inherit from. If source is * ZPROP_SOURCE_VAL_RECVD, the received value is not * inherited. */ if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) { relpath = ""; } else { relpath = zhp->zfs_name + strlen(source); if (relpath[0] == '/') relpath++; } if ((zpool_get_prop(zhp->zpool_hdl, ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL)) || (strcmp(root, "-") == 0)) root[0] = '\0'; /* * Special case an alternate root of '/'. This will * avoid having multiple leading slashes in the * mountpoint path. */ if (strcmp(root, "/") == 0) root++; /* * If the mountpoint is '/' then skip over this * if we are obtaining either an alternate root or * an inherited mountpoint. 
*/ if (str[1] == '\0' && (root[0] != '\0' || relpath[0] != '\0')) str++; if (relpath[0] == '\0') (void) snprintf(propbuf, proplen, "%s%s", root, str); else (void) snprintf(propbuf, proplen, "%s%s%s%s", root, str, relpath[0] == '@' ? "" : "/", relpath); } else { /* 'legacy' or 'none' */ (void) strlcpy(propbuf, str, proplen); } break; case ZFS_PROP_ORIGIN: (void) strlcpy(propbuf, getprop_string(zhp, prop, &source), proplen); /* * If there is no parent at all, return failure to indicate that * it doesn't apply to this dataset. */ if (propbuf[0] == '\0') return (-1); break; case ZFS_PROP_CLONES: if (get_clones_string(zhp, propbuf, proplen) != 0) return (-1); break; case ZFS_PROP_QUOTA: case ZFS_PROP_REFQUOTA: case ZFS_PROP_RESERVATION: case ZFS_PROP_REFRESERVATION: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); /* * If quota or reservation is 0, we translate this into 'none' * (unless literal is set), and indicate that it's the default * value. Otherwise, we print the number nicely and indicate * that its set locally. */ if (val == 0) { if (literal) (void) strlcpy(propbuf, "0", proplen); else (void) strlcpy(propbuf, "none", proplen); } else { if (literal) (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); else zfs_nicenum(val, propbuf, proplen); } break; case ZFS_PROP_REFRATIO: case ZFS_PROP_COMPRESSRATIO: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); (void) snprintf(propbuf, proplen, "%llu.%02llux", (u_longlong_t)(val / 100), (u_longlong_t)(val % 100)); break; case ZFS_PROP_TYPE: switch (zhp->zfs_type) { case ZFS_TYPE_FILESYSTEM: str = "filesystem"; break; case ZFS_TYPE_VOLUME: str = "volume"; break; case ZFS_TYPE_SNAPSHOT: str = "snapshot"; break; default: abort(); } (void) snprintf(propbuf, proplen, "%s", str); break; case ZFS_PROP_MOUNTED: /* * The 'mounted' property is a pseudo-property that described * whether the filesystem is currently mounted. Even though * it's a boolean value, the typical values of "on" and "off" * don't make sense, so we translate to "yes" and "no". */ if (get_numeric_property(zhp, ZFS_PROP_MOUNTED, src, &source, &val) != 0) return (-1); if (val) (void) strlcpy(propbuf, "yes", proplen); else (void) strlcpy(propbuf, "no", proplen); break; case ZFS_PROP_NAME: /* * The 'name' property is a pseudo-property derived from the * dataset name. It is presented as a real property to simplify * consumers. */ (void) strlcpy(propbuf, zhp->zfs_name, proplen); break; case ZFS_PROP_MLSLABEL: { #ifdef sun m_label_t *new_sl = NULL; char *ascii = NULL; /* human readable label */ (void) strlcpy(propbuf, getprop_string(zhp, prop, &source), proplen); if (literal || (strcasecmp(propbuf, ZFS_MLSLABEL_DEFAULT) == 0)) break; /* * Try to translate the internal hex string to * human-readable output. If there are any * problems just use the hex string. 
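/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * refratio/compressratio above are stored as integers scaled by 100;
 * the "%llu.%02llux" formatting recovers two decimal places without
 * any floating point.
 */
#include <stdio.h>
#include <stdint.h>

static void
format_ratio(char *buf, size_t buflen, uint64_t scaled)
{
	/* scaled == 154 renders as "1.54x" */
	(void) snprintf(buf, buflen, "%llu.%02llux",
	    (unsigned long long)(scaled / 100),
	    (unsigned long long)(scaled % 100));
}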
*/ if (str_to_label(propbuf, &new_sl, MAC_LABEL, L_NO_CORRECTION, NULL) == -1) { m_label_free(new_sl); break; } if (label_to_str(new_sl, &ascii, M_LABEL, DEF_NAMES) != 0) { if (ascii) free(ascii); m_label_free(new_sl); break; } m_label_free(new_sl); (void) strlcpy(propbuf, ascii, proplen); free(ascii); #else /* !sun */ propbuf[0] = '\0'; #endif /* !sun */ } break; default: switch (zfs_prop_get_type(prop)) { case PROP_TYPE_NUMBER: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); if (literal) (void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val); else zfs_nicenum(val, propbuf, proplen); break; case PROP_TYPE_STRING: (void) strlcpy(propbuf, getprop_string(zhp, prop, &source), proplen); break; case PROP_TYPE_INDEX: if (get_numeric_property(zhp, prop, src, &source, &val) != 0) return (-1); if (zfs_prop_index_to_string(prop, val, &strval) != 0) return (-1); (void) strlcpy(propbuf, strval, proplen); break; default: abort(); } } get_source(zhp, src, source, statbuf, statlen); return (0); } /* * Utility function to get the given numeric property. Does no validation that * the given property is the appropriate type; should only be used with * hard-coded property types. */ uint64_t zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop) { char *source; uint64_t val; (void) get_numeric_property(zhp, prop, NULL, &source, &val); return (val); } int zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val) { char buf[64]; (void) snprintf(buf, sizeof (buf), "%llu", (longlong_t)val); return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf)); } /* * Similar to zfs_prop_get(), but returns the value as an integer. */ int zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value, zprop_source_t *src, char *statbuf, size_t statlen) { char *source; /* * Check to see if this property applies to our object */ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) { return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE, dgettext(TEXT_DOMAIN, "cannot get property '%s'"), zfs_prop_to_name(prop))); } if (src) *src = ZPROP_SRC_NONE; if (get_numeric_property(zhp, prop, src, &source, value) != 0) return (-1); get_source(zhp, src, source, statbuf, statlen); return (0); } static int idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser, char **domainp, idmap_rid_t *ridp) { #ifdef sun idmap_get_handle_t *get_hdl = NULL; idmap_stat status; int err = EINVAL; if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS) goto out; if (isuser) { err = idmap_get_sidbyuid(get_hdl, id, IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status); } else { err = idmap_get_sidbygid(get_hdl, id, IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status); } if (err == IDMAP_SUCCESS && idmap_get_mappings(get_hdl) == IDMAP_SUCCESS && status == IDMAP_SUCCESS) err = 0; else err = EINVAL; out: if (get_hdl) idmap_get_destroy(get_hdl); return (err); #else /* !sun */ assert(!"invalid code path"); #endif /* !sun */ } /* * convert the propname into parameters needed by kernel * Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829 * Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789 */ static int userquota_propname_decode(const char *propname, boolean_t zoned, zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp) { zfs_userquota_prop_t type; char *cp, *end; char *numericsid = NULL; boolean_t isuser; domain[0] = '\0'; /* Figure out the property type ({user|group}{quota|space}) */ for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) { if (strncmp(propname, zfs_userquota_prop_prefixes[type], 
strlen(zfs_userquota_prop_prefixes[type])) == 0) break; } if (type == ZFS_NUM_USERQUOTA_PROPS) return (EINVAL); *typep = type; isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED); cp = strchr(propname, '@') + 1; if (strchr(cp, '@')) { #ifdef sun /* * It's a SID name (eg "user@domain") that needs to be * turned into S-1-domainID-RID. */ directory_error_t e; if (zoned && getzoneid() == GLOBAL_ZONEID) return (ENOENT); if (isuser) { e = directory_sid_from_user_name(NULL, cp, &numericsid); } else { e = directory_sid_from_group_name(NULL, cp, &numericsid); } if (e != NULL) { directory_error_free(e); return (ENOENT); } if (numericsid == NULL) return (ENOENT); cp = numericsid; /* will be further decoded below */ #else /* !sun */ return (ENOENT); #endif /* !sun */ } if (strncmp(cp, "S-1-", 4) == 0) { /* It's a numeric SID (eg "S-1-234-567-89") */ (void) strlcpy(domain, cp, domainlen); cp = strrchr(domain, '-'); *cp = '\0'; cp++; errno = 0; *ridp = strtoull(cp, &end, 10); if (numericsid) { free(numericsid); numericsid = NULL; } if (errno != 0 || *end != '\0') return (EINVAL); } else if (!isdigit(*cp)) { /* * It's a user/group name (eg "user") that needs to be * turned into a uid/gid */ if (zoned && getzoneid() == GLOBAL_ZONEID) return (ENOENT); if (isuser) { struct passwd *pw; pw = getpwnam(cp); if (pw == NULL) return (ENOENT); *ridp = pw->pw_uid; } else { struct group *gr; gr = getgrnam(cp); if (gr == NULL) return (ENOENT); *ridp = gr->gr_gid; } } else { /* It's a user/group ID (eg "12345"). */ uid_t id = strtoul(cp, &end, 10); idmap_rid_t rid; char *mapdomain; if (*end != '\0') return (EINVAL); if (id > MAXUID) { /* It's an ephemeral ID. */ if (idmap_id_to_numeric_domain_rid(id, isuser, &mapdomain, &rid) != 0) return (ENOENT); (void) strlcpy(domain, mapdomain, domainlen); *ridp = rid; } else { *ridp = id; } } ASSERT3P(numericsid, ==, NULL); return (0); } static int zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue, zfs_userquota_prop_t *typep) { int err; zfs_cmd_t zc = { 0 }; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); err = userquota_propname_decode(propname, zfs_prop_get_int(zhp, ZFS_PROP_ZONED), typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid); zc.zc_objset_type = *typep; if (err) return (err); err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_USERSPACE_ONE, &zc); if (err) return (err); *propvalue = zc.zc_cookie; return (0); } int zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue) { zfs_userquota_prop_t type; return (zfs_prop_get_userquota_common(zhp, propname, propvalue, &type)); } int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal) { int err; uint64_t propvalue; zfs_userquota_prop_t type; err = zfs_prop_get_userquota_common(zhp, propname, &propvalue, &type); if (err) return (err); if (literal) { (void) snprintf(propbuf, proplen, "%llu", propvalue); } else if (propvalue == 0 && (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA)) { (void) strlcpy(propbuf, "none", proplen); } else { zfs_nicenum(propvalue, propbuf, proplen); } return (0); } int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname, uint64_t *propvalue) { int err; zfs_cmd_t zc = { 0 }; const char *snapname; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); snapname = strchr(propname, '@') + 1; if (strchr(snapname, '@')) { (void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value)); } else { /* snapname is the short name, 
append it to zhp's fsname */ char *cp; (void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value)); cp = strchr(zc.zc_value, '@'); if (cp != NULL) *cp = '\0'; (void) strlcat(zc.zc_value, "@", sizeof (zc.zc_value)); (void) strlcat(zc.zc_value, snapname, sizeof (zc.zc_value)); } err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_WRITTEN, &zc); if (err) return (err); *propvalue = zc.zc_cookie; return (0); } int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname, char *propbuf, int proplen, boolean_t literal) { int err; uint64_t propvalue; err = zfs_prop_get_written_int(zhp, propname, &propvalue); if (err) return (err); if (literal) { (void) snprintf(propbuf, proplen, "%llu", propvalue); } else { zfs_nicenum(propvalue, propbuf, proplen); } return (0); } int zfs_get_snapused_int(zfs_handle_t *firstsnap, zfs_handle_t *lastsnap, uint64_t *usedp) { int err; zfs_cmd_t zc = { 0 }; (void) strlcpy(zc.zc_name, lastsnap->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, firstsnap->zfs_name, sizeof (zc.zc_value)); err = ioctl(lastsnap->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_SNAPS, &zc); if (err) return (err); *usedp = zc.zc_cookie; return (0); } /* * Returns the name of the given zfs handle. */ const char * zfs_get_name(const zfs_handle_t *zhp) { return (zhp->zfs_name); } /* * Returns the type of the given zfs handle. */ zfs_type_t zfs_get_type(const zfs_handle_t *zhp) { return (zhp->zfs_type); } /* * Is one dataset name a child dataset of another? * * Needs to handle these cases: * Dataset 1 "a/foo" "a/foo" "a/foo" "a/foo" * Dataset 2 "a/fo" "a/foobar" "a/bar/baz" "a/foo/bar" * Descendant? No. No. No. Yes. */ static boolean_t is_descendant(const char *ds1, const char *ds2) { size_t d1len = strlen(ds1); /* ds2 can't be a descendant if it's smaller */ if (strlen(ds2) < d1len) return (B_FALSE); /* otherwise, compare strings and verify that there's a '/' char */ return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0)); } /* * Given a complete name, return just the portion that refers to the parent. * Will return -1 if there is no parent (path is just the name of the * pool). */ static int parent_name(const char *path, char *buf, size_t buflen) { char *slashp; (void) strlcpy(buf, path, buflen); if ((slashp = strrchr(buf, '/')) == NULL) return (-1); *slashp = '\0'; return (0); } /* * If accept_ancestor is false, then check to make sure that the given path has * a parent, and that it exists. If accept_ancestor is true, then find the * closest existing ancestor for the given path. In prefixlen return the * length of already existing prefix of the given path. We also fetch the * 'zoned' property, which is used to validate property settings when creating * new datasets. 
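/*
 * [Editor's illustrative sketch; not part of the r235263 diff.]
 * The is_descendant() rule above in one line: ds2 descends from ds1
 * iff ds2 starts with ds1 followed immediately by '/'.  (strncmp()
 * stops at ds2's NUL, so the explicit length check in the original is
 * folded in here.)  A self-test against the table above:
 */
#include <assert.h>
#include <string.h>

static int
descends(const char *ds1, const char *ds2)
{
	size_t len = strlen(ds1);

	return (strncmp(ds1, ds2, len) == 0 && ds2[len] == '/');
}

static void
descends_selftest(void)
{
	assert(!descends("a/foo", "a/fo"));	/* shorter, not a child */
	assert(!descends("a/foo", "a/foobar"));	/* sibling lookalike */
	assert(!descends("a/foo", "a/bar/baz"));	/* unrelated */
	assert(descends("a/foo", "a/foo/bar"));	/* true child */
}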
*/ static int check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned, boolean_t accept_ancestor, int *prefixlen) { zfs_cmd_t zc = { 0 }; char parent[ZFS_MAXNAMELEN]; char *slash; zfs_handle_t *zhp; char errbuf[1024]; uint64_t is_zoned; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), path); /* get parent, and check to see if this is just a pool */ if (parent_name(path, parent, sizeof (parent)) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing dataset name")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } /* check to see if the pool exists */ if ((slash = strchr(parent, '/')) == NULL) slash = parent + strlen(parent); (void) strncpy(zc.zc_name, parent, slash - parent); zc.zc_name[slash - parent] = '\0'; if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 && errno == ENOENT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool '%s'"), zc.zc_name); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } /* check to see if the parent dataset exists */ while ((zhp = make_dataset_handle(hdl, parent)) == NULL) { if (errno == ENOENT && accept_ancestor) { /* * Go deeper to find an ancestor, give up on top level. */ if (parent_name(parent, parent, sizeof (parent)) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool '%s'"), zc.zc_name); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } } else if (errno == ENOENT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "parent does not exist")); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } else return (zfs_standard_error(hdl, errno, errbuf)); } is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED); if (zoned != NULL) *zoned = is_zoned; /* we are in a non-global zone, but parent is in the global zone */ if (getzoneid() != GLOBAL_ZONEID && !is_zoned) { (void) zfs_standard_error(hdl, EPERM, errbuf); zfs_close(zhp); return (-1); } /* make sure parent is a filesystem */ if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "parent is not a filesystem")); (void) zfs_error(hdl, EZFS_BADTYPE, errbuf); zfs_close(zhp); return (-1); } zfs_close(zhp); if (prefixlen != NULL) *prefixlen = strlen(parent); return (0); } /* * Finds whether the dataset of the given type(s) exists. */ boolean_t zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types) { zfs_handle_t *zhp; if (!zfs_validate_name(hdl, path, types, B_FALSE)) return (B_FALSE); /* * Try to get stats for the dataset, which will tell us if it exists. */ if ((zhp = make_dataset_handle(hdl, path)) != NULL) { int ds_type = zhp->zfs_type; zfs_close(zhp); if (types & ds_type) return (B_TRUE); } return (B_FALSE); } /* * Given a path to 'target', create all the ancestors between * the prefixlen portion of the path, and the target itself. * Fail if the initial prefixlen-ancestor does not already exist. */ int create_parents(libzfs_handle_t *hdl, char *target, int prefixlen) { zfs_handle_t *h; char *cp; const char *opname; /* make sure prefix exists */ cp = target + prefixlen; if (*cp != '/') { assert(strchr(cp, '/') == NULL); h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); } else { *cp = '\0'; h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); *cp = '/'; } if (h == NULL) return (-1); zfs_close(h); /* * Attempt to create, mount, and share any ancestor filesystems, * up to the prefixlen-long one. 
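 *
 * Sketch of the loop below (added for clarity, hypothetical names):
 * with target "tank/a/b/c" and prefixlen = strlen("tank/a"), the walk
 * truncates the name at each '/' in turn, creating, mounting and
 * sharing "tank/a/b" if it is missing; the final component "c" is left
 * for the caller to create.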
*/ for (cp = target + prefixlen + 1; cp = strchr(cp, '/'); *cp = '/', cp++) { char *logstr; *cp = '\0'; h = make_dataset_handle(hdl, target); if (h) { /* it already exists, nothing to do here */ zfs_close(h); continue; } logstr = hdl->libzfs_log_str; hdl->libzfs_log_str = NULL; if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM, NULL) != 0) { hdl->libzfs_log_str = logstr; opname = dgettext(TEXT_DOMAIN, "create"); goto ancestorerr; } hdl->libzfs_log_str = logstr; h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM); if (h == NULL) { opname = dgettext(TEXT_DOMAIN, "open"); goto ancestorerr; } if (zfs_mount(h, NULL, 0) != 0) { opname = dgettext(TEXT_DOMAIN, "mount"); goto ancestorerr; } if (zfs_share(h) != 0) { opname = dgettext(TEXT_DOMAIN, "share"); goto ancestorerr; } zfs_close(h); } return (0); ancestorerr: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to %s ancestor '%s'"), opname, target); return (-1); } /* * Creates non-existing ancestors of the given path. */ int zfs_create_ancestors(libzfs_handle_t *hdl, const char *path) { int prefix; char *path_copy; int rc; if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0) return (-1); if ((path_copy = strdup(path)) != NULL) { rc = create_parents(hdl, path_copy, prefix); free(path_copy); } if (path_copy == NULL || rc != 0) return (-1); return (0); } /* * Create a new filesystem or volume. */ int zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type, nvlist_t *props) { zfs_cmd_t zc = { 0 }; int ret; uint64_t size = 0; uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); char errbuf[1024]; uint64_t zoned; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), path); /* validate the path, taking care to note the extended error message */ if (!zfs_validate_name(hdl, path, type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); /* validate parents exist */ if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0) return (-1); /* * The failure modes when creating a dataset of a different type over * one that already exists is a little strange. In particular, if you * try to create a dataset on top of an existing dataset, the ioctl() * will return ENOENT, not EEXIST. To prevent this from happening, we * first try to see if the dataset exists. */ (void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name)); if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset already exists")); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); } if (type == ZFS_TYPE_VOLUME) zc.zc_objset_type = DMU_OST_ZVOL; else zc.zc_objset_type = DMU_OST_ZFS; if (props && (props = zfs_valid_proplist(hdl, type, props, zoned, NULL, errbuf)) == 0) return (-1); if (type == ZFS_TYPE_VOLUME) { /* * If we are creating a volume, the size and block size must * satisfy a few restraints. First, the blocksize must be a * valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the * volsize must be a multiple of the block size, and cannot be * zero. 
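 *
 * A minimal usage sketch (hypothetical names and sizes, added for
 * illustration):
 *
 *	nvlist_t *props;
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), 100ULL << 20) == 0);
 *	int err = zfs_create(hdl, "tank/vol", ZFS_TYPE_VOLUME, props);
 *
 * A 100M volsize passes the checks below with the default 8K
 * volblocksize, since it is a multiple of it; 100M + 4K would fail
 * with EZFS_BADPROP.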
*/ if (props == NULL || nvlist_lookup_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing volume size")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } if ((ret = nvlist_lookup_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &blocksize)) != 0) { if (ret == ENOENT) { blocksize = zfs_prop_default_numeric( ZFS_PROP_VOLBLOCKSIZE); } else { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "missing volume block size")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } } if (size == 0) { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "volume size cannot be zero")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } if (size % blocksize != 0) { nvlist_free(props); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "volume size must be a multiple of volume block " "size")); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); } } if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0) return (-1); nvlist_free(props); /* create the dataset */ ret = zfs_ioctl(hdl, ZFS_IOC_CREATE, &zc); zcmd_free_nvlists(&zc); /* check for failure */ if (ret != 0) { char parent[ZFS_MAXNAMELEN]; (void) parent_name(path, parent, sizeof (parent)); switch (errno) { case ENOENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such parent '%s'"), parent); return (zfs_error(hdl, EZFS_NOENT, errbuf)); case EINVAL: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "parent '%s' is not a filesystem"), parent); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); case EDOM: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "volume block size must be power of 2 from " "%u to %uk"), (uint_t)SPA_MINBLOCKSIZE, (uint_t)SPA_MAXBLOCKSIZE >> 10); return (zfs_error(hdl, EZFS_BADPROP, errbuf)); case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to set this " "property or value")); return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); #ifdef _ILP32 case EOVERFLOW: /* * This platform can't address a volume this big. */ if (type == ZFS_TYPE_VOLUME) return (zfs_error(hdl, EZFS_VOLTOOBIG, errbuf)); #endif /* FALLTHROUGH */ default: return (zfs_standard_error(hdl, errno, errbuf)); } } return (0); } /* * Destroys the given dataset. The caller must make sure that the filesystem * isn't mounted, and that there are no active dependents. */ int zfs_destroy(zfs_handle_t *zhp, boolean_t defer) { zfs_cmd_t zc = { 0 }; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (ZFS_IS_VOLUME(zhp)) { zc.zc_objset_type = DMU_OST_ZVOL; } else { zc.zc_objset_type = DMU_OST_ZFS; } zc.zc_defer_destroy = defer; if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY, &zc) != 0) { return (zfs_standard_error_fmt(zhp->zfs_hdl, errno, dgettext(TEXT_DOMAIN, "cannot destroy '%s'"), zhp->zfs_name)); } remove_mountpoint(zhp); return (0); } struct destroydata { nvlist_t *nvl; const char *snapname; }; static int zfs_check_snap_cb(zfs_handle_t *zhp, void *arg) { struct destroydata *dd = arg; zfs_handle_t *szhp; char name[ZFS_MAXNAMELEN]; int rv = 0; (void) snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name, dd->snapname); szhp = make_dataset_handle(zhp->zfs_hdl, name); if (szhp) { verify(nvlist_add_boolean(dd->nvl, name) == 0); zfs_close(szhp); } rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd); zfs_close(zhp); return (rv); } /* * Destroys all snapshots with the given name in zhp & descendants. 
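 *
 * Implementation note (added for clarity): zfs_check_snap_cb() walks
 * zhp and every descendant filesystem, collecting each existing
 * "<fs>@<snapname>" into an nvlist; the actual destruction then
 * happens in a single pass in zfs_destroy_snaps_nvl().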
*/ int zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer) { int ret; struct destroydata dd = { 0 }; dd.snapname = snapname; verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0); (void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd); if (nvlist_next_nvpair(dd.nvl, NULL) == NULL) { ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT, dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"), zhp->zfs_name, snapname); } else { ret = zfs_destroy_snaps_nvl(zhp, dd.nvl, defer); } nvlist_free(dd.nvl); return (ret); } /* * Destroys all the snapshots named in the nvlist. They must be underneath * the zhp (either snapshots of it, or snapshots of its descendants). */ int zfs_destroy_snaps_nvl(zfs_handle_t *zhp, nvlist_t *snaps, boolean_t defer) { int ret; zfs_cmd_t zc = { 0 }; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (zcmd_write_src_nvlist(zhp->zfs_hdl, &zc, snaps) != 0) return (-1); zc.zc_defer_destroy = defer; ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY_SNAPS_NVL, &zc); if (ret != 0) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot destroy snapshots in %s"), zc.zc_name); switch (errno) { case EEXIST: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "snapshot is cloned")); return (zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf)); default: return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf)); } } return (0); } /* * Clones the given dataset. The target must be of the same type as the source. */ int zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props) { zfs_cmd_t zc = { 0 }; char parent[ZFS_MAXNAMELEN]; int ret; char errbuf[1024]; libzfs_handle_t *hdl = zhp->zfs_hdl; zfs_type_t type; uint64_t zoned; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create '%s'"), target); /* validate the target/clone name */ if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); /* validate parents exist */ if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0) return (-1); (void) parent_name(target, parent, sizeof (parent)); /* do the clone */ if (ZFS_IS_VOLUME(zhp)) { zc.zc_objset_type = DMU_OST_ZVOL; type = ZFS_TYPE_VOLUME; } else { zc.zc_objset_type = DMU_OST_ZFS; type = ZFS_TYPE_FILESYSTEM; } if (props) { if ((props = zfs_valid_proplist(hdl, type, props, zoned, zhp, errbuf)) == NULL) return (-1); if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { nvlist_free(props); return (-1); } nvlist_free(props); } (void) strlcpy(zc.zc_name, target, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value)); ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_CREATE, &zc); zcmd_free_nvlists(&zc); if (ret != 0) { switch (errno) { case ENOENT: /* * The parent doesn't exist. We should have caught this * above, but there may a race condition that has since * destroyed the parent. * * At this point, we don't know whether it's the source * that doesn't exist anymore, or whether the target * dataset doesn't exist. */ zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "no such parent '%s'"), parent); return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf)); case EXDEV: zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "source and target pools differ")); return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET, errbuf)); default: return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf)); } } return (ret); } /* * Promotes the given clone fs to be the clone parent. 
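 *
 * For example (added for illustration): if "tank/clone" was cloned
 * from "tank/fs@snap", promoting "tank/clone" moves "snap" and any
 * older snapshots of "tank/fs" over to "tank/clone", after which
 * "tank/fs" itself becomes a clone of "tank/clone@snap".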
*/ int zfs_promote(zfs_handle_t *zhp) { libzfs_handle_t *hdl = zhp->zfs_hdl; zfs_cmd_t zc = { 0 }; char parent[MAXPATHLEN]; int ret; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot promote '%s'"), zhp->zfs_name); if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshots can not be promoted")); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); } (void) strlcpy(parent, zhp->zfs_dmustats.dds_origin, sizeof (parent)); if (parent[0] == '\0') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not a cloned filesystem")); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); } (void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_origin, sizeof (zc.zc_value)); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); ret = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc); if (ret != 0) { int save_errno = errno; switch (save_errno) { case EEXIST: /* There is a conflicting snapshot name. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "conflicting snapshot '%s' from parent '%s'"), zc.zc_string, parent); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); default: return (zfs_standard_error(hdl, save_errno, errbuf)); } } return (ret); } /* * Takes a snapshot of the given dataset. */ int zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive, nvlist_t *props) { const char *delim; char parent[ZFS_MAXNAMELEN]; zfs_handle_t *zhp; zfs_cmd_t zc = { 0 }; int ret; char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot snapshot '%s'"), path); /* validate the target name */ if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); if (props) { if ((props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT, props, B_FALSE, NULL, errbuf)) == NULL) return (-1); if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { nvlist_free(props); return (-1); } nvlist_free(props); } /* make sure the parent exists and is of the appropriate type */ delim = strchr(path, '@'); (void) strncpy(parent, path, delim - path); parent[delim - path] = '\0'; if ((zhp = zfs_open(hdl, parent, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) { zcmd_free_nvlists(&zc); return (-1); } (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, delim+1, sizeof (zc.zc_value)); if (ZFS_IS_VOLUME(zhp)) zc.zc_objset_type = DMU_OST_ZVOL; else zc.zc_objset_type = DMU_OST_ZFS; zc.zc_cookie = recursive; ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SNAPSHOT, &zc); zcmd_free_nvlists(&zc); /* * if it was recursive, the one that actually failed will be in * zc.zc_name. */ if (ret != 0) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value); (void) zfs_standard_error(hdl, errno, errbuf); } zfs_close(zhp); return (ret); } /* * Destroy any more recent snapshots. We invoke this callback on any dependents * of the snapshot first. If the 'cb_dependent' member is non-zero, then this * is a dependent and we should just destroy it without checking the transaction * group. 
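 *
 * Put differently (added note): for every snapshot newer than the
 * rollback target, zfs_iter_dependents() first destroys its clones,
 * unmounting each via a changelist, and only then is the snapshot
 * itself destroyed.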
*/ typedef struct rollback_data { const char *cb_target; /* the snapshot */ uint64_t cb_create; /* creation time reference */ boolean_t cb_error; boolean_t cb_dependent; boolean_t cb_force; } rollback_data_t; static int rollback_destroy(zfs_handle_t *zhp, void *data) { rollback_data_t *cbp = data; if (!cbp->cb_dependent) { if (strcmp(zhp->zfs_name, cbp->cb_target) != 0 && zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT && zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) { char *logstr; cbp->cb_dependent = B_TRUE; cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE, rollback_destroy, cbp); cbp->cb_dependent = B_FALSE; logstr = zhp->zfs_hdl->libzfs_log_str; zhp->zfs_hdl->libzfs_log_str = NULL; cbp->cb_error |= zfs_destroy(zhp, B_FALSE); zhp->zfs_hdl->libzfs_log_str = logstr; } } else { /* We must destroy this clone; first unmount it */ prop_changelist_t *clp; clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, cbp->cb_force ? MS_FORCE: 0); if (clp == NULL || changelist_prefix(clp) != 0) { cbp->cb_error = B_TRUE; zfs_close(zhp); return (0); } if (zfs_destroy(zhp, B_FALSE) != 0) cbp->cb_error = B_TRUE; else changelist_remove(clp, zhp->zfs_name); (void) changelist_postfix(clp); changelist_free(clp); } zfs_close(zhp); return (0); } /* * Given a dataset, rollback to a specific snapshot, discarding any * data changes since then and making it the active dataset. * * Any snapshots more recent than the target are destroyed, along with * their dependents. */ int zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force) { rollback_data_t cb = { 0 }; int err; zfs_cmd_t zc = { 0 }; boolean_t restore_resv = 0; uint64_t old_volsize, new_volsize; zfs_prop_t resv_prop; assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM || zhp->zfs_type == ZFS_TYPE_VOLUME); /* * Destroy all recent snapshots and its dependends. */ cb.cb_force = force; cb.cb_target = snap->zfs_name; cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG); (void) zfs_iter_children(zhp, rollback_destroy, &cb); if (cb.cb_error) return (-1); /* * Now that we have verified that the snapshot is the latest, * rollback to the given snapshot. */ if (zhp->zfs_type == ZFS_TYPE_VOLUME) { if (zfs_which_resv_prop(zhp, &resv_prop) < 0) return (-1); old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); restore_resv = (old_volsize == zfs_prop_get_int(zhp, resv_prop)); } (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (ZFS_IS_VOLUME(zhp)) zc.zc_objset_type = DMU_OST_ZVOL; else zc.zc_objset_type = DMU_OST_ZFS; /* * We rely on zfs_iter_children() to verify that there are no * newer snapshots for the given dataset. Therefore, we can * simply pass the name on to the ioctl() call. There is still * an unlikely race condition where the user has taken a * snapshot since we verified that this was the most recent. * */ if ((err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_ROLLBACK, &zc)) != 0) { (void) zfs_standard_error_fmt(zhp->zfs_hdl, errno, dgettext(TEXT_DOMAIN, "cannot rollback '%s'"), zhp->zfs_name); return (err); } /* * For volumes, if the pre-rollback volsize matched the pre- * rollback reservation and the volsize has changed then set * the reservation property to the post-rollback volsize. * Make a new handle since the rollback closed the dataset. 
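 *
 * Example (added for illustration): a zvol with volsize=20G and
 * refreservation=20G rolled back to a snapshot taken at volsize=10G
 * ends up with volsize=10G, so the refreservation is reset to 10G
 * below to keep the two in step.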
*/ if ((zhp->zfs_type == ZFS_TYPE_VOLUME) && (zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) { if (restore_resv) { new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE); if (old_volsize != new_volsize) err = zfs_prop_set_int(zhp, resv_prop, new_volsize); } zfs_close(zhp); } return (err); } /* * Renames the given dataset. */ int zfs_rename(zfs_handle_t *zhp, const char *target, renameflags_t flags) { int ret; zfs_cmd_t zc = { 0 }; char *delim; prop_changelist_t *cl = NULL; zfs_handle_t *zhrp = NULL; char *parentname = NULL; char parent[ZFS_MAXNAMELEN]; char property[ZFS_MAXPROPLEN]; libzfs_handle_t *hdl = zhp->zfs_hdl; char errbuf[1024]; /* if we have the same exact name, just return success */ if (strcmp(zhp->zfs_name, target) == 0) return (0); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rename to '%s'"), target); /* * Make sure the target name is valid */ if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) { if ((strchr(target, '@') == NULL) || *target == '@') { /* * Snapshot target name is abbreviated, * reconstruct full dataset name */ (void) strlcpy(parent, zhp->zfs_name, sizeof (parent)); delim = strchr(parent, '@'); if (strchr(target, '@') == NULL) *(++delim) = '\0'; else *delim = '\0'; (void) strlcat(parent, target, sizeof (parent)); target = parent; } else { /* * Make sure we're renaming within the same dataset. */ delim = strchr(target, '@'); if (strncmp(zhp->zfs_name, target, delim - target) != 0 || zhp->zfs_name[delim - target] != '@') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "snapshots must be part of same " "dataset")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); } } if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } else { if (flags.recurse) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "recursive rename must be a snapshot")); return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); } if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE)) return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); /* validate parents */ if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0) return (-1); /* make sure we're in the same pool */ verify((delim = strchr(target, '/')) != NULL); if (strncmp(zhp->zfs_name, target, delim - target) != 0 || zhp->zfs_name[delim - target] != '/') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "datasets must be within same pool")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); } /* new name cannot be a child of the current dataset name */ if (is_descendant(zhp->zfs_name, target)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "New dataset name cannot be a descendant of " "current dataset name")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } } (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name); if (getzoneid() == GLOBAL_ZONEID && zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset is used in a non-global zone")); return (zfs_error(hdl, EZFS_ZONED, errbuf)); } /* * Avoid unmounting file systems with mountpoint property set to * 'legacy' or 'none' even if -u option is not given. 
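 *
 * Rationale (added note): such file systems are mounted and unmounted
 * administratively rather than by libzfs, so the rename below proceeds
 * with the nounmount flag set, exactly as if -u had been given.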
*/ if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && !flags.recurse && !flags.nounmount && zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property, sizeof (property), NULL, NULL, 0, B_FALSE) == 0 && (strcmp(property, "legacy") == 0 || strcmp(property, "none") == 0)) { flags.nounmount = B_TRUE; } if (flags.recurse) { parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name); if (parentname == NULL) { ret = -1; goto error; } delim = strchr(parentname, '@'); *delim = '\0'; zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET); if (zhrp == NULL) { ret = -1; goto error; } } else { if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, - flags.nounmount ? CL_GATHER_DONT_UNMOUNT : 0, 0)) == NULL) { + flags.nounmount ? CL_GATHER_DONT_UNMOUNT : 0, + flags.forceunmount ? MS_FORCE : 0)) == NULL) { return (-1); } if (changelist_haszonedchild(cl)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "child dataset with inherited mountpoint is used " "in a non-global zone")); (void) zfs_error(hdl, EZFS_ZONED, errbuf); goto error; } if ((ret = changelist_prefix(cl)) != 0) goto error; } if (ZFS_IS_VOLUME(zhp)) zc.zc_objset_type = DMU_OST_ZVOL; else zc.zc_objset_type = DMU_OST_ZFS; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value)); zc.zc_cookie = flags.recurse ? 1 : 0; if (flags.nounmount) zc.zc_cookie |= 2; if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) { /* * if it was recursive, the one that actually failed will * be in zc.zc_name */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zc.zc_name); if (flags.recurse && errno == EEXIST) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "a child dataset already has a snapshot " "with the new name")); (void) zfs_error(hdl, EZFS_EXISTS, errbuf); } else { (void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf); } /* * On failure, we still want to remount any filesystems that * were previously mounted, so we don't alter the system state. */ if (!flags.recurse) (void) changelist_postfix(cl); } else { if (!flags.recurse) { changelist_rename(cl, zfs_get_name(zhp), target); ret = changelist_postfix(cl); } } error: if (parentname) { free(parentname); } if (zhrp) { zfs_close(zhrp); } if (cl) { changelist_free(cl); } return (ret); } nvlist_t * zfs_get_user_props(zfs_handle_t *zhp) { return (zhp->zfs_user_props); } nvlist_t * zfs_get_recvd_props(zfs_handle_t *zhp) { if (zhp->zfs_recvd_props == NULL) if (get_recvd_props_ioctl(zhp) != 0) return (NULL); return (zhp->zfs_recvd_props); } /* * This function is used by 'zfs list' to determine the exact set of columns to * display, and their maximum widths. This does two main things: * * - If this is a list of all properties, then expand the list to include * all native properties, and set a flag so that for each dataset we look * for new unique user properties and add them to the list. * * - For non fixed-width properties, keep track of the maximum width seen * so that we can size the column appropriately. If the user has * requested received property values, we also need to compute the width * of the RECEIVED column. 
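 *
 * Example (added for illustration): for a request such as
 * "zfs list -o name,compression,my:prop", each non-fixed column,
 * including the one for the user property "my:prop", is widened below
 * to the longest value seen across the datasets being listed.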
*/ int zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received) { libzfs_handle_t *hdl = zhp->zfs_hdl; zprop_list_t *entry; zprop_list_t **last, **start; nvlist_t *userprops, *propval; nvpair_t *elem; char *strval; char buf[ZFS_MAXPROPLEN]; if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0) return (-1); userprops = zfs_get_user_props(zhp); entry = *plp; if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) { /* * Go through and add any user properties as necessary. We * start by incrementing our list pointer to the first * non-native property. */ start = plp; while (*start != NULL) { if ((*start)->pl_prop == ZPROP_INVAL) break; start = &(*start)->pl_next; } elem = NULL; while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) { /* * See if we've already found this property in our list. */ for (last = start; *last != NULL; last = &(*last)->pl_next) { if (strcmp((*last)->pl_user_prop, nvpair_name(elem)) == 0) break; } if (*last == NULL) { if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL || ((entry->pl_user_prop = zfs_strdup(hdl, nvpair_name(elem)))) == NULL) { free(entry); return (-1); } entry->pl_prop = ZPROP_INVAL; entry->pl_width = strlen(nvpair_name(elem)); entry->pl_all = B_TRUE; *last = entry; } } } /* * Now go through and check the width of any non-fixed columns */ for (entry = *plp; entry != NULL; entry = entry->pl_next) { if (entry->pl_fixed) continue; if (entry->pl_prop != ZPROP_INVAL) { if (zfs_prop_get(zhp, entry->pl_prop, buf, sizeof (buf), NULL, NULL, 0, B_FALSE) == 0) { if (strlen(buf) > entry->pl_width) entry->pl_width = strlen(buf); } if (received && zfs_prop_get_recvd(zhp, zfs_prop_to_name(entry->pl_prop), buf, sizeof (buf), B_FALSE) == 0) if (strlen(buf) > entry->pl_recvd_width) entry->pl_recvd_width = strlen(buf); } else { if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop, &propval) == 0) { verify(nvlist_lookup_string(propval, ZPROP_VALUE, &strval) == 0); if (strlen(strval) > entry->pl_width) entry->pl_width = strlen(strval); } if (received && zfs_prop_get_recvd(zhp, entry->pl_user_prop, buf, sizeof (buf), B_FALSE) == 0) if (strlen(buf) > entry->pl_recvd_width) entry->pl_recvd_width = strlen(buf); } } return (0); } int zfs_deleg_share_nfs(libzfs_handle_t *hdl, char *dataset, char *path, char *resource, void *export, void *sharetab, int sharemax, zfs_share_op_t operation) { zfs_cmd_t zc = { 0 }; int error; (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value)); if (resource) (void) strlcpy(zc.zc_string, resource, sizeof (zc.zc_string)); zc.zc_share.z_sharedata = (uint64_t)(uintptr_t)sharetab; zc.zc_share.z_exportdata = (uint64_t)(uintptr_t)export; zc.zc_share.z_sharetype = operation; zc.zc_share.z_sharemax = sharemax; error = ioctl(hdl->libzfs_fd, ZFS_IOC_SHARE, &zc); return (error); } void zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props) { nvpair_t *curr; /* * Keep a reference to the props-table against which we prune the * properties. */ zhp->zfs_props_table = props; curr = nvlist_next_nvpair(zhp->zfs_props, NULL); while (curr) { zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr)); nvpair_t *next = nvlist_next_nvpair(zhp->zfs_props, curr); /* * User properties will result in ZPROP_INVAL, and since we * only know how to prune standard ZFS properties, we always * leave these in the list. This can also happen if we * encounter an unknown DSL property (when running older * software, for example). 
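 *
 * (Added note) props is a table indexed by zfs_prop_t: native
 * properties whose entry is B_FALSE are removed from zhp->zfs_props
 * below, while everything else, user properties included, is kept.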
 */
		if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE)
			(void) nvlist_remove(zhp->zfs_props,
			    nvpair_name(curr), nvpair_type(curr));
		curr = next;
	}
}

#ifdef sun
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
    zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *nvlist = NULL;
	int error;

	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
	zc.zc_cookie = (uint64_t)cmd;

	if (cmd == ZFS_SMB_ACL_RENAME) {
		if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
			(void) no_memory(hdl);
			return (-1);
		}
	}

	switch (cmd) {
	case ZFS_SMB_ACL_ADD:
	case ZFS_SMB_ACL_REMOVE:
		(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
		break;
	case ZFS_SMB_ACL_RENAME:
		if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
		    resource1) != 0) {
			(void) no_memory(hdl);
			return (-1);
		}
		if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
		    resource2) != 0) {
			(void) no_memory(hdl);
			return (-1);
		}
		if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) {
			nvlist_free(nvlist);
			return (-1);
		}
		break;
	case ZFS_SMB_ACL_PURGE:
		break;
	default:
		return (-1);
	}
	error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
	if (nvlist)
		nvlist_free(nvlist);
	return (error);
}

int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
    char *path, char *resource)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
	    resource, NULL));
}

int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
    char *path, char *resource)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
	    resource, NULL));
}

int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
	    NULL, NULL));
}

int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
    char *oldname, char *newname)
{
	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
	    oldname, newname));
}
#endif /* sun */

int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
    zfs_userspace_cb_t func, void *arg)
{
	zfs_cmd_t zc = { 0 };
	int error;
	zfs_useracct_t buf[100];

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	zc.zc_objset_type = type;
	zc.zc_nvlist_dst = (uintptr_t)buf;

	/* CONSTCOND */
	while (1) {
		zfs_useracct_t *zua = buf;

		zc.zc_nvlist_dst_size = sizeof (buf);
		error = ioctl(zhp->zfs_hdl->libzfs_fd,
		    ZFS_IOC_USERSPACE_MANY, &zc);
		if (error || zc.zc_nvlist_dst_size == 0)
			break;

		while (zc.zc_nvlist_dst_size > 0) {
			error = func(arg, zua->zu_domain, zua->zu_rid,
			    zua->zu_space);
			if (error != 0)
				return (error);
			zua++;
			zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
		}
	}

	return (error);
}

int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
    boolean_t recursive, boolean_t temphold, boolean_t enoent_ok,
    int cleanup_fd, uint64_t dsobj, uint64_t createtxg)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zfs_hdl;

	ASSERT(!recursive || dsobj == 0);

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
	if (strlcpy(zc.zc_string, tag, sizeof (zc.zc_string))
	    >= sizeof (zc.zc_string))
		return (zfs_error(hdl, EZFS_TAGTOOLONG, tag));
	zc.zc_cookie = recursive;
	zc.zc_temphold = temphold;
	zc.zc_cleanup_fd = cleanup_fd;
	zc.zc_sendobj = dsobj;
	zc.zc_createtxg = createtxg;

	if (zfs_ioctl(hdl, ZFS_IOC_HOLD, &zc) != 0) {
		char errbuf[ZFS_MAXNAMELEN+32];

		/*
		 * if it was recursive, the one that actually failed will be in
		 * zc.zc_name.
*/ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot hold '%s@%s'"), zc.zc_name, snapname); switch (errno) { case E2BIG: /* * Temporary tags wind up having the ds object id * prepended. So even if we passed the length check * above, it's still possible for the tag to wind * up being slightly too long. */ return (zfs_error(hdl, EZFS_TAGTOOLONG, errbuf)); case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); case EINVAL: return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); case EEXIST: return (zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf)); case ENOENT: if (enoent_ok) return (ENOENT); /* FALLTHROUGH */ default: return (zfs_standard_error_fmt(hdl, errno, errbuf)); } } return (0); } int zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag, boolean_t recursive) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value)); if (strlcpy(zc.zc_string, tag, sizeof (zc.zc_string)) >= sizeof (zc.zc_string)) return (zfs_error(hdl, EZFS_TAGTOOLONG, tag)); zc.zc_cookie = recursive; if (zfs_ioctl(hdl, ZFS_IOC_RELEASE, &zc) != 0) { char errbuf[ZFS_MAXNAMELEN+32]; /* * if it was recursive, the one that actually failed will be in * zc.zc_name. */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot release '%s' from '%s@%s'"), tag, zc.zc_name, snapname); switch (errno) { case ESRCH: return (zfs_error(hdl, EZFS_REFTAG_RELE, errbuf)); case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); return (zfs_error(hdl, EZFS_BADVERSION, errbuf)); case EINVAL: return (zfs_error(hdl, EZFS_BADTYPE, errbuf)); default: return (zfs_standard_error_fmt(hdl, errno, errbuf)); } } return (0); } int zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; int nvsz = 2048; void *nvbuf; int err = 0; char errbuf[ZFS_MAXNAMELEN+32]; assert(zhp->zfs_type == ZFS_TYPE_VOLUME || zhp->zfs_type == ZFS_TYPE_FILESYSTEM); tryagain: nvbuf = malloc(nvsz); if (nvbuf == NULL) { err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno))); goto out; } zc.zc_nvlist_dst_size = nvsz; zc.zc_nvlist_dst = (uintptr_t)nvbuf; (void) strlcpy(zc.zc_name, zhp->zfs_name, ZFS_MAXNAMELEN); if (ioctl(hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"), zc.zc_name); switch (errno) { case ENOMEM: free(nvbuf); nvsz = zc.zc_nvlist_dst_size; goto tryagain; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); err = zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: err = zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case ENOENT: err = zfs_error(hdl, EZFS_NOENT, errbuf); break; default: err = zfs_standard_error_fmt(hdl, errno, errbuf); break; } } else { /* success */ int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0); if (rc) { (void) snprintf(errbuf, sizeof (errbuf), dgettext( TEXT_DOMAIN, "cannot get permissions on '%s'"), zc.zc_name); err = zfs_standard_error_fmt(hdl, rc, errbuf); } } free(nvbuf); out: return (err); } int zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; char *nvbuf; char errbuf[ZFS_MAXNAMELEN+32]; size_t nvsz; int err; assert(zhp->zfs_type == ZFS_TYPE_VOLUME || zhp->zfs_type == ZFS_TYPE_FILESYSTEM); err = nvlist_size(nvl, &nvsz, 
NV_ENCODE_NATIVE); assert(err == 0); nvbuf = malloc(nvsz); err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0); assert(err == 0); zc.zc_nvlist_src_size = nvsz; zc.zc_nvlist_src = (uintptr_t)nvbuf; zc.zc_perm_action = un; (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"), zc.zc_name); switch (errno) { case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); err = zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: err = zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case ENOENT: err = zfs_error(hdl, EZFS_NOENT, errbuf); break; default: err = zfs_standard_error_fmt(hdl, errno, errbuf); break; } } free(nvbuf); return (err); } int zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; int nvsz = 2048; void *nvbuf; int err = 0; char errbuf[ZFS_MAXNAMELEN+32]; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); tryagain: nvbuf = malloc(nvsz); if (nvbuf == NULL) { err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno))); goto out; } zc.zc_nvlist_dst_size = nvsz; zc.zc_nvlist_dst = (uintptr_t)nvbuf; (void) strlcpy(zc.zc_name, zhp->zfs_name, ZFS_MAXNAMELEN); if (zfs_ioctl(hdl, ZFS_IOC_GET_HOLDS, &zc) != 0) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"), zc.zc_name); switch (errno) { case ENOMEM: free(nvbuf); nvsz = zc.zc_nvlist_dst_size; goto tryagain; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded")); err = zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EINVAL: err = zfs_error(hdl, EZFS_BADTYPE, errbuf); break; case ENOENT: err = zfs_error(hdl, EZFS_NOENT, errbuf); break; default: err = zfs_standard_error_fmt(hdl, errno, errbuf); break; } } else { /* success */ int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0); if (rc) { (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"), zc.zc_name); err = zfs_standard_error_fmt(hdl, rc, errbuf); } } free(nvbuf); out: return (err); } uint64_t zvol_volsize_to_reservation(uint64_t volsize, nvlist_t *props) { uint64_t numdb; uint64_t nblocks, volblocksize; int ncopies; char *strval; if (nvlist_lookup_string(props, zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0) ncopies = atoi(strval); else ncopies = 1; if (nvlist_lookup_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0) volblocksize = ZVOL_DEFAULT_BLOCKSIZE; nblocks = volsize/volblocksize; /* start with metadnode L0-L6 */ numdb = 7; /* calculate number of indirects */ while (nblocks > 1) { nblocks += DNODES_PER_LEVEL - 1; nblocks /= DNODES_PER_LEVEL; numdb += nblocks; } numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1); volsize *= ncopies; /* * this is exactly DN_MAX_INDBLKSHIFT when metadata isn't * compressed, but in practice they compress down to about * 1100 bytes */ numdb *= 1ULL << DN_MAX_INDBLKSHIFT; volsize += numdb; return (volsize); } /* * Attach/detach the given filesystem to/from the given jail. 
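 *
 * (Added note) Only file systems can be attached or detached; the
 * ioctl issued below is ZFS_IOC_JAIL when attach is non-zero and
 * ZFS_IOC_UNJAIL otherwise, with zc_jailid naming the target jail.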
 */
int
zfs_jail(zfs_handle_t *zhp, int jailid, int attach)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	zfs_cmd_t zc = { 0 };
	char errbuf[1024];
	unsigned long cmd;
	int ret;

	if (attach) {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot jail '%s'"), zhp->zfs_name);
	} else {
		(void) snprintf(errbuf, sizeof (errbuf),
		    dgettext(TEXT_DOMAIN, "cannot unjail '%s'"),
		    zhp->zfs_name);
	}

	switch (zhp->zfs_type) {
	case ZFS_TYPE_VOLUME:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "volumes can not be jailed"));
		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
	case ZFS_TYPE_SNAPSHOT:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "snapshots can not be jailed"));
		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
	}
	assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM);

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
	zc.zc_objset_type = DMU_OST_ZFS;
	zc.zc_jailid = jailid;

	cmd = attach ? ZFS_IOC_JAIL : ZFS_IOC_UNJAIL;
	if ((ret = ioctl(hdl->libzfs_fd, cmd, &zc)) != 0)
		zfs_standard_error(hdl, errno, errbuf);

	return (ret);
}
Index: projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c
===================================================================
--- projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c	(revision 235262)
+++ projects/nand/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c	(revision 235263)
@@ -1,3120 +1,3200 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012 Pawel Jakub Dawidek .
 * All rights reserved.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
+#include
#include
#include
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include
#include
#include

/* in libzfs_dataset.c */
extern void zfs_setprop_error(libzfs_handle_t *, zfs_prop_t, int, char *);

/* We need to use something for ENODATA.
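 * (Added note, stating the assumed motivation:) FreeBSD's errno.h does
 * not define ENODATA, so an errno value that these send/receive paths
 * do not otherwise use is borrowed for it.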
*/ #define ENODATA EIDRM static int zfs_receive_impl(libzfs_handle_t *, const char *, recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **, int, uint64_t *); static const zio_cksum_t zero_cksum = { 0 }; typedef struct dedup_arg { int inputfd; int outputfd; libzfs_handle_t *dedup_hdl; } dedup_arg_t; +typedef struct progress_arg { + zfs_handle_t *pa_zhp; + int pa_fd; + boolean_t pa_parsable; +} progress_arg_t; + typedef struct dataref { uint64_t ref_guid; uint64_t ref_object; uint64_t ref_offset; } dataref_t; typedef struct dedup_entry { struct dedup_entry *dde_next; zio_cksum_t dde_chksum; uint64_t dde_prop; dataref_t dde_ref; } dedup_entry_t; #define MAX_DDT_PHYSMEM_PERCENT 20 #define SMALLEST_POSSIBLE_MAX_DDT_MB 128 typedef struct dedup_table { dedup_entry_t **dedup_hash_array; umem_cache_t *ddecache; uint64_t max_ddt_size; /* max dedup table size in bytes */ uint64_t cur_ddt_size; /* current dedup table size in bytes */ uint64_t ddt_count; int numhashbits; boolean_t ddt_full; } dedup_table_t; static int high_order_bit(uint64_t n) { int count; for (count = 0; n != 0; count++) n >>= 1; return (count); } static size_t ssread(void *buf, size_t len, FILE *stream) { size_t outlen; if ((outlen = fread(buf, len, 1, stream)) == 0) return (0); return (outlen); } static void ddt_hash_append(libzfs_handle_t *hdl, dedup_table_t *ddt, dedup_entry_t **ddepp, zio_cksum_t *cs, uint64_t prop, dataref_t *dr) { dedup_entry_t *dde; if (ddt->cur_ddt_size >= ddt->max_ddt_size) { if (ddt->ddt_full == B_FALSE) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Dedup table full. Deduplication will continue " "with existing table entries")); ddt->ddt_full = B_TRUE; } return; } if ((dde = umem_cache_alloc(ddt->ddecache, UMEM_DEFAULT)) != NULL) { assert(*ddepp == NULL); dde->dde_next = NULL; dde->dde_chksum = *cs; dde->dde_prop = prop; dde->dde_ref = *dr; *ddepp = dde; ddt->cur_ddt_size += sizeof (dedup_entry_t); ddt->ddt_count++; } } /* * Using the specified dedup table, do a lookup for an entry with * the checksum cs. If found, return the block's reference info * in *dr. Otherwise, insert a new entry in the dedup table, using * the reference information specified by *dr. * * return value: true - entry was found * false - entry was not found */ static boolean_t ddt_update(libzfs_handle_t *hdl, dedup_table_t *ddt, zio_cksum_t *cs, uint64_t prop, dataref_t *dr) { uint32_t hashcode; dedup_entry_t **ddepp; hashcode = BF64_GET(cs->zc_word[0], 0, ddt->numhashbits); for (ddepp = &(ddt->dedup_hash_array[hashcode]); *ddepp != NULL; ddepp = &((*ddepp)->dde_next)) { if (ZIO_CHECKSUM_EQUAL(((*ddepp)->dde_chksum), *cs) && (*ddepp)->dde_prop == prop) { *dr = (*ddepp)->dde_ref; return (B_TRUE); } } ddt_hash_append(hdl, ddt, ddepp, cs, prop, dr); return (B_FALSE); } static int cksum_and_write(const void *buf, uint64_t len, zio_cksum_t *zc, int outfd) { fletcher_4_incremental_native(buf, len, zc); return (write(outfd, buf, len)); } /* * This function is started in a separate thread when the dedup option * has been requested. The main send thread determines the list of * snapshots to be included in the send stream and makes the ioctl calls * for each one. But instead of having the ioctl send the output to the * the output fd specified by the caller of zfs_send()), the * ioctl is told to direct the output to a pipe, which is read by the * alternate thread running THIS function. This function does the * dedup'ing by: * 1. building a dedup table (the DDT) * 2. 
doing checksums on each data block and inserting a record in the DDT
 * 3. looking for matching checksums, and
 * 4. sending a DRR_WRITE_BYREF record instead of a write record whenever
 *    a duplicate block is found.
 * The output of this function then goes to the output fd requested
 * by the caller of zfs_send().
 */
static void *
cksummer(void *arg)
{
	dedup_arg_t *dda = arg;
	char *buf = malloc(1<<20);
	dmu_replay_record_t thedrr;
	dmu_replay_record_t *drr = &thedrr;
	struct drr_begin *drrb = &thedrr.drr_u.drr_begin;
	struct drr_end *drre = &thedrr.drr_u.drr_end;
	struct drr_object *drro = &thedrr.drr_u.drr_object;
	struct drr_write *drrw = &thedrr.drr_u.drr_write;
	struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
	FILE *ofp;
	int outfd;
	dmu_replay_record_t wbr_drr = {0};
	struct drr_write_byref *wbr_drrr = &wbr_drr.drr_u.drr_write_byref;
	dedup_table_t ddt;
	zio_cksum_t stream_cksum;
	uint64_t physmem = sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE);
	uint64_t numbuckets;

	ddt.max_ddt_size =
	    MAX((physmem * MAX_DDT_PHYSMEM_PERCENT)/100,
	    SMALLEST_POSSIBLE_MAX_DDT_MB<<20);

	numbuckets = ddt.max_ddt_size/(sizeof (dedup_entry_t));

	/*
	 * numbuckets must be a power of 2.  Increase number to
	 * a power of 2 if necessary.
	 */
	if (!ISP2(numbuckets))
		numbuckets = 1 << high_order_bit(numbuckets);

	ddt.dedup_hash_array = calloc(numbuckets, sizeof (dedup_entry_t *));
	ddt.ddecache = umem_cache_create("dde", sizeof (dedup_entry_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);
	ddt.cur_ddt_size = numbuckets * sizeof (dedup_entry_t *);
	ddt.numhashbits = high_order_bit(numbuckets) - 1;
	ddt.ddt_full = B_FALSE;

	/* Initialize the write-by-reference block. */
	wbr_drr.drr_type = DRR_WRITE_BYREF;
	wbr_drr.drr_payloadlen = 0;

	outfd = dda->outputfd;
	ofp = fdopen(dda->inputfd, "r");
	while (ssread(drr, sizeof (dmu_replay_record_t), ofp) != 0) {

		switch (drr->drr_type) {
		case DRR_BEGIN:
		{
			int	fflags;
			ZIO_SET_CHECKSUM(&stream_cksum, 0, 0, 0, 0);

			/* set the DEDUP feature flag for this stream */
			fflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
			fflags |= (DMU_BACKUP_FEATURE_DEDUP |
			    DMU_BACKUP_FEATURE_DEDUPPROPS);
			DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, fflags);

			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
			    &stream_cksum, outfd) == -1)
				goto out;
			if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
			    DMU_COMPOUNDSTREAM && drr->drr_payloadlen != 0) {
				int sz = drr->drr_payloadlen;

				if (sz > 1<<20) {
					free(buf);
					buf = malloc(sz);
				}
				(void) ssread(buf, sz, ofp);
				if (ferror(ofp))
					perror("fread");
				if (cksum_and_write(buf, sz, &stream_cksum,
				    outfd) == -1)
					goto out;
			}
			break;
		}

		case DRR_END:
		{
			/* use the recalculated checksum */
			ZIO_SET_CHECKSUM(&drre->drr_checksum,
			    stream_cksum.zc_word[0], stream_cksum.zc_word[1],
			    stream_cksum.zc_word[2], stream_cksum.zc_word[3]);
			if ((write(outfd, drr,
			    sizeof (dmu_replay_record_t))) == -1)
				goto out;
			break;
		}

		case DRR_OBJECT:
		{
			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
			    &stream_cksum, outfd) == -1)
				goto out;
			if (drro->drr_bonuslen > 0) {
				(void) ssread(buf,
				    P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
				    ofp);
				if (cksum_and_write(buf,
				    P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
				    &stream_cksum, outfd) == -1)
					goto out;
			}
			break;
		}

		case DRR_SPILL:
		{
			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
			    &stream_cksum, outfd) == -1)
				goto out;
			(void) ssread(buf, drrs->drr_length, ofp);
			if (cksum_and_write(buf, drrs->drr_length,
			    &stream_cksum, outfd) == -1)
				goto out;
			break;
		}

		case DRR_FREEOBJECTS:
		{
			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
			    &stream_cksum, outfd) == -1)
				goto out;
			break;
		}

		case DRR_WRITE:
		{
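			/*
			 * (Added summary) A DRR_WRITE record is followed by
			 * drr_length bytes of payload.  If the record's
			 * checksum is not dedup-capable, a SHA256 checksum
			 * of the payload is computed first; the checksum is
			 * then looked up in the DDT, and on a hit the data
			 * is replaced below by a compact DRR_WRITE_BYREF
			 * record pointing at the earlier copy.
			 */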
			dataref_t	dataref;

			(void) ssread(buf, drrw->drr_length, ofp);

			/*
			 * Use the existing checksum if it's dedup-capable,
			 * else calculate a SHA256 checksum for it.
			 */
			if (ZIO_CHECKSUM_EQUAL(drrw->drr_key.ddk_cksum,
			    zero_cksum) ||
			    !DRR_IS_DEDUP_CAPABLE(drrw->drr_checksumflags)) {
				SHA256_CTX	ctx;
				zio_cksum_t	tmpsha256;

				SHA256Init(&ctx);
				SHA256Update(&ctx, buf, drrw->drr_length);
				SHA256Final(&tmpsha256, &ctx);
				drrw->drr_key.ddk_cksum.zc_word[0] =
				    BE_64(tmpsha256.zc_word[0]);
				drrw->drr_key.ddk_cksum.zc_word[1] =
				    BE_64(tmpsha256.zc_word[1]);
				drrw->drr_key.ddk_cksum.zc_word[2] =
				    BE_64(tmpsha256.zc_word[2]);
				drrw->drr_key.ddk_cksum.zc_word[3] =
				    BE_64(tmpsha256.zc_word[3]);
				drrw->drr_checksumtype = ZIO_CHECKSUM_SHA256;
				drrw->drr_checksumflags = DRR_CHECKSUM_DEDUP;
			}

			dataref.ref_guid = drrw->drr_toguid;
			dataref.ref_object = drrw->drr_object;
			dataref.ref_offset = drrw->drr_offset;

			if (ddt_update(dda->dedup_hdl, &ddt,
			    &drrw->drr_key.ddk_cksum, drrw->drr_key.ddk_prop,
			    &dataref)) {
				/* block already present in stream */
				wbr_drrr->drr_object = drrw->drr_object;
				wbr_drrr->drr_offset = drrw->drr_offset;
				wbr_drrr->drr_length = drrw->drr_length;
				wbr_drrr->drr_toguid = drrw->drr_toguid;
				wbr_drrr->drr_refguid = dataref.ref_guid;
				wbr_drrr->drr_refobject =
				    dataref.ref_object;
				wbr_drrr->drr_refoffset =
				    dataref.ref_offset;

				wbr_drrr->drr_checksumtype =
				    drrw->drr_checksumtype;
				wbr_drrr->drr_checksumflags =
				    drrw->drr_checksumflags;
				wbr_drrr->drr_key.ddk_cksum =
				    drrw->drr_key.ddk_cksum;
				wbr_drrr->drr_key.ddk_prop =
				    drrw->drr_key.ddk_prop;

				if (cksum_and_write(&wbr_drr,
				    sizeof (dmu_replay_record_t),
				    &stream_cksum, outfd) == -1)
					goto out;
			} else {
				/* block not previously seen */
				if (cksum_and_write(drr,
				    sizeof (dmu_replay_record_t),
				    &stream_cksum, outfd) == -1)
					goto out;
				if (cksum_and_write(buf,
				    drrw->drr_length,
				    &stream_cksum, outfd) == -1)
					goto out;
			}
			break;
		}

		case DRR_FREE:
		{
			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
			    &stream_cksum, outfd) == -1)
				goto out;
			break;
		}

		default:
			(void) printf("INVALID record type 0x%x\n",
			    drr->drr_type);
			/* should never happen, so assert */
			assert(B_FALSE);
		}
	}
out:
	umem_cache_destroy(ddt.ddecache);
	free(ddt.dedup_hash_array);
	free(buf);
	(void) fclose(ofp);

	return (NULL);
}

/*
 * Routines for dealing with the AVL tree of fs-nvlists
 */
typedef struct fsavl_node {
	avl_node_t fn_node;
	nvlist_t *fn_nvfs;
	char *fn_snapname;
	uint64_t fn_guid;
} fsavl_node_t;

static int
fsavl_compare(const void *arg1, const void *arg2)
{
	const fsavl_node_t *fn1 = arg1;
	const fsavl_node_t *fn2 = arg2;

	if (fn1->fn_guid > fn2->fn_guid)
		return (+1);
	else if (fn1->fn_guid < fn2->fn_guid)
		return (-1);
	else
		return (0);
}

/*
 * Given the GUID of a snapshot, find its containing filesystem and
 * (optionally) name.
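 *
 * (Added note) The returned nvlist and, when requested, the snapname
 * pointer reference memory owned by the underlying fs-nvlists rather
 * than by the caller, so neither should be freed individually.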
*/ static nvlist_t * fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname) { fsavl_node_t fn_find; fsavl_node_t *fn; fn_find.fn_guid = snapguid; fn = avl_find(avl, &fn_find, NULL); if (fn) { if (snapname) *snapname = fn->fn_snapname; return (fn->fn_nvfs); } return (NULL); } static void fsavl_destroy(avl_tree_t *avl) { fsavl_node_t *fn; void *cookie; if (avl == NULL) return; cookie = NULL; while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL) free(fn); avl_destroy(avl); free(avl); } /* * Given an nvlist, produce an avl tree of snapshots, ordered by guid */ static avl_tree_t * fsavl_create(nvlist_t *fss) { avl_tree_t *fsavl; nvpair_t *fselem = NULL; if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL) return (NULL); avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t), offsetof(fsavl_node_t, fn_node)); while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) { nvlist_t *nvfs, *snaps; nvpair_t *snapelem = NULL; VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs)); VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps)); while ((snapelem = nvlist_next_nvpair(snaps, snapelem)) != NULL) { fsavl_node_t *fn; uint64_t guid; VERIFY(0 == nvpair_value_uint64(snapelem, &guid)); if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) { fsavl_destroy(fsavl); return (NULL); } fn->fn_nvfs = nvfs; fn->fn_snapname = nvpair_name(snapelem); fn->fn_guid = guid; /* * Note: if there are multiple snaps with the * same GUID, we ignore all but one. */ if (avl_find(fsavl, fn, NULL) == NULL) avl_add(fsavl, fn); else free(fn); } } return (fsavl); } /* * Routines for dealing with the giant nvlist of fs-nvlists, etc. */ typedef struct send_data { uint64_t parent_fromsnap_guid; nvlist_t *parent_snaps; nvlist_t *fss; nvlist_t *snapprops; const char *fromsnap; const char *tosnap; boolean_t recursive; /* * The header nvlist is of the following format: * { * "tosnap" -> string * "fromsnap" -> string (if incremental) * "fss" -> { * id -> { * * "name" -> string (full name; for debugging) * "parentfromsnap" -> number (guid of fromsnap in parent) * * "props" -> { name -> value (only if set here) } * "snaps" -> { name (lastname) -> number (guid) } * "snapprops" -> { name (lastname) -> { name -> value } } * * "origin" -> number (guid) (if clone) * "sent" -> boolean (not on-disk) * } * } * } * */ } send_data_t; static void send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv); static int send_iterate_snap(zfs_handle_t *zhp, void *arg) { send_data_t *sd = arg; uint64_t guid = zhp->zfs_dmustats.dds_guid; char *snapname; nvlist_t *nv; snapname = strrchr(zhp->zfs_name, '@')+1; VERIFY(0 == nvlist_add_uint64(sd->parent_snaps, snapname, guid)); /* * NB: if there is no fromsnap here (it's a newly created fs in * an incremental replication), we will substitute the tosnap. */ if ((sd->fromsnap && strcmp(snapname, sd->fromsnap) == 0) || (sd->parent_fromsnap_guid == 0 && sd->tosnap && strcmp(snapname, sd->tosnap) == 0)) { sd->parent_fromsnap_guid = guid; } VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0)); send_iterate_prop(zhp, nv); VERIFY(0 == nvlist_add_nvlist(sd->snapprops, snapname, nv)); nvlist_free(nv); zfs_close(zhp); return (0); } static void send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv) { nvpair_t *elem = NULL; while ((elem = nvlist_next_nvpair(zhp->zfs_props, elem)) != NULL) { char *propname = nvpair_name(elem); zfs_prop_t prop = zfs_name_to_prop(propname); nvlist_t *propnv; if (!zfs_prop_user(propname)) { /* * Realistically, this should never happen. 
However, * we want the ability to add DSL properties without * needing to make incompatible version changes. We * need to ignore unknown properties to allow older * software to still send datasets containing these * properties, with the unknown properties elided. */ if (prop == ZPROP_INVAL) continue; if (zfs_prop_readonly(prop)) continue; } verify(nvpair_value_nvlist(elem, &propnv) == 0); if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION || prop == ZFS_PROP_REFQUOTA || prop == ZFS_PROP_REFRESERVATION) { char *source; uint64_t value; verify(nvlist_lookup_uint64(propnv, ZPROP_VALUE, &value) == 0); if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) continue; /* * May have no source before SPA_VERSION_RECVD_PROPS, * but is still modifiable. */ if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) == 0) { if ((strcmp(source, zhp->zfs_name) != 0) && (strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)) continue; } } else { char *source; if (nvlist_lookup_string(propnv, ZPROP_SOURCE, &source) != 0) continue; if ((strcmp(source, zhp->zfs_name) != 0) && (strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)) continue; } if (zfs_prop_user(propname) || zfs_prop_get_type(prop) == PROP_TYPE_STRING) { char *value; verify(nvlist_lookup_string(propnv, ZPROP_VALUE, &value) == 0); VERIFY(0 == nvlist_add_string(nv, propname, value)); } else { uint64_t value; verify(nvlist_lookup_uint64(propnv, ZPROP_VALUE, &value) == 0); VERIFY(0 == nvlist_add_uint64(nv, propname, value)); } } } /* * recursively generate nvlists describing datasets. See comment * for the data structure send_data_t above for description of contents * of the nvlist. */ static int send_iterate_fs(zfs_handle_t *zhp, void *arg) { send_data_t *sd = arg; nvlist_t *nvfs, *nv; int rv = 0; uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid; uint64_t guid = zhp->zfs_dmustats.dds_guid; char guidstring[64]; VERIFY(0 == nvlist_alloc(&nvfs, NV_UNIQUE_NAME, 0)); VERIFY(0 == nvlist_add_string(nvfs, "name", zhp->zfs_name)); VERIFY(0 == nvlist_add_uint64(nvfs, "parentfromsnap", sd->parent_fromsnap_guid)); if (zhp->zfs_dmustats.dds_origin[0]) { zfs_handle_t *origin = zfs_open(zhp->zfs_hdl, zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT); if (origin == NULL) return (-1); VERIFY(0 == nvlist_add_uint64(nvfs, "origin", origin->zfs_dmustats.dds_guid)); } /* iterate over props */ VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0)); send_iterate_prop(zhp, nv); VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv)); nvlist_free(nv); /* iterate over snaps, and set sd->parent_fromsnap_guid */ sd->parent_fromsnap_guid = 0; VERIFY(0 == nvlist_alloc(&sd->parent_snaps, NV_UNIQUE_NAME, 0)); VERIFY(0 == nvlist_alloc(&sd->snapprops, NV_UNIQUE_NAME, 0)); (void) zfs_iter_snapshots(zhp, B_FALSE, send_iterate_snap, sd); VERIFY(0 == nvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps)); VERIFY(0 == nvlist_add_nvlist(nvfs, "snapprops", sd->snapprops)); nvlist_free(sd->parent_snaps); nvlist_free(sd->snapprops); /* add this fs to nvlist */ (void) snprintf(guidstring, sizeof (guidstring), "0x%llx", (longlong_t)guid); VERIFY(0 == nvlist_add_nvlist(sd->fss, guidstring, nvfs)); nvlist_free(nvfs); /* iterate over children */ if (sd->recursive) rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd); sd->parent_fromsnap_guid = parent_fromsnap_guid_save; zfs_close(zhp); return (rv); } static int gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap, const char *tosnap, boolean_t recursive, nvlist_t **nvlp, avl_tree_t **avlp) { zfs_handle_t *zhp; send_data_t sd = { 0 }; int error; zhp = 
zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (zhp == NULL) return (EZFS_BADTYPE); VERIFY(0 == nvlist_alloc(&sd.fss, NV_UNIQUE_NAME, 0)); sd.fromsnap = fromsnap; sd.tosnap = tosnap; sd.recursive = recursive; if ((error = send_iterate_fs(zhp, &sd)) != 0) { nvlist_free(sd.fss); if (avlp != NULL) *avlp = NULL; *nvlp = NULL; return (error); } if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) { nvlist_free(sd.fss); *nvlp = NULL; return (EZFS_NOMEM); } *nvlp = sd.fss; return (0); } /* * Routines specific to "zfs send" */ typedef struct send_dump_data { /* these are all just the short snapname (the part after the @) */ const char *fromsnap; const char *tosnap; char prevsnap[ZFS_MAXNAMELEN]; uint64_t prevsnap_obj; boolean_t seenfrom, seento, replicate, doall, fromorigin; - boolean_t verbose, dryrun, parsable; + boolean_t verbose, dryrun, parsable, progress; int outfd; boolean_t err; nvlist_t *fss; avl_tree_t *fsavl; snapfilter_cb_t *filter_cb; void *filter_cb_arg; nvlist_t *debugnv; char holdtag[ZFS_MAXNAMELEN]; int cleanup_fd; uint64_t size; } send_dump_data_t; static int estimate_ioctl(zfs_handle_t *zhp, uint64_t fromsnap_obj, boolean_t fromorigin, uint64_t *sizep) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); assert(fromsnap_obj == 0 || !fromorigin); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); zc.zc_obj = fromorigin; zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); zc.zc_fromobj = fromsnap_obj; zc.zc_guid = 1; /* estimate flag */ if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "warning: cannot estimate space for '%s'"), zhp->zfs_name); switch (errno) { case EXDEV: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not an earlier snapshot from the same fs")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); case ENOENT: if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_SNAPSHOT)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incremental source (@%s) does not exist"), zc.zc_value); } return (zfs_error(hdl, EZFS_NOENT, errbuf)); case EDQUOT: case EFBIG: case EIO: case ENOLINK: case ENOSPC: case ENXIO: case EPIPE: case ERANGE: case EFAULT: case EROFS: zfs_error_aux(hdl, strerror(errno)); return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); default: return (zfs_standard_error(hdl, errno, errbuf)); } } *sizep = zc.zc_objset_type; return (0); } /* * Dumps a backup of the given snapshot (incremental from fromsnap if it's not * NULL) to the file descriptor specified by outfd. 
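 *
 * A minimal caller sketch (hypothetical snapshot handle "snap"; error
 * handling elided). estimate_ioctl() can be called first to report the
 * expected stream size:
 *
 *	uint64_t sz;
 *	if (estimate_ioctl(snap, 0, B_FALSE, &sz) == 0)
 *		(void) fprintf(stderr, "expecting ~%llu bytes\n",
 *		    (u_longlong_t)sz);
 *	err = dump_ioctl(snap, NULL, 0, B_FALSE, STDOUT_FILENO, NULL);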
*/ static int dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj, boolean_t fromorigin, int outfd, nvlist_t *debugnv) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zfs_hdl; nvlist_t *thisdbg; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); assert(fromsnap_obj == 0 || !fromorigin); (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); zc.zc_cookie = outfd; zc.zc_obj = fromorigin; zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); zc.zc_fromobj = fromsnap_obj; VERIFY(0 == nvlist_alloc(&thisdbg, NV_UNIQUE_NAME, 0)); if (fromsnap && fromsnap[0] != '\0') { VERIFY(0 == nvlist_add_string(thisdbg, "fromsnap", fromsnap)); } if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) { char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "warning: cannot send '%s'"), zhp->zfs_name); VERIFY(0 == nvlist_add_uint64(thisdbg, "error", errno)); if (debugnv) { VERIFY(0 == nvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg)); } nvlist_free(thisdbg); switch (errno) { case EXDEV: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "not an earlier snapshot from the same fs")); return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf)); case ENOENT: if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_SNAPSHOT)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incremental source (@%s) does not exist"), zc.zc_value); } return (zfs_error(hdl, EZFS_NOENT, errbuf)); case EDQUOT: case EFBIG: case EIO: case ENOLINK: case ENOSPC: #ifdef sun case ENOSTR: #endif case ENXIO: case EPIPE: case ERANGE: case EFAULT: case EROFS: zfs_error_aux(hdl, strerror(errno)); return (zfs_error(hdl, EZFS_BADBACKUP, errbuf)); default: return (zfs_standard_error(hdl, errno, errbuf)); } } if (debugnv) VERIFY(0 == nvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg)); nvlist_free(thisdbg); return (0); } static int hold_for_send(zfs_handle_t *zhp, send_dump_data_t *sdd) { zfs_handle_t *pzhp; int error = 0; char *thissnap; assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); if (sdd->dryrun) return (0); /* * zfs_send() only opens a cleanup_fd for sends that need it, * e.g. replication and doall. */ if (sdd->cleanup_fd == -1) return (0); thissnap = strchr(zhp->zfs_name, '@') + 1; *(thissnap - 1) = '\0'; pzhp = zfs_open(zhp->zfs_hdl, zhp->zfs_name, ZFS_TYPE_DATASET); *(thissnap - 1) = '@'; /* * It's OK if the parent no longer exists. The send code will * handle that error. */ if (pzhp) { error = zfs_hold(pzhp, thissnap, sdd->holdtag, B_FALSE, B_TRUE, B_TRUE, sdd->cleanup_fd, zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID), zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG)); zfs_close(pzhp); } return (error); } +static void * +send_progress_thread(void *arg) +{ + progress_arg_t *pa = arg; + + zfs_cmd_t zc = { 0 }; + zfs_handle_t *zhp = pa->pa_zhp; + libzfs_handle_t *hdl = zhp->zfs_hdl; + unsigned long long bytes; + char buf[16]; + + time_t t; + struct tm *tm; + + assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT); + (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name)); + + if (!pa->pa_parsable) + (void) fprintf(stderr, "TIME SENT SNAPSHOT\n"); + + /* + * Print the progress from ZFS_IOC_SEND_PROGRESS every second. 
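+ * In the default mode each line looks like, e.g.,
+ * "17:30:12 1.45G tank/fs@today" (hypothetical values); in parsable
+ * mode the byte count is printed raw and tab-separated, e.g.
+ * "17:30:12\t1557168128\ttank/fs@today".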
+ */ + for (;;) { + (void) sleep(1); + + zc.zc_cookie = pa->pa_fd; + if (zfs_ioctl(hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0) + return ((void *)-1); + + (void) time(&t); + tm = localtime(&t); + bytes = zc.zc_cookie; + + if (pa->pa_parsable) { + (void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n", + tm->tm_hour, tm->tm_min, tm->tm_sec, + bytes, zhp->zfs_name); + } else { + zfs_nicenum(bytes, buf, sizeof (buf)); + (void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n", + tm->tm_hour, tm->tm_min, tm->tm_sec, + buf, zhp->zfs_name); + } + } +} + static int dump_snapshot(zfs_handle_t *zhp, void *arg) { send_dump_data_t *sdd = arg; + progress_arg_t pa = { 0 }; + pthread_t tid; + char *thissnap; int err; boolean_t isfromsnap, istosnap, fromorigin; boolean_t exclude = B_FALSE; thissnap = strchr(zhp->zfs_name, '@') + 1; isfromsnap = (sdd->fromsnap != NULL && strcmp(sdd->fromsnap, thissnap) == 0); if (!sdd->seenfrom && isfromsnap) { err = hold_for_send(zhp, sdd); if (err == 0) { sdd->seenfrom = B_TRUE; (void) strcpy(sdd->prevsnap, thissnap); sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); } else if (err == ENOENT) { err = 0; } zfs_close(zhp); return (err); } if (sdd->seento || !sdd->seenfrom) { zfs_close(zhp); return (0); } istosnap = (strcmp(sdd->tosnap, thissnap) == 0); if (istosnap) sdd->seento = B_TRUE; if (!sdd->doall && !isfromsnap && !istosnap) { if (sdd->replicate) { char *snapname; nvlist_t *snapprops; /* * Filter out all intermediate snapshots except origin * snapshots needed to replicate clones. */ nvlist_t *nvfs = fsavl_find(sdd->fsavl, zhp->zfs_dmustats.dds_guid, &snapname); VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snapprops", &snapprops)); VERIFY(0 == nvlist_lookup_nvlist(snapprops, thissnap, &snapprops)); exclude = !nvlist_exists(snapprops, "is_clone_origin"); } else { exclude = B_TRUE; } } /* * If a filter function exists, call it to determine whether * this snapshot will be sent. */ if (exclude || (sdd->filter_cb != NULL && sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) { /* * This snapshot is filtered out. Don't send it, and don't * set prevsnap_obj, so it will be as if this snapshot didn't * exist, and the next accepted snapshot will be sent as * an incremental from the last accepted one, or as the * first (and full) snapshot in the case of a replication, * non-incremental send. */ zfs_close(zhp); return (0); } err = hold_for_send(zhp, sdd); if (err) { if (err == ENOENT) err = 0; zfs_close(zhp); return (err); } fromorigin = sdd->prevsnap[0] == '\0' && (sdd->fromorigin || sdd->replicate); if (sdd->verbose) { uint64_t size; err = estimate_ioctl(zhp, sdd->prevsnap_obj, fromorigin, &size); if (sdd->parsable) { if (sdd->prevsnap[0] != '\0') { (void) fprintf(stderr, "incremental\t%s\t%s", sdd->prevsnap, zhp->zfs_name); } else { (void) fprintf(stderr, "full\t%s", zhp->zfs_name); } } else { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "send from @%s to %s"), sdd->prevsnap, zhp->zfs_name); } if (err == 0) { if (sdd->parsable) { (void) fprintf(stderr, "\t%llu\n", (longlong_t)size); } else { char buf[16]; zfs_nicenum(size, buf, sizeof (buf)); (void) fprintf(stderr, dgettext(TEXT_DOMAIN, " estimated size is %s\n"), buf); } sdd->size += size; } else { (void) fprintf(stderr, "\n"); } } if (!sdd->dryrun) { + /* + * If progress reporting is requested, spawn a new thread to + * poll ZFS_IOC_SEND_PROGRESS at a regular interval. 
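+ * The thread prints until the blocking dump_ioctl() below
+ * returns, at which point it is cancelled and joined so no
+ * progress lines outlive this snapshot's stream.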
+ */ + if (sdd->progress) { + pa.pa_zhp = zhp; + pa.pa_fd = sdd->outfd; + pa.pa_parsable = sdd->parsable; + + if (err = pthread_create(&tid, NULL, + send_progress_thread, &pa)) { + zfs_close(zhp); + return (err); + } + } + err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj, fromorigin, sdd->outfd, sdd->debugnv); + + if (sdd->progress) { + (void) pthread_cancel(tid); + (void) pthread_join(tid, NULL); + } } (void) strcpy(sdd->prevsnap, thissnap); sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID); zfs_close(zhp); return (err); } static int dump_filesystem(zfs_handle_t *zhp, void *arg) { int rv = 0; send_dump_data_t *sdd = arg; boolean_t missingfrom = B_FALSE; zfs_cmd_t zc = { 0 }; (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", zhp->zfs_name, sdd->tosnap); if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: could not send %s@%s: does not exist\n"), zhp->zfs_name, sdd->tosnap); sdd->err = B_TRUE; return (0); } if (sdd->replicate && sdd->fromsnap) { /* * If this fs does not have fromsnap, and we're doing * recursive, we need to send a full stream from the * beginning (or an incremental from the origin if this * is a clone). If we're doing non-recursive, then let * them get the error. */ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", zhp->zfs_name, sdd->fromsnap); if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) { missingfrom = B_TRUE; } } sdd->seenfrom = sdd->seento = sdd->prevsnap[0] = 0; sdd->prevsnap_obj = 0; if (sdd->fromsnap == NULL || missingfrom) sdd->seenfrom = B_TRUE; rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg); if (!sdd->seenfrom) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: could not send %s@%s:\n" "incremental source (%s@%s) does not exist\n"), zhp->zfs_name, sdd->tosnap, zhp->zfs_name, sdd->fromsnap); sdd->err = B_TRUE; } else if (!sdd->seento) { if (sdd->fromsnap) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: could not send %s@%s:\n" "incremental source (%s@%s) " "is not earlier than it\n"), zhp->zfs_name, sdd->tosnap, zhp->zfs_name, sdd->fromsnap); } else { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "WARNING: " "could not send %s@%s: does not exist\n"), zhp->zfs_name, sdd->tosnap); } sdd->err = B_TRUE; } return (rv); } static int dump_filesystems(zfs_handle_t *rzhp, void *arg) { send_dump_data_t *sdd = arg; nvpair_t *fspair; boolean_t needagain, progress; if (!sdd->replicate) return (dump_filesystem(rzhp, sdd)); /* Mark the clone origin snapshots. 
*/ for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; fspair = nvlist_next_nvpair(sdd->fss, fspair)) { nvlist_t *nvfs; uint64_t origin_guid = 0; VERIFY(0 == nvpair_value_nvlist(fspair, &nvfs)); (void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid); if (origin_guid != 0) { char *snapname; nvlist_t *origin_nv = fsavl_find(sdd->fsavl, origin_guid, &snapname); if (origin_nv != NULL) { nvlist_t *snapprops; VERIFY(0 == nvlist_lookup_nvlist(origin_nv, "snapprops", &snapprops)); VERIFY(0 == nvlist_lookup_nvlist(snapprops, snapname, &snapprops)); VERIFY(0 == nvlist_add_boolean( snapprops, "is_clone_origin")); } } } again: needagain = progress = B_FALSE; for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; fspair = nvlist_next_nvpair(sdd->fss, fspair)) { nvlist_t *fslist, *parent_nv; char *fsname; zfs_handle_t *zhp; int err; uint64_t origin_guid = 0; uint64_t parent_guid = 0; VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0); if (nvlist_lookup_boolean(fslist, "sent") == 0) continue; VERIFY(nvlist_lookup_string(fslist, "name", &fsname) == 0); (void) nvlist_lookup_uint64(fslist, "origin", &origin_guid); (void) nvlist_lookup_uint64(fslist, "parentfromsnap", &parent_guid); if (parent_guid != 0) { parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL); if (!nvlist_exists(parent_nv, "sent")) { /* parent has not been sent; skip this one */ needagain = B_TRUE; continue; } } if (origin_guid != 0) { nvlist_t *origin_nv = fsavl_find(sdd->fsavl, origin_guid, NULL); if (origin_nv != NULL && !nvlist_exists(origin_nv, "sent")) { /* * origin has not been sent yet; * skip this clone. */ needagain = B_TRUE; continue; } } zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET); if (zhp == NULL) return (-1); err = dump_filesystem(zhp, sdd); VERIFY(nvlist_add_boolean(fslist, "sent") == 0); progress = B_TRUE; zfs_close(zhp); if (err) return (err); } if (needagain) { assert(progress); goto again; } /* clean out the sent flags in case we reuse this fss */ for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair; fspair = nvlist_next_nvpair(sdd->fss, fspair)) { nvlist_t *fslist; VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0); (void) nvlist_remove_all(fslist, "sent"); } return (0); } /* * Generate a send stream for the dataset identified by the argument zhp. * * The content of the send stream is the snapshot identified by * 'tosnap'. Incremental streams are requested in two ways: * - from the snapshot identified by "fromsnap" (if non-null) or * - from the origin of the dataset identified by zhp, which must * be a clone. In this case, "fromsnap" is null and "fromorigin" * is TRUE. * * The send stream is recursive (i.e. dumps a hierarchy of snapshots) and * uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM) * if "replicate" is set. If "doall" is set, dump all the intermediate * snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall" * case too. If "props" is set, send properties. 
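 *
 * A minimal usage sketch (hypothetical dataset and snapshot names; error
 * handling elided) for a full, non-recursive send to stdout:
 *
 *	sendflags_t f = { 0 };
 *	zfs_handle_t *zhp = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);
 *	int err = zfs_send(zhp, NULL, "today", &f, STDOUT_FILENO,
 *	    NULL, NULL, NULL);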
*/ int zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap, sendflags_t *flags, int outfd, snapfilter_cb_t filter_func, void *cb_arg, nvlist_t **debugnvp) { char errbuf[1024]; send_dump_data_t sdd = { 0 }; int err = 0; nvlist_t *fss = NULL; avl_tree_t *fsavl = NULL; static uint64_t holdseq; int spa_version; boolean_t holdsnaps = B_FALSE; pthread_t tid; int pipefd[2]; dedup_arg_t dda = { 0 }; int featureflags = 0; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot send '%s'"), zhp->zfs_name); if (fromsnap && fromsnap[0] == '\0') { zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN, "zero-length incremental source")); return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf)); } if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM) { uint64_t version; version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION); if (version >= ZPL_VERSION_SA) { featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; } } if (!flags->dryrun && zfs_spa_version(zhp, &spa_version) == 0 && spa_version >= SPA_VERSION_USERREFS && (flags->doall || flags->replicate)) holdsnaps = B_TRUE; if (flags->dedup && !flags->dryrun) { featureflags |= (DMU_BACKUP_FEATURE_DEDUP | DMU_BACKUP_FEATURE_DEDUPPROPS); if (err = pipe(pipefd)) { zfs_error_aux(zhp->zfs_hdl, strerror(errno)); return (zfs_error(zhp->zfs_hdl, EZFS_PIPEFAILED, errbuf)); } dda.outputfd = outfd; dda.inputfd = pipefd[1]; dda.dedup_hdl = zhp->zfs_hdl; if (err = pthread_create(&tid, NULL, cksummer, &dda)) { (void) close(pipefd[0]); (void) close(pipefd[1]); zfs_error_aux(zhp->zfs_hdl, strerror(errno)); return (zfs_error(zhp->zfs_hdl, EZFS_THREADCREATEFAILED, errbuf)); } } if (flags->replicate || flags->doall || flags->props) { dmu_replay_record_t drr = { 0 }; char *packbuf = NULL; size_t buflen = 0; zio_cksum_t zc = { 0 }; if (flags->replicate || flags->props) { nvlist_t *hdrnv; VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0)); if (fromsnap) { VERIFY(0 == nvlist_add_string(hdrnv, "fromsnap", fromsnap)); } VERIFY(0 == nvlist_add_string(hdrnv, "tosnap", tosnap)); if (!flags->replicate) { VERIFY(0 == nvlist_add_boolean(hdrnv, "not_recursive")); } err = gather_nvlist(zhp->zfs_hdl, zhp->zfs_name, fromsnap, tosnap, flags->replicate, &fss, &fsavl); if (err) goto err_out; VERIFY(0 == nvlist_add_nvlist(hdrnv, "fss", fss)); err = nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR, 0); if (debugnvp) *debugnvp = hdrnv; else nvlist_free(hdrnv); if (err) { fsavl_destroy(fsavl); nvlist_free(fss); goto stderr_out; } } if (!flags->dryrun) { /* write first begin record */ drr.drr_type = DRR_BEGIN; drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin. drr_versioninfo, DMU_COMPOUNDSTREAM); DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin. 
drr_versioninfo, featureflags); (void) snprintf(drr.drr_u.drr_begin.drr_toname, sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", zhp->zfs_name, tosnap); drr.drr_payloadlen = buflen; err = cksum_and_write(&drr, sizeof (drr), &zc, outfd); /* write header nvlist */ if (err != -1 && packbuf != NULL) { err = cksum_and_write(packbuf, buflen, &zc, outfd); } free(packbuf); if (err == -1) { fsavl_destroy(fsavl); nvlist_free(fss); err = errno; goto stderr_out; } /* write end record */ bzero(&drr, sizeof (drr)); drr.drr_type = DRR_END; drr.drr_u.drr_end.drr_checksum = zc; err = write(outfd, &drr, sizeof (drr)); if (err == -1) { fsavl_destroy(fsavl); nvlist_free(fss); err = errno; goto stderr_out; } err = 0; } } /* dump each stream */ sdd.fromsnap = fromsnap; sdd.tosnap = tosnap; if (flags->dedup) sdd.outfd = pipefd[0]; else sdd.outfd = outfd; sdd.replicate = flags->replicate; sdd.doall = flags->doall; sdd.fromorigin = flags->fromorigin; sdd.fss = fss; sdd.fsavl = fsavl; sdd.verbose = flags->verbose; sdd.parsable = flags->parsable; + sdd.progress = flags->progress; sdd.dryrun = flags->dryrun; sdd.filter_cb = filter_func; sdd.filter_cb_arg = cb_arg; if (debugnvp) sdd.debugnv = *debugnvp; - if (holdsnaps) { + if (holdsnaps || flags->progress) { ++holdseq; (void) snprintf(sdd.holdtag, sizeof (sdd.holdtag), ".send-%d-%llu", getpid(), (u_longlong_t)holdseq); sdd.cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL); if (sdd.cleanup_fd < 0) { err = errno; goto stderr_out; } } else { sdd.cleanup_fd = -1; } if (flags->verbose) { /* * Do a verbose no-op dry run to get all the verbose output * before generating any data. Then do a non-verbose real * run to generate the streams. */ sdd.dryrun = B_TRUE; err = dump_filesystems(zhp, &sdd); sdd.dryrun = flags->dryrun; sdd.verbose = B_FALSE; if (flags->parsable) { (void) fprintf(stderr, "size\t%llu\n", (longlong_t)sdd.size); } else { char buf[16]; zfs_nicenum(sdd.size, buf, sizeof (buf)); (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "total estimated size is %s\n"), buf); } } err = dump_filesystems(zhp, &sdd); fsavl_destroy(fsavl); nvlist_free(fss); if (flags->dedup) { (void) close(pipefd[0]); (void) pthread_join(tid, NULL); } if (sdd.cleanup_fd != -1) { VERIFY(0 == close(sdd.cleanup_fd)); sdd.cleanup_fd = -1; } if (!flags->dryrun && (flags->replicate || flags->doall || flags->props)) { /* * write final end record. NB: want to do this even if * there was some error, because it might not be totally * failed. 
*/ dmu_replay_record_t drr = { 0 }; drr.drr_type = DRR_END; if (write(outfd, &drr, sizeof (drr)) == -1) { return (zfs_standard_error(zhp->zfs_hdl, errno, errbuf)); } } return (err || sdd.err); stderr_out: err = zfs_standard_error(zhp->zfs_hdl, err, errbuf); err_out: if (sdd.cleanup_fd != -1) VERIFY(0 == close(sdd.cleanup_fd)); if (flags->dedup) { (void) pthread_cancel(tid); (void) pthread_join(tid, NULL); (void) close(pipefd[0]); } return (err); } /* * Routines specific to "zfs recv" */ static int recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen, boolean_t byteswap, zio_cksum_t *zc) { char *cp = buf; int rv; int len = ilen; do { rv = read(fd, cp, len); cp += rv; len -= rv; } while (rv > 0); if (rv < 0 || len != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to read from stream")); return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN, "cannot receive"))); } if (zc) { if (byteswap) fletcher_4_incremental_byteswap(buf, ilen, zc); else fletcher_4_incremental_native(buf, ilen, zc); } return (0); } static int recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp, boolean_t byteswap, zio_cksum_t *zc) { char *buf; int err; buf = zfs_alloc(hdl, len); if (buf == NULL) return (ENOMEM); err = recv_read(hdl, fd, buf, len, byteswap, zc); if (err != 0) { free(buf); return (err); } err = nvlist_unpack(buf, len, nvp, 0); free(buf); if (err != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "stream (malformed nvlist)")); return (EINVAL); } return (0); } static int recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname, int baselen, char *newname, recvflags_t *flags) { static int seq; zfs_cmd_t zc = { 0 }; int err; prop_changelist_t *clp; zfs_handle_t *zhp; zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); if (zhp == NULL) return (-1); clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, flags->force ? MS_FORCE : 0); zfs_close(zhp); if (clp == NULL) return (-1); err = changelist_prefix(clp); if (err) return (err); zc.zc_objset_type = DMU_OST_ZFS; (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name)); if (tryname) { (void) strcpy(newname, tryname); (void) strlcpy(zc.zc_value, tryname, sizeof (zc.zc_value)); if (flags->verbose) { (void) printf("attempting rename %s to %s\n", zc.zc_name, zc.zc_value); } err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc); if (err == 0) changelist_rename(clp, name, tryname); } else { err = ENOENT; } if (err != 0 && strncmp(name+baselen, "recv-", 5) != 0) { seq++; (void) strncpy(newname, name, baselen); (void) snprintf(newname+baselen, ZFS_MAXNAMELEN-baselen, "recv-%u-%u", getpid(), seq); (void) strlcpy(zc.zc_value, newname, sizeof (zc.zc_value)); if (flags->verbose) { (void) printf("failed - trying rename %s to %s\n", zc.zc_name, zc.zc_value); } err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc); if (err == 0) changelist_rename(clp, name, newname); if (err && flags->verbose) { (void) printf("failed (%u) - " "will try again on next pass\n", errno); } err = EAGAIN; } else if (flags->verbose) { if (err == 0) (void) printf("success\n"); else (void) printf("failed (%u)\n", errno); } (void) changelist_postfix(clp); changelist_free(clp); return (err); } static int recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen, char *newname, recvflags_t *flags) { zfs_cmd_t zc = { 0 }; int err = 0; prop_changelist_t *clp; zfs_handle_t *zhp; boolean_t defer = B_FALSE; int spa_version; zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET); if (zhp == NULL) return (-1); clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, flags->force ? 
MS_FORCE : 0); if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT && zfs_spa_version(zhp, &spa_version) == 0 && spa_version >= SPA_VERSION_USERREFS) defer = B_TRUE; zfs_close(zhp); if (clp == NULL) return (-1); err = changelist_prefix(clp); if (err) return (err); zc.zc_objset_type = DMU_OST_ZFS; zc.zc_defer_destroy = defer; (void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name)); if (flags->verbose) (void) printf("attempting destroy %s\n", zc.zc_name); err = ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc); if (err == 0) { if (flags->verbose) (void) printf("success\n"); changelist_remove(clp, zc.zc_name); } (void) changelist_postfix(clp); changelist_free(clp); /* * Deferred destroy might destroy the snapshot or only mark it to be * destroyed later, and it returns success in either case. */ if (err != 0 || (defer && zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT))) { err = recv_rename(hdl, name, NULL, baselen, newname, flags); } return (err); } typedef struct guid_to_name_data { uint64_t guid; char *name; char *skip; } guid_to_name_data_t; static int guid_to_name_cb(zfs_handle_t *zhp, void *arg) { guid_to_name_data_t *gtnd = arg; int err; if (gtnd->skip != NULL && strcmp(zhp->zfs_name, gtnd->skip) == 0) { return (0); } if (zhp->zfs_dmustats.dds_guid == gtnd->guid) { (void) strcpy(gtnd->name, zhp->zfs_name); zfs_close(zhp); return (EEXIST); } err = zfs_iter_children(zhp, guid_to_name_cb, gtnd); zfs_close(zhp); return (err); } /* * Attempt to find the local dataset associated with this guid. In the case of * multiple matches, we attempt to find the "best" match by searching * progressively larger portions of the hierarchy. This allows one to send a * tree of datasets individually and guarantee that we will find the source * guid within that hierarchy, even if there are multiple matches elsewhere. * For example, when resolving a guid under tank/a/b, everything beneath * tank/a is searched before the rest of the pool. */ static int guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid, char *name) { /* exhaustively search all local snapshots */ char pname[ZFS_MAXNAMELEN]; guid_to_name_data_t gtnd; int err = 0; zfs_handle_t *zhp; char *cp; gtnd.guid = guid; gtnd.name = name; gtnd.skip = NULL; (void) strlcpy(pname, parent, sizeof (pname)); /* * Search progressively larger portions of the hierarchy. This will * select the "most local" version of the origin snapshot in the case * that there are multiple matching snapshots in the system. */ while ((cp = strrchr(pname, '/')) != NULL) { /* Chop off the last component and open the parent */ *cp = '\0'; zhp = make_dataset_handle(hdl, pname); if (zhp == NULL) continue; err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd); zfs_close(zhp); if (err == EEXIST) return (0); /* * Remember the dataset that we already searched, so we * skip it next time through. */ gtnd.skip = pname; } return (ENOENT); } /* * Return -1 if guid1's snapshot was created before guid2's, 0 if they are * the same, and +1 if guid1's was created after guid2's. 
*/ static int created_before(libzfs_handle_t *hdl, avl_tree_t *avl, uint64_t guid1, uint64_t guid2) { nvlist_t *nvfs; char *fsname, *snapname; char buf[ZFS_MAXNAMELEN]; int rv; zfs_handle_t *guid1hdl, *guid2hdl; uint64_t create1, create2; if (guid2 == 0) return (0); if (guid1 == 0) return (1); nvfs = fsavl_find(avl, guid1, &snapname); VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname); guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT); if (guid1hdl == NULL) return (-1); nvfs = fsavl_find(avl, guid2, &snapname); VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); (void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname); guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT); if (guid2hdl == NULL) { zfs_close(guid1hdl); return (-1); } create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG); create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG); if (create1 < create2) rv = -1; else if (create1 > create2) rv = +1; else rv = 0; zfs_close(guid1hdl); zfs_close(guid2hdl); return (rv); } static int recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs, recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl, nvlist_t *renamed) { nvlist_t *local_nv; avl_tree_t *local_avl; nvpair_t *fselem, *nextfselem; char *fromsnap; char newname[ZFS_MAXNAMELEN]; int error; boolean_t needagain, progress, recursive; char *s1, *s2; VERIFY(0 == nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap)); recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") == ENOENT); if (flags->dryrun) return (0); again: needagain = progress = B_FALSE; if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL, recursive, &local_nv, &local_avl)) != 0) return (error); /* * Process deletes and renames */ for (fselem = nvlist_next_nvpair(local_nv, NULL); fselem; fselem = nextfselem) { nvlist_t *nvfs, *snaps; nvlist_t *stream_nvfs = NULL; nvpair_t *snapelem, *nextsnapelem; uint64_t fromguid = 0; uint64_t originguid = 0; uint64_t stream_originguid = 0; uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid; char *fsname, *stream_fsname; nextfselem = nvlist_next_nvpair(local_nv, fselem); VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs)); VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps)); VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname)); VERIFY(0 == nvlist_lookup_uint64(nvfs, "parentfromsnap", &parent_fromsnap_guid)); (void) nvlist_lookup_uint64(nvfs, "origin", &originguid); /* * First find the stream's fs, so we can check for * a different origin (due to "zfs promote") */ for (snapelem = nvlist_next_nvpair(snaps, NULL); snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) { uint64_t thisguid; VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid)); stream_nvfs = fsavl_find(stream_avl, thisguid, NULL); if (stream_nvfs != NULL) break; } /* check for promote */ (void) nvlist_lookup_uint64(stream_nvfs, "origin", &stream_originguid); if (stream_nvfs && originguid != stream_originguid) { switch (created_before(hdl, local_avl, stream_originguid, originguid)) { case 1: { /* promote it! 
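				 * The stream's origin postdates our local
				 * origin, which indicates a "zfs promote" on
				 * the sending side; mirror that promotion
				 * locally before retrying the snapshot
				 * fixups on the next pass.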
*/ zfs_cmd_t zc = { 0 }; nvlist_t *origin_nvfs; char *origin_fsname; if (flags->verbose) (void) printf("promoting %s\n", fsname); origin_nvfs = fsavl_find(local_avl, originguid, NULL); VERIFY(0 == nvlist_lookup_string(origin_nvfs, "name", &origin_fsname)); (void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value)); (void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name)); error = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc); if (error == 0) progress = B_TRUE; break; } default: break; case -1: fsavl_destroy(local_avl); nvlist_free(local_nv); return (-1); } /* * We had/have the wrong origin, therefore our * list of snapshots is wrong. Need to handle * them on the next pass. */ needagain = B_TRUE; continue; } for (snapelem = nvlist_next_nvpair(snaps, NULL); snapelem; snapelem = nextsnapelem) { uint64_t thisguid; char *stream_snapname; nvlist_t *found, *props; nextsnapelem = nvlist_next_nvpair(snaps, snapelem); VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid)); found = fsavl_find(stream_avl, thisguid, &stream_snapname); /* check for delete */ if (found == NULL) { char name[ZFS_MAXNAMELEN]; if (!flags->force) continue; (void) snprintf(name, sizeof (name), "%s@%s", fsname, nvpair_name(snapelem)); error = recv_destroy(hdl, name, strlen(fsname)+1, newname, flags); if (error) needagain = B_TRUE; else progress = B_TRUE; continue; } stream_nvfs = found; if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops", &props) && 0 == nvlist_lookup_nvlist(props, stream_snapname, &props)) { zfs_cmd_t zc = { 0 }; zc.zc_cookie = B_TRUE; /* received */ (void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s", fsname, nvpair_name(snapelem)); if (zcmd_write_src_nvlist(hdl, &zc, props) == 0) { (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc); zcmd_free_nvlists(&zc); } } /* check for different snapname */ if (strcmp(nvpair_name(snapelem), stream_snapname) != 0) { char name[ZFS_MAXNAMELEN]; char tryname[ZFS_MAXNAMELEN]; (void) snprintf(name, sizeof (name), "%s@%s", fsname, nvpair_name(snapelem)); (void) snprintf(tryname, sizeof (tryname), "%s@%s", fsname, stream_snapname); error = recv_rename(hdl, name, tryname, strlen(fsname)+1, newname, flags); if (error) needagain = B_TRUE; else progress = B_TRUE; } if (strcmp(stream_snapname, fromsnap) == 0) fromguid = thisguid; } /* check for delete */ if (stream_nvfs == NULL) { if (!flags->force) continue; error = recv_destroy(hdl, fsname, strlen(tofs)+1, newname, flags); if (error) needagain = B_TRUE; else progress = B_TRUE; continue; } if (fromguid == 0) { if (flags->verbose) { (void) printf("local fs %s does not have " "fromsnap (%s in stream); must have " "been deleted locally; ignoring\n", fsname, fromsnap); } continue; } VERIFY(0 == nvlist_lookup_string(stream_nvfs, "name", &stream_fsname)); VERIFY(0 == nvlist_lookup_uint64(stream_nvfs, "parentfromsnap", &stream_parent_fromsnap_guid)); s1 = strrchr(fsname, '/'); s2 = strrchr(stream_fsname, '/'); /* * Check for rename. If the exact receive path is specified, it * does not count as a rename, but we still need to check the * datasets beneath it. 
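	 * For example (hypothetical names): if the local dataset pool/fs/a
	 * matches a stream filesystem named pool/fs/b by guid, the differing
	 * last name components indicate a rename, and recv_rename() below
	 * moves pool/fs/a to pool/fs/b.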
*/ if ((stream_parent_fromsnap_guid != 0 && parent_fromsnap_guid != 0 && stream_parent_fromsnap_guid != parent_fromsnap_guid) || ((flags->isprefix || strcmp(tofs, fsname) != 0) && (s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) { nvlist_t *parent; char tryname[ZFS_MAXNAMELEN]; parent = fsavl_find(local_avl, stream_parent_fromsnap_guid, NULL); /* * NB: parent might not be found if we used the * tosnap for stream_parent_fromsnap_guid, * because the parent is a newly-created fs; * we'll be able to rename it after we recv the * new fs. */ if (parent != NULL) { char *pname; VERIFY(0 == nvlist_lookup_string(parent, "name", &pname)); (void) snprintf(tryname, sizeof (tryname), "%s%s", pname, strrchr(stream_fsname, '/')); } else { tryname[0] = '\0'; if (flags->verbose) { (void) printf("local fs %s new parent " "not found\n", fsname); } } newname[0] = '\0'; error = recv_rename(hdl, fsname, tryname, strlen(tofs)+1, newname, flags); if (renamed != NULL && newname[0] != '\0') { VERIFY(0 == nvlist_add_boolean(renamed, newname)); } if (error) needagain = B_TRUE; else progress = B_TRUE; } } fsavl_destroy(local_avl); nvlist_free(local_nv); if (needagain && progress) { /* do another pass to fix up temporary names */ if (flags->verbose) (void) printf("another pass:\n"); goto again; } return (needagain); } static int zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname, recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc, char **top_zfs, int cleanup_fd, uint64_t *action_handlep) { nvlist_t *stream_nv = NULL; avl_tree_t *stream_avl = NULL; char *fromsnap = NULL; char *cp; char tofs[ZFS_MAXNAMELEN]; char sendfs[ZFS_MAXNAMELEN]; char errbuf[1024]; dmu_replay_record_t drre; int error; boolean_t anyerr = B_FALSE; boolean_t softerr = B_FALSE; boolean_t recursive; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); assert(drr->drr_type == DRR_BEGIN); assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC); assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) == DMU_COMPOUNDSTREAM); /* * Read in the nvlist from the stream. */ if (drr->drr_payloadlen != 0) { error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen, &stream_nv, flags->byteswap, zc); if (error) { error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } } recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") == ENOENT); if (recursive && strchr(destname, '@')) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot specify snapshot name for multi-snapshot stream")); error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } /* * Read in the end record and verify checksum. 
*/ if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre), flags->byteswap, NULL))) goto out; if (flags->byteswap) { drre.drr_type = BSWAP_32(drre.drr_type); drre.drr_u.drr_end.drr_checksum.zc_word[0] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]); drre.drr_u.drr_end.drr_checksum.zc_word[1] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]); drre.drr_u.drr_end.drr_checksum.zc_word[2] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]); drre.drr_u.drr_end.drr_checksum.zc_word[3] = BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]); } if (drre.drr_type != DRR_END) { error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "incorrect header checksum")); error = zfs_error(hdl, EZFS_BADSTREAM, errbuf); goto out; } (void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap); if (drr->drr_payloadlen != 0) { nvlist_t *stream_fss; VERIFY(0 == nvlist_lookup_nvlist(stream_nv, "fss", &stream_fss)); if ((stream_avl = fsavl_create(stream_fss)) == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "couldn't allocate avl tree")); error = zfs_error(hdl, EZFS_NOMEM, errbuf); goto out; } if (fromsnap != NULL) { nvlist_t *renamed = NULL; nvpair_t *pair = NULL; (void) strlcpy(tofs, destname, ZFS_MAXNAMELEN); if (flags->isprefix) { struct drr_begin *drrb = &drr->drr_u.drr_begin; int i; if (flags->istail) { cp = strrchr(drrb->drr_toname, '/'); if (cp == NULL) { (void) strlcat(tofs, "/", ZFS_MAXNAMELEN); i = 0; } else { i = (cp - drrb->drr_toname); } } else { i = strcspn(drrb->drr_toname, "/@"); } /* zfs_receive_one() will create_parents() */ (void) strlcat(tofs, &drrb->drr_toname[i], ZFS_MAXNAMELEN); *strchr(tofs, '@') = '\0'; } if (recursive && !flags->dryrun && !flags->nomount) { VERIFY(0 == nvlist_alloc(&renamed, NV_UNIQUE_NAME, 0)); } softerr = recv_incremental_replication(hdl, tofs, flags, stream_nv, stream_avl, renamed); /* Unmount renamed filesystems before receiving. */ while ((pair = nvlist_next_nvpair(renamed, pair)) != NULL) { zfs_handle_t *zhp; prop_changelist_t *clp = NULL; zhp = zfs_open(hdl, nvpair_name(pair), ZFS_TYPE_FILESYSTEM); if (zhp != NULL) { clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, 0); zfs_close(zhp); if (clp != NULL) { softerr |= changelist_prefix(clp); changelist_free(clp); } } } nvlist_free(renamed); } } /* * Get the fs specified by the first path in the stream (the top level * specified by 'zfs send') and pass it to each invocation of * zfs_receive_one(). */ (void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname, ZFS_MAXNAMELEN); if ((cp = strchr(sendfs, '@')) != NULL) *cp = '\0'; /* Finally, receive each contained stream */ do { /* * we should figure out if it has a recoverable * error, in which case do a recv_skip() and drive on. * Note, if we fail due to already having this guid, * zfs_receive_one() will take care of it (ie, * recv_skip() and return 0). */ error = zfs_receive_impl(hdl, destname, flags, fd, sendfs, stream_nv, stream_avl, top_zfs, cleanup_fd, action_handlep); if (error == ENODATA) { error = 0; break; } anyerr |= error; } while (error == 0); if (drr->drr_payloadlen != 0 && fromsnap != NULL) { /* * Now that we have the fs's they sent us, try the * renames again. 
*/ softerr = recv_incremental_replication(hdl, tofs, flags, stream_nv, stream_avl, NULL); } out: fsavl_destroy(stream_avl); if (stream_nv) nvlist_free(stream_nv); if (softerr) error = -2; if (anyerr) error = -1; return (error); } static void trunc_prop_errs(int truncated) { ASSERT(truncated != 0); if (truncated == 1) (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "1 more property could not be set\n")); else (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "%d more properties could not be set\n"), truncated); } static int recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap) { dmu_replay_record_t *drr; void *buf = malloc(1<<20); char errbuf[1024]; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive:")); /* XXX would be great to use lseek if possible... */ drr = buf; while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t), byteswap, NULL) == 0) { if (byteswap) drr->drr_type = BSWAP_32(drr->drr_type); switch (drr->drr_type) { case DRR_BEGIN: /* NB: not to be used on v2 stream packages */ if (drr->drr_payloadlen != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid substream header")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } break; case DRR_END: free(buf); return (0); case DRR_OBJECT: if (byteswap) { drr->drr_u.drr_object.drr_bonuslen = BSWAP_32(drr->drr_u.drr_object. drr_bonuslen); } (void) recv_read(hdl, fd, buf, P2ROUNDUP(drr->drr_u.drr_object.drr_bonuslen, 8), B_FALSE, NULL); break; case DRR_WRITE: if (byteswap) { drr->drr_u.drr_write.drr_length = BSWAP_64(drr->drr_u.drr_write.drr_length); } (void) recv_read(hdl, fd, buf, drr->drr_u.drr_write.drr_length, B_FALSE, NULL); break; case DRR_SPILL: if (byteswap) { drr->drr_u.drr_spill.drr_length = BSWAP_64(drr->drr_u.drr_spill.drr_length); } (void) recv_read(hdl, fd, buf, drr->drr_u.drr_spill.drr_length, B_FALSE, NULL); break; case DRR_WRITE_BYREF: case DRR_FREEOBJECTS: case DRR_FREE: break; default: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid record type")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } } free(buf); return (-1); } /* * Restores a backup of tosnap from the file descriptor specified by infd. 
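 *
 * For example (hypothetical names): for a stream of pool/fs@snap
 * received with "zfs recv -d pool2", sendfs is "pool/fs" and everything
 * after the pool name is tacked on, so the destination becomes
 * pool2/fs@snap.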
*/ static int zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, recvflags_t *flags, dmu_replay_record_t *drr, dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd, uint64_t *action_handlep) { zfs_cmd_t zc = { 0 }; time_t begin_time; int ioctl_err, ioctl_errno, err; char *cp; struct drr_begin *drrb = &drr->drr_u.drr_begin; char errbuf[1024]; char prop_errbuf[1024]; const char *chopprefix; boolean_t newfs = B_FALSE; boolean_t stream_wantsnewfs; uint64_t parent_snapguid = 0; prop_changelist_t *clp = NULL; nvlist_t *snapprops_nvlist = NULL; zprop_errflags_t prop_errflags; boolean_t recursive; begin_time = time(NULL); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") == ENOENT); if (stream_avl != NULL) { char *snapname; nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid, &snapname); nvlist_t *props; int ret; (void) nvlist_lookup_uint64(fs, "parentfromsnap", &parent_snapguid); err = nvlist_lookup_nvlist(fs, "props", &props); if (err) VERIFY(0 == nvlist_alloc(&props, NV_UNIQUE_NAME, 0)); if (flags->canmountoff) { VERIFY(0 == nvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0)); } ret = zcmd_write_src_nvlist(hdl, &zc, props); if (err) nvlist_free(props); if (0 == nvlist_lookup_nvlist(fs, "snapprops", &props)) { VERIFY(0 == nvlist_lookup_nvlist(props, snapname, &snapprops_nvlist)); } if (ret != 0) return (-1); } cp = NULL; /* * Determine how much of the snapshot name stored in the stream * we are going to tack on to the name they specified on the * command line, and how much we are going to chop off. * * If they specified a snapshot, chop the entire name stored in * the stream. */ if (flags->istail) { /* * A filesystem was specified with -e. We want to tack on only * the tail of the sent snapshot path. */ if (strchr(tosnap, '@')) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "argument - snapshot not allowed with -e")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } chopprefix = strrchr(sendfs, '/'); if (chopprefix == NULL) { /* * The tail is the poolname, so we need to * prepend a path separator. */ int len = strlen(drrb->drr_toname); cp = malloc(len + 2); cp[0] = '/'; (void) strcpy(&cp[1], drrb->drr_toname); chopprefix = cp; } else { chopprefix = drrb->drr_toname + (chopprefix - sendfs); } } else if (flags->isprefix) { /* * A filesystem was specified with -d. We want to tack on * everything but the first element of the sent snapshot path * (all but the pool name). */ if (strchr(tosnap, '@')) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "argument - snapshot not allowed with -d")); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } chopprefix = strchr(drrb->drr_toname, '/'); if (chopprefix == NULL) chopprefix = strchr(drrb->drr_toname, '@'); } else if (strchr(tosnap, '@') == NULL) { /* * If a filesystem was specified without -d or -e, we want to * tack on everything after the fs specified by 'zfs send'. */ chopprefix = drrb->drr_toname + strlen(sendfs); } else { /* A snapshot was specified as an exact path (no -d or -e). 
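		 * For example, "zfs recv pool2/a@snap" uses the given name
		 * verbatim: chopprefix ends up empty, so nothing from the
		 * sent snapshot name is appended.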
*/ if (recursive) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot specify snapshot name for multi-snapshot " "stream")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } chopprefix = drrb->drr_toname + strlen(drrb->drr_toname); } ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname); ASSERT(chopprefix > drrb->drr_toname); ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname)); ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' || chopprefix[0] == '\0'); /* * Determine name of destination snapshot, store in zc_value. */ (void) strcpy(zc.zc_top_ds, tosnap); (void) strcpy(zc.zc_value, tosnap); (void) strncat(zc.zc_value, chopprefix, sizeof (zc.zc_value)); free(cp); if (!zfs_name_valid(zc.zc_value, ZFS_TYPE_SNAPSHOT)) { zcmd_free_nvlists(&zc); return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf)); } /* * Determine the name of the origin snapshot, store in zc_string. */ if (drrb->drr_flags & DRR_FLAG_CLONE) { if (guid_to_name(hdl, zc.zc_value, drrb->drr_fromguid, zc.zc_string) != 0) { zcmd_free_nvlists(&zc); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "local origin for clone %s does not exist"), zc.zc_value); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } if (flags->verbose) (void) printf("found clone origin %s\n", zc.zc_string); } stream_wantsnewfs = (drrb->drr_fromguid == 0 || (drrb->drr_flags & DRR_FLAG_CLONE)); if (stream_wantsnewfs) { /* * if the parent fs does not exist, look for it based on * the parent snap GUID */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive new filesystem stream")); (void) strcpy(zc.zc_name, zc.zc_value); cp = strrchr(zc.zc_name, '/'); if (cp) *cp = '\0'; if (cp && !zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { char suffix[ZFS_MAXNAMELEN]; (void) strcpy(suffix, strrchr(zc.zc_value, '/')); if (guid_to_name(hdl, zc.zc_name, parent_snapguid, zc.zc_value) == 0) { *strchr(zc.zc_value, '@') = '\0'; (void) strcat(zc.zc_value, suffix); } } } else { /* * if the fs does not exist, look for it based on the * fromsnap GUID */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive incremental stream")); (void) strcpy(zc.zc_name, zc.zc_value); *strchr(zc.zc_name, '@') = '\0'; /* * If the exact receive path was specified and this is the * topmost path in the stream, then if the fs does not exist we * should look no further. */ if ((flags->isprefix || (*(chopprefix = drrb->drr_toname + strlen(sendfs)) != '\0' && *chopprefix != '@')) && !zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { char snap[ZFS_MAXNAMELEN]; (void) strcpy(snap, strchr(zc.zc_value, '@')); if (guid_to_name(hdl, zc.zc_name, drrb->drr_fromguid, zc.zc_value) == 0) { *strchr(zc.zc_value, '@') = '\0'; (void) strcat(zc.zc_value, snap); } } } (void) strcpy(zc.zc_name, zc.zc_value); *strchr(zc.zc_name, '@') = '\0'; if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) { zfs_handle_t *zhp; /* * Destination fs exists. Therefore this should either * be an incremental, or the stream specifies a new fs * (full stream or clone) and they want us to blow it * away (and have therefore specified -F and removed any * snapshots). */ if (stream_wantsnewfs) { if (!flags->force) { zcmd_free_nvlists(&zc); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination '%s' exists\n" "must specify -F to overwrite it"), zc.zc_name); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); } if (ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc) == 0) { zcmd_free_nvlists(&zc); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination has snapshots (eg. 
%s)\n" "must destroy them to overwrite it"), zc.zc_name); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); } } if ((zhp = zfs_open(hdl, zc.zc_name, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) { zcmd_free_nvlists(&zc); return (-1); } if (stream_wantsnewfs && zhp->zfs_dmustats.dds_origin[0]) { zcmd_free_nvlists(&zc); zfs_close(zhp); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination '%s' is a clone\n" "must destroy it to overwrite it"), zc.zc_name); return (zfs_error(hdl, EZFS_EXISTS, errbuf)); } if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM && stream_wantsnewfs) { /* We can't do online recv in this case */ clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0); if (clp == NULL) { zfs_close(zhp); zcmd_free_nvlists(&zc); return (-1); } if (changelist_prefix(clp) != 0) { changelist_free(clp); zfs_close(zhp); zcmd_free_nvlists(&zc); return (-1); } } zfs_close(zhp); } else { /* * Destination filesystem does not exist. Therefore we better * be creating a new filesystem (either from a full backup, or * a clone). It would therefore be invalid if the user * specified only the pool name (i.e. if the destination name * contained no slash character). */ if (!stream_wantsnewfs || (cp = strrchr(zc.zc_name, '/')) == NULL) { zcmd_free_nvlists(&zc); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination '%s' does not exist"), zc.zc_name); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } /* * Trim off the final dataset component so we perform the * recvbackup ioctl to the filesystems's parent. */ *cp = '\0'; if (flags->isprefix && !flags->istail && !flags->dryrun && create_parents(hdl, zc.zc_value, strlen(tosnap)) != 0) { zcmd_free_nvlists(&zc); return (zfs_error(hdl, EZFS_BADRESTORE, errbuf)); } newfs = B_TRUE; } zc.zc_begin_record = drr_noswap->drr_u.drr_begin; zc.zc_cookie = infd; zc.zc_guid = flags->force; if (flags->verbose) { (void) printf("%s %s stream of %s into %s\n", flags->dryrun ? "would receive" : "receiving", drrb->drr_fromguid ? 
"incremental" : "full", drrb->drr_toname, zc.zc_value); (void) fflush(stdout); } if (flags->dryrun) { zcmd_free_nvlists(&zc); return (recv_skip(hdl, infd, flags->byteswap)); } zc.zc_nvlist_dst = (uint64_t)(uintptr_t)prop_errbuf; zc.zc_nvlist_dst_size = sizeof (prop_errbuf); zc.zc_cleanup_fd = cleanup_fd; zc.zc_action_handle = *action_handlep; err = ioctl_err = zfs_ioctl(hdl, ZFS_IOC_RECV, &zc); ioctl_errno = errno; prop_errflags = (zprop_errflags_t)zc.zc_obj; if (err == 0) { nvlist_t *prop_errors; VERIFY(0 == nvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst, zc.zc_nvlist_dst_size, &prop_errors, 0)); nvpair_t *prop_err = NULL; while ((prop_err = nvlist_next_nvpair(prop_errors, prop_err)) != NULL) { char tbuf[1024]; zfs_prop_t prop; int intval; prop = zfs_name_to_prop(nvpair_name(prop_err)); (void) nvpair_value_int32(prop_err, &intval); if (strcmp(nvpair_name(prop_err), ZPROP_N_MORE_ERRORS) == 0) { trunc_prop_errs(intval); break; } else { (void) snprintf(tbuf, sizeof (tbuf), dgettext(TEXT_DOMAIN, "cannot receive %s property on %s"), nvpair_name(prop_err), zc.zc_name); zfs_setprop_error(hdl, prop, intval, tbuf); } } nvlist_free(prop_errors); } zc.zc_nvlist_dst = 0; zc.zc_nvlist_dst_size = 0; zcmd_free_nvlists(&zc); if (err == 0 && snapprops_nvlist) { zfs_cmd_t zc2 = { 0 }; (void) strcpy(zc2.zc_name, zc.zc_value); zc2.zc_cookie = B_TRUE; /* received */ if (zcmd_write_src_nvlist(hdl, &zc2, snapprops_nvlist) == 0) { (void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc2); zcmd_free_nvlists(&zc2); } } if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) { /* * It may be that this snapshot already exists, * in which case we want to consume & ignore it * rather than failing. */ avl_tree_t *local_avl; nvlist_t *local_nv, *fs; cp = strchr(zc.zc_value, '@'); /* * XXX Do this faster by just iterating over snaps in * this fs. Also if zc_value does not exist, we will * get a strange "does not exist" error message. 
*/ *cp = '\0'; if (gather_nvlist(hdl, zc.zc_value, NULL, NULL, B_FALSE, &local_nv, &local_avl) == 0) { *cp = '@'; fs = fsavl_find(local_avl, drrb->drr_toguid, NULL); fsavl_destroy(local_avl); nvlist_free(local_nv); if (fs != NULL) { if (flags->verbose) { (void) printf("snap %s already exists; " "ignoring\n", zc.zc_value); } err = ioctl_err = recv_skip(hdl, infd, flags->byteswap); } } *cp = '@'; } if (ioctl_err != 0) { switch (ioctl_errno) { case ENODEV: cp = strchr(zc.zc_value, '@'); *cp = '\0'; zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "most recent snapshot of %s does not\n" "match incremental source"), zc.zc_value); (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf); *cp = '@'; break; case ETXTBSY: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination %s has been modified\n" "since most recent snapshot"), zc.zc_name); (void) zfs_error(hdl, EZFS_BADRESTORE, errbuf); break; case EEXIST: cp = strchr(zc.zc_value, '@'); if (newfs) { /* it's the containing fs that exists */ *cp = '\0'; } zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination already exists")); (void) zfs_error_fmt(hdl, EZFS_EXISTS, dgettext(TEXT_DOMAIN, "cannot restore to %s"), zc.zc_value); *cp = '@'; break; case EINVAL: (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); break; case ECKSUM: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid stream (checksum mismatch)")); (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); break; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to receive this stream.")); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); break; case EDQUOT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "destination %s space quota exceeded"), zc.zc_name); (void) zfs_error(hdl, EZFS_NOSPC, errbuf); break; default: (void) zfs_standard_error(hdl, ioctl_errno, errbuf); } } /* * Mount the target filesystem (if created). Also mount any * children of the target filesystem if we did a replication * receive (indicated by stream_avl being non-NULL). */ cp = strchr(zc.zc_value, '@'); if (cp && (ioctl_err == 0 || !newfs)) { zfs_handle_t *h; *cp = '\0'; h = zfs_open(hdl, zc.zc_value, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME); if (h != NULL) { if (h->zfs_type == ZFS_TYPE_VOLUME) { *cp = '@'; } else if (newfs || stream_avl) { /* * Track the first/top of hierarchy fs, * for mounting and sharing later. 
*/ if (top_zfs && *top_zfs == NULL) *top_zfs = zfs_strdup(hdl, zc.zc_value); } zfs_close(h); } *cp = '@'; } if (clp) { err |= changelist_postfix(clp); changelist_free(clp); } if (prop_errflags & ZPROP_ERR_NOCLEAR) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: " "failed to clear unreceived properties on %s"), zc.zc_name); (void) fprintf(stderr, "\n"); } if (prop_errflags & ZPROP_ERR_NORESTORE) { (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: " "failed to restore original properties on %s"), zc.zc_name); (void) fprintf(stderr, "\n"); } if (err || ioctl_err) return (-1); *action_handlep = zc.zc_action_handle; if (flags->verbose) { char buf1[64]; char buf2[64]; uint64_t bytes = zc.zc_cookie; time_t delta = time(NULL) - begin_time; if (delta == 0) delta = 1; zfs_nicenum(bytes, buf1, sizeof (buf1)); zfs_nicenum(bytes/delta, buf2, sizeof (buf2)); (void) printf("received %sB stream in %lu seconds (%sB/sec)\n", buf1, delta, buf2); } return (0); } static int zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags, int infd, const char *sendfs, nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd, uint64_t *action_handlep) { int err; dmu_replay_record_t drr, drr_noswap; struct drr_begin *drrb = &drr.drr_u.drr_begin; char errbuf[1024]; zio_cksum_t zcksum = { 0 }; uint64_t featureflags; int hdrtype; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot receive")); if (flags->isprefix && !zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs " "(%s) does not exist"), tosnap); return (zfs_error(hdl, EZFS_NOENT, errbuf)); } /* read in the BEGIN record */ if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE, &zcksum))) return (err); if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) { /* It's the double end record at the end of a package */ return (ENODATA); } /* the kernel needs the non-byteswapped begin record */ drr_noswap = drr; flags->byteswap = B_FALSE; if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { /* * We computed the checksum in the wrong byteorder in * recv_read() above; do it again correctly. 
*/ bzero(&zcksum, sizeof (zio_cksum_t)); fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum); flags->byteswap = B_TRUE; drr.drr_type = BSWAP_32(drr.drr_type); drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen); drrb->drr_magic = BSWAP_64(drrb->drr_magic); drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo); drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time); drrb->drr_type = BSWAP_32(drrb->drr_type); drrb->drr_flags = BSWAP_32(drrb->drr_flags); drrb->drr_toguid = BSWAP_64(drrb->drr_toguid); drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid); } if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "stream (bad magic number)")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo); if (!DMU_STREAM_SUPPORTED(featureflags) || (hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "stream has unsupported feature, feature flags = %lx"), featureflags); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } if (strchr(drrb->drr_toname, '@') == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid " "stream (bad snapshot name)")); return (zfs_error(hdl, EZFS_BADSTREAM, errbuf)); } if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) { char nonpackage_sendfs[ZFS_MAXNAMELEN]; if (sendfs == NULL) { /* * We were not called from zfs_receive_package(). Get * the fs specified by 'zfs send'. */ char *cp; (void) strlcpy(nonpackage_sendfs, drr.drr_u.drr_begin.drr_toname, ZFS_MAXNAMELEN); if ((cp = strchr(nonpackage_sendfs, '@')) != NULL) *cp = '\0'; sendfs = nonpackage_sendfs; } return (zfs_receive_one(hdl, infd, tosnap, flags, &drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs, cleanup_fd, action_handlep)); } else { assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_COMPOUNDSTREAM); return (zfs_receive_package(hdl, infd, tosnap, flags, &drr, &zcksum, top_zfs, cleanup_fd, action_handlep)); } } /* * Restores a backup of tosnap from the file descriptor specified by infd. * Return 0 on total success, -2 if some things couldn't be * destroyed/renamed/promoted, -1 if some things couldn't be received. * (-1 will override -2). 
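 *
 * A minimal caller sketch (hypothetical destination name; stream
 * arriving on stdin; error handling elided):
 *
 *	recvflags_t rf = { 0 };
 *	err = zfs_receive(hdl, "pool2/backups", &rf, STDIN_FILENO, NULL);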
*/ int zfs_receive(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags, int infd, avl_tree_t *stream_avl) { char *top_zfs = NULL; int err; int cleanup_fd; uint64_t action_handle = 0; cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL); VERIFY(cleanup_fd >= 0); err = zfs_receive_impl(hdl, tosnap, flags, infd, NULL, NULL, stream_avl, &top_zfs, cleanup_fd, &action_handle); VERIFY(0 == close(cleanup_fd)); if (err == 0 && !flags->nomount && top_zfs) { zfs_handle_t *zhp; prop_changelist_t *clp; zhp = zfs_open(hdl, top_zfs, ZFS_TYPE_FILESYSTEM); if (zhp != NULL) { clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, CL_GATHER_MOUNT_ALWAYS, 0); zfs_close(zhp); if (clp != NULL) { /* mount and share received datasets */ err = changelist_postfix(clp); changelist_free(clp); } } if (zhp == NULL || clp == NULL || err) err = -1; } if (top_zfs) free(top_zfs); return (err); } Index: projects/nand/cddl/contrib/opensolaris/lib/libzpool/common/sys/zfs_context.h =================================================================== --- projects/nand/cddl/contrib/opensolaris/lib/libzpool/common/sys/zfs_context.h (revision 235262) +++ projects/nand/cddl/contrib/opensolaris/lib/libzpool/common/sys/zfs_context.h (revision 235263) @@ -1,692 +1,694 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. */ #ifndef _SYS_ZFS_CONTEXT_H #define _SYS_ZFS_CONTEXT_H #ifdef __cplusplus extern "C" { #endif #define _SYS_MUTEX_H #define _SYS_RWLOCK_H #define _SYS_CONDVAR_H #define _SYS_SYSTM_H #define _SYS_DEBUG_H #define _SYS_T_LOCK_H #define _SYS_VNODE_H #define _SYS_VFS_H #define _SYS_SUNDDI_H #define _SYS_CALLB_H #define _SYS_SCHED_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ZFS_EXPORTS_PATH "/etc/zfs/exports" /* * Debugging */ /* * Note that we are not using the debugging levels. */ #define CE_CONT 0 /* continuation */ #define CE_NOTE 1 /* notice */ #define CE_WARN 2 /* warning */ #define CE_PANIC 3 /* panic */ #define CE_IGNORE 4 /* print nothing */ /* * ZFS debugging */ #define ZFS_LOG(...) 
do { } while (0) typedef u_longlong_t rlim64_t; #define RLIM64_INFINITY ((rlim64_t)-3) #ifdef ZFS_DEBUG extern void dprintf_setup(int *argc, char **argv); #endif /* ZFS_DEBUG */ extern void cmn_err(int, const char *, ...); extern void vcmn_err(int, const char *, __va_list); extern void panic(const char *, ...); extern void vpanic(const char *, __va_list); #define fm_panic panic extern int aok; /* This definition is copied from assert.h. */ #if defined(__STDC__) #if __STDC_VERSION__ - 0 >= 199901L #define zverify(EX) (void)((EX) || (aok) || \ (__assert(#EX, __FILE__, __LINE__), 0)) #else #define zverify(EX) (void)((EX) || (aok) || \ (__assert(#EX, __FILE__, __LINE__), 0)) #endif /* __STDC_VERSION__ - 0 >= 199901L */ #else #define zverify(EX) (void)((EX) || (aok) || \ (_assert("EX", __FILE__, __LINE__), 0)) #endif /* __STDC__ */ #define VERIFY zverify #define ASSERT zverify #undef assert #define assert zverify extern void __assert(const char *, const char *, int); #ifdef lint #define VERIFY3_IMPL(x, y, z, t) if (x == z) ((void)0) #else /* BEGIN CSTYLED */ #define VERIFY3_IMPL(LEFT, OP, RIGHT, TYPE) do { \ const TYPE __left = (TYPE)(LEFT); \ const TYPE __right = (TYPE)(RIGHT); \ if (!(__left OP __right) && (!aok)) { \ char *__buf = alloca(256); \ (void) snprintf(__buf, 256, "%s %s %s (0x%llx %s 0x%llx)", \ #LEFT, #OP, #RIGHT, \ (u_longlong_t)__left, #OP, (u_longlong_t)__right); \ __assert(__buf, __FILE__, __LINE__); \ } \ _NOTE(CONSTCOND) } while (0) /* END CSTYLED */ #endif /* lint */ #define VERIFY3S(x, y, z) VERIFY3_IMPL(x, y, z, int64_t) #define VERIFY3U(x, y, z) VERIFY3_IMPL(x, y, z, uint64_t) #define VERIFY3P(x, y, z) VERIFY3_IMPL(x, y, z, uintptr_t) #ifdef NDEBUG #define ASSERT3S(x, y, z) ((void)0) #define ASSERT3U(x, y, z) ((void)0) #define ASSERT3P(x, y, z) ((void)0) #else #define ASSERT3S(x, y, z) VERIFY3S(x, y, z) #define ASSERT3U(x, y, z) VERIFY3U(x, y, z) #define ASSERT3P(x, y, z) VERIFY3P(x, y, z) #endif /* * DTrace SDT probes have different signatures in userland than they do in * kernel. If they're being used in kernel code, re-define them out of * existence for their counterparts in libzpool. 
*/ #ifdef DTRACE_PROBE #undef DTRACE_PROBE #define DTRACE_PROBE(a) ((void)0) #endif /* DTRACE_PROBE */ #ifdef DTRACE_PROBE1 #undef DTRACE_PROBE1 #define DTRACE_PROBE1(a, b, c) ((void)0) #endif /* DTRACE_PROBE1 */ #ifdef DTRACE_PROBE2 #undef DTRACE_PROBE2 #define DTRACE_PROBE2(a, b, c, d, e) ((void)0) #endif /* DTRACE_PROBE2 */ #ifdef DTRACE_PROBE3 #undef DTRACE_PROBE3 #define DTRACE_PROBE3(a, b, c, d, e, f, g) ((void)0) #endif /* DTRACE_PROBE3 */ #ifdef DTRACE_PROBE4 #undef DTRACE_PROBE4 #define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0) #endif /* DTRACE_PROBE4 */ /* * Threads */ #define curthread ((void *)(uintptr_t)thr_self()) typedef struct kthread kthread_t; #define thread_create(stk, stksize, func, arg, len, pp, state, pri) \ zk_thread_create(func, arg) #define thread_exit() thr_exit(NULL) #define thread_join(t) panic("libzpool cannot join threads") #define newproc(f, a, cid, pri, ctp, pid) (ENOSYS) /* in libzpool, p0 exists only to have its address taken */ struct proc { uintptr_t this_is_never_used_dont_dereference_it; }; extern struct proc p0; +#define curproc (&p0) #define PS_NONE -1 extern kthread_t *zk_thread_create(void (*func)(), void *arg); #define issig(why) (FALSE) #define ISSIG(thr, why) (FALSE) /* * Mutexes */ typedef struct kmutex { void *m_owner; boolean_t initialized; mutex_t m_lock; } kmutex_t; #define MUTEX_DEFAULT USYNC_THREAD #undef MUTEX_HELD #undef MUTEX_NOT_HELD #define MUTEX_HELD(m) ((m)->m_owner == curthread) #define MUTEX_NOT_HELD(m) (!MUTEX_HELD(m)) #define _mutex_held(m) pthread_mutex_isowned_np(m) /* * Argh -- we have to get cheesy here because the kernel and userland * have different signatures for the same routine. */ //extern int _mutex_init(mutex_t *mp, int type, void *arg); //extern int _mutex_destroy(mutex_t *mp); //extern int _mutex_owned(mutex_t *mp); #define mutex_init(mp, b, c, d) zmutex_init((kmutex_t *)(mp)) #define mutex_destroy(mp) zmutex_destroy((kmutex_t *)(mp)) #define mutex_owned(mp) zmutex_owned((kmutex_t *)(mp)) extern void zmutex_init(kmutex_t *mp); extern void zmutex_destroy(kmutex_t *mp); extern int zmutex_owned(kmutex_t *mp); extern void mutex_enter(kmutex_t *mp); extern void mutex_exit(kmutex_t *mp); extern int mutex_tryenter(kmutex_t *mp); extern void *mutex_owner(kmutex_t *mp); /* * RW locks */ typedef struct krwlock { int rw_count; void *rw_owner; boolean_t initialized; rwlock_t rw_lock; } krwlock_t; typedef int krw_t; #define RW_READER 0 #define RW_WRITER 1 #define RW_DEFAULT USYNC_THREAD #undef RW_READ_HELD #define RW_READ_HELD(x) ((x)->rw_owner == NULL && (x)->rw_count > 0) #undef RW_WRITE_HELD #define RW_WRITE_HELD(x) ((x)->rw_owner == curthread) #define RW_LOCK_HELD(x) rw_lock_held(x) extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg); extern void rw_destroy(krwlock_t *rwlp); extern void rw_enter(krwlock_t *rwlp, krw_t rw); extern int rw_tryenter(krwlock_t *rwlp, krw_t rw); extern int rw_tryupgrade(krwlock_t *rwlp); extern void rw_exit(krwlock_t *rwlp); extern int rw_lock_held(krwlock_t *rwlp); #define rw_downgrade(rwlp) do { } while (0) extern uid_t crgetuid(cred_t *cr); extern gid_t crgetgid(cred_t *cr); extern int crgetngroups(cred_t *cr); extern gid_t *crgetgroups(cred_t *cr); /* * Condition variables */ typedef cond_t kcondvar_t; #define CV_DEFAULT USYNC_THREAD extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg); extern void cv_destroy(kcondvar_t *cv); extern void cv_wait(kcondvar_t *cv, kmutex_t *mp); extern clock_t cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t 
abstime); extern void cv_signal(kcondvar_t *cv); extern void cv_broadcast(kcondvar_t *cv); /* * Kernel memory */ #define KM_SLEEP UMEM_NOFAIL #define KM_PUSHPAGE KM_SLEEP #define KM_NOSLEEP UMEM_DEFAULT #define KMC_NODEBUG UMC_NODEBUG #define KMC_NOTOUCH 0 /* not needed for userland caches */ #define KM_NODEBUG 0 #define kmem_alloc(_s, _f) umem_alloc(_s, _f) #define kmem_zalloc(_s, _f) umem_zalloc(_s, _f) #define kmem_free(_b, _s) umem_free(_b, _s) #define kmem_size() (physmem * PAGESIZE) #define kmem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) \ umem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) #define kmem_cache_destroy(_c) umem_cache_destroy(_c) #define kmem_cache_alloc(_c, _f) umem_cache_alloc(_c, _f) #define kmem_cache_free(_c, _b) umem_cache_free(_c, _b) #define kmem_debugging() 0 #define kmem_cache_reap_now(_c) /* nothing */ #define kmem_cache_set_move(_c, _cb) /* nothing */ #define POINTER_INVALIDATE(_pp) /* nothing */ #define POINTER_IS_VALID(_p) 0 typedef umem_cache_t kmem_cache_t; typedef enum kmem_cbrc { KMEM_CBRC_YES, KMEM_CBRC_NO, KMEM_CBRC_LATER, KMEM_CBRC_DONT_NEED, KMEM_CBRC_DONT_KNOW } kmem_cbrc_t; /* * Task queues */ typedef struct taskq taskq_t; typedef uintptr_t taskqid_t; typedef void (task_func_t)(void *); #define TASKQ_PREPOPULATE 0x0001 #define TASKQ_CPR_SAFE 0x0002 /* Use CPR safe protocol */ #define TASKQ_DYNAMIC 0x0004 /* Use dynamic thread scheduling */ #define TASKQ_THREADS_CPU_PCT 0x0008 /* Scale # threads by # cpus */ #define TASKQ_DC_BATCH 0x0010 /* Mark threads as batch */ #define TQ_SLEEP KM_SLEEP /* Can block for memory */ #define TQ_NOSLEEP KM_NOSLEEP /* cannot block for memory; may fail */ #define TQ_NOQUEUE 0x02 /* Do not enqueue if can't dispatch */ #define TQ_FRONT 0x08 /* Queue in front */ extern taskq_t *system_taskq; extern taskq_t *taskq_create(const char *, int, pri_t, int, int, uint_t); #define taskq_create_proc(a, b, c, d, e, p, f) \ (taskq_create(a, b, c, d, e, f)) #define taskq_create_sysdc(a, b, d, e, p, dc, f) \ (taskq_create(a, b, maxclsyspri, d, e, f)) extern taskqid_t taskq_dispatch(taskq_t *, task_func_t, void *, uint_t); extern void taskq_destroy(taskq_t *); extern void taskq_wait(taskq_t *); extern int taskq_member(taskq_t *, void *); extern void system_taskq_init(void); extern void system_taskq_fini(void); #define taskq_dispatch_safe(tq, func, arg, flags, task) \ taskq_dispatch((tq), (func), (arg), (flags)) #define XVA_MAPSIZE 3 #define XVA_MAGIC 0x78766174 /* * vnodes */ typedef struct vnode { uint64_t v_size; int v_fd; char *v_path; } vnode_t; #define AV_SCANSTAMP_SZ 32 /* length of anti-virus scanstamp */ typedef struct xoptattr { timestruc_t xoa_createtime; /* Create time of file */ uint8_t xoa_archive; uint8_t xoa_system; uint8_t xoa_readonly; uint8_t xoa_hidden; uint8_t xoa_nounlink; uint8_t xoa_immutable; uint8_t xoa_appendonly; uint8_t xoa_nodump; uint8_t xoa_settable; uint8_t xoa_opaque; uint8_t xoa_av_quarantined; uint8_t xoa_av_modified; uint8_t xoa_av_scanstamp[AV_SCANSTAMP_SZ]; uint8_t xoa_reparse; uint8_t xoa_offline; uint8_t xoa_sparse; } xoptattr_t; typedef struct vattr { uint_t va_mask; /* bit-mask of attributes */ u_offset_t va_size; /* file size in bytes */ } vattr_t; typedef struct xvattr { vattr_t xva_vattr; /* Embedded vattr structure */ uint32_t xva_magic; /* Magic Number */ uint32_t xva_mapsize; /* Size of attr bitmap (32-bit words) */ uint32_t *xva_rtnattrmapp; /* Ptr to xva_rtnattrmap[] */ uint32_t xva_reqattrmap[XVA_MAPSIZE]; /* Requested attrs */ uint32_t xva_rtnattrmap[XVA_MAPSIZE]; /* Returned 
attrs */ xoptattr_t xva_xoptattrs; /* Optional attributes */ } xvattr_t; typedef struct vsecattr { uint_t vsa_mask; /* See below */ int vsa_aclcnt; /* ACL entry count */ void *vsa_aclentp; /* pointer to ACL entries */ int vsa_dfaclcnt; /* default ACL entry count */ void *vsa_dfaclentp; /* pointer to default ACL entries */ size_t vsa_aclentsz; /* ACE size in bytes of vsa_aclentp */ } vsecattr_t; #define AT_TYPE 0x00001 #define AT_MODE 0x00002 #define AT_UID 0x00004 #define AT_GID 0x00008 #define AT_FSID 0x00010 #define AT_NODEID 0x00020 #define AT_NLINK 0x00040 #define AT_SIZE 0x00080 #define AT_ATIME 0x00100 #define AT_MTIME 0x00200 #define AT_CTIME 0x00400 #define AT_RDEV 0x00800 #define AT_BLKSIZE 0x01000 #define AT_NBLOCKS 0x02000 #define AT_SEQ 0x08000 #define AT_XVATTR 0x10000 #define CRCREAT 0 extern int fop_getattr(vnode_t *vp, vattr_t *vap); #define VOP_CLOSE(vp, f, c, o, cr, ct) 0 #define VOP_PUTPAGE(vp, of, sz, fl, cr, ct) 0 #define VOP_GETATTR(vp, vap, cr) fop_getattr((vp), (vap)); #define VOP_FSYNC(vp, f, cr, ct) fsync((vp)->v_fd) #define VN_RELE(vp) vn_close(vp, 0, NULL, NULL) #define VN_RELE_ASYNC(vp, taskq) vn_close(vp, 0, NULL, NULL) #define vn_lock(vp, type) #define VOP_UNLOCK(vp, type) #ifdef VFS_LOCK_GIANT #undef VFS_LOCK_GIANT #endif #define VFS_LOCK_GIANT(mp) 0 #ifdef VFS_UNLOCK_GIANT #undef VFS_UNLOCK_GIANT #endif #define VFS_UNLOCK_GIANT(vfslocked) extern int vn_open(char *path, int x1, int oflags, int mode, vnode_t **vpp, int x2, int x3); extern int vn_openat(char *path, int x1, int oflags, int mode, vnode_t **vpp, int x2, int x3, vnode_t *vp, int fd); extern int vn_rdwr(int uio, vnode_t *vp, void *addr, ssize_t len, offset_t offset, int x1, int x2, rlim64_t x3, void *x4, ssize_t *residp); extern void vn_close(vnode_t *vp, int openflag, cred_t *cr, kthread_t *td); #define vn_remove(path, x1, x2) remove(path) #define vn_rename(from, to, seg) rename((from), (to)) #define vn_is_readonly(vp) B_FALSE extern vnode_t *rootdir; #include /* for FREAD, FWRITE, etc */ #define FTRUNC O_TRUNC /* * Random stuff */ #define ddi_get_lbolt() (gethrtime() >> 23) #define ddi_get_lbolt64() (gethrtime() >> 23) #define hz 119 /* frequency when using gethrtime() >> 23 for lbolt */ extern void delay(clock_t ticks); #define gethrestime_sec() time(NULL) #define gethrestime(t) \ do {\ (t)->tv_sec = gethrestime_sec();\ (t)->tv_nsec = 0;\ } while (0); #define max_ncpus 64 #define minclsyspri 60 #define maxclsyspri 99 #define CPU_SEQID (thr_self() & (max_ncpus - 1)) #define kcred NULL #define CRED() NULL #ifndef ptob #define ptob(x) ((x) * PAGESIZE) #endif extern uint64_t physmem; extern int highbit(ulong_t i); extern int random_get_bytes(uint8_t *ptr, size_t len); extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len); extern void kernel_init(int); extern void kernel_fini(void); struct spa; extern void nicenum(uint64_t num, char *buf); extern void show_pool_stats(struct spa *); typedef struct callb_cpr { kmutex_t *cc_lockp; } callb_cpr_t; #define CALLB_CPR_INIT(cp, lockp, func, name) { \ (cp)->cc_lockp = lockp; \ } #define CALLB_CPR_SAFE_BEGIN(cp) { \ ASSERT(MUTEX_HELD((cp)->cc_lockp)); \ } #define CALLB_CPR_SAFE_END(cp, lockp) { \ ASSERT(MUTEX_HELD((cp)->cc_lockp)); \ } #define CALLB_CPR_EXIT(cp) { \ ASSERT(MUTEX_HELD((cp)->cc_lockp)); \ mutex_exit((cp)->cc_lockp); \ } #define zone_dataset_visible(x, y) (1) #define INGLOBALZONE(z) (1) extern char *kmem_asprintf(const char *fmt, ...); #define strfree(str) kmem_free((str), strlen(str)+1) /* * Hostname information */ extern struct utsname 
utsname; extern char hw_serial[]; /* for userland-emulated hostid access */ extern int ddi_strtoul(const char *str, char **nptr, int base, unsigned long *result); extern int ddi_strtoull(const char *str, char **nptr, int base, u_longlong_t *result); /* ZFS Boot Related stuff. */ struct _buf { intptr_t _fd; }; struct bootstat { uint64_t st_size; }; typedef struct ace_object { uid_t a_who; uint32_t a_access_mask; uint16_t a_flags; uint16_t a_type; uint8_t a_obj_type[16]; uint8_t a_inherit_obj_type[16]; } ace_object_t; #define ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05 #define ACE_ACCESS_DENIED_OBJECT_ACE_TYPE 0x06 #define ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07 #define ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08 extern struct _buf *kobj_open_file(char *name); extern int kobj_read_file(struct _buf *file, char *buf, unsigned size, unsigned off); extern void kobj_close_file(struct _buf *file); extern int kobj_get_filesize(struct _buf *file, uint64_t *size); extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr); extern int zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr); extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr); extern zoneid_t getzoneid(void); /* Random compatibility stuff. */ #define lbolt (gethrtime() >> 23) #define lbolt64 (gethrtime() >> 23) extern uint64_t physmem; #define gethrestime_sec() time(NULL) #define pwrite64(d, p, n, o) pwrite(d, p, n, o) #define readdir64(d) readdir(d) #define SIGPENDING(td) (0) #define root_mount_wait() do { } while (0) #define root_mounted() (1) struct file { void *dummy; }; #define FCREAT O_CREAT #define FOFFMAX 0x0 /* SID stuff */ typedef struct ksiddomain { uint_t kd_ref; uint_t kd_len; char *kd_name; } ksiddomain_t; ksiddomain_t *ksid_lookupdomain(const char *); void ksiddomain_rele(ksiddomain_t *); typedef uint32_t idmap_rid_t; #define DDI_SLEEP KM_SLEEP #define ddi_log_sysevent(_a, _b, _c, _d, _e, _f, _g) (0) #define SX_SYSINIT(name, lock, desc) #define SYSCTL_DECL(...) #define SYSCTL_NODE(...) #define SYSCTL_INT(...) #define SYSCTL_UINT(...) #define SYSCTL_ULONG(...) #define SYSCTL_QUAD(...) #define SYSCTL_UQUAD(...) #ifdef TUNABLE_INT #undef TUNABLE_INT #undef TUNABLE_ULONG #undef TUNABLE_QUAD #endif #define TUNABLE_INT(...) #define TUNABLE_ULONG(...) #define TUNABLE_QUAD(...) 
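Editorial aside, not part of the diff: taken together, the shims above let unmodified kernel-style ZFS code compile as a userland library. The following minimal sketch shows the idea; it assumes a libzpool build environment where sys/zfs_context.h resolves, and the job_t type and function names are hypothetical illustrations only.

#include <sys/zfs_context.h>

typedef struct job {
	kmutex_t	j_lock;		/* kmutex_t wraps a userland mutex_t */
	kcondvar_t	j_cv;		/* kcondvar_t is a plain cond_t */
	boolean_t	j_done;
} job_t;

static void
job_init(job_t *job)
{
	/* mutex_init()/cv_init() map onto the zmutex_init()/cv_init() shims. */
	mutex_init(&job->j_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&job->j_cv, NULL, CV_DEFAULT, NULL);
	job->j_done = B_FALSE;
}

static void
job_finish(job_t *job)
{
	/* kmem_zalloc() maps to umem_zalloc(); KM_SLEEP maps to UMEM_NOFAIL. */
	char *buf = kmem_zalloc(64, KM_SLEEP);

	mutex_enter(&job->j_lock);
	job->j_done = B_TRUE;
	/* ASSERT/VERIFY3* checks expand via the zverify()/VERIFY3_IMPL() macros. */
	VERIFY3U(job->j_done, ==, B_TRUE);
	cv_broadcast(&job->j_cv);
	mutex_exit(&job->j_lock);

	kmem_free(buf, 64);

	/*
	 * ddi_get_lbolt() is gethrtime() >> 23: one tick is 2^23 ns
	 * (about 8.39 ms), i.e. roughly 119.2 ticks per second, which is
	 * where the "hz 119" definition above comes from.
	 */
	(void) ddi_get_lbolt();
}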
/* Errors */ #ifndef ERESTART #define ERESTART (-1) #endif #ifdef __cplusplus } #endif #endif /* _SYS_ZFS_CONTEXT_H */ Index: projects/nand/cddl/contrib/opensolaris =================================================================== --- projects/nand/cddl/contrib/opensolaris (revision 235262) +++ projects/nand/cddl/contrib/opensolaris (revision 235263) Property changes on: projects/nand/cddl/contrib/opensolaris ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/cddl/contrib/opensolaris:r235157-235262 Index: projects/nand/contrib/bind9 =================================================================== --- projects/nand/contrib/bind9 (revision 235262) +++ projects/nand/contrib/bind9 (revision 235263) Property changes on: projects/nand/contrib/bind9 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/bind9:r235157-235262 Index: projects/nand/contrib/binutils =================================================================== --- projects/nand/contrib/binutils (revision 235262) +++ projects/nand/contrib/binutils (revision 235263) Property changes on: projects/nand/contrib/binutils ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/binutils:r235157-235262 Index: projects/nand/contrib/bzip2 =================================================================== --- projects/nand/contrib/bzip2 (revision 235262) +++ projects/nand/contrib/bzip2 (revision 235263) Property changes on: projects/nand/contrib/bzip2 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/bzip2:r235157-235262 Index: projects/nand/contrib/com_err =================================================================== --- projects/nand/contrib/com_err (revision 235262) +++ projects/nand/contrib/com_err (revision 235263) Property changes on: projects/nand/contrib/com_err ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/com_err:r235157-235262 Index: projects/nand/contrib/compiler-rt =================================================================== --- projects/nand/contrib/compiler-rt (revision 235262) +++ projects/nand/contrib/compiler-rt (revision 235263) Property changes on: projects/nand/contrib/compiler-rt ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/compiler-rt:r235157-235262 Index: projects/nand/contrib/dialog =================================================================== --- projects/nand/contrib/dialog (revision 235262) +++ projects/nand/contrib/dialog (revision 235263) Property changes on: projects/nand/contrib/dialog ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/dialog:r235157-235262 Index: projects/nand/contrib/ee =================================================================== --- projects/nand/contrib/ee (revision 235262) +++ projects/nand/contrib/ee (revision 235263) Property changes on: projects/nand/contrib/ee ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/ee:r235157-235262 Index: projects/nand/contrib/expat =================================================================== --- 
projects/nand/contrib/expat (revision 235262) +++ projects/nand/contrib/expat (revision 235263) Property changes on: projects/nand/contrib/expat ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/expat:r235157-235262 Index: projects/nand/contrib/file =================================================================== --- projects/nand/contrib/file (revision 235262) +++ projects/nand/contrib/file (revision 235263) Property changes on: projects/nand/contrib/file ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/file:r235157-235262 Index: projects/nand/contrib/gcc =================================================================== --- projects/nand/contrib/gcc (revision 235262) +++ projects/nand/contrib/gcc (revision 235263) Property changes on: projects/nand/contrib/gcc ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/gcc:r235157-235262 Index: projects/nand/contrib/gdb =================================================================== --- projects/nand/contrib/gdb (revision 235262) +++ projects/nand/contrib/gdb (revision 235263) Property changes on: projects/nand/contrib/gdb ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/gdb:r235157-235262 Index: projects/nand/contrib/gdtoa =================================================================== --- projects/nand/contrib/gdtoa (revision 235262) +++ projects/nand/contrib/gdtoa (revision 235263) Property changes on: projects/nand/contrib/gdtoa ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/gdtoa:r235157-235262 Index: projects/nand/contrib/gnu-sort =================================================================== --- projects/nand/contrib/gnu-sort (revision 235262) +++ projects/nand/contrib/gnu-sort (revision 235263) Property changes on: projects/nand/contrib/gnu-sort ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/gnu-sort:r235157-235262 Index: projects/nand/contrib/groff =================================================================== --- projects/nand/contrib/groff (revision 235262) +++ projects/nand/contrib/groff (revision 235263) Property changes on: projects/nand/contrib/groff ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/groff:r235157-235262 Index: projects/nand/contrib/jemalloc/ChangeLog =================================================================== --- projects/nand/contrib/jemalloc/ChangeLog (revision 235262) +++ projects/nand/contrib/jemalloc/ChangeLog (revision 235263) @@ -1,329 +1,336 @@ Following are change highlights associated with official releases. Important bug fixes are all mentioned, but internal enhancements are omitted here for brevity (even though they are more fun to write about). Much more detail can be found in the git revision history: http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git git://canonware.com/jemalloc.git * 3.0.0 (XXX not yet released) Although this version adds some major new features, the primary focus is on internal code cleanup that facilitates maintainability and portability, most of which is not reflected in the ChangeLog. 
This is the first release to incorporate substantial contributions from numerous other developers, and the result is a more broadly useful allocator (see the git revision history for contribution details). Note that the license has been unified, thanks to Facebook granting a license under the same terms as the other copyright holders (see COPYING). New features: - Implement Valgrind support, redzones, and quarantine. - - Add support for additional operating systems: + - Add support for additional platforms: + FreeBSD + Mac OS X Lion + + MinGW - Add support for additional architectures: + MIPS + SH4 + Tilera - Add support for cross compiling. - Add nallocm(), which rounds a request size up to the nearest size class without actually allocating. - Implement aligned_alloc() (blame C11). - Add the --disable-munmap option, and make it the default on Linux. - Add the --with-mangling option. - Add the --disable-experimental option. - Add the "thread.tcache.enabled" mallctl. - Add the "opt.prof_final" mallctl. - Update pprof (from gperftools 2.0). Incompatible changes: - Enable stats by default. - Enable fill by default. - Disable lazy locking by default. - Rename the "tcache.flush" mallctl to "thread.tcache.flush". - Rename the "arenas.pagesize" mallctl to "arenas.page". - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). - Change the "opt.prof_accum" default from true to false. Removed features: - Remove the swap feature, including the "config.swap", "swap.avail", "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. - Remove highruns statistics, including the "stats.arenas..bins..highruns" and "stats.arenas..lruns..highruns" mallctls. - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and "arenas.[tqcs]bins" mallctls. - Remove the "arenas.chunksize" mallctl. - Remove the "opt.lg_prof_tcmax" option. - Remove the "opt.lg_prof_bt_max" option. - Remove the "opt.lg_tcache_gc_sweep" option. - Remove the --disable-tiny option, including the "config.tiny" mallctl. - Remove the --enable-dynamic-page-shift configure option. - Remove the --enable-sysv configure option. Bug fixes: - - Fix fork-related bugs that could cause deadlock in children between fork - and exec. - Fix a statistics-related bug in the "thread.arena" mallctl that could cause invalid statistics and crashes. - - Work around TLS dallocation via free() on Linux. This bug could cause + - Work around TLS deallocation via free() on Linux. This bug could cause write-after-free memory corruption. + - Fix a potential deadlock that could occur during interval- and + growth-triggered heap profile dumps. - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could cause memory corruption and crashes with --enable-dss specified. + - Fix fork-related bugs that could cause deadlock in children between fork + and exec. - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. - Fix realloc(p, 0) to act like free(p). - Do not enforce minimum alignment in memalign(). - Check for NULL pointer in malloc_usable_size(). + - Fix an off-by-one heap profile statistics bug that could be observed in + interval- and growth-triggered heap profiles. + - Fix the "epoch" mallctl to update cached stats even if the passed in epoch + is 0. - Fix bin->runcur management to fix a layout policy bug. This bug did not affect correctness. 
- Fix a bug in choose_arena_hard() that potentially caused more arenas to be initialized than necessary. - Add missing "opt.lg_tcache_max" mallctl implementation. - Use glibc allocator hooks to make mixed allocator usage less likely. - Fix build issues for --disable-tcache. - Don't mangle pthread_create() when --with-private-namespace is specified. * 2.2.5 (November 14, 2011) Bug fixes: - Fix huge_ralloc() race when using mremap(2). This is a serious bug that could cause memory corruption and/or crashes. - Fix huge_ralloc() to maintain chunk statistics. - Fix malloc_stats_print(..., "a") output. * 2.2.4 (November 5, 2011) Bug fixes: - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as well as for --disable-tls builds in earlier releases. - Do not assume a 4 KiB page size in test/rallocm.c. * 2.2.3 (August 31, 2011) This version fixes numerous bugs related to heap profiling. Bug fixes: - Fix a prof-related race condition. This bug could cause memory corruption, but only occurred in non-default configurations (prof_accum:false). - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is excluded from backtraces). - Fix a prof-related bug in realloc() (only triggered by OOM errors). - Fix prof-related bugs in allocm() and rallocm(). - Fix prof_tdata_cleanup() for --disable-tls builds. - Fix a relative include path, to fix objdir builds. * 2.2.2 (July 30, 2011) Bug fixes: - Fix a build error for --disable-tcache. - Fix assertions in arena_purge() (for real this time). - Add the --with-private-namespace option. This is a workaround for symbol conflicts that can inadvertently arise when using static libraries. * 2.2.1 (March 30, 2011) Bug fixes: - Implement atomic operations for x86/x64. This fixes compilation failures for versions of gcc that are still in wide use. - Fix an assertion in arena_purge(). * 2.2.0 (March 22, 2011) This version incorporates several improvements to algorithms and data structures that tend to reduce fragmentation and increase speed. New features: - Add the "stats.cactive" mallctl. - Update pprof (from google-perftools 1.7). - Improve backtracing-related configuration logic, and add the --disable-prof-libgcc option. Bug fixes: - Change default symbol visibility from "internal", to "hidden", which decreases the overhead of library-internal function calls. - Fix symbol visibility so that it is also set on OS X. - Fix a build dependency regression caused by the introduction of the .pic.o suffix for PIC object files. - Add missing checks for mutex initialization failures. - Don't use libgcc-based backtracing except on x64, where it is known to work. - Fix deadlocks on OS X that were due to memory allocation in pthread_mutex_lock(). - Heap profiling-specific fixes: + Fix memory corruption due to integer overflow in small region index computation, when using a small enough sample interval that profiling context pointers are stored in small run headers. + Fix a bootstrap ordering bug that only occurred with TLS disabled. + Fix a rallocm() rsize bug. + Fix error detection bugs for aligned memory allocation. * 2.1.3 (March 14, 2011) Bug fixes: - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix for OS X in 2.1.2). - Fix a "thread.arena" mallctl bug. - Fix a thread cache stats merging bug. * 2.1.2 (March 2, 2011) Bug fixes: - Fix "thread.{de,}allocatedp" mallctl for OS X. - Add missing jemalloc.a to build system. * 2.1.1 (January 31, 2011) Bug fixes: - Fix aligned huge reallocation (affected allocm()). 
- Fix the ALLOCM_LG_ALIGN macro definition. - Fix a heap dumping deadlock. - Fix a "thread.arena" mallctl bug. * 2.1.0 (December 3, 2010) This version incorporates some optimizations that can't quite be considered bug fixes. New features: - Use Linux's mremap(2) for huge object reallocation when possible. - Avoid locking in mallctl*() when possible. - Add the "thread.[de]allocatedp" mallctl's. - Convert the manual page source from roff to DocBook, and generate both roff and HTML manuals. Bug fixes: - Fix a crash due to incorrect bootstrap ordering. This only impacted --enable-debug --enable-dss configurations. - Fix a minor statistics bug for mallctl("swap.avail", ...). * 2.0.1 (October 29, 2010) Bug fixes: - Fix a race condition in heap profiling that could cause undefined behavior if "opt.prof_accum" were disabled. - Add missing mutex unlocks for some OOM error paths in the heap profiling code. - Fix a compilation error for non-C99 builds. * 2.0.0 (October 24, 2010) This version focuses on the experimental *allocm() API, and on improved run-time configuration/introspection. Nonetheless, numerous performance improvements are also included. New features: - Implement the experimental {,r,s,d}allocm() API, which provides a superset of the functionality available via malloc(), calloc(), posix_memalign(), realloc(), malloc_usable_size(), and free(). These functions can be used to allocate/reallocate aligned zeroed memory, ask for optional extra memory during reallocation, prevent object movement during reallocation, etc. - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is more human-readable, and more flexible. For example: JEMALLOC_OPTIONS=AJP is now: MALLOC_CONF=abort:true,fill:true,stats_print:true - Port to Apple OS X. Sponsored by Mozilla. - Make it possible for the application to control thread-->arena mappings via the "thread.arena" mallctl. - Add compile-time support for all TLS-related functionality via pthreads TSD. This is mainly of interest for OS X, which does not support TLS, but has a TSD implementation with similar performance. - Override memalign() and valloc() if they are provided by the system. - Add the "arenas.purge" mallctl, which can be used to synchronously purge all dirty unused pages. - Make cumulative heap profiling data optional, so that it is possible to limit the amount of memory consumed by heap profiling data structures. - Add per thread allocation counters that can be accessed via the "thread.allocated" and "thread.deallocated" mallctls. Incompatible changes: - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). - Increase default backtrace depth from 4 to 128 for heap profiling. - Disable interval-based profile dumps by default. Bug fixes: - Remove bad assertions in fork handler functions. These assertions could cause aborts for some combinations of configure settings. - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. - Fix leak context reporting. This bug tended to cause the number of contexts to be underreported (though the reported number of objects and bytes were correct). - Fix a realloc() bug for large in-place growing reallocation. This bug could cause memory corruption, but it was hard to trigger. - Fix an allocation bug for small allocations that could be triggered if multiple threads raced to create a new run of backing pages. - Enhance the heap profiler to trigger samples based on usable size, rather than request size. 
- Fix a heap profiling bug due to sometimes losing track of requested object size for sampled objects. * 1.0.3 (August 12, 2010) Bug fixes: - Fix the libunwind-based implementation of stack backtracing (used for heap profiling). This bug could cause zero-length backtraces to be reported. - Add a missing mutex unlock in library initialization code. If multiple threads raced to initialize malloc, some of them could end up permanently blocked. * 1.0.2 (May 11, 2010) Bug fixes: - Fix junk filling of large objects, which could cause memory corruption. - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual memory limits could cause swap file configuration to fail. Contributed by Jordan DeLong. * 1.0.1 (April 14, 2010) Bug fixes: - Fix compilation when --enable-fill is specified. - Fix threads-related profiling bugs that affected accuracy and caused memory to be leaked during thread exit. - Fix dirty page purging race conditions that could cause crashes. - Fix crash in tcache flushing code during thread destruction. * 1.0.0 (April 11, 2010) This release focuses on speed and run-time introspection. Numerous algorithmic improvements make this release substantially faster than its predecessors. New features: - Implement autoconf-based configuration system. - Add mallctl*(), for the purposes of introspection and run-time configuration. - Make it possible for the application to manually flush a thread's cache, via the "tcache.flush" mallctl. - Base maximum dirty page count on proportion of active memory. - Compute various additional run-time statistics, including per size class statistics for large objects. - Expose malloc_stats_print(), which can be called repeatedly by the application. - Simplify the malloc_message() signature to only take one string argument, and incorporate an opaque data pointer argument for use by the application in combination with malloc_stats_print(). - Add support for allocation backed by one or more swap files, and allow the application to disable over-commit if swap files are in use. - Implement allocation profiling and leak checking. Removed features: - Remove the dynamic arena rebalancing code, since thread-specific caching reduces its utility. Bug fixes: - Modify chunk allocation to work when address space layout randomization (ASLR) is in use. - Fix thread cleanup bugs related to TLS destruction. - Handle 0-size allocation requests in posix_memalign(). - Fix a chunk leak. The leaked chunks were never touched, so this impacted virtual memory usage, but not physical memory usage. * linux_2008082[78]a (August 27/28, 2008) These snapshot releases are the simple result of incorporating Linux-specific support into the FreeBSD malloc sources.
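Editorial aside, not part of the ChangeLog: the 2.0.0 notes above replace the old JEMALLOC_OPTIONS flag characters with the MALLOC_CONF option:value syntax (A, J, and P becoming abort, fill, and stats_print). The same options can also be compiled in through the malloc_conf global that the manual's TUNING section describes; a minimal sketch, assuming a jemalloc-based malloc such as FreeBSD libc's:

#include <stdlib.h>

/* Read once, when the allocator initializes itself on first use; this is
 * the compiled-in equivalent of MALLOC_CONF=abort:true,fill:true,stats_print:true. */
const char *malloc_conf = "abort:true,fill:true,stats_print:true";

int
main(void)
{
	/* The first allocation triggers option parsing; with stats_print
	 * enabled, summary statistics are emitted at exit. */
	void *p = malloc(1);

	free(p);
	return (0);
}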
-------------------------------------------------------------------------------- vim:filetype=text:textwidth=80 Index: projects/nand/contrib/jemalloc/FREEBSD-Xlist =================================================================== --- projects/nand/contrib/jemalloc/FREEBSD-Xlist (revision 235262) +++ projects/nand/contrib/jemalloc/FREEBSD-Xlist (revision 235263) @@ -1,23 +1,24 @@ $FreeBSD$ .git .gitignore FREEBSD-* INSTALL Makefile* README autogen.sh autom4te.cache/ bin/ config.* configure* doc/*.in doc/*.xml doc/*.xsl doc/*.html include/jemalloc/internal/jemalloc_internal.h.in include/jemalloc/internal/size_classes.sh include/jemalloc/jemalloc.h.in include/jemalloc/jemalloc_defs.h.in +include/msvc_compat/ install-sh src/zone.c test/ Index: projects/nand/contrib/jemalloc/FREEBSD-diffs =================================================================== --- projects/nand/contrib/jemalloc/FREEBSD-diffs (revision 235262) +++ projects/nand/contrib/jemalloc/FREEBSD-diffs (revision 235263) @@ -1,262 +1,267 @@ diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in -index e8a5722..cec85b5 100644 +index 93c16dc..b5c5595 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -51,12 +51,23 @@ This manual describes jemalloc @jemalloc_version@. More information can be found at the jemalloc website. + + The following configuration options are enabled in libc's built-in + jemalloc: , + , , + , , + , , + , , and + . Additionally, + is enabled in development versions of + FreeBSD (controlled by the MALLOC_PRODUCTION make + variable). SYNOPSIS #include <stdlib.h> -#include <jemalloc/jemalloc.h> +#include <malloc_np.h> Standard API -@@ -2091,4 +2102,16 @@ malloc_conf = "lg_chunk:24";]]> +@@ -2101,4 +2112,16 @@ malloc_conf = "lg_chunk:24";]]> The posix_memalign function conforms to IEEE Std 1003.1-2001 (“POSIX.1”). + + HISTORY + The malloc_usable_size and + posix_memalign functions first appeared in + FreeBSD 7.0. + + The aligned_alloc, + malloc_stats_print, + mallctl*, and + *allocm functions first appeared in + FreeBSD 10.0. + diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in -index b61abe8..edbb437 100644 +index 268cd14..cfb1fb9 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -1,5 +1,8 @@ #ifndef JEMALLOC_INTERNAL_H #define JEMALLOC_INTERNAL_H +#include "libc_private.h" +#include "namespace.h" + - #include - #include - #include -@@ -35,6 +38,9 @@ - #include #include + #ifdef _WIN32 + # include +@@ -54,6 +57,9 @@ typedef intptr_t ssize_t; + #endif + #include +#include "un-namespace.h" +#include "libc_private.h" + #define JEMALLOC_NO_DEMANGLE #include "../jemalloc@install_suffix@.h" diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h -index 8837ef5..d7133f4 100644 +index de44e14..564d604 100644 --- a/include/jemalloc/internal/mutex.h +++ b/include/jemalloc/internal/mutex.h -@@ -39,9 +39,6 @@ struct malloc_mutex_s { +@@ -43,9 +43,6 @@ struct malloc_mutex_s { #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. 
*/ -# define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex); diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h -index bb1b63e..00eb169 100644 +index b816647..b8ce6b1 100644 --- a/include/jemalloc/internal/private_namespace.h +++ b/include/jemalloc/internal/private_namespace.h -@@ -165,7 +165,6 @@ +@@ -186,7 +186,6 @@ #define iqalloc JEMALLOC_N(iqalloc) #define iralloc JEMALLOC_N(iralloc) #define isalloc JEMALLOC_N(isalloc) -#define isthreaded JEMALLOC_N(isthreaded) #define ivsalloc JEMALLOC_N(ivsalloc) #define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) #define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in -index f0581db..f26d8bc 100644 +index ad06948..505dd38 100644 --- a/include/jemalloc/jemalloc.h.in +++ b/include/jemalloc/jemalloc.h.in @@ -15,6 +15,7 @@ extern "C" { #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" #include "jemalloc_defs@install_suffix@.h" +#include "jemalloc_FreeBSD.h" #ifdef JEMALLOC_EXPERIMENTAL #define ALLOCM_LG_ALIGN(la) (la) diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h new file mode 100644 -index 0000000..2c5797f +index 0000000..9efab93 --- /dev/null +++ b/include/jemalloc/jemalloc_FreeBSD.h -@@ -0,0 +1,76 @@ +@@ -0,0 +1,80 @@ +/* + * Override settings that were generated in jemalloc_defs.h as necessary. + */ + +#undef JEMALLOC_OVERRIDE_VALLOC + +#ifndef MALLOC_PRODUCTION +#define JEMALLOC_DEBUG +#endif + +/* + * The following are architecture-dependent, so conditionally define them for + * each supported architecture. + */ +#undef CPU_SPINWAIT +#undef JEMALLOC_TLS_MODEL +#undef STATIC_PAGE_SHIFT +#undef LG_SIZEOF_PTR +#undef LG_SIZEOF_INT +#undef LG_SIZEOF_LONG +#undef LG_SIZEOF_INTMAX_T + +#ifdef __i386__ +# define LG_SIZEOF_PTR 2 +# define CPU_SPINWAIT __asm__ volatile("pause") +# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __ia64__ +# define LG_SIZEOF_PTR 3 +#endif +#ifdef __sparc64__ +# define LG_SIZEOF_PTR 3 +# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __amd64__ +# define LG_SIZEOF_PTR 3 +# define CPU_SPINWAIT __asm__ volatile("pause") +# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __arm__ +# define LG_SIZEOF_PTR 2 +#endif +#ifdef __mips__ ++#ifdef __mips_n64 ++# define LG_SIZEOF_PTR 3 ++#else +# define LG_SIZEOF_PTR 2 +#endif ++#endif +#ifdef __powerpc64__ +# define LG_SIZEOF_PTR 3 +#elif defined(__powerpc__) +# define LG_SIZEOF_PTR 2 +#endif + +#ifndef JEMALLOC_TLS_MODEL +# define JEMALLOC_TLS_MODEL /* Default. */ +#endif +#ifdef __clang__ +# undef JEMALLOC_TLS_MODEL +# define JEMALLOC_TLS_MODEL /* clang does not support tls_model yet. */ +#endif + +#define STATIC_PAGE_SHIFT PAGE_SHIFT +#define LG_SIZEOF_INT 2 +#define LG_SIZEOF_LONG LG_SIZEOF_PTR +#define LG_SIZEOF_INTMAX_T 3 + +/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */ +#undef JEMALLOC_LAZY_LOCK +extern int __isthreaded; +#define isthreaded ((bool)__isthreaded) + +/* Mangle. 
*/ +#define open _open +#define read _read +#define write _write +#define close _close +#define pthread_mutex_lock _pthread_mutex_lock +#define pthread_mutex_unlock _pthread_mutex_unlock diff --git a/src/jemalloc.c b/src/jemalloc.c -index f9c8916..8e24a5a 100644 +index d42e91d..cdf6222 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c -@@ -8,6 +8,9 @@ malloc_tsd_data(, arenas, arena_t *, NULL) +@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL) malloc_tsd_data(, thread_allocated, thread_allocated_t, THREAD_ALLOCATED_INITIALIZER) -+const char *__malloc_options_1_0; ++/* Work around : */ ++const char *__malloc_options_1_0 = NULL; +__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); + /* Runtime configuration options. */ - const char *je_malloc_conf JEMALLOC_ATTR(visibility("default")); + const char *je_malloc_conf; #ifdef JEMALLOC_DEBUG -@@ -401,7 +404,8 @@ malloc_conf_init(void) +@@ -429,7 +433,8 @@ malloc_conf_init(void) #endif ; - if ((opts = getenv(envname)) != NULL) { + if (issetugid() == 0 && (opts = getenv(envname)) != + NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment diff --git a/src/mutex.c b/src/mutex.c -index 4b8ce57..7be5fc9 100644 +index 37a843e..4a90a05 100644 --- a/src/mutex.c +++ b/src/mutex.c -@@ -63,6 +63,17 @@ pthread_create(pthread_t *__restrict thread, +@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread, #ifdef JEMALLOC_MUTEX_INIT_CB int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); + +__weak_reference(_pthread_mutex_init_calloc_cb_stub, + _pthread_mutex_init_calloc_cb); + +int +_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)) +{ + + return (0); +} #endif bool diff --git a/src/util.c b/src/util.c -index 99ae26d..b80676c 100644 +index 9b73c3e..f94799f 100644 --- a/src/util.c +++ b/src/util.c -@@ -60,6 +60,22 @@ wrtmessage(void *cbopaque, const char *s) - void (*je_malloc_message)(void *, const char *s) - JEMALLOC_ATTR(visibility("default")) = wrtmessage; +@@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s) -+JEMALLOC_CATTR(visibility("hidden"), static) + JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); + ++JEMALLOC_ATTR(visibility("hidden")) +void +wrtmessage_1_0(const char *s1, const char *s2, const char *s3, + const char *s4) +{ + + wrtmessage(NULL, s1); + wrtmessage(NULL, s2); + wrtmessage(NULL, s3); + wrtmessage(NULL, s4); +} + +void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3, + const char *s4) = wrtmessage_1_0; +__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0); + /* - * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so - * provide a wrapper. + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code. 
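Editorial aside, not part of the diff: one change in FREEBSD-diffs above is worth calling out. malloc_conf_init() is patched to consult the MALLOC_CONF environment variable only when issetugid() reports a non-tainted process image, so set-user-ID and set-group-ID programs cannot be reconfigured through the environment. A minimal standalone sketch of that guard pattern (illustrative only, not the patched jemalloc code):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	const char *opts;

	/* issetugid(2) returns non-zero for setuid/setgid process images. */
	if (issetugid() == 0 && (opts = getenv("MALLOC_CONF")) != NULL)
		printf("would apply allocator options: %s\n", opts);
	else
		printf("ignoring environment-supplied configuration\n");
	return (0);
}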
Index: projects/nand/contrib/jemalloc/VERSION =================================================================== --- projects/nand/contrib/jemalloc/VERSION (revision 235262) +++ projects/nand/contrib/jemalloc/VERSION (revision 235263) @@ -1 +1 @@ -1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca +1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754 Index: projects/nand/contrib/jemalloc/doc/jemalloc.3 =================================================================== --- projects/nand/contrib/jemalloc/doc/jemalloc.3 (revision 235262) +++ projects/nand/contrib/jemalloc/doc/jemalloc.3 (revision 235263) @@ -1,1471 +1,1477 @@ '\" t .\" Title: JEMALLOC .\" Author: Jason Evans .\" Generator: DocBook XSL Stylesheets v1.76.1 -.\" Date: 04/21/2012 +.\" Date: 05/09/2012 .\" Manual: User Manual -.\" Source: jemalloc 1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca +.\" Source: jemalloc 1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754 .\" Language: English .\" -.TH "JEMALLOC" "3" "04/21/2012" "jemalloc 1.0.0-286-ga8f8d7540d" "User Manual" +.TH "JEMALLOC" "3" "05/09/2012" "jemalloc 1.0.0-335-g37b6f95dcd" "User Manual" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" jemalloc \- general purpose memory allocation functions .SH "LIBRARY" .PP -This manual describes jemalloc 1\&.0\&.0\-286\-ga8f8d7540d66ddee7337db80c92890916e1063ca\&. More information can be found at the +This manual describes jemalloc 1\&.0\&.0\-335\-g37b6f95dcd866f51c91488531a2efc3ed4c2b754\&. More information can be found at the \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. .PP The following configuration options are enabled in libc\*(Aqs built\-in jemalloc: \fB\-\-enable\-dss\fR, \fB\-\-enable\-experimental\fR, \fB\-\-enable\-fill\fR, \fB\-\-enable\-lazy\-lock\fR, \fB\-\-enable\-munmap\fR, \fB\-\-enable\-stats\fR, \fB\-\-enable\-tcache\fR, \fB\-\-enable\-tls\fR, \fB\-\-enable\-utrace\fR, and \fB\-\-enable\-xmalloc\fR\&. Additionally, \fB\-\-enable\-debug\fR is enabled in development versions of FreeBSD (controlled by the \fBMALLOC_PRODUCTION\fR make variable)\&. 
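Editorial aside, not part of the manual: before the SYNOPSIS, a brief illustrative example of the non-standard API documented below, using the FreeBSD malloc_np.h header that this commit's FREEBSD-diffs substitutes into the SYNOPSIS:

#include <stdlib.h>
#include <malloc_np.h>

int
main(void)
{
	void *p = malloc(100);
	size_t usable;

	if (p == NULL)
		return (1);
	/* May report more than the 100 requested bytes (size-class rounding). */
	usable = malloc_usable_size(p);
	free(p);
	/* Human-readable statistics via the default malloc_message();
	 * "g" omits general information that never changes during execution. */
	malloc_stats_print(NULL, NULL, "g");
	return (usable >= 100 ? 0 : 1);
}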
.SH "SYNOPSIS" .sp .ft B .nf #include #include .fi .ft .SS "Standard API" .HP \w'void\ *malloc('u .BI "void *malloc(size_t\ " "size" ");" .HP \w'void\ *calloc('u .BI "void *calloc(size_t\ " "number" ", size_t\ " "size" ");" .HP \w'int\ posix_memalign('u .BI "int posix_memalign(void\ **" "ptr" ", size_t\ " "alignment" ", size_t\ " "size" ");" .HP \w'void\ *aligned_alloc('u .BI "void *aligned_alloc(size_t\ " "alignment" ", size_t\ " "size" ");" .HP \w'void\ *realloc('u .BI "void *realloc(void\ *" "ptr" ", size_t\ " "size" ");" .HP \w'void\ free('u .BI "void free(void\ *" "ptr" ");" .SS "Non\-standard API" .HP \w'size_t\ malloc_usable_size('u .BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");" .HP \w'void\ malloc_stats_print('u .BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");" .HP \w'int\ mallctl('u .BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" .HP \w'int\ mallctlnametomib('u .BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");" .HP \w'int\ mallctlbymib('u .BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" .HP \w'void\ (*malloc_message)('u .BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");" .PP const char *\fImalloc_conf\fR; .SS "Experimental API" .HP \w'int\ allocm('u .BI "int allocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");" .HP \w'int\ rallocm('u .BI "int rallocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");" .HP \w'int\ sallocm('u .BI "int sallocm(const\ void\ *" "ptr" ", size_t\ *" "rsize" ", int\ " "flags" ");" .HP \w'int\ dallocm('u .BI "int dallocm(void\ *" "ptr" ", int\ " "flags" ");" .HP \w'int\ nallocm('u .BI "int nallocm(size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");" .SH "DESCRIPTION" .SS "Standard API" .PP The \fBmalloc\fR\fB\fR function allocates \fIsize\fR bytes of uninitialized memory\&. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&. .PP The \fBcalloc\fR\fB\fR function allocates space for \fInumber\fR objects, each \fIsize\fR bytes in length\&. The result is identical to calling \fBmalloc\fR\fB\fR with an argument of \fInumber\fR * \fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&. .PP The \fBposix_memalign\fR\fB\fR function allocates \fIsize\fR bytes of memory such that the allocation\*(Aqs base address is an even multiple of \fIalignment\fR, and returns the allocation in the value pointed to by \fIptr\fR\&. The requested \fIalignment\fR must be a power of 2 at least as large as sizeof(\fBvoid *\fR)\&. .PP The \fBaligned_alloc\fR\fB\fR function allocates \fIsize\fR bytes of memory such that the allocation\*(Aqs base address is an even multiple of \fIalignment\fR\&. The requested \fIalignment\fR must be a power of 2\&. Behavior is undefined if \fIsize\fR is not an integral multiple of \fIalignment\fR\&. .PP The \fBrealloc\fR\fB\fR function changes the size of the previously allocated memory referenced by \fIptr\fR to \fIsize\fR bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. 
If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by \fIptr\fR is freed and a pointer to the newly allocated memory is returned\&. Note that \fBrealloc\fR\fB\fR may move the memory allocation, resulting in a different return value than \fIptr\fR\&. If \fIptr\fR is \fBNULL\fR, the \fBrealloc\fR\fB\fR function behaves identically to \fBmalloc\fR\fB\fR for the specified size\&. .PP The \fBfree\fR\fB\fR function causes the allocated memory referenced by \fIptr\fR to be made available for future allocations\&. If \fIptr\fR is \fBNULL\fR, no action occurs\&. .SS "Non\-standard API" .PP The \fBmalloc_usable_size\fR\fB\fR function returns the usable size of the allocation pointed to by \fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The \fBmalloc_usable_size\fR\fB\fR function is not a mechanism for in\-place \fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by \fBmalloc_usable_size\fR\fB\fR should not be depended on, since such behavior is entirely implementation\-dependent\&. .PP The \fBmalloc_stats_print\fR\fB\fR function writes human\-readable summary statistics via the \fIwrite_cb\fR callback function pointer and \fIcbopaque\fR data passed to \fIwrite_cb\fR, or \fBmalloc_message\fR\fB\fR if \fIwrite_cb\fR is \fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the \fIopts\fR string\&. Note that \fBmalloc_message\fR\fB\fR uses the \fBmallctl*\fR\fB\fR functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If \fB\-\-enable\-stats\fR is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&. .PP The \fBmallctl\fR\fB\fR function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. The period\-separated \fIname\fR argument specifies a location in a tree\-structured namespace; see the MALLCTL NAMESPACE section for documentation on the tree contents\&. To read a value, pass a pointer via \fIoldp\fR to adequate space to contain the value, and a pointer to its length via \fIoldlenp\fR; otherwise pass \fBNULL\fR and \fBNULL\fR\&. Similarly, to write a value, pass a pointer to the value via \fInewp\fR, and its length via \fInewlen\fR; otherwise pass \fBNULL\fR and \fB0\fR\&. .PP The \fBmallctlnametomib\fR\fB\fR function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to \fBmallctlbymib\fR\fB\fR\&. 
.PP The \fBmallctlnametomib\fR\fB\fR function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to \fBmallctlbymib\fR\fB\fR\&. Upon successful return from \fBmallctlnametomib\fR\fB\fR, \fImibp\fR contains an array of \fI*miblenp\fR integers, where \fI*miblenp\fR is the lesser of the number of components in \fIname\fR and the input value of \fI*miblenp\fR\&. Thus it is possible to pass a \fI*miblenp\fR that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in "arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following:
.sp
.if n \{\
.RS 4
.\}
.nf
unsigned nbins, i;
size_t mib[4];
size_t len, miblen;

len = sizeof(nbins);
mallctl("arenas\&.nbins", &nbins, &len, NULL, 0);

miblen = 4;
mallctlnametomib("arenas\&.bin\&.0\&.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
        size_t bin_size;

        mib[2] = i;
        len = sizeof(bin_size);
        mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
        /* Do something with bin_size\&.\&.\&. */
}
.fi
.if n \{\
.RE
.\}
.SS "Experimental API" .PP The experimental API is subject to change or removal without regard for backward compatibility\&. If \fB\-\-disable\-experimental\fR is specified during configuration, the experimental API is omitted\&. .PP The \fBallocm\fR\fB\fR, \fBrallocm\fR\fB\fR, \fBsallocm\fR\fB\fR, \fBdallocm\fR\fB\fR, and \fBnallocm\fR\fB\fR functions all have a \fIflags\fR argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following: .PP \fBALLOCM_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR .RS 4 Align the memory allocation to start at an address that is a multiple of (1 << \fIla\fR)\&. This macro does not validate that \fIla\fR is within the valid range\&. .RE .PP \fBALLOCM_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR .RS 4 Align the memory allocation to start at an address that is a multiple of \fIa\fR, where \fIa\fR is a power of two\&. This macro does not validate that \fIa\fR is a power of 2\&. .RE .PP \fBALLOCM_ZERO\fR .RS 4 Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this option is absent, newly allocated memory is uninitialized\&. .RE .PP \fBALLOCM_NO_MOVE\fR .RS 4 For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&. .RE .PP The \fBallocm\fR\fB\fR function allocates at least \fIsize\fR bytes of memory, sets \fI*ptr\fR to the base address of the allocation, and sets \fI*rsize\fR to the real size of the allocation if \fIrsize\fR is not \fBNULL\fR\&. Behavior is undefined if \fIsize\fR is \fB0\fR\&. .PP The \fBrallocm\fR\fB\fR function resizes the allocation at \fI*ptr\fR to be at least \fIsize\fR bytes, sets \fI*ptr\fR to the base address of the allocation if it moved, and sets \fI*rsize\fR to the real size of the allocation if \fIrsize\fR is not \fBNULL\fR\&. If \fIextra\fR is non\-zero, an attempt is made to resize the allocation to be at least (\fIsize\fR + \fIextra\fR) bytes, though inability to allocate the extra byte(s) will not by itself result in failure\&. Behavior is undefined if \fIsize\fR is \fB0\fR, or if (\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&. .PP The \fBsallocm\fR\fB\fR function sets \fI*rsize\fR to the real size of the allocation\&.
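.PP By way of illustration (a minimal sketch, with error handling reduced to \fBabort\fR(3)), these functions might be combined as follows to allocate a zeroed, 64\-byte\-aligned buffer and then attempt to grow it in place:
.sp
.if n \{\
.RS 4
.\}
.nf
void *p;
size_t rsize;

if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
    ALLOCM_SUCCESS)
        abort();

/* Grow to 8192 bytes, but only if the object need not be moved\&. */
if (rallocm(&p, &rsize, 8192, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
        /* In\-place growth failed; fall back to a moving reallocation\&. */
        if (rallocm(&p, &rsize, 8192, 0, 0) != ALLOCM_SUCCESS)
                abort();
}
.fi
.if n \{\
.RE
.\}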
.PP The \fBdallocm\fR\fB\fR function causes the memory referenced by \fIptr\fR to be made available for future allocations\&. .PP The \fBnallocm\fR\fB\fR function allocates no memory, but it performs the same size computation as the \fBallocm\fR\fB\fR function, and if \fIrsize\fR is not \fBNULL\fR it sets \fI*rsize\fR to the real size of the allocation that would result from the equivalent \fBallocm\fR\fB\fR function call\&. Behavior is undefined if \fIsize\fR is \fB0\fR\&. .SH "TUNING" .PP Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&. .PP The string pointed to by the global variable \fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named /etc/malloc\&.conf, and the value of the environment variable \fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. .PP An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each "opt\&.*" mallctl (see the MALLCTL NAMESPACE section for options documentation)\&. For example, abort:true,narenas:1 sets the "opt\&.abort" and "opt\&.narenas" options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&.
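.PP For example, the same options string can be supplied for a single program run via the environment (see the ENVIRONMENT section; the program name here is arbitrary):
.sp
.if n \{\
.RS 4
.\}
.nf
MALLOC_CONF="abort:true,narenas:1" ./a\&.out
.fi
.if n \{\
.RE
.\}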
.SH "IMPLEMENTATION NOTES" .PP Traditionally, allocators have used \fBsbrk\fR(2) to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If \fB\-\-enable\-dss\fR is specified during configuration, this allocator uses both \fBmmap\fR(2) and \fBsbrk\fR(2), in that order of preference; otherwise only \fBmmap\fR(2) is used\&. .PP This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&. .PP In addition to multiple arenas, unless \fB\-\-disable\-tcache\fR is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&. .PP Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&. .PP User objects are broken into three categories according to size: small, large, and huge\&. Small objects are smaller than one page\&. Large objects are smaller than the chunk size\&. Huge objects are a multiple of the chunk size\&. Small and large objects are managed by arenas; huge objects are managed separately in a single data structure that is shared by all threads\&. Huge objects are used by applications infrequently enough that this single data structure is not a scalability issue\&. .PP Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&. .PP Small objects are managed in groups by page runs\&. Each run maintains a frontier and free list to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least sizeof(\fBdouble\fR)\&. All other small object size classes are multiples of the quantum, spaced such that internal fragmentation is limited to approximately 25% for all but the smallest size classes\&. Allocation requests that are larger than the maximum small size class, but small enough to fit in an arena\-managed chunk (see the "opt\&.lg_chunk" option), are rounded up to the nearest run size\&. Allocation requests that are too large to fit in an arena\-managed chunk are rounded up to the nearest multiple of the chunk size\&. .PP Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&. .PP Assuming 4 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in Table 1\&. .sp .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .B Table\ \&1.\ \&Size classes .TS allbox tab(:); lB rB lB. T{ Category T}:T{ Spacing T}:T{ Size T} .T& l r l ^ r l ^ r l ^ r l ^ r l ^ r l ^ r l l r l l r l. T{ Small T}:T{ lg T}:T{ [8] T} :T{ 16 T}:T{ [16, 32, 48, \&.\&.\&., 128] T} :T{ 32 T}:T{ [160, 192, 224, 256] T} :T{ 64 T}:T{ [320, 384, 448, 512] T} :T{ 128 T}:T{ [640, 768, 896, 1024] T} :T{ 256 T}:T{ [1280, 1536, 1792, 2048] T} :T{ 512 T}:T{ [2560, 3072, 3584] T} T{ Large T}:T{ 4 KiB T}:T{ [4 KiB, 8 KiB, 12 KiB, \&.\&.\&., 4072 KiB] T} T{ Huge T}:T{ 4 MiB T}:T{ [4 MiB, 8 MiB, 12 MiB, \&.\&.\&.] T} .TE .sp 1 .SH "MALLCTL NAMESPACE" .PP The following names are defined in the namespace accessible via the \fBmallctl*\fR\fB\fR functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as rw, r\-, \-w, or \-\-, and required build configuration flags follow, if any\&. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of "stats\&.arenas\&.<i>\&.*", <i> equal to "arenas\&.narenas" can be used to access the summation of statistics from all arenas\&. Take special note of the "epoch" mallctl, which controls refreshing of cached dynamic statistics\&. .PP "version" (\fBconst char *\fR) r\- .RS 4 Return the jemalloc version string\&. .RE .PP "epoch" (\fBuint64_t\fR) rw .RS 4 If a value is passed in, refresh the data from which the \fBmallctl*\fR\fB\fR functions report values, and increment the epoch\&. Return the current epoch\&.
This is useful for detecting whether another thread caused a refresh\&. .RE .PP "config\&.debug" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-debug\fR was specified during build configuration\&. .RE .PP "config\&.dss" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-dss\fR was specified during build configuration\&. .RE .PP "config\&.fill" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-fill\fR was specified during build configuration\&. .RE .PP "config\&.lazy_lock" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-lazy\-lock\fR was specified during build configuration\&. .RE .PP +"config\&.mremap" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-mremap\fR +was specified during build configuration\&. +.RE +.PP "config\&.munmap" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-munmap\fR was specified during build configuration\&. .RE .PP "config\&.prof" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-prof\fR was specified during build configuration\&. .RE .PP "config\&.prof_libgcc" (\fBbool\fR) r\- .RS 4 \fB\-\-disable\-prof\-libgcc\fR was not specified during build configuration\&. .RE .PP "config\&.prof_libunwind" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-prof\-libunwind\fR was specified during build configuration\&. .RE .PP "config\&.stats" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-stats\fR was specified during build configuration\&. .RE .PP "config\&.tcache" (\fBbool\fR) r\- .RS 4 \fB\-\-disable\-tcache\fR was not specified during build configuration\&. .RE .PP "config\&.tls" (\fBbool\fR) r\- .RS 4 \fB\-\-disable\-tls\fR was not specified during build configuration\&. .RE .PP "config\&.utrace" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-utrace\fR was specified during build configuration\&. .RE .PP "config\&.valgrind" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-valgrind\fR was specified during build configuration\&. .RE .PP "config\&.xmalloc" (\fBbool\fR) r\- .RS 4 \fB\-\-enable\-xmalloc\fR was specified during build configuration\&. .RE .PP "opt\&.abort" (\fBbool\fR) r\- .RS 4 Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call \fBabort\fR(3) in these cases\&. This option is disabled by default unless \fB\-\-enable\-debug\fR is specified during configuration, in which case it is enabled by default\&. .RE .PP "opt\&.lg_chunk" (\fBsize_t\fR) r\- .RS 4 Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&. .RE .PP "opt\&.narenas" (\fBsize_t\fR) r\- .RS 4 Maximum number of arenas to use\&. The default maximum number of arenas is four times the number of CPUs, or one if there is a single CPU\&. .RE .PP "opt\&.lg_dirty_mult" (\fBssize_t\fR) r\- .RS 4 Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via \fBmadvise\fR(2) or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 32:1 (2^5:1); an option value of \-1 will disable dirty page purging\&. .RE .PP "opt\&.stats_print" (\fBbool\fR) r\- .RS 4 Enable/disable statistics printing at exit\&. If enabled, the \fBmalloc_stats_print\fR\fB\fR function is called at program exit via an \fBatexit\fR(3) function\&. 
If \fB\-\-enable\-stats\fR is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&. .RE .PP "opt\&.junk" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0xa5\&. All deallocated memory will be initialized to 0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless \fB\-\-enable\-debug\fR is specified during configuration, in which case it is enabled by default\&. .RE .PP "opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the "opt\&.junk" option is enabled\&. This feature is of particular use in combination with \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0\&. .RE .PP "opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the "opt\&.junk" option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default\&. .RE .PP "opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] .RS 4 Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so \fBrealloc\fR\fB\fR and \fBrallocm\fR\fB\fR calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&. .RE .PP "opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR] .RS 4 Allocation tracing based on \fButrace\fR(2) enabled/disabled\&. This option is disabled by default\&. .RE .PP "opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR] .RS 4 \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2 support enabled/disabled\&. If enabled, several other options are automatically modified during options processing to work well with Valgrind: "opt\&.junk" and "opt\&.zero" are set to false, "opt\&.quarantine" is set to 16 MiB, and "opt\&.redzone" is set to true\&. This option is disabled by default\&. .RE .PP "opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR] .RS 4 Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on \fBSTDERR_FILENO\fR and cause the program to drop core (using \fBabort\fR(3))\&. 
If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code: .sp .if n \{\ .RS 4 .\} .nf malloc_conf = "xmalloc:true"; .fi .if n \{\ .RE .\} .sp This option is disabled by default\&. .RE .PP "opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the "opt\&.lg_tcache_max" option for related tuning information\&. This option is enabled by default\&. .RE .PP "opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Maximum size class (log base 2) to cache in the thread\-specific cache\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&. .RE .PP "opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the "opt\&.prof_active" option for on\-the\-fly activation/deactivation\&. See the "opt\&.lg_prof_sample" option for probabilistic sampling control\&. See the "opt\&.prof_accum" option for control of cumulative sample reporting\&. See the "opt\&.lg_prof_interval" option for information on interval\-triggered profile dumping, the "opt\&.prof_gdump" option for information on high\-water\-triggered profile dumping, and the "opt\&.prof_final" option for final profile dumping\&. Profile output is compatible with the included \fBpprof\fR Perl script, which originates from the \m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&. .RE .PP "opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is jeprof\&. .RE .PP "opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the "opt\&.prof" option) but inactive, then toggle profiling at any time during program execution with the "prof\&.active" mallctl\&. This option is enabled by default\&. .RE .PP "opt\&.lg_prof_sample" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&. .RE .PP "opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is disabled by default\&. .RE .PP "opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. 
The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. Profiles are dumped to files named according to the pattern <prefix>\&.<pid>\&.<seq>\&.i<iseq>\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&. .RE .PP "opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern <prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. This option is disabled by default\&. .RE .PP "opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Use an \fBatexit\fR(3) function to dump final memory usage to a file named according to the pattern <prefix>\&.<pid>\&.<seq>\&.f\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. This option is enabled by default\&. .RE .PP "opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Leak reporting enabled/disabled\&. If enabled, use an \fBatexit\fR(3) function to report memory leaks detected by allocation sampling\&. See the "opt\&.prof" option for information on analyzing heap profile output\&. This option is disabled by default\&. .RE .PP "thread\&.arena" (\fBunsigned\fR) rw .RS 4 Get or set the arena associated with the calling thread\&. The arena index must be less than the maximum number of arenas (see the "arenas\&.narenas" mallctl)\&. If the specified arena was not initialized beforehand (see the "arenas\&.initialized" mallctl), it will be automatically initialized as a side effect of calling this interface\&. .RE .PP "thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. .RE .PP "thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get a pointer to the value that is returned by the "thread\&.allocated" mallctl\&. This is useful for avoiding the overhead of repeated \fBmallctl*\fR\fB\fR calls\&. .RE .PP "thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. .RE .PP "thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Get a pointer to the value that is returned by the "thread\&.deallocated" mallctl\&. This is useful for avoiding the overhead of repeated \fBmallctl*\fR\fB\fR calls\&. .RE .PP "thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR] .RS 4 Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see "thread\&.tcache\&.flush")\&. .RE .PP "thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR] .RS 4 Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&.
However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&. .RE .PP "arenas\&.narenas" (\fBunsigned\fR) r\- .RS 4 Maximum number of arenas\&. .RE .PP "arenas\&.initialized" (\fBbool *\fR) r\- .RS 4 An array of "arenas\&.narenas" booleans\&. Each boolean indicates whether the corresponding arena is initialized\&. .RE .PP "arenas\&.quantum" (\fBsize_t\fR) r\- .RS 4 Quantum size\&. .RE .PP "arenas\&.page" (\fBsize_t\fR) r\- .RS 4 Page size\&. .RE .PP "arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Maximum thread\-cached size class\&. .RE .PP "arenas\&.nbins" (\fBunsigned\fR) r\- .RS 4 Number of bin size classes\&. .RE .PP "arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR] .RS 4 Total number of thread cache bin size classes\&. .RE .PP "arenas\&.bin\&.<i>\&.size" (\fBsize_t\fR) r\- .RS 4 Maximum size supported by size class\&. .RE .PP "arenas\&.bin\&.<i>\&.nregs" (\fBuint32_t\fR) r\- .RS 4 Number of regions per page run\&. .RE .PP "arenas\&.bin\&.<i>\&.run_size" (\fBsize_t\fR) r\- .RS 4 Number of bytes per page run\&. .RE .PP "arenas\&.nlruns" (\fBsize_t\fR) r\- .RS 4 Total number of large size classes\&. .RE .PP "arenas\&.lrun\&.<i>\&.size" (\fBsize_t\fR) r\- .RS 4 Maximum size supported by this large size class\&. .RE .PP "arenas\&.purge" (\fBunsigned\fR) \-w .RS 4 Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&. .RE .PP "prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] .RS 4 Control whether sampling is currently active\&. See the "opt\&.prof_active" option for additional information\&. .RE .PP "prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR] .RS 4 Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern <prefix>\&.<pid>\&.<seq>\&.m<mseq>\&.heap, where <prefix> is controlled by the "opt\&.prof_prefix" option\&. .RE .PP "prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR] .RS 4 Average number of bytes allocated between interval\-based profile dumps\&. See the "opt\&.lg_prof_interval" option for additional information\&. .RE .PP "stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up to the nearest multiple of the chunk size when computing its contribution to the counter\&. Note that the "epoch" mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&. .RE .PP "stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes allocated by the application\&. .RE .PP "stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to "stats\&.allocated"\&. .RE .PP "stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of bytes in chunks mapped on behalf of the application\&. This is a multiple of the chunk size, and is at least as large as "stats\&.active"\&. This does not include inactive chunks embedded in the DSS\&.
.RE .PP "stats\&.chunks\&.current" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks embedded in the DSS\&. .RE .PP "stats\&.chunks\&.total" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of chunks allocated\&. .RE .PP "stats\&.chunks\&.high" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Maximum number of active chunks at any time thus far\&. .RE .PP "stats\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes currently allocated by huge objects\&. .RE .PP "stats\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of huge allocation requests\&. .RE .PP "stats\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of huge deallocation requests\&. .RE .PP "stats\&.arenas\&.\&.nthreads" (\fBunsigned\fR) r\- .RS 4 Number of threads currently assigned to arena\&. .RE .PP "stats\&.arenas\&.\&.pactive" (\fBsize_t\fR) r\- .RS 4 Number of pages in active runs\&. .RE .PP "stats\&.arenas\&.\&.pdirty" (\fBsize_t\fR) r\- .RS 4 Number of pages within unused runs that are potentially dirty, and for which \fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR or similar has not been called\&. .RE .PP "stats\&.arenas\&.\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of mapped bytes\&. .RE .PP "stats\&.arenas\&.\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of dirty page purge sweeps performed\&. .RE .PP "stats\&.arenas\&.\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of \fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR or similar calls made to purge dirty pages\&. .RE .PP "stats\&.arenas\&.\&.npurged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of pages purged\&. .RE .PP "stats\&.arenas\&.\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes currently allocated by small objects\&. .RE .PP "stats\&.arenas\&.\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests served by small bins\&. .RE .PP "stats\&.arenas\&.\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of small objects returned to bins\&. .RE .PP "stats\&.arenas\&.\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of small allocation requests\&. .RE .PP "stats\&.arenas\&.\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Number of bytes currently allocated by large objects\&. .RE .PP "stats\&.arenas\&.\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of large allocation requests served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of large deallocation requests served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of large allocation requests\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of bytes allocated by bin\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocations served by bin\&. 
.RE .PP "stats\&.arenas\&.\&.bins\&.\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocations returned to bin\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR] .RS 4 Cumulative number of tcache fills\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR] .RS 4 Cumulative number of tcache flushes\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of runs created\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of times the current run from which to allocate changed\&. .RE .PP "stats\&.arenas\&.\&.bins\&.\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of runs\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests for this size class served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of deallocation requests for this size class served directly by the arena\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Cumulative number of allocation requests for this size class\&. .RE .PP "stats\&.arenas\&.\&.lruns\&.\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] .RS 4 Current number of runs for this size class\&. .RE .SH "DEBUGGING MALLOC PROBLEMS" .PP When debugging, it is a good idea to configure/build jemalloc with the \fB\-\-enable\-debug\fR and \fB\-\-enable\-fill\fR options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&. .PP Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. Junk filling (see the "opt\&.junk" option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the "opt\&.zero" option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&. .PP This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent \m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2 tool if the \fB\-\-enable\-valgrind\fR configuration option is enabled and the "opt\&.valgrind" option is enabled\&. .SH "DIAGNOSTIC MESSAGES" .PP If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor \fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the "opt\&.abort" option is set, most warnings are treated as errors\&. 
.SH "DIAGNOSTIC MESSAGES" .PP If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor \fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the "opt\&.abort" option is set, most warnings are treated as errors\&. .PP The \fImalloc_message\fR variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the \fBSTDERR_FILENO\fR file descriptor is not suitable for this\&. \fBmalloc_message\fR\fB\fR takes the \fIcbopaque\fR pointer argument that is \fBNULL\fR unless overridden by the arguments in a call to \fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&. .PP All messages are prefixed by \(lq<jemalloc>: \(rq\&. .SH "RETURN VALUES" .SS "Standard API" .PP The \fBmalloc\fR\fB\fR and \fBcalloc\fR\fB\fR functions return a pointer to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned and \fIerrno\fR is set to ENOMEM\&. .PP The \fBposix_memalign\fR\fB\fR function returns the value 0 if successful; otherwise it returns an error value\&. The \fBposix_memalign\fR\fB\fR function will fail if: .PP EINVAL .RS 4 The \fIalignment\fR parameter is not a power of 2 at least as large as sizeof(\fBvoid *\fR)\&. .RE .PP ENOMEM .RS 4 Memory allocation error\&. .RE .PP The \fBaligned_alloc\fR\fB\fR function returns a pointer to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned and \fIerrno\fR is set\&. The \fBaligned_alloc\fR\fB\fR function will fail if: .PP EINVAL .RS 4 The \fIalignment\fR parameter is not a power of 2\&. .RE .PP ENOMEM .RS 4 Memory allocation error\&. .RE .PP The \fBrealloc\fR\fB\fR function returns a pointer, possibly identical to \fIptr\fR, to the allocated memory if successful; otherwise a \fBNULL\fR pointer is returned, and \fIerrno\fR is set to ENOMEM if the error was the result of an allocation failure\&. The \fBrealloc\fR\fB\fR function always leaves the original buffer intact when an error occurs\&. .PP The \fBfree\fR\fB\fR function returns no value\&. .SS "Non\-standard API" .PP The \fBmalloc_usable_size\fR\fB\fR function returns the usable size of the allocation pointed to by \fIptr\fR\&. .PP The \fBmallctl\fR\fB\fR, \fBmallctlnametomib\fR\fB\fR, and \fBmallctlbymib\fR\fB\fR functions return 0 on success; otherwise they return an error value\&. The functions will fail if: .PP EINVAL .RS 4 \fInewp\fR is not \fBNULL\fR, and \fInewlen\fR is too large or too small\&. Alternatively, \fI*oldlenp\fR is too large or too small; in this case as much data as possible are read despite the error\&. .RE .PP ENOMEM .RS 4 \fI*oldlenp\fR is too short to hold the requested value\&. .RE .PP ENOENT .RS 4 \fIname\fR or \fImib\fR specifies an unknown/invalid value\&. .RE .PP EPERM .RS 4 Attempt to read or write void value, or attempt to write read\-only value\&. .RE .PP EAGAIN .RS 4 A memory allocation failure occurred\&. .RE .PP EFAULT .RS 4 An interface with side effects failed in some way not directly related to \fBmallctl*\fR\fB\fR read/write processing\&. .RE .SS "Experimental API" .PP The \fBallocm\fR\fB\fR, \fBrallocm\fR\fB\fR, \fBsallocm\fR\fB\fR, \fBdallocm\fR\fB\fR, and \fBnallocm\fR\fB\fR functions return \fBALLOCM_SUCCESS\fR on success; otherwise they return an error value\&. The \fBallocm\fR\fB\fR, \fBrallocm\fR\fB\fR, and \fBnallocm\fR\fB\fR functions will fail if: .PP ALLOCM_ERR_OOM .RS 4 Out of memory\&. Insufficient contiguous memory was available to service the allocation request\&. The \fBallocm\fR\fB\fR function additionally sets \fI*ptr\fR to \fBNULL\fR, whereas the \fBrallocm\fR\fB\fR function leaves \fB*ptr\fR unmodified\&.
.RE The \fBrallocm\fR\fB\fR function will also fail if: .PP ALLOCM_ERR_NOT_MOVED .RS 4 \fBALLOCM_NO_MOVE\fR was specified, but the reallocation request could not be serviced without moving the object\&. .RE .SH "ENVIRONMENT" .PP The following environment variable affects the execution of the allocation functions: .PP \fBMALLOC_CONF\fR .RS 4 If the environment variable \fBMALLOC_CONF\fR is set, the characters it contains will be interpreted as options\&. .RE .SH "EXAMPLES" .PP To dump core whenever a problem occurs: .sp .if n \{\ .RS 4 .\} .nf ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf .fi .if n \{\ .RE .\} .PP To specify in the source a chunk size that is 16 MiB: .sp .if n \{\ .RS 4 .\} .nf malloc_conf = "lg_chunk:24"; .fi .if n \{\ .RE .\} .SH "SEE ALSO" .PP \fBmadvise\fR(2), \fBmmap\fR(2), \fBsbrk\fR(2), \fButrace\fR(2), \fBalloca\fR(3), \fBatexit\fR(3), \fBgetpagesize\fR(3) .SH "STANDARDS" .PP The \fBmalloc\fR\fB\fR, \fBcalloc\fR\fB\fR, \fBrealloc\fR\fB\fR, and \fBfree\fR\fB\fR functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&. .PP The \fBposix_memalign\fR\fB\fR function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&. .SH "HISTORY" .PP The \fBmalloc_usable_size\fR\fB\fR and \fBposix_memalign\fR\fB\fR functions first appeared in FreeBSD 7\&.0\&. .PP The \fBaligned_alloc\fR\fB\fR, \fBmalloc_stats_print\fR\fB\fR, \fBmallctl*\fR\fB\fR, and \fB*allocm\fR\fB\fR functions first appeared in FreeBSD 10\&.0\&. .SH "AUTHOR" .PP \fBJason Evans\fR .RS 4 .RE .SH "NOTES" .IP " 1." 4 jemalloc website .RS 4 \%http://www.canonware.com/jemalloc/ .RE .IP " 2." 4 Valgrind .RS 4 -\%http://http://valgrind.org/ +\%http://valgrind.org/ .RE .IP " 3." 4 gperftools package .RS 4 \%http://code.google.com/p/gperftools/ .RE Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/arena.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/arena.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/arena.h (revision 235263) @@ -1,725 +1,970 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized * as small as possible such that this setting is still honored, without * violating other constraints. The goal is to make runs as small as possible * without exceeding a per run external fragmentation threshold. * * We use binary fixed point math for overhead computations, where the binary * point is implicitly RUN_BFP bits to the left. * * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be * honored for some/all object sizes, since when heap profiling is enabled * there is one pointer of header overhead per object (plus a constant). This * constraint is relaxed (ignored) for runs that are so small that the * per-region overhead is greater than: * * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) */ #define RUN_BFP 12 /* \/ Implicit binary fixed point. */ #define RUN_MAX_OVRHD 0x0000003dU #define RUN_MAX_OVRHD_RELAX 0x00001800U /* Maximum number of regions in one run. */ #define LG_RUN_MAXREGS 11 #define RUN_MAXREGS (1U << LG_RUN_MAXREGS) /* * Minimum redzone size. Redzones may be larger than this if necessary to * preserve region alignment. 
*/ #define REDZONE_MINSIZE 16 /* * The minimum ratio of active:dirty pages per arena is computed as: * * (nactive >> opt_lg_dirty_mult) >= ndirty * * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32 * times as many active pages as dirty pages. */ #define LG_DIRTY_MULT_DEFAULT 5 typedef struct arena_chunk_map_s arena_chunk_map_t; typedef struct arena_chunk_s arena_chunk_t; typedef struct arena_run_s arena_run_t; typedef struct arena_bin_info_s arena_bin_info_t; typedef struct arena_bin_s arena_bin_t; typedef struct arena_s arena_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Each element of the chunk map corresponds to one page within the chunk. */ struct arena_chunk_map_s { #ifndef JEMALLOC_PROF /* * Overlay prof_ctx in order to allow it to be referenced by dead code. * Such antics aren't warranted for per arena data structures, but * chunk map overhead accounts for a percentage of memory, rather than * being just a fixed cost. */ union { #endif union { /* * Linkage for run trees. There are two disjoint uses: * * 1) arena_t's runs_avail_{clean,dirty} trees. * 2) arena_run_t conceptually uses this linkage for in-use * non-full runs, rather than directly embedding linkage. */ rb_node(arena_chunk_map_t) rb_link; /* * List of runs currently in purgatory. arena_chunk_purge() * temporarily allocates runs that contain dirty pages while * purging, so that other threads cannot use the runs while the * purging thread is operating without the arena lock held. */ ql_elm(arena_chunk_map_t) ql_link; } u; /* Profile counters, used for large object runs. */ prof_ctx_t *prof_ctx; #ifndef JEMALLOC_PROF }; /* union { ... }; */ #endif /* * Run address (or size) and various flags are stored together. The bit * layout looks like (assuming 32-bit system): * * ???????? ???????? ????---- ----dula * * ? : Unallocated: Run address for first/last pages, unset for internal * pages. * Small: Run page offset. * Large: Run size for first page, unset for trailing pages. * - : Unused. * d : dirty? * u : unzeroed? * l : large? * a : allocated? * * Following are example bit patterns for the three types of runs. 
* * p : run page offset * s : run size - * c : (binind+1) for size class (used only if prof_promote is true) + * n : binind for size class; large objects set these to BININD_INVALID + * except for promoted allocations (see prof_promote) * x : don't care * - : 0 * + : 1 * [DULA] : bit set * [dula] : bit unset * * Unallocated (clean): - * ssssssss ssssssss ssss---- ----du-a - * xxxxxxxx xxxxxxxx xxxx---- -----Uxx - * ssssssss ssssssss ssss---- ----dU-a + * ssssssss ssssssss ssss1111 1111du-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx + * ssssssss ssssssss ssss1111 1111dU-a * * Unallocated (dirty): - * ssssssss ssssssss ssss---- ----D--a - * xxxxxxxx xxxxxxxx xxxx---- ----xxxx - * ssssssss ssssssss ssss---- ----D--a + * ssssssss ssssssss ssss1111 1111D--a + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * ssssssss ssssssss ssss1111 1111D--a * * Small: - * pppppppp pppppppp pppp---- ----d--A - * pppppppp pppppppp pppp---- -------A - * pppppppp pppppppp pppp---- ----d--A + * pppppppp pppppppp ppppnnnn nnnnd--A + * pppppppp pppppppp ppppnnnn nnnn---A + * pppppppp pppppppp ppppnnnn nnnnd--A * * Large: - * ssssssss ssssssss ssss---- ----D-LA - * xxxxxxxx xxxxxxxx xxxx---- ----xxxx - * -------- -------- -------- ----D-LA + * ssssssss ssssssss ssss1111 1111D-LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ----1111 1111D-LA * * Large (sampled, size <= PAGE): - * ssssssss ssssssss sssscccc ccccD-LA + * ssssssss ssssssss ssssnnnn nnnnD-LA * * Large (not sampled, size == PAGE): - * ssssssss ssssssss ssss---- ----D-LA + * ssssssss ssssssss ssss1111 1111D-LA */ size_t bits; -#define CHUNK_MAP_CLASS_SHIFT 4 -#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U) -#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU) +#define CHUNK_MAP_BININD_SHIFT 4 +#define BININD_INVALID ((size_t)0xffU) +/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ +#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) +#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK +#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) #define CHUNK_MAP_DIRTY ((size_t)0x8U) #define CHUNK_MAP_UNZEROED ((size_t)0x4U) #define CHUNK_MAP_LARGE ((size_t)0x2U) #define CHUNK_MAP_ALLOCATED ((size_t)0x1U) #define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED }; typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; /* Arena chunk header. */ struct arena_chunk_s { /* Arena that owns the chunk. */ arena_t *arena; /* Linkage for the arena's chunks_dirty list. */ ql_elm(arena_chunk_t) link_dirty; /* * True if the chunk is currently in the chunks_dirty list, due to * having at some point contained one or more dirty pages. Removal * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible. */ bool dirtied; /* Number of dirty pages. */ size_t ndirty; /* * Map of pages within chunk that keeps track of free/large/small. The * first map_bias entries are omitted, since the chunk header does not * need to be tracked in the map. This omission saves a header page * for common chunk sizes (e.g. 4 MiB). */ arena_chunk_map_t map[1]; /* Dynamically sized. */ }; typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; struct arena_run_s { /* Bin this run is associated with. */ arena_bin_t *bin; /* Index of next region that has never been allocated, or nregs. */ uint32_t nextind; /* Number of free regions in run. 
*/ unsigned nfree; }; /* * Read-only information associated with each element of arena_t's bins array * is stored separately, partly to reduce memory usage (only one copy, rather * than one per arena), but mainly to avoid false cacheline sharing. * * Each run has the following layout: * * /--------------------\ * | arena_run_t header | * | ... | * bitmap_offset | bitmap | * | ... | * ctx0_offset | ctx map | * | ... | * |--------------------| * | redzone | * reg0_offset | region 0 | * | redzone | * |--------------------| \ * | redzone | | * | region 1 | > reg_interval * | redzone | / * |--------------------| * | ... | * | ... | * | ... | * |--------------------| * | redzone | * | region nregs-1 | * | redzone | * |--------------------| * | alignment pad? | * \--------------------/ * * reg_interval has at least the same minimum alignment as reg_size; this * preserves the alignment constraint that sa2u() depends on. Alignment pad is * either 0 or redzone_size; it is present only if needed to align reg0_offset. */ struct arena_bin_info_s { /* Size of regions in a run for this bin's size class. */ size_t reg_size; /* Redzone size. */ size_t redzone_size; /* Interval between regions (reg_size + (redzone_size << 1)). */ size_t reg_interval; /* Total size of a run for this bin's size class. */ size_t run_size; /* Total number of regions in a run for this bin's size class. */ uint32_t nregs; /* * Offset of first bitmap_t element in a run header for this bin's size * class. */ uint32_t bitmap_offset; /* * Metadata used to manipulate bitmaps for runs associated with this * bin. */ bitmap_info_t bitmap_info; /* * Offset of first (prof_ctx_t *) in a run header for this bin's size * class, or 0 if (config_prof == false || opt_prof == false). */ uint32_t ctx0_offset; /* Offset of first region in a run for this bin's size class. */ uint32_t reg0_offset; }; struct arena_bin_s { /* * All operations on runcur, runs, and stats require that lock be * locked. Run allocation/deallocation are protected by the arena lock, * which may be acquired while holding one or more bin locks, but not * vice versa. */ malloc_mutex_t lock; /* * Current run being used to service allocations of this bin's size * class. */ arena_run_t *runcur; /* * Tree of non-full runs. This tree is used when looking for an * existing run when runcur is no longer usable. We choose the * non-full run that is lowest in memory; this policy tends to keep * objects packed well, and it can also help reduce the number of * almost-empty chunks. */ arena_run_tree_t runs; /* Bin statistics. */ malloc_bin_stats_t stats; }; struct arena_s { /* This arena's index within the arenas array. */ unsigned ind; /* * Number of threads currently assigned to this arena. This field is * protected by arenas_lock. */ unsigned nthreads; /* * There are three classes of arena operations from a locking * perspective: * 1) Thread assignment (modifies nthreads) is protected by * arenas_lock. * 2) Bin-related operations are protected by bin locks. * 3) Chunk- and run-related operations are protected by this mutex. */ malloc_mutex_t lock; arena_stats_t stats; /* * List of tcaches for extant threads associated with this arena. * Stats from these are merged incrementally, and at exit. */ ql_head(tcache_t) tcache_ql; uint64_t prof_accumbytes; /* List of dirty-page-containing chunks this arena manages.
*/ ql_head(arena_chunk_t) chunks_dirty; /* * In order to avoid rapid chunk allocation/deallocation when an arena * oscillates right on the cusp of needing a new chunk, cache the most * recently freed chunk. The spare is left in the arena's chunk trees * until it is deleted. * * There is one spare chunk per arena, rather than one spare total, in * order to avoid interactions between multiple threads that could make * a single spare inadequate. */ arena_chunk_t *spare; /* Number of pages in active runs. */ size_t nactive; /* * Current count of pages within unused runs that are potentially * dirty, and for which madvise(... MADV_DONTNEED) has not been called. * By tracking this, we can institute a limit on how much dirty unused * memory is mapped for each arena. */ size_t ndirty; /* * Approximate number of pages being purged. It is possible for * multiple threads to purge dirty pages concurrently, and they use * npurgatory to indicate the total number of pages all threads are * attempting to purge. */ size_t npurgatory; /* * Size/address-ordered trees of this arena's available runs. The trees * are used for first-best-fit run allocation. The dirty tree contains * runs with dirty pages (i.e. very likely to have been touched and * therefore have associated physical pages), whereas the clean tree * contains runs with pages that either have no associated physical * pages, or have pages that the kernel may recycle at any time due to * previous madvise(2) calls. The dirty tree is used in preference to * the clean tree for allocations, because using dirty pages reduces * the amount of dirty purging necessary to keep the active:dirty page * ratio below the purge threshold. */ arena_avail_tree_t runs_avail_clean; arena_avail_tree_t runs_avail_dirty; /* bins is used to store trees of free regions. */ arena_bin_t bins[NBINS]; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern ssize_t opt_lg_dirty_mult; /* * small_size2bin is a compact lookup table that rounds request sizes up to * size classes. In order to reduce cache footprint, the table is compressed, * and all accesses are via the SMALL_SIZE2BIN macro. */ extern uint8_t const small_size2bin[]; #define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) extern arena_bin_info_t arena_bin_info[NBINS]; /* Number of large size classes. 
*/ #define nlclasses (chunk_npages - map_bias) void arena_purge_all(arena_t *arena); void arena_prof_accum(arena_t *arena, uint64_t accumbytes); void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); void *arena_malloc_small(arena_t *arena, size_t size, bool zero); void *arena_malloc_large(arena_t *arena, size_t size, bool zero); void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); void arena_prof_promoted(const void *ptr, size_t size); -void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, +void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_t *mapelm); +void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind, arena_chunk_map_t *mapelm); +void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind); +void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, + void *ptr); void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats); void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache); bool arena_new(arena_t *arena, unsigned ind); void arena_boot(void); void arena_prefork(arena_t *arena); void arena_postfork_parent(arena_t *arena); void arena_postfork_child(arena_t *arena); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE +arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); +size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); +void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size); +void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + size_t binind); +void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, + size_t runind, size_t binind, size_t flags); +void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, + size_t unzeroed); +size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const 
void *ptr); prof_ctx_t *arena_prof_ctx_get(const void *ptr); void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); size_t arena_salloc(const void *ptr, bool demote); void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) +# ifdef JEMALLOC_ARENA_INLINE_A +JEMALLOC_INLINE arena_chunk_map_t * +arena_mapp_get(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return (&chunk->map[pageind-map_bias]); +} + +JEMALLOC_INLINE size_t * +arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) +{ + + return (&arena_mapp_get(chunk, pageind)->bits); +} + JEMALLOC_INLINE size_t +arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) +{ + + return (*arena_mapbitsp_get(chunk, pageind)); +} + +JEMALLOC_INLINE size_t +arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + return (mapbits & ~PAGE_MASK); +} + +JEMALLOC_INLINE size_t +arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); + return (mapbits & ~PAGE_MASK); +} + +JEMALLOC_INLINE size_t +arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + CHUNK_MAP_ALLOCATED); + return (mapbits >> LG_PAGE); +} + +JEMALLOC_INLINE size_t +arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + size_t binind; + + mapbits = arena_mapbits_get(chunk, pageind); + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + assert(binind < NBINS || binind == BININD_INVALID); + return (binind); +} + +JEMALLOC_INLINE size_t +arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_DIRTY); +} + +JEMALLOC_INLINE size_t +arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_UNZEROED); +} + +JEMALLOC_INLINE size_t +arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_LARGE); +} + +JEMALLOC_INLINE size_t +arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_INLINE void +arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp; + + mapbitsp = arena_mapbitsp_get(chunk, pageind); + assert((size & PAGE_MASK) == 0); + assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); + *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags; +} + +JEMALLOC_INLINE void +arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size) +{ + size_t *mapbitsp; + + mapbitsp = arena_mapbitsp_get(chunk, pageind); + assert((size & PAGE_MASK) == 0); + assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 
0); + *mapbitsp = size | (*mapbitsp & PAGE_MASK); +} + +JEMALLOC_INLINE void +arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp; + + mapbitsp = arena_mapbitsp_get(chunk, pageind); + assert((size & PAGE_MASK) == 0); + assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); + *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED; +} + +JEMALLOC_INLINE void +arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + size_t binind) +{ + size_t *mapbitsp; + + assert(binind <= BININD_INVALID); + mapbitsp = arena_mapbitsp_get(chunk, pageind); + assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); + *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind << + CHUNK_MAP_BININD_SHIFT); +} + +JEMALLOC_INLINE void +arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, + size_t binind, size_t flags) +{ + size_t *mapbitsp; + + assert(binind < BININD_INVALID); + mapbitsp = arena_mapbitsp_get(chunk, pageind); + assert(pageind - runind >= map_bias); + assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); + *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) | + flags | CHUNK_MAP_ALLOCATED; +} + +JEMALLOC_INLINE void +arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, + size_t unzeroed) +{ + size_t *mapbitsp; + + mapbitsp = arena_mapbitsp_get(chunk, pageind); + *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed; +} + +JEMALLOC_INLINE size_t +arena_ptr_small_binind_get(const void *ptr, size_t mapbits) +{ + size_t binind; + + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + + if (config_debug) { + arena_chunk_t *chunk; + arena_t *arena; + size_t pageind; + size_t actual_mapbits; + arena_run_t *run; + arena_bin_t *bin; + size_t actual_binind; + arena_bin_info_t *bin_info; + + assert(binind != BININD_INVALID); + assert(binind < NBINS); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = chunk->arena; + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + actual_mapbits = arena_mapbits_get(chunk, pageind); + assert(mapbits == actual_mapbits); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + (actual_mapbits >> LG_PAGE)) << LG_PAGE)); + bin = run->bin; + actual_binind = bin - arena->bins; + assert(binind == actual_binind); + bin_info = &arena_bin_info[actual_binind]; + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval + == 0); + } + + return (binind); +} +# endif /* JEMALLOC_ARENA_INLINE_A */ + +# ifdef JEMALLOC_ARENA_INLINE_B +JEMALLOC_INLINE size_t arena_bin_index(arena_t *arena, arena_bin_t *bin) { size_t binind = bin - arena->bins; assert(binind < NBINS); return (binind); } JEMALLOC_INLINE unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) { unsigned shift, diff, regind; size_t interval; /* * Freeing a pointer lower than region zero can cause assertion * failure. */ assert((uintptr_t)ptr >= (uintptr_t)run + (uintptr_t)bin_info->reg0_offset); /* * Avoid doing division with a variable divisor if possible. Using * actual division here can reduce allocator throughput by over 20%! */ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin_info->reg0_offset); /* Rescale (factor powers of 2 out of the numerator and denominator). 
*/ interval = bin_info->reg_interval; shift = ffs(interval) - 1; diff >>= shift; interval >>= shift; if (interval == 1) { /* The divisor was a power of 2. */ regind = diff; } else { /* * To divide by a number D that is not a power of two we * multiply by (2^21 / D) and then right shift by 21 positions. * * X / D * * becomes * * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT * * We can omit the first three elements, because we never * divide by 0, and 1 and 2 are both powers of two, which are * handled above. */ #define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) #define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) static const unsigned interval_invs[] = { SIZE_INV(3), SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) }; if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + 2)) { regind = (diff * interval_invs[interval - 3]) >> SIZE_INV_SHIFT; } else regind = diff / interval; #undef SIZE_INV #undef SIZE_INV_SHIFT } assert(diff == regind * interval); assert(regind < bin_info->nregs); return (regind); } JEMALLOC_INLINE prof_ctx_t * arena_prof_ctx_get(const void *ptr) { prof_ctx_t *ret; arena_chunk_t *chunk; size_t pageind, mapbits; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = chunk->map[pageind-map_bias].bits; + mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); if ((mapbits & CHUNK_MAP_LARGE) == 0) { if (prof_promote) ret = (prof_ctx_t *)(uintptr_t)1U; else { arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE)); - size_t binind = arena_bin_index(chunk->arena, run->bin); + size_t binind = arena_ptr_small_binind_get(ptr, + mapbits); arena_bin_info_t *bin_info = &arena_bin_info[binind]; unsigned regind; regind = arena_run_regind(run, bin_info, ptr); ret = *(prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t *))); } } else - ret = chunk->map[pageind-map_bias].prof_ctx; + ret = arena_mapp_get(chunk, pageind)->prof_ctx; return (ret); } JEMALLOC_INLINE void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) { arena_chunk_t *chunk; size_t pageind, mapbits; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = chunk->map[pageind-map_bias].bits; + mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); if ((mapbits & CHUNK_MAP_LARGE) == 0) { if (prof_promote == false) { arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE)); - arena_bin_t *bin = run->bin; size_t binind; arena_bin_info_t *bin_info; unsigned regind; - binind = arena_bin_index(chunk->arena, bin); + binind = arena_ptr_small_binind_get(ptr, mapbits); bin_info = &arena_bin_info[binind]; regind = arena_run_regind(run, bin_info, ptr); *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t *)))) = ctx; } else assert((uintptr_t)ctx == (uintptr_t)1U); 
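The comment in arena_run_regind() above compresses a classic strength-reduction argument. The standalone check below is not jemalloc code; it assumes LG_RUN_MAXREGS is 11, which yields the 2^21 shift the comment cites, and verifies the multiply-and-shift against true division for every divisor the interval_invs[] table covers:

#include <assert.h>
#include <stdio.h>

#define LG_RUN_MAXREGS  11
#define SIZE_INV_SHIFT  ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s)     (((1U << SIZE_INV_SHIFT) / (s)) + 1)

int
main(void)
{
    unsigned interval, regind;

    for (interval = 3; interval <= 31; interval++) {
        unsigned inv = SIZE_INV(interval);

        /* Region indices are bounded by RUN_MAXREGS (2^11), which
         * also keeps diff * inv below 2^32. */
        for (regind = 0; regind < (1U << LG_RUN_MAXREGS); regind++) {
            unsigned diff = regind * interval;

            /* Multiply-and-shift agrees with true division. */
            assert(((diff * inv) >> SIZE_INV_SHIFT) == regind);
        }
    }
    printf("2^%zu-scaled reciprocals verified for divisors 3..31\n",
        SIZE_INV_SHIFT);
    return (0);
}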
} else - chunk->map[pageind-map_bias].prof_ctx = ctx; + arena_mapp_get(chunk, pageind)->prof_ctx = ctx; } JEMALLOC_INLINE void * arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) { tcache_t *tcache; assert(size != 0); assert(size <= arena_maxclass); if (size <= SMALL_MAXCLASS) { if (try_tcache && (tcache = tcache_get(true)) != NULL) return (tcache_alloc_small(tcache, size, zero)); else { return (arena_malloc_small(choose_arena(arena), size, zero)); } } else { /* * Initialize tcache after checking size in order to avoid * infinite recursion during tcache initialization. */ if (try_tcache && size <= tcache_maxclass && (tcache = tcache_get(true)) != NULL) return (tcache_alloc_large(tcache, size, zero)); else { return (arena_malloc_large(choose_arena(arena), size, zero)); } } } /* Return the size of the allocation pointed to by ptr. */ JEMALLOC_INLINE size_t arena_salloc(const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; - size_t pageind, mapbits; + size_t pageind, binind; assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = chunk->map[pageind-map_bias].bits; - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE)); - size_t binind = arena_bin_index(chunk->arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval - == 0); - ret = bin_info->reg_size; - } else { + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + binind = arena_mapbits_binind_get(chunk, pageind); + if (binind == BININD_INVALID || (config_prof && demote == false && + prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) { + /* + * Large allocation. In the common case (demote == true), and + * as this is an inline function, most callers will only end up + * looking at binind to determine that ptr is a small + * allocation. + */ assert(((uintptr_t)ptr & PAGE_MASK) == 0); - ret = mapbits & ~PAGE_MASK; - if (config_prof && demote && prof_promote && ret == PAGE && - (mapbits & CHUNK_MAP_CLASS_MASK) != 0) { - size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >> - CHUNK_MAP_CLASS_SHIFT) - 1; - assert(binind < NBINS); - ret = arena_bin_info[binind].reg_size; - } + ret = arena_mapbits_large_size_get(chunk, pageind); assert(ret != 0); + assert(pageind + (ret>>LG_PAGE) <= chunk_npages); + assert(ret == PAGE || arena_mapbits_large_size_get(chunk, + pageind+(ret>>LG_PAGE)-1) == 0); + assert(binind == arena_mapbits_binind_get(chunk, + pageind+(ret>>LG_PAGE)-1)); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); + } else { + /* + * Small allocation (possibly promoted to a large object due to + * prof_promote). 
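The accessor layer above treats each map entry's bits word as a packed record: run size or region index in the bits above the page mask, a bin index field, and flag bits at the bottom, with BININD_INVALID marking large runs so arena_salloc() can classify a pointer from a single read. A toy repacking with illustrative field positions (LG_PAGE = 12, an 8-bit binind at bit 4; the real widths come from size_classes.h):

#include <assert.h>
#include <stddef.h>

#define LG_PAGE                 12
#define PAGE                    ((size_t)1 << LG_PAGE)
#define PAGE_MASK               (PAGE - 1)
#define CHUNK_MAP_ALLOCATED     ((size_t)0x1)
#define CHUNK_MAP_LARGE         ((size_t)0x2)
#define CHUNK_MAP_BININD_SHIFT  4
#define BININD_INVALID          ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK   (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)

int
main(void)
{
    /* A 3-page large run: byte size in the high bits, binind parked
     * at the invalid sentinel, flag bits at the bottom. */
    size_t mapbits = (3 * PAGE) |
        (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) |
        CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    /* One word answers both questions arena_salloc() asks. */
    assert((mapbits & ~PAGE_MASK) == 3 * PAGE);
    assert(((mapbits & CHUNK_MAP_BININD_MASK) >>
        CHUNK_MAP_BININD_SHIFT) == BININD_INVALID);
    return (0);
}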
+ */ + assert(arena_mapbits_large_get(chunk, pageind) != 0 || + arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, + pageind)) == binind); + ret = arena_bin_info[binind].reg_size; } return (ret); } JEMALLOC_INLINE void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache) { - size_t pageind; - arena_chunk_map_t *mapelm; + size_t pageind, mapbits; tcache_t *tcache; assert(arena != NULL); assert(chunk->arena == arena); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapelm = &chunk->map[pageind-map_bias]; - assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) { + mapbits = arena_mapbits_get(chunk, pageind); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + if ((mapbits & CHUNK_MAP_LARGE) == 0) { /* Small allocation. */ - if (try_tcache && (tcache = tcache_get(false)) != NULL) - tcache_dalloc_small(tcache, ptr); - else { - arena_run_t *run; - arena_bin_t *bin; + if (try_tcache && (tcache = tcache_get(false)) != NULL) { + size_t binind; - run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) << - LG_PAGE)); - bin = run->bin; - if (config_debug) { - size_t binind = arena_bin_index(arena, bin); - UNUSED arena_bin_info_t *bin_info = - &arena_bin_info[binind]; - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % - bin_info->reg_interval == 0); - } - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin(arena, chunk, ptr, mapelm); - malloc_mutex_unlock(&bin->lock); - } + binind = arena_ptr_small_binind_get(ptr, mapbits); + tcache_dalloc_small(tcache, ptr, binind); + } else + arena_dalloc_small(arena, chunk, ptr, pageind); } else { - size_t size = mapelm->bits & ~PAGE_MASK; + size_t size = arena_mapbits_large_size_get(chunk, pageind); assert(((uintptr_t)ptr & PAGE_MASK) == 0); if (try_tcache && size <= tcache_maxclass && (tcache = tcache_get(false)) != NULL) { tcache_dalloc_large(tcache, ptr, size); - } else { - malloc_mutex_lock(&arena->lock); + } else arena_dalloc_large(arena, chunk, ptr); - malloc_mutex_unlock(&arena->lock); - } } } +# endif /* JEMALLOC_ARENA_INLINE_B */ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/atomic.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/atomic.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/atomic.h (revision 235263) @@ -1,276 +1,304 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #define atomic_read_uint64(p) atomic_add_uint64(p, 0) #define atomic_read_uint32(p) atomic_add_uint32(p, 0) #define atomic_read_z(p) atomic_add_z(p, 0) #define atomic_read_u(p) atomic_add_u(p, 0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); uint32_t 
atomic_add_uint32(uint32_t *p, uint32_t x); uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); size_t atomic_add_z(size_t *p, size_t x); size_t atomic_sub_z(size_t *p, size_t x); unsigned atomic_add_u(unsigned *p, unsigned x); unsigned atomic_sub_u(unsigned *p, unsigned x); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) /******************************************************************************/ /* 64-bit operations. */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); +} #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); } # elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { asm volatile ( "lock; xaddq %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { x = (uint64_t)(-(int64_t)x); asm volatile ( "lock; xaddq %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } # elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { /* * atomic_fetchadd_64() doesn't exist, but we only ever use this * function on LP64 systems, so atomic_fetchadd_long() will do. */ assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); } # elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } # else # error "Missing implementation for 64-bit atomic operations" # endif #endif /******************************************************************************/ /* 32-bit operations.
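One subtlety in the _MSC_VER branch above: per the Win32 API, InterlockedExchangeAdd64() returns the addend's value from before the addition, whereas every other branch returns the post-update value, hence the compensating + x / - x. A 64-bit-Windows-only sketch of that return convention (not jemalloc code):

#ifdef _WIN32
#include <windows.h>
#include <assert.h>

int
main(void)
{
    volatile LONG64 v = 10;

    assert(InterlockedExchangeAdd64(&v, 5) == 10);  /* Prior value. */
    assert(v == 15);                                /* Updated in place. */
    return (0);
}
#else
int main(void) { return (0); }  /* Not applicable off Windows. */
#endif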
*/ #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); } #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); } #elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { asm volatile ( "lock; xaddl %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { x = (uint32_t)(-(int32_t)x); asm volatile ( "lock; xaddl %0, %1;" : "+r" (x), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (x); } #elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); } #elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); } #else # error "Missing implementation for 32-bit atomic operations" #endif /******************************************************************************/ /* size_t operations. */ JEMALLOC_INLINE size_t atomic_add_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE size_t atomic_sub_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } /******************************************************************************/ /* unsigned operations.
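The size_t and unsigned wrappers here just forward to whichever fixed-width primitive LG_SIZEOF_PTR / LG_SIZEOF_INT selects, and since every variant returns the new value, an add of zero doubles as an atomic read, which is exactly what the atomic_read_*() macros in the externs section rely on. A caller's-eye toy, assuming a GCC/Clang toolchain with the __sync builtins:

#include <assert.h>
#include <stdint.h>

/* Same shape as jemalloc's wrapper, reduced to the __sync branch. */
static inline uint32_t
toy_atomic_add_uint32(uint32_t *p, uint32_t x)
{
    return (__sync_add_and_fetch(p, x));
}

#define toy_atomic_read_uint32(p)   toy_atomic_add_uint32(p, 0)

int
main(void)
{
    uint32_t v = 0;

    assert(toy_atomic_add_uint32(&v, 5) == 5);  /* New value. */
    assert(toy_atomic_read_uint32(&v) == 5);    /* Add of 0 == read. */
    return (0);
}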
*/ JEMALLOC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } /******************************************************************************/ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/ctl.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/ctl.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/ctl.h (revision 235263) @@ -1,109 +1,112 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ctl_node_s ctl_node_t; +typedef struct ctl_named_node_s ctl_named_node_t; +typedef struct ctl_indexed_node_s ctl_indexed_node_t; typedef struct ctl_arena_stats_s ctl_arena_stats_t; typedef struct ctl_stats_s ctl_stats_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct ctl_node_s { bool named; - union { - struct { - const char *name; - /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - } named; - struct { - const ctl_node_t *(*index)(const size_t *, size_t, - size_t); - } indexed; - } u; - int (*ctl)(const size_t *, size_t, void *, size_t *, void *, - size_t); +}; + +struct ctl_named_node_s { + struct ctl_node_s node; + const char *name; + /* If (nchildren == 0), this is a terminal node. */ + unsigned nchildren; + const ctl_node_t *children; + int (*ctl)(const size_t *, size_t, void *, size_t *, + void *, size_t); +}; + +struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); }; struct ctl_arena_stats_s { bool initialized; unsigned nthreads; size_t pactive; size_t pdirty; arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; malloc_bin_stats_t bstats[NBINS]; malloc_large_stats_t *lstats; /* nlclasses elements. */ }; struct ctl_stats_s { size_t allocated; size_t active; size_t mapped; struct { size_t current; /* stats_chunks.curchunks */ uint64_t total; /* stats_chunks.nchunks */ size_t high; /* stats_chunks.highchunks */ } chunks; struct { size_t allocated; /* huge_allocated */ uint64_t nmalloc; /* huge_nmalloc */ uint64_t ndalloc; /* huge_ndalloc */ } huge; ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
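The ctl change above splits ctl_node_s's union of named/indexed payloads into ctl_named_node_s and ctl_indexed_node_s, each embedding the base ctl_node_s as its first member, so a generic node pointer can be downcast once the named flag is checked. A reduced sketch of the idiom; the toy types and the "epoch" string are invented, not jemalloc's:

#include <assert.h>
#include <stdbool.h>

struct node { bool named; };
struct named_node { struct node node; const char *name; };

static const char *
node_name(const struct node *n)
{
    /* Legal because the base struct is the first member of every
     * variant, so the two pointers coincide. */
    assert(n->named);
    return (((const struct named_node *)n)->name);
}

int
main(void)
{
    struct named_node n = { { true }, "epoch" };

    return (node_name(&n.node)[0] == 'e' ? 0 : 1);
}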
*/ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \ } \ } while (0) #define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf("<jemalloc>: Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ abort(); \ } \ } while (0) #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ "<jemalloc>: Failure in xmallctlbymib()\n"); \ abort(); \ } \ } while (0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h (revision 235263) @@ -1,882 +1,937 @@ #ifndef JEMALLOC_INTERNAL_H #define JEMALLOC_INTERNAL_H #include "libc_private.h" #include "namespace.h" -#include -#include -#include -#if !defined(SYS_write) && defined(__NR_write) -#define SYS_write __NR_write +#include <math.h> +#ifdef _WIN32 +# include <windows.h> +# define ENOENT ERROR_PATH_NOT_FOUND +# define EINVAL ERROR_BAD_ARGUMENTS +# define EAGAIN ERROR_OUTOFMEMORY +# define EPERM ERROR_WRITE_FAULT +# define EFAULT ERROR_INVALID_ADDRESS +# define ENOMEM ERROR_NOT_ENOUGH_MEMORY +# undef ERANGE +# define ERANGE ERROR_INVALID_DATA +#else +# include <sys/param.h> +# include <sys/mman.h> +# include <sys/syscall.h> +# if !defined(SYS_write) && defined(__NR_write) +# define SYS_write __NR_write +# endif +# include <sys/uio.h> +# include <pthread.h> +# include <errno.h> #endif -#include #include <sys/types.h> -#include -#include #include <limits.h> #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif -#include -#include #include <stdarg.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <stddef.h> #ifndef offsetof # define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) #endif #include <inttypes.h> #include <string.h> #include <strings.h> #include <ctype.h> -#include +#ifdef _MSC_VER +# include <io.h> +typedef intptr_t ssize_t; +# define PATH_MAX 1024 +# define STDERR_FILENO 2 +# define __func__ __FUNCTION__ +/* Disable warnings about deprecated system functions */ +# pragma warning(disable: 4996) +#else +# include <unistd.h> +#endif #include <fcntl.h> -#include -#include #include "un-namespace.h" #include "libc_private.h" #define JEMALLOC_NO_DEMANGLE #include "../jemalloc.h" #ifdef JEMALLOC_UTRACE #include <sys/ktrace.h> #endif #ifdef JEMALLOC_VALGRIND #include <valgrind/valgrind.h> #include <valgrind/memcheck.h> #endif #include "jemalloc/internal/private_namespace.h" #ifdef JEMALLOC_CC_SILENCE #define UNUSED JEMALLOC_ATTR(unused) #else #define UNUSED #endif static const bool config_debug = #ifdef JEMALLOC_DEBUG true #else false #endif ; static const bool config_dss = #ifdef JEMALLOC_DSS true #else false #endif ; static const bool config_fill = #ifdef JEMALLOC_FILL true #else
false #endif ; static const bool config_lazy_lock = #ifdef JEMALLOC_LAZY_LOCK true #else false #endif ; static const bool config_prof = #ifdef JEMALLOC_PROF true #else false #endif ; static const bool config_prof_libgcc = #ifdef JEMALLOC_PROF_LIBGCC true #else false #endif ; static const bool config_prof_libunwind = #ifdef JEMALLOC_PROF_LIBUNWIND true #else false #endif ; +static const bool config_mremap = +#ifdef JEMALLOC_MREMAP + true +#else + false +#endif + ; static const bool config_munmap = #ifdef JEMALLOC_MUNMAP true #else false #endif ; static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; static const bool config_tcache = #ifdef JEMALLOC_TCACHE true #else false #endif ; static const bool config_tls = #ifdef JEMALLOC_TLS true #else false #endif ; static const bool config_utrace = #ifdef JEMALLOC_UTRACE true #else false #endif ; static const bool config_valgrind = #ifdef JEMALLOC_VALGRIND true #else false #endif ; static const bool config_xmalloc = #ifdef JEMALLOC_XMALLOC true #else false #endif ; static const bool config_ivsalloc = #ifdef JEMALLOC_IVSALLOC true #else false #endif ; #ifdef JEMALLOC_ATOMIC9 #include <machine/atomic.h> #endif #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) #include <libkern/OSAtomic.h> #endif #ifdef JEMALLOC_ZONE #include <mach/mach_error.h> #include <mach/mach_init.h> #include <mach/vm_map.h> #include <malloc/malloc.h> #endif #define RB_COMPACT #include "jemalloc/internal/rb.h" #include "jemalloc/internal/qr.h" #include "jemalloc/internal/ql.h" /* * jemalloc can conceptually be broken into components (arena, tcache, etc.), * but there are circular dependencies that cannot be broken without * substantial performance degradation. In order to reduce the effect on * visual code flow, read the header files in multiple passes, with one of the * following cpp variables defined during each pass: * * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data * types. * JEMALLOC_H_STRUCTS : Data structures. * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. * JEMALLOC_H_INLINES : Inline functions. */ /******************************************************************************/ #define JEMALLOC_H_TYPES #define ALLOCM_LG_ALIGN_MASK ((int)0x3f) #define ZU(z) ((size_t)z) #ifndef __DECONST # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) #endif #ifdef JEMALLOC_DEBUG /* Disable inlining to make debugging easier. */ # define JEMALLOC_INLINE # define inline #else # define JEMALLOC_ENABLE_INLINE # define JEMALLOC_INLINE static inline +# ifdef _MSC_VER +# define inline _inline +# endif #endif /* Smallest size class to support. */ #define LG_TINY_MIN 3 #define TINY_MIN (1U << LG_TINY_MIN) /* * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size * classes.
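The multi-pass scheme the comment above describes can be reduced to a compilable toy. The file multipass.c below is invented for illustration (the EXTERNS pass is elided); it includes itself once per pass, the way jemalloc_internal.h re-includes each internal header under JEMALLOC_H_TYPES, then _STRUCTS, then _EXTERNS, then _INLINES, so types land before structs and structs before the inlines that use them:

/* multipass.c: toy of the pass-per-macro include scheme. */
#ifndef MULTIPASS_DRIVER
#define MULTIPASS_DRIVER

#define JEMALLOC_H_TYPES
#include "multipass.c"
#undef JEMALLOC_H_TYPES

#define JEMALLOC_H_STRUCTS
#include "multipass.c"
#undef JEMALLOC_H_STRUCTS

#define JEMALLOC_H_INLINES
#include "multipass.c"
#undef JEMALLOC_H_INLINES

int
main(void)
{
    example_t e = { 42 };

    return (example_count(&e) == 42 ? 0 : 1);
}

#else
/* The "header" sections, emitted one pass at a time. */
# ifdef JEMALLOC_H_TYPES
typedef struct example_s example_t;
# endif
# ifdef JEMALLOC_H_STRUCTS
struct example_s { int count; };
# endif
# ifdef JEMALLOC_H_INLINES
static inline int
example_count(example_t *e)
{
    return (e->count);
}
# endif
#endif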
*/ #ifndef LG_QUANTUM -# ifdef __i386__ +# if (defined(__i386__) || defined(_M_IX86)) # define LG_QUANTUM 4 # endif # ifdef __ia64__ # define LG_QUANTUM 4 # endif # ifdef __alpha__ # define LG_QUANTUM 4 # endif # ifdef __sparc64__ # define LG_QUANTUM 4 # endif -# if (defined(__amd64__) || defined(__x86_64__)) +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) # define LG_QUANTUM 4 # endif # ifdef __arm__ # define LG_QUANTUM 3 # endif # ifdef __mips__ # define LG_QUANTUM 3 # endif # ifdef __powerpc__ # define LG_QUANTUM 4 # endif # ifdef __s390x__ # define LG_QUANTUM 4 # endif # ifdef __SH4__ # define LG_QUANTUM 4 # endif # ifdef __tile__ # define LG_QUANTUM 4 # endif # ifndef LG_QUANTUM # error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" # endif #endif #define QUANTUM ((size_t)(1U << LG_QUANTUM)) #define QUANTUM_MASK (QUANTUM - 1) /* Return the smallest quantum multiple that is >= a. */ #define QUANTUM_CEILING(a) \ (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) #define LONG ((size_t)(1U << LG_SIZEOF_LONG)) #define LONG_MASK (LONG - 1) /* Return the smallest long multiple that is >= a. */ #define LONG_CEILING(a) \ (((a) + LONG_MASK) & ~LONG_MASK) #define SIZEOF_PTR (1U << LG_SIZEOF_PTR) #define PTR_MASK (SIZEOF_PTR - 1) /* Return the smallest (void *) multiple that is >= a. */ #define PTR_CEILING(a) \ (((a) + PTR_MASK) & ~PTR_MASK) /* * Maximum size of L1 cache line. This is used to avoid cache line aliasing. * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. */ #define LG_CACHELINE 6 -#define CACHELINE ((size_t)(1U << LG_CACHELINE)) +#define CACHELINE 64 #define CACHELINE_MASK (CACHELINE - 1) /* Return the smallest cacheline multiple that is >= s. */ #define CACHELINE_CEILING(s) \ (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) /* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ #ifdef PAGE_MASK # undef PAGE_MASK #endif #define LG_PAGE STATIC_PAGE_SHIFT #define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) #define PAGE_MASK ((size_t)(PAGE - 1)) /* Return the smallest pagesize multiple that is >= s. */ #define PAGE_CEILING(s) \ (((s) + PAGE_MASK) & ~PAGE_MASK) /* Return the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2BASE(a, alignment) \ ((void *)((uintptr_t)(a) & (-(alignment)))) /* Return the offset between a and the nearest aligned address at or below a. */ #define ALIGNMENT_ADDR2OFFSET(a, alignment) \ ((size_t)((uintptr_t)(a) & (alignment - 1))) /* Return the smallest alignment multiple that is >= s. */ #define ALIGNMENT_CEILING(s, alignment) \ (((s) + (alignment - 1)) & (-(alignment))) +/* Declare a variable length array */ +#if __STDC_VERSION__ < 199901L +# ifdef _MSC_VER +# include <malloc.h> +# define alloca _alloca +# else +# include <alloca.h> +# endif +# define VARIABLE_ARRAY(type, name, count) \ + type *name = alloca(sizeof(type) * count) +#else +# define VARIABLE_ARRAY(type, name, count) type name[count] +#endif + #ifdef JEMALLOC_VALGRIND /* * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions * so that when Valgrind reports errors, there are no extra stack frames * in the backtraces. * * The size that is reported to valgrind must be consistent through a chain of * malloc..realloc..realloc calls. Request size isn't recorded anywhere in * jemalloc, so it is critical that all callers of these macros provide usize * rather than request size.
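All of the *_CEILING() helpers above share one power-of-two round-up identity: add the mask, then clear it. A standalone check, assuming STATIC_PAGE_SHIFT of 12 (4 KiB pages) since the real value comes from the configure script:

#include <assert.h>
#include <stddef.h>

#define STATIC_PAGE_SHIFT   12
#define PAGE        ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK   ((size_t)(PAGE - 1))
#define PAGE_CEILING(s)     (((s) + PAGE_MASK) & ~PAGE_MASK)
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))

int
main(void)
{
    assert(PAGE_CEILING((size_t)1) == PAGE);
    assert(PAGE_CEILING(PAGE) == PAGE);         /* Already aligned. */
    assert(PAGE_CEILING(PAGE + 1) == 2 * PAGE);
    /* For power-of-2 alignments, -(alignment) is the high-bits mask. */
    assert(ALIGNMENT_CEILING((size_t)100, (size_t)64) == 128);
    return (0);
}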
As a result, buffer overflow detection is * technically weakened for the standard API, though it is generally accepted * practice to consider any extra bytes reported by malloc_usable_size() as * usable space. */ #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ if (config_valgrind && opt_valgrind && cond) \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ } while (0) #define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ old_rzsize, zero) do { \ if (config_valgrind && opt_valgrind) { \ size_t rzsize = p2rz(ptr); \ \ if (ptr == old_ptr) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, rzsize); \ if (zero && old_usize < usize) { \ VALGRIND_MAKE_MEM_DEFINED( \ (void *)((uintptr_t)ptr + \ old_usize), usize - old_usize); \ } \ } else { \ if (old_ptr != NULL) { \ VALGRIND_FREELIKE_BLOCK(old_ptr, \ old_rzsize); \ } \ if (ptr != NULL) { \ size_t copy_size = (old_usize < usize) \ ? old_usize : usize; \ size_t tail_size = usize - copy_size; \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ rzsize, false); \ if (copy_size > 0) { \ VALGRIND_MAKE_MEM_DEFINED(ptr, \ copy_size); \ } \ if (zero && tail_size > 0) { \ VALGRIND_MAKE_MEM_DEFINED( \ (void *)((uintptr_t)ptr + \ copy_size), tail_size); \ } \ } \ } \ } \ } while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ if (config_valgrind && opt_valgrind) \ VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ } while (0) #else #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) #define VALGRIND_FREELIKE_BLOCK(addr, rzB) #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) #define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ old_rzsize, zero) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) #endif #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_TYPES /******************************************************************************/ #define JEMALLOC_H_STRUCTS #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" 
#include "jemalloc/internal/prof.h" typedef struct { uint64_t allocated; uint64_t deallocated; } thread_allocated_t; /* * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro * argument. */ #define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0}) #undef JEMALLOC_H_STRUCTS /******************************************************************************/ #define JEMALLOC_H_EXTERNS extern bool opt_abort; extern bool opt_junk; extern size_t opt_quarantine; extern bool opt_redzone; extern bool opt_utrace; extern bool opt_valgrind; extern bool opt_xmalloc; extern bool opt_zero; extern size_t opt_narenas; /* Number of CPUs. */ extern unsigned ncpus; extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */ /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. */ extern arena_t **arenas; extern unsigned narenas; arena_t *arenas_extend(unsigned ind); void arenas_cleanup(void *arg); arena_t *choose_arena_hard(void); void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/tcache.h" #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_EXTERNS /******************************************************************************/ #define JEMALLOC_H_INLINES #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *) size_t s2u(size_t size); size_t sa2u(size_t size, size_t alignment); arena_t *choose_arena(arena_t *arena); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) /* * Map of pthread_self() --> arenas[???], used for selecting an arena to use * for allocations. */ malloc_tsd_externs(arenas, arena_t *) malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup) /* * Compute usable size that would result from allocating an object with the * specified size. */ JEMALLOC_INLINE size_t s2u(size_t size) { if (size <= SMALL_MAXCLASS) return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); if (size <= arena_maxclass) return (PAGE_CEILING(size)); return (CHUNK_CEILING(size)); } /* * Compute usable size that would result from allocating an object with the * specified size and alignment. 
*/ JEMALLOC_INLINE size_t sa2u(size_t size, size_t alignment) { size_t usize; assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* * Round size up to the nearest multiple of alignment. * * This done, we can take advantage of the fact that for each small * size class, every object is aligned at the smallest power of two * that is non-zero in the base two representation of the size. For * example: * * Size | Base 2 | Minimum alignment * -----+----------+------------------ * 96 | 1100000 | 32 * 144 | 10100000 | 32 * 192 | 11000000 | 64 */ usize = ALIGNMENT_CEILING(size, alignment); /* * (usize < size) protects against the combination of maximal * alignment and size greater than maximal alignment. */ if (usize < size) { /* size_t overflow. */ return (0); } if (usize <= arena_maxclass && alignment <= PAGE) { if (usize <= SMALL_MAXCLASS) return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); return (PAGE_CEILING(usize)); } else { size_t run_size; /* * We can't achieve subpage alignment, so round up alignment * permanently; it makes later calculations simpler. */ alignment = PAGE_CEILING(alignment); usize = PAGE_CEILING(size); /* * (usize < size) protects against very large sizes within * PAGE of SIZE_T_MAX. * * (usize + alignment < usize) protects against the * combination of maximal alignment and usize large enough * to cause overflow. This is similar to the first overflow * check above, but it needs to be repeated due to the new * usize value, which may now be *equal* to maximal * alignment, whereas before we only detected overflow if the * original size was *greater* than maximal alignment. */ if (usize < size || usize + alignment < usize) { /* size_t overflow. */ return (0); } /* * Calculate the size of the over-size run that arena_palloc() * would need to allocate in order to guarantee the alignment. * If the run wouldn't fit within a chunk, round up to a huge * allocation size. */ run_size = usize + alignment - PAGE; if (run_size <= arena_maxclass) return (PAGE_CEILING(usize)); return (CHUNK_CEILING(usize)); } } /* Choose an arena based on a per-thread value. */ JEMALLOC_INLINE arena_t * choose_arena(arena_t *arena) { arena_t *ret; if (arena != NULL) return (arena); if ((ret = *arenas_tsd_get()) == NULL) { ret = choose_arena_hard(); assert(ret != NULL); } return (ret); } #endif #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/rtree.h" +/* + * Include arena.h twice in order to resolve circular dependencies with + * tcache.h. 
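Both overflow checks in sa2u() above boil down to the same observation: if rounding up wrapped past zero, the result is smaller than the input. Isolated below as align_up_checked(), a hypothetical helper (not jemalloc's) that applies the identical (usize < size) test and reports failure as 0, as sa2u() does:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))

static size_t
align_up_checked(size_t size, size_t alignment)
{
    size_t usize = ALIGNMENT_CEILING(size, alignment);

    /* Wrapped past 0: the rounded value is below the request. */
    if (usize < size)
        return (0);
    return (usize);
}

int
main(void)
{
    assert(align_up_checked(100, 64) == 128);
    assert(align_up_checked(SIZE_MAX - 3, 4096) == 0);  /* Overflow. */
    return (0);
}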
+ */ +#define JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_A #include "jemalloc/internal/tcache.h" +#define JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #ifndef JEMALLOC_ENABLE_INLINE void *imalloc(size_t size); void *icalloc(size_t size); void *ipalloc(size_t usize, size_t alignment, bool zero); size_t isalloc(const void *ptr, bool demote); size_t ivsalloc(const void *ptr, bool demote); size_t u2rz(size_t usize); size_t p2rz(const void *ptr); void idalloc(void *ptr); void iqalloc(void *ptr); void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool no_move); malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_INLINE void * imalloc(size_t size) { assert(size != 0); if (size <= arena_maxclass) return (arena_malloc(NULL, size, false, true)); else return (huge_malloc(size, false)); } JEMALLOC_INLINE void * icalloc(size_t size) { if (size <= arena_maxclass) return (arena_malloc(NULL, size, true, true)); else return (huge_malloc(size, true)); } JEMALLOC_INLINE void * ipalloc(size_t usize, size_t alignment, bool zero) { void *ret; assert(usize != 0); assert(usize == sa2u(usize, alignment)); if (usize <= arena_maxclass && alignment <= PAGE) ret = arena_malloc(NULL, usize, zero, true); else { if (usize <= arena_maxclass) { ret = arena_palloc(choose_arena(NULL), usize, alignment, zero); } else if (alignment <= chunksize) ret = huge_malloc(usize, zero); else ret = huge_palloc(usize, alignment, zero); } assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); return (ret); } /* * Typical usage: * void *ptr = [...] * size_t sz = isalloc(ptr, config_prof); */ JEMALLOC_INLINE size_t isalloc(const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; assert(ptr != NULL); /* Demotion only makes sense if config_prof is true. */ assert(config_prof || demote == false); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) ret = arena_salloc(ptr, demote); else ret = huge_salloc(ptr); return (ret); } JEMALLOC_INLINE size_t ivsalloc(const void *ptr, bool demote) { /* Return 0 if ptr is not within a chunk managed by jemalloc. */ if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) return (0); return (isalloc(ptr, demote)); } JEMALLOC_INLINE size_t u2rz(size_t usize) { size_t ret; if (usize <= SMALL_MAXCLASS) { size_t binind = SMALL_SIZE2BIN(usize); ret = arena_bin_info[binind].redzone_size; } else ret = 0; return (ret); } JEMALLOC_INLINE size_t p2rz(const void *ptr) { size_t usize = isalloc(ptr, false); return (u2rz(usize)); } JEMALLOC_INLINE void idalloc(void *ptr) { arena_chunk_t *chunk; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) arena_dalloc(chunk->arena, chunk, ptr, true); else huge_dalloc(ptr, true); } JEMALLOC_INLINE void iqalloc(void *ptr) { if (config_fill && opt_quarantine) quarantine(ptr); else idalloc(ptr); } JEMALLOC_INLINE void * iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, bool no_move) { void *ret; size_t oldsize; assert(ptr != NULL); assert(size != 0); oldsize = isalloc(ptr, config_prof); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { size_t usize, copysize; /* * Existing object alignment is inadequate; allocate new space * and copy. 
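The in-place test at the top of iralloc() above is a plain low-bits mask against the requested power-of-two alignment. A minimal sketch (is_aligned() is an invented name):

#include <assert.h>
#include <stdint.h>

static int
is_aligned(const void *ptr, uintptr_t alignment)
{
    /* alignment must be a power of two, as iralloc() assumes. */
    assert((alignment & (alignment - 1)) == 0);
    return (((uintptr_t)ptr & (alignment - 1)) == 0);
}

int
main(void)
{
    uintptr_t p = 0x1040;   /* 64-byte aligned toy address. */

    assert(is_aligned((void *)p, 64));
    assert(!is_aligned((void *)(p + 8), 64));
    return (0);
}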
*/ if (no_move) return (NULL); usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); ret = ipalloc(usize, alignment, zero); if (ret == NULL) { if (extra == 0) return (NULL); /* Try again, without extra this time. */ usize = sa2u(size, alignment); if (usize == 0) return (NULL); ret = ipalloc(usize, alignment, zero); if (ret == NULL) return (NULL); } /* * Copy at most size bytes (not size+extra), since the caller * has no expectation that the extra bytes will be reliably * preserved. */ copysize = (size < oldsize) ? size : oldsize; memcpy(ret, ptr, copysize); iqalloc(ptr); return (ret); } if (no_move) { if (size <= arena_maxclass) { return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero)); } else { return (huge_ralloc_no_move(ptr, oldsize, size, extra)); } } else { if (size + extra <= arena_maxclass) { return (arena_ralloc(ptr, oldsize, size, extra, alignment, zero, true)); } else { return (huge_ralloc(ptr, oldsize, size, extra, alignment, zero)); } } } malloc_tsd_externs(thread_allocated, thread_allocated_t) malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t, THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) #endif #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_INLINES /******************************************************************************/ #endif /* JEMALLOC_INTERNAL_H */ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/mutex.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/mutex.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/mutex.h (revision 235263) @@ -1,88 +1,96 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct malloc_mutex_s malloc_mutex_t; -#ifdef JEMALLOC_OSSPIN -#define MALLOC_MUTEX_INITIALIZER {0} +#ifdef _WIN32 +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_INITIALIZER {0} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) -#define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} +# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} #else # if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP # define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} # else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} # endif #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct malloc_mutex_s { -#ifdef JEMALLOC_OSSPIN +#ifdef _WIN32 + CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #endif bool malloc_mutex_init(malloc_mutex_t *mutex); void malloc_mutex_prefork(malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); void malloc_mutex_postfork_child(malloc_mutex_t *mutex); bool mutex_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef 
JEMALLOC_ENABLE_INLINE void malloc_mutex_lock(malloc_mutex_t *mutex); void malloc_mutex_unlock(malloc_mutex_t *mutex); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE void malloc_mutex_lock(malloc_mutex_t *mutex) { if (isthreaded) { -#ifdef JEMALLOC_OSSPIN +#ifdef _WIN32 + EnterCriticalSection(&mutex->lock); +#elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mutex->lock); #else pthread_mutex_lock(&mutex->lock); #endif } } JEMALLOC_INLINE void malloc_mutex_unlock(malloc_mutex_t *mutex) { if (isthreaded) { -#ifdef JEMALLOC_OSSPIN +#ifdef _WIN32 + LeaveCriticalSection(&mutex->lock); +#elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mutex->lock); #else pthread_mutex_unlock(&mutex->lock); #endif } } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/private_namespace.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/private_namespace.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/private_namespace.h (revision 235263) @@ -1,316 +1,338 @@ #define a0calloc JEMALLOC_N(a0calloc) #define a0free JEMALLOC_N(a0free) #define a0malloc JEMALLOC_N(a0malloc) #define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) #define arena_bin_index JEMALLOC_N(arena_bin_index) #define arena_bin_info JEMALLOC_N(arena_bin_info) #define arena_boot JEMALLOC_N(arena_boot) #define arena_dalloc JEMALLOC_N(arena_dalloc) #define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) +#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked) #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) +#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) +#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) #define arena_malloc JEMALLOC_N(arena_malloc) #define arena_malloc_large JEMALLOC_N(arena_malloc_large) #define arena_malloc_small JEMALLOC_N(arena_malloc_small) +#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) +#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) +#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) +#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) +#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) +#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) +#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) +#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) +#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) +#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) +#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) +#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) +#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) +#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) +#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) +#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) +#define arena_mapp_get JEMALLOC_N(arena_mapp_get) #define arena_maxclass JEMALLOC_N(arena_maxclass) #define arena_new JEMALLOC_N(arena_new) #define arena_palloc JEMALLOC_N(arena_palloc) #define 
arena_postfork_child JEMALLOC_N(arena_postfork_child) #define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) #define arena_prefork JEMALLOC_N(arena_prefork) #define arena_prof_accum JEMALLOC_N(arena_prof_accum) #define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) #define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) #define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) +#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) #define arena_purge_all JEMALLOC_N(arena_purge_all) #define arena_ralloc JEMALLOC_N(arena_ralloc) #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) #define arena_run_regind JEMALLOC_N(arena_run_regind) #define arena_salloc JEMALLOC_N(arena_salloc) #define arena_stats_merge JEMALLOC_N(arena_stats_merge) #define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) #define arenas JEMALLOC_N(arenas) #define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index) #define arenas_booted JEMALLOC_N(arenas_booted) #define arenas_cleanup JEMALLOC_N(arenas_cleanup) #define arenas_extend JEMALLOC_N(arenas_extend) #define arenas_initialized JEMALLOC_N(arenas_initialized) #define arenas_lock JEMALLOC_N(arenas_lock) #define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index) #define arenas_tls JEMALLOC_N(arenas_tls) #define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) #define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper) #define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) #define arenas_tsd_set JEMALLOC_N(arenas_tsd_set) #define atomic_add_u JEMALLOC_N(atomic_add_u) #define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) #define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) #define atomic_add_z JEMALLOC_N(atomic_add_z) #define atomic_sub_u JEMALLOC_N(atomic_sub_u) #define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) #define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) #define atomic_sub_z JEMALLOC_N(atomic_sub_z) #define base_alloc JEMALLOC_N(base_alloc) #define base_boot JEMALLOC_N(base_boot) #define base_calloc JEMALLOC_N(base_calloc) #define base_node_alloc JEMALLOC_N(base_node_alloc) #define base_node_dealloc JEMALLOC_N(base_node_dealloc) #define base_postfork_child JEMALLOC_N(base_postfork_child) #define base_postfork_parent JEMALLOC_N(base_postfork_parent) #define base_prefork JEMALLOC_N(base_prefork) #define bitmap_full JEMALLOC_N(bitmap_full) #define bitmap_get JEMALLOC_N(bitmap_get) #define bitmap_info_init JEMALLOC_N(bitmap_info_init) #define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) #define bitmap_init JEMALLOC_N(bitmap_init) #define bitmap_set JEMALLOC_N(bitmap_set) #define bitmap_sfu JEMALLOC_N(bitmap_sfu) #define bitmap_size JEMALLOC_N(bitmap_size) #define bitmap_unset JEMALLOC_N(bitmap_unset) #define bt_init JEMALLOC_N(bt_init) #define buferror JEMALLOC_N(buferror) #define choose_arena JEMALLOC_N(choose_arena) #define choose_arena_hard JEMALLOC_N(choose_arena_hard) #define chunk_alloc JEMALLOC_N(chunk_alloc) #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) #define chunk_boot JEMALLOC_N(chunk_boot) #define chunk_dealloc JEMALLOC_N(chunk_dealloc) #define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap) #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) #define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) #define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) #define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) #define chunk_in_dss JEMALLOC_N(chunk_in_dss) #define chunk_npages 
JEMALLOC_N(chunk_npages) #define chunks_mtx JEMALLOC_N(chunks_mtx) #define chunks_rtree JEMALLOC_N(chunks_rtree) #define chunksize JEMALLOC_N(chunksize) #define chunksize_mask JEMALLOC_N(chunksize_mask) #define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) #define ckh_count JEMALLOC_N(ckh_count) #define ckh_delete JEMALLOC_N(ckh_delete) #define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) #define ckh_insert JEMALLOC_N(ckh_insert) #define ckh_isearch JEMALLOC_N(ckh_isearch) #define ckh_iter JEMALLOC_N(ckh_iter) #define ckh_new JEMALLOC_N(ckh_new) #define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) #define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) #define ckh_rebuild JEMALLOC_N(ckh_rebuild) #define ckh_remove JEMALLOC_N(ckh_remove) #define ckh_search JEMALLOC_N(ckh_search) #define ckh_string_hash JEMALLOC_N(ckh_string_hash) #define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) #define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) #define ckh_try_insert JEMALLOC_N(ckh_try_insert) #define ctl_boot JEMALLOC_N(ctl_boot) #define ctl_bymib JEMALLOC_N(ctl_bymib) #define ctl_byname JEMALLOC_N(ctl_byname) #define ctl_nametomib JEMALLOC_N(ctl_nametomib) #define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) #define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) #define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) #define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) #define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) #define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) #define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) #define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) #define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) #define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) #define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) #define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) #define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) #define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) #define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) #define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) #define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) #define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) #define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) #define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) #define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) #define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) #define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) #define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) #define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) #define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) #define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) #define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) #define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) #define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) #define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) #define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) #define hash JEMALLOC_N(hash) #define huge_allocated JEMALLOC_N(huge_allocated) #define huge_boot 
JEMALLOC_N(huge_boot) #define huge_dalloc JEMALLOC_N(huge_dalloc) #define huge_malloc JEMALLOC_N(huge_malloc) #define huge_mtx JEMALLOC_N(huge_mtx) #define huge_ndalloc JEMALLOC_N(huge_ndalloc) #define huge_nmalloc JEMALLOC_N(huge_nmalloc) #define huge_palloc JEMALLOC_N(huge_palloc) #define huge_postfork_child JEMALLOC_N(huge_postfork_child) #define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) #define huge_prefork JEMALLOC_N(huge_prefork) #define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) #define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) #define huge_ralloc JEMALLOC_N(huge_ralloc) #define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) #define huge_salloc JEMALLOC_N(huge_salloc) #define iallocm JEMALLOC_N(iallocm) #define icalloc JEMALLOC_N(icalloc) #define idalloc JEMALLOC_N(idalloc) #define imalloc JEMALLOC_N(imalloc) #define ipalloc JEMALLOC_N(ipalloc) #define iqalloc JEMALLOC_N(iqalloc) #define iralloc JEMALLOC_N(iralloc) #define isalloc JEMALLOC_N(isalloc) #define ivsalloc JEMALLOC_N(ivsalloc) #define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) #define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) #define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) #define malloc_cprintf JEMALLOC_N(malloc_cprintf) #define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) #define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) #define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) #define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) #define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) #define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) #define malloc_printf JEMALLOC_N(malloc_printf) #define malloc_snprintf JEMALLOC_N(malloc_snprintf) #define malloc_strtoumax JEMALLOC_N(malloc_strtoumax) #define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot) #define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) #define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) #define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) #define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) #define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) #define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) #define malloc_write JEMALLOC_N(malloc_write) #define map_bias JEMALLOC_N(map_bias) #define mb_write JEMALLOC_N(mb_write) #define mutex_boot JEMALLOC_N(mutex_boot) #define narenas JEMALLOC_N(narenas) #define ncpus JEMALLOC_N(ncpus) #define nhbins JEMALLOC_N(nhbins) #define opt_abort JEMALLOC_N(opt_abort) #define opt_junk JEMALLOC_N(opt_junk) #define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) #define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) #define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) #define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) #define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) #define opt_narenas JEMALLOC_N(opt_narenas) #define opt_prof JEMALLOC_N(opt_prof) #define opt_prof_accum JEMALLOC_N(opt_prof_accum) #define opt_prof_active JEMALLOC_N(opt_prof_active) #define opt_prof_final JEMALLOC_N(opt_prof_final) #define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) #define opt_prof_leak JEMALLOC_N(opt_prof_leak) #define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) #define opt_quarantine JEMALLOC_N(opt_quarantine) #define opt_redzone JEMALLOC_N(opt_redzone) #define opt_stats_print JEMALLOC_N(opt_stats_print) #define opt_tcache JEMALLOC_N(opt_tcache) #define opt_utrace JEMALLOC_N(opt_utrace) #define opt_valgrind JEMALLOC_N(opt_valgrind) #define opt_xmalloc 
JEMALLOC_N(opt_xmalloc) #define opt_zero JEMALLOC_N(opt_zero) #define p2rz JEMALLOC_N(p2rz) #define pages_purge JEMALLOC_N(pages_purge) #define pow2_ceil JEMALLOC_N(pow2_ceil) #define prof_backtrace JEMALLOC_N(prof_backtrace) #define prof_boot0 JEMALLOC_N(prof_boot0) #define prof_boot1 JEMALLOC_N(prof_boot1) #define prof_boot2 JEMALLOC_N(prof_boot2) #define prof_ctx_get JEMALLOC_N(prof_ctx_get) #define prof_ctx_set JEMALLOC_N(prof_ctx_set) #define prof_free JEMALLOC_N(prof_free) #define prof_gdump JEMALLOC_N(prof_gdump) #define prof_idump JEMALLOC_N(prof_idump) #define prof_interval JEMALLOC_N(prof_interval) #define prof_lookup JEMALLOC_N(prof_lookup) #define prof_malloc JEMALLOC_N(prof_malloc) #define prof_mdump JEMALLOC_N(prof_mdump) -#define prof_lookup JEMALLOC_N(prof_lookup) #define prof_promote JEMALLOC_N(prof_promote) #define prof_realloc JEMALLOC_N(prof_realloc) #define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) #define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) #define prof_tdata_booted JEMALLOC_N(prof_tdata_booted) #define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) +#define prof_tdata_get JEMALLOC_N(prof_tdata_get) #define prof_tdata_init JEMALLOC_N(prof_tdata_init) #define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) #define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) #define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) #define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) #define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) #define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set) #define quarantine JEMALLOC_N(quarantine) #define quarantine_boot JEMALLOC_N(quarantine_boot) #define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot) #define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper) #define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get) #define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set) #define register_zone JEMALLOC_N(register_zone) #define rtree_get JEMALLOC_N(rtree_get) #define rtree_get_locked JEMALLOC_N(rtree_get_locked) #define rtree_new JEMALLOC_N(rtree_new) #define rtree_set JEMALLOC_N(rtree_set) #define s2u JEMALLOC_N(s2u) #define sa2u JEMALLOC_N(sa2u) #define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index) #define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index) #define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index) #define stats_cactive JEMALLOC_N(stats_cactive) #define stats_cactive_add JEMALLOC_N(stats_cactive_add) #define stats_cactive_get JEMALLOC_N(stats_cactive_get) #define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) #define stats_chunks JEMALLOC_N(stats_chunks) #define stats_print JEMALLOC_N(stats_print) #define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) #define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) #define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) #define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) #define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) #define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate) #define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) #define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) #define tcache_bin_info JEMALLOC_N(tcache_bin_info) #define tcache_boot0 JEMALLOC_N(tcache_boot0) #define tcache_boot1 JEMALLOC_N(tcache_boot1) #define tcache_booted JEMALLOC_N(tcache_booted) #define tcache_create JEMALLOC_N(tcache_create) #define tcache_dalloc_large 
JEMALLOC_N(tcache_dalloc_large) #define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) #define tcache_destroy JEMALLOC_N(tcache_destroy) #define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted) #define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) #define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) #define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) #define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) #define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) #define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) #define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) #define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) #define tcache_event JEMALLOC_N(tcache_event) -#define tcache_initialized JEMALLOC_N(tcache_initialized) +#define tcache_event_hard JEMALLOC_N(tcache_event_hard) #define tcache_flush JEMALLOC_N(tcache_flush) #define tcache_get JEMALLOC_N(tcache_get) +#define tcache_initialized JEMALLOC_N(tcache_initialized) #define tcache_maxclass JEMALLOC_N(tcache_maxclass) -#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) #define tcache_salloc JEMALLOC_N(tcache_salloc) +#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) #define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) #define tcache_tls JEMALLOC_N(tcache_tls) #define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) #define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) #define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) #define tcache_tsd_set JEMALLOC_N(tcache_tsd_set) #define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) #define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) #define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) #define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) #define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) #define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) #define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set) #define u2rz JEMALLOC_N(u2rz) Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/prof.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/prof.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/prof.h (revision 235263) @@ -1,536 +1,578 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct prof_bt_s prof_bt_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_thr_cnt_s prof_thr_cnt_t; typedef struct prof_ctx_s prof_ctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #define PROF_PREFIX_DEFAULT "jeprof" #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Maximum number of backtraces to store in each per thread LRU cache. */ #define PROF_TCMAX 1024 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). 
*/ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all ctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct prof_bt_s { /* Backtrace, stored as len program counters. */ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned nignore; unsigned max; } prof_unwind_data_t; #endif struct prof_cnt_s { /* * Profiling counters. An allocation/deallocation pair can operate on * different prof_thr_cnt_t objects that are linked into the same * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go * negative. In principle it is possible for the *bytes counters to * overflow/underflow, but a general solution would require something * like 128-bit counters; this implementation doesn't bother to solve * that problem. */ int64_t curobjs; int64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; struct prof_thr_cnt_s { /* Linkage into prof_ctx_t's cnts_ql. */ ql_elm(prof_thr_cnt_t) cnts_link; /* Linkage into thread's LRU. */ ql_elm(prof_thr_cnt_t) lru_link; /* * Associated context. If a thread frees an object that it did not * allocate, it is possible that the context is not cached in the * thread's hash table, in which case it must be able to look up the * context, insert a new prof_thr_cnt_t into the thread's hash table, * and link it into the prof_ctx_t's cnts_ql. */ prof_ctx_t *ctx; /* * Threads use memory barriers to update the counters. Since there is * only ever one writer, the only challenge is for the reader to get a * consistent read of the counters. * * The writer uses this series of operations: * * 1) Increment epoch to an odd number. * 2) Update counters. * 3) Increment epoch to an even number. * * The reader must assure 1) that the epoch is even while it reads the * counters, and 2) that the epoch doesn't change between the time it * starts and finishes reading the counters. */ unsigned epoch; /* Profiling counters. */ prof_cnt_t cnts; }; struct prof_ctx_s { /* Associated backtrace. */ prof_bt_t *bt; - /* Protects cnt_merged and cnts_ql. */ + /* Protects nlimbo, cnt_merged, and cnts_ql. */ malloc_mutex_t *lock; + /* + * Number of threads that currently cause this ctx to be in a state of + * limbo due to one of: + * - Initializing per thread counters associated with this ctx. + * - Preparing to destroy this ctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * ctx. + */ + unsigned nlimbo; + /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* When threads exit, they merge their stats into cnt_merged. */ prof_cnt_t cnt_merged; /* * List of profile counters, one for each thread that has allocated in * this context. */ ql_head(prof_thr_cnt_t) cnts_ql; }; struct prof_tdata_s { /* * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a * cache of backtraces, with associated thread-specific prof_thr_cnt_t * objects. 
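The odd/even epoch scheme described above is, in effect, a sequence lock with a single writer per prof_thr_cnt_t. A minimal reader sketch follows, assuming a hypothetical read-side barrier mb_read() (this revision only provides mb_write()); it is an editor's illustration, not code from the diff:

static prof_cnt_t
prof_cnt_snapshot(const prof_thr_cnt_t *cnt)
{
	prof_cnt_t snap;
	unsigned epoch;

	do {
		epoch = cnt->epoch;
		mb_read();		/* Hypothetical read barrier. */
		snap = cnt->cnts;	/* Copy all four counters. */
		mb_read();
	} while ((epoch & 1) != 0 || epoch != cnt->epoch);
	return (snap);
}

The loop retries whenever the reader catches the writer mid-update (odd epoch) or the epoch advanced while the counters were being copied.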
Other threads may read the prof_thr_cnt_t contents, but no * others will ever write them. * * Upon thread exit, the thread must merge all the prof_thr_cnt_t * counter data into the associated prof_ctx_t objects, and unlink/free * the prof_thr_cnt_t objects. */ ckh_t bt2cnt; /* LRU for contents of bt2cnt. */ ql_head(prof_thr_cnt_t) lru_ql; /* Backtrace vector, used for calls to prof_backtrace(). */ void **vec; /* Sampling state. */ uint64_t prng_state; uint64_t threshold; uint64_t accum; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_prof; /* * Even if opt_prof is true, sampling can be temporarily disabled by setting * opt_prof_active to false. No locking is used when updating opt_prof_active, * so there are no guarantees regarding how long it will take for all threads * to notice state changes. */ extern bool opt_prof_active; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern char opt_prof_prefix[PATH_MAX + 1]; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). */ extern uint64_t prof_interval; /* * If true, promote small sampled objects to large objects, since small run * headers do not have embedded profile context pointers. */ extern bool prof_promote; void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt, unsigned nignore); prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); void prof_idump(void); bool prof_mdump(const char *filename); void prof_gdump(void); prof_tdata_t *prof_tdata_init(void); void prof_tdata_cleanup(void *arg); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #define PROF_ALLOC_PREP(nignore, size, ret) do { \ prof_tdata_t *prof_tdata; \ prof_bt_t bt; \ \ assert(size == s2u(size)); \ \ - prof_tdata = *prof_tdata_tsd_get(); \ - if (prof_tdata == NULL) { \ - prof_tdata = prof_tdata_init(); \ - if (prof_tdata == NULL) { \ + prof_tdata = prof_tdata_get(); \ + if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \ + if (prof_tdata != NULL) \ + ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ + else \ ret = NULL; \ - break; \ - } \ + break; \ } \ \ if (opt_prof_active == false) { \ /* Sampling is currently inactive, so avoid sampling. */\ ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ } else if (opt_lg_prof_sample == 0) { \ /* Don't bother with sampling logic, since sampling */\ /* interval is 1. */\ bt_init(&bt, prof_tdata->vec); \ prof_backtrace(&bt, nignore); \ ret = prof_lookup(&bt); \ } else { \ if (prof_tdata->threshold == 0) { \ /* Initialize. Seed the prng differently for */\ /* each thread. 
*/\ prof_tdata->prng_state = \ (uint64_t)(uintptr_t)&size; \ prof_sample_threshold_update(prof_tdata); \ } \ \ /* Determine whether to capture a backtrace based on */\ /* whether size is enough for prof_accum to reach */\ /* prof_tdata->threshold. However, delay updating */\ /* these variables until prof_{m,re}alloc(), because */\ /* we don't know for sure that the allocation will */\ /* succeed. */\ /* */\ /* Use subtraction rather than addition to avoid */\ /* potential integer overflow. */\ if (size >= prof_tdata->threshold - \ prof_tdata->accum) { \ bt_init(&bt, prof_tdata->vec); \ prof_backtrace(&bt, nignore); \ ret = prof_lookup(&bt); \ } else \ ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ } \ } while (0) #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) +prof_tdata_t *prof_tdata_get(void); void prof_sample_threshold_update(prof_tdata_t *prof_tdata); prof_ctx_t *prof_ctx_get(const void *ptr); void prof_ctx_set(const void *ptr, prof_ctx_t *ctx); bool prof_sample_accum_update(size_t size); void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt); void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, size_t old_size, prof_ctx_t *old_ctx); void prof_free(const void *ptr, size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) /* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ malloc_tsd_externs(prof_tdata, prof_tdata_t *) malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, prof_tdata_cleanup) +JEMALLOC_INLINE prof_tdata_t * +prof_tdata_get(void) +{ + prof_tdata_t *prof_tdata; + + cassert(config_prof); + + prof_tdata = *prof_tdata_tsd_get(); + if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { + if (prof_tdata == NULL) + prof_tdata = prof_tdata_init(); + } + + return (prof_tdata); +} + JEMALLOC_INLINE void prof_sample_threshold_update(prof_tdata_t *prof_tdata) { uint64_t r; double u; cassert(config_prof); /* * Compute sample threshold as a geometrically distributed random * variable with mean (2^opt_lg_prof_sample). * * __ __ * | log(u) | 1 * prof_tdata->threshold = | -------- |, where p = ------------------- * | log(1-p) | opt_lg_prof_sample * 2 * * For more information on the math, see: * * Non-Uniform Random Variate Generation * Luc Devroye * Springer-Verlag, New York, 1986 * pp 500 * (http://cg.scs.carleton.ca/~luc/rnbookindex.html) */ prng64(r, 53, prof_tdata->prng_state, UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); u = (double)r * (1.0/9007199254740992.0L); prof_tdata->threshold = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample)))) + (uint64_t)1U; } JEMALLOC_INLINE prof_ctx_t * prof_ctx_get(const void *ptr) { prof_ctx_t *ret; arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) { /* Region. */ ret = arena_prof_ctx_get(ptr); } else ret = huge_prof_ctx_get(ptr); return (ret); } JEMALLOC_INLINE void prof_ctx_set(const void *ptr, prof_ctx_t *ctx) { arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk != ptr) { /* Region. */ arena_prof_ctx_set(ptr, ctx); } else huge_prof_ctx_set(ptr, ctx); } JEMALLOC_INLINE bool prof_sample_accum_update(size_t size) { prof_tdata_t *prof_tdata; cassert(config_prof); /* Sampling logic is unnecessary if the interval is 1. 
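A worked instance of the threshold formula above, using the default opt_lg_prof_sample of 19 (LG_PROF_SAMPLE_DEFAULT), so p = 1/2^19 and the mean sample interval is 524288 bytes; u = 0.5 stands in for one draw from the prng. This is a standalone editor's sketch, not jemalloc code:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned lg_sample = 19;	/* LG_PROF_SAMPLE_DEFAULT. */
	double p = 1.0 / (double)((uint64_t)1 << lg_sample);
	double u = 0.5;			/* One uniform variate in (0, 1). */
	uint64_t threshold = (uint64_t)(log(u) / log(1.0 - p)) + 1;

	/* Prints threshold = 363409; the long-run mean is 2^19 = 524288. */
	printf("threshold = %ju bytes\n", (uintmax_t)threshold);
	return (0);
}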
*/ assert(opt_lg_prof_sample != 0); prof_tdata = *prof_tdata_tsd_get(); - assert(prof_tdata != NULL); + if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + return (true); /* Take care to avoid integer overflow. */ if (size >= prof_tdata->threshold - prof_tdata->accum) { prof_tdata->accum -= (prof_tdata->threshold - size); /* Compute new sample threshold. */ prof_sample_threshold_update(prof_tdata); while (prof_tdata->accum >= prof_tdata->threshold) { prof_tdata->accum -= prof_tdata->threshold; prof_sample_threshold_update(prof_tdata); } return (false); } else { prof_tdata->accum += size; return (true); } } JEMALLOC_INLINE void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) { cassert(config_prof); assert(ptr != NULL); assert(size == isalloc(ptr, true)); if (opt_lg_prof_sample != 0) { if (prof_sample_accum_update(size)) { /* * Don't sample. For malloc()-like allocation, it is * always possible to tell in advance how large an * object's usable size will be, so there should never * be a difference between the size passed to * PROF_ALLOC_PREP() and prof_malloc(). */ assert((uintptr_t)cnt == (uintptr_t)1U); } } if ((uintptr_t)cnt > (uintptr_t)1U) { prof_ctx_set(ptr, cnt->ctx); cnt->epoch++; /*********/ mb_write(); /*********/ cnt->cnts.curobjs++; cnt->cnts.curbytes += size; if (opt_prof_accum) { cnt->cnts.accumobjs++; cnt->cnts.accumbytes += size; } /*********/ mb_write(); /*********/ cnt->epoch++; /*********/ mb_write(); /*********/ } else prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); } JEMALLOC_INLINE void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, size_t old_size, prof_ctx_t *old_ctx) { prof_thr_cnt_t *told_cnt; cassert(config_prof); assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); if (ptr != NULL) { assert(size == isalloc(ptr, true)); if (opt_lg_prof_sample != 0) { if (prof_sample_accum_update(size)) { /* * Don't sample. The size passed to * PROF_ALLOC_PREP() was larger than what * actually got allocated, so a backtrace was * captured for this allocation, even though * its actual size was insufficient to cross * the sample threshold. */ cnt = (prof_thr_cnt_t *)(uintptr_t)1U; } } } if ((uintptr_t)old_ctx > (uintptr_t)1U) { told_cnt = prof_lookup(old_ctx->bt); if (told_cnt == NULL) { /* * It's too late to propagate OOM for this realloc(), * so operate directly on old_cnt->ctx->cnt_merged. */ malloc_mutex_lock(old_ctx->lock); old_ctx->cnt_merged.curobjs--; old_ctx->cnt_merged.curbytes -= old_size; malloc_mutex_unlock(old_ctx->lock); told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; } } else told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; if ((uintptr_t)told_cnt > (uintptr_t)1U) told_cnt->epoch++; if ((uintptr_t)cnt > (uintptr_t)1U) { prof_ctx_set(ptr, cnt->ctx); cnt->epoch++; } else prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); /*********/ mb_write(); /*********/ if ((uintptr_t)told_cnt > (uintptr_t)1U) { told_cnt->cnts.curobjs--; told_cnt->cnts.curbytes -= old_size; } if ((uintptr_t)cnt > (uintptr_t)1U) { cnt->cnts.curobjs++; cnt->cnts.curbytes += size; if (opt_prof_accum) { cnt->cnts.accumobjs++; cnt->cnts.accumbytes += size; } } /*********/ mb_write(); /*********/ if ((uintptr_t)told_cnt > (uintptr_t)1U) told_cnt->epoch++; if ((uintptr_t)cnt > (uintptr_t)1U) cnt->epoch++; /*********/ mb_write(); /* Not strictly necessary. 
*/ } JEMALLOC_INLINE void prof_free(const void *ptr, size_t size) { prof_ctx_t *ctx = prof_ctx_get(ptr); cassert(config_prof); if ((uintptr_t)ctx > (uintptr_t)1) { + prof_thr_cnt_t *tcnt; assert(size == isalloc(ptr, true)); - prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt); + tcnt = prof_lookup(ctx->bt); if (tcnt != NULL) { tcnt->epoch++; /*********/ mb_write(); /*********/ tcnt->cnts.curobjs--; tcnt->cnts.curbytes -= size; /*********/ mb_write(); /*********/ tcnt->epoch++; /*********/ mb_write(); /*********/ } else { /* * OOM during free() cannot be propagated, so operate * directly on cnt->ctx->cnt_merged. */ malloc_mutex_lock(ctx->lock); ctx->cnt_merged.curobjs--; ctx->cnt_merged.curbytes -= size; malloc_mutex_unlock(ctx->lock); } } } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/tcache.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/tcache.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/tcache.h (revision 235263) @@ -1,495 +1,440 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_info_s tcache_bin_info_t; typedef struct tcache_bin_s tcache_bin_t; typedef struct tcache_s tcache_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per thread basis and * cleaning up during thread shutdown. */ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per run for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS typedef enum { tcache_enabled_false = 0, /* Enable cast to/from bool. */ tcache_enabled_true = 1, tcache_enabled_default = 2 } tcache_enabled_t; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ struct tcache_bin_info_s { unsigned ncached_max; /* Upper limit on ncached. */ }; struct tcache_bin_s { tcache_bin_stats_t tstats; int low_water; /* Min # cached since last GC. */ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ unsigned ncached; /* # of cached objects. */ void **avail; /* Stack of available objects. 
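Like PROF_TDATA_STATE_* in prof.h above, the TCACHE_STATE_* values encode per-thread state in pointer values that can never be real allocations. The test that separates a live tcache_t pointer from a state code is a single unsigned comparison; the following fragment is an editor's sketch that assumes this header's declarations and the platform-typical fact that no object is mapped in the first bytes of the address space:

static bool
tcache_ptr_is_real(const tcache_t *tcache)
{
	/* NULL, DISABLED (1), REINCARNATED (2), PURGATORY (3) all fail. */
	return ((uintptr_t)tcache > (uintptr_t)TCACHE_STATE_MAX);
}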
*/ }; struct tcache_s { ql_elm(tcache_t) link; /* Used for aggregating stats. */ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */ arena_t *arena; /* This thread's arena. */ unsigned ev_cnt; /* Event count since incremental GC. */ unsigned next_gc_bin; /* Next bin to GC. */ tcache_bin_t tbins[1]; /* Dynamically sized. */ /* * The pointer stacks associated with tbins follow as a contiguous * array. During tcache initialization, the avail pointer in each * element of tbins is initialized to point to the proper offset within * this array. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern tcache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are NBINS small-object bins, plus 0 or more * large-object bins. */ extern size_t nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; size_t tcache_salloc(const void *ptr); +void tcache_event_hard(tcache_t *tcache); void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind); void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, tcache_t *tcache); void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_associate(tcache_t *tcache, arena_t *arena); void tcache_arena_dissociate(tcache_t *tcache); tcache_t *tcache_create(arena_t *arena); void tcache_destroy(tcache_t *tcache); void tcache_thread_cleanup(void *arg); void tcache_stats_merge(tcache_t *tcache, arena_t *arena); bool tcache_boot0(void); bool tcache_boot1(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *) malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t) void tcache_event(tcache_t *tcache); void tcache_flush(void); bool tcache_enabled_get(void); tcache_t *tcache_get(bool create); void tcache_enabled_set(bool enabled); void *tcache_alloc_easy(tcache_bin_t *tbin); void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); -void tcache_dalloc_small(tcache_t *tcache, void *ptr); +void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind); void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) /* Map of thread-specific caches. */ malloc_tsd_externs(tcache, tcache_t *) malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL, tcache_thread_cleanup) /* Per thread flag that allows thread caches to be disabled. 
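The tcache_enabled_t tri-state above lets the default be resolved lazily from opt_tcache the first time a thread asks; tcache_enabled_get() memoizes the answer, and tcache_enabled_set() is what the public "thread.tcache.enabled" mallctl reaches (an assumption based on the get/set pair; the ctl wiring is outside this diff). A caller-side sketch:

#include <stdbool.h>
#include <stddef.h>
#include <malloc_np.h>	/* FreeBSD's header for mallctl(); an assumption. */

int
disable_my_tcache(void)
{
	bool enabled = false;

	/* newp/newlen write the flag; oldp/oldlenp could read it back. */
	return (mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
	    sizeof(enabled)));
}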
*/ malloc_tsd_externs(tcache_enabled, tcache_enabled_t) malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t, tcache_enabled_default, malloc_tsd_no_cleanup) JEMALLOC_INLINE void tcache_flush(void) { tcache_t *tcache; cassert(config_tcache); tcache = *tcache_tsd_get(); if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) return; tcache_destroy(tcache); tcache = NULL; tcache_tsd_set(&tcache); } JEMALLOC_INLINE bool tcache_enabled_get(void) { tcache_enabled_t tcache_enabled; cassert(config_tcache); tcache_enabled = *tcache_enabled_tsd_get(); if (tcache_enabled == tcache_enabled_default) { tcache_enabled = (tcache_enabled_t)opt_tcache; tcache_enabled_tsd_set(&tcache_enabled); } return ((bool)tcache_enabled); } JEMALLOC_INLINE void tcache_enabled_set(bool enabled) { tcache_enabled_t tcache_enabled; tcache_t *tcache; cassert(config_tcache); tcache_enabled = (tcache_enabled_t)enabled; tcache_enabled_tsd_set(&tcache_enabled); tcache = *tcache_tsd_get(); if (enabled) { if (tcache == TCACHE_STATE_DISABLED) { tcache = NULL; tcache_tsd_set(&tcache); } } else /* disabled */ { if (tcache > TCACHE_STATE_MAX) { tcache_destroy(tcache); tcache = NULL; } if (tcache == NULL) { tcache = TCACHE_STATE_DISABLED; tcache_tsd_set(&tcache); } } } JEMALLOC_INLINE tcache_t * tcache_get(bool create) { tcache_t *tcache; if (config_tcache == false) return (NULL); if (config_lazy_lock && isthreaded == false) return (NULL); tcache = *tcache_tsd_get(); if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) { if (tcache == TCACHE_STATE_DISABLED) return (NULL); if (tcache == NULL) { if (create == false) { /* * Creating a tcache here would cause * allocation as a side effect of free(). * Ordinarily that would be okay since * tcache_create() failure is a soft failure * that doesn't propagate. However, if TLS * data are freed via free() as in glibc, * subtle corruption could result from setting * a TLS variable after its backing memory is * freed. */ return (NULL); } if (tcache_enabled_get() == false) { tcache_enabled_set(false); /* Memoize. */ return (NULL); } return (tcache_create(choose_arena(NULL))); } if (tcache == TCACHE_STATE_PURGATORY) { /* * Make a note that an allocator function was called * after tcache_thread_cleanup() was called. */ tcache = TCACHE_STATE_REINCARNATED; tcache_tsd_set(&tcache); return (NULL); } if (tcache == TCACHE_STATE_REINCARNATED) return (NULL); not_reached(); } return (tcache); } JEMALLOC_INLINE void tcache_event(tcache_t *tcache) { if (TCACHE_GC_INCR == 0) return; tcache->ev_cnt++; assert(tcache->ev_cnt <= TCACHE_GC_INCR); - if (tcache->ev_cnt == TCACHE_GC_INCR) { - size_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low - * water mark. - */ - if (binind < NBINS) { - tcache_bin_flush_small(tbin, binind, - tbin->ncached - tbin->low_water + - (tbin->low_water >> 2), tcache); - } else { - tcache_bin_flush_large(tbin, binind, - tbin->ncached - tbin->low_water + - (tbin->low_water >> 2), tcache); - } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that - * the fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) - >= 1) - tbin->lg_fill_div++; - } else if (tbin->low_water < 0) { - /* - * Increase fill count by 2X. Make sure lg_fill_div - * stays greater than 0. 
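The garbage-collection body being hoisted out of the inline path here is easy to sanity-check with numbers. Suppose a bin has ncached = 16 and low_water = 10: the rem argument (the number of objects to leave cached) is 16 - 10 + (10 >> 2) = 8, so 8 objects are flushed, which is ceil(3/4 * 10), exactly the "(ceiling) 3/4 of the objects below the low water mark" promised by the deleted comment, while the 6 objects above the low-water mark are untouched.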
- */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; - } - tbin->low_water = tbin->ncached; - - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) - tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; - } + if (tcache->ev_cnt == TCACHE_GC_INCR) + tcache_event_hard(tcache); } JEMALLOC_INLINE void * tcache_alloc_easy(tcache_bin_t *tbin) { void *ret; if (tbin->ncached == 0) { tbin->low_water = -1; return (NULL); } tbin->ncached--; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; ret = tbin->avail[tbin->ncached]; return (ret); } JEMALLOC_INLINE void * tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) { void *ret; size_t binind; tcache_bin_t *tbin; binind = SMALL_SIZE2BIN(size); assert(binind < NBINS); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin); if (ret == NULL) { ret = tcache_alloc_small_hard(tcache, tbin, binind); if (ret == NULL) return (NULL); } assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size); if (zero == false) { if (config_fill) { if (opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (opt_zero) memset(ret, 0, size); } } else { if (config_fill && opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += arena_bin_info[binind].reg_size; tcache_event(tcache); return (ret); } JEMALLOC_INLINE void * tcache_alloc_large(tcache_t *tcache, size_t size, bool zero) { void *ret; size_t binind; tcache_bin_t *tbin; size = PAGE_CEILING(size); assert(size <= tcache_maxclass); binind = NBINS + (size >> LG_PAGE) - 1; assert(binind < nhbins); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin); if (ret == NULL) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
*/ ret = arena_malloc_large(tcache->arena, size, zero); if (ret == NULL) return (NULL); } else { - if (config_prof) { + if (config_prof && prof_promote && size == PAGE) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); - chunk->map[pageind-map_bias].bits &= - ~CHUNK_MAP_CLASS_MASK; + arena_mapbits_large_binind_set(chunk, pageind, + BININD_INVALID); } if (zero == false) { if (config_fill) { if (opt_junk) memset(ret, 0xa5, size); else if (opt_zero) memset(ret, 0, size); } } else { VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += size; } tcache_event(tcache); return (ret); } JEMALLOC_INLINE void -tcache_dalloc_small(tcache_t *tcache, void *ptr) +tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind) { - arena_t *arena; - arena_chunk_t *chunk; - arena_run_t *run; - arena_bin_t *bin; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; - size_t pageind, binind; - arena_chunk_map_t *mapelm; assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapelm = &chunk->map[pageind-map_bias]; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - (mapelm->bits >> LG_PAGE)) << LG_PAGE)); - bin = run->bin; - binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) / - sizeof(arena_bin_t); - assert(binind < NBINS); if (config_fill && opt_junk) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (tbin->ncached == tbin_info->ncached_max) { tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->avail[tbin->ncached] = ptr; tbin->ncached++; tcache_event(tcache); } JEMALLOC_INLINE void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size) { size_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); assert(tcache_salloc(ptr) > SMALL_MAXCLASS); assert(tcache_salloc(ptr) <= tcache_maxclass); binind = NBINS + (size >> LG_PAGE) - 1; if (config_fill && opt_junk) memset(ptr, 0x5a, size); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (tbin->ncached == tbin_info->ncached_max) { tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->avail[tbin->ncached] = ptr; tbin->ncached++; tcache_event(tcache); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/tsd.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/tsd.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/tsd.h (revision 235263) @@ -1,296 +1,397 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum number of malloc_tsd users with cleanup functions. */ #define MALLOC_TSD_CLEANUPS_MAX 8 typedef bool (*malloc_tsd_cleanup_t)(void); /* * TLS/TSD-agnostic macro-based implementation of thread-specific data. There * are four macros that support (at least) three use cases: file-private, * library-private, and library-private inlined. 
Following is an example * library-private tsd variable: * * In example.h: * typedef struct { * int x; * int y; * } example_t; * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) * malloc_tsd_protos(, example, example_t *) * malloc_tsd_externs(example, example_t *) * In example.c: * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, * example_tsd_cleanup) * * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} * example_t **example_tsd_get() {...} * void example_tsd_set(example_t **val) {...} * * Note that all of the functions deal in terms of (a_type *) rather than * (a_type) so that it is possible to support non-pointer types (unlike * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is * cast to (void *). This means that the cleanup function needs to cast *and* * dereference the function argument, e.g.: * * void * example_tsd_cleanup(void *arg) * { * example_t *example = *(example_t **)arg; * * [...] * if ([want the cleanup function to be called again]) { * example_tsd_set(&example); * } * } * * If example_tsd_set() is called within example_tsd_cleanup(), it will be * called again. This is similar to how pthreads TSD destruction works, except * that pthreads only calls the cleanup function again if the value was set to * non-NULL. */ /* malloc_tsd_protos(). */ #define malloc_tsd_protos(a_attr, a_name, a_type) \ a_attr bool \ a_name##_tsd_boot(void); \ a_attr a_type * \ a_name##_tsd_get(void); \ a_attr void \ a_name##_tsd_set(a_type *val); /* malloc_tsd_externs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##_tls; \ extern __thread bool a_name##_initialized; \ extern bool a_name##_booted; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##_tls; \ extern pthread_key_t a_name##_tsd; \ extern bool a_name##_booted; +#elif (defined(_WIN32)) +#define malloc_tsd_externs(a_name, a_type) \ +extern DWORD a_name##_tsd; \ +extern bool a_name##_booted; #else #define malloc_tsd_externs(a_name, a_type) \ extern pthread_key_t a_name##_tsd; \ extern bool a_name##_booted; #endif /* malloc_tsd_data(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##_tls = a_initializer; \ a_attr __thread bool JEMALLOC_TLS_MODEL \ a_name##_initialized = false; \ a_attr bool a_name##_booted = false; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##_tls = a_initializer; \ a_attr pthread_key_t a_name##_tsd; \ a_attr bool a_name##_booted = false; +#elif (defined(_WIN32)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr DWORD a_name##_tsd; \ +a_attr bool a_name##_booted = false; #else #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr pthread_key_t a_name##_tsd; \ a_attr bool a_name##_booted = false; #endif /* malloc_tsd_funcs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. 
*/ \ a_attr bool \ a_name##_tsd_cleanup_wrapper(void) \ { \ \ if (a_name##_initialized) { \ a_name##_initialized = false; \ a_cleanup(&a_name##_tls); \ } \ return (a_name##_initialized); \ } \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##_tsd_cleanup_wrapper); \ } \ a_name##_booted = true; \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ \ assert(a_name##_booted); \ return (&a_name##_tls); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ \ assert(a_name##_booted); \ a_name##_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ a_name##_initialized = true; \ } #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ return (true); \ } \ a_name##_booted = true; \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ \ assert(a_name##_booted); \ return (&a_name##_tls); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ \ assert(a_name##_booted); \ a_name##_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_setspecific(a_name##_tsd, \ (void *)(&a_name##_tls))) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ } \ +} +#elif (defined(_WIN32)) +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Data structure. */ \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##_tsd_wrapper_t; \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##_tsd_cleanup_wrapper(void) \ +{ \ + a_name##_tsd_wrapper_t *wrapper; \ + \ + wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \ + if (wrapper == NULL) \ + return (false); \ + if (a_cleanup != malloc_tsd_no_cleanup && \ + wrapper->initialized) { \ + a_type val = wrapper->val; \ + a_type tsd_static_data = a_initializer; \ + wrapper->initialized = false; \ + wrapper->val = tsd_static_data; \ + a_cleanup(&val); \ + if (wrapper->initialized) { \ + /* Trigger another cleanup round. */ \ + return (true); \ + } \ + } \ + malloc_tsd_dalloc(wrapper); \ + return (false); \ +} \ +a_attr bool \ +a_name##_tsd_boot(void) \ +{ \ + \ + a_name##_tsd = TlsAlloc(); \ + if (a_name##_tsd == TLS_OUT_OF_INDEXES) \ + return (true); \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + malloc_tsd_cleanup_register( \ + &a_name##_tsd_cleanup_wrapper); \ + } \ + a_name##_booted = true; \ + return (false); \ +} \ +/* Get/set.
*/ \ +a_attr a_name##_tsd_wrapper_t * \ +a_name##_tsd_get_wrapper(void) \ +{ \ + a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ + TlsGetValue(a_name##_tsd); \ + \ + if (wrapper == NULL) { \ + wrapper = (a_name##_tsd_wrapper_t *) \ + malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ + if (wrapper == NULL) { \ + malloc_write("<jemalloc>: Error allocating" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } else { \ + static a_type tsd_static_data = a_initializer; \ + wrapper->initialized = false; \ + wrapper->val = tsd_static_data; \ + } \ + if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \ + malloc_write("<jemalloc>: Error setting" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } \ + } \ + return (wrapper); \ +} \ +a_attr a_type * \ +a_name##_tsd_get(void) \ +{ \ + a_name##_tsd_wrapper_t *wrapper; \ + \ + assert(a_name##_booted); \ + wrapper = a_name##_tsd_get_wrapper(); \ + return (&wrapper->val); \ +} \ +a_attr void \ +a_name##_tsd_set(a_type *val) \ +{ \ + a_name##_tsd_wrapper_t *wrapper; \ + \ + assert(a_name##_booted); \ + wrapper = a_name##_tsd_get_wrapper(); \ + wrapper->val = *(val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + wrapper->initialized = true; \ } #else #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Data structure. */ \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##_tsd_wrapper_t; \ /* Initialization/cleanup. */ \ a_attr void \ a_name##_tsd_cleanup_wrapper(void *arg) \ { \ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ if (pthread_setspecific(a_name##_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ return; \ } \ } \ malloc_tsd_dalloc(wrapper); \ } \ a_attr bool \ a_name##_tsd_boot(void) \ { \ \ if (pthread_key_create(&a_name##_tsd, \ a_name##_tsd_cleanup_wrapper) != 0) \ return (true); \ a_name##_booted = true; \ return (false); \ } \ /* Get/set.
*/ \ a_attr a_name##_tsd_wrapper_t * \ a_name##_tsd_get_wrapper(void) \ { \ a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ pthread_getspecific(a_name##_tsd); \ \ if (wrapper == NULL) { \ wrapper = (a_name##_tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ static a_type tsd_static_data = a_initializer; \ wrapper->initialized = false; \ wrapper->val = tsd_static_data; \ } \ if (pthread_setspecific(a_name##_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ return (wrapper); \ } \ a_attr a_type * \ a_name##_tsd_get(void) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ assert(a_name##_booted); \ wrapper = a_name##_tsd_get_wrapper(); \ return (&wrapper->val); \ } \ a_attr void \ a_name##_tsd_set(a_type *val) \ { \ a_name##_tsd_wrapper_t *wrapper; \ \ assert(a_name##_booted); \ wrapper = a_name##_tsd_get_wrapper(); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *); void malloc_tsd_cleanup_register(bool (*f)(void)); void malloc_tsd_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/internal/util.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/internal/util.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/internal/util.h (revision 235263) @@ -1,146 +1,160 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Size of stack-allocated buffer passed to buferror(). */ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #ifdef JEMALLOC_CC_SILENCE # define JEMALLOC_CC_SILENCE_INIT(v) = v #else # define JEMALLOC_CC_SILENCE_INIT(v) #endif /* * Define a custom assert() in order to reduce the chances of deadlock during * assertion failure. */ #ifndef assert #define assert(e) do { \ if (config_debug && !(e)) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ __FILE__, __LINE__, #e); \ abort(); \ } \ } while (0) #endif /* Use to assert a particular configuration, e.g., cassert(config_debug).
*/ #define cassert(c) do { \ if ((c) == false) \ assert(false); \ } while (0) #ifndef not_reached #define not_reached() do { \ if (config_debug) { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #ifndef not_implemented #define not_implemented() do { \ if (config_debug) { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } \ } while (0) #endif #define assert_not_implemented(e) do { \ if (config_debug && !(e)) \ not_implemented(); \ } while (0) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -extern void (*je_malloc_message)(void *wcbopaque, const char *s); - -int buferror(int errnum, char *buf, size_t buflen); +int buferror(char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base); +void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ int malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); int malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); void malloc_printf(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2)); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE size_t pow2_ceil(size_t x); void malloc_write(const char *s); +void set_errno(int errnum); +int get_errno(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) /* Compute the smallest power of 2 that is >= x. */ JEMALLOC_INLINE size_t pow2_ceil(size_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; #if (LG_SIZEOF_PTR == 3) x |= x >> 32; #endif x++; return (x); } -/* - * Wrapper around malloc_message() that avoids the need for - * je_malloc_message(...) throughout the code.
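pow2_ceil() above relies on the classic bit-smearing trick: after x--, OR-ing x with progressively larger right shifts of itself fills every bit below the highest set bit, and x++ then lands on the next power of two. A self-contained check of the 32-bit case (an editor's sketch that omits the LG_SIZEOF_PTR == 3 shift):

#include <assert.h>
#include <stddef.h>

static size_t
pow2_ceil32(size_t x)
{
	x--;			/* So exact powers map to themselves. */
	x |= x >> 1;		/* Smear the top set bit downward... */
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;		/* ...until every lower bit is set. */
	x++;			/* 0b0111...1 + 1 is a power of two. */
	return (x);
}

int
main(void)
{
	assert(pow2_ceil32(1) == 1);
	assert(pow2_ceil32(37) == 64);	/* 36 -> 63 -> 64. */
	assert(pow2_ceil32(64) == 64);	/* Exact powers are preserved. */
	return (0);
}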
- */ +/* Sets error code */ JEMALLOC_INLINE void -malloc_write(const char *s) +set_errno(int errnum) { - je_malloc_message(NULL, s); +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + +/* Get last error code */ +JEMALLOC_INLINE int +get_errno(void) +{ + +#ifdef _WIN32 + return (GetLastError()); +#else + return (errno); +#endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ Index: projects/nand/contrib/jemalloc/include/jemalloc/jemalloc.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/jemalloc.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/jemalloc.h (revision 235263) @@ -1,141 +1,155 @@ #ifndef JEMALLOC_H_ #define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif #include <limits.h> #include <strings.h> -#define JEMALLOC_VERSION "1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca" +#define JEMALLOC_VERSION "1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754" #define JEMALLOC_VERSION_MAJOR 1 #define JEMALLOC_VERSION_MINOR 0 #define JEMALLOC_VERSION_BUGFIX 0 -#define JEMALLOC_VERSION_NREV 286 -#define JEMALLOC_VERSION_GID "a8f8d7540d66ddee7337db80c92890916e1063ca" +#define JEMALLOC_VERSION_NREV 335 +#define JEMALLOC_VERSION_GID "37b6f95dcd866f51c91488531a2efc3ed4c2b754" #include "jemalloc_defs.h" #include "jemalloc_FreeBSD.h" #ifdef JEMALLOC_EXPERIMENTAL #define ALLOCM_LG_ALIGN(la) (la) #if LG_SIZEOF_PTR == 2 #define ALLOCM_ALIGN(a) (ffs(a)-1) #else #define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) #endif #define ALLOCM_ZERO ((int)0x40) #define ALLOCM_NO_MOVE ((int)0x80) #define ALLOCM_SUCCESS 0 #define ALLOCM_ERR_OOM 1 #define ALLOCM_ERR_NOT_MOVED 2 #endif /* * The je_ prefix on the following public symbol declarations is an artifact of * namespace management, and should be omitted in application code unless * JEMALLOC_NO_DEMANGLE is defined (see below).
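Since FreeBSD configures jemalloc with prefix-stripping mangling (see jemalloc_defs.h below, where je_malloc is #defined to malloc), portable code that must name this allocator explicitly can define JEMALLOC_NO_DEMANGLE and use the stable je_ spellings, per the comment above. A minimal sketch:

#define JEMALLOC_NO_DEMANGLE
#include <stddef.h>
#include "jemalloc/jemalloc.h"

/* Allocate through the je_ names regardless of --with-mangling. */
void *
grab(size_t n)
{
	return (je_malloc(n));
}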
*/ -extern const char *je_malloc_conf; -extern void (*je_malloc_message)(void *, const char *); +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); -void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); -void *je_calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc); -int je_posix_memalign(void **memptr, size_t alignment, size_t size) - JEMALLOC_ATTR(nonnull(1)); -void *je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); -void *je_realloc(void *ptr, size_t size); -void je_free(void *ptr); +JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) + JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, + size_t size) JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) + JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); +JEMALLOC_EXPORT void je_free(void *ptr); -size_t je_malloc_usable_size(const void *ptr); -void je_malloc_stats_print(void (*write_cb)(void *, const char *), - void *je_cbopaque, const char *opts); -int je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); -int je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) + JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); +#endif + +JEMALLOC_EXPORT size_t je_malloc_usable_size(const void *ptr); +JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, + const char *), void *je_cbopaque, const char *opts); +JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp, + size_t *miblenp); +JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); #ifdef JEMALLOC_EXPERIMENTAL -int je_allocm(void **ptr, size_t *rsize, size_t size, int flags) - JEMALLOC_ATTR(nonnull(1)); -int je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, +JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size, int flags) JEMALLOC_ATTR(nonnull(1)); -int je_sallocm(const void *ptr, size_t *rsize, int flags) +JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size, + size_t extra, int flags) JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags) JEMALLOC_ATTR(nonnull(1)); -int je_dallocm(void *ptr, int flags) JEMALLOC_ATTR(nonnull(1)); -int je_nallocm(size_t *rsize, size_t size, int flags); +JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags) + JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); #endif /* * By default application code must explicitly refer to mangled symbol names, * so that it is possible to use jemalloc in conjunction with another allocator * in the same application. Define JEMALLOC_MANGLE in order to cause automatic * name mangling that matches the API prefixing that happened as a result of * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
*/ #ifdef JEMALLOC_MANGLE #ifndef JEMALLOC_NO_DEMANGLE #define JEMALLOC_NO_DEMANGLE #endif #define malloc_conf je_malloc_conf #define malloc_message je_malloc_message #define malloc je_malloc #define calloc je_calloc #define posix_memalign je_posix_memalign #define aligned_alloc je_aligned_alloc #define realloc je_realloc #define free je_free #define malloc_usable_size je_malloc_usable_size #define malloc_stats_print je_malloc_stats_print #define mallctl je_mallctl #define mallctlnametomib je_mallctlnametomib #define mallctlbymib je_mallctlbymib #define memalign je_memalign #define valloc je_valloc #ifdef JEMALLOC_EXPERIMENTAL #define allocm je_allocm #define rallocm je_rallocm #define sallocm je_sallocm #define dallocm je_dallocm #define nallocm je_nallocm #endif #endif /* * The je_* macros can be used as stable alternative names for the public * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant * for use in jemalloc itself, but it can be used by application code to * provide isolation from the name mangling specified via --with-mangling * and/or --with-jemalloc-prefix. */ #ifndef JEMALLOC_NO_DEMANGLE #undef je_malloc_conf #undef je_malloc_message #undef je_malloc #undef je_calloc #undef je_posix_memalign #undef je_aligned_alloc #undef je_realloc #undef je_free #undef je_malloc_usable_size #undef je_malloc_stats_print #undef je_mallctl #undef je_mallctlnametomib #undef je_mallctlbymib #undef je_memalign #undef je_valloc #ifdef JEMALLOC_EXPERIMENTAL #undef je_allocm #undef je_rallocm #undef je_sallocm #undef je_dallocm #undef je_nallocm #endif #endif #ifdef __cplusplus }; #endif #endif /* JEMALLOC_H_ */ Index: projects/nand/contrib/jemalloc/include/jemalloc/jemalloc_defs.h =================================================================== --- projects/nand/contrib/jemalloc/include/jemalloc/jemalloc_defs.h (revision 235262) +++ projects/nand/contrib/jemalloc/include/jemalloc/jemalloc_defs.h (revision 235263) @@ -1,235 +1,254 @@ /* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use * multiple allocators simultaneously. */ /* #undef JEMALLOC_PREFIX */ /* #undef JEMALLOC_CPREFIX */ /* * Name mangling for public symbols is controlled by --with-mangling and * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by * these macro definitions. */ #define je_malloc_conf malloc_conf #define je_malloc_message malloc_message #define je_malloc malloc #define je_calloc calloc #define je_posix_memalign posix_memalign #define je_aligned_alloc aligned_alloc #define je_realloc realloc #define je_free free #define je_malloc_usable_size malloc_usable_size #define je_malloc_stats_print malloc_stats_print #define je_mallctl mallctl #define je_mallctlnametomib mallctlnametomib #define je_mallctlbymib mallctlbymib /* #undef je_memalign */ #define je_valloc valloc #define je_allocm allocm #define je_rallocm rallocm #define je_sallocm sallocm #define je_dallocm dallocm #define je_nallocm nallocm /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols * from being exported, but for static libraries, naming collisions are a real * possibility. 
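 *
 * (Expansion sketch: a private symbol declared through JEMALLOC_N() picks
 * up the __jemalloc_ prefix, so a static libjemalloc cannot collide with
 * application names. The macro is simplified here and the identifier is
 * hypothetical.)
 */
#if 0	/* illustrative sketch only */
#define JEMALLOC_N(n)	__jemalloc_##n

static int JEMALLOC_N(arena_count);	/* Emits __jemalloc_arena_count. */
#endif
/*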
*/ #define JEMALLOC_PRIVATE_NAMESPACE "__jemalloc_" #define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) __jemalloc_##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix /* * Hyper-threaded CPUs may need a special instruction inside spin loops in * order to yield to another virtual CPU. */ #define CPU_SPINWAIT __asm__ volatile("pause") /* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ #define JEMALLOC_ATOMIC9 1 /* * Defined if OSAtomic*() functions are available, as provided by Darwin, and * documented in the atomic(3) manual page. */ /* #undef JEMALLOC_OSATOMIC */ /* * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the * functions are defined in libgcc instead of being inlines) */ /* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ /* * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the * functions are defined in libgcc instead of being inlines) */ /* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ /* * Defined if OSSpin*() functions are available, as provided by Darwin, and * documented in the spinlock(3) manual page. */ /* #undef JEMALLOC_OSSPIN */ /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ #define JEMALLOC_MALLOC_THREAD_CLEANUP /* * Defined if threaded initialization is known to be safe on this platform. * Among other things, it must be possible to initialize a mutex without * triggering allocation in order for threaded allocation to be safe. */ /* #undef JEMALLOC_THREADED_INIT */ /* * Defined if the pthreads implementation defines * _pthread_mutex_init_calloc_cb(), in which case the function is used in order * to avoid recursive allocation during mutex initialization. */ #define JEMALLOC_MUTEX_INIT_CB 1 /* Defined if __attribute__((...)) syntax is supported. */ #define JEMALLOC_HAVE_ATTR #ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_CATTR(s, a) __attribute__((s)) -# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +#elif _MSC_VER +# define JEMALLOC_ATTR(s) +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_NOINLINE __declspec(noinline) #else -# define JEMALLOC_CATTR(s, a) a -# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,) +# define JEMALLOC_ATTR(s) +# define JEMALLOC_EXPORT +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_SECTION(s) +# define JEMALLOC_NOINLINE #endif /* Defined if sbrk() is supported. */ #define JEMALLOC_HAVE_SBRK /* Non-empty if the tls_model attribute is supported. 
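 *
 * (The attribute wrappers defined above let one declaration serve GCC
 * (__attribute__), MSVC (__declspec), and compilers with neither. The
 * declarations below are hypothetical, for illustration only.)
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>

JEMALLOC_EXPORT void	*example_alloc(size_t size) JEMALLOC_ATTR(malloc);

JEMALLOC_ALIGNED(64)
static const unsigned char	example_table[64] = {0};
#endif
/*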
*/ #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ #define JEMALLOC_CC_SILENCE /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. */ /* #undef JEMALLOC_DEBUG */ /* JEMALLOC_STATS enables statistics calculation. */ #define JEMALLOC_STATS /* JEMALLOC_PROF enables allocation profiling. */ /* #undef JEMALLOC_PROF */ /* Use libunwind for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBUNWIND */ /* Use libgcc for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_LIBGCC */ /* Use gcc intrinsics for profile backtracing if defined. */ /* #undef JEMALLOC_PROF_GCC */ /* * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. * This makes it possible to allocate/deallocate objects without any locking * when the cache is in the steady state. */ #define JEMALLOC_TCACHE /* * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage * segment (DSS). */ #define JEMALLOC_DSS /* Support memory filling (junk/zero/quarantine/redzone). */ #define JEMALLOC_FILL /* Support the experimental API. */ #define JEMALLOC_EXPERIMENTAL /* Support utrace(2)-based tracing. */ #define JEMALLOC_UTRACE /* Support Valgrind. */ /* #undef JEMALLOC_VALGRIND */ /* Support optional abort() on OOM. */ #define JEMALLOC_XMALLOC /* Support lazy locking (avoid locking unless a second thread is launched). */ #define JEMALLOC_LAZY_LOCK /* One page is 2^STATIC_PAGE_SHIFT bytes. */ #define STATIC_PAGE_SHIFT 12 /* * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is automatically disabled if configuration determines - * that common sequences of mmap()/munmap() calls will cause virtual memory map - * holes. + * later reuse. This is disabled by default on Linux because common sequences + * of mmap()/munmap() calls will cause virtual memory map holes. */ #define JEMALLOC_MUNMAP +/* + * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is + * disabled by default because it is Linux-specific and it will cause virtual + * memory map holes, much like munmap(2) does. + */ +/* #undef JEMALLOC_MREMAP */ + /* TLS is used to map arenas and magazine caches to threads. */ #define JEMALLOC_TLS /* * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside * within jemalloc-owned chunks before dereferencing them. */ /* #undef JEMALLOC_IVSALLOC */ /* * Define overrides for non-standard allocator-related functions if they * are present on the system. */ /* #undef JEMALLOC_OVERRIDE_MEMALIGN */ #define JEMALLOC_OVERRIDE_VALLOC /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ /* #undef JEMALLOC_ZONE */ /* #undef JEMALLOC_ZONE_VERSION */ - -/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */ -/* #undef JEMALLOC_MREMAP_FIXED */ /* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched. * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being * unused, such that they will be discarded rather * than swapped out. */ /* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */ #define JEMALLOC_PURGE_MADVISE_FREE /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ #define LG_SIZEOF_PTR 3 /* sizeof(int) == 2^LG_SIZEOF_INT. 
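 *
 * (The LG_SIZEOF_* values are base-2 logarithms of type sizes for this
 * LP64 configuration, e.g. sizeof(void *) == 1 << LG_SIZEOF_PTR == 8. A
 * build-time cross-check sketch; the typedef name is hypothetical.)
 */
#if 0	/* illustrative sketch only */
typedef char lg_sizeof_ptr_assert[(sizeof(void *) ==
    (1UL << LG_SIZEOF_PTR)) ? 1 : -1];
#endif
/*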
*/ #define LG_SIZEOF_INT 2 /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #define LG_SIZEOF_LONG 3 /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #define LG_SIZEOF_INTMAX_T 3 Index: projects/nand/contrib/jemalloc/src/arena.c =================================================================== --- projects/nand/contrib/jemalloc/src/arena.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/arena.c (revision 235263) @@ -1,2210 +1,2223 @@ #define JEMALLOC_ARENA_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; arena_bin_info_t arena_bin_info[NBINS]; -JEMALLOC_ATTR(aligned(CACHELINE)) +JEMALLOC_ALIGNED(CACHELINE) const uint8_t small_size2bin[] = { #define S2B_8(i) i, #define S2B_16(i) S2B_8(i) S2B_8(i) #define S2B_32(i) S2B_16(i) S2B_16(i) #define S2B_64(i) S2B_32(i) S2B_32(i) #define S2B_128(i) S2B_64(i) S2B_64(i) #define S2B_256(i) S2B_128(i) S2B_128(i) #define S2B_512(i) S2B_256(i) S2B_256(i) #define S2B_1024(i) S2B_512(i) S2B_512(i) #define S2B_2048(i) S2B_1024(i) S2B_1024(i) #define S2B_4096(i) S2B_2048(i) S2B_2048(i) #define S2B_8192(i) S2B_4096(i) S2B_4096(i) #define SIZE_CLASS(bin, delta, size) \ S2B_##delta(bin) SIZE_CLASSES #undef S2B_8 #undef S2B_16 #undef S2B_32 #undef S2B_64 #undef S2B_128 #undef S2B_256 #undef S2B_512 #undef S2B_1024 #undef S2B_2048 #undef S2B_4096 #undef S2B_8192 #undef SIZE_CLASS }; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, - bool large, bool zero); + bool large, size_t binind, bool zero); static arena_chunk_t *arena_chunk_alloc(arena_t *arena); static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large, - bool zero); + size_t binind, bool zero); static void arena_purge(arena_t *arena, bool all); static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty); static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize); static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize, bool dirty); static arena_run_t *arena_bin_runs_first(arena_bin_t *bin); static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run); static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run); static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin); static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size); static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t 
min_run_size); static void bin_info_init(void); /******************************************************************************/ static inline int arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) { uintptr_t a_mapelm = (uintptr_t)a; uintptr_t b_mapelm = (uintptr_t)b; assert(a != NULL); assert(b != NULL); return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); } /* Generate red-black tree functions. */ rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, u.rb_link, arena_run_comp) static inline int arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) { int ret; size_t a_size = a->bits & ~PAGE_MASK; size_t b_size = b->bits & ~PAGE_MASK; assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits & CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY)); ret = (a_size > b_size) - (a_size < b_size); if (ret == 0) { uintptr_t a_mapelm, b_mapelm; if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) a_mapelm = (uintptr_t)a; else { /* * Treat keys as though they are lower than anything * else. */ a_mapelm = 0; } b_mapelm = (uintptr_t)b; ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); } return (ret); } /* Generate red-black tree functions. */ rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, u.rb_link, arena_avail_comp) static inline void * arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) { void *ret; unsigned regind; bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + (uintptr_t)bin_info->bitmap_offset); assert(run->nfree > 0); assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + (uintptr_t)(bin_info->reg_interval * regind)); run->nfree--; if (regind == run->nextind) run->nextind++; assert(regind < run->nextind); return (ret); } static inline void arena_run_reg_dalloc(arena_run_t *run, void *ptr) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t binind = arena_bin_index(chunk->arena, run->bin); + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t mapbits = arena_mapbits_get(chunk, pageind); + size_t binind = arena_ptr_small_binind_get(ptr, mapbits); arena_bin_info_t *bin_info = &arena_bin_info[binind]; unsigned regind = arena_run_regind(run, bin_info, ptr); bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + (uintptr_t)bin_info->bitmap_offset); assert(run->nfree < bin_info->nregs); /* Freeing an interior pointer can cause assertion failure. */ assert(((uintptr_t)ptr - ((uintptr_t)run + (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_interval == 0); assert((uintptr_t)ptr >= (uintptr_t)run + (uintptr_t)bin_info->reg0_offset); /* Freeing an unallocated pointer can cause assertion failure. 
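 *
 * (The bookkeeping in this function, reduced to a sketch: recover the
 * region index from the pointer's offset within the run, then clear its
 * bit in the run's allocation bitmap. The real lookup goes through
 * arena_run_regind(), which avoids the division; the plain form below is
 * for illustration only.)
 */
#if 0	/* illustrative sketch only */
regind = (unsigned)(((uintptr_t)ptr - ((uintptr_t)run +
    (uintptr_t)bin_info->reg0_offset)) / bin_info->reg_interval);
bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
run->nfree++;
#endif
/*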
*/ assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); bitmap_unset(bitmap, &bin_info->bitmap_info, regind); run->nfree++; } static inline void arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) { size_t i; UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); for (i = 0; i < PAGE / sizeof(size_t); i++) assert(p[i] == 0); } static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, - bool zero) + size_t binind, bool zero) { arena_chunk_t *chunk; size_t run_ind, total_pages, need_pages, rem_pages, i; size_t flag_dirty; arena_avail_tree_t *runs_avail; + assert((large && binind == BININD_INVALID) || (large == false && binind + != BININD_INVALID)); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY; + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty : &arena->runs_avail_clean; - total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >> + total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> LG_PAGE; - assert((chunk->map[run_ind+total_pages-1-map_bias].bits & - CHUNK_MAP_DIRTY) == flag_dirty); + assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == + flag_dirty); need_pages = (size >> LG_PAGE); assert(need_pages > 0); assert(need_pages <= total_pages); rem_pages = total_pages - need_pages; - arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]); + arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind)); if (config_stats) { /* * Update stats_cactive if nactive is crossing a chunk * multiple. */ size_t cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << LG_PAGE); if (cactive_diff != 0) stats_cactive_add(cactive_diff); } arena->nactive += need_pages; /* Keep track of trailing unused pages for later use. */ if (rem_pages > 0) { if (flag_dirty != 0) { - chunk->map[run_ind+need_pages-map_bias].bits = - (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY; - chunk->map[run_ind+total_pages-1-map_bias].bits = - (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY; + arena_mapbits_unallocated_set(chunk, run_ind+need_pages, + (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY); + arena_mapbits_unallocated_set(chunk, + run_ind+total_pages-1, (rem_pages << LG_PAGE), + CHUNK_MAP_DIRTY); } else { - chunk->map[run_ind+need_pages-map_bias].bits = - (rem_pages << LG_PAGE) | - (chunk->map[run_ind+need_pages-map_bias].bits & - CHUNK_MAP_UNZEROED); - chunk->map[run_ind+total_pages-1-map_bias].bits = - (rem_pages << LG_PAGE) | - (chunk->map[run_ind+total_pages-1-map_bias].bits & - CHUNK_MAP_UNZEROED); + arena_mapbits_unallocated_set(chunk, run_ind+need_pages, + (rem_pages << LG_PAGE), + arena_mapbits_unzeroed_get(chunk, + run_ind+need_pages)); + arena_mapbits_unallocated_set(chunk, + run_ind+total_pages-1, (rem_pages << LG_PAGE), + arena_mapbits_unzeroed_get(chunk, + run_ind+total_pages-1)); } - arena_avail_tree_insert(runs_avail, - &chunk->map[run_ind+need_pages-map_bias]); + arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, + run_ind+need_pages)); } /* Update dirty page accounting. */ if (flag_dirty != 0) { chunk->ndirty -= need_pages; arena->ndirty -= need_pages; } /* * Update the page map separately for large vs. small runs, since it is * possible to avoid iteration for large mallocs. 
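 *
 * (Reduced to a sketch, the two cases below differ as follows: a large
 * run records its size in the first map entry plus a terminator in the
 * last, constant work, while a small run tags every page with its
 * run-relative index and bin index. Flag handling is simplified here.)
 */
#if 0	/* illustrative sketch only */
if (large) {
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
} else {
	for (i = 0; i < need_pages; i++)
		arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
}
#endif
/*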
*/ if (large) { if (zero) { if (flag_dirty == 0) { /* * The run is clean, so some pages may be * zeroed (i.e. never before touched). */ for (i = 0; i < need_pages; i++) { - if ((chunk->map[run_ind+i-map_bias].bits - & CHUNK_MAP_UNZEROED) != 0) { + if (arena_mapbits_unzeroed_get(chunk, + run_ind+i) != 0) { VALGRIND_MAKE_MEM_UNDEFINED( (void *)((uintptr_t) chunk + ((run_ind+i) << LG_PAGE)), PAGE); memset((void *)((uintptr_t) chunk + ((run_ind+i) << LG_PAGE)), 0, PAGE); } else if (config_debug) { VALGRIND_MAKE_MEM_DEFINED( (void *)((uintptr_t) chunk + ((run_ind+i) << LG_PAGE)), PAGE); arena_chunk_validate_zeroed( chunk, run_ind+i); } } } else { /* * The run is dirty, so all pages must be * zeroed. */ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, (need_pages << LG_PAGE)); } } /* * Set the last element first, in case the run only contains one * page (i.e. both statements set the same element). */ - chunk->map[run_ind+need_pages-1-map_bias].bits = - CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty; - chunk->map[run_ind-map_bias].bits = size | flag_dirty | - CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, + flag_dirty); + arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); } else { assert(zero == false); /* * Propagate the dirty and unzeroed flags to the allocated * small run, so that arena_dalloc_bin_run() has the ability to * conditionally trim clean pages. */ - chunk->map[run_ind-map_bias].bits = - (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) | - CHUNK_MAP_ALLOCATED | flag_dirty; + arena_mapbits_small_set(chunk, run_ind, 0, binind, + arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty); /* * The first page will always be dirtied during small run * initialization, so a validation failure here would not * actually cause an observable failure. */ if (config_debug && flag_dirty == 0 && - (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) - == 0) + arena_mapbits_unzeroed_get(chunk, run_ind) == 0) arena_chunk_validate_zeroed(chunk, run_ind); for (i = 1; i < need_pages - 1; i++) { - chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE) - | (chunk->map[run_ind+i-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED; + arena_mapbits_small_set(chunk, run_ind+i, i, + binind, arena_mapbits_unzeroed_get(chunk, + run_ind+i)); if (config_debug && flag_dirty == 0 && - (chunk->map[run_ind+i-map_bias].bits & - CHUNK_MAP_UNZEROED) == 0) + arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) arena_chunk_validate_zeroed(chunk, run_ind+i); } - chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages - - 1) << LG_PAGE) | - (chunk->map[run_ind+need_pages-1-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty; + arena_mapbits_small_set(chunk, run_ind+need_pages-1, + need_pages-1, binind, arena_mapbits_unzeroed_get(chunk, + run_ind+need_pages-1) | flag_dirty); if (config_debug && flag_dirty == 0 && - (chunk->map[run_ind+need_pages-1-map_bias].bits & - CHUNK_MAP_UNZEROED) == 0) { + arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) == + 0) { arena_chunk_validate_zeroed(chunk, run_ind+need_pages-1); } } } static arena_chunk_t * arena_chunk_alloc(arena_t *arena) { arena_chunk_t *chunk; size_t i; if (arena->spare != NULL) { arena_avail_tree_t *runs_avail; chunk = arena->spare; arena->spare = NULL; /* Insert the run into the appropriate runs_avail_* tree. 
*/ - if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) + if (arena_mapbits_dirty_get(chunk, map_bias) == 0) runs_avail = &arena->runs_avail_clean; else runs_avail = &arena->runs_avail_dirty; - assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass); - assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK) - == arena_maxclass); - assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) == - (chunk->map[chunk_npages-1-map_bias].bits & - CHUNK_MAP_DIRTY)); - arena_avail_tree_insert(runs_avail, &chunk->map[0]); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxclass); + assert(arena_mapbits_unallocated_size_get(chunk, + chunk_npages-1) == arena_maxclass); + assert(arena_mapbits_dirty_get(chunk, map_bias) == + arena_mapbits_dirty_get(chunk, chunk_npages-1)); + arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, + map_bias)); } else { bool zero; size_t unzeroed; zero = false; malloc_mutex_unlock(&arena->lock); chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, &zero); malloc_mutex_lock(&arena->lock); if (chunk == NULL) return (NULL); if (config_stats) arena->stats.mapped += chunksize; chunk->arena = arena; ql_elm_new(chunk, link_dirty); chunk->dirtied = false; /* * Claim that no pages are in use, since the header is merely * overhead. */ chunk->ndirty = 0; /* * Initialize the map to contain one maximal free untouched run. * Mark the pages as zeroed iff chunk_alloc() returned a zeroed * chunk. */ unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - chunk->map[0].bits = arena_maxclass | unzeroed; + arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, + unzeroed); /* * There is no need to initialize the internal page map entries * unless the chunk is not zeroed. */ if (zero == false) { for (i = map_bias+1; i < chunk_npages-1; i++) - chunk->map[i-map_bias].bits = unzeroed; + arena_mapbits_unzeroed_set(chunk, i, unzeroed); } else if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) - assert(chunk->map[i-map_bias].bits == unzeroed); + for (i = map_bias+1; i < chunk_npages-1; i++) { + assert(arena_mapbits_unzeroed_get(chunk, i) == + unzeroed); + } } - chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass | - unzeroed; + arena_mapbits_unallocated_set(chunk, chunk_npages-1, + arena_maxclass, unzeroed); /* Insert the run into the runs_avail_clean tree. */ arena_avail_tree_insert(&arena->runs_avail_clean, - &chunk->map[0]); + arena_mapp_get(chunk, map_bias)); } return (chunk); } static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) { arena_avail_tree_t *runs_avail; /* * Remove run from the appropriate runs_avail_* tree, so that the arena * does not use it. 
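 *
 * (The spare-chunk policy below, as a sketch: each arena caches at most
 * one completely unused chunk, and retiring a second one hands the old
 * spare back to the chunk layer. Dirty-list maintenance is omitted.)
 */
#if 0	/* illustrative sketch only */
if (arena->spare != NULL) {
	arena_chunk_t *spare = arena->spare;

	arena->spare = chunk;
	chunk_dealloc((void *)spare, chunksize, true);
} else
	arena->spare = chunk;
#endif
/*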
*/ - if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) + if (arena_mapbits_dirty_get(chunk, map_bias) == 0) runs_avail = &arena->runs_avail_clean; else runs_avail = &arena->runs_avail_dirty; - arena_avail_tree_remove(runs_avail, &chunk->map[0]); + arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias)); if (arena->spare != NULL) { arena_chunk_t *spare = arena->spare; arena->spare = chunk; if (spare->dirtied) { ql_remove(&chunk->arena->chunks_dirty, spare, link_dirty); arena->ndirty -= spare->ndirty; } malloc_mutex_unlock(&arena->lock); chunk_dealloc((void *)spare, chunksize, true); malloc_mutex_lock(&arena->lock); if (config_stats) arena->stats.mapped -= chunksize; } else arena->spare = chunk; } static arena_run_t * -arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero) +arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, + bool zero) { arena_chunk_t *chunk; arena_run_t *run; arena_chunk_map_t *mapelm, key; assert(size <= arena_maxclass); assert((size & PAGE_MASK) == 0); + assert((large && binind == BININD_INVALID) || (large == false && binind + != BININD_INVALID)); /* Search the arena's chunks for the lowest best fit. */ key.bits = size | CHUNK_MAP_KEY; mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key); if (mapelm != NULL) { arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); size_t pageind = (((uintptr_t)mapelm - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + map_bias; run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << LG_PAGE)); - arena_run_split(arena, run, size, large, zero); + arena_run_split(arena, run, size, large, binind, zero); return (run); } mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); if (mapelm != NULL) { arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); size_t pageind = (((uintptr_t)mapelm - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + map_bias; run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << LG_PAGE)); - arena_run_split(arena, run, size, large, zero); + arena_run_split(arena, run, size, large, binind, zero); return (run); } /* * No usable runs. Create a new chunk from which to allocate the run. */ chunk = arena_chunk_alloc(arena); if (chunk != NULL) { run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split(arena, run, size, large, zero); + arena_run_split(arena, run, size, large, binind, zero); return (run); } /* * arena_chunk_alloc() failed, but another thread may have made * sufficient memory available while this one dropped arena->lock in * arena_chunk_alloc(), so search one more time. 
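 *
 * (Each search in this function uses one idiom, sketched here: encode the
 * requested size as a sentinel key, and nsearch() returns the lowest best
 * fit, with the dirty tree consulted before the clean one.)
 */
#if 0	/* illustrative sketch only */
key.bits = size | CHUNK_MAP_KEY;
mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
if (mapelm == NULL)
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
#endif
/*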
*/ mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key); if (mapelm != NULL) { arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); size_t pageind = (((uintptr_t)mapelm - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + map_bias; run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << LG_PAGE)); - arena_run_split(arena, run, size, large, zero); + arena_run_split(arena, run, size, large, binind, zero); return (run); } mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); if (mapelm != NULL) { arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); size_t pageind = (((uintptr_t)mapelm - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + map_bias; run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << LG_PAGE)); - arena_run_split(arena, run, size, large, zero); + arena_run_split(arena, run, size, large, binind, zero); return (run); } return (NULL); } static inline void arena_maybe_purge(arena_t *arena) { /* Enforce opt_lg_dirty_mult. */ if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory && (arena->ndirty - arena->npurgatory) > chunk_npages && (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - arena->npurgatory)) arena_purge(arena, false); } static inline void arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk) { ql_head(arena_chunk_map_t) mapelms; arena_chunk_map_t *mapelm; size_t pageind, flag_unzeroed; size_t ndirty; size_t nmadvise; ql_new(&mapelms); flag_unzeroed = #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED /* * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous * mappings, but not for file-backed mappings. */ 0 #else CHUNK_MAP_UNZEROED #endif ; /* * If chunk is the spare, temporarily re-allocate it, 1) so that its * run is reinserted into runs_avail_dirty, and 2) so that it cannot be * completely discarded by another thread while arena->lock is dropped * by this thread. Note that the arena_run_dalloc() call will * implicitly deallocate the chunk, so no explicit action is required * in this function to deallocate the chunk. * * Note that once a chunk contains dirty pages, it cannot again contain * a single run unless 1) it is a dirty run, or 2) this function purges * dirty pages and causes the transition to a single clean run. Thus * (chunk == arena->spare) is possible, but it is not possible for * this function to be called on the spare unless it contains a dirty * run. */ if (chunk == arena->spare) { - assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0); + assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); arena_chunk_alloc(arena); } /* Temporarily allocate all free dirty runs within chunk. */ for (pageind = map_bias; pageind < chunk_npages;) { - mapelm = &chunk->map[pageind-map_bias]; - if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) { + mapelm = arena_mapp_get(chunk, pageind); + if (arena_mapbits_allocated_get(chunk, pageind) == 0) { size_t npages; - npages = mapelm->bits >> LG_PAGE; + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; assert(pageind + npages <= chunk_npages); - if (mapelm->bits & CHUNK_MAP_DIRTY) { + if (arena_mapbits_dirty_get(chunk, pageind)) { size_t i; arena_avail_tree_remove( &arena->runs_avail_dirty, mapelm); - mapelm->bits = (npages << LG_PAGE) | - flag_unzeroed | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; + arena_mapbits_large_set(chunk, pageind, + (npages << LG_PAGE), flag_unzeroed); /* * Update internal elements in the page map, so * that CHUNK_MAP_UNZEROED is properly set. 
*/ for (i = 1; i < npages - 1; i++) { - chunk->map[pageind+i-map_bias].bits = - flag_unzeroed; + arena_mapbits_unzeroed_set(chunk, + pageind+i, flag_unzeroed); } if (npages > 1) { - chunk->map[ - pageind+npages-1-map_bias].bits = - flag_unzeroed | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; + arena_mapbits_large_set(chunk, + pageind+npages-1, 0, flag_unzeroed); } if (config_stats) { /* * Update stats_cactive if nactive is * crossing a chunk multiple. */ size_t cactive_diff = CHUNK_CEILING((arena->nactive + npages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << LG_PAGE); if (cactive_diff != 0) stats_cactive_add(cactive_diff); } arena->nactive += npages; /* Append to list for later processing. */ ql_elm_new(mapelm, u.ql_link); ql_tail_insert(&mapelms, mapelm, u.ql_link); } pageind += npages; } else { /* Skip allocated run. */ - if (mapelm->bits & CHUNK_MAP_LARGE) - pageind += mapelm->bits >> LG_PAGE; + if (arena_mapbits_large_get(chunk, pageind)) + pageind += arena_mapbits_large_size_get(chunk, + pageind) >> LG_PAGE; else { + size_t binind; + arena_bin_info_t *bin_info; arena_run_t *run = (arena_run_t *)((uintptr_t) chunk + (uintptr_t)(pageind << LG_PAGE)); - assert((mapelm->bits >> LG_PAGE) == 0); - size_t binind = arena_bin_index(arena, - run->bin); - arena_bin_info_t *bin_info = - &arena_bin_info[binind]; + assert(arena_mapbits_small_runind_get(chunk, + pageind) == 0); + binind = arena_bin_index(arena, run->bin); + bin_info = &arena_bin_info[binind]; pageind += bin_info->run_size >> LG_PAGE; } } } assert(pageind == chunk_npages); if (config_debug) ndirty = chunk->ndirty; if (config_stats) arena->stats.purged += chunk->ndirty; arena->ndirty -= chunk->ndirty; chunk->ndirty = 0; ql_remove(&arena->chunks_dirty, chunk, link_dirty); chunk->dirtied = false; malloc_mutex_unlock(&arena->lock); if (config_stats) nmadvise = 0; ql_foreach(mapelm, &mapelms, u.ql_link) { size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / sizeof(arena_chunk_map_t)) + map_bias; - size_t npages = mapelm->bits >> LG_PAGE; + size_t npages = arena_mapbits_large_size_get(chunk, pageind) >> + LG_PAGE; assert(pageind + npages <= chunk_npages); assert(ndirty >= npages); if (config_debug) ndirty -= npages; pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)), (npages << LG_PAGE)); if (config_stats) nmadvise++; } assert(ndirty == 0); malloc_mutex_lock(&arena->lock); if (config_stats) arena->stats.nmadvise += nmadvise; /* Deallocate runs. */ for (mapelm = ql_first(&mapelms); mapelm != NULL; mapelm = ql_first(&mapelms)) { size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / sizeof(arena_chunk_map_t)) + map_bias; arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << LG_PAGE)); ql_remove(&mapelms, mapelm, u.ql_link); arena_run_dalloc(arena, run, false); } } static void arena_purge(arena_t *arena, bool all) { arena_chunk_t *chunk; size_t npurgatory; if (config_debug) { size_t ndirty = 0; ql_foreach(chunk, &arena->chunks_dirty, link_dirty) { assert(chunk->dirtied); ndirty += chunk->ndirty; } assert(ndirty == arena->ndirty); } assert(arena->ndirty > arena->npurgatory || all); assert(arena->ndirty - arena->npurgatory > chunk_npages || all); assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - arena->npurgatory) || all); if (config_stats) arena->stats.npurge++; /* * Compute the minimum number of pages that this thread should try to * purge, and add the result to arena->npurgatory. This will keep * multiple threads from racing to reduce ndirty below the threshold. 
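 *
 * (The arithmetic below, isolated as a sketch: purge enough pages that
 * ndirty falls back to the nactive >> lg_dirty_mult threshold, and
 * publish the claim in npurgatory so concurrent purgers do not
 * double-count the same pages.)
 */
#if 0	/* illustrative sketch only */
npurgatory = arena->ndirty - arena->npurgatory;
if (all == false)
	npurgatory -= arena->nactive >> opt_lg_dirty_mult;
arena->npurgatory += npurgatory;
#endif
/*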
*/ npurgatory = arena->ndirty - arena->npurgatory; if (all == false) { assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult); npurgatory -= arena->nactive >> opt_lg_dirty_mult; } arena->npurgatory += npurgatory; while (npurgatory > 0) { /* Get next chunk with dirty pages. */ chunk = ql_first(&arena->chunks_dirty); if (chunk == NULL) { /* * This thread was unable to purge as many pages as * originally intended, due to races with other threads * that either did some of the purging work, or re-used * dirty pages. */ arena->npurgatory -= npurgatory; return; } while (chunk->ndirty == 0) { ql_remove(&arena->chunks_dirty, chunk, link_dirty); chunk->dirtied = false; chunk = ql_first(&arena->chunks_dirty); if (chunk == NULL) { /* Same logic as for above. */ arena->npurgatory -= npurgatory; return; } } if (chunk->ndirty > npurgatory) { /* * This thread will, at a minimum, purge all the dirty * pages in chunk, so set npurgatory to reflect this * thread's commitment to purge the pages. This tends * to reduce the chances of the following scenario: * * 1) This thread sets arena->npurgatory such that * (arena->ndirty - arena->npurgatory) is at the * threshold. * 2) This thread drops arena->lock. * 3) Another thread causes one or more pages to be * dirtied, and immediately determines that it must * purge dirty pages. * * If this scenario *does* play out, that's okay, * because all of the purging work being done really * needs to happen. */ arena->npurgatory += chunk->ndirty - npurgatory; npurgatory = chunk->ndirty; } arena->npurgatory -= chunk->ndirty; npurgatory -= chunk->ndirty; arena_chunk_purge(arena, chunk); } } void arena_purge_all(arena_t *arena) { malloc_mutex_lock(&arena->lock); arena_purge(arena, true); malloc_mutex_unlock(&arena->lock); } static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty) { arena_chunk_t *chunk; size_t size, run_ind, run_pages, flag_dirty; arena_avail_tree_t *runs_avail; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); assert(run_ind >= map_bias); assert(run_ind < chunk_npages); - if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) { - size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK; + if (arena_mapbits_large_get(chunk, run_ind) != 0) { + size = arena_mapbits_large_size_get(chunk, run_ind); assert(size == PAGE || - (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & - ~PAGE_MASK) == 0); - assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & - CHUNK_MAP_LARGE) != 0); - assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & - CHUNK_MAP_ALLOCATED) != 0); + arena_mapbits_large_size_get(chunk, + run_ind+(size>>LG_PAGE)-1) == 0); } else { size_t binind = arena_bin_index(arena, run->bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; size = bin_info->run_size; } run_pages = (size >> LG_PAGE); if (config_stats) { /* * Update stats_cactive if nactive is crossing a chunk * multiple. */ size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) - CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE); if (cactive_diff != 0) stats_cactive_sub(cactive_diff); } arena->nactive -= run_pages; /* * The run is dirty if the caller claims to have dirtied it, as well as * if it was already dirty before being allocated. */ - if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0) + if (arena_mapbits_dirty_get(chunk, run_ind) != 0) dirty = true; flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; runs_avail = dirty ? 
&arena->runs_avail_dirty : &arena->runs_avail_clean; /* Mark pages as unallocated in the chunk map. */ if (dirty) { - chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY; - chunk->map[run_ind+run_pages-1-map_bias].bits = size | - CHUNK_MAP_DIRTY; + arena_mapbits_unallocated_set(chunk, run_ind, size, + CHUNK_MAP_DIRTY); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + CHUNK_MAP_DIRTY); chunk->ndirty += run_pages; arena->ndirty += run_pages; } else { - chunk->map[run_ind-map_bias].bits = size | - (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED); - chunk->map[run_ind+run_pages-1-map_bias].bits = size | - (chunk->map[run_ind+run_pages-1-map_bias].bits & - CHUNK_MAP_UNZEROED); + arena_mapbits_unallocated_set(chunk, run_ind, size, + arena_mapbits_unzeroed_get(chunk, run_ind)); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); } /* Try to coalesce forward. */ if (run_ind + run_pages < chunk_npages && - (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED) - == 0 && (chunk->map[run_ind+run_pages-map_bias].bits & - CHUNK_MAP_DIRTY) == flag_dirty) { - size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits & - ~PAGE_MASK; + arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && + arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { + size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, + run_ind+run_pages); size_t nrun_pages = nrun_size >> LG_PAGE; /* * Remove successor from runs_avail; the coalesced run is * inserted later. */ - assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits - & ~PAGE_MASK) == nrun_size); - assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits - & CHUNK_MAP_ALLOCATED) == 0); - assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits - & CHUNK_MAP_DIRTY) == flag_dirty); + assert(arena_mapbits_unallocated_size_get(chunk, + run_ind+run_pages+nrun_pages-1) == nrun_size); + assert(arena_mapbits_dirty_get(chunk, + run_ind+run_pages+nrun_pages-1) == flag_dirty); arena_avail_tree_remove(runs_avail, - &chunk->map[run_ind+run_pages-map_bias]); + arena_mapp_get(chunk, run_ind+run_pages)); size += nrun_size; run_pages += nrun_pages; - chunk->map[run_ind-map_bias].bits = size | - (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind+run_pages-1-map_bias].bits = size | - (chunk->map[run_ind+run_pages-1-map_bias].bits & - CHUNK_MAP_FLAGS_MASK); + arena_mapbits_unallocated_size_set(chunk, run_ind, size); + arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, + size); } /* Try to coalesce backward. */ - if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits & - CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits & - CHUNK_MAP_DIRTY) == flag_dirty) { - size_t prun_size = chunk->map[run_ind-1-map_bias].bits & - ~PAGE_MASK; + if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1) + == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) { + size_t prun_size = arena_mapbits_unallocated_size_get(chunk, + run_ind-1); size_t prun_pages = prun_size >> LG_PAGE; run_ind -= prun_pages; /* * Remove predecessor from runs_avail; the coalesced run is * inserted later. 
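 *
 * (Coalescing invariant, sketched: after absorbing the predecessor, only
 * the first and last map entries of the merged run carry its size, and
 * the run is reinserted into runs_avail exactly once.)
 */
#if 0	/* illustrative sketch only */
run_ind -= prun_pages;
arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
size += prun_size;
run_pages += prun_pages;
arena_mapbits_unallocated_size_set(chunk, run_ind, size);
arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, size);
#endif
/*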
*/ - assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) - == prun_size); - assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED) - == 0); - assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) - == flag_dirty); - arena_avail_tree_remove(runs_avail, - &chunk->map[run_ind-map_bias]); + assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == + prun_size); + assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); + arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, + run_ind)); size += prun_size; run_pages += prun_pages; - chunk->map[run_ind-map_bias].bits = size | - (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind+run_pages-1-map_bias].bits = size | - (chunk->map[run_ind+run_pages-1-map_bias].bits & - CHUNK_MAP_FLAGS_MASK); + arena_mapbits_unallocated_size_set(chunk, run_ind, size); + arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, + size); } /* Insert into runs_avail, now that coalescing is complete. */ - assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) == - (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK)); - assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == - (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY)); - arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]); + assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == + arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); + assert(arena_mapbits_dirty_get(chunk, run_ind) == + arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); + arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind)); if (dirty) { /* * Insert into chunks_dirty before potentially calling * arena_chunk_dealloc(), so that chunks_dirty and * arena->ndirty are consistent. */ if (chunk->dirtied == false) { ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty); chunk->dirtied = true; } } - /* - * Deallocate chunk if it is now completely unused. The bit - * manipulation checks whether the first run is unallocated and extends - * to the end of the chunk. - */ - if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) == - arena_maxclass) + /* Deallocate chunk if it is now completely unused. */ + if (size == arena_maxclass) { + assert(run_ind == map_bias); + assert(run_pages == (arena_maxclass >> LG_PAGE)); + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxclass); arena_chunk_dealloc(arena, chunk); + } /* * It is okay to do dirty page processing here even if the chunk was * deallocated above, since in that case it is the spare. Waiting * until after possible chunk deallocation to do dirty processing * allows for an old spare to be fully deallocated, thus decreasing the * chances of spuriously crossing the dirty page purging threshold. */ if (dirty) arena_maybe_purge(arena); } static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize) { size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY; + size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); assert(oldsize > newsize); /* * Update the chunk map so that arena_run_dalloc() can treat the * leading run as separately allocated. Set the last element of each * run first, in case of single-page runs. 
*/ - assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0); - assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0); - chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty | - (chunk->map[pageind+head_npages-1-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; - chunk->map[pageind-map_bias].bits = (oldsize - newsize) - | flag_dirty | (chunk->map[pageind-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1)); + arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | + arena_mapbits_unzeroed_get(chunk, pageind)); if (config_debug) { UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] - .bits & ~PAGE_MASK) == 0); - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] - .bits & CHUNK_MAP_DIRTY) == flag_dirty); - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] - .bits & CHUNK_MAP_LARGE) != 0); - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] - .bits & CHUNK_MAP_ALLOCATED) != 0); + assert(arena_mapbits_large_size_get(chunk, + pageind+head_npages+tail_npages-1) == 0); + assert(arena_mapbits_dirty_get(chunk, + pageind+head_npages+tail_npages-1) == flag_dirty); } - chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty | - (chunk->map[pageind+head_npages-map_bias].bits & - CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty + | arena_mapbits_unzeroed_get(chunk, pageind+head_npages)); arena_run_dalloc(arena, run, false); } static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) { size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; size_t head_npages = newsize >> LG_PAGE; - size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = chunk->map[pageind-map_bias].bits & - CHUNK_MAP_DIRTY; + size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); assert(oldsize > newsize); /* * Update the chunk map so that arena_run_dalloc() can treat the * trailing run as separately allocated. Set the last element of each * run first, in case of single-page runs. 
*/ - assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0); - assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0); - chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty | - (chunk->map[pageind+head_npages-1-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; - chunk->map[pageind-map_bias].bits = newsize | flag_dirty | - (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) | - CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1)); + arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | + arena_mapbits_unzeroed_get(chunk, pageind)); - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & - ~PAGE_MASK) == 0); - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & - CHUNK_MAP_LARGE) != 0); - assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & - CHUNK_MAP_ALLOCATED) != 0); - chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits = - flag_dirty | - (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; - chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) | - flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits & - CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + if (config_debug) { + UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; + assert(arena_mapbits_large_size_get(chunk, + pageind+head_npages+tail_npages-1) == 0); + assert(arena_mapbits_dirty_get(chunk, + pageind+head_npages+tail_npages-1) == flag_dirty); + } + arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, + flag_dirty | arena_mapbits_unzeroed_get(chunk, + pageind+head_npages)); arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), dirty); } static arena_run_t * arena_bin_runs_first(arena_bin_t *bin) { arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); if (mapelm != NULL) { arena_chunk_t *chunk; size_t pageind; + arena_run_t *run; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / sizeof(arena_chunk_map_t))) + map_bias; - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) << + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); return (run); } return (NULL); } static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) { arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; + arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); arena_run_tree_insert(&bin->runs, mapelm); } static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; + arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); arena_run_tree_remove(&bin->runs, mapelm); } static arena_run_t * arena_bin_nonfull_run_tryget(arena_bin_t *bin) { arena_run_t *run = 
arena_bin_runs_first(bin); if (run != NULL) { arena_bin_runs_remove(bin, run); if (config_stats) bin->stats.reruns++; } return (run); } static arena_run_t * arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) { arena_run_t *run; size_t binind; arena_bin_info_t *bin_info; /* Look for a usable run. */ run = arena_bin_nonfull_run_tryget(bin); if (run != NULL) return (run); /* No existing runs have any space available. */ binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; /* Allocate a new run. */ malloc_mutex_unlock(&bin->lock); /******************************/ malloc_mutex_lock(&arena->lock); - run = arena_run_alloc(arena, bin_info->run_size, false, false); + run = arena_run_alloc(arena, bin_info->run_size, false, binind, false); if (run != NULL) { bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + (uintptr_t)bin_info->bitmap_offset); /* Initialize run internals. */ + VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset - + bin_info->redzone_size); run->bin = bin; run->nextind = 0; run->nfree = bin_info->nregs; bitmap_init(bitmap, &bin_info->bitmap_info); } malloc_mutex_unlock(&arena->lock); /********************************/ malloc_mutex_lock(&bin->lock); if (run != NULL) { if (config_stats) { bin->stats.nruns++; bin->stats.curruns++; } return (run); } /* * arena_run_alloc() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ run = arena_bin_nonfull_run_tryget(bin); if (run != NULL) return (run); return (NULL); } /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ static void * arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) { void *ret; size_t binind; arena_bin_info_t *bin_info; arena_run_t *run; binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; bin->runcur = NULL; run = arena_bin_nonfull_run_get(arena, bin); if (bin->runcur != NULL && bin->runcur->nfree > 0) { /* * Another thread updated runcur while this one ran without the * bin lock in arena_bin_nonfull_run_get(). */ assert(bin->runcur->nfree > 0); ret = arena_run_reg_alloc(bin->runcur, bin_info); if (run != NULL) { arena_chunk_t *chunk; /* * arena_run_alloc() may have allocated run, or it may * have pulled run from the bin's run tree. Therefore * it is unsafe to make any assumptions about how run * has previously been used, and arena_bin_lower_run() * must be called, as if a region were just deallocated * from the run. 
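 *
 * (Race recovery, as a sketch: the surplus run obtained while bin->lock
 * was dropped is either released outright, if still completely free, or
 * parked as a lower-address candidate for later reuse.)
 */
#if 0	/* illustrative sketch only */
if (run->nfree == bin_info->nregs)
	arena_dalloc_bin_run(arena, chunk, run, bin);
else
	arena_bin_lower_run(arena, chunk, run, bin);
#endif
/*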
*/ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); if (run->nfree == bin_info->nregs) arena_dalloc_bin_run(arena, chunk, run, bin); else arena_bin_lower_run(arena, chunk, run, bin); } return (ret); } if (run == NULL) return (NULL); bin->runcur = run; assert(bin->runcur->nfree > 0); return (arena_run_reg_alloc(bin->runcur, bin_info)); } void arena_prof_accum(arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (config_prof && prof_interval != 0) { arena->prof_accumbytes += accumbytes; if (arena->prof_accumbytes >= prof_interval) { prof_idump(); arena->prof_accumbytes -= prof_interval; } } } void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; arena_run_t *run; void *ptr; assert(tbin->ncached == 0); if (config_prof) { malloc_mutex_lock(&arena->lock); arena_prof_accum(arena, prof_accumbytes); malloc_mutex_unlock(&arena->lock); } bin = &arena->bins[binind]; malloc_mutex_lock(&bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tbin->lg_fill_div); i < nfill; i++) { if ((run = bin->runcur) != NULL && run->nfree > 0) ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); else ptr = arena_bin_malloc_hard(arena, bin); if (ptr == NULL) break; if (config_fill && opt_junk) { arena_alloc_junk_small(ptr, &arena_bin_info[binind], true); } /* Insert such that low regions get used first. */ tbin->avail[nfill - 1 - i] = ptr; } if (config_stats) { bin->stats.allocated += i * arena_bin_info[binind].reg_size; bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.nfills++; tbin->tstats.nrequests = 0; } malloc_mutex_unlock(&bin->lock); tbin->ncached = i; } void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) { if (zero) { size_t redzone_size = bin_info->redzone_size; memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, redzone_size); memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, redzone_size); } else { memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, bin_info->reg_interval); } } void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) { size_t size = bin_info->reg_size; size_t redzone_size = bin_info->redzone_size; size_t i; bool error = false; for (i = 1; i <= redzone_size; i++) { unsigned byte; if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) { error = true; malloc_printf(": Corrupt redzone " "%zu byte%s before %p (size %zu), byte=%#x\n", i, (i == 1) ? "" : "s", ptr, size, byte); } } for (i = 0; i < redzone_size; i++) { unsigned byte; if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) { error = true; malloc_printf(": Corrupt redzone " "%zu byte%s after end of %p (size %zu), byte=%#x\n", i, (i == 1) ? 
"" : "s", ptr, size, byte); } } if (opt_abort && error) abort(); memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, bin_info->reg_interval); } void * arena_malloc_small(arena_t *arena, size_t size, bool zero) { void *ret; arena_bin_t *bin; arena_run_t *run; size_t binind; binind = SMALL_SIZE2BIN(size); assert(binind < NBINS); bin = &arena->bins[binind]; size = arena_bin_info[binind].reg_size; malloc_mutex_lock(&bin->lock); if ((run = bin->runcur) != NULL && run->nfree > 0) ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); else ret = arena_bin_malloc_hard(arena, bin); if (ret == NULL) { malloc_mutex_unlock(&bin->lock); return (NULL); } if (config_stats) { bin->stats.allocated += size; bin->stats.nmalloc++; bin->stats.nrequests++; } malloc_mutex_unlock(&bin->lock); if (config_prof && isthreaded == false) { malloc_mutex_lock(&arena->lock); arena_prof_accum(arena, size); malloc_mutex_unlock(&arena->lock); } if (zero == false) { if (config_fill) { if (opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (opt_zero) memset(ret, 0, size); } } else { if (config_fill && opt_junk) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } return (ret); } void * arena_malloc_large(arena_t *arena, size_t size, bool zero) { void *ret; /* Large allocation. */ size = PAGE_CEILING(size); malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc(arena, size, true, zero); + ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero); if (ret == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); } if (config_stats) { arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; } if (config_prof) arena_prof_accum(arena, size); malloc_mutex_unlock(&arena->lock); if (zero == false) { if (config_fill) { if (opt_junk) memset(ret, 0xa5, size); else if (opt_zero) memset(ret, 0, size); } } return (ret); } /* Only handles large allocations that require more than page alignment. 
*/ void * arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) { void *ret; size_t alloc_size, leadsize, trailsize; arena_run_t *run; arena_chunk_t *chunk; assert((size & PAGE_MASK) == 0); alignment = PAGE_CEILING(alignment); alloc_size = size + alignment - PAGE; malloc_mutex_lock(&arena->lock); - run = arena_run_alloc(arena, alloc_size, true, zero); + run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero); if (run == NULL) { malloc_mutex_unlock(&arena->lock); return (NULL); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - (uintptr_t)run; assert(alloc_size >= leadsize + size); trailsize = alloc_size - leadsize - size; ret = (void *)((uintptr_t)run + leadsize); if (leadsize != 0) { arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - leadsize); } if (trailsize != 0) { arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, false); } if (config_stats) { arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; } malloc_mutex_unlock(&arena->lock); if (config_fill && zero == false) { if (opt_junk) memset(ret, 0xa5, size); else if (opt_zero) memset(ret, 0, size); } return (ret); } void arena_prof_promoted(const void *ptr, size_t size) { arena_chunk_t *chunk; size_t pageind, binind; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); assert(isalloc(ptr, false) == PAGE); assert(isalloc(ptr, true) == PAGE); assert(size <= SMALL_MAXCLASS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; binind = SMALL_SIZE2BIN(size); assert(binind < NBINS); - chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits & - ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT); + arena_mapbits_large_binind_set(chunk, pageind, binind); assert(isalloc(ptr, false) == PAGE); assert(isalloc(ptr, true) == size); } static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { /* Dissociate run from bin. */ if (run == bin->runcur) bin->runcur = NULL; else { size_t binind = arena_bin_index(chunk->arena, bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; if (bin_info->nregs != 1) { /* * This block's conditional is necessary because if the * run only contains one region, then it never gets * inserted into the non-full runs tree. 
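The lead/trail arithmetic in arena_palloc() above is easier to follow with concrete numbers. Below is a sketch of the same computation, with ALIGNMENT_CEILING reduced to its usual power-of-two form and made-up 32-bit-safe addresses:

#include <assert.h>
#include <stdint.h>

#define ALIGN_CEIL(p, a) (((p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void)
{
	uintptr_t run = 0x40003000;	/* hypothetical run address */
	uintptr_t alignment = 0x4000;	/* 16 KiB */
	uintptr_t size = 0x8000;	/* 32 KiB request */
	uintptr_t alloc_size = size + alignment - 0x1000;	/* PAGE == 4 KiB */
	uintptr_t leadsize = ALIGN_CEIL(run, alignment) - run;
	uintptr_t trailsize = alloc_size - leadsize - size;

	/* The aligned result starts at run + leadsize. */
	assert(leadsize == 0x1000);
	assert(trailsize == 0x2000);
	return (0);
}

The over-allocation of size + alignment - PAGE guarantees that some page-aligned offset within the run satisfies the alignment, after which the lead and trail pages are trimmed and returned to the arena.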
*/ arena_bin_runs_remove(bin, run); } } } static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { size_t binind; arena_bin_info_t *bin_info; size_t npages, run_ind, past; assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, &chunk->map[ - (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL); + assert(arena_run_tree_search(&bin->runs, + arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) + == NULL); binind = arena_bin_index(chunk->arena, run->bin); bin_info = &arena_bin_info[binind]; malloc_mutex_unlock(&bin->lock); /******************************/ npages = bin_info->run_size >> LG_PAGE; run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); past = (size_t)(PAGE_CEILING((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * bin_info->reg_interval - bin_info->redzone_size) - (uintptr_t)chunk) >> LG_PAGE); malloc_mutex_lock(&arena->lock); /* * If the run was originally clean, and some pages were never touched, * trim the clean pages before deallocating the dirty portion of the * run. */ - if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past - - run_ind < npages) { + if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < + npages) { /* * Trim clean pages. Convert to large run beforehand. Set the * last map element first, in case this is a one-page run. */ - chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE | - (chunk->map[run_ind+npages-1-map_bias].bits & - CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind-map_bias].bits = bin_info->run_size | - CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits & - CHUNK_MAP_FLAGS_MASK); + arena_mapbits_large_set(chunk, run_ind+npages-1, 0, + arena_mapbits_unzeroed_get(chunk, run_ind+npages-1)); + arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, + arena_mapbits_unzeroed_get(chunk, run_ind)); arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), ((past - run_ind) << LG_PAGE), false); /* npages = past - run_ind; */ } arena_run_dalloc(arena, run, true); malloc_mutex_unlock(&arena->lock); /****************************/ malloc_mutex_lock(&bin->lock); if (config_stats) bin->stats.curruns--; } static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { /* * Make sure that if bin->runcur is non-NULL, it refers to the lowest * non-full run. It is okay to NULL runcur out rather than proactively * keeping it pointing at the lowest non-full run. */ if ((uintptr_t)run < (uintptr_t)bin->runcur) { /* Switch runcur. 
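arena_bin_lower_run() above maintains an address-ordered preference: runcur should be the lowest-addressed non-full run, which concentrates allocations in low memory and lets high runs drain and be freed. A sketch of the decision, with a hypothetical runs_insert() standing in for the red-black tree:

#include <stdint.h>

typedef struct run_s { unsigned nfree; } run_t;
typedef struct { run_t *runcur; } bin_t;

extern void runs_insert(bin_t *bin, run_t *run);	/* assumed tree insert */

static void
bin_lower_run(bin_t *bin, run_t *run)
{
	if (bin->runcur == NULL || (uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Keep the lowest run current; park the old one if usable. */
		if (bin->runcur != NULL && bin->runcur->nfree > 0)
			runs_insert(bin, bin->runcur);
		bin->runcur = run;
	} else
		runs_insert(bin, run);
}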
*/ if (bin->runcur->nfree > 0) arena_bin_runs_insert(bin, bin->runcur); bin->runcur = run; if (config_stats) bin->stats.reruns++; } else arena_bin_runs_insert(bin, run); } void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, +arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_t *mapelm) { size_t pageind; arena_run_t *run; arena_bin_t *bin; - size_t size; + arena_bin_info_t *bin_info; + size_t size, binind; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - (mapelm->bits >> LG_PAGE)) << LG_PAGE)); + arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); bin = run->bin; - size_t binind = arena_bin_index(arena, bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; + binind = arena_ptr_small_binind_get(ptr, mapelm->bits); + bin_info = &arena_bin_info[binind]; if (config_fill || config_stats) size = bin_info->reg_size; if (config_fill && opt_junk) arena_dalloc_junk_small(ptr, bin_info); arena_run_reg_dalloc(run, ptr); if (run->nfree == bin_info->nregs) { arena_dissociate_bin_run(chunk, run, bin); arena_dalloc_bin_run(arena, chunk, run, bin); } else if (run->nfree == 1 && run != bin->runcur) arena_bin_lower_run(arena, chunk, run, bin); if (config_stats) { bin->stats.allocated -= size; bin->stats.ndalloc++; } } void +arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind, arena_chunk_map_t *mapelm) +{ + arena_run_t *run; + arena_bin_t *bin; + + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); + bin = run->bin; + malloc_mutex_lock(&bin->lock); + arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); + malloc_mutex_unlock(&bin->lock); +} + +void +arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind) +{ + arena_chunk_map_t *mapelm; + + if (config_debug) { + /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ + assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, + pageind)) != BININD_INVALID); + } + mapelm = arena_mapp_get(chunk, pageind); + arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); +} +void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) { unsigned i; malloc_mutex_lock(&arena->lock); *nactive += arena->nactive; *ndirty += arena->ndirty; astats->mapped += arena->stats.mapped; astats->npurge += arena->stats.npurge; astats->nmadvise += arena->stats.nmadvise; astats->purged += arena->stats.purged; astats->allocated_large += arena->stats.allocated_large; astats->nmalloc_large += arena->stats.nmalloc_large; astats->ndalloc_large += arena->stats.ndalloc_large; astats->nrequests_large += arena->stats.nrequests_large; for (i = 0; i < nlclasses; i++) { lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; lstats[i].nrequests += arena->stats.lstats[i].nrequests; lstats[i].curruns += arena->stats.lstats[i].curruns; } malloc_mutex_unlock(&arena->lock); for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; malloc_mutex_lock(&bin->lock); bstats[i].allocated += bin->stats.allocated; bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].nrequests += bin->stats.nrequests; if (config_tcache) { bstats[i].nfills += bin->stats.nfills; bstats[i].nflushes += bin->stats.nflushes; } bstats[i].nruns += bin->stats.nruns; bstats[i].reruns += bin->stats.reruns; bstats[i].curruns += bin->stats.curruns; malloc_mutex_unlock(&bin->lock); } } void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) +arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) { if (config_fill || config_stats) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK; + size_t size = arena_mapbits_large_size_get(chunk, pageind); if (config_fill && config_stats && opt_junk) memset(ptr, 0x5a, size); if (config_stats) { arena->stats.ndalloc_large++; arena->stats.allocated_large -= size; arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++; arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--; } } arena_run_dalloc(arena, (arena_run_t *)ptr, true); } +void +arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) +{ + + malloc_mutex_lock(&arena->lock); + arena_dalloc_large_locked(arena, chunk, ptr); + malloc_mutex_unlock(&arena->lock); +} + static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size) { assert(size < oldsize); /* * Shrink the run, and make trailing pages available for other * allocations. 
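The new arena_dalloc_bin()/arena_dalloc_bin_locked() and arena_dalloc_large()/arena_dalloc_large_locked() pairs follow a common locking convention: the _locked variant assumes the caller already holds the lock (useful when a tcache flush frees many objects under one acquisition), and a thin wrapper takes the lock for the one-shot case. A generic sketch of the pattern:

#include <pthread.h>

typedef struct {
	pthread_mutex_t lock;
	long nfreed;	/* example of state guarded by lock */
} pool_t;

/* Caller must hold pool->lock; suitable for batched operations. */
static void
pool_free_locked(pool_t *pool, void *ptr)
{
	/* ... actual free logic would go here ... */
	pool->nfreed++;
	(void)ptr;
}

/* Convenience wrapper for the single-free case. */
static void
pool_free(pool_t *pool, void *ptr)
{
	pthread_mutex_lock(&pool->lock);
	pool_free_locked(pool, ptr);
	pthread_mutex_unlock(&pool->lock);
}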
*/ malloc_mutex_lock(&arena->lock); arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, true); if (config_stats) { arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; } malloc_mutex_unlock(&arena->lock); } static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t npages = oldsize >> LG_PAGE; size_t followsize; - assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK)); + assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); /* Try to extend the run. */ assert(size + extra > oldsize); malloc_mutex_lock(&arena->lock); if (pageind + npages < chunk_npages && - (chunk->map[pageind+npages-map_bias].bits - & CHUNK_MAP_ALLOCATED) == 0 && (followsize = - chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size - - oldsize) { + arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && + (followsize = arena_mapbits_unallocated_size_get(chunk, + pageind+npages)) >= size - oldsize) { /* * The next run is available and sufficiently large. Split the * following run, then merge the first part with the existing * allocation. */ size_t flag_dirty; size_t splitsize = (oldsize + followsize <= size + extra) ? followsize : size + extra - oldsize; arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, true, zero); + ((pageind+npages) << LG_PAGE)), splitsize, true, + BININD_INVALID, zero); size = oldsize + splitsize; npages = size >> LG_PAGE; /* * Mark the extended run as dirty if either portion of the run * was dirty before allocation. This is rather pedantic, * because there's not actually any sequence of events that * could cause the resulting run to be passed to * arena_run_dalloc() with the dirty argument set to false * (which is when dirty flag consistency would really matter). 
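The growth test in arena_ralloc_large_grow() above reduces to three page-map conditions: the pages after the allocation are still inside the chunk, they are unallocated, and the free run there covers the shortfall. In outline, with an illustrative page-map type rather than jemalloc's packed map bits:

#include <stdbool.h>
#include <stddef.h>

/* Simplified page-map view of one chunk; fields are illustrative only. */
typedef struct {
	bool allocated;
	size_t run_size;	/* meaningful when !allocated */
} pagemap_t;

static bool
can_grow_in_place(const pagemap_t *map, size_t npages_total,
    size_t pageind, size_t npages, size_t need)
{
	size_t next = pageind + npages;

	return (next < npages_total && map[next].allocated == false &&
	    map[next].run_size >= need);
}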
*/ - flag_dirty = (chunk->map[pageind-map_bias].bits & - CHUNK_MAP_DIRTY) | - (chunk->map[pageind+npages-1-map_bias].bits & - CHUNK_MAP_DIRTY); - chunk->map[pageind-map_bias].bits = size | flag_dirty - | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; - chunk->map[pageind+npages-1-map_bias].bits = flag_dirty | - CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | + arena_mapbits_dirty_get(chunk, pageind+npages-1); + arena_mapbits_large_set(chunk, pageind, size, flag_dirty); + arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); if (config_stats) { arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - - 1].curruns--; + arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; + arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - - 1].nrequests++; + arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; } malloc_mutex_unlock(&arena->lock); return (false); } malloc_mutex_unlock(&arena->lock); return (true); } /* * Try to resize a large allocation, in order to avoid copying. This will * always fail when growing an object if the following run is already in use. */ static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { size_t psize; psize = PAGE_CEILING(size + extra); if (psize == oldsize) { /* Same size class. */ if (config_fill && opt_junk && size < oldsize) { memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size); } return (false); } else { arena_chunk_t *chunk; arena_t *arena; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena = chunk->arena; if (psize < oldsize) { /* Fill before shrinking in order to avoid a race. */ if (config_fill && opt_junk) { memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size); } arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, psize); return (false); } else { bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, PAGE_CEILING(size), psize - PAGE_CEILING(size), zero); if (config_fill && ret == false && zero == false && opt_zero) { memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize); } return (ret); } } } void * arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { /* * Avoid moving the allocation if the size class can be left the same. */ if (oldsize <= arena_maxclass) { if (oldsize <= SMALL_MAXCLASS) { assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size == oldsize); if ((size + extra <= SMALL_MAXCLASS && SMALL_SIZE2BIN(size + extra) == SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && size + extra >= oldsize)) { if (config_fill && opt_junk && size < oldsize) { memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size); } return (ptr); } } else { assert(size <= arena_maxclass); if (size + extra > SMALL_MAXCLASS) { if (arena_ralloc_large(ptr, oldsize, size, extra, zero) == false) return (ptr); } } } /* Reallocation would require a move. */ return (NULL); } void * arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, bool try_tcache) { void *ret; size_t copysize; /* Try to avoid moving the allocation.
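Whether a large realloc can stay in place without trimming or growing comes down to comparing page-ceilinged sizes, as in arena_ralloc_large() above. A toy version of the test (oldsize is assumed already page-aligned, as large sizes are):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SZ ((size_t)4096)
#define PAGE_CEIL(s) (((s) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

/* True if a large allocation can be resized without moving or trimming. */
static bool
large_same_class(size_t oldsize, size_t size, size_t extra)
{
	return (PAGE_CEIL(size + extra) == PAGE_CEIL(oldsize));
}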
*/ ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero); if (ret != NULL) return (ret); /* * size and oldsize are different enough that we need to move the * object. In that case, fall back to allocating new space and * copying. */ if (alignment != 0) { size_t usize = sa2u(size + extra, alignment); if (usize == 0) return (NULL); ret = ipalloc(usize, alignment, zero); } else ret = arena_malloc(NULL, size + extra, zero, try_tcache); if (ret == NULL) { if (extra == 0) return (NULL); /* Try again, this time without extra. */ if (alignment != 0) { size_t usize = sa2u(size, alignment); if (usize == 0) return (NULL); ret = ipalloc(usize, alignment, zero); } else ret = arena_malloc(NULL, size, zero, try_tcache); if (ret == NULL) return (NULL); } /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; + VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); memcpy(ret, ptr, copysize); iqalloc(ptr); return (ret); } bool arena_new(arena_t *arena, unsigned ind) { unsigned i; arena_bin_t *bin; arena->ind = ind; arena->nthreads = 0; if (malloc_mutex_init(&arena->lock)) return (true); if (config_stats) { memset(&arena->stats, 0, sizeof(arena_stats_t)); arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses * sizeof(malloc_large_stats_t)); if (arena->stats.lstats == NULL) return (true); memset(arena->stats.lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); if (config_tcache) ql_new(&arena->tcache_ql); } if (config_prof) arena->prof_accumbytes = 0; /* Initialize chunks. */ ql_new(&arena->chunks_dirty); arena->spare = NULL; arena->nactive = 0; arena->ndirty = 0; arena->npurgatory = 0; arena_avail_tree_new(&arena->runs_avail_clean); arena_avail_tree_new(&arena->runs_avail_dirty); /* Initialize bins. */ for (i = 0; i < NBINS; i++) { bin = &arena->bins[i]; if (malloc_mutex_init(&bin->lock)) return (true); bin->runcur = NULL; arena_run_tree_new(&bin->runs); if (config_stats) memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); } return (false); } /* * Calculate bin_info->run_size such that it meets the following constraints: * * *) bin_info->run_size >= min_run_size * *) bin_info->run_size <= arena_maxclass * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). * *) bin_info->nregs <= RUN_MAXREGS * * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also * calculated here, since these settings are all interdependent. */ static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) { size_t pad_size; size_t try_run_size, good_run_size; uint32_t try_nregs, good_nregs; uint32_t try_hdr_size, good_hdr_size; uint32_t try_bitmap_offset, good_bitmap_offset; uint32_t try_ctx0_offset, good_ctx0_offset; uint32_t try_redzone0_offset, good_redzone0_offset; assert(min_run_size >= PAGE); assert(min_run_size <= arena_maxclass); /* * Determine redzone size based on minimum alignment and minimum * redzone size. Add padding to the end of the run if it is needed to * align the regions. The padding allows each redzone to be half the * minimum alignment; without the padding, each redzone would have to * be twice as large in order to maintain alignment. 
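The move path above is the standard realloc fallback: allocate, copy min(size, oldsize), free the original. A compact sketch using libc directly instead of jemalloc's internal ipalloc()/iqalloc() entry points:

#include <stdlib.h>
#include <string.h>

/* Grow-or-move: returns NULL and leaves ptr valid on failure. */
static void *
realloc_move(void *ptr, size_t oldsize, size_t size)
{
	void *ret = malloc(size);

	if (ret == NULL)
		return (NULL);
	/* Copy at most size bytes; extra bytes carry no guarantee. */
	memcpy(ret, ptr, size < oldsize ? size : oldsize);
	free(ptr);
	return (ret);
}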
*/ if (config_fill && opt_redzone) { size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1); if (align_min <= REDZONE_MINSIZE) { bin_info->redzone_size = REDZONE_MINSIZE; pad_size = 0; } else { bin_info->redzone_size = align_min >> 1; pad_size = bin_info->redzone_size; } } else { bin_info->redzone_size = 0; pad_size = 0; } bin_info->reg_interval = bin_info->reg_size + (bin_info->redzone_size << 1); /* * Calculate known-valid settings before entering the run_size * expansion loop, so that the first part of the loop always copies * valid settings. * * The do..while loop iteratively reduces the number of regions until * the run header and the regions no longer overlap. A closed formula * would be quite messy, since there is an interdependency between the * header's mask length and the number of regions. */ try_run_size = min_run_size; try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_interval) + 1; /* Counter-act try_nregs-- in loop. */ if (try_nregs > RUN_MAXREGS) { try_nregs = RUN_MAXREGS + 1; /* Counter-act try_nregs-- in loop. */ } do { try_nregs--; try_hdr_size = sizeof(arena_run_t); /* Pad to a long boundary. */ try_hdr_size = LONG_CEILING(try_hdr_size); try_bitmap_offset = try_hdr_size; /* Add space for bitmap. */ try_hdr_size += bitmap_size(try_nregs); if (config_prof && opt_prof && prof_promote == false) { /* Pad to a quantum boundary. */ try_hdr_size = QUANTUM_CEILING(try_hdr_size); try_ctx0_offset = try_hdr_size; /* Add space for one (prof_ctx_t *) per region. */ try_hdr_size += try_nregs * sizeof(prof_ctx_t *); } else try_ctx0_offset = 0; try_redzone0_offset = try_run_size - (try_nregs * bin_info->reg_interval) - pad_size; } while (try_hdr_size > try_redzone0_offset); /* run_size expansion loop. */ do { /* * Copy valid settings before trying more aggressive settings. */ good_run_size = try_run_size; good_nregs = try_nregs; good_hdr_size = try_hdr_size; good_bitmap_offset = try_bitmap_offset; good_ctx0_offset = try_ctx0_offset; good_redzone0_offset = try_redzone0_offset; /* Try more aggressive settings. */ try_run_size += PAGE; try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / bin_info->reg_interval) + 1; /* Counter-act try_nregs-- in loop. */ if (try_nregs > RUN_MAXREGS) { try_nregs = RUN_MAXREGS + 1; /* Counter-act try_nregs-- in loop. */ } do { try_nregs--; try_hdr_size = sizeof(arena_run_t); /* Pad to a long boundary. */ try_hdr_size = LONG_CEILING(try_hdr_size); try_bitmap_offset = try_hdr_size; /* Add space for bitmap. */ try_hdr_size += bitmap_size(try_nregs); if (config_prof && opt_prof && prof_promote == false) { /* Pad to a quantum boundary. */ try_hdr_size = QUANTUM_CEILING(try_hdr_size); try_ctx0_offset = try_hdr_size; /* * Add space for one (prof_ctx_t *) per region. */ try_hdr_size += try_nregs * sizeof(prof_ctx_t *); } try_redzone0_offset = try_run_size - (try_nregs * bin_info->reg_interval) - pad_size; } while (try_hdr_size > try_redzone0_offset); } while (try_run_size <= arena_maxclass && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > RUN_MAX_OVRHD_RELAX && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size && try_nregs < RUN_MAXREGS); assert(good_hdr_size <= good_redzone0_offset); /* Copy final settings.
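The expansion loop above is driven by a binary-fixed-point overhead bound: space lost to the run header must stay below RUN_MAX_OVRHD/2^RUN_BFP of the run size, unless the relaxation threshold applies. A small sketch of that acceptance test; the constant values are believed to match this jemalloc version but should be treated as illustrative:

#include <stdbool.h>
#include <stddef.h>

#define RUN_BFP		12		/* binary fixed point */
#define RUN_MAX_OVRHD	0x0000003dU	/* roughly 1.5% of run size */

/* True if hdr_size bytes of overhead are acceptable for run_size. */
static bool
overhead_ok(size_t hdr_size, size_t run_size)
{
	return ((hdr_size << RUN_BFP) <= RUN_MAX_OVRHD * run_size);
}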
*/ bin_info->run_size = good_run_size; bin_info->nregs = good_nregs; bin_info->bitmap_offset = good_bitmap_offset; bin_info->ctx0_offset = good_ctx0_offset; bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs * bin_info->reg_interval) + pad_size == bin_info->run_size); return (good_run_size); } static void bin_info_init(void) { arena_bin_info_t *bin_info; size_t prev_run_size = PAGE; #define SIZE_CLASS(bin, delta, size) \ bin_info = &arena_bin_info[bin]; \ bin_info->reg_size = size; \ prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); SIZE_CLASSES #undef SIZE_CLASS } void arena_boot(void) { size_t header_size; unsigned i; /* * Compute the header size such that it is large enough to contain the * page map. The page map is biased to omit entries for the header * itself, so some iteration is necessary to compute the map bias. * * 1) Compute safe header_size and map_bias values that include enough * space for an unbiased page map. * 2) Refine map_bias based on (1) to omit the header pages in the page * map. The resulting map_bias may be one too small. * 3) Refine map_bias based on (2). The result will be >= the result * from (2), and will always be correct. */ map_bias = 0; for (i = 0; i < 3; i++) { header_size = offsetof(arena_chunk_t, map) + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) != 0); } assert(map_bias > 0); arena_maxclass = chunksize - (map_bias << LG_PAGE); bin_info_init(); } void arena_prefork(arena_t *arena) { unsigned i; malloc_mutex_prefork(&arena->lock); for (i = 0; i < NBINS; i++) malloc_mutex_prefork(&arena->bins[i].lock); } void arena_postfork_parent(arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) malloc_mutex_postfork_parent(&arena->bins[i].lock); malloc_mutex_postfork_parent(&arena->lock); } void arena_postfork_child(arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) malloc_mutex_postfork_child(&arena->bins[i].lock); malloc_mutex_postfork_child(&arena->lock); } Index: projects/nand/contrib/jemalloc/src/chunk.c =================================================================== --- projects/nand/contrib/jemalloc/src/chunk.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/chunk.c (revision 235263) @@ -1,303 +1,314 @@ #define JEMALLOC_CHUNK_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ size_t opt_lg_chunk = LG_CHUNK_DEFAULT; malloc_mutex_t chunks_mtx; chunk_stats_t stats_chunks; /* * Trees of chunks that were previously allocated (trees differ only in node * ordering). These are used when allocating chunks, in an attempt to re-use * address space. Depending on function, different tree orderings are needed, * which is why there are two trees with the same contents. */ static extent_tree_t chunks_szad; static extent_tree_t chunks_ad; rtree_t *chunks_rtree; /* Various chunk-related settings. */ size_t chunksize; size_t chunksize_mask; /* (chunksize - 1). */ size_t chunk_npages; size_t map_bias; size_t arena_maxclass; /* Max size class for arenas. */ /******************************************************************************/ /* Function prototypes for non-inline static functions. 
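The three-pass map_bias loop in arena_boot() above converges because each pass shrinks the header estimate using the previous bias. The same fixed point can be demonstrated standalone; the sizes here are invented and do not reflect jemalloc's real header layout:

#include <assert.h>
#include <stddef.h>

#define LG_PG 12
#define PG_MASK ((size_t)((1U << LG_PG) - 1))

int
main(void)
{
	size_t chunk_npages = 512;	/* 2 MiB chunk of 4 KiB pages */
	size_t hdr_fixed = 4096;	/* hypothetical fixed header bytes */
	size_t map_entry = 16;		/* hypothetical per-page map entry */
	size_t map_bias = 0, header_size;
	unsigned i;

	for (i = 0; i < 3; i++) {
		header_size = hdr_fixed +
		    map_entry * (chunk_npages - map_bias);
		map_bias = (header_size >> LG_PG) +
		    ((header_size & PG_MASK) != 0);
	}
	/* The bias is stable after three passes (here: 3 pages). */
	assert(map_bias == 3);
	return (0);
}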
*/ -static void *chunk_recycle(size_t size, size_t alignment, bool *zero); +static void *chunk_recycle(size_t size, size_t alignment, bool base, + bool *zero); static void chunk_record(void *chunk, size_t size); /******************************************************************************/ static void * -chunk_recycle(size_t size, size_t alignment, bool *zero) +chunk_recycle(size_t size, size_t alignment, bool base, bool *zero) { void *ret; extent_node_t *node; extent_node_t key; size_t alloc_size, leadsize, trailsize; + if (base) { + /* + * This function may need to call base_node_{,de}alloc(), but + * the current chunk allocation request is on behalf of the + * base allocator. Avoid deadlock (and if that weren't an + * issue, potential for infinite recursion) by returning NULL. + */ + return (NULL); + } + alloc_size = size + alignment - chunksize; /* Beware size_t wrap-around. */ if (alloc_size < size) return (NULL); key.addr = NULL; key.size = alloc_size; malloc_mutex_lock(&chunks_mtx); node = extent_tree_szad_nsearch(&chunks_szad, &key); if (node == NULL) { malloc_mutex_unlock(&chunks_mtx); return (NULL); } leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - (uintptr_t)node->addr; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; + assert(node->size >= leadsize + size); + trailsize = node->size - leadsize - size; ret = (void *)((uintptr_t)node->addr + leadsize); /* Remove node from the tree. */ extent_tree_szad_remove(&chunks_szad, node); extent_tree_ad_remove(&chunks_ad, node); if (leadsize != 0) { /* Insert the leading space as a smaller chunk. */ node->size = leadsize; extent_tree_szad_insert(&chunks_szad, node); extent_tree_ad_insert(&chunks_ad, node); node = NULL; } if (trailsize != 0) { /* Insert the trailing space as a smaller chunk. */ if (node == NULL) { /* * An additional node is required, but * base_node_alloc() can cause a new base chunk to be * allocated. Drop chunks_mtx in order to avoid * deadlock, and if node allocation fails, deallocate * the result before returning an error. */ malloc_mutex_unlock(&chunks_mtx); node = base_node_alloc(); if (node == NULL) { chunk_dealloc(ret, size, true); return (NULL); } malloc_mutex_lock(&chunks_mtx); } node->addr = (void *)((uintptr_t)(ret) + size); node->size = trailsize; extent_tree_szad_insert(&chunks_szad, node); extent_tree_ad_insert(&chunks_ad, node); node = NULL; } malloc_mutex_unlock(&chunks_mtx); if (node != NULL) base_node_dealloc(node); #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED /* Pages are zeroed as a side effect of pages_purge(). */ *zero = true; #else if (*zero) { VALGRIND_MAKE_MEM_UNDEFINED(ret, size); memset(ret, 0, size); } #endif return (ret); } /* * If the caller specifies (*zero == false), it is still possible to receive * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() * takes advantage of this to avoid demanding zeroed chunks, but taking * advantage of them if they are returned. 
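The base flag threaded through chunk_recycle() above is a reentrancy guard: the recycler may need base_node_alloc(), so letting it serve the base allocator's own chunk request could recurse or self-deadlock. The shape of the guard, stated generically:

#include <stdbool.h>
#include <stddef.h>

extern void *slow_path_alloc(size_t size);	/* assumed fallback */

/*
 * for_metadata is true when the request comes from the metadata
 * allocator that the recycler itself depends on.
 */
static void *
recycle_alloc(size_t size, bool for_metadata)
{
	if (for_metadata) {
		/* Refuse: recycling could recurse into this allocator. */
		return (NULL);
	}
	/* ... normal recycle logic would go here ... */
	return (slow_path_alloc(size));
}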
*/ void * chunk_alloc(size_t size, size_t alignment, bool base, bool *zero) { void *ret; assert(size != 0); assert((size & chunksize_mask) == 0); + assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - ret = chunk_recycle(size, alignment, zero); + ret = chunk_recycle(size, alignment, base, zero); if (ret != NULL) goto label_return; ret = chunk_alloc_mmap(size, alignment, zero); if (ret != NULL) goto label_return; if (config_dss) { ret = chunk_alloc_dss(size, alignment, zero); if (ret != NULL) goto label_return; } /* All strategies for allocation failed. */ ret = NULL; label_return: if (config_ivsalloc && base == false && ret != NULL) { if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) { chunk_dealloc(ret, size, true); return (NULL); } } if ((config_stats || config_prof) && ret != NULL) { bool gdump; malloc_mutex_lock(&chunks_mtx); if (config_stats) stats_chunks.nchunks += (size / chunksize); stats_chunks.curchunks += (size / chunksize); if (stats_chunks.curchunks > stats_chunks.highchunks) { stats_chunks.highchunks = stats_chunks.curchunks; if (config_prof) gdump = true; } else if (config_prof) gdump = false; malloc_mutex_unlock(&chunks_mtx); if (config_prof && opt_prof && opt_prof_gdump && gdump) prof_gdump(); } if (config_debug && *zero && ret != NULL) { size_t i; size_t *p = (size_t *)(uintptr_t)ret; + VALGRIND_MAKE_MEM_DEFINED(ret, size); for (i = 0; i < size / sizeof(size_t); i++) assert(p[i] == 0); } assert(CHUNK_ADDR2BASE(ret) == ret); return (ret); } static void chunk_record(void *chunk, size_t size) { extent_node_t *xnode, *node, *prev, key; pages_purge(chunk, size); - xnode = NULL; + /* + * Allocate a node before acquiring chunks_mtx even though it might not + * be needed, because base_node_alloc() may cause a new base chunk to + * be allocated, which could cause deadlock if chunks_mtx were already + * held. + */ + xnode = base_node_alloc(); + malloc_mutex_lock(&chunks_mtx); - while (true) { - key.addr = (void *)((uintptr_t)chunk + size); - node = extent_tree_ad_nsearch(&chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && node->addr == key.addr) { + key.addr = (void *)((uintptr_t)chunk + size); + node = extent_tree_ad_nsearch(&chunks_ad, &key); + /* Try to coalesce forward. */ + if (node != NULL && node->addr == key.addr) { + /* + * Coalesce chunk with the following address range. This does + * not change the position within chunks_ad, so only + * remove/insert from/into chunks_szad. + */ + extent_tree_szad_remove(&chunks_szad, node); + node->addr = chunk; + node->size += size; + extent_tree_szad_insert(&chunks_szad, node); + if (xnode != NULL) + base_node_dealloc(xnode); + } else { + /* Coalescing forward failed, so insert a new node. */ + if (xnode == NULL) { /* - * Coalesce chunk with the following address range. - * This does not change the position within chunks_ad, - * so only remove/insert from/into chunks_szad. + * base_node_alloc() failed, which is an exceedingly + * unlikely failure. Leak chunk; its pages have + * already been purged, so this is only a virtual + * memory leak. */ - extent_tree_szad_remove(&chunks_szad, node); - node->addr = chunk; - node->size += size; - extent_tree_szad_insert(&chunks_szad, node); - break; - } else if (xnode == NULL) { - /* - * It is possible that base_node_alloc() will cause a - * new base chunk to be allocated, so take care not to - * deadlock on chunks_mtx, and recover if another thread - * deallocates an adjacent chunk while this one is busy - * allocating xnode. 
- */ malloc_mutex_unlock(&chunks_mtx); - xnode = base_node_alloc(); - if (xnode == NULL) - return; - malloc_mutex_lock(&chunks_mtx); - } else { - /* Coalescing forward failed, so insert a new node. */ - node = xnode; - xnode = NULL; - node->addr = chunk; - node->size = size; - extent_tree_ad_insert(&chunks_ad, node); - extent_tree_szad_insert(&chunks_szad, node); - break; + return; } + node = xnode; + node->addr = chunk; + node->size = size; + extent_tree_ad_insert(&chunks_ad, node); + extent_tree_szad_insert(&chunks_szad, node); } - /* Discard xnode if it ended up unused due to a race. */ - if (xnode != NULL) - base_node_dealloc(xnode); /* Try to coalesce backward. */ prev = extent_tree_ad_prev(&chunks_ad, node); if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == chunk) { /* * Coalesce chunk with the previous address range. This does * not change the position within chunks_ad, so only * remove/insert node from/into chunks_szad. */ extent_tree_szad_remove(&chunks_szad, prev); extent_tree_ad_remove(&chunks_ad, prev); extent_tree_szad_remove(&chunks_szad, node); node->addr = prev->addr; node->size += prev->size; extent_tree_szad_insert(&chunks_szad, node); base_node_dealloc(prev); } malloc_mutex_unlock(&chunks_mtx); } void chunk_dealloc(void *chunk, size_t size, bool unmap) { assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(size != 0); assert((size & chunksize_mask) == 0); if (config_ivsalloc) rtree_set(chunks_rtree, (uintptr_t)chunk, NULL); if (config_stats || config_prof) { malloc_mutex_lock(&chunks_mtx); stats_chunks.curchunks -= (size / chunksize); malloc_mutex_unlock(&chunks_mtx); } if (unmap) { if ((config_dss && chunk_in_dss(chunk)) || chunk_dealloc_mmap(chunk, size)) chunk_record(chunk, size); } } bool chunk_boot(void) { /* Set variables according to the value of opt_lg_chunk. */ chunksize = (ZU(1) << opt_lg_chunk); assert(chunksize >= PAGE); chunksize_mask = chunksize - 1; chunk_npages = (chunksize >> LG_PAGE); if (config_stats || config_prof) { if (malloc_mutex_init(&chunks_mtx)) return (true); memset(&stats_chunks, 0, sizeof(chunk_stats_t)); } if (config_dss && chunk_dss_boot()) return (true); extent_tree_szad_new(&chunks_szad); extent_tree_ad_new(&chunks_ad); if (config_ivsalloc) { chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk); if (chunks_rtree == NULL) return (true); } return (false); } Index: projects/nand/contrib/jemalloc/src/chunk_mmap.c =================================================================== --- projects/nand/contrib/jemalloc/src/chunk_mmap.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/chunk_mmap.c (revision 235263) @@ -1,177 +1,202 @@ #define JEMALLOC_CHUNK_MMAP_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static void *pages_map(void *addr, size_t size); static void pages_unmap(void *addr, size_t size); static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, - bool unaligned, bool *zero); + bool *zero); /******************************************************************************/ static void * pages_map(void *addr, size_t size) { void *ret; + assert(size != 0); + +#ifdef _WIN32 /* + * If VirtualAlloc can't allocate at the given address when one is + * given, it fails and returns NULL. 
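The rewritten chunk_record() above replaces a retry loop with a simpler discipline: allocate the possibly-needed tree node before taking chunks_mtx, because the node allocator may itself acquire that mutex. A sketch of the allocate-before-lock pattern, with hypothetical node_alloc()/node_free() helpers:

#include <pthread.h>
#include <stddef.h>

typedef struct node_s { struct node_s *next; void *addr; } node_t;

extern node_t *node_alloc(void);	/* may take list_mtx internally */
extern void node_free(node_t *node);

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t *list_head;

static void
record(void *addr)
{
	/* Allocate up front; calling node_alloc() under list_mtx could deadlock. */
	node_t *n = node_alloc();

	pthread_mutex_lock(&list_mtx);
	if (n == NULL) {
		/* Exceedingly unlikely; drop the record rather than die. */
		pthread_mutex_unlock(&list_mtx);
		return;
	}
	n->addr = addr;
	n->next = list_head;
	list_head = n;
	pthread_mutex_unlock(&list_mtx);
}

If a coalescing case makes the pre-allocated node unnecessary, it is simply handed back with node_free(), which is exactly what chunk_record() does with xnode.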
+ */ + ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, + PAGE_READWRITE); +#else + /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); assert(ret != NULL); if (ret == MAP_FAILED) ret = NULL; else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. */ if (munmap(ret, size) == -1) { char buf[BUFERROR_BUF]; - buferror(errno, buf, sizeof(buf)); + buferror(buf, sizeof(buf)); malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf); if (opt_abort) abort(); } ret = NULL; } +#endif assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); return (ret); } static void pages_unmap(void *addr, size_t size) { - if (munmap(addr, size) == -1) { +#ifdef _WIN32 + if (VirtualFree(addr, 0, MEM_RELEASE) == 0) +#else + if (munmap(addr, size) == -1) +#endif + { char buf[BUFERROR_BUF]; - buferror(errno, buf, sizeof(buf)); + buferror(buf, sizeof(buf)); - malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf); + malloc_printf("<jemalloc>: Error in " +#ifdef _WIN32 + "VirtualFree" +#else + "munmap" +#endif + "(): %s\n", buf); if (opt_abort) abort(); } } +static void * +pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) +{ + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + { + void *new_addr; + + pages_unmap(addr, alloc_size); + new_addr = pages_map(ret, size); + if (new_addr == ret) + return (ret); + if (new_addr) + pages_unmap(new_addr, size); + return (NULL); + } +#else + { + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) + pages_unmap(addr, leadsize); + if (trailsize != 0) + pages_unmap((void *)((uintptr_t)ret + size), trailsize); + return (ret); + } +#endif +} + void pages_purge(void *addr, size_t length) { -#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -#elif defined(JEMALLOC_PURGE_MADVISE_FREE) -# define JEMALLOC_MADV_PURGE MADV_FREE +#ifdef _WIN32 + VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE); #else -# error "No method defined for purging unused dirty pages." -#endif +# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED +# define JEMALLOC_MADV_PURGE MADV_DONTNEED +# elif defined(JEMALLOC_PURGE_MADVISE_FREE) +# define JEMALLOC_MADV_PURGE MADV_FREE +# else +# error "No method defined for purging unused dirty pages." +# endif madvise(addr, length, JEMALLOC_MADV_PURGE); +#endif } static void * -chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero) +chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero) { void *ret, *pages; - size_t alloc_size, leadsize, trailsize; + size_t alloc_size, leadsize; alloc_size = size + alignment - PAGE; /* Beware size_t wrap-around. */ if (alloc_size < size) return (NULL); - pages = pages_map(NULL, alloc_size); - if (pages == NULL) - return (NULL); - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; - ret = (void *)((uintptr_t)pages + leadsize); - if (leadsize != 0) { - /* Note that mmap() returned an unaligned mapping.
*/ - unaligned = true; - pages_unmap(pages, leadsize); - } - if (trailsize != 0) - pages_unmap((void *)((uintptr_t)ret + size), trailsize); + do { + pages = pages_map(NULL, alloc_size); + if (pages == NULL) + return (NULL); + leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - + (uintptr_t)pages; + ret = pages_trim(pages, alloc_size, leadsize, size); + } while (ret == NULL); assert(ret != NULL); *zero = true; return (ret); } void * chunk_alloc_mmap(size_t size, size_t alignment, bool *zero) { void *ret; size_t offset; /* * Ideally, there would be a way to specify alignment to mmap() (like * NetBSD has), but in the absence of such a feature, we have to work * hard to efficiently create aligned mappings. The reliable, but * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in at least one call to + * excess. However, that always results in one or two calls to * pages_unmap(). * - * A more optimistic approach is to try mapping precisely the right - * amount, then try to append another mapping if alignment is off. In - * practice, this works out well as long as the application is not - * interleaving mappings via direct mmap() calls. If we do run into a - * situation where there is an interleaved mapping and we are unable to - * extend an unaligned mapping, our best option is to switch to the - * slow method until mmap() returns another aligned mapping. This will - * tend to leave a gap in the memory map that is too small to cause - * later problems for the optimistic method. - * - * Another possible confounding factor is address space layout - * randomization (ASLR), which causes mmap(2) to disregard the - * requested address. As such, repeatedly trying to extend unaligned - * mappings could result in an infinite loop, so if extension fails, - * immediately fall back to the reliable method of over-allocation - * followed by trimming. + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. */ + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + ret = pages_map(NULL, size); if (ret == NULL) return (NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); if (offset != 0) { - /* Try to extend chunk boundary. */ - if (pages_map((void *)((uintptr_t)ret + size), chunksize - - offset) == NULL) { - /* - * Extension failed. Clean up, then fall back to the - * reliable-but-expensive method. - */ - pages_unmap(ret, size); - return (chunk_alloc_mmap_slow(size, alignment, true, - zero)); - } else { - /* Clean up unneeded leading space. */ - pages_unmap(ret, chunksize - offset); - ret = (void *)((uintptr_t)ret + (chunksize - offset)); - } + pages_unmap(ret, size); + return (chunk_alloc_mmap_slow(size, alignment, zero)); } assert(ret != NULL); *zero = true; return (ret); } bool chunk_dealloc_mmap(void *chunk, size_t size) { if (config_munmap) pages_unmap(chunk, size); return (config_munmap == false); } Index: projects/nand/contrib/jemalloc/src/ctl.c =================================================================== --- projects/nand/contrib/jemalloc/src/ctl.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/ctl.c (revision 235263) @@ -1,1388 +1,1418 @@ #define JEMALLOC_CTL_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. 
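chunk_alloc_mmap()'s simplified strategy above is: map exactly size bytes, keep the mapping if the kernel happened to return an aligned address, and otherwise unmap it and fall back to over-allocate-and-trim. A standalone sketch of the fast path (alignment assumed to be a power of two):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Try one exact-size mapping; return NULL if it comes back misaligned. */
static void *
map_aligned_fast(size_t size, size_t alignment)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)p & (alignment - 1)) != 0) {
		/* Misaligned; caller falls back to the slow trim path. */
		munmap(p, size);
		return (NULL);
	}
	return (p);
}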
*/ /* * ctl_mtx protects the following: * - ctl_stats.* * - opt_prof_active */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; static uint64_t ctl_epoch; static ctl_stats_t ctl_stats; /******************************************************************************/ +/* Helpers for named and indexed nodes. */ + +static inline const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) +{ + + return ((node->named) ? (const ctl_named_node_t *)node : NULL); +} + +static inline const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, int index) +{ + const ctl_named_node_t *children = ctl_named_node(node->children); + + return (children ? &children[index] : NULL); +} + +static inline const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) +{ + + return ((node->named == false) ? (const ctl_indexed_node_t *)node : + NULL); +} + +/******************************************************************************/ /* Function prototypes for non-inline static functions. */ #define CTL_PROTO(n) \ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ size_t *oldlenp, void *newp, size_t newlen); #define INDEX_PROTO(n) \ -const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \ +const ctl_named_node_t *n##_index(const size_t *mib, size_t miblen, \ size_t i); static bool ctl_arena_init(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats); static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena); static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats); static void ctl_arena_refresh(arena_t *arena, unsigned i); static void ctl_refresh(void); static bool ctl_init(void); static int ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); CTL_PROTO(version) CTL_PROTO(epoch) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_arena) CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) CTL_PROTO(config_debug) CTL_PROTO(config_dss) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_mremap) CTL_PROTO(config_munmap) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) CTL_PROTO(config_tcache) CTL_PROTO(config_tls) CTL_PROTO(config_utrace) CTL_PROTO(config_valgrind) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_narenas) CTL_PROTO(opt_lg_dirty_mult) CTL_PROTO(opt_stats_print) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) CTL_PROTO(opt_quarantine) CTL_PROTO(opt_redzone) CTL_PROTO(opt_utrace) CTL_PROTO(opt_valgrind) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) CTL_PROTO(opt_lg_prof_sample) CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_run_size) INDEX_PROTO(arenas_bin_i) CTL_PROTO(arenas_lrun_i_size) INDEX_PROTO(arenas_lrun_i) CTL_PROTO(arenas_narenas) CTL_PROTO(arenas_initialized) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nlruns) CTL_PROTO(arenas_purge) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) CTL_PROTO(prof_interval) CTL_PROTO(stats_chunks_current) 
CTL_PROTO(stats_chunks_total) CTL_PROTO(stats_chunks_high) CTL_PROTO(stats_huge_allocated) CTL_PROTO(stats_huge_nmalloc) CTL_PROTO(stats_huge_ndalloc) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_nrequests) CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) CTL_PROTO(stats_arenas_i_bins_j_allocated) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nruns) CTL_PROTO(stats_arenas_i_bins_j_nreruns) CTL_PROTO(stats_arenas_i_bins_j_curruns) INDEX_PROTO(stats_arenas_i_bins_j) CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) CTL_PROTO(stats_arenas_i_lruns_j_nrequests) CTL_PROTO(stats_arenas_i_lruns_j_curruns) INDEX_PROTO(stats_arenas_i_lruns_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_purged) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) CTL_PROTO(stats_mapped) /******************************************************************************/ /* mallctl tree. */ /* Maximum tree depth. */ #define CTL_MAX_DEPTH 6 -#define NAME(n) true, {.named = {n -#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL -#define CTL(c) 0, NULL}}, c##_ctl +#define NAME(n) {true}, n +#define CHILD(t, c) \ + sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ + (ctl_node_t *)c##_node, \ + NULL +#define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. 
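The ctl_named_node_t/ctl_indexed_node_t split above replaces a union-based ctl_node_t with two struct types that share a leading discriminator, so a node pointer can be classified before any kind-specific field is touched. A minimal sketch of the layout trick, with simplified field sets:

#include <stddef.h>

typedef struct {
	int named;	/* common first member: discriminates node kind */
} node_t;

typedef struct {
	node_t base;		/* base.named == 1 */
	const char *name;
} named_node_t;

typedef struct {
	node_t base;		/* base.named == 0 */
	const node_t *(*index)(size_t i);
} indexed_node_t;

/* Safe downcast: NULL when the node is of the other kind. */
static const named_node_t *
as_named(const node_t *node)
{
	return (node->named ? (const named_node_t *)node : NULL);
}

Because each specialized struct begins with the common header, casting back and forth is well defined in C, which is exactly what ctl_named_node() and ctl_indexed_node() rely on.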
*/ -#define INDEX(i) false, {.indexed = {i##_index}}, NULL +#define INDEX(i) {false}, i##_index -static const ctl_node_t tcache_node[] = { +static const ctl_named_node_t tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("flush"), CTL(thread_tcache_flush)} }; -static const ctl_node_t thread_node[] = { +static const ctl_named_node_t thread_node[] = { {NAME("arena"), CTL(thread_arena)}, {NAME("allocated"), CTL(thread_allocated)}, {NAME("allocatedp"), CTL(thread_allocatedp)}, {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(tcache)} + {NAME("tcache"), CHILD(named, tcache)} }; -static const ctl_node_t config_node[] = { +static const ctl_named_node_t config_node[] = { {NAME("debug"), CTL(config_debug)}, {NAME("dss"), CTL(config_dss)}, {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("mremap"), CTL(config_mremap)}, {NAME("munmap"), CTL(config_munmap)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("stats"), CTL(config_stats)}, {NAME("tcache"), CTL(config_tcache)}, {NAME("tls"), CTL(config_tls)}, {NAME("utrace"), CTL(config_utrace)}, {NAME("valgrind"), CTL(config_valgrind)}, {NAME("xmalloc"), CTL(config_xmalloc)} }; -static const ctl_node_t opt_node[] = { +static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("narenas"), CTL(opt_narenas)}, {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, {NAME("stats_print"), CTL(opt_stats_print)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, {NAME("quarantine"), CTL(opt_quarantine)}, {NAME("redzone"), CTL(opt_redzone)}, {NAME("utrace"), CTL(opt_utrace)}, {NAME("valgrind"), CTL(opt_valgrind)}, {NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("tcache"), CTL(opt_tcache)}, {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, {NAME("prof_active"), CTL(opt_prof_active)}, {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, {NAME("prof_gdump"), CTL(opt_prof_gdump)}, {NAME("prof_final"), CTL(opt_prof_final)}, {NAME("prof_leak"), CTL(opt_prof_leak)}, {NAME("prof_accum"), CTL(opt_prof_accum)} }; -static const ctl_node_t arenas_bin_i_node[] = { +static const ctl_named_node_t arenas_bin_i_node[] = { {NAME("size"), CTL(arenas_bin_i_size)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("run_size"), CTL(arenas_bin_i_run_size)} }; -static const ctl_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(arenas_bin_i)} +static const ctl_named_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(named, arenas_bin_i)} }; -static const ctl_node_t arenas_bin_node[] = { +static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; -static const ctl_node_t arenas_lrun_i_node[] = { +static const ctl_named_node_t arenas_lrun_i_node[] = { {NAME("size"), CTL(arenas_lrun_i_size)} }; -static const ctl_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(arenas_lrun_i)} +static const ctl_named_node_t super_arenas_lrun_i_node[] = { + {NAME(""), CHILD(named, arenas_lrun_i)} }; -static const ctl_node_t arenas_lrun_node[] = { +static const ctl_indexed_node_t arenas_lrun_node[] = { {INDEX(arenas_lrun_i)} }; -static const ctl_node_t arenas_node[] = { +static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, 
{NAME("initialized"), CTL(arenas_initialized)}, {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, {NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(arenas_bin)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(arenas_lrun)}, + {NAME("lrun"), CHILD(indexed, arenas_lrun)}, {NAME("purge"), CTL(arenas_purge)} }; -static const ctl_node_t prof_node[] = { +static const ctl_named_node_t prof_node[] = { {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, {NAME("interval"), CTL(prof_interval)} }; -static const ctl_node_t stats_chunks_node[] = { +static const ctl_named_node_t stats_chunks_node[] = { {NAME("current"), CTL(stats_chunks_current)}, {NAME("total"), CTL(stats_chunks_total)}, {NAME("high"), CTL(stats_chunks_high)} }; -static const ctl_node_t stats_huge_node[] = { +static const ctl_named_node_t stats_huge_node[] = { {NAME("allocated"), CTL(stats_huge_allocated)}, {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, {NAME("ndalloc"), CTL(stats_huge_ndalloc)} }; -static const ctl_node_t stats_arenas_i_small_node[] = { +static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; -static const ctl_node_t stats_arenas_i_large_node[] = { +static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; -static const ctl_node_t stats_arenas_i_bins_j_node[] = { +static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} }; -static const ctl_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(stats_arenas_i_bins_j)} +static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; -static const ctl_node_t stats_arenas_i_bins_node[] = { +static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; -static const ctl_node_t stats_arenas_i_lruns_j_node[] = { +static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} }; -static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(stats_arenas_i_lruns_j)} +static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} }; -static const ctl_node_t stats_arenas_i_lruns_node[] = { +static const ctl_indexed_node_t 
stats_arenas_i_lruns_node[] = { {INDEX(stats_arenas_i_lruns_j)} }; -static const ctl_node_t stats_arenas_i_node[] = { +static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, {NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("small"), CHILD(stats_arenas_i_small)}, - {NAME("large"), CHILD(stats_arenas_i_large)}, - {NAME("bins"), CHILD(stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(stats_arenas_i_lruns)} + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} }; -static const ctl_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(stats_arenas_i)} +static const ctl_named_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(named, stats_arenas_i)} }; -static const ctl_node_t stats_arenas_node[] = { +static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; -static const ctl_node_t stats_node[] = { +static const ctl_named_node_t stats_node[] = { {NAME("cactive"), CTL(stats_cactive)}, {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("mapped"), CTL(stats_mapped)}, - {NAME("chunks"), CHILD(stats_chunks)}, - {NAME("huge"), CHILD(stats_huge)}, - {NAME("arenas"), CHILD(stats_arenas)} + {NAME("chunks"), CHILD(named, stats_chunks)}, + {NAME("huge"), CHILD(named, stats_huge)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} }; -static const ctl_node_t root_node[] = { +static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, - {NAME("thread"), CHILD(thread)}, - {NAME("config"), CHILD(config)}, - {NAME("opt"), CHILD(opt)}, - {NAME("arenas"), CHILD(arenas)}, - {NAME("prof"), CHILD(prof)}, - {NAME("stats"), CHILD(stats)} + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, config)}, + {NAME("opt"), CHILD(named, opt)}, + {NAME("arenas"), CHILD(named, arenas)}, + {NAME("prof"), CHILD(named, prof)}, + {NAME("stats"), CHILD(named, stats)} }; -static const ctl_node_t super_root_node[] = { - {NAME(""), CHILD(root)} +static const ctl_named_node_t super_root_node[] = { + {NAME(""), CHILD(named, root)} }; #undef NAME #undef CHILD #undef CTL #undef INDEX /******************************************************************************/ static bool ctl_arena_init(ctl_arena_stats_t *astats) { if (astats->lstats == NULL) { astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * sizeof(malloc_large_stats_t)); if (astats->lstats == NULL) return (true); } return (false); } static void ctl_arena_clear(ctl_arena_stats_t *astats) { astats->pactive = 0; astats->pdirty = 0; if (config_stats) { memset(&astats->astats, 0, sizeof(arena_stats_t)); astats->allocated_small = 0; astats->nmalloc_small = 0; astats->ndalloc_small = 0; astats->nrequests_small = 0; memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); } } static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) { unsigned i; arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); for (i = 0; i < NBINS; i++) { 
cstats->allocated_small += cstats->bstats[i].allocated; cstats->nmalloc_small += cstats->bstats[i].nmalloc; cstats->ndalloc_small += cstats->bstats[i].ndalloc; cstats->nrequests_small += cstats->bstats[i].nrequests; } } static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) { unsigned i; sstats->pactive += astats->pactive; sstats->pdirty += astats->pdirty; sstats->astats.mapped += astats->astats.mapped; sstats->astats.npurge += astats->astats.npurge; sstats->astats.nmadvise += astats->astats.nmadvise; sstats->astats.purged += astats->astats.purged; sstats->allocated_small += astats->allocated_small; sstats->nmalloc_small += astats->nmalloc_small; sstats->ndalloc_small += astats->ndalloc_small; sstats->nrequests_small += astats->nrequests_small; sstats->astats.allocated_large += astats->astats.allocated_large; sstats->astats.nmalloc_large += astats->astats.nmalloc_large; sstats->astats.ndalloc_large += astats->astats.ndalloc_large; sstats->astats.nrequests_large += astats->astats.nrequests_large; for (i = 0; i < nlclasses; i++) { sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; sstats->lstats[i].nrequests += astats->lstats[i].nrequests; sstats->lstats[i].curruns += astats->lstats[i].curruns; } for (i = 0; i < NBINS; i++) { sstats->bstats[i].allocated += astats->bstats[i].allocated; sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; sstats->bstats[i].nrequests += astats->bstats[i].nrequests; if (config_tcache) { sstats->bstats[i].nfills += astats->bstats[i].nfills; sstats->bstats[i].nflushes += astats->bstats[i].nflushes; } sstats->bstats[i].nruns += astats->bstats[i].nruns; sstats->bstats[i].reruns += astats->bstats[i].reruns; sstats->bstats[i].curruns += astats->bstats[i].curruns; } } static void ctl_arena_refresh(arena_t *arena, unsigned i) { ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas]; ctl_arena_clear(astats); sstats->nthreads += astats->nthreads; if (config_stats) { ctl_arena_stats_amerge(astats, arena); /* Merge into sum stats as well. */ ctl_arena_stats_smerge(sstats, astats); } else { astats->pactive += arena->nactive; astats->pdirty += arena->ndirty; /* Merge into sum stats as well. */ sstats->pactive += arena->nactive; sstats->pdirty += arena->ndirty; } } static void ctl_refresh(void) { unsigned i; - arena_t *tarenas[narenas]; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); if (config_stats) { malloc_mutex_lock(&chunks_mtx); ctl_stats.chunks.current = stats_chunks.curchunks; ctl_stats.chunks.total = stats_chunks.nchunks; ctl_stats.chunks.high = stats_chunks.highchunks; malloc_mutex_unlock(&chunks_mtx); malloc_mutex_lock(&huge_mtx); ctl_stats.huge.allocated = huge_allocated; ctl_stats.huge.nmalloc = huge_nmalloc; ctl_stats.huge.ndalloc = huge_ndalloc; malloc_mutex_unlock(&huge_mtx); } /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). 
*/ ctl_stats.arenas[narenas].nthreads = 0; ctl_arena_clear(&ctl_stats.arenas[narenas]); malloc_mutex_lock(&arenas_lock); memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); for (i = 0; i < narenas; i++) { if (arenas[i] != NULL) ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; else ctl_stats.arenas[i].nthreads = 0; } malloc_mutex_unlock(&arenas_lock); for (i = 0; i < narenas; i++) { bool initialized = (tarenas[i] != NULL); ctl_stats.arenas[i].initialized = initialized; if (initialized) ctl_arena_refresh(tarenas[i], i); } if (config_stats) { ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small + ctl_stats.arenas[narenas].astats.allocated_large + ctl_stats.huge.allocated; ctl_stats.active = (ctl_stats.arenas[narenas].pactive << LG_PAGE) + ctl_stats.huge.allocated; ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); } ctl_epoch++; } static bool ctl_init(void) { bool ret; malloc_mutex_lock(&ctl_mtx); if (ctl_initialized == false) { /* * Allocate space for one extra arena stats element, which * contains summed stats across all arenas. */ ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( (narenas + 1) * sizeof(ctl_arena_stats_t)); if (ctl_stats.arenas == NULL) { ret = true; goto label_return; } memset(ctl_stats.arenas, 0, (narenas + 1) * sizeof(ctl_arena_stats_t)); /* * Initialize all stats structures, regardless of whether they * ever get used. Lazy initialization would allow errors to * cause inconsistent state to be viewable by the application. */ if (config_stats) { unsigned i; for (i = 0; i <= narenas; i++) { if (ctl_arena_init(&ctl_stats.arenas[i])) { ret = true; goto label_return; } } } ctl_stats.arenas[narenas].initialized = true; ctl_epoch = 0; ctl_refresh(); ctl_initialized = true; } ret = false; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; - const ctl_node_t *node; + const ctl_named_node_t *node; elm = name; /* Equivalent to strchrnul(). */ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); if (elen == 0) { ret = ENOENT; goto label_return; } node = super_root_node; for (i = 0; i < *depthp; i++) { - assert(node->named); - assert(node->u.named.nchildren > 0); - if (node->u.named.children[0].named) { - const ctl_node_t *pnode = node; + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + const ctl_named_node_t *pnode = node; /* Children are named. */ - for (j = 0; j < node->u.named.nchildren; j++) { - const ctl_node_t *child = - &node->u.named.children[j]; - if (strlen(child->u.named.name) == elen - && strncmp(elm, child->u.named.name, - elen) == 0) { + for (j = 0; j < node->nchildren; j++) { + const ctl_named_node_t *child = + ctl_named_children(node, j); + if (strlen(child->name) == elen && + strncmp(elm, child->name, elen) == 0) { node = child; if (nodesp != NULL) - nodesp[i] = node; + nodesp[i] = + (const ctl_node_t *)node; mibp[i] = j; break; } } if (node == pnode) { ret = ENOENT; goto label_return; } } else { uintmax_t index; - const ctl_node_t *inode; + const ctl_indexed_node_t *inode; /* Children are indexed. 
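The ctl_lookup() hunk above reflects the split of the old tagged-union ctl_node_t into separate named and indexed node types. The accessors it uses (ctl_named_node(), ctl_named_children(), ctl_indexed_node()) imply node shapes roughly like the following sketch; the real definitions live in the matching ctl.h change, which is outside this hunk, so the field layout here is a reconstruction rather than the verbatim header:

/* Sketch: a common header tags which flavor a node is. */
typedef struct ctl_node_s {
	bool	named;
} ctl_node_t;

typedef struct ctl_named_node_s {
	ctl_node_t	node;		/* node.named == true */
	const char	*name;		/* path component, e.g. "arenas" */
	unsigned	nchildren;	/* 0 for terminal nodes */
	const ctl_node_t *children;	/* block of named or indexed nodes */
	int		(*ctl)(const size_t *, size_t, void *, size_t *,
			    void *, size_t);
} ctl_named_node_t;

typedef struct ctl_indexed_node_s {
	ctl_node_t	node;		/* node.named == false */
	const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
} ctl_indexed_node_t;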
*/ index = malloc_strtoumax(elm, NULL, 10); if (index == UINTMAX_MAX || index > SIZE_T_MAX) { ret = ENOENT; goto label_return; } - inode = &node->u.named.children[0]; - node = inode->u.indexed.index(mibp, *depthp, - (size_t)index); + inode = ctl_indexed_node(node->children); + node = inode->index(mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } if (nodesp != NULL) - nodesp[i] = node; + nodesp[i] = (const ctl_node_t *)node; mibp[i] = (size_t)index; } if (node->ctl != NULL) { /* Terminal node. */ if (*dot != '\0') { /* * The name contains more elements than are * in this path through the tree. */ ret = ENOENT; goto label_return; } /* Complete lookup successful. */ *depthp = i + 1; break; } /* Update elm. */ if (*dot == '\0') { /* No more elements. */ ret = ENOENT; goto label_return; } elm = &dot[1]; dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); } ret = 0; label_return: return (ret); } int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; + const ctl_named_node_t *node; if (ctl_initialized == false && ctl_init()) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; ret = ctl_lookup(name, nodes, mib, &depth); if (ret != 0) goto label_return; - if (nodes[depth-1]->ctl == NULL) { + node = ctl_named_node(nodes[depth-1]); + if (node != NULL && node->ctl) + ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); + else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; - goto label_return; } - ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen); label_return: return(ret); } int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; if (ctl_initialized == false && ctl_init()) { ret = EAGAIN; goto label_return; } ret = ctl_lookup(name, NULL, mibp, miblenp); label_return: return(ret); } int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - const ctl_node_t *node; + const ctl_named_node_t *node; size_t i; if (ctl_initialized == false && ctl_init()) { ret = EAGAIN; goto label_return; } /* Iterate down the tree. */ node = super_root_node; for (i = 0; i < miblen; i++) { - if (node->u.named.children[0].named) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { /* Children are named. */ - if (node->u.named.nchildren <= mib[i]) { + if (node->nchildren <= mib[i]) { ret = ENOENT; goto label_return; } - node = &node->u.named.children[mib[i]]; + node = ctl_named_children(node, mib[i]); } else { - const ctl_node_t *inode; + const ctl_indexed_node_t *inode; /* Indexed element. */ - inode = &node->u.named.children[0]; - node = inode->u.indexed.index(mib, miblen, mib[i]); + inode = ctl_indexed_node(node->children); + node = inode->index(mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; } } } /* Call the ctl function. */ - if (node->ctl == NULL) { + if (node && node->ctl) + ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + else { /* Partial MIB. 
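ctl_nametomib() and ctl_bymib() above exist so a dotted name can be translated once and then queried repeatedly by MIB, skipping the string walk in ctl_lookup(). A minimal usage sketch through the public wrappers (mallctl(), mallctlnametomib(), mallctlbymib(), as exported on FreeBSD via malloc_np.h; error checking elided):

#include <malloc_np.h>

unsigned narenas;
size_t sz = sizeof(narenas);
mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

size_t mib[4], miblen = 4, mapped;
mallctlnametomib("stats.arenas.0.mapped", mib, &miblen);
for (unsigned i = 0; i < narenas; i++) {
	mib[2] = i;		/* patch the indexed path component */
	sz = sizeof(mapped);
	mallctlbymib(mib, miblen, &mapped, &sz, NULL, 0);
}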
*/ ret = ENOENT; - goto label_return; } - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); label_return: return(ret); } bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx)) return (true); ctl_initialized = false; return (false); } /******************************************************************************/ /* *_ctl() functions. */ #define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ - goto label_return; \ + goto label_return; \ } \ } while (0) #define WRITEONLY() do { \ if (oldp != NULL || oldlenp != NULL) { \ ret = EPERM; \ - goto label_return; \ + goto label_return; \ } \ } while (0) -#define VOID() do { \ - READONLY(); \ - WRITEONLY(); \ -} while (0) - #define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ ? sizeof(t) : *oldlenp; \ memcpy(oldp, (void *)&v, copylen); \ ret = EINVAL; \ - goto label_return; \ + goto label_return; \ } else \ *(t *)oldp = v; \ } \ } while (0) #define WRITE(v, t) do { \ if (newp != NULL) { \ if (newlen != sizeof(t)) { \ ret = EINVAL; \ - goto label_return; \ + goto label_return; \ } \ v = *(t *)newp; \ } \ } while (0) /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. */ #define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if ((c) == false) \ return (ENOENT); \ if (l) \ malloc_mutex_lock(&ctl_mtx); \ READONLY(); \ oldval = v; \ READ(oldval, t); \ \ ret = 0; \ -label_return: \ +label_return: \ if (l) \ malloc_mutex_unlock(&ctl_mtx); \ return (ret); \ } #define CTL_RO_CGEN(c, n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if ((c) == false) \ return (ENOENT); \ malloc_mutex_lock(&ctl_mtx); \ READONLY(); \ oldval = v; \ READ(oldval, t); \ \ ret = 0; \ -label_return: \ +label_return: \ malloc_mutex_unlock(&ctl_mtx); \ return (ret); \ } #define CTL_RO_GEN(n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ malloc_mutex_lock(&ctl_mtx); \ READONLY(); \ oldval = v; \ READ(oldval, t); \ \ ret = 0; \ -label_return: \ +label_return: \ malloc_mutex_unlock(&ctl_mtx); \ return (ret); \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. 
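The READ() and WRITE() macros above encode the mallctl(3) old/new convention: oldp/*oldlenp receive the current value (with EINVAL and a truncated copy on a size mismatch), while newp/newlen optionally supply a replacement. In application code that looks like the following sketch, again assuming the FreeBSD malloc_np.h wrappers:

size_t allocated, sz = sizeof(allocated);
mallctl("stats.allocated", &allocated, &sz, NULL, 0);		/* read only */

bool active = false;
mallctl("prof.active", NULL, NULL, &active, sizeof(active));	/* write only */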
*/ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if ((c) == false) \ return (ENOENT); \ READONLY(); \ oldval = v; \ READ(oldval, t); \ \ ret = 0; \ -label_return: \ +label_return: \ return (ret); \ } #define CTL_RO_NL_GEN(n, v, t) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = v; \ READ(oldval, t); \ \ ret = 0; \ -label_return: \ +label_return: \ return (ret); \ } #define CTL_RO_BOOL_CONFIG_GEN(n) \ static int \ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ void *newp, size_t newlen) \ { \ int ret; \ bool oldval; \ \ READONLY(); \ oldval = n; \ READ(oldval, bool); \ \ ret = 0; \ -label_return: \ +label_return: \ return (ret); \ } CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; uint64_t newval; malloc_mutex_lock(&ctl_mtx); - newval = 0; WRITE(newval, uint64_t); - if (newval != 0) + if (newp != NULL) ctl_refresh(); READ(ctl_epoch, uint64_t); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (config_tcache == false) return (ENOENT); oldval = tcache_enabled_get(); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } tcache_enabled_set(*(bool *)newp); } READ(oldval, bool); label_return: ret = 0; return (ret); } static int thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (config_tcache == false) return (ENOENT); - VOID(); + READONLY(); + WRITEONLY(); tcache_flush(); ret = 0; label_return: return (ret); } static int thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned newind, oldind; newind = oldind = choose_arena(NULL)->ind; WRITE(newind, unsigned); READ(oldind, unsigned); if (newind != oldind) { arena_t *arena; if (newind >= narenas) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } /* Initialize arena if necessary. */ malloc_mutex_lock(&arenas_lock); if ((arena = arenas[newind]) == NULL && (arena = arenas_extend(newind)) == NULL) { malloc_mutex_unlock(&arenas_lock); ret = EAGAIN; goto label_return; } assert(arena == arenas[newind]); arenas[oldind]->nthreads--; arenas[newind]->nthreads++; malloc_mutex_unlock(&arenas_lock); /* Set new arena association. 
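Note the behavior change in epoch_ctl above: any write now triggers ctl_refresh(), where the old code refreshed only when a nonzero value was written. The resulting idiom for taking a fresh statistics snapshot is (sketch):

uint64_t epoch = 1;
size_t sz = sizeof(epoch);
mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
/* Subsequent stats.* reads reflect state no older than this call. */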
*/ if (config_tcache) { tcache_t *tcache; if ((uintptr_t)(tcache = *tcache_tsd_get()) > (uintptr_t)TCACHE_STATE_MAX) { tcache_arena_dissociate(tcache); tcache_arena_associate(tcache, arena); } } arenas_tsd_set(&arena); } ret = 0; label_return: return (ret); } CTL_RO_NL_CGEN(config_stats, thread_allocated, thread_allocated_tsd_get()->allocated, uint64_t) CTL_RO_NL_CGEN(config_stats, thread_allocatedp, &thread_allocated_tsd_get()->allocated, uint64_t *) CTL_RO_NL_CGEN(config_stats, thread_deallocated, thread_allocated_tsd_get()->deallocated, uint64_t) CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, &thread_allocated_tsd_get()->deallocated, uint64_t *) /******************************************************************************/ CTL_RO_BOOL_CONFIG_GEN(config_debug) CTL_RO_BOOL_CONFIG_GEN(config_dss) CTL_RO_BOOL_CONFIG_GEN(config_fill) CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) +CTL_RO_BOOL_CONFIG_GEN(config_mremap) CTL_RO_BOOL_CONFIG_GEN(config_munmap) CTL_RO_BOOL_CONFIG_GEN(config_prof) CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) CTL_RO_BOOL_CONFIG_GEN(config_stats) CTL_RO_BOOL_CONFIG_GEN(config_tcache) CTL_RO_BOOL_CONFIG_GEN(config_tls) CTL_RO_BOOL_CONFIG_GEN(config_utrace) CTL_RO_BOOL_CONFIG_GEN(config_valgrind) CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. 
*/ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) /******************************************************************************/ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) -const ctl_node_t * +const ctl_named_node_t * arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) { if (i > NBINS) return (NULL); return (super_arenas_bin_i_node); } CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) -const ctl_node_t * +const ctl_named_node_t * arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) { if (i > nlclasses) return (NULL); return (super_arenas_lrun_i_node); } CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned) static int arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned nread, i; malloc_mutex_lock(&ctl_mtx); READONLY(); if (*oldlenp != narenas * sizeof(bool)) { ret = EINVAL; nread = (*oldlenp < narenas * sizeof(bool)) ? (*oldlenp / sizeof(bool)) : narenas; } else { ret = 0; nread = narenas; } for (i = 0; i < nread; i++) ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) static int arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena; WRITEONLY(); arena = UINT_MAX; WRITE(arena, unsigned); if (newp != NULL && arena >= narenas) { ret = EFAULT; goto label_return; } else { - arena_t *tarenas[narenas]; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); malloc_mutex_lock(&arenas_lock); memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); malloc_mutex_unlock(&arenas_lock); if (arena == UINT_MAX) { unsigned i; for (i = 0; i < narenas; i++) { if (tarenas[i] != NULL) arena_purge_all(tarenas[i]); } } else { assert(arena < narenas); if (tarenas[arena] != NULL) arena_purge_all(tarenas[arena]); } } ret = 0; label_return: return (ret); } /******************************************************************************/ static int prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (config_prof == false) return (ENOENT); malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ oldval = opt_prof_active; if (newp != NULL) { /* * The memory barriers will tend to make opt_prof_active * propagate faster on systems with weak memory ordering. 
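arenas_purge_ctl above is write-only, and the arena index defaults to UINT_MAX when no new value is supplied, which selects the purge-everything path. Usage sketch:

/* Purge unused dirty pages in all arenas. */
mallctl("arenas.purge", NULL, NULL, NULL, 0);

/* Purge a single arena. */
unsigned ind = 0;
mallctl("arenas.purge", NULL, NULL, &ind, sizeof(ind));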
*/ mb_write(); WRITE(opt_prof_active, bool); mb_write(); } READ(oldval, bool); ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } static int prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; if (config_prof == false) return (ENOENT); WRITEONLY(); WRITE(filename, const char *); if (prof_mdump(filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: return (ret); } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) /******************************************************************************/ CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, size_t) CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, ctl_stats.arenas[mib[2]].allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated, ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) -const ctl_node_t * +const ctl_named_node_t * stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) { if (j > NBINS) return (NULL); return (super_stats_arenas_i_bins_j_node); } CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, 
ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) -const ctl_node_t * +const ctl_named_node_t * stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) { if (j > nlclasses) return (NULL); return (super_stats_arenas_i_lruns_j_node); } CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged, uint64_t) -const ctl_node_t * +const ctl_named_node_t * stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) { - const ctl_node_t * ret; + const ctl_named_node_t * ret; malloc_mutex_lock(&ctl_mtx); if (ctl_stats.arenas[i].initialized == false) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); } CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) Index: projects/nand/contrib/jemalloc/src/huge.c =================================================================== --- projects/nand/contrib/jemalloc/src/huge.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/huge.c (revision 235263) @@ -1,312 +1,312 @@ #define JEMALLOC_HUGE_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ uint64_t huge_nmalloc; uint64_t huge_ndalloc; size_t huge_allocated; malloc_mutex_t huge_mtx; /******************************************************************************/ /* Tree of chunks that are stand-alone huge allocations. */ static extent_tree_t huge; void * huge_malloc(size_t size, bool zero) { return (huge_palloc(size, chunksize, zero)); } void * huge_palloc(size_t size, size_t alignment, bool zero) { void *ret; size_t csize; extent_node_t *node; bool is_zeroed; /* Allocate one or more contiguous chunks for this request. */ csize = CHUNK_CEILING(size); if (csize == 0) { /* size is large enough to cause size_t wrap-around. */ return (NULL); } /* Allocate an extent node with which to track the chunk. */ node = base_node_alloc(); if (node == NULL) return (NULL); /* * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that * it is possible to make correct junk/zero fill decisions below. */ is_zeroed = zero; ret = chunk_alloc(csize, alignment, false, &is_zeroed); if (ret == NULL) { base_node_dealloc(node); return (NULL); } /* Insert node into huge. 
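huge_palloc() above rounds the request up to whole chunks and treats a result of 0 as size_t wrap-around. Assuming the usual power-of-two chunk arithmetic (the macro shape below is a reconstruction; the default chunk size in this version is 4 MiB), the math works out as:

/* chunksize is a power of two; chunksize_mask == chunksize - 1. */
#define CHUNK_CEILING(s)	(((s) + chunksize_mask) & ~chunksize_mask)

/* With 4 MiB chunks:
 *   CHUNK_CEILING(5000000)  == 8388608  (two chunks)
 *   CHUNK_CEILING(SIZE_MAX) == 0        (wrapped; caught by the check above)
 */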
*/ node->addr = ret; node->size = csize; malloc_mutex_lock(&huge_mtx); extent_tree_ad_insert(&huge, node); if (config_stats) { stats_cactive_add(csize); huge_nmalloc++; huge_allocated += csize; } malloc_mutex_unlock(&huge_mtx); if (config_fill && zero == false) { if (opt_junk) memset(ret, 0xa5, csize); else if (opt_zero && is_zeroed == false) memset(ret, 0, csize); } return (ret); } void * huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) { /* * Avoid moving the allocation if the size class can be left the same. */ if (oldsize > arena_maxclass && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { assert(CHUNK_CEILING(oldsize) == oldsize); if (config_fill && opt_junk && size < oldsize) { memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size); } return (ptr); } /* Reallocation would require a move. */ return (NULL); } void * huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero) { void *ret; size_t copysize; /* Try to avoid moving the allocation. */ ret = huge_ralloc_no_move(ptr, oldsize, size, extra); if (ret != NULL) return (ret); /* * size and oldsize are different enough that we need to use a * different size class. In that case, fall back to allocating new * space and copying. */ if (alignment > chunksize) ret = huge_palloc(size + extra, alignment, zero); else ret = huge_malloc(size + extra, zero); if (ret == NULL) { if (extra == 0) return (NULL); /* Try again, this time without extra. */ if (alignment > chunksize) ret = huge_palloc(size, alignment, zero); else ret = huge_malloc(size, zero); if (ret == NULL) return (NULL); } /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; +#ifdef JEMALLOC_MREMAP /* * Use mremap(2) if this is a huge-->huge reallocation, and neither the * source nor the destination are in dss. */ -#ifdef JEMALLOC_MREMAP_FIXED if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false))) { size_t newsize = huge_salloc(ret); /* * Remove ptr from the tree of huge allocations before * performing the remap operation, in order to avoid the * possibility of another thread acquiring that mapping before * this one removes it from the tree. */ huge_dalloc(ptr, false); if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, ret) == MAP_FAILED) { /* * Assuming no chunk management bugs in the allocator, * the only documented way an error can occur here is * if the application changed the map type for a * portion of the old allocation. This is firmly in * undefined behavior territory, so write a diagnostic * message, and optionally abort. */ char buf[BUFERROR_BUF]; - buferror(errno, buf, sizeof(buf)); + buferror(buf, sizeof(buf)); malloc_printf("<jemalloc>: Error in mremap(): %s\n", buf); if (opt_abort) abort(); memcpy(ret, ptr, copysize); chunk_dealloc_mmap(ptr, oldsize); } } else #endif { memcpy(ret, ptr, copysize); iqalloc(ptr); } return (ret); } void huge_dalloc(void *ptr, bool unmap) { extent_node_t *node, key; malloc_mutex_lock(&huge_mtx); /* Extract from tree of huge allocations.
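Since huge_salloc() above reports the stored node->size (the chunk-rounded csize), the rounding is visible to applications through malloc_usable_size(3). A hypothetical example under the default 4 MiB chunks:

void *p = malloc(5 * 1000 * 1000);	/* huge allocation: above arena_maxclass */
size_t usable = malloc_usable_size(p);	/* 8388608: rounded up to two chunks */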
*/ key.addr = ptr; node = extent_tree_ad_search(&huge, &key); assert(node != NULL); assert(node->addr == ptr); extent_tree_ad_remove(&huge, node); if (config_stats) { stats_cactive_sub(node->size); huge_ndalloc++; huge_allocated -= node->size; } malloc_mutex_unlock(&huge_mtx); if (unmap && config_fill && config_dss && opt_junk) memset(node->addr, 0x5a, node->size); chunk_dealloc(node->addr, node->size, unmap); base_node_dealloc(node); } size_t huge_salloc(const void *ptr) { size_t ret; extent_node_t *node, key; malloc_mutex_lock(&huge_mtx); /* Extract from tree of huge allocations. */ key.addr = __DECONST(void *, ptr); node = extent_tree_ad_search(&huge, &key); assert(node != NULL); ret = node->size; malloc_mutex_unlock(&huge_mtx); return (ret); } prof_ctx_t * huge_prof_ctx_get(const void *ptr) { prof_ctx_t *ret; extent_node_t *node, key; malloc_mutex_lock(&huge_mtx); /* Extract from tree of huge allocations. */ key.addr = __DECONST(void *, ptr); node = extent_tree_ad_search(&huge, &key); assert(node != NULL); ret = node->prof_ctx; malloc_mutex_unlock(&huge_mtx); return (ret); } void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) { extent_node_t *node, key; malloc_mutex_lock(&huge_mtx); /* Extract from tree of huge allocations. */ key.addr = __DECONST(void *, ptr); node = extent_tree_ad_search(&huge, &key); assert(node != NULL); node->prof_ctx = ctx; malloc_mutex_unlock(&huge_mtx); } bool huge_boot(void) { /* Initialize chunks data. */ if (malloc_mutex_init(&huge_mtx)) return (true); extent_tree_ad_new(&huge); if (config_stats) { huge_nmalloc = 0; huge_ndalloc = 0; huge_allocated = 0; } return (false); } void huge_prefork(void) { malloc_mutex_prefork(&huge_mtx); } void huge_postfork_parent(void) { malloc_mutex_postfork_parent(&huge_mtx); } void huge_postfork_child(void) { malloc_mutex_postfork_child(&huge_mtx); } Index: projects/nand/contrib/jemalloc/src/jemalloc.c =================================================================== --- projects/nand/contrib/jemalloc/src/jemalloc.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/jemalloc.c (revision 235263) @@ -1,1733 +1,1729 @@ #define JEMALLOC_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ malloc_tsd_data(, arenas, arena_t *, NULL) malloc_tsd_data(, thread_allocated, thread_allocated_t, THREAD_ALLOCATED_INITIALIZER) /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */ const char *__malloc_options_1_0 = NULL; __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); /* Runtime configuration options. */ -const char *je_malloc_conf JEMALLOC_ATTR(visibility("default")); +const char *je_malloc_conf; #ifdef JEMALLOC_DEBUG bool opt_abort = true; # ifdef JEMALLOC_FILL bool opt_junk = true; # else bool opt_junk = false; # endif #else bool opt_abort = false; bool opt_junk = false; #endif size_t opt_quarantine = ZU(0); bool opt_redzone = false; bool opt_utrace = false; bool opt_valgrind = false; bool opt_xmalloc = false; bool opt_zero = false; size_t opt_narenas = 0; unsigned ncpus; malloc_mutex_t arenas_lock; arena_t **arenas; unsigned narenas; /* Set to true once the allocator has been initialized. */ static bool malloc_initialized = false; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate.
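The THREADED_INIT scheme above records which thread is running the initialization so that its own recursive allocations pass straight through while other threads wait. A simplified standalone sketch of that idea (the real code also drops init_lock around bootstrap steps such as malloc_ncpus()):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t initializer;
static bool have_initializer, initialized;

static void
do_init(void)
{
	pthread_mutex_lock(&init_lock);
	if (initialized || (have_initializer &&
	    pthread_equal(initializer, pthread_self()))) {
		/* Already done, or a recursive call from the initializer. */
		pthread_mutex_unlock(&init_lock);
		return;
	}
	while (have_initializer) {
		/* Another thread is initializing; busy-wait. */
		pthread_mutex_unlock(&init_lock);
		sched_yield();
		pthread_mutex_lock(&init_lock);
		if (initialized) {
			pthread_mutex_unlock(&init_lock);
			return;
		}
	}
	initializer = pthread_self();
	have_initializer = true;
	pthread_mutex_unlock(&init_lock);

	/* ... bootstrap work here; it may re-enter do_init() safely ... */

	pthread_mutex_lock(&init_lock);
	initialized = true;
	pthread_mutex_unlock(&init_lock);
}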
*/ # define NO_INITIALIZER ((unsigned long)0) # define INITIALIZER pthread_self() # define IS_INITIALIZER (malloc_initializer == pthread_self()) static pthread_t malloc_initializer = NO_INITIALIZER; #else # define NO_INITIALIZER false # define INITIALIZER true # define IS_INITIALIZER malloc_initializer static bool malloc_initializer = NO_INITIALIZER; #endif /* Used to avoid initialization races. */ +#ifdef _WIN32 +static malloc_mutex_t init_lock; + +JEMALLOC_ATTR(constructor) +static void WINAPI +_init_init_lock(void) +{ + + malloc_mutex_init(&init_lock); +} + +#ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) +static const void (WINAPI *init_init_lock)(void) = _init_init_lock; +#endif + +#else static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +#endif typedef struct { void *p; /* Input pointer (as in realloc(p, s)). */ size_t s; /* Request size. */ void *r; /* Result pointer. */ } malloc_utrace_t; #ifdef JEMALLOC_UTRACE # define UTRACE(a, b, c) do { \ if (opt_utrace) { \ malloc_utrace_t ut; \ ut.p = (a); \ ut.s = (b); \ ut.r = (c); \ utrace(&ut, sizeof(ut)); \ } \ } while (0) #else # define UTRACE(a, b, c) #endif /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static void stats_print_atexit(void); static unsigned malloc_ncpus(void); static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, char const **v_p, size_t *vlen_p); static void malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, size_t vlen); static void malloc_conf_init(void); static bool malloc_init_hard(void); static int imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment); /******************************************************************************/ /* * Begin miscellaneous support functions. */ /* Create a new arena and insert it into the arenas array at index ind. */ arena_t * arenas_extend(unsigned ind) { arena_t *ret; ret = (arena_t *)base_alloc(sizeof(arena_t)); if (ret != NULL && arena_new(ret, ind) == false) { arenas[ind] = ret; return (ret); } /* Only reached if there is an OOM error. */ /* * OOM here is quite inconvenient to propagate, since dealing with it * would require a check for failure in the fast path. Instead, punt * by using arenas[0]. In practice, this is an extremely unlikely * failure. */ malloc_write(": Error initializing arena\n"); if (opt_abort) abort(); return (arenas[0]); } /* Slow path, called only by choose_arena(). */ arena_t * choose_arena_hard(void) { arena_t *ret; if (narenas > 1) { unsigned i, choose, first_null; choose = 0; first_null = narenas; malloc_mutex_lock(&arenas_lock); assert(arenas[0] != NULL); for (i = 1; i < narenas; i++) { if (arenas[i] != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ if (arenas[i]->nthreads < arenas[choose]->nthreads) choose = i; } else if (first_null == narenas) { /* * Record the index of the first uninitialized * arena, in case all extant arenas are in use. * * NB: It is possible for there to be * discontinuities in terms of initialized * versus uninitialized arenas, due to the * "thread.arena" mallctl. */ first_null = i; } } if (arenas[choose]->nthreads == 0 || first_null == narenas) { /* * Use an unloaded arena, or the least loaded arena if * all arenas are already initialized. */ ret = arenas[choose]; } else { /* Initialize a new arena. 
*/ ret = arenas_extend(first_null); } ret->nthreads++; malloc_mutex_unlock(&arenas_lock); } else { ret = arenas[0]; malloc_mutex_lock(&arenas_lock); ret->nthreads++; malloc_mutex_unlock(&arenas_lock); } arenas_tsd_set(&ret); return (ret); } static void stats_print_atexit(void) { if (config_tcache && config_stats) { unsigned i; /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats * events. As a consequence, the final stats may be slightly * out of date by the time they are reported, if other threads * continue to allocate. */ for (i = 0; i < narenas; i++) { arena_t *arena = arenas[i]; if (arena != NULL) { tcache_t *tcache; /* * tcache_stats_merge() locks bins, so if any * code is introduced that acquires both arena * and bin locks in the opposite order, * deadlocks may result. */ malloc_mutex_lock(&arena->lock); ql_foreach(tcache, &arena->tcache_ql, link) { tcache_stats_merge(tcache, arena); } malloc_mutex_unlock(&arena->lock); } } } je_malloc_stats_print(NULL, NULL, NULL); } /* * End miscellaneous support functions. */ /******************************************************************************/ /* * Begin initialization functions. */ static unsigned malloc_ncpus(void) { unsigned ret; long result; +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwNumberOfProcessors; +#else result = sysconf(_SC_NPROCESSORS_ONLN); if (result == -1) { /* Error. */ ret = 1; } +#endif ret = (unsigned)result; return (ret); } void arenas_cleanup(void *arg) { arena_t *arena = *(arena_t **)arg; malloc_mutex_lock(&arenas_lock); arena->nthreads--; malloc_mutex_unlock(&arenas_lock); } static inline bool malloc_init(void) { if (malloc_initialized == false) return (malloc_init_hard()); return (false); } static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; *k_p = opts; for (accept = false; accept == false;) { switch (*opts) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '_': opts++; break; case ':': opts++; *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; *v_p = opts; accept = true; break; case '\0': if (opts != *opts_p) { malloc_write(": Conf string ends " "with key\n"); } return (true); default: malloc_write(": Malformed conf string\n"); return (true); } } for (accept = false; accept == false;) { switch (*opts) { case ',': opts++; /* * Look ahead one character here, because the next time * this function is called, it will assume that end of * input has been cleanly reached if no input remains, * but we have optimistically already consumed the * comma if one exists. 
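malloc_conf_next() above consumes one key:value pair per call from a comma-separated option string such as MALLOC_CONF="abort:true,narenas:4". A standalone sketch of the same tokenization (simplified: it assumes well-formed input, where the real parser also validates key characters and trailing separators):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *opts = "abort:true,narenas:4";

	while (*opts != '\0') {
		const char *colon = strchr(opts, ':');
		const char *comma = strchr(colon + 1, ',');
		const char *vend = (comma != NULL) ? comma :
		    strchr(colon + 1, '\0');

		printf("key=%.*s value=%.*s\n", (int)(colon - opts), opts,
		    (int)(vend - (colon + 1)), colon + 1);
		opts = (comma != NULL) ? comma + 1 : vend;
	}
	return (0);
}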
*/ if (*opts == '\0') { malloc_write(": Conf string ends " "with comma\n"); } *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; accept = true; break; case '\0': *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; accept = true; break; default: opts++; break; } } *opts_p = opts; return (false); } static void malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, size_t vlen) { malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); } static void malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; size_t klen, vlen; for (i = 0; i < 3; i++) { /* Get runtime configuration. */ switch (i) { case 0: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the * program. */ opts = je_malloc_conf; } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; case 1: { +#ifndef _WIN32 int linklen; const char *linkname = -#ifdef JEMALLOC_PREFIX +# ifdef JEMALLOC_PREFIX "/etc/"JEMALLOC_PREFIX"malloc.conf" -#else +# else "/etc/malloc.conf" -#endif +# endif ; if ((linklen = readlink(linkname, buf, sizeof(buf) - 1)) != -1) { /* * Use the contents of the "/etc/malloc.conf" * symbolic link's name. */ buf[linklen] = '\0'; opts = buf; - } else { + } else +#endif + { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; } case 2: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" #else "MALLOC_CONF" #endif ; if (issetugid() == 0 && (opts = getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment * variable. */ } else { /* No configuration specified. */ buf[0] = '\0'; opts = buf; } break; } default: /* NOTREACHED */ assert(false); buf[0] = '\0'; opts = buf; } while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, &vlen) == false) { #define CONF_HANDLE_BOOL_HIT(o, n, hit) \ if (sizeof(n)-1 == klen && strncmp(n, k, \ klen) == 0) { \ if (strncmp("true", v, vlen) == 0 && \ vlen == sizeof("true")-1) \ o = true; \ else if (strncmp("false", v, vlen) == \ 0 && vlen == sizeof("false")-1) \ o = false; \ else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ hit = true; \ } else \ hit = false; #define CONF_HANDLE_BOOL(o, n) { \ bool hit; \ CONF_HANDLE_BOOL_HIT(o, n, hit); \ if (hit) \ continue; \ } #define CONF_HANDLE_SIZE_T(o, n, min, max) \ if (sizeof(n)-1 == klen && strncmp(n, k, \ klen) == 0) { \ uintmax_t um; \ char *end; \ \ - errno = 0; \ + set_errno(0); \ um = malloc_strtoumax(v, &end, 0); \ - if (errno != 0 || (uintptr_t)end - \ + if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (um < min || um > max) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ } else \ o = um; \ continue; \ } #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (sizeof(n)-1 == klen && strncmp(n, k, \ klen) == 0) { \ long l; \ char *end; \ \ - errno = 0; \ + set_errno(0); \ l = strtol(v, &end, 0); \ - if (errno != 0 || (uintptr_t)end - \ + if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } else if (l < (ssize_t)min || l > \ (ssize_t)max) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ } else \ o = l; \ continue; \ } #define CONF_HANDLE_CHAR_P(o, n, d) \ if (sizeof(n)-1 == klen && strncmp(n, k, \ klen) == 0) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? 
vlen : \ sizeof(o)-1; \ strncpy(o, v, cpylen); \ o[cpylen] = '\0'; \ continue; \ } CONF_HANDLE_BOOL(opt_abort, "abort") /* * Chunks always require at least one header page, plus * one data page in the absence of redzones, or three * pages in the presence of redzones. In order to * simplify options processing, fix the limit based on * config_fill. */ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1) CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, SIZE_T_MAX) CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", -1, (sizeof(size_t) << 3) - 1) CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (config_fill) { CONF_HANDLE_BOOL(opt_junk, "junk") CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 0, SIZE_T_MAX) CONF_HANDLE_BOOL(opt_redzone, "redzone") CONF_HANDLE_BOOL(opt_zero, "zero") } if (config_utrace) { CONF_HANDLE_BOOL(opt_utrace, "utrace") } if (config_valgrind) { bool hit; CONF_HANDLE_BOOL_HIT(opt_valgrind, "valgrind", hit) if (config_fill && opt_valgrind && hit) { opt_junk = false; opt_zero = false; if (opt_quarantine == 0) { opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; } opt_redzone = true; } if (hit) continue; } if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } if (config_tcache) { CONF_HANDLE_BOOL(opt_tcache, "tcache") CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", -1, (sizeof(size_t) << 3) - 1) } if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active") CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - 1) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, (sizeof(uint64_t) << 3) - 1) CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") CONF_HANDLE_BOOL(opt_prof_final, "prof_final") CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_HANDLE_BOOL #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P } } } static bool malloc_init_hard(void) { arena_t *init_arenas[1]; malloc_mutex_lock(&init_lock); if (malloc_initialized || IS_INITIALIZER) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ malloc_mutex_unlock(&init_lock); return (false); } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { /* Busy-wait until the initializing thread completes. */ do { malloc_mutex_unlock(&init_lock); CPU_SPINWAIT; malloc_mutex_lock(&init_lock); } while (malloc_initialized == false); malloc_mutex_unlock(&init_lock); return (false); } #endif malloc_initializer = INITIALIZER; malloc_tsd_boot(); if (config_prof) prof_boot0(); malloc_conf_init(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE)) +#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ + && !defined(_WIN32)) /* Register fork handlers. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write(": Error in pthread_atfork()\n"); if (opt_abort) abort(); } #endif if (opt_stats_print) { /* Print statistics at exit. 
*/ if (atexit(stats_print_atexit) != 0) { malloc_write(": Error in atexit()\n"); if (opt_abort) abort(); } } if (base_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (chunk_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (ctl_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (config_prof) prof_boot1(); arena_boot(); if (config_tcache && tcache_boot0()) { malloc_mutex_unlock(&init_lock); return (true); } if (huge_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (malloc_mutex_init(&arenas_lock)) return (true); /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ narenas = 1; arenas = init_arenas; memset(arenas, 0, sizeof(arena_t *) * narenas); /* * Initialize one arena here. The rest are lazily created in * choose_arena_hard(). */ arenas_extend(0); if (arenas[0] == NULL) { malloc_mutex_unlock(&init_lock); return (true); } /* Initialize allocation counters before any allocations can occur. */ if (config_stats && thread_allocated_tsd_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (arenas_tsd_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (config_tcache && tcache_boot1()) { malloc_mutex_unlock(&init_lock); return (true); } if (config_fill && quarantine_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (config_prof && prof_boot2()) { malloc_mutex_unlock(&init_lock); return (true); } /* Get number of CPUs. */ malloc_mutex_unlock(&init_lock); ncpus = malloc_ncpus(); malloc_mutex_lock(&init_lock); if (mutex_boot()) { malloc_mutex_unlock(&init_lock); return (true); } if (opt_narenas == 0) { /* * For SMP systems, create more than one arena per CPU by * default. */ if (ncpus > 1) opt_narenas = ncpus << 2; else opt_narenas = 1; } narenas = opt_narenas; /* * Make sure that the arenas array can be allocated. In practice, this * limit is enough to allow the allocator to function, but the ctl * machinery will fail to allocate memory at far lower limits. */ if (narenas > chunksize / sizeof(arena_t *)) { narenas = chunksize / sizeof(arena_t *); malloc_printf(": Reducing narenas to limit (%d)\n", narenas); } /* Allocate and initialize arenas. */ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); if (arenas == NULL) { malloc_mutex_unlock(&init_lock); return (true); } /* * Zero the array. In practice, this should always be pre-zeroed, * since it was just mmap()ed, but let's be sure. */ memset(arenas, 0, sizeof(arena_t *) * narenas); /* Copy the pointer to the one arena that was already initialized. */ arenas[0] = init_arenas[0]; malloc_initialized = true; malloc_mutex_unlock(&init_lock); return (false); } /* * End initialization functions. */ /******************************************************************************/ /* * Begin malloc(3)-compatible functions. 
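To make the arena sizing above concrete with hypothetical numbers: with 8 CPUs the SMP default is ncpus << 2 = 32 arenas, and the cap keeps the arena pointer array within a single chunk. As a fragment (variable names here are illustrative, not the allocator's own):

unsigned ncpus = 8;
size_t narenas = (ncpus > 1) ? (size_t)ncpus << 2 : 1;	/* 32 */
size_t cap = ((size_t)4 << 20) / sizeof(void *);	/* 4 MiB chunk / 8 = 524288 on LP64 */
if (narenas > cap)
	narenas = cap;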
*/ -JEMALLOC_ATTR(malloc) -JEMALLOC_ATTR(visibility("default")) void * je_malloc(size_t size) { void *ret; - size_t usize; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); if (malloc_init()) { ret = NULL; goto label_oom; } if (size == 0) size = 1; if (config_prof && opt_prof) { usize = s2u(size); PROF_ALLOC_PREP(1, usize, cnt); if (cnt == NULL) { ret = NULL; goto label_oom; } if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= SMALL_MAXCLASS) { ret = imalloc(SMALL_MAXCLASS+1); if (ret != NULL) arena_prof_promoted(ret, usize); } else ret = imalloc(size); } else { if (config_stats || (config_valgrind && opt_valgrind)) usize = s2u(size); ret = imalloc(size); } label_oom: if (ret == NULL) { if (config_xmalloc && opt_xmalloc) { malloc_write(": Error in malloc(): " "out of memory\n"); abort(); } - errno = ENOMEM; + set_errno(ENOMEM); } if (config_prof && opt_prof && ret != NULL) prof_malloc(ret, usize, cnt); if (config_stats && ret != NULL) { assert(usize == isalloc(ret, config_prof)); thread_allocated_tsd_get()->allocated += usize; } UTRACE(0, size, ret); JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); return (ret); } JEMALLOC_ATTR(nonnull(1)) #ifdef JEMALLOC_PROF /* * Avoid any uncertainty as to how many backtrace frames to ignore in * PROF_ALLOC_PREP(). */ JEMALLOC_ATTR(noinline) #endif static int imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) { int ret; size_t usize; void *result; prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); assert(min_alignment != 0); if (malloc_init()) result = NULL; else { if (size == 0) size = 1; /* Make sure that alignment is a large enough power of 2. */ if (((alignment - 1) & alignment) != 0 || (alignment < min_alignment)) { if (config_xmalloc && opt_xmalloc) { malloc_write(": Error allocating " "aligned memory: invalid alignment\n"); abort(); } result = NULL; ret = EINVAL; goto label_return; } usize = sa2u(size, alignment); if (usize == 0) { result = NULL; ret = ENOMEM; goto label_return; } if (config_prof && opt_prof) { PROF_ALLOC_PREP(2, usize, cnt); if (cnt == NULL) { result = NULL; ret = EINVAL; } else { if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= SMALL_MAXCLASS) { assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0); result = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment, false); if (result != NULL) { arena_prof_promoted(result, usize); } } else { result = ipalloc(usize, alignment, false); } } } else result = ipalloc(usize, alignment, false); } if (result == NULL) { if (config_xmalloc && opt_xmalloc) { malloc_write(": Error allocating aligned " "memory: out of memory\n"); abort(); } ret = ENOMEM; goto label_return; } *memptr = result; ret = 0; label_return: if (config_stats && result != NULL) { assert(usize == isalloc(result, config_prof)); thread_allocated_tsd_get()->allocated += usize; } if (config_prof && opt_prof && result != NULL) prof_malloc(result, usize, cnt); UTRACE(0, size, result); return (ret); } -JEMALLOC_ATTR(nonnull(1)) -JEMALLOC_ATTR(visibility("default")) int je_posix_memalign(void **memptr, size_t alignment, size_t size) { int ret = imemalign(memptr, alignment, size, sizeof(void *)); JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, config_prof), false); return (ret); } -JEMALLOC_ATTR(malloc) -JEMALLOC_ATTR(visibility("default")) void * je_aligned_alloc(size_t alignment, size_t size) { void *ret; int err; if ((err = imemalign(&ret, alignment, size, 1)) != 0) { ret = NULL; - errno = err; + set_errno(err); } 
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
-   size_t usize;
+   size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }

    num_size = num * size;
    if (num_size == 0) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_return;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

label_return:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
-       errno = ENOMEM;
+       set_errno(ENOMEM);
    }

    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}
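/*
 * Aside (not part of the diff): je_calloc() above avoids a division on the
 * common path: if neither operand uses the high half of size_t's bits, the
 * product cannot overflow, so num_size / size is only computed as a fallback.
 * The same check in a standalone form (SIZE_MAX stands in for the internal
 * SIZE_T_MAX macro):
 */
#include <stdbool.h>
#include <stdint.h>

static bool
mul_overflows(size_t num, size_t size, size_t *prod)
{
    *prod = num * size;
    if (*prod == 0)
        return (false);
    /* High-half mask, e.g. 0xffffffff00000000 on LP64. */
    if (((num | size) & (SIZE_MAX << (sizeof(size_t) << 2))) == 0)
        return (false);             /* both operands small: cannot overflow */
    return (*prod / size != num);   /* slow, exact check */
}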
-JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
-   size_t usize;
+   size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_size = 0;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            if (config_prof) {
                old_size = isalloc(ptr, true);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = p2rz(ptr);
            } else if (config_stats) {
                old_size = isalloc(ptr, false);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = u2rz(old_size);
            } else if (config_valgrind && opt_valgrind) {
                old_size = isalloc(ptr, false);
                old_rzsize = u2rz(old_size);
            }
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            iqalloc(ptr);
            ret = NULL;
            goto label_return;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof) {
            old_size = isalloc(ptr, true);
            if (config_valgrind && opt_valgrind)
                old_rzsize = p2rz(ptr);
        } else if (config_stats) {
            old_size = isalloc(ptr, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(ptr, false);
            old_rzsize = u2rz(old_size);
        }
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto label_oom;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats || (config_valgrind && opt_valgrind))
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

label_oom:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
-           errno = ENOMEM;
+           set_errno(ENOMEM);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats || (config_valgrind &&
                    opt_valgrind))
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
-           errno = ENOMEM;
+           set_errno(ENOMEM);
        }
    }

label_return:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        thread_allocated_t *ta;
        assert(usize == isalloc(ret, config_prof));
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
    return (ret);
}

-JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (ptr != NULL) {
        size_t usize;
        size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof && opt_prof) {
            usize = isalloc(ptr, config_prof);
            prof_free(ptr, usize);
        } else if (config_stats || config_valgrind)
            usize = isalloc(ptr, config_prof);
        if (config_stats)
            thread_allocated_tsd_get()->deallocated += usize;
        if (config_valgrind && opt_valgrind)
            rzsize = p2rz(ptr);
        iqalloc(ptr);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
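/*
 * Aside (not part of the diff): the two special cases in je_realloc() above
 * amount to the following caller-visible contract; in this implementation a
 * shrink-to-zero frees the object and returns NULL.
 */
#include <stdlib.h>

static void
realloc_contract_demo(void)
{
    void *p;

    p = realloc(NULL, 64);  /* equivalent to malloc(64) */
    if (p == NULL)
        return;
    p = realloc(p, 0);      /* equivalent to free(p); yields NULL here */
    /* The old object is gone either way; p must not be dereferenced. */
}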
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
-JEMALLOC_ATTR(visibility("default"))
-void (* const __free_hook)(void *ptr) = je_free;
-
-JEMALLOC_ATTR(visibility("default"))
-void *(* const __malloc_hook)(size_t size) = je_malloc;
-
-JEMALLOC_ATTR(visibility("default"))
-void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
-
-JEMALLOC_ATTR(visibility("default"))
-void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
+JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
+    je_realloc;
+JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
+    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

-JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        ret = ivsalloc(ptr, config_prof);
    else
        ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

    return (ret);
}

-JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

-JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

-JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

-JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
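/*
 * Aside (not part of the diff): the mallctl*() trio above is the public
 * introspection interface.  A typical caller, assuming the unprefixed public
 * names (je_mallctl is exposed as mallctl via <malloc_np.h> on FreeBSD) and
 * a build with statistics support compiled in:
 */
#include <malloc_np.h>
#include <stdint.h>
#include <stdio.h>

static void
print_allocated(void)
{
    uint64_t epoch = 1;
    size_t allocated, len;

    /* Refresh the cached statistics before reading them. */
    len = sizeof(epoch);
    if (mallctl("epoch", &epoch, &len, &epoch, len) != 0)
        return;
    len = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0)
        printf("allocated: %zu bytes\n", allocated);
}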
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
        alignment)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}

-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
-   prof_thr_cnt_t *cnt;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto label_oom;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        goto label_oom;

    if (config_prof && opt_prof) {
+       prof_thr_cnt_t *cnt;
+
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto label_oom;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto label_oom;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto label_oom;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
    return (ALLOCM_SUCCESS);
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    UTRACE(0, size, 0);
    return (ALLOCM_ERR_OOM);
}
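/*
 * Aside (not part of the diff): allocm()'s flags word packs the base-2 log of
 * the requested alignment into its low bits; the expression
 * (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)) above decodes it.  A sketch of a
 * caller, assuming the experimental API is exposed (as it is via
 * <malloc_np.h> on FreeBSD):
 */
#include <malloc_np.h>

static void *
aligned_zeroed(size_t size, size_t *usable)
{
    void *p;

    /* Request 4 KiB alignment (lg 12), zero-filled memory. */
    if (allocm(&p, usable, size, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) !=
        ALLOCM_SUCCESS)
        return (NULL);
    return (p);     /* *usable holds the real (possibly larger) size */
}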
-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    void *p, *q;
    size_t usize;
    size_t old_size;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;
-   prof_thr_cnt_t *cnt;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || IS_INITIALIZER);

    p = *ptr;
    if (config_prof && opt_prof) {
+       prof_thr_cnt_t *cnt;
+
        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p, true);
        if (config_valgrind && opt_valgrind)
            old_rzsize = p2rz(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
            <= SMALL_MAXCLASS) {
            q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            if (max_usize < PAGE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q, config_prof);
        } else {
            q = iralloc(p, size, extra, alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            usize = isalloc(q, config_prof);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else {
        if (config_stats) {
            old_size = isalloc(p, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(p, false);
            old_rzsize = u2rz(old_size);
        }
        q = iralloc(p, size, extra, alignment, zero, no_move);
        if (q == NULL)
            goto label_err;
        if (config_stats)
            usize = isalloc(q, config_prof);
        if (rsize != NULL) {
            if (config_stats == false)
                usize = isalloc(q, config_prof);
            *rsize = usize;
        }
    }

    *ptr = q;
    if (config_stats) {
        thread_allocated_t *ta;
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(p, size, q);
    JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
    return (ALLOCM_SUCCESS);
label_err:
    if (no_move) {
        UTRACE(p, size, q);
        return (ALLOCM_ERR_NOT_MOVED);
    }
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
    UTRACE(p, size, 0);
    return (ALLOCM_ERR_OOM);
}

-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        sz = ivsalloc(ptr, config_prof);
    else {
        assert(ptr != NULL);
        sz = isalloc(ptr, config_prof);
    }
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}

-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
    size_t usize;
    size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    UTRACE(ptr, 0, 0);
    if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_prof && opt_prof) {
        if (config_stats == false && config_valgrind == false)
            usize = isalloc(ptr, config_prof);
        prof_free(ptr, usize);
    }
    if (config_stats)
        thread_allocated_tsd_get()->deallocated += usize;
    if (config_valgrind && opt_valgrind)
        rzsize = p2rz(ptr);
    iqalloc(ptr);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);

    return (ALLOCM_SUCCESS);
}

-JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));

    assert(size != 0);

    if (malloc_init())
        return (ALLOCM_ERR_OOM);

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        return (ALLOCM_ERR_OOM);

    if (rsize != NULL)
        *rsize = usize;
    return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
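/*
 * Aside (not part of the diff): on platforms without the libthr callback
 * mechanism, fork handlers like the ones below are typically registered once
 * during bootstrap via pthread_atfork(), so the child of a multi-threaded
 * fork() inherits unlocked, consistent allocator state.  A minimal sketch:
 */
#include <pthread.h>

extern void jemalloc_prefork(void);
extern void jemalloc_postfork_parent(void);
extern void jemalloc_postfork_child(void);

static void
install_fork_handlers(void)
{
    /* Called once during bootstrap, before the process can fork(). */
    pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child);
}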
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
-JEMALLOC_ATTR(visibility("default"))
-void
+JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    unsigned i;

    /* Acquire all mutexes in a safe order. */
    malloc_mutex_prefork(&arenas_lock);
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_prefork(arenas[i]);
    }
    base_prefork();
    huge_prefork();
    chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
-JEMALLOC_ATTR(visibility("default"))
-void
+JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
    unsigned i;

    /* Release all mutexes, now that fork() has completed. */
    chunk_dss_postfork_parent();
    huge_postfork_parent();
    base_postfork_parent();
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_postfork_parent(arenas[i]);
    }
    malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
    unsigned i;

    /* Release all mutexes, now that fork() has completed. */
    chunk_dss_postfork_child();
    huge_postfork_child();
    base_postfork_child();
    for (i = 0; i < narenas; i++) {
        if (arenas[i] != NULL)
            arena_postfork_child(arenas[i]);
    }
    malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

    if (malloc_init())
        return (NULL);

    if (size == 0)
        size = 1;

    if (size <= arena_maxclass)
        return (arena_malloc(arenas[0], size, zero, false));
    else
        return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, false);
    else
        huge_dalloc(ptr, true);
}

/******************************************************************************/
Index: projects/nand/contrib/jemalloc/src/mutex.c
===================================================================
--- projects/nand/contrib/jemalloc/src/mutex.c	(revision 235262)
+++ projects/nand/contrib/jemalloc/src/mutex.c	(revision 235263)
@@ -1,153 +1,160 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"

-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif

+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif

-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */
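/*
 * Aside (not part of the diff): the wrapper below uses the standard
 * dlsym(RTLD_NEXT, ...) interposition idiom.  The same pattern applied to an
 * arbitrary libc function, purely for illustration (a real interposer must be
 * linked or preloaded ahead of libc):
 */
#include <dlfcn.h>      /* RTLD_NEXT; needs _GNU_SOURCE on glibc */
#include <stdlib.h>

static int (*real_puts)(const char *);

int
puts(const char *s)     /* interposes libc's puts() */
{
    if (real_puts == NULL) {
        real_puts = (int (*)(const char *))dlsym(RTLD_NEXT, "puts");
        if (real_puts == NULL)
            abort();    /* lookup failed; nothing sane to do */
    }
    /* ... observe the call here (e.g. flip a "threaded" flag) ... */
    return (real_puts(s));
}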
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);

static void
pthread_create_once(void)
{

    pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
    if (pthread_create_fptr == NULL) {
        malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
            "\"pthread_create\")\n");
        abort();
    }

    isthreaded = true;
}

-JEMALLOC_ATTR(visibility("default"))
-int
+JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg)
{
    static pthread_once_t once_control = PTHREAD_ONCE_INIT;

    pthread_once(&once_control, pthread_create_once);

    return (pthread_create_fptr(thread, attr, start_routine, arg));
}
#endif

/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));

__weak_reference(_pthread_mutex_init_calloc_cb_stub,
    _pthread_mutex_init_calloc_cb);

int
_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{

    return (0);
}
#endif

bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
-#ifdef JEMALLOC_OSSPIN
+
+#ifdef _WIN32
+   if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
+       _CRT_SPINCOUNT))
+       return (true);
+#elif (defined(JEMALLOC_OSSPIN))
    mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
    if (postpone_init) {
        mutex->postponed_next = postponed_mutexes;
        postponed_mutexes = mutex;
    } else {
        if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
            0)
            return (true);
    }
#else
    pthread_mutexattr_t attr;

    if (pthread_mutexattr_init(&attr) != 0)
        return (true);
    pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
    if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        return (true);
    }
    pthread_mutexattr_destroy(&attr);
-
#endif
    return (false);
}

void
malloc_mutex_prefork(malloc_mutex_t *mutex)
{

    malloc_mutex_lock(mutex);
}

void
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{

    malloc_mutex_unlock(mutex);
}

void
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{

#ifdef JEMALLOC_MUTEX_INIT_CB
    malloc_mutex_unlock(mutex);
#else
    if (malloc_mutex_init(mutex)) {
        malloc_printf("<jemalloc>: Error re-initializing mutex in "
            "child\n");
        if (opt_abort)
            abort();
    }
#endif
}

bool
mutex_boot(void)
{

#ifdef JEMALLOC_MUTEX_INIT_CB
    postpone_init = false;
    while (postponed_mutexes != NULL) {
        if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
            base_calloc) != 0)
            return (true);
        postponed_mutexes = postponed_mutexes->postponed_next;
    }
#endif
    return (false);
}
Index: projects/nand/contrib/jemalloc/src/prof.c
===================================================================
--- projects/nand/contrib/jemalloc/src/prof.c	(revision 235262)
+++ projects/nand/contrib/jemalloc/src/prof.c	(revision 235263)
@@ -1,1244 +1,1273 @@
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data.
*/ malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL) bool opt_prof = false; bool opt_prof_active = true; size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; bool opt_prof_gdump = false; bool opt_prof_final = true; bool opt_prof_leak = false; bool opt_prof_accum = false; char opt_prof_prefix[PATH_MAX + 1]; uint64_t prof_interval; bool prof_promote; /* * Table of mutexes that are shared among ctx's. These are leaf locks, so * there is no problem with using them for more than one ctx at the same time. * The primary motivation for this sharing though is that ctx's are ephemeral, * and destroying mutexes causes complications for systems that allocate when * creating/destroying mutexes. */ static malloc_mutex_t *ctx_locks; static unsigned cum_ctxs; /* Atomic counter. */ /* * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data * structure that knows about all backtraces currently captured. */ static ckh_t bt2ctx; static malloc_mutex_t bt2ctx_mtx; static malloc_mutex_t prof_dump_seq_mtx; static uint64_t prof_dump_seq; static uint64_t prof_dump_iseq; static uint64_t prof_dump_mseq; static uint64_t prof_dump_useq; /* * This buffer is rather large for stack allocation, so use a single buffer for * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since * it must be locked anyway during dumping. */ static char prof_dump_buf[PROF_DUMP_BUFSIZE]; static unsigned prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ static bool prof_booted = false; -static malloc_mutex_t enq_mtx; -static bool enq; -static bool enq_idump; -static bool enq_gdump; - /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static prof_bt_t *bt_dup(prof_bt_t *bt); static void bt_destroy(prof_bt_t *bt); #ifdef JEMALLOC_PROF_LIBGCC static _Unwind_Reason_Code prof_unwind_init_callback( struct _Unwind_Context *context, void *arg); static _Unwind_Reason_Code prof_unwind_callback( struct _Unwind_Context *context, void *arg); #endif static bool prof_flush(bool propagate_err); static bool prof_write(bool propagate_err, const char *s); static bool prof_printf(bool propagate_err, const char *format, ...) JEMALLOC_ATTR(format(printf, 2, 3)); static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx); static void prof_ctx_destroy(prof_ctx_t *ctx); static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt); static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt); static bool prof_dump_maps(bool propagate_err); static bool prof_dump(bool propagate_err, const char *filename, bool leakcheck); static void prof_dump_filename(char *filename, char v, int64_t vseq); static void prof_fdump(void); static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2); static bool prof_bt_keycomp(const void *k1, const void *k2); static malloc_mutex_t *prof_ctx_mutex_choose(void); /******************************************************************************/ void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; bt->len = 0; } static void bt_destroy(prof_bt_t *bt) { cassert(config_prof); idalloc(bt); } static prof_bt_t * bt_dup(prof_bt_t *bt) { prof_bt_t *ret; cassert(config_prof); /* * Create a single allocation that has space for vec immediately * following the prof_bt_t structure. 
The backtraces that get * stored in the backtrace caches are copied from stack-allocated * temporary variables, so size is known at creation time. Making this * a contiguous object improves cache locality. */ ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + (bt->len * sizeof(void *))); if (ret == NULL) return (NULL); ret->vec = (void **)((uintptr_t)ret + QUANTUM_CEILING(sizeof(prof_bt_t))); memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); ret->len = bt->len; return (ret); } static inline void -prof_enter(void) +prof_enter(prof_tdata_t *prof_tdata) { cassert(config_prof); - malloc_mutex_lock(&enq_mtx); - enq = true; - malloc_mutex_unlock(&enq_mtx); + assert(prof_tdata->enq == false); + prof_tdata->enq = true; malloc_mutex_lock(&bt2ctx_mtx); } static inline void -prof_leave(void) +prof_leave(prof_tdata_t *prof_tdata) { bool idump, gdump; cassert(config_prof); malloc_mutex_unlock(&bt2ctx_mtx); - malloc_mutex_lock(&enq_mtx); - enq = false; - idump = enq_idump; - enq_idump = false; - gdump = enq_gdump; - enq_gdump = false; - malloc_mutex_unlock(&enq_mtx); + assert(prof_tdata->enq); + prof_tdata->enq = false; + idump = prof_tdata->enq_idump; + prof_tdata->enq_idump = false; + gdump = prof_tdata->enq_gdump; + prof_tdata->enq_gdump = false; if (idump) prof_idump(); if (gdump) prof_gdump(); } #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt, unsigned nignore) { unw_context_t uc; unw_cursor_t cursor; unsigned i; int err; cassert(config_prof); assert(bt->len == 0); assert(bt->vec != NULL); unw_getcontext(&uc); unw_init_local(&cursor, &uc); /* Throw away (nignore+1) stack frames, if that many exist. */ for (i = 0; i < nignore + 1; i++) { err = unw_step(&cursor); if (err <= 0) return; } /* * Iterate over stack frames until there are no more, or until no space * remains in bt. 
*/ for (i = 0; i < PROF_BT_MAX; i++) { unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); bt->len++; err = unw_step(&cursor); if (err <= 0) break; } } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); return (_URC_NO_REASON); } static _Unwind_Reason_Code prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; cassert(config_prof); if (data->nignore > 0) data->nignore--; else { data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); data->bt->len++; if (data->bt->len == data->max) return (_URC_END_OF_STACK); } return (_URC_NO_REASON); } void prof_backtrace(prof_bt_t *bt, unsigned nignore) { prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX}; cassert(config_prof); _Unwind_Backtrace(prof_unwind_callback, &data); } #elif (defined(JEMALLOC_PROF_GCC)) void prof_backtrace(prof_bt_t *bt, unsigned nignore) { #define BT_FRAME(i) \ if ((i) < nignore + PROF_BT_MAX) { \ void *p; \ if (__builtin_frame_address(i) == 0) \ return; \ p = __builtin_return_address(i); \ if (p == NULL) \ return; \ if (i >= nignore) { \ bt->vec[(i) - nignore] = p; \ bt->len = (i) - nignore + 1; \ } \ } else \ return; cassert(config_prof); assert(nignore <= 3); BT_FRAME(0) BT_FRAME(1) BT_FRAME(2) BT_FRAME(3) BT_FRAME(4) BT_FRAME(5) BT_FRAME(6) BT_FRAME(7) BT_FRAME(8) BT_FRAME(9) BT_FRAME(10) BT_FRAME(11) BT_FRAME(12) BT_FRAME(13) BT_FRAME(14) BT_FRAME(15) BT_FRAME(16) BT_FRAME(17) BT_FRAME(18) BT_FRAME(19) BT_FRAME(20) BT_FRAME(21) BT_FRAME(22) BT_FRAME(23) BT_FRAME(24) BT_FRAME(25) BT_FRAME(26) BT_FRAME(27) BT_FRAME(28) BT_FRAME(29) BT_FRAME(30) BT_FRAME(31) BT_FRAME(32) BT_FRAME(33) BT_FRAME(34) BT_FRAME(35) BT_FRAME(36) BT_FRAME(37) BT_FRAME(38) BT_FRAME(39) BT_FRAME(40) BT_FRAME(41) BT_FRAME(42) BT_FRAME(43) BT_FRAME(44) BT_FRAME(45) BT_FRAME(46) BT_FRAME(47) BT_FRAME(48) BT_FRAME(49) BT_FRAME(50) BT_FRAME(51) BT_FRAME(52) BT_FRAME(53) BT_FRAME(54) BT_FRAME(55) BT_FRAME(56) BT_FRAME(57) BT_FRAME(58) BT_FRAME(59) BT_FRAME(60) BT_FRAME(61) BT_FRAME(62) BT_FRAME(63) BT_FRAME(64) BT_FRAME(65) BT_FRAME(66) BT_FRAME(67) BT_FRAME(68) BT_FRAME(69) BT_FRAME(70) BT_FRAME(71) BT_FRAME(72) BT_FRAME(73) BT_FRAME(74) BT_FRAME(75) BT_FRAME(76) BT_FRAME(77) BT_FRAME(78) BT_FRAME(79) BT_FRAME(80) BT_FRAME(81) BT_FRAME(82) BT_FRAME(83) BT_FRAME(84) BT_FRAME(85) BT_FRAME(86) BT_FRAME(87) BT_FRAME(88) BT_FRAME(89) BT_FRAME(90) BT_FRAME(91) BT_FRAME(92) BT_FRAME(93) BT_FRAME(94) BT_FRAME(95) BT_FRAME(96) BT_FRAME(97) BT_FRAME(98) BT_FRAME(99) BT_FRAME(100) BT_FRAME(101) BT_FRAME(102) BT_FRAME(103) BT_FRAME(104) BT_FRAME(105) BT_FRAME(106) BT_FRAME(107) BT_FRAME(108) BT_FRAME(109) BT_FRAME(110) BT_FRAME(111) BT_FRAME(112) BT_FRAME(113) BT_FRAME(114) BT_FRAME(115) BT_FRAME(116) BT_FRAME(117) BT_FRAME(118) BT_FRAME(119) BT_FRAME(120) BT_FRAME(121) BT_FRAME(122) BT_FRAME(123) BT_FRAME(124) BT_FRAME(125) BT_FRAME(126) BT_FRAME(127) /* Extras to compensate for nignore. 
*/ BT_FRAME(128) BT_FRAME(129) BT_FRAME(130) #undef BT_FRAME } #else void prof_backtrace(prof_bt_t *bt, unsigned nignore) { cassert(config_prof); assert(false); } #endif prof_thr_cnt_t * prof_lookup(prof_bt_t *bt) { union { prof_thr_cnt_t *p; void *v; } ret; prof_tdata_t *prof_tdata; cassert(config_prof); - prof_tdata = *prof_tdata_tsd_get(); - if (prof_tdata == NULL) { - prof_tdata = prof_tdata_init(); - if (prof_tdata == NULL) - return (NULL); - } + prof_tdata = prof_tdata_get(); + if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + return (NULL); if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { union { prof_bt_t *p; void *v; } btkey; union { prof_ctx_t *p; void *v; } ctx; bool new_ctx; /* * This thread's cache lacks bt. Look for it in the global * cache. */ - prof_enter(); + prof_enter(prof_tdata); if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { /* bt has never been seen before. Insert it. */ ctx.v = imalloc(sizeof(prof_ctx_t)); if (ctx.v == NULL) { - prof_leave(); + prof_leave(prof_tdata); return (NULL); } btkey.p = bt_dup(bt); if (btkey.v == NULL) { - prof_leave(); + prof_leave(prof_tdata); idalloc(ctx.v); return (NULL); } ctx.p->bt = btkey.p; ctx.p->lock = prof_ctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition + * with prof_ctx_merge()/prof_ctx_destroy(). + */ + ctx.p->nlimbo = 1; memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t)); ql_new(&ctx.p->cnts_ql); if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { /* OOM. */ - prof_leave(); + prof_leave(prof_tdata); idalloc(btkey.v); idalloc(ctx.v); return (NULL); } - /* - * Artificially raise curobjs, in order to avoid a race - * condition with prof_ctx_merge()/prof_ctx_destroy(). - * - * No locking is necessary for ctx here because no other - * threads have had the opportunity to fetch it from - * bt2ctx yet. - */ - ctx.p->cnt_merged.curobjs++; new_ctx = true; } else { /* - * Artificially raise curobjs, in order to avoid a race - * condition with prof_ctx_merge()/prof_ctx_destroy(). + * Increment nlimbo, in order to avoid a race condition + * with prof_ctx_merge()/prof_ctx_destroy(). */ malloc_mutex_lock(ctx.p->lock); - ctx.p->cnt_merged.curobjs++; + ctx.p->nlimbo++; malloc_mutex_unlock(ctx.p->lock); new_ctx = false; } - prof_leave(); + prof_leave(prof_tdata); /* Link a prof_thd_cnt_t into ctx for this thread. */ if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) { assert(ckh_count(&prof_tdata->bt2cnt) > 0); /* * Flush the least recently used cnt in order to keep * bt2cnt from becoming too large. */ ret.p = ql_last(&prof_tdata->lru_ql, lru_link); assert(ret.v != NULL); if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, NULL, NULL)) assert(false); ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); prof_ctx_merge(ret.p->ctx, ret.p); /* ret can now be re-used. */ } else { assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX); /* Allocate and partially initialize a new cnt. */ ret.v = imalloc(sizeof(prof_thr_cnt_t)); if (ret.p == NULL) { if (new_ctx) prof_ctx_destroy(ctx.p); return (NULL); } ql_elm_new(ret.p, cnts_link); ql_elm_new(ret.p, lru_link); } /* Finish initializing ret. 
 */
        ret.p->ctx = ctx.p;
        ret.p->epoch = 0;
        memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
        if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
            if (new_ctx)
                prof_ctx_destroy(ctx.p);
            idalloc(ret.v);
            return (NULL);
        }
        ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
        malloc_mutex_lock(ctx.p->lock);
        ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
-       ctx.p->cnt_merged.curobjs--;
+       ctx.p->nlimbo--;
        malloc_mutex_unlock(ctx.p->lock);
    } else {
        /* Move ret to the front of the LRU. */
        ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
        ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
    }

    return (ret.p);
}

static bool
prof_flush(bool propagate_err)
{
    bool ret = false;
    ssize_t err;

    cassert(config_prof);

    err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
    if (err == -1) {
        if (propagate_err == false) {
            malloc_write("<jemalloc>: write() failed during heap "
                "profile flush\n");
            if (opt_abort)
                abort();
        }
        ret = true;
    }
    prof_dump_buf_end = 0;

    return (ret);
}

static bool
prof_write(bool propagate_err, const char *s)
{
    unsigned i, slen, n;

    cassert(config_prof);

    i = 0;
    slen = strlen(s);
    while (i < slen) {
        /* Flush the buffer if it is full. */
        if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
            if (prof_flush(propagate_err) && propagate_err)
                return (true);

        if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
            /* Finish writing. */
            n = slen - i;
        } else {
            /* Write as much of s as will fit. */
            n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
        }
        memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
        prof_dump_buf_end += n;
        i += n;
    }

    return (false);
}

JEMALLOC_ATTR(format(printf, 2, 3))
static bool
prof_printf(bool propagate_err, const char *format, ...)
{
    bool ret;
    va_list ap;
    char buf[PROF_PRINTF_BUFSIZE];

    va_start(ap, format);
    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    va_end(ap);
    ret = prof_write(propagate_err, buf);

    return (ret);
}

static void
prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
{
    prof_thr_cnt_t *thr_cnt;
    prof_cnt_t tcnt;

    cassert(config_prof);

    malloc_mutex_lock(ctx->lock);

    memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
    ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
        volatile unsigned *epoch = &thr_cnt->epoch;

        while (true) {
            unsigned epoch0 = *epoch;

            /* Make sure epoch is even. */
            if (epoch0 & 1U)
                continue;

            memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));

            /* Terminate if epoch didn't change while reading. */
            if (*epoch == epoch0)
                break;
        }

        ctx->cnt_summed.curobjs += tcnt.curobjs;
        ctx->cnt_summed.curbytes += tcnt.curbytes;
        if (opt_prof_accum) {
            ctx->cnt_summed.accumobjs += tcnt.accumobjs;
            ctx->cnt_summed.accumbytes += tcnt.accumbytes;
        }
    }

    if (ctx->cnt_summed.curobjs != 0)
        (*leak_nctx)++;

    /* Add to cnt_all. */
    cnt_all->curobjs += ctx->cnt_summed.curobjs;
    cnt_all->curbytes += ctx->cnt_summed.curbytes;
    if (opt_prof_accum) {
        cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
        cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
    }

    malloc_mutex_unlock(ctx->lock);
}

static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
+   prof_tdata_t *prof_tdata;

    cassert(config_prof);

    /*
     * Check that ctx is still unused by any thread cache before destroying
-    * it.  prof_lookup() artificially raises ctx->cnt_merge.curobjs in
-    * order to avoid a race condition with this function, as does
-    * prof_ctx_merge() in order to avoid a race between the main body of
-    * prof_ctx_merge() and entry into this function.
+    * it.
prof_lookup() increments ctx->nlimbo in order to avoid a race + * condition with this function, as does prof_ctx_merge() in order to + * avoid a race between the main body of prof_ctx_merge() and entry + * into this function. */ - prof_enter(); + prof_tdata = *prof_tdata_tsd_get(); + assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX); + prof_enter(prof_tdata); malloc_mutex_lock(ctx->lock); - if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) { + if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 && + ctx->nlimbo == 1) { assert(ctx->cnt_merged.curbytes == 0); assert(ctx->cnt_merged.accumobjs == 0); assert(ctx->cnt_merged.accumbytes == 0); /* Remove ctx from bt2ctx. */ if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL)) assert(false); - prof_leave(); + prof_leave(prof_tdata); /* Destroy ctx. */ malloc_mutex_unlock(ctx->lock); bt_destroy(ctx->bt); idalloc(ctx); } else { /* * Compensate for increment in prof_ctx_merge() or * prof_lookup(). */ - ctx->cnt_merged.curobjs--; + ctx->nlimbo--; malloc_mutex_unlock(ctx->lock); - prof_leave(); + prof_leave(prof_tdata); } } static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) { bool destroy; cassert(config_prof); /* Merge cnt stats and detach from ctx. */ malloc_mutex_lock(ctx->lock); ctx->cnt_merged.curobjs += cnt->cnts.curobjs; ctx->cnt_merged.curbytes += cnt->cnts.curbytes; ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; ql_remove(&ctx->cnts_ql, cnt, cnts_link); if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && - ctx->cnt_merged.curobjs == 0) { + ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) { /* - * Artificially raise ctx->cnt_merged.curobjs in order to keep - * another thread from winning the race to destroy ctx while - * this one has ctx->lock dropped. Without this, it would be - * possible for another thread to: + * Increment ctx->nlimbo in order to keep another thread from + * winning the race to destroy ctx while this one has ctx->lock + * dropped. Without this, it would be possible for another + * thread to: * * 1) Sample an allocation associated with ctx. * 2) Deallocate the sampled object. * 3) Successfully prof_ctx_destroy(ctx). * * The result would be that ctx no longer exists by the time * this thread accesses it in prof_ctx_destroy(). */ - ctx->cnt_merged.curobjs++; + ctx->nlimbo++; destroy = true; } else destroy = false; malloc_mutex_unlock(ctx->lock); if (destroy) prof_ctx_destroy(ctx); } static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt) { unsigned i; cassert(config_prof); - if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) { + /* + * Current statistics can sum to 0 as a result of unmerged per thread + * statistics. Additionally, interval- and growth-triggered dumps can + * occur between the time a ctx is created and when its statistics are + * filled in. Avoid dumping any ctx that is an artifact of either + * implementation detail. 
+    */
+   if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
+       (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
+       assert(ctx->cnt_summed.curobjs == 0);
        assert(ctx->cnt_summed.curbytes == 0);
        assert(ctx->cnt_summed.accumobjs == 0);
        assert(ctx->cnt_summed.accumbytes == 0);
        return (false);
    }

    if (prof_printf(propagate_err, "%"PRId64": %"PRId64
        " [%"PRIu64": %"PRIu64"] @",
        ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
        ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
        return (true);

    for (i = 0; i < bt->len; i++) {
        if (prof_printf(propagate_err, " %#"PRIxPTR,
            (uintptr_t)bt->vec[i]))
            return (true);
    }

    if (prof_write(propagate_err, "\n"))
        return (true);

    return (false);
}

static bool
prof_dump_maps(bool propagate_err)
{
    int mfd;
    char filename[PATH_MAX + 1];

    cassert(config_prof);

    malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
        (int)getpid());
    mfd = open(filename, O_RDONLY);
    if (mfd != -1) {
        ssize_t nread;

        if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
            propagate_err)
            return (true);
        nread = 0;
        do {
            prof_dump_buf_end += nread;
            if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
                /* Make space in prof_dump_buf before read(). */
                if (prof_flush(propagate_err) && propagate_err)
                    return (true);
            }
            nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
                PROF_DUMP_BUFSIZE - prof_dump_buf_end);
        } while (nread > 0);
        close(mfd);
    } else
        return (true);

    return (false);
}
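/*
 * Aside (not part of the diff): prof_dump_maps() above streams a file through
 * the shared dump buffer, flushing only when it fills.  The skeleton of that
 * read/flush loop in isolation; flush_buf() and its stdout sink are
 * hypothetical stand-ins for prof_flush() and the dump fd:
 */
#include <unistd.h>

enum { DEMO_BUFSIZE = 65536 };
static char demo_buf[DEMO_BUFSIZE];
static size_t demo_buf_end;

static int
flush_buf(void)
{
    /* Hypothetical sink; prof_flush() writes to the dump fd instead. */
    if (write(STDOUT_FILENO, demo_buf, demo_buf_end) !=
        (ssize_t)demo_buf_end)
        return (-1);
    demo_buf_end = 0;
    return (0);
}

static int
append_fd(int fd)       /* returns -1 on flush error, 0 on EOF */
{
    ssize_t nread;

    do {
        if (demo_buf_end == DEMO_BUFSIZE && flush_buf() != 0)
            return (-1);
        nread = read(fd, &demo_buf[demo_buf_end],
            DEMO_BUFSIZE - demo_buf_end);
        if (nread > 0)
            demo_buf_end += (size_t)nread;
    } while (nread > 0);
    return (0);
}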
"s" : ""); malloc_printf( ": Run pprof on \"%s\" for leak detail\n", filename); } return (false); label_error: - prof_leave(); + prof_leave(prof_tdata); return (true); } #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) static void prof_dump_filename(char *filename, char v, int64_t vseq) { cassert(config_prof); if (vseq != UINT64_C(0xffffffffffffffff)) { /* "...v.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"PRIu64".%c%"PRId64".heap", opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); } else { /* "....heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"PRIu64".%c.heap", opt_prof_prefix, (int)getpid(), prof_dump_seq, v); } + prof_dump_seq++; } static void prof_fdump(void) { char filename[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); if (prof_booted == false) return; if (opt_prof_final && opt_prof_prefix[0] != '\0') { malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff)); malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(false, filename, opt_prof_leak); } } void prof_idump(void) { + prof_tdata_t *prof_tdata; char filename[PATH_MAX + 1]; cassert(config_prof); if (prof_booted == false) return; - malloc_mutex_lock(&enq_mtx); - if (enq) { - enq_idump = true; - malloc_mutex_unlock(&enq_mtx); + /* + * Don't call prof_tdata_get() here, because it could cause recursive + * allocation. + */ + prof_tdata = *prof_tdata_tsd_get(); + if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) return; + if (prof_tdata->enq) { + prof_tdata->enq_idump = true; + return; } - malloc_mutex_unlock(&enq_mtx); if (opt_prof_prefix[0] != '\0') { malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(false, filename, false); } } bool prof_mdump(const char *filename) { char filename_buf[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); if (opt_prof == false || prof_booted == false) return (true); if (filename == NULL) { /* No filename specified, so automatically generate one. */ if (opt_prof_prefix[0] == '\0') return (true); malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; malloc_mutex_unlock(&prof_dump_seq_mtx); filename = filename_buf; } return (prof_dump(true, filename, false)); } void prof_gdump(void) { + prof_tdata_t *prof_tdata; char filename[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); if (prof_booted == false) return; - malloc_mutex_lock(&enq_mtx); - if (enq) { - enq_gdump = true; - malloc_mutex_unlock(&enq_mtx); + /* + * Don't call prof_tdata_get() here, because it could cause recursive + * allocation. 
+ */ + prof_tdata = *prof_tdata_tsd_get(); + if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) return; + if (prof_tdata->enq) { + prof_tdata->enq_gdump = true; + return; } - malloc_mutex_unlock(&enq_mtx); if (opt_prof_prefix[0] != '\0') { malloc_mutex_lock(&prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; malloc_mutex_unlock(&prof_dump_seq_mtx); prof_dump(false, filename, false); } } static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2) { size_t ret1, ret2; uint64_t h; prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); assert(hash1 != NULL); assert(hash2 != NULL); h = hash(bt->vec, bt->len * sizeof(void *), UINT64_C(0x94122f335b332aea)); if (minbits <= 32) { /* * Avoid doing multiple hashes, since a single hash provides * enough bits. */ ret1 = h & ZU(0xffffffffU); ret2 = h >> 32; } else { ret1 = h; ret2 = hash(bt->vec, bt->len * sizeof(void *), UINT64_C(0x8432a476666bbc13)); } *hash1 = ret1; *hash2 = ret2; } static bool prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); if (bt1->len != bt2->len) return (false); return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } static malloc_mutex_t * prof_ctx_mutex_choose(void) { unsigned nctxs = atomic_add_u(&cum_ctxs, 1); return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); } prof_tdata_t * prof_tdata_init(void) { prof_tdata_t *prof_tdata; cassert(config_prof); /* Initialize an empty cache for this thread. */ prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); if (prof_tdata == NULL) return (NULL); if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { idalloc(prof_tdata); return (NULL); } ql_new(&prof_tdata->lru_ql); prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX); if (prof_tdata->vec == NULL) { ckh_delete(&prof_tdata->bt2cnt); idalloc(prof_tdata); return (NULL); } prof_tdata->prng_state = 0; prof_tdata->threshold = 0; prof_tdata->accum = 0; + prof_tdata->enq = false; + prof_tdata->enq_idump = false; + prof_tdata->enq_gdump = false; + prof_tdata_tsd_set(&prof_tdata); return (prof_tdata); } void prof_tdata_cleanup(void *arg) { prof_thr_cnt_t *cnt; prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg; cassert(config_prof); - /* - * Delete the hash table. All of its contents can still be iterated - * over via the LRU. - */ - ckh_delete(&prof_tdata->bt2cnt); - - /* Iteratively merge cnt's into the global stats and delete them. */ - while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { - ql_remove(&prof_tdata->lru_ql, cnt, lru_link); - prof_ctx_merge(cnt->ctx, cnt); - idalloc(cnt); + if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) { + /* + * Another destructor deallocated memory after this destructor + * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY + * in order to receive another callback. + */ + prof_tdata = PROF_TDATA_STATE_PURGATORY; + prof_tdata_tsd_set(&prof_tdata); + } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) { + /* + * The previous time this destructor was called, we set the key + * to PROF_TDATA_STATE_PURGATORY so that other destructors + * wouldn't cause re-creation of the prof_tdata. This time, do + * nothing, so that the destructor will not be called again. + */ + } else if (prof_tdata != NULL) { + /* + * Delete the hash table. All of its contents can still be + * iterated over via the LRU. 
+        */
+       ckh_delete(&prof_tdata->bt2cnt);
+       /*
+        * Iteratively merge cnt's into the global stats and delete
+        * them.
+        */
+       while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
+           ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+           prof_ctx_merge(cnt->ctx, cnt);
+           idalloc(cnt);
+       }
+       idalloc(prof_tdata->vec);
+       idalloc(prof_tdata);
+       prof_tdata = PROF_TDATA_STATE_PURGATORY;
+       prof_tdata_tsd_set(&prof_tdata);
    }
-
-   idalloc(prof_tdata->vec);
-
-   idalloc(prof_tdata);
-   prof_tdata = NULL;
-   prof_tdata_tsd_set(&prof_tdata);
}

void
prof_boot0(void)
{

    cassert(config_prof);

    memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
        sizeof(PROF_PREFIX_DEFAULT));
}

void
prof_boot1(void)
{

    cassert(config_prof);

    /*
     * opt_prof and prof_promote must be in their final state before any
     * arenas are initialized, so this function must be executed early.
     */

    if (opt_prof_leak && opt_prof == false) {
        /*
         * Enable opt_prof, but in such a way that profiles are never
         * automatically dumped.
         */
        opt_prof = true;
        opt_prof_gdump = false;
        prof_interval = 0;
    } else if (opt_prof) {
        if (opt_lg_prof_interval >= 0) {
            prof_interval = (((uint64_t)1U) <<
                opt_lg_prof_interval);
        } else
            prof_interval = 0;
    }

    prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}

bool
prof_boot2(void)
{

    cassert(config_prof);

    if (opt_prof) {
        unsigned i;

        if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
            prof_bt_keycomp))
            return (true);
        if (malloc_mutex_init(&bt2ctx_mtx))
            return (true);
        if (prof_tdata_tsd_boot()) {
            malloc_write(
                "<jemalloc>: Error in pthread_key_create()\n");
            abort();
        }

        if (malloc_mutex_init(&prof_dump_seq_mtx))
            return (true);
-
-       if (malloc_mutex_init(&enq_mtx))
-           return (true);
-       enq = false;
-       enq_idump = false;
-       enq_gdump = false;

        if (atexit(prof_fdump) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }

        ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
            sizeof(malloc_mutex_t));
        if (ctx_locks == NULL)
            return (true);
        for (i = 0; i < PROF_NCTX_LOCKS; i++) {
            if (malloc_mutex_init(&ctx_locks[i]))
                return (true);
        }
    }

#ifdef JEMALLOC_PROF_LIBGCC
    /*
     * Cause the backtracing machinery to allocate its internal state
     * before enabling profiling.
     */
    _Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif

    prof_booted = true;

    return (false);
}

/******************************************************************************/
Index: projects/nand/contrib/jemalloc/src/quarantine.c
===================================================================
--- projects/nand/contrib/jemalloc/src/quarantine.c	(revision 235262)
+++ projects/nand/contrib/jemalloc/src/quarantine.c	(revision 235263)
@@ -1,163 +1,210 @@
#include "jemalloc/internal/jemalloc_internal.h"

+/*
+ * quarantine pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
+#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
+#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
+
/******************************************************************************/
/* Data. */

+typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;

+struct quarantine_obj_s {
+   void *ptr;
+   size_t usize;
+};
+
struct quarantine_s {
-   size_t curbytes;
-   size_t curobjs;
-   size_t first;
+   size_t curbytes;
+   size_t curobjs;
+   size_t first;
#define LG_MAXOBJS_INIT 10
-   size_t lg_maxobjs;
-   void *objs[1]; /* Dynamically sized ring buffer. */
+   size_t lg_maxobjs;
+   quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
};
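/*
 * Aside (not part of the diff): because lg_maxobjs stores a base-2 log, the
 * ring buffer's capacity is always a power of two, so indices can wrap with a
 * bit mask instead of a modulo.  The indexing used throughout the quarantine,
 * isolated:
 */
#include <stddef.h>

static size_t
ring_slot(size_t first, size_t offset, size_t lg_maxobjs)
{
    /* (first + offset) mod 2^lg_maxobjs, computed without division. */
    return ((first + offset) & (((size_t)1 << lg_maxobjs) - 1));
}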

static void quarantine_cleanup(void *arg);

malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
    quarantine_cleanup)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);

/******************************************************************************/

static quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
    quarantine_t *quarantine;

    quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
-       ((ZU(1) << lg_maxobjs) * sizeof(void *)));
+       ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
    if (quarantine == NULL)
        return (NULL);
    quarantine->curbytes = 0;
    quarantine->curobjs = 0;
    quarantine->first = 0;
    quarantine->lg_maxobjs = lg_maxobjs;

    quarantine_tsd_set(&quarantine);

    return (quarantine);
}

static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
    quarantine_t *ret;

    ret = quarantine_init(quarantine->lg_maxobjs + 1);
    if (ret == NULL)
        return (quarantine);

    ret->curbytes = quarantine->curbytes;
-   if (quarantine->first + quarantine->curobjs < (ZU(1) <<
+   ret->curobjs = quarantine->curobjs;
+   if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
        quarantine->lg_maxobjs)) {
        /* objs ring buffer data are contiguous. */
        memcpy(ret->objs, &quarantine->objs[quarantine->first],
-           quarantine->curobjs * sizeof(void *));
-       ret->curobjs = quarantine->curobjs;
+           quarantine->curobjs * sizeof(quarantine_obj_t));
    } else {
        /* objs ring buffer data wrap around.
*/ - size_t ncopy = (ZU(1) << quarantine->lg_maxobjs) - + size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - quarantine->first; - memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy * - sizeof(void *)); - ret->curobjs = ncopy; - if (quarantine->curobjs != 0) { - memcpy(&ret->objs[ret->curobjs], quarantine->objs, - quarantine->curobjs - ncopy); - } + size_t ncopy_b = quarantine->curobjs - ncopy_a; + + memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a + * sizeof(quarantine_obj_t)); + memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * + sizeof(quarantine_obj_t)); } return (ret); } static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound) { while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) { - void *ptr = quarantine->objs[quarantine->first]; - size_t usize = isalloc(ptr, config_prof); - idalloc(ptr); - quarantine->curbytes -= usize; + quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; + assert(obj->usize == isalloc(obj->ptr, config_prof)); + idalloc(obj->ptr); + quarantine->curbytes -= obj->usize; quarantine->curobjs--; quarantine->first = (quarantine->first + 1) & ((ZU(1) << quarantine->lg_maxobjs) - 1); } } void quarantine(void *ptr) { quarantine_t *quarantine; size_t usize = isalloc(ptr, config_prof); cassert(config_fill); assert(opt_quarantine); quarantine = *quarantine_tsd_get(); - if (quarantine == NULL && (quarantine = - quarantine_init(LG_MAXOBJS_INIT)) == NULL) { - idalloc(ptr); - return; + if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) { + if (quarantine == NULL) { + if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) == + NULL) { + idalloc(ptr); + return; + } + } else { + if (quarantine == QUARANTINE_STATE_PURGATORY) { + /* + * Make a note that quarantine() was called + * after quarantine_cleanup() was called. + */ + quarantine = QUARANTINE_STATE_REINCARNATED; + quarantine_tsd_set(&quarantine); + } + idalloc(ptr); + return; + } } /* * Drain one or more objects if the quarantine size limit would be * exceeded by appending ptr. */ if (quarantine->curbytes + usize > opt_quarantine) { size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - usize : 0; quarantine_drain(quarantine, upper_bound); } /* Grow the quarantine ring buffer if it's full. */ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) quarantine = quarantine_grow(quarantine); /* quarantine_grow() must free a slot if it fails to grow. */ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); /* Append ptr if its size doesn't exceed the quarantine size. */ if (quarantine->curbytes + usize <= opt_quarantine) { size_t offset = (quarantine->first + quarantine->curobjs) & ((ZU(1) << quarantine->lg_maxobjs) - 1); - quarantine->objs[offset] = ptr; + quarantine_obj_t *obj = &quarantine->objs[offset]; + obj->ptr = ptr; + obj->usize = usize; quarantine->curbytes += usize; quarantine->curobjs++; if (opt_junk) memset(ptr, 0x5a, usize); } else { assert(quarantine->curbytes == 0); idalloc(ptr); } } static void quarantine_cleanup(void *arg) { quarantine_t *quarantine = *(quarantine_t **)arg; - if (quarantine != NULL) { + if (quarantine == QUARANTINE_STATE_REINCARNATED) { + /* + * Another destructor deallocated memory after this destructor + * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY + * in order to receive another callback. 
+ */ + quarantine = QUARANTINE_STATE_PURGATORY; + quarantine_tsd_set(&quarantine); + } else if (quarantine == QUARANTINE_STATE_PURGATORY) { + /* + * The previous time this destructor was called, we set the key + * to QUARANTINE_STATE_PURGATORY so that other destructors + * wouldn't cause re-creation of the quarantine. This time, do + * nothing, so that the destructor will not be called again. + */ + } else if (quarantine != NULL) { quarantine_drain(quarantine, 0); idalloc(quarantine); + quarantine = QUARANTINE_STATE_PURGATORY; + quarantine_tsd_set(&quarantine); } } bool quarantine_boot(void) { cassert(config_fill); if (quarantine_tsd_boot()) return (true); return (false); } Index: projects/nand/contrib/jemalloc/src/stats.c =================================================================== --- projects/nand/contrib/jemalloc/src/stats.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/stats.c (revision 235263) @@ -1,551 +1,543 @@ #define JEMALLOC_STATS_C_ #include "jemalloc/internal/jemalloc_internal.h" #define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ xmallctl(n, v, &sz, NULL, 0); \ } while (0) #define CTL_I_GET(n, v, t) do { \ size_t mib[6]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = i; \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ } while (0) #define CTL_J_GET(n, v, t) do { \ size_t mib[6]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = j; \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ } while (0) #define CTL_IJ_GET(n, v, t) do { \ size_t mib[6]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = i; \ mib[4] = j; \ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ /* Data. */ bool opt_stats_print = false; size_t stats_cactive = 0; /******************************************************************************/ /* Function prototypes for non-inline static functions. 
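
The CTL_GET/CTL_I_GET/CTL_J_GET macros above avoid repeated name lookups by translating a dotted mallctl name into a MIB once and then patching the index slots. A sketch of the same pattern against jemalloc's public entry points (the tree above uses the internal xmallctl* wrappers; mallctlnametomib() and mallctlbymib() are the exported equivalents):

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Read stats.arenas.<i>.pactive the way CTL_I_GET does. */
    static size_t
    arena_pactive(unsigned i)
    {
            size_t mib[4];
            size_t miblen = sizeof(mib) / sizeof(size_t);
            size_t v, sz = sizeof(v);

            /* "0" is a placeholder for the per-arena index. */
            if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0)
                    return (0);
            mib[2] = i;     /* Patch in the real arena index. */
            if (mallctlbymib(mib, miblen, &v, &sz, NULL, 0) != 0)
                    return (0);
            return (v);
    }
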
*/ static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i, bool bins, bool large); /******************************************************************************/ static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i) { size_t page; bool config_tcache; unsigned nbins, j, gap_start; CTL_GET("arenas.page", &page, size_t); CTL_GET("config.tcache", &config_tcache, bool); if (config_tcache) { malloc_cprintf(write_cb, cbopaque, "bins: bin size regs pgs allocated nmalloc" " ndalloc nrequests nfills nflushes" " newruns reruns curruns\n"); } else { malloc_cprintf(write_cb, cbopaque, "bins: bin size regs pgs allocated nmalloc" " ndalloc newruns reruns curruns\n"); } CTL_GET("arenas.nbins", &nbins, unsigned); for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { uint64_t nruns; CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); if (nruns == 0) { if (gap_start == UINT_MAX) gap_start = j; } else { size_t reg_size, run_size, allocated; uint32_t nregs; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t reruns; size_t curruns; if (gap_start != UINT_MAX) { if (j > gap_start + 1) { /* Gap of more than one size class. */ malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", gap_start, j - 1); } else { /* Gap of one size class. */ malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); } gap_start = UINT_MAX; } CTL_J_GET("arenas.bin.0.size", ®_size, size_t); CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); CTL_IJ_GET("stats.arenas.0.bins.0.allocated", &allocated, size_t); CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", &nmalloc, uint64_t); CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", &ndalloc, uint64_t); if (config_tcache) { CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", &nrequests, uint64_t); CTL_IJ_GET("stats.arenas.0.bins.0.nfills", &nfills, uint64_t); CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", &nflushes, uint64_t); } CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, uint64_t); CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, size_t); if (config_tcache) { malloc_cprintf(write_cb, cbopaque, "%13u %5zu %4u %3zu %12zu %12"PRIu64 " %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12zu\n", j, reg_size, nregs, run_size / page, allocated, nmalloc, ndalloc, nrequests, nfills, nflushes, nruns, reruns, curruns); } else { malloc_cprintf(write_cb, cbopaque, "%13u %5zu %4u %3zu %12zu %12"PRIu64 " %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12zu\n", j, reg_size, nregs, run_size / page, allocated, nmalloc, ndalloc, nruns, reruns, curruns); } } } if (gap_start != UINT_MAX) { if (j > gap_start + 1) { /* Gap of more than one size class. */ malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", gap_start, j - 1); } else { /* Gap of one size class. 
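
The gap bookkeeping in the bins loop above compresses runs of unused size classes into one bracketed line instead of printing an empty row per class. The same idea in isolation (print_compressed is an illustrative name; the single-entry trailing gap is not special-cased here, for brevity):

    #include <stdio.h>
    #include <limits.h>

    /* Print nonzero entries; collapse runs of zeros into [a..b]. */
    static void
    print_compressed(const unsigned *counts, unsigned n)
    {
            unsigned j, gap_start = UINT_MAX;

            for (j = 0; j < n; j++) {
                    if (counts[j] == 0) {
                            if (gap_start == UINT_MAX)
                                    gap_start = j;  /* Open a gap. */
                            continue;
                    }
                    if (gap_start != UINT_MAX) {
                            if (j > gap_start + 1)
                                    printf("[%u..%u]\n", gap_start, j - 1);
                            else
                                    printf("[%u]\n", gap_start);
                            gap_start = UINT_MAX;
                    }
                    printf("%u: %u\n", j, counts[j]);
            }
            if (gap_start != UINT_MAX)      /* Trailing gap. */
                    printf("[%u..%u]\n", gap_start, n - 1);
    }
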
*/ malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); } } } static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i) { size_t page, nlruns, j; ssize_t gap_start; CTL_GET("arenas.page", &page, size_t); malloc_cprintf(write_cb, cbopaque, "large: size pages nmalloc ndalloc nrequests" " curruns\n"); CTL_GET("arenas.nlruns", &nlruns, size_t); for (j = 0, gap_start = -1; j < nlruns; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t run_size, curruns; CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, uint64_t); CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, uint64_t); CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, uint64_t); if (nrequests == 0) { if (gap_start == -1) gap_start = j; } else { CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, size_t); if (gap_start != -1) { malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); gap_start = -1; } malloc_cprintf(write_cb, cbopaque, "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 " %12zu\n", run_size, run_size / page, nmalloc, ndalloc, nrequests, curruns); } } if (gap_start != -1) malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); } static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, unsigned i, bool bins, bool large) { unsigned nthreads; size_t page, pactive, pdirty, mapped; uint64_t npurge, nmadvise, purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; CTL_GET("arenas.page", &page, size_t); CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); malloc_cprintf(write_cb, cbopaque, "assigned threads: %u\n", nthreads); CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); malloc_cprintf(write_cb, cbopaque, "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," " %"PRIu64" madvise%s, %"PRIu64" purged\n", pactive, pdirty, npurge, npurge == 1 ? "" : "s", nmadvise, nmadvise == 1 ? 
"" : "s", purged); malloc_cprintf(write_cb, cbopaque, " allocated nmalloc ndalloc nrequests\n"); CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); malloc_cprintf(write_cb, cbopaque, "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", small_allocated, small_nmalloc, small_ndalloc, small_nrequests); CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); malloc_cprintf(write_cb, cbopaque, "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", large_allocated, large_nmalloc, large_ndalloc, large_nrequests); malloc_cprintf(write_cb, cbopaque, "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", small_allocated + large_allocated, small_nmalloc + large_nmalloc, small_ndalloc + large_ndalloc, small_nrequests + large_nrequests); malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); if (bins) stats_arena_bins_print(write_cb, cbopaque, i); if (large) stats_arena_lruns_print(write_cb, cbopaque, i); } void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { int err; uint64_t epoch; size_t u64sz; bool general = true; bool merged = true; bool unmerged = true; bool bins = true; bool large = true; /* * Refresh stats, in case mallctl() was called by the application. * * Check for OOM here, since refreshing the ctl cache can trigger * allocation. In practice, none of the subsequent mallctl()-related * calls in this function will cause OOM if this one succeeds. * */ epoch = 1; u64sz = sizeof(uint64_t); err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); if (err != 0) { if (err == EAGAIN) { malloc_write(": Memory allocation failure in " "mallctl(\"epoch\", ...)\n"); return; } malloc_write(": Failure in mallctl(\"epoch\", " "...)\n"); abort(); } - if (write_cb == NULL) { - /* - * The caller did not provide an alternate write_cb callback - * function, so use the default one. malloc_write() is an - * inline function, so use malloc_message() directly here. - */ - write_cb = je_malloc_message; - cbopaque = NULL; - } - if (opts != NULL) { unsigned i; for (i = 0; opts[i] != '\0'; i++) { switch (opts[i]) { case 'g': general = false; break; case 'm': merged = false; break; case 'a': unmerged = false; break; case 'b': bins = false; break; case 'l': large = false; break; default:; } } } - write_cb(cbopaque, "___ Begin jemalloc statistics ___\n"); + malloc_cprintf(write_cb, cbopaque, + "___ Begin jemalloc statistics ___\n"); if (general) { int err; const char *cpv; bool bv; unsigned uv; ssize_t ssv; size_t sv, bsz, ssz, sssz, cpsz; bsz = sizeof(bool); ssz = sizeof(size_t); sssz = sizeof(ssize_t); cpsz = sizeof(const char *); CTL_GET("version", &cpv, const char *); malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); CTL_GET("config.debug", &bv, bool); malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", bv ? 
"enabled" : "disabled"); #define OPT_WRITE_BOOL(n) \ if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s\n", bv ? "true" : "false"); \ } #define OPT_WRITE_SIZE_T(n) \ if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zu\n", sv); \ } #define OPT_WRITE_SSIZE_T(n) \ if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd\n", ssv); \ } #define OPT_WRITE_CHAR_P(n) \ if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ == 0) { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": \"%s\"\n", cpv); \ } - write_cb(cbopaque, "Run-time option settings:\n"); + malloc_cprintf(write_cb, cbopaque, + "Run-time option settings:\n"); OPT_WRITE_BOOL(abort) OPT_WRITE_SIZE_T(lg_chunk) OPT_WRITE_SIZE_T(narenas) OPT_WRITE_SSIZE_T(lg_dirty_mult) OPT_WRITE_BOOL(stats_print) OPT_WRITE_BOOL(junk) OPT_WRITE_SIZE_T(quarantine) OPT_WRITE_BOOL(redzone) OPT_WRITE_BOOL(zero) OPT_WRITE_BOOL(utrace) OPT_WRITE_BOOL(valgrind) OPT_WRITE_BOOL(xmalloc) OPT_WRITE_BOOL(tcache) OPT_WRITE_SSIZE_T(lg_tcache_max) OPT_WRITE_BOOL(prof) OPT_WRITE_CHAR_P(prof_prefix) OPT_WRITE_BOOL(prof_active) OPT_WRITE_SSIZE_T(lg_prof_sample) OPT_WRITE_BOOL(prof_accum) OPT_WRITE_SSIZE_T(lg_prof_interval) OPT_WRITE_BOOL(prof_gdump) OPT_WRITE_BOOL(prof_final) OPT_WRITE_BOOL(prof_leak) #undef OPT_WRITE_BOOL #undef OPT_WRITE_SIZE_T #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_CHAR_P malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); CTL_GET("arenas.narenas", &uv, unsigned); malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv); malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", sizeof(void *)); CTL_GET("arenas.quantum", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); CTL_GET("arenas.page", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); if (ssv >= 0) { malloc_cprintf(write_cb, cbopaque, "Min active:dirty page ratio per arena: %u:1\n", (1U << ssv)); } else { - write_cb(cbopaque, + malloc_cprintf(write_cb, cbopaque, "Min active:dirty page ratio per arena: N/A\n"); } if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0)) == 0) { malloc_cprintf(write_cb, cbopaque, "Maximum thread-cached size class: %zu\n", sv); } if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && bv) { CTL_GET("opt.lg_prof_sample", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Average profile sample interval: %"PRIu64 " (2^%zu)\n", (((uint64_t)1U) << sv), sv); CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); if (ssv >= 0) { malloc_cprintf(write_cb, cbopaque, "Average profile dump interval: %"PRIu64 " (2^%zd)\n", (((uint64_t)1U) << ssv), ssv); } else { - write_cb(cbopaque, + malloc_cprintf(write_cb, cbopaque, "Average profile dump interval: N/A\n"); } } CTL_GET("opt.lg_chunk", &sv, size_t); malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); } if (config_stats) { size_t *cactive; size_t allocated, active, mapped; size_t chunks_current, chunks_high; uint64_t chunks_total; size_t huge_allocated; uint64_t huge_nmalloc, huge_ndalloc; CTL_GET("stats.cactive", &cactive, size_t *); CTL_GET("stats.allocated", &allocated, size_t); CTL_GET("stats.active", &active, size_t); CTL_GET("stats.mapped", &mapped, size_t); malloc_cprintf(write_cb, cbopaque, "Allocated: %zu, active: %zu, mapped: %zu\n", allocated, active, mapped); malloc_cprintf(write_cb, 
cbopaque, "Current active ceiling: %zu\n", atomic_read_z(cactive)); /* Print chunk stats. */ CTL_GET("stats.chunks.total", &chunks_total, uint64_t); CTL_GET("stats.chunks.high", &chunks_high, size_t); CTL_GET("stats.chunks.current", &chunks_current, size_t); malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " "highchunks curchunks\n"); malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n", chunks_total, chunks_high, chunks_current); /* Print huge stats. */ CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); CTL_GET("stats.huge.allocated", &huge_allocated, size_t); malloc_cprintf(write_cb, cbopaque, "huge: nmalloc ndalloc allocated\n"); malloc_cprintf(write_cb, cbopaque, " %12"PRIu64" %12"PRIu64" %12zu\n", huge_nmalloc, huge_ndalloc, huge_allocated); if (merged) { unsigned narenas; CTL_GET("arenas.narenas", &narenas, unsigned); { - bool initialized[narenas]; + VARIABLE_ARRAY(bool, initialized, narenas); size_t isz; unsigned i, ninitialized; - isz = sizeof(initialized); + isz = sizeof(bool) * narenas; xmallctl("arenas.initialized", initialized, &isz, NULL, 0); for (i = ninitialized = 0; i < narenas; i++) { if (initialized[i]) ninitialized++; } if (ninitialized > 1 || unmerged == false) { /* Print merged arena stats. */ malloc_cprintf(write_cb, cbopaque, "\nMerged arenas stats:\n"); stats_arena_print(write_cb, cbopaque, narenas, bins, large); } } } if (unmerged) { unsigned narenas; /* Print stats for each arena. */ CTL_GET("arenas.narenas", &narenas, unsigned); { - bool initialized[narenas]; + VARIABLE_ARRAY(bool, initialized, narenas); size_t isz; unsigned i; - isz = sizeof(initialized); + isz = sizeof(bool) * narenas; xmallctl("arenas.initialized", initialized, &isz, NULL, 0); for (i = 0; i < narenas; i++) { if (initialized[i]) { malloc_cprintf(write_cb, cbopaque, "\narenas[%u]:\n", i); stats_arena_print(write_cb, cbopaque, i, bins, large); } } } } } - write_cb(cbopaque, "--- End jemalloc statistics ---\n"); + malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); } Index: projects/nand/contrib/jemalloc/src/tcache.c =================================================================== --- projects/nand/contrib/jemalloc/src/tcache.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/tcache.c (revision 235263) @@ -1,441 +1,474 @@ #define JEMALLOC_TCACHE_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ malloc_tsd_data(, tcache, tcache_t *, NULL) malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default) bool opt_tcache = true; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; tcache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ size_t nhbins; size_t tcache_maxclass; /******************************************************************************/ size_t tcache_salloc(const void *ptr) { return (arena_salloc(ptr, false)); } +void +tcache_event_hard(tcache_t *tcache) +{ + size_t binind = tcache->next_gc_bin; + tcache_bin_t *tbin = &tcache->tbins[binind]; + tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + + if (tbin->low_water > 0) { + /* + * Flush (ceiling) 3/4 of the objects below the low water mark. 
+ */ + if (binind < NBINS) { + tcache_bin_flush_small(tbin, binind, tbin->ncached - + tbin->low_water + (tbin->low_water >> 2), tcache); + } else { + tcache_bin_flush_large(tbin, binind, tbin->ncached - + tbin->low_water + (tbin->low_water >> 2), tcache); + } + /* + * Reduce fill count by 2X. Limit lg_fill_div such that the + * fill count is always at least 1. + */ + if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) + tbin->lg_fill_div++; + } else if (tbin->low_water < 0) { + /* + * Increase fill count by 2X. Make sure lg_fill_div stays + * greater than 0. + */ + if (tbin->lg_fill_div > 1) + tbin->lg_fill_div--; + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; + if (tcache->next_gc_bin == nhbins) + tcache->next_gc_bin = 0; + tcache->ev_cnt = 0; +} + void * tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) { void *ret; arena_tcache_fill_small(tcache->arena, tbin, binind, config_prof ? tcache->prof_accumbytes : 0); if (config_prof) tcache->prof_accumbytes = 0; ret = tcache_alloc_easy(tbin); return (ret); } void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, tcache_t *tcache) { void *ptr; unsigned i, nflush, ndeferred; bool merged_stats = false; assert(binind < NBINS); assert(rem <= tbin->ncached); for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena bin associated with the first object. */ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( tbin->avail[0]); arena_t *arena = chunk->arena; arena_bin_t *bin = &arena->bins[binind]; if (config_prof && arena == tcache->arena) { malloc_mutex_lock(&arena->lock); arena_prof_accum(arena, tcache->prof_accumbytes); malloc_mutex_unlock(&arena->lock); tcache->prof_accumbytes = 0; } malloc_mutex_lock(&bin->lock); if (config_stats && arena == tcache->arena) { assert(merged_stats == false); merged_stats = true; bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } ndeferred = 0; for (i = 0; i < nflush; i++) { ptr = tbin->avail[i]; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk->arena == arena) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_t *mapelm = - &chunk->map[pageind-map_bias]; + arena_mapp_get(chunk, pageind); if (config_fill && opt_junk) { arena_alloc_junk_small(ptr, &arena_bin_info[binind], true); } - arena_dalloc_bin(arena, chunk, ptr, mapelm); + arena_dalloc_bin_locked(arena, chunk, ptr, + mapelm); } else { /* * This object was allocated via a different * arena bin than the one that is currently * locked. Stash the object, so that it can be * handled in a future pass. */ tbin->avail[ndeferred] = ptr; ndeferred++; } } malloc_mutex_unlock(&bin->lock); } if (config_stats && merged_stats == false) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. 
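
The flush loops above take each destination lock once per pass: they lock the arena (or bin) owning the first queued object, free everything owned by it, and compact the rest to the front for a later pass. The control flow in isolation, with placeholder hooks (ptr_arena, arena_lock, dalloc_locked are illustrative stand-ins, not the commit's APIs):

    typedef struct arena arena_t;

    /* Placeholder hooks standing in for jemalloc's internals. */
    extern arena_t *ptr_arena(void *ptr);           /* Owner lookup. */
    extern void arena_lock(arena_t *arena);
    extern void arena_unlock(arena_t *arena);
    extern void dalloc_locked(arena_t *arena, void *ptr);

    /* Free objs[0..nflush) to their owning arenas, one lock per pass. */
    static void
    flush_deferred(void **objs, unsigned nflush)
    {
            unsigned i, ndeferred;

            while (nflush > 0) {
                    arena_t *arena = ptr_arena(objs[0]);

                    arena_lock(arena);
                    ndeferred = 0;
                    for (i = 0; i < nflush; i++) {
                            if (ptr_arena(objs[i]) == arena)
                                    dalloc_locked(arena, objs[i]);
                            else {
                                    /* Wrong owner; retry next pass. */
                                    objs[ndeferred++] = objs[i];
                            }
                    }
                    arena_unlock(arena);
                    nflush = ndeferred;     /* Each pass empties one owner. */
            }
    }

Each pass frees at least the first object, so the loop terminates after at most one pass per distinct owner.
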
*/ arena_bin_t *bin = &tcache->arena->bins[binind]; malloc_mutex_lock(&bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; malloc_mutex_unlock(&bin->lock); } memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], rem * sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, tcache_t *tcache) { void *ptr; unsigned i, nflush, ndeferred; bool merged_stats = false; assert(binind < nhbins); assert(rem <= tbin->ncached); for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena associated with the first object. */ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( tbin->avail[0]); arena_t *arena = chunk->arena; malloc_mutex_lock(&arena->lock); if ((config_prof || config_stats) && arena == tcache->arena) { if (config_prof) { arena_prof_accum(arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } } ndeferred = 0; for (i = 0; i < nflush; i++) { ptr = tbin->avail[i]; assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (chunk->arena == arena) - arena_dalloc_large(arena, chunk, ptr); + arena_dalloc_large_locked(arena, chunk, ptr); else { /* * This object was allocated via a different * arena than the one that is currently locked. * Stash the object, so that it can be handled * in a future pass. */ tbin->avail[ndeferred] = ptr; ndeferred++; } } malloc_mutex_unlock(&arena->lock); } if (config_stats && merged_stats == false) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ arena_t *arena = tcache->arena; malloc_mutex_lock(&arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; malloc_mutex_unlock(&arena->lock); } memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], rem * sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } void tcache_arena_associate(tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Link into list of extant tcaches. */ malloc_mutex_lock(&arena->lock); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); malloc_mutex_unlock(&arena->lock); } tcache->arena = arena; } void tcache_arena_dissociate(tcache_t *tcache) { if (config_stats) { /* Unlink from list of extant tcaches. */ malloc_mutex_lock(&tcache->arena->lock); ql_remove(&tcache->arena->tcache_ql, tcache, link); malloc_mutex_unlock(&tcache->arena->lock); tcache_stats_merge(tcache, tcache->arena); } } tcache_t * tcache_create(arena_t *arena) { tcache_t *tcache; size_t size, stack_offset; unsigned i; size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); /* * Round up to the nearest multiple of the cacheline size, in order to * avoid the possibility of false cacheline sharing. * * That this works relies on the same logic as in ipalloc(), but we * cannot directly call ipalloc() here due to tcache bootstrapping * issues. 
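
The rounding expression that follows relies on CACHELINE being a power of two, so -CACHELINE equals ~CACHELINE_MASK in two's complement. A compilable check of that identity (CACHELINE is fixed at 64 here for illustration; jemalloc derives it from its configuration):

    #include <assert.h>
    #include <stddef.h>

    #define CACHELINE       ((size_t)64)            /* Power of two. */
    #define CACHELINE_MASK  (CACHELINE - 1)

    /* Round sz up to the next multiple of the cacheline size. */
    static size_t
    cacheline_ceil(size_t sz)
    {
            return ((sz + CACHELINE_MASK) & -CACHELINE);
    }

    int
    main(void)
    {
            assert(-CACHELINE == ~CACHELINE_MASK);
            assert(cacheline_ceil(1) == 64);
            assert(cacheline_ceil(64) == 64);
            assert(cacheline_ceil(65) == 128);
            return (0);
    }
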
*/ size = (size + CACHELINE_MASK) & (-CACHELINE); if (size <= SMALL_MAXCLASS) tcache = (tcache_t *)arena_malloc_small(arena, size, true); else if (size <= tcache_maxclass) tcache = (tcache_t *)arena_malloc_large(arena, size, true); else tcache = (tcache_t *)icalloc(size); if (tcache == NULL) return (NULL); tcache_arena_associate(tcache, arena); assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); for (i = 0; i < nhbins; i++) { tcache->tbins[i].lg_fill_div = 1; tcache->tbins[i].avail = (void **)((uintptr_t)tcache + (uintptr_t)stack_offset); stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); } tcache_tsd_set(&tcache); return (tcache); } void tcache_destroy(tcache_t *tcache) { unsigned i; size_t tcache_size; tcache_arena_dissociate(tcache); for (i = 0; i < NBINS; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_flush_small(tbin, i, 0, tcache); if (config_stats && tbin->tstats.nrequests != 0) { arena_t *arena = tcache->arena; arena_bin_t *bin = &arena->bins[i]; malloc_mutex_lock(&bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(&bin->lock); } } for (; i < nhbins; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_flush_large(tbin, i, 0, tcache); if (config_stats && tbin->tstats.nrequests != 0) { arena_t *arena = tcache->arena; malloc_mutex_lock(&arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[i - NBINS].nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(&arena->lock); } } if (config_prof && tcache->prof_accumbytes > 0) { malloc_mutex_lock(&tcache->arena->lock); arena_prof_accum(tcache->arena, tcache->prof_accumbytes); malloc_mutex_unlock(&tcache->arena->lock); } tcache_size = arena_salloc(tcache, false); if (tcache_size <= SMALL_MAXCLASS) { arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); arena_t *arena = chunk->arena; size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) << - LG_PAGE)); - arena_bin_t *bin = run->bin; + arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin(arena, chunk, tcache, mapelm); - malloc_mutex_unlock(&bin->lock); + arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm); } else if (tcache_size <= tcache_maxclass) { arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); arena_t *arena = chunk->arena; - malloc_mutex_lock(&arena->lock); arena_dalloc_large(arena, chunk, tcache); - malloc_mutex_unlock(&arena->lock); } else idalloc(tcache); } void tcache_thread_cleanup(void *arg) { tcache_t *tcache = *(tcache_t **)arg; if (tcache == TCACHE_STATE_DISABLED) { /* Do nothing. */ } else if (tcache == TCACHE_STATE_REINCARNATED) { /* * Another destructor called an allocator function after this * destructor was called. Reset tcache to * TCACHE_STATE_PURGATORY in order to receive another callback. */ tcache = TCACHE_STATE_PURGATORY; tcache_tsd_set(&tcache); } else if (tcache == TCACHE_STATE_PURGATORY) { /* * The previous time this destructor was called, we set the key * to TCACHE_STATE_PURGATORY so that other destructors wouldn't * cause re-creation of the tcache. This time, do nothing, so * that the destructor will not be called again. 
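
tcache_thread_cleanup() below (and quarantine_cleanup() earlier in this commit) share a small state machine for TSD destructor ordering: a destroyed slot is parked on a PURGATORY sentinel, and a post-destruction use bumps it to REINCARNATED so the destructor is re-armed exactly once more. The pattern in isolation (slot and the sentinel names are illustrative):

    #include <stddef.h>

    /* Sentinels stored in the thread-specific slot, never dereferenced. */
    #define STATE_PURGATORY         ((void *)1)
    #define STATE_REINCARNATED      ((void *)2)

    static void *slot;      /* Stand-in for the real TSD slot. */

    static void
    destructor(void)
    {
            if (slot == STATE_REINCARNATED) {
                    /* Used again after destruction: run once more. */
                    slot = STATE_PURGATORY;
            } else if (slot == STATE_PURGATORY) {
                    /* Second pass, no new use: stay dead, no re-arm. */
            } else if (slot != NULL) {
                    /* ... tear down the live object here ... */
                    slot = STATE_PURGATORY;
            }
    }

    static void
    on_use(void)
    {
            if (slot == STATE_PURGATORY) {
                    /* Record a use that happened after destruction. */
                    slot = STATE_REINCARNATED;
            }
    }
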
*/ } else if (tcache != NULL) { assert(tcache != TCACHE_STATE_PURGATORY); tcache_destroy(tcache); tcache = TCACHE_STATE_PURGATORY; tcache_tsd_set(&tcache); } } void tcache_stats_merge(tcache_t *tcache, arena_t *arena) { unsigned i; /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; tcache_bin_t *tbin = &tcache->tbins[i]; malloc_mutex_lock(&bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; malloc_mutex_unlock(&bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; tcache_bin_t *tbin = &tcache->tbins[i]; arena->stats.nrequests_large += tbin->tstats.nrequests; lstats->nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } } bool tcache_boot0(void) { unsigned i; /* * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is * known. */ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) tcache_maxclass = SMALL_MAXCLASS; else if ((1U << opt_lg_tcache_max) > arena_maxclass) tcache_maxclass = arena_maxclass; else tcache_maxclass = (1U << opt_lg_tcache_max); nhbins = NBINS + (tcache_maxclass >> LG_PAGE); /* Initialize tcache_bin_info. */ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * sizeof(tcache_bin_info_t)); if (tcache_bin_info == NULL) return (true); stack_nelms = 0; for (i = 0; i < NBINS; i++) { if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = (arena_bin_info[i].nregs << 1); } else { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MAX; } stack_nelms += tcache_bin_info[i].ncached_max; } for (; i < nhbins; i++) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; stack_nelms += tcache_bin_info[i].ncached_max; } return (false); } bool tcache_boot1(void) { if (tcache_tsd_boot() || tcache_enabled_tsd_boot()) return (true); return (false); } Index: projects/nand/contrib/jemalloc/src/tsd.c =================================================================== --- projects/nand/contrib/jemalloc/src/tsd.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/tsd.c (revision 235263) @@ -1,72 +1,107 @@ #define JEMALLOC_TSD_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; /******************************************************************************/ void * malloc_tsd_malloc(size_t size) { /* Avoid choose_arena() in order to dodge bootstrapping issues. 
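
tcache_boot0() above clamps the opt_lg_tcache_max tunable so the derived maximum class stays within [SMALL_MAXCLASS, arena_maxclass]. The clamp in isolation (the two bounds are illustrative constants here, not the tree's computed values):

    #include <sys/types.h>

    #define SMALL_MAXCLASS  ((size_t)3584)          /* Illustrative. */
    #define ARENA_MAXCLASS  ((size_t)2 << 20)       /* Illustrative. */

    /* Clamp 1 << lg_max into [SMALL_MAXCLASS, ARENA_MAXCLASS]. */
    static size_t
    clamp_maxclass(ssize_t lg_max)
    {
            if (lg_max < 0 || ((size_t)1 << lg_max) < SMALL_MAXCLASS)
                    return (SMALL_MAXCLASS);
            if (((size_t)1 << lg_max) > ARENA_MAXCLASS)
                    return (ARENA_MAXCLASS);
            return ((size_t)1 << lg_max);
    }
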
*/ - return arena_malloc(arenas[0], size, false, false); + return (arena_malloc(arenas[0], size, false, false)); } void malloc_tsd_dalloc(void *wrapper) { idalloc(wrapper); } void malloc_tsd_no_cleanup(void *arg) { not_reached(); } -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -JEMALLOC_ATTR(visibility("default")) +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif void _malloc_thread_cleanup(void) { - bool pending[ncleanups], again; + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; for (i = 0; i < ncleanups; i++) pending[i] = true; do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); if (pending[i]) again = true; } } } while (again); } #endif void malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } void malloc_tsd_boot(void) { ncleanups = 0; } + +#ifdef _WIN32 +static BOOL WINAPI +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) +{ + + switch (fdwReason) { +#ifdef JEMALLOC_LAZY_LOCK + case DLL_THREAD_ATTACH: + isthreaded = true; + break; +#endif + case DLL_THREAD_DETACH: + _malloc_thread_cleanup(); + break; + default: + break; + } + return (true); +} + +#ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") +# endif +# pragma section(".CRT$XLY",long,read) +#endif +JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) +static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, + DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; +#endif Index: projects/nand/contrib/jemalloc/src/util.c =================================================================== --- projects/nand/contrib/jemalloc/src/util.c (revision 235262) +++ projects/nand/contrib/jemalloc/src/util.c (revision 235263) @@ -1,647 +1,662 @@ #define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write("<jemalloc>: Failed assertion\n"); \ abort(); \ } \ } while (0) #define not_reached() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Unreachable code reached\n"); \ abort(); \ } \ } while (0) #define not_implemented() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Not implemented\n"); \ abort(); \ } \ } while (0) #define JEMALLOC_UTIL_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static void wrtmessage(void *cbopaque, const char *s); #define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); #define D2S_BUFSIZE (1 + U2S_BUFSIZE) static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); #define O2S_BUFSIZE (1 + U2S_BUFSIZE) static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); #define X2S_BUFSIZE (2 + U2S_BUFSIZE) static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ /* malloc_message() setup. */ -JEMALLOC_CATTR(visibility("hidden"), static) -void +static void wrtmessage(void *cbopaque, const char *s) { #ifdef SYS_write /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though. 
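
The syscall(2) fallback that the comment above describes can be exercised standalone; on FreeBSD it bypasses the libc write(2) wrapper (and thus any interposed allocation), and elsewhere it degrades to plain write(2):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Write a message to stderr without touching stdio or malloc. */
    static void
    raw_write(const char *s)
    {
    #ifdef SYS_write
            (void)syscall(SYS_write, STDERR_FILENO, s, strlen(s));
    #else
            (void)write(STDERR_FILENO, s, strlen(s));
    #endif
    }
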
*/ UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); #else UNUSED int result = write(STDERR_FILENO, s, strlen(s)); #endif } -void (*je_malloc_message)(void *, const char *s) - JEMALLOC_ATTR(visibility("default")) = wrtmessage; +JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); -JEMALLOC_CATTR(visibility("hidden"), static) +JEMALLOC_ATTR(visibility("hidden")) void wrtmessage_1_0(const char *s1, const char *s2, const char *s3, const char *s4) { wrtmessage(NULL, s1); wrtmessage(NULL, s2); wrtmessage(NULL, s3); wrtmessage(NULL, s4); } void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3, const char *s4) = wrtmessage_1_0; __sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0); /* + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code. + */ +void +malloc_write(const char *s) +{ + + if (je_malloc_message != NULL) + je_malloc_message(NULL, s); + else + wrtmessage(NULL, s); +} + +/* * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so * provide a wrapper. */ int -buferror(int errnum, char *buf, size_t buflen) +buferror(char *buf, size_t buflen) { -#ifdef _GNU_SOURCE + +#ifdef _WIN32 + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0, + (LPSTR)buf, buflen, NULL); + return (0); +#elif defined(_GNU_SOURCE) char *b = strerror_r(errno, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } return (0); #else return (strerror_r(errno, buf, buflen)); #endif } uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base) { uintmax_t ret, digit; int b; bool neg; const char *p, *ns; if (base < 0 || base == 1 || base > 36) { - errno = EINVAL; + set_errno(EINVAL); return (UINTMAX_MAX); } b = base; /* Swallow leading whitespace and get sign, if any. */ neg = false; p = nptr; while (true) { switch (*p) { case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': p++; break; case '-': neg = true; /* Fall through. */ case '+': p++; /* Fall through. */ default: goto label_prefix; } } /* Get prefix, if any. */ label_prefix: /* * Note where the first non-whitespace/sign character is so that it is * possible to tell whether any digits are consumed (e.g., " 0" vs. * " -x"). */ ns = p; if (*p == '0') { switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': if (b == 0) b = 8; if (b == 8) p++; break; case 'x': switch (p[2]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': if (b == 0) b = 16; if (b == 16) p += 2; break; default: break; } break; default: break; } } if (b == 0) b = 10; /* Convert. */ ret = 0; while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { uintmax_t pret = ret; ret *= b; ret += digit; if (ret < pret) { /* Overflow. */ - errno = ERANGE; + set_errno(ERANGE); return (UINTMAX_MAX); } p++; } if (neg) ret = -ret; if (endptr != NULL) { if (p == ns) { /* No characters were converted. 
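
malloc_strtoumax() above detects overflow after the fact, by checking whether the accumulated value wrapped below its previous value. That test is cheap but can miss wraps whose result still lands above the old value; testing the bound before multiplying is exact. A sketch of the exact form (accum_digit is an illustrative name):

    #include <stdint.h>

    /*
     * Accumulate one digit with an exact overflow check: test the bound
     * before multiplying instead of inferring a wrap from the result.
     */
    static int
    accum_digit(uintmax_t *acc, unsigned base, unsigned digit)
    {
            if (*acc > (UINTMAX_MAX - digit) / base)
                    return (1);     /* Would overflow. */
            *acc = *acc * base + digit;
            return (0);
    }
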
*/ *endptr = (char *)nptr; } else *endptr = (char *)p; } return (ret); } static char * u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; s[i] = '\0'; switch (base) { case 10: do { i--; s[i] = "0123456789"[x % (uint64_t)10]; x /= (uint64_t)10; } while (x > 0); break; case 16: { const char *digits = (uppercase) ? "0123456789ABCDEF" : "0123456789abcdef"; do { i--; s[i] = digits[x & 0xf]; x >>= 4; } while (x > 0); break; } default: { const char *digits = (uppercase) ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" : "0123456789abcdefghijklmnopqrstuvwxyz"; assert(base >= 2 && base <= 36); do { i--; s[i] = digits[x % (uint64_t)base]; x /= (uint64_t)base; } while (x > 0); }} *slen_p = U2S_BUFSIZE - 1 - i; return (&s[i]); } static char * d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; if ((neg = (x < 0))) x = -x; s = u2s(x, 10, false, s, slen_p); if (neg) sign = '-'; switch (sign) { case '-': if (neg == false) break; /* Fall through. */ case ' ': case '+': s--; (*slen_p)++; *s = sign; break; default: not_reached(); } return (s); } static char * o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; (*slen_p)++; *s = '0'; } return (s); } static char * x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? "0X" : "0x", 2); } return (s); } int malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { int ret; size_t i; const char *f; - va_list tap; #define APPEND_C(c) do { \ if (i < size) \ str[i] = (c); \ i++; \ } while (0) #define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) #define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ if (left_justify == false && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) \ APPEND_C(' '); \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ for (j = 0; j < pad_len; j++) \ APPEND_C(' '); \ } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ break; \ case '?' | 0x80: \ val = va_arg(ap, unsigned int); \ break; \ case 'l': \ val = va_arg(ap, long); \ break; \ case 'l' | 0x80: \ val = va_arg(ap, unsigned long); \ break; \ case 'q': \ val = va_arg(ap, long long); \ break; \ case 'q' | 0x80: \ val = va_arg(ap, unsigned long long); \ break; \ case 'j': \ val = va_arg(ap, intmax_t); \ break; \ case 't': \ val = va_arg(ap, ptrdiff_t); \ break; \ case 'z': \ val = va_arg(ap, ssize_t); \ break; \ case 'z' | 0x80: \ val = va_arg(ap, size_t); \ break; \ case 'p': /* Synthetic; used for %p. */ \ val = va_arg(ap, uintptr_t); \ break; \ default: not_reached(); \ } \ } while (0) - if (config_debug) - va_copy(tap, ap); - i = 0; f = format; while (true) { switch (*f) { case '\0': goto label_out; case '%': { bool alt_form = false; bool zero_pad = false; bool left_justify = false; bool plus_space = false; bool plus_plus = false; int prec = -1; int width = -1; unsigned char len = '?'; f++; if (*f == '%') { /* %% */ APPEND_C(*f); break; } /* Flags. 
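
GET_ARG_NUMERIC above packs the parsed length modifier into one byte: the modifier character itself ('?' when absent), with bit 0x80 set for unsigned conversions, so a single switch covers every integer fetch. A reduced mirror of that dispatch (get_arg_numeric is an illustrative name; the real macro assigns into a caller-declared intmax_t or uintmax_t, avoiding the narrowing of large unsigned values seen here):

    #include <stdarg.h>
    #include <stdint.h>

    /* Low bits: modifier character; 0x80: unsigned conversion. */
    static intmax_t
    get_arg_numeric(va_list *ap, unsigned len)
    {
            switch (len) {
            case '?':               return (va_arg(*ap, int));
            case '?' | 0x80:        return (va_arg(*ap, unsigned int));
            case 'l':               return (va_arg(*ap, long));
            case 'l' | 0x80:        return (va_arg(*ap, unsigned long));
            case 'j':               return (va_arg(*ap, intmax_t));
            default:                return (0);     /* Cases elided. */
            }
    }
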
*/ while (true) { switch (*f) { case '#': assert(alt_form == false); alt_form = true; break; case '0': assert(zero_pad == false); zero_pad = true; break; case '-': assert(left_justify == false); left_justify = true; break; case ' ': assert(plus_space == false); plus_space = true; break; case '+': assert(plus_plus == false); plus_plus = true; break; default: goto label_width; } f++; } /* Width. */ label_width: switch (*f) { case '*': width = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uwidth; - errno = 0; + set_errno(0); uwidth = malloc_strtoumax(f, (char **)&f, 10); - assert(uwidth != UINTMAX_MAX || errno != + assert(uwidth != UINTMAX_MAX || get_errno() != ERANGE); width = (int)uwidth; if (*f == '.') { f++; goto label_precision; } else goto label_length; break; } case '.': f++; goto label_precision; default: goto label_length; } /* Precision. */ label_precision: switch (*f) { case '*': prec = va_arg(ap, int); f++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { uintmax_t uprec; - errno = 0; + set_errno(0); uprec = malloc_strtoumax(f, (char **)&f, 10); - assert(uprec != UINTMAX_MAX || errno != ERANGE); + assert(uprec != UINTMAX_MAX || get_errno() != + ERANGE); prec = (int)uprec; break; } default: break; } /* Length. */ label_length: switch (*f) { case 'l': f++; if (*f == 'l') { len = 'q'; f++; } else len = 'l'; break; case 'j': len = 'j'; f++; break; case 't': len = 't'; f++; break; case 'z': len = 'z'; f++; break; default: break; } /* Conversion specifier. */ switch (*f) { char *s; size_t slen; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; GET_ARG_NUMERIC(val, len); s = d2s(val, (plus_plus ? '+' : (plus_space ? ' ' : '-')), buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'o': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[O2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = o2s(val, alt_form, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'u': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[U2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = u2s(val, 10, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'x': case 'X': { uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, len | 0x80); s = x2s(val, alt_form, *f == 'X', buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } case 'c': { unsigned char val; char buf[2]; assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); val = va_arg(ap, int); buf[0] = val; buf[1] = '\0'; APPEND_PADDED_S(buf, 1, width, left_justify); f++; break; } case 's': assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); slen = (prec == -1) ? strlen(s) : prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; case 'p': { uintmax_t val; char buf[X2S_BUFSIZE]; GET_ARG_NUMERIC(val, 'p'); s = x2s(val, true, false, buf, &slen); APPEND_PADDED_S(s, slen, width, left_justify); f++; break; } default: not_implemented(); } break; } default: { APPEND_C(*f); f++; break; }} } label_out: if (i < size) str[i] = '\0'; else str[size - 1] = '\0'; ret = i; #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC return (ret); } JEMALLOC_ATTR(format(printf, 3, 4)) int malloc_snprintf(char *str, size_t size, const char *format, ...) 
{ int ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); return (ret); } void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { /* * The caller did not provide an alternate write_cb callback * function, so use the default one. malloc_write() is an * inline function, so use malloc_message() directly here. */ - write_cb = je_malloc_message; + write_cb = (je_malloc_message != NULL) ? je_malloc_message : + wrtmessage; cbopaque = NULL; } malloc_vsnprintf(buf, sizeof(buf), format, ap); write_cb(cbopaque, buf); } /* * Print to a callback function in such a way as to (hopefully) avoid memory * allocation. */ JEMALLOC_ATTR(format(printf, 3, 4)) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(write_cb, cbopaque, format, ap); va_end(ap); } /* Print to stderr in such a way as to avoid memory allocation. */ JEMALLOC_ATTR(format(printf, 1, 2)) void malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } Index: projects/nand/contrib/less =================================================================== --- projects/nand/contrib/less (revision 235262) +++ projects/nand/contrib/less (revision 235263) Property changes on: projects/nand/contrib/less ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/less:r235157-235262 Index: projects/nand/contrib/libarchive/cpio =================================================================== --- projects/nand/contrib/libarchive/cpio (revision 235262) +++ projects/nand/contrib/libarchive/cpio (revision 235263) Property changes on: projects/nand/contrib/libarchive/cpio ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libarchive/cpio:r235157-235262 Index: projects/nand/contrib/libarchive/libarchive =================================================================== --- projects/nand/contrib/libarchive/libarchive (revision 235262) +++ projects/nand/contrib/libarchive/libarchive (revision 235263) Property changes on: projects/nand/contrib/libarchive/libarchive ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libarchive/libarchive:r235157-235262 Index: projects/nand/contrib/libarchive/libarchive_fe =================================================================== --- projects/nand/contrib/libarchive/libarchive_fe (revision 235262) +++ projects/nand/contrib/libarchive/libarchive_fe (revision 235263) Property changes on: projects/nand/contrib/libarchive/libarchive_fe ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libarchive/libarchive_fe:r235157-235262 Index: projects/nand/contrib/libarchive/tar =================================================================== --- projects/nand/contrib/libarchive/tar (revision 235262) +++ projects/nand/contrib/libarchive/tar (revision 235263) Property changes on: projects/nand/contrib/libarchive/tar ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libarchive/tar:r235157-235262 Index: 
projects/nand/contrib/libarchive =================================================================== --- projects/nand/contrib/libarchive (revision 235262) +++ projects/nand/contrib/libarchive (revision 235263) Property changes on: projects/nand/contrib/libarchive ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libarchive:r235157-235262 Index: projects/nand/contrib/libc++ =================================================================== --- projects/nand/contrib/libc++ (revision 235262) +++ projects/nand/contrib/libc++ (revision 235263) Property changes on: projects/nand/contrib/libc++ ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libc++:r235157-235262 Index: projects/nand/contrib/libcxxrt =================================================================== --- projects/nand/contrib/libcxxrt (revision 235262) +++ projects/nand/contrib/libcxxrt (revision 235263) Property changes on: projects/nand/contrib/libcxxrt ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libcxxrt:r235157-235262 Index: projects/nand/contrib/libpcap =================================================================== --- projects/nand/contrib/libpcap (revision 235262) +++ projects/nand/contrib/libpcap (revision 235263) Property changes on: projects/nand/contrib/libpcap ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libpcap:r235157-235262 Index: projects/nand/contrib/libstdc++ =================================================================== --- projects/nand/contrib/libstdc++ (revision 235262) +++ projects/nand/contrib/libstdc++ (revision 235263) Property changes on: projects/nand/contrib/libstdc++ ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/libstdc++:r235157-235262 Index: projects/nand/contrib/llvm/tools/clang =================================================================== --- projects/nand/contrib/llvm/tools/clang (revision 235262) +++ projects/nand/contrib/llvm/tools/clang (revision 235263) Property changes on: projects/nand/contrib/llvm/tools/clang ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/llvm/tools/clang:r235157-235262 Index: projects/nand/contrib/llvm =================================================================== --- projects/nand/contrib/llvm (revision 235262) +++ projects/nand/contrib/llvm (revision 235263) Property changes on: projects/nand/contrib/llvm ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/llvm:r235157-235262 Index: projects/nand/contrib/ncurses =================================================================== --- projects/nand/contrib/ncurses (revision 235262) +++ projects/nand/contrib/ncurses (revision 235263) Property changes on: projects/nand/contrib/ncurses ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/ncurses:r235157-235262 Index: projects/nand/contrib/netcat =================================================================== --- projects/nand/contrib/netcat (revision 235262) +++ projects/nand/contrib/netcat (revision 235263) 
Property changes on: projects/nand/contrib/netcat ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/netcat:r235157-235262 Index: projects/nand/contrib/ntp =================================================================== --- projects/nand/contrib/ntp (revision 235262) +++ projects/nand/contrib/ntp (revision 235263) Property changes on: projects/nand/contrib/ntp ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/ntp:r235157-235262 Index: projects/nand/contrib/one-true-awk =================================================================== --- projects/nand/contrib/one-true-awk (revision 235262) +++ projects/nand/contrib/one-true-awk (revision 235263) Property changes on: projects/nand/contrib/one-true-awk ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/one-true-awk:r235157-235262 Index: projects/nand/contrib/openbsm =================================================================== --- projects/nand/contrib/openbsm (revision 235262) +++ projects/nand/contrib/openbsm (revision 235263) Property changes on: projects/nand/contrib/openbsm ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/openbsm:r235157-235262 Index: projects/nand/contrib/openpam =================================================================== --- projects/nand/contrib/openpam (revision 235262) +++ projects/nand/contrib/openpam (revision 235263) Property changes on: projects/nand/contrib/openpam ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/openpam:r235157-235262 Index: projects/nand/contrib/openresolv =================================================================== --- projects/nand/contrib/openresolv (revision 235262) +++ projects/nand/contrib/openresolv (revision 235263) Property changes on: projects/nand/contrib/openresolv ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/openresolv:r235157-235262 Index: projects/nand/contrib/pf =================================================================== --- projects/nand/contrib/pf (revision 235262) +++ projects/nand/contrib/pf (revision 235263) Property changes on: projects/nand/contrib/pf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/pf:r235157-235262 Index: projects/nand/contrib/sendmail =================================================================== --- projects/nand/contrib/sendmail (revision 235262) +++ projects/nand/contrib/sendmail (revision 235263) Property changes on: projects/nand/contrib/sendmail ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/sendmail:r235157-235262 Index: projects/nand/contrib/tcpdump =================================================================== --- projects/nand/contrib/tcpdump (revision 235262) +++ projects/nand/contrib/tcpdump (revision 235263) Property changes on: projects/nand/contrib/tcpdump ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/tcpdump:r235157-235262 Index: projects/nand/contrib/tcsh 
=================================================================== --- projects/nand/contrib/tcsh (revision 235262) +++ projects/nand/contrib/tcsh (revision 235263) Property changes on: projects/nand/contrib/tcsh ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/tcsh:r235157-235262 Index: projects/nand/contrib/tnftp =================================================================== --- projects/nand/contrib/tnftp (revision 235262) +++ projects/nand/contrib/tnftp (revision 235263) Property changes on: projects/nand/contrib/tnftp ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/tnftp:r235157-235262 Index: projects/nand/contrib/top/install-sh =================================================================== --- projects/nand/contrib/top/install-sh (revision 235262) +++ projects/nand/contrib/top/install-sh (revision 235263) Property changes on: projects/nand/contrib/top/install-sh ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/top/install-sh:r235157-235262 Index: projects/nand/contrib/top =================================================================== --- projects/nand/contrib/top (revision 235262) +++ projects/nand/contrib/top (revision 235263) Property changes on: projects/nand/contrib/top ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/top:r235157-235262 Index: projects/nand/contrib/tzcode/stdtime =================================================================== --- projects/nand/contrib/tzcode/stdtime (revision 235262) +++ projects/nand/contrib/tzcode/stdtime (revision 235263) Property changes on: projects/nand/contrib/tzcode/stdtime ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/tzcode/stdtime:r235157-235262 Index: projects/nand/contrib/tzcode/zic =================================================================== --- projects/nand/contrib/tzcode/zic (revision 235262) +++ projects/nand/contrib/tzcode/zic (revision 235263) Property changes on: projects/nand/contrib/tzcode/zic ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/tzcode/zic:r235157-235262 Index: projects/nand/contrib/tzdata =================================================================== --- projects/nand/contrib/tzdata (revision 235262) +++ projects/nand/contrib/tzdata (revision 235263) Property changes on: projects/nand/contrib/tzdata ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/tzdata:r235157-235262 Index: projects/nand/contrib/wpa =================================================================== --- projects/nand/contrib/wpa (revision 235262) +++ projects/nand/contrib/wpa (revision 235263) Property changes on: projects/nand/contrib/wpa ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/wpa:r235157-235262 Index: projects/nand/contrib/xz =================================================================== --- projects/nand/contrib/xz (revision 235262) +++ projects/nand/contrib/xz (revision 235263) Property changes on: projects/nand/contrib/xz 
___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/xz:r235157-235262 Index: projects/nand/crypto/heimdal =================================================================== --- projects/nand/crypto/heimdal (revision 235262) +++ projects/nand/crypto/heimdal (revision 235263) Property changes on: projects/nand/crypto/heimdal ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/crypto/heimdal:r235157-235262 Index: projects/nand/crypto/openssh =================================================================== --- projects/nand/crypto/openssh (revision 235262) +++ projects/nand/crypto/openssh (revision 235263) Property changes on: projects/nand/crypto/openssh ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/crypto/openssh:r235157-235262 Index: projects/nand/crypto/openssl =================================================================== --- projects/nand/crypto/openssl (revision 235262) +++ projects/nand/crypto/openssl (revision 235263) Property changes on: projects/nand/crypto/openssl ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/crypto/openssl:r235157-235262 Index: projects/nand/etc/mtree/BSD.usr.dist =================================================================== --- projects/nand/etc/mtree/BSD.usr.dist (revision 235262) +++ projects/nand/etc/mtree/BSD.usr.dist (revision 235263) @@ -1,1364 +1,1364 @@ # $FreeBSD$ # # Please see the file src/etc/mtree/README before making changes to this file. # /set type=dir uname=root gname=wheel mode=0755 . bin .. games .. include .. lib aout .. compat aout .. .. dtrace .. engines .. i18n .. .. lib32 dtrace .. i18n .. .. libdata gcc .. ldscripts .. lint .. .. libexec bsdinstall .. lpr ru .. .. sendmail .. sm.bin .. .. local .. obj nochange .. sbin .. share calendar de_DE.ISO8859-1 .. fr_FR.ISO8859-1 .. hr_HR.ISO8859-2 .. hu_HU.ISO8859-2 .. ru_RU.KOI8-R .. uk_UA.KOI8-U .. .. dict .. doc IPv6 .. atm .. bind9 arm .. misc .. .. legal intel_ipw .. intel_iwi .. intel_wpi .. .. llvm clang .. .. ncurses .. ntp .. papers .. psd 01.cacm .. 02.implement .. 03.iosys .. 04.uprog .. 05.sysman .. 06.Clang .. 12.make .. 13.rcs .. 15.yacc .. 16.lex .. 17.m4 .. 18.gprof .. 20.ipctut .. 21.ipc .. 22.rpcgen .. 23.rpc .. 24.xdr .. 25.xdrrfc .. 26.rpcrfc .. 27.nfsrfc .. 28.cvs .. .. smm 01.setup .. 02.config .. 03.fsck .. 04.quotas .. 05.fastfs .. 06.nfs .. 07.lpd .. 08.sendmailop .. 11.timedop .. 12.timed .. 18.net .. .. usd 04.csh .. 05.dc .. 06.bc .. 07.mail .. 10.exref .. 11.edit .. 12.vi .. 13.viref .. 18.msdiffs .. 19.memacros .. 20.meref .. 21.troff .. 22.trofftut .. .. .. examples BSD_daemon .. FreeBSD_version .. IPv6 .. bootforth .. - csh - .. + csh + .. cvs contrib .. .. cvsup .. diskless .. drivers .. etc defaults .. .. find_interface .. hast .. hostapd .. ibcs2 .. indent .. ipfilter .. ipfw .. iscsi .. jails .. kld cdev module .. test .. .. dyn_sysctl .. firmware fwconsumer .. fwimage .. .. khelp .. syscall module .. test .. .. .. libvgl .. mdoc .. netgraph bluetooth .. .. nwclient .. pc-sysinstall .. perfmon .. pf .. portal .. ppi .. ppp .. printing .. scsi_target .. ses getencstat .. sesd .. setencstat .. setobjstat .. srcs .. .. smbfs print .. .. sunrpc dir .. msg .. sort .. .. tcsh .. .. games fortune .. .. info .. i18n csmapper APPLE .. AST .. BIG5 .. CNS .. CP .. 
EBCDIC .. GB .. GEORGIAN .. ISO-8859 .. ISO646 .. JIS .. KAZAKH .. KOI .. KS .. MISC .. TCVN .. .. esdb APPLE .. AST .. BIG5 .. CP .. DEC .. EBCDIC .. EUC .. GB .. GEORGIAN .. ISO-2022 .. ISO-8859 .. ISO646 .. KAZAKH .. KOI .. MISC .. TCVN .. UTF .. .. .. locale UTF-8 .. af_ZA.ISO8859-1 .. af_ZA.ISO8859-15 .. af_ZA.UTF-8 .. am_ET.UTF-8 .. be_BY.CP1131 .. be_BY.CP1251 .. be_BY.ISO8859-5 .. be_BY.UTF-8 .. bg_BG.CP1251 .. bg_BG.UTF-8 .. ca_AD.ISO8859-1 .. ca_ES.ISO8859-1 .. ca_FR.ISO8859-1 .. ca_IT.ISO8859-1 .. ca_AD.ISO8859-15 .. ca_ES.ISO8859-15 .. ca_FR.ISO8859-15 .. ca_IT.ISO8859-15 .. ca_AD.UTF-8 .. ca_ES.UTF-8 .. ca_FR.UTF-8 .. ca_IT.UTF-8 .. cs_CZ.ISO8859-2 .. cs_CZ.UTF-8 .. da_DK.ISO8859-1 .. da_DK.ISO8859-15 .. da_DK.UTF-8 .. de_AT.ISO8859-1 .. de_AT.ISO8859-15 .. de_AT.UTF-8 .. de_CH.ISO8859-1 .. de_CH.ISO8859-15 .. de_CH.UTF-8 .. de_DE.ISO8859-1 .. de_DE.ISO8859-15 .. de_DE.UTF-8 .. el_GR.ISO8859-7 .. el_GR.UTF-8 .. en_AU.ISO8859-1 .. en_AU.ISO8859-15 .. en_AU.US-ASCII .. en_AU.UTF-8 .. en_CA.ISO8859-1 .. en_CA.ISO8859-15 .. en_CA.US-ASCII .. en_CA.UTF-8 .. en_GB.ISO8859-1 .. en_GB.ISO8859-15 .. en_GB.US-ASCII .. en_GB.UTF-8 .. en_IE.UTF-8 .. en_NZ.ISO8859-1 .. en_NZ.ISO8859-15 .. en_NZ.US-ASCII .. en_NZ.UTF-8 .. en_US.ISO8859-1 .. en_US.ISO8859-15 .. en_US.US-ASCII .. en_US.UTF-8 .. es_ES.ISO8859-1 .. es_ES.ISO8859-15 .. es_ES.UTF-8 .. et_EE.ISO8859-15 .. et_EE.UTF-8 .. eu_ES.ISO8859-1 .. eu_ES.ISO8859-15 .. eu_ES.UTF-8 .. fi_FI.ISO8859-1 .. fi_FI.ISO8859-15 .. fi_FI.UTF-8 .. fr_BE.ISO8859-1 .. fr_BE.ISO8859-15 .. fr_BE.UTF-8 .. fr_CA.ISO8859-1 .. fr_CA.ISO8859-15 .. fr_CA.UTF-8 .. fr_CH.ISO8859-1 .. fr_CH.ISO8859-15 .. fr_CH.UTF-8 .. fr_FR.ISO8859-1 .. fr_FR.ISO8859-15 .. fr_FR.UTF-8 .. he_IL.UTF-8 .. hi_IN.ISCII-DEV .. hr_HR.ISO8859-2 .. hr_HR.UTF-8 .. hu_HU.ISO8859-2 .. hu_HU.UTF-8 .. hy_AM.ARMSCII-8 .. hy_AM.UTF-8 .. is_IS.ISO8859-1 .. is_IS.ISO8859-15 .. is_IS.UTF-8 .. it_CH.ISO8859-1 .. it_CH.ISO8859-15 .. it_CH.UTF-8 .. it_IT.ISO8859-1 .. it_IT.ISO8859-15 .. it_IT.UTF-8 .. ja_JP.SJIS .. ja_JP.UTF-8 .. ja_JP.eucJP .. kk_KZ.PT154 .. kk_KZ.UTF-8 .. ko_KR.CP949 .. ko_KR.UTF-8 .. ko_KR.eucKR .. la_LN.ISO8859-1 .. la_LN.ISO8859-13 .. la_LN.ISO8859-15 .. la_LN.ISO8859-2 .. la_LN.ISO8859-4 .. la_LN.US-ASCII .. lt_LT.ISO8859-13 .. lt_LT.ISO8859-4 .. lt_LT.UTF-8 .. lv_LV.ISO8859-13 .. lv_LV.UTF-8 .. mn_MN.UTF-8 .. nb_NO.ISO8859-1 .. nb_NO.ISO8859-15 .. nb_NO.UTF-8 .. nl_BE.ISO8859-1 .. nl_BE.ISO8859-15 .. nl_BE.UTF-8 .. nl_NL.ISO8859-1 .. nl_NL.ISO8859-15 .. nl_NL.UTF-8 .. nn_NO.ISO8859-1 .. nn_NO.ISO8859-15 .. nn_NO.UTF-8 .. no_NO.ISO8859-1 .. no_NO.ISO8859-15 .. no_NO.UTF-8 .. pl_PL.ISO8859-2 .. pl_PL.UTF-8 .. pt_BR.ISO8859-1 .. pt_BR.UTF-8 .. pt_PT.ISO8859-1 .. pt_PT.ISO8859-15 .. pt_PT.UTF-8 .. ro_RO.ISO8859-2 .. ro_RO.UTF-8 .. ru_RU.CP1251 .. ru_RU.CP866 .. ru_RU.ISO8859-5 .. ru_RU.KOI8-R .. ru_RU.UTF-8 .. sk_SK.ISO8859-2 .. sk_SK.UTF-8 .. sl_SI.ISO8859-2 .. sl_SI.UTF-8 .. sr_YU.ISO8859-2 .. sr_YU.ISO8859-5 .. sr_YU.UTF-8 .. sv_SE.ISO8859-1 .. sv_SE.ISO8859-15 .. sv_SE.UTF-8 .. tr_TR.ISO8859-9 .. tr_TR.UTF-8 .. uk_UA.CP1251 .. uk_UA.ISO8859-5 .. uk_UA.KOI8-U .. uk_UA.UTF-8 .. zh_CN.GB18030 .. zh_CN.GB2312 .. zh_CN.GBK .. zh_CN.UTF-8 .. zh_CN.eucCN .. zh_HK.Big5HKSCS .. zh_HK.UTF-8 .. zh_TW.Big5 .. zh_TW.UTF-8 .. .. man /set uname=man cat1 .. cat1aout .. cat2 .. cat3 .. cat4 amd64 .. arm .. i386 .. powerpc .. sparc64 .. .. cat5 .. cat6 .. cat7 .. cat8 amd64 .. i386 .. powerpc .. sparc64 .. .. cat9 .. en.ISO8859-1 uname=root cat1 .. cat1aout .. cat2 .. cat3 .. cat4 amd64 .. arm .. 
i386 .. powerpc .. sparc64 .. .. cat5 .. cat6 .. cat7 .. cat8 amd64 .. i386 .. powerpc .. sparc64 .. .. cat9 .. .. en.UTF-8 uname=root cat1 .. cat1aout .. cat2 .. cat3 .. cat4 amd64 .. arm .. i386 .. powerpc .. sparc64 .. .. cat5 .. cat6 .. cat7 .. cat8 amd64 .. i386 .. powerpc .. sparc64 .. .. cat9 .. .. ja uname=root cat1 .. cat2 .. cat3 .. cat4 .. cat5 .. cat6 .. cat7 .. cat8 .. cat9 .. /set uname=root man1 .. man2 .. man3 .. man4 .. man5 .. man6 .. man7 .. man8 .. man9 .. .. man1 .. man1aout .. man2 .. man3 .. man4 amd64 .. arm .. i386 .. powerpc .. sparc64 .. .. man5 .. man6 .. man7 .. man8 amd64 .. i386 .. powerpc .. sparc64 .. .. man9 .. .. misc fonts .. .. mk .. nls C .. af_ZA.ISO8859-1 .. af_ZA.ISO8859-15 .. af_ZA.UTF-8 .. am_ET.UTF-8 .. be_BY.CP1131 .. be_BY.CP1251 .. be_BY.ISO8859-5 .. be_BY.UTF-8 .. bg_BG.CP1251 .. bg_BG.UTF-8 .. ca_ES.ISO8859-1 .. ca_ES.ISO8859-15 .. ca_ES.UTF-8 .. cs_CZ.ISO8859-2 .. cs_CZ.UTF-8 .. da_DK.ISO8859-1 .. da_DK.ISO8859-15 .. da_DK.UTF-8 .. de_AT.ISO8859-1 .. de_AT.ISO8859-15 .. de_AT.UTF-8 .. de_CH.ISO8859-1 .. de_CH.ISO8859-15 .. de_CH.UTF-8 .. de_DE.ISO8859-1 .. de_DE.ISO8859-15 .. de_DE.UTF-8 .. el_GR.ISO8859-7 .. el_GR.UTF-8 .. en_AU.ISO8859-1 .. en_AU.ISO8859-15 .. en_AU.US-ASCII .. en_AU.UTF-8 .. en_CA.ISO8859-1 .. en_CA.ISO8859-15 .. en_CA.US-ASCII .. en_CA.UTF-8 .. en_GB.ISO8859-1 .. en_GB.ISO8859-15 .. en_GB.US-ASCII .. en_GB.UTF-8 .. en_IE.UTF-8 .. en_NZ.ISO8859-1 .. en_NZ.ISO8859-15 .. en_NZ.US-ASCII .. en_NZ.UTF-8 .. en_US.ISO8859-1 .. en_US.ISO8859-15 .. en_US.UTF-8 .. es_ES.ISO8859-1 .. es_ES.ISO8859-15 .. es_ES.UTF-8 .. et_EE.ISO8859-15 .. et_EE.UTF-8 .. fi_FI.ISO8859-1 .. fi_FI.ISO8859-15 .. fi_FI.UTF-8 .. fr_BE.ISO8859-1 .. fr_BE.ISO8859-15 .. fr_BE.UTF-8 .. fr_CA.ISO8859-1 .. fr_CA.ISO8859-15 .. fr_CA.UTF-8 .. fr_CH.ISO8859-1 .. fr_CH.ISO8859-15 .. fr_CH.UTF-8 .. fr_FR.ISO8859-1 .. fr_FR.ISO8859-15 .. fr_FR.UTF-8 .. gl_ES.ISO8859-1 .. he_IL.UTF-8 .. hi_IN.ISCII-DEV .. hr_HR.ISO8859-2 .. hr_HR.UTF-8 .. hu_HU.ISO8859-2 .. hu_HU.UTF-8 .. hy_AM.ARMSCII-8 .. hy_AM.UTF-8 .. is_IS.ISO8859-1 .. is_IS.ISO8859-15 .. is_IS.UTF-8 .. it_CH.ISO8859-1 .. it_CH.ISO8859-15 .. it_CH.UTF-8 .. it_IT.ISO8859-1 .. it_IT.ISO8859-15 .. it_IT.UTF-8 .. ja_JP.SJIS .. ja_JP.UTF-8 .. ja_JP.eucJP .. kk_KZ.PT154 .. kk_KZ.UTF-8 .. ko_KR.CP949 .. ko_KR.UTF-8 .. ko_KR.eucKR .. la_LN.ISO8859-1 .. la_LN.ISO8859-13 .. la_LN.ISO8859-15 .. la_LN.ISO8859-2 .. la_LN.ISO8859-4 .. la_LN.US-ASCII .. lt_LT.ISO8859-13 .. lt_LT.ISO8859-4 .. lt_LT.UTF-8 .. lv_LV.ISO8859-13 .. lv_LV.UTF-8 .. mn_MN.UTF-8 .. nl_BE.ISO8859-1 .. nl_BE.ISO8859-15 .. nl_BE.UTF-8 .. nl_NL.ISO8859-1 .. nl_NL.ISO8859-15 .. nl_NL.UTF-8 .. no_NO.ISO8859-1 .. no_NO.ISO8859-15 .. no_NO.UTF-8 .. pl_PL.ISO8859-2 .. pl_PL.UTF-8 .. pt_BR.ISO8859-1 .. pt_BR.UTF-8 .. pt_PT.ISO8859-1 .. pt_PT.ISO8859-15 .. pt_PT.UTF-8 .. ro_RO.ISO8859-2 .. ro_RO.UTF-8 .. ru_RU.CP1251 .. ru_RU.CP866 .. ru_RU.ISO8859-5 .. ru_RU.KOI8-R .. ru_RU.UTF-8 .. sk_SK.ISO8859-2 .. sk_SK.UTF-8 .. sl_SI.ISO8859-2 .. sl_SI.UTF-8 .. sr_YU.ISO8859-2 .. sr_YU.ISO8859-5 .. sr_YU.UTF-8 .. sv_SE.ISO8859-1 .. sv_SE.ISO8859-15 .. sv_SE.UTF-8 .. tr_TR.ISO8859-9 .. tr_TR.UTF-8 .. uk_UA.ISO8859-5 .. uk_UA.KOI8-U .. uk_UA.UTF-8 .. zh_CN.GB18030 .. zh_CN.GB2312 .. zh_CN.GBK .. zh_CN.UTF-8 .. zh_CN.eucCN .. zh_HK.Big5HKSCS .. zh_HK.UTF-8 .. zh_TW.Big5 .. zh_TW.UTF-8 .. .. openssl man /set uname=man cat1 .. cat3 .. en.ISO8859-1 uname=root cat1 .. cat3 .. .. /set uname=root man1 .. man3 .. .. .. pc-sysinstall backend .. backend-partmanager .. backend-query .. 
conf license .. .. doc .. .. security .. sendmail .. skel .. snmp defs .. mibs .. .. syscons fonts .. keymaps .. scrnmaps .. .. tabset .. vi catalog .. .. zoneinfo Africa .. America Argentina .. Indiana .. Kentucky .. North_Dakota .. .. Antarctica .. Arctic .. Asia .. Atlantic .. Australia .. Etc .. Europe .. Indian .. Pacific .. SystemV .. .. .. src nochange .. .. Index: projects/nand/games/fortune/datfiles/freebsd-tips =================================================================== --- projects/nand/games/fortune/datfiles/freebsd-tips (revision 235262) +++ projects/nand/games/fortune/datfiles/freebsd-tips (revision 235263) @@ -1,481 +1,477 @@ This fortune brought to you by: $FreeBSD$ % Any user that is a member of the wheel group can use "su -" to simulate a root login. You can add a user to the wheel group by editing /etc/group. -- Konstantinos Konstantinidis % By pressing "Scroll Lock" you can use the arrow keys to scroll backward through the console output. Press "Scroll Lock" again to turn it off. % Can't remember if you've installed a certain port or not? Try "pkg_info -Ix port_name". % Ever wonder what those numbers after command names were, as in cat(1)? It's the section of the manual the man page is in. "man man" will tell you more. -- David Scheidt % Forget how to spell a word or a variation of a word? Use look portion_of_word_you_know -- Dru % Forget what directory you are in? Type "pwd". -- Dru % Forget when Easter is? Try "ncal -e". If you need the date for Orthodox Easter, use "ncal -o" instead. -- Dru % FreeBSD is started up by the program 'init'. The first thing init does when starting multiuser mode (ie, starting the computer up for normal use) is to run the shell script /etc/rc. By reading /etc/rc and the /etc/rc.d/ scripts, you can learn a lot about how the system is put together, which again will make you more confident about what happens when you do something with it. % Handy bash(1) prompt: PS1="\u@\h \w \!$ " -- David Scheidt % Having trouble using fetch through a firewall? Try setting the environment variable FTP_PASSIVE_MODE to yes, and see fetch(3) for more details. % If other operating systems have damaged your Master Boot Record, you can reinstall it either with /usr/sbin/sysinstall or with boot0cfg(8). See "man boot0cfg" for details. % If you accidentally end up inside vi, you can quit it by pressing Escape, colon (:), q (q), bang (!) and pressing return. % If you are in the C shell and have just installed a new program, you won't be able to run it unless you first type "rehash". -- Dru % If you do not want to get beeps in X11 (X Windows), you can turn them off with xset b off % If you have a CD-ROM drive in your machine, you can make the CD-ROM that is presently inserted available by typing 'mount /cdrom' as root. The CD-ROM will be available under /cdrom/. Remember to do 'umount /cdrom' before removing the CD-ROM (it will usually not be possible to remove the CD-ROM without doing this.) Note: This tip may not work in all configurations. % If you need a reminder to leave your terminal, type "leave +hhmm" where "hhmm" represents in how many hours and minutes you need to leave. -- Dru % If you need to ask a question on the FreeBSD-questions mailing list then http://www.FreeBSD.org/doc/en_US.ISO8859-1/articles/\ freebsd-questions/index.html contains lots of useful advice to help you get the best results. 
% -If you `set filec' (file completion) in tcsh and write a part of the -filename, pressing TAB will show you the available choices when there +If you write part of a filename in tcsh, +pressing TAB will show you the available choices when there is more than one, or complete the filename if there's only one match. % If you `set watch = (0 any any)' in tcsh, you will be notified when someone logs in or out of your system. % If you use the C shell, add the following line to the .cshrc file in your home directory to prevent core files from being written to disk: limit coredumpsize 0 -- Dru % If you want df(1) and other commands to display disk sizes in kilobytes instead of 512-byte blocks, set BLOCKSIZE in your environment to 'K'. You can also use 'M' for Megabytes or 'G' for Gigabytes. If you want df(1) to automatically select the best size then use 'df -h'. % If you want to play CDs with FreeBSD, a utility for this is already included. Type 'cdcontrol' then 'help' to learn more. (You may need to set the CDROM environment variable in order to make cdcontrol want to start.) % If you want to quickly check for duplicate package/port installations, try the following pkg_info command. pkg_info | sort | sed -e 's/-[0-9].*$//' | \ uniq -c | grep -v '^[[:space:]]*1' % If you'd like to keep track of applications in the FreeBSD ports tree, take a look at FreshPorts; http://www.freshports.org/ % In order to make fetch (the FreeBSD downloading tool) ask for username/password when it encounters a password-protected web page, you can set the environment variable HTTP_AUTH to 'basic:*'. % In order to search for a string in some files, use 'grep' like this: grep "string" filename1 [filename2 filename3 ...] This will print out the lines in the files that contain the string. grep can also do a lot more advanced searches - type 'man grep' for details. % In order to support national characters for European languages in tools like less without creating other nationalisation aspects, set the environment variable LC_ALL to 'en_US.ISO8859-1'. % -In tcsh, you can `set autolist' to have the shell automatically show -all the possible matches when doing filename/directory expansion. -% "man firewall" will give advice for building a FreeBSD firewall -- David Scheidt % "man hier" will explain the way FreeBSD filesystems are normally laid out. -- David Scheidt % Man pages are divided into sections depending on topic. There are 9 different sections numbered from 1 (General Commands) to 9 (Kernel Developer's Manual). You can get an introduction to each topic by typing man intro In other words, to get the intro to general commands, type man 1 intro % "man ports" gives many useful hints about installing FreeBSD ports. % "man security" gives very good advice on how to tune the security of your FreeBSD system. % "man tuning" gives some tips on how to tune the performance of your FreeBSD system. -- David Scheidt % Need to do a search in a manpage or in a file you've sent to a pager? Use "/search_word". To repeat the same search, type "n" for next. -- Dru % Need to find the location of a program? Use "locate program_name". -- Dru % Need to leave your terminal for a few minutes and don't want to logout? Use "lock -p". When you return, use your password as the key to unlock the terminal. -- Dru % Need to print a manpage? Use man name_of_manpage | col -bx | lpr -- Dru % Need to quickly empty a file? Use ": > filename". -- Dru % Need to quickly return to your home directory? Type "cd".
-- Dru % Need to remove all those ^M characters from a DOS file? Try tr -d \\r < dosfile > newfile -- Originally by Dru % Need to see the calendar for this month? Simply type "cal". To see the whole year, type "cal -y". -- Dru % Need to see which daemons are listening for connection requests? Use "sockstat -4l" for IPv4, and "sockstat -l" for IPv4 and IPv6. -- Dru % Need to see your routing table? Type "netstat -rn". The entry with the G flag is your gateway. -- Dru % Nice bash prompt: PS1='(\[$(tput md)\]\t <\w>\[$(tput me)\]) $(echo $?) \$ ' -- Mathieu % -Nice tcsh prompts: - set prompt = '[%B%m%b] %B%~%b%# ' - set prompt = '%m %# ' - set prompt = '%n@%m%# ' - set prompt = '%n@%m:%/%# ' - set prompt = '%n@%m:%~%# ' -% Over quota? "du -s * | sort -n " will give you a sorted list of your directory sizes. -- David Scheidt % nc(1) (or netcat) is useful not only for redirecting input/output to TCP or UDP connections, but also for proxying them with inetd(8). % sh (the default Bourne shell in FreeBSD) supports command-line editing. Just ``set -o emacs'' or ``set -o vi'' to enable it. % Simple tcsh prompt: set prompt = '%# ' % The default editor in FreeBSD is vi, which is efficient to use when you have learned it, but somewhat user-unfriendly. To use ee (an easier but less powerful editor) instead, set the environment variable EDITOR to /usr/bin/ee % Time to change your password? Type "passwd" and follow the prompts. -- Dru % To change an environment variable in /bin/sh use: $ VARIABLE="value" $ export VARIABLE % To change an environment variable in tcsh you use: setenv NAME "value" where NAME is the name of the variable and "value" its new value. % To clear the screen, use "clear". To re-display your screen buffer, press the scroll lock key and use your page up button. When you're finished, press the scroll lock key again to get your prompt back. -- Dru % To determine whether a file is a text file, executable, or some other type of file, use file filename -- Dru % To do a fast search for a file, try locate filename locate uses a database that is updated every Saturday (assuming your computer is running FreeBSD at the time) to quickly find files based on name only. % To erase a line you've written at the command prompt, use "Ctrl-U". -- Dru % To find out the hostname associated with an IP address, use dig -x IP_address -- Dru % To obtain a neat PostScript rendering of a manual page, use the ``-t'' switch of the man(1) utility: ``man -t ''. For example: man -t grep > grep.ps # Save the PostScript version to a file or man -t printf | lp # Send the PostScript directly to the printer % To quickly create an empty file, use "touch filename". -- Dru % To read a compressed file without having to first uncompress it, use -"zcat" or "zmore" to view it. +"zcat" or "zless" to view it. -- Dru % To repeat the last command in the C shell, type "!!". -- Dru % To save disk space in your home directory, compress files you rarely use with "gzip filename". -- Dru % To search for files that match a particular name, use find(1); for example find / -name "*GENERIC*" -ls will search '/', and all subdirectories, for files with 'GENERIC' in the name. -- Stephen Hilton % To see all of the directories on your FreeBSD system, type - ls -R / | more + ls -R / | less -- Dru % To see how long it takes a command to run, type the word "time" before the command name.
-- Dru % To see how much disk space is left on your partitions, use df -h -- Dru % To see the 10 largest files on a directory or partition, use du /partition_or_directory_name | sort -rn | head -- Dru % To see the IP addresses currently set on your active interfaces, type "ifconfig -u". -- Dru % To see the last 10 lines of a long file, use "tail filename". To see the first 10 lines, use "head filename". -- Dru % To see the last time that you logged in, use lastlogin(8). -- Dru % To see the MAC addresses of the NICs on your system, type ifconfig -a -- Dru % To see the output from when your computer started, run dmesg(8). If it has been replaced with other messages, look at /var/run/dmesg.boot. -- Francisco Reyes % Want colour in your directory listings? Use "ls -G". "ls -F" is also useful, and they can be combined as "ls -FG". % -Want to find a specific port, just type the following under /usr/ports, +Want to find a specific port, just type the following under /usr/ports or one of its subdirectories: "make search name=" or "make search key=" % Want to know how many words, lines, or bytes are contained in a file? Type "wc filename". -- Dru % Want to see how much virtual memory you're using? Just type "swapinfo" to be shown information about the usage of your swap partitions. % Want to strip UTF-8 BOM (Byte Order Mark) from given files? sed -e '1s/^\xef\xbb\xbf//' < bomfile > newfile % Want to use sed(1) to edit a file in place? Well, to replace every 'e' with an 'o', in a file named 'foo', you can do: sed -i.bak s/e/o/g foo And you'll get a backup of the original in a file named 'foo.bak', but if you want no backup: sed -i '' s/e/o/g foo % When you've made modifications to a file in vi(1) and then find that you can't write it, type ``!rm -f %'' then ``:w!'' to force the write. This won't work if you don't have write permissions to the directory and probably won't be suitable if you're editing through a symbolic link. % You can adjust the volume of various parts of the sound system in your computer by typing 'mixer '. To get a list of what you can adjust, just type 'mixer'. % You can automatically download and install binary packages by doing pkg_add -r where you replace with the URL to the package. This will also automatically install the packages the package you download is dependent on (ie, the packages it needs in order to work.) % You can change the video mode on all consoles by adding something like the following to /etc/rc.conf: allscreens_flags="80x30" You can use "vidcontrol -i mode | grep T" for a list of supported text modes. -- Konstantinos Konstantinidis % You can disable tcsh's terminal beep if you `set nobeep'. % You can get a good generic server install by using the instant-server port/package. If you have ports installed, you can install it by doing # cd /usr/ports/misc/instant-server # make install && make clean as root. This will install a collection of packages that is appropriate for running a "generic" server. % You can install extra packages for FreeBSD by using the ports system. If you have installed it, you can download, compile, and install software by just typing # cd /usr/ports// # make install && make clean as root. The ports infrastructure will download the software, change it so it works on FreeBSD, compile it, install it, register the installation so it will be possible to automatically uninstall it, and clean out the temporary working space it used. You can remove an installed port you decide you do not want after all by typing # cd /usr/ports// # make deinstall as root.
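A concrete sketch of the ports workflow just described; the port directory
editors/someeditor here is purely illustrative:
	# cd /usr/ports/editors/someeditor
	# make install && make clean
and later, to remove it again:
	# cd /usr/ports/editors/someeditor
	# make deinstall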
% You can look through a file in a nice text-based interface by typing less filename % You can make a log of your terminal session with script(1). % You can often get answers to your questions about FreeBSD by searching in the FreeBSD mailing list archives at http://www.FreeBSD.org/search/search.html % You can open up a new split-screen window in (n)vi with :N or :E and then use ^w to switch between the two. % You can permanently set environment variables for your shell by putting them in a startup file for the shell. The name of the startup file varies depending on the shell - csh and tcsh use .login, bash, sh, ksh and zsh use .profile. When using bash, sh, ksh or zsh, don't forget to export the variable. % You can press Ctrl-D to quickly exit from a shell, or logout from a login shell. -- Konstantinos Konstantinidis % You can press up-arrow or down-arrow to walk through a list of previous commands in tcsh. % You can search for documentation on a keyword by typing apropos keyword % You can `set autologout = 30' to have tcsh log you off automatically if you leave the shell idle for more than 30 minutes. % You can use aliases to decrease the amount of typing you need to do to get commands you commonly use. Examples of fairly popular aliases include (in Bourne shell style, as in /bin/sh, bash, ksh, and zsh): alias lf="ls -FA" alias ll="ls -lA" alias su="su -m" In csh or tcsh, these would be alias lf ls -FA alias ll ls -lA alias su su -m To remove an alias, you can usually use 'unalias aliasname'. To list all aliases, you can usually type just 'alias'. % You can use /etc/make.conf to control the options used to compile software on this system. Example entries are in /usr/share/examples/etc/make.conf. % You can use "pkg_info" to see a list of packages you have installed. -- Konstantinos Konstantinidis % You can use the 'fetch' command to retrieve files over ftp or http. fetch http://www.FreeBSD.org/index.html will download the front page of the FreeBSD web site. % You can use "whereis" to search standard binary, manual page and source directories for the specified programs. This can be particularly handy when you are trying to find where in the ports tree an application is. Try "whereis firefox" and "whereis whereis". -- Konstantinos Konstantinidis +% +Want to run the same command again? +In tcsh you can type "!!" +% +Want to go to the directory you were just in? +Type "cd -" % Index: projects/nand/gnu/lib =================================================================== --- projects/nand/gnu/lib (revision 235262) +++ projects/nand/gnu/lib (revision 235263) Property changes on: projects/nand/gnu/lib ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/gnu/lib:r235157-235262 Index: projects/nand/gnu/usr.bin/binutils/addr2line/addr2line.1 =================================================================== --- projects/nand/gnu/usr.bin/binutils/addr2line/addr2line.1 (revision 235262) +++ projects/nand/gnu/usr.bin/binutils/addr2line/addr2line.1 (revision 235263) @@ -1,266 +1,266 @@ .\" $FreeBSD$ .\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings.
\*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "ADDR2LINE 1" .TH ADDR2LINE 1 "2010-10-30" "binutils-2.17.50" "GNU Development Tools" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" -addr2line \- convert addresses into file names and line numbers. 
+addr2line \- convert addresses into file names and line numbers .SH "SYNOPSIS" .IX Header "SYNOPSIS" addr2line [\fB\-b\fR \fIbfdname\fR|\fB\-\-target=\fR\fIbfdname\fR] [\fB\-C\fR|\fB\-\-demangle\fR[=\fIstyle\fR]] [\fB\-e\fR \fIfilename\fR|\fB\-\-exe=\fR\fIfilename\fR] [\fB\-f\fR|\fB\-\-functions\fR] [\fB\-s\fR|\fB\-\-basename\fR] [\fB\-i\fR|\fB\-\-inlines\fR] [\fB\-j\fR|\fB\-\-section=\fR\fIname\fR] [\fB\-H\fR|\fB\-\-help\fR] [\fB\-V\fR|\fB\-\-version\fR] [addr addr ...] .SH "DESCRIPTION" .IX Header "DESCRIPTION" \&\fBaddr2line\fR translates addresses into file names and line numbers. Given an address in an executable or an offset in a section of a relocatable object, it uses the debugging information to figure out which file name and line number are associated with it. .PP The executable or relocatable object to use is specified with the \fB\-e\fR option. The default is the file \fIa.out\fR. The section in the relocatable object to use is specified with the \fB\-j\fR option. .PP \&\fBaddr2line\fR has two modes of operation. .PP In the first, hexadecimal addresses are specified on the command line, and \fBaddr2line\fR displays the file name and line number for each address. .PP In the second, \fBaddr2line\fR reads hexadecimal addresses from standard input, and prints the file name and line number for each address on standard output. In this mode, \fBaddr2line\fR may be used in a pipe to convert dynamically chosen addresses. .PP The format of the output is \fB\s-1FILENAME:LINENO\s0\fR. The file name and line number for each address is printed on a separate line. If the \&\fB\-f\fR option is used, then each \fB\s-1FILENAME:LINENO\s0\fR line is preceded by a \fB\s-1FUNCTIONNAME\s0\fR line which is the name of the function containing the address. .PP If the file name or function name can not be determined, \&\fBaddr2line\fR will print two question marks in their place. If the line number can not be determined, \fBaddr2line\fR will print 0. .SH "OPTIONS" .IX Header "OPTIONS" The long and short forms of options, shown here as alternatives, are equivalent. .IP "\fB\-b\fR \fIbfdname\fR" 4 .IX Item "-b bfdname" .PD 0 .IP "\fB\-\-target=\fR\fIbfdname\fR" 4 .IX Item "--target=bfdname" .PD Specify that the object-code format for the object files is \&\fIbfdname\fR. .IP "\fB\-C\fR" 4 .IX Item "-C" .PD 0 .IP "\fB\-\-demangle[=\fR\fIstyle\fR\fB]\fR" 4 .IX Item "--demangle[=style]" .PD Decode (\fIdemangle\fR) low-level symbol names into user-level names. Besides removing any initial underscore prepended by the system, this makes \*(C+ function names readable. Different compilers have different mangling styles. The optional demangling style argument can be used to choose an appropriate demangling style for your compiler. .IP "\fB\-e\fR \fIfilename\fR" 4 .IX Item "-e filename" .PD 0 .IP "\fB\-\-exe=\fR\fIfilename\fR" 4 .IX Item "--exe=filename" .PD Specify the name of the executable for which addresses should be translated. The default file is \fIa.out\fR. .IP "\fB\-f\fR" 4 .IX Item "-f" .PD 0 .IP "\fB\-\-functions\fR" 4 .IX Item "--functions" .PD Display function names as well as file and line number information. .IP "\fB\-s\fR" 4 .IX Item "-s" .PD 0 .IP "\fB\-\-basenames\fR" 4 .IX Item "--basenames" .PD Display only the base of each file name. .IP "\fB\-i\fR" 4 .IX Item "-i" .PD 0 .IP "\fB\-\-inlines\fR" 4 .IX Item "--inlines" .PD If the address belongs to a function that was inlined, the source information for all enclosing scopes back to the first non-inlined function will also be printed. 
For example, if \f(CW\*(C`main\*(C'\fR inlines \&\f(CW\*(C`callee1\*(C'\fR which inlines \f(CW\*(C`callee2\*(C'\fR, and address is from \&\f(CW\*(C`callee2\*(C'\fR, the source information for \f(CW\*(C`callee1\*(C'\fR and \f(CW\*(C`main\*(C'\fR will also be printed. .IP "\fB\-j\fR" 4 .IX Item "-j" .PD 0 .IP "\fB\-\-section\fR" 4 .IX Item "--section" .PD Read offsets relative to the specified section instead of absolute addresses. .IP "\fB@\fR\fIfile\fR" 4 .IX Item "@file" Read command-line options from \fIfile\fR. The options read are inserted in place of the original @\fIfile\fR option. If \fIfile\fR does not exist, or cannot be read, then the option will be treated literally, and not removed. .Sp Options in \fIfile\fR are separated by whitespace. A whitespace character may be included in an option by surrounding the entire option in either single or double quotes. Any character (including a backslash) may be included by prefixing the character to be included with a backslash. The \fIfile\fR may itself contain additional @\fIfile\fR options; any such options will be processed recursively. .SH "SEE ALSO" .IX Header "SEE ALSO" Info entries for \fIbinutils\fR. .SH "COPYRIGHT" .IX Header "COPYRIGHT" Copyright (c) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. .PP Permission is granted to copy, distribute and/or modify this document under the terms of the \s-1GNU\s0 Free Documentation License, Version 1.1 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. A copy of the license is included in the section entitled \*(L"\s-1GNU\s0 Free Documentation License\*(R". Index: projects/nand/gnu/usr.bin/binutils/ranlib/ranlib.1 =================================================================== --- projects/nand/gnu/usr.bin/binutils/ranlib/ranlib.1 (revision 235262) +++ projects/nand/gnu/usr.bin/binutils/ranlib/ranlib.1 (revision 235263) @@ -1,189 +1,189 @@ .\" $FreeBSD$ .\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. 
Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "RANLIB 1" .TH RANLIB 1 "2010-10-30" "binutils-2.17.50" "GNU Development Tools" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" -ranlib \- generate index to archive. +ranlib \- generate index to archive .SH "SYNOPSIS" .IX Header "SYNOPSIS" ranlib [\fB\-vV\fR] \fIarchive\fR .SH "DESCRIPTION" .IX Header "DESCRIPTION" \&\fBranlib\fR generates an index to the contents of an archive and stores it in the archive. The index lists each symbol defined by a member of an archive that is a relocatable object file. .PP You may use \fBnm \-s\fR or \fBnm \-\-print\-armap\fR to list this index. .PP An archive with such an index speeds up linking to the library and allows routines in the library to call each other without regard to their placement in the archive. .PP The \s-1GNU\s0 \fBranlib\fR program is another form of \s-1GNU\s0 \fBar\fR; running \&\fBranlib\fR is completely equivalent to executing \fBar \-s\fR. .SH "OPTIONS" .IX Header "OPTIONS" .IP "\fB\-v\fR" 4 .IX Item "-v" .PD 0 .IP "\fB\-V\fR" 4 .IX Item "-V" .IP "\fB\-\-version\fR" 4 .IX Item "--version" .PD Show the version number of \fBranlib\fR. .IP "\fB@\fR\fIfile\fR" 4 .IX Item "@file" Read command-line options from \fIfile\fR. The options read are inserted in place of the original @\fIfile\fR option. If \fIfile\fR does not exist, or cannot be read, then the option will be treated literally, and not removed. .Sp Options in \fIfile\fR are separated by whitespace. 
A whitespace character may be included in an option by surrounding the entire option in either single or double quotes. Any character (including a backslash) may be included by prefixing the character to be included with a backslash. The \fIfile\fR may itself contain additional @\fIfile\fR options; any such options will be processed recursively. .SH "SEE ALSO" .IX Header "SEE ALSO" \&\fIar\fR\|(1), \fInm\fR\|(1), and the Info entries for \fIbinutils\fR. .SH "COPYRIGHT" .IX Header "COPYRIGHT" Copyright (c) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. .PP Permission is granted to copy, distribute and/or modify this document under the terms of the \s-1GNU\s0 Free Documentation License, Version 1.1 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. A copy of the license is included in the section entitled \*(L"\s-1GNU\s0 Free Documentation License\*(R". Index: projects/nand/gnu/usr.bin/binutils/size/size.1 =================================================================== --- projects/nand/gnu/usr.bin/binutils/size/size.1 (revision 235262) +++ projects/nand/gnu/usr.bin/binutils/size/size.1 (revision 235263) @@ -1,263 +1,263 @@ .\" $FreeBSD$ .\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . 
ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "SIZE 1" .TH SIZE 1 "2010-10-30" "binutils-2.17.50" "GNU Development Tools" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" -size \- list section sizes and total size. +size \- list section sizes and total size .SH "SYNOPSIS" .IX Header "SYNOPSIS" size [\fB\-A\fR|\fB\-B\fR|\fB\-\-format=\fR\fIcompatibility\fR] [\fB\-\-help\fR] [\fB\-d\fR|\fB\-o\fR|\fB\-x\fR|\fB\-\-radix=\fR\fInumber\fR] [\fB\-t\fR|\fB\-\-totals\fR] [\fB\-\-target=\fR\fIbfdname\fR] [\fB\-V\fR|\fB\-\-version\fR] [\fIobjfile\fR...] .SH "DESCRIPTION" .IX Header "DESCRIPTION" The \s-1GNU\s0 \fBsize\fR utility lists the section sizes\-\-\-and the total size\-\-\-for each of the object or archive files \fIobjfile\fR in its argument list. By default, one line of output is generated for each object file or each module in an archive. .PP \&\fIobjfile\fR... are the object files to be examined. If none are specified, the file \f(CW\*(C`a.out\*(C'\fR will be used. .SH "OPTIONS" .IX Header "OPTIONS" The command line options have the following meanings: .IP "\fB\-A\fR" 4 .IX Item "-A" .PD 0 .IP "\fB\-B\fR" 4 .IX Item "-B" .IP "\fB\-\-format=\fR\fIcompatibility\fR" 4 .IX Item "--format=compatibility" .PD Using one of these options, you can choose whether the output from \s-1GNU\s0 \&\fBsize\fR resembles output from System V \fBsize\fR (using \fB\-A\fR, or \fB\-\-format=sysv\fR), or Berkeley \fBsize\fR (using \fB\-B\fR, or \&\fB\-\-format=berkeley\fR). The default is the one-line format similar to Berkeley's. .Sp Here is an example of the Berkeley (default) format of output from \&\fBsize\fR: .Sp .Vb 4 \& $ size \-\-format=Berkeley ranlib size \& text data bss dec hex filename \& 294880 81920 11592 388392 5ed28 ranlib \& 294880 81920 11888 388688 5ee50 size .Ve .Sp This is the same data, but displayed closer to System V conventions: .Sp .Vb 7 \& $ size \-\-format=SysV ranlib size \& ranlib : \& section size addr \& .text 294880 8192 \& .data 81920 303104 \& .bss 11592 385024 \& Total 388392 \& \& \& size : \& section size addr \& .text 294880 8192 \& .data 81920 303104 \& .bss 11888 385024 \& Total 388688 .Ve .IP "\fB\-\-help\fR" 4 .IX Item "--help" Show a summary of acceptable arguments and options. 
.IP "\fB\-d\fR" 4 .IX Item "-d" .PD 0 .IP "\fB\-o\fR" 4 .IX Item "-o" .IP "\fB\-x\fR" 4 .IX Item "-x" .IP "\fB\-\-radix=\fR\fInumber\fR" 4 .IX Item "--radix=number" .PD Using one of these options, you can control whether the size of each section is given in decimal (\fB\-d\fR, or \fB\-\-radix=10\fR); octal (\fB\-o\fR, or \fB\-\-radix=8\fR); or hexadecimal (\fB\-x\fR, or \&\fB\-\-radix=16\fR). In \fB\-\-radix=\fR\fInumber\fR, only the three values (8, 10, 16) are supported. The total size is always given in two radices; decimal and hexadecimal for \fB\-d\fR or \fB\-x\fR output, or octal and hexadecimal if you're using \fB\-o\fR. .IP "\fB\-t\fR" 4 .IX Item "-t" .PD 0 .IP "\fB\-\-totals\fR" 4 .IX Item "--totals" .PD Show totals of all objects listed (Berkeley format listing mode only). .IP "\fB\-\-target=\fR\fIbfdname\fR" 4 .IX Item "--target=bfdname" Specify that the object-code format for \fIobjfile\fR is \&\fIbfdname\fR. This option may not be necessary; \fBsize\fR can automatically recognize many formats. .IP "\fB\-V\fR" 4 .IX Item "-V" .PD 0 .IP "\fB\-\-version\fR" 4 .IX Item "--version" .PD Display the version number of \fBsize\fR. .IP "\fB@\fR\fIfile\fR" 4 .IX Item "@file" Read command-line options from \fIfile\fR. The options read are inserted in place of the original @\fIfile\fR option. If \fIfile\fR does not exist, or cannot be read, then the option will be treated literally, and not removed. .Sp Options in \fIfile\fR are separated by whitespace. A whitespace character may be included in an option by surrounding the entire option in either single or double quotes. Any character (including a backslash) may be included by prefixing the character to be included with a backslash. The \fIfile\fR may itself contain additional @\fIfile\fR options; any such options will be processed recursively. .SH "SEE ALSO" .IX Header "SEE ALSO" \&\fIar\fR\|(1), \fIobjdump\fR\|(1), \fIreadelf\fR\|(1), and the Info entries for \fIbinutils\fR. .SH "COPYRIGHT" .IX Header "COPYRIGHT" Copyright (c) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. .PP Permission is granted to copy, distribute and/or modify this document under the terms of the \s-1GNU\s0 Free Documentation License, Version 1.1 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. A copy of the license is included in the section entitled \*(L"\s-1GNU\s0 Free Documentation License\*(R". Index: projects/nand/gnu/usr.bin/binutils/strip/strip.1 =================================================================== --- projects/nand/gnu/usr.bin/binutils/strip/strip.1 (revision 235262) +++ projects/nand/gnu/usr.bin/binutils/strip/strip.1 (revision 235263) @@ -1,392 +1,392 @@ .\" $FreeBSD$ .\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. 
\*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "STRIP 1" .TH STRIP 1 "2010-10-30" "binutils-2.17.50" "GNU Development Tools" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" -strip \- Discard symbols from object files. 
+strip \- Discard symbols from object files .SH "SYNOPSIS" .IX Header "SYNOPSIS" strip [\fB\-F\fR \fIbfdname\fR |\fB\-\-target=\fR\fIbfdname\fR] [\fB\-I\fR \fIbfdname\fR |\fB\-\-input\-target=\fR\fIbfdname\fR] [\fB\-O\fR \fIbfdname\fR |\fB\-\-output\-target=\fR\fIbfdname\fR] [\fB\-s\fR|\fB\-\-strip\-all\fR] [\fB\-S\fR|\fB\-g\fR|\fB\-d\fR|\fB\-\-strip\-debug\fR] [\fB\-K\fR \fIsymbolname\fR |\fB\-\-keep\-symbol=\fR\fIsymbolname\fR] [\fB\-N\fR \fIsymbolname\fR |\fB\-\-strip\-symbol=\fR\fIsymbolname\fR] [\fB\-w\fR|\fB\-\-wildcard\fR] [\fB\-x\fR|\fB\-\-discard\-all\fR] [\fB\-X\fR |\fB\-\-discard\-locals\fR] [\fB\-R\fR \fIsectionname\fR |\fB\-\-remove\-section=\fR\fIsectionname\fR] [\fB\-o\fR \fIfile\fR] [\fB\-p\fR|\fB\-\-preserve\-dates\fR] [\fB\-\-keep\-file\-symbols\fR] [\fB\-\-only\-keep\-debug\fR] [\fB\-v\fR |\fB\-\-verbose\fR] [\fB\-V\fR|\fB\-\-version\fR] [\fB\-\-help\fR] [\fB\-\-info\fR] \fIobjfile\fR... .SH "DESCRIPTION" .IX Header "DESCRIPTION" \&\s-1GNU\s0 \fBstrip\fR discards all symbols from object files \&\fIobjfile\fR. The list of object files may include archives. At least one object file must be given. .PP \&\fBstrip\fR modifies the files named in its argument, rather than writing modified copies under different names. .SH "OPTIONS" .IX Header "OPTIONS" .IP "\fB\-F\fR \fIbfdname\fR" 4 .IX Item "-F bfdname" .PD 0 .IP "\fB\-\-target=\fR\fIbfdname\fR" 4 .IX Item "--target=bfdname" .PD Treat the original \fIobjfile\fR as a file with the object code format \fIbfdname\fR, and rewrite it in the same format. .IP "\fB\-\-help\fR" 4 .IX Item "--help" Show a summary of the options to \fBstrip\fR and exit. .IP "\fB\-\-info\fR" 4 .IX Item "--info" Display a list showing all architectures and object formats available. .IP "\fB\-I\fR \fIbfdname\fR" 4 .IX Item "-I bfdname" .PD 0 .IP "\fB\-\-input\-target=\fR\fIbfdname\fR" 4 .IX Item "--input-target=bfdname" .PD Treat the original \fIobjfile\fR as a file with the object code format \fIbfdname\fR. .IP "\fB\-O\fR \fIbfdname\fR" 4 .IX Item "-O bfdname" .PD 0 .IP "\fB\-\-output\-target=\fR\fIbfdname\fR" 4 .IX Item "--output-target=bfdname" .PD Replace \fIobjfile\fR with a file in the output format \fIbfdname\fR. .IP "\fB\-R\fR \fIsectionname\fR" 4 .IX Item "-R sectionname" .PD 0 .IP "\fB\-\-remove\-section=\fR\fIsectionname\fR" 4 .IX Item "--remove-section=sectionname" .PD Remove any section named \fIsectionname\fR from the output file. This option may be given more than once. Note that using this option inappropriately may make the output file unusable. .IP "\fB\-s\fR" 4 .IX Item "-s" .PD 0 .IP "\fB\-\-strip\-all\fR" 4 .IX Item "--strip-all" .PD Remove all symbols. .IP "\fB\-g\fR" 4 .IX Item "-g" .PD 0 .IP "\fB\-S\fR" 4 .IX Item "-S" .IP "\fB\-d\fR" 4 .IX Item "-d" .IP "\fB\-\-strip\-debug\fR" 4 .IX Item "--strip-debug" .PD Remove debugging symbols only. .IP "\fB\-\-strip\-unneeded\fR" 4 .IX Item "--strip-unneeded" Remove all symbols that are not needed for relocation processing. .IP "\fB\-K\fR \fIsymbolname\fR" 4 .IX Item "-K symbolname" .PD 0 .IP "\fB\-\-keep\-symbol=\fR\fIsymbolname\fR" 4 .IX Item "--keep-symbol=symbolname" .PD When stripping symbols, keep symbol \fIsymbolname\fR even if it would normally be stripped. This option may be given more than once. .IP "\fB\-N\fR \fIsymbolname\fR" 4 .IX Item "-N symbolname" .PD 0 .IP "\fB\-\-strip\-symbol=\fR\fIsymbolname\fR" 4 .IX Item "--strip-symbol=symbolname" .PD Remove symbol \fIsymbolname\fR from the source file. 
This option may be given more than once, and may be combined with strip options other than \&\fB\-K\fR. .IP "\fB\-o\fR \fIfile\fR" 4 .IX Item "-o file" Put the stripped output in \fIfile\fR, rather than replacing the existing file. When this argument is used, only one \fIobjfile\fR argument may be specified. .IP "\fB\-p\fR" 4 .IX Item "-p" .PD 0 .IP "\fB\-\-preserve\-dates\fR" 4 .IX Item "--preserve-dates" .PD Preserve the access and modification dates of the file. .IP "\fB\-w\fR" 4 .IX Item "-w" .PD 0 .IP "\fB\-\-wildcard\fR" 4 .IX Item "--wildcard" .PD Permit regular expressions in \fIsymbolname\fRs used in other command line options. The question mark (?), asterisk (*), backslash (\e) and square brackets ([]) operators can be used anywhere in the symbol name. If the first character of the symbol name is the exclamation point (!) then the sense of the switch is reversed for that symbol. For example: .Sp .Vb 1 \& \-w \-K !foo \-K fo* .Ve .Sp would cause strip to only keep symbols that start with the letters \&\*(L"fo\*(R", but to discard the symbol \*(L"foo\*(R". .IP "\fB\-x\fR" 4 .IX Item "-x" .PD 0 .IP "\fB\-\-discard\-all\fR" 4 .IX Item "--discard-all" .PD Remove non-global symbols. .IP "\fB\-X\fR" 4 .IX Item "-X" .PD 0 .IP "\fB\-\-discard\-locals\fR" 4 .IX Item "--discard-locals" .PD Remove compiler-generated local symbols. (These usually start with \fBL\fR or \fB.\fR.) .IP "\fB\-\-keep\-file\-symbols\fR" 4 .IX Item "--keep-file-symbols" When stripping a file, perhaps with \fB\-\-strip\-debug\fR or \&\fB\-\-strip\-unneeded\fR, retain any symbols specifying source file names, which would otherwise get stripped. .IP "\fB\-\-only\-keep\-debug\fR" 4 .IX Item "--only-keep-debug" Strip a file, removing contents of any sections that would not be stripped by \fB\-\-strip\-debug\fR and leaving the debugging sections intact. In \s-1ELF\s0 files, this preserves all note sections in the output. .Sp The intention is that this option will be used in conjunction with \&\fB\-\-add\-gnu\-debuglink\fR to create a two part executable. One is a stripped binary which will occupy less space in \s-1RAM\s0 and in a distribution, and the second is a debugging information file which is only needed if debugging abilities are required. The suggested procedure to create these files is as follows: .RS 4 .IP "1." 4 .IX Item "1." Link the executable as normal. Assuming that it is called \&\f(CW\*(C`foo\*(C'\fR then... .ie n .IP "1." 4 .el .IP "1." 4 .IX Item "1." Run \f(CW\*(C`objcopy \-\-only\-keep\-debug foo foo.dbg\*(C'\fR to create a file containing the debugging info. .ie n .IP "1." 4 .el .IP "1." 4 .IX Item "1." Run \f(CW\*(C`objcopy \-\-strip\-debug foo\*(C'\fR to create a stripped executable. .ie n .IP "1." 4 .el .IP "1." 4 .IX Item "1." Run \f(CW\*(C`objcopy \-\-add\-gnu\-debuglink=foo.dbg foo\*(C'\fR to add a link to the debugging info into the stripped executable. .RE .RS 4 .Sp Note \- the choice of \f(CW\*(C`.dbg\*(C'\fR as an extension for the debug info file is arbitrary. Also the \f(CW\*(C`\-\-only\-keep\-debug\*(C'\fR step is optional. You could instead do this: .IP "1." 4 .IX Item "1." .PD 0 Link the executable as normal. .ie n .IP "1." 4 .el .IP "1." 4 .IX Item "1." Copy \f(CW\*(C`foo\*(C'\fR to \f(CW\*(C`foo.full\*(C'\fR .ie n .IP "1." 4 .el .IP "1." 4 .IX Item "1." Run \f(CW\*(C`strip \-\-strip\-debug foo\*(C'\fR .ie n .IP "1." 4 .el .IP "1." 4 .IX Item "1." Run \f(CW\*(C`objcopy \-\-add\-gnu\-debuglink=foo.full foo\*(C'\fR .RE .RS 4 .PD .Sp i.e., the file pointed to by the \fB\-\-add\-gnu\-debuglink\fR can be the full executable. It does not have to be a file created by the \&\fB\-\-only\-keep\-debug\fR switch. .Sp Note \- this switch is only intended for use on fully linked files. It does not make sense to use it on object files where the debugging information may be incomplete. Besides, the gnu_debuglink feature currently only supports the presence of one filename containing debugging information, not multiple filenames on a one-per-object-file basis.
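.Sp
Consumers such as debuggers validate the link before trusting it: the
\&\f(CW\*(C`.gnu_debuglink\*(C'\fR section stores the name of the debug file
followed by a CRC32 of that file's entire contents. A minimal, self-contained
C sketch of that standard checksum (illustrative only, not part of
\&\fBstrip\fR itself):
.Sp
.Vb 35
\& #include <stddef.h>
\& #include <stdint.h>
\& #include <stdio.h>
\&
\& /* CRC32, polynomial 0xedb88320, as stored in .gnu_debuglink.
\&    Pass crc = 0 on the first call; chunked calls compose because
\&    the value is complemented on entry and on return. */
\& static uint32_t
\& gnu_debuglink_crc32(uint32_t crc, const unsigned char *buf, size_t len)
\& {
\&         crc = ~crc;
\&         while (len\-\-) {
\&                 crc ^= *buf++;
\&                 for (int i = 0; i < 8; i++)
\&                         crc = (crc >> 1) ^ (0xedb88320 & \-(crc & 1));
\&         }
\&         return (~crc);
\& }
\&
\& int
\& main(int argc, char **argv)
\& {
\&         unsigned char buf[8192];
\&         uint32_t crc = 0;
\&         size_t n;
\&         FILE *fp;
\&
\&         if (argc != 2 || (fp = fopen(argv[1], "r")) == NULL)
\&                 return (1);
\&         while ((n = fread(buf, 1, sizeof(buf), fp)) > 0)
\&                 crc = gnu_debuglink_crc32(crc, buf, n);
\&         fclose(fp);
\&         printf("0x%08x\en", crc);
\&         return (0);
\& }
.Ve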
.RE .IP "\fB\-V\fR" 4 .IX Item "-V" .PD 0 .IP "\fB\-\-version\fR" 4 .IX Item "--version" .PD Show the version number for \fBstrip\fR. .IP "\fB\-v\fR" 4 .IX Item "-v" .PD 0 .IP "\fB\-\-verbose\fR" 4 .IX Item "--verbose" .PD Verbose output: list all object files modified. In the case of archives, \fBstrip \-v\fR lists all members of the archive. .IP "\fB@\fR\fIfile\fR" 4 .IX Item "@file" Read command-line options from \fIfile\fR. The options read are inserted in place of the original @\fIfile\fR option. If \fIfile\fR does not exist, or cannot be read, then the option will be treated literally, and not removed. .Sp Options in \fIfile\fR are separated by whitespace. A whitespace character may be included in an option by surrounding the entire option in either single or double quotes. Any character (including a backslash) may be included by prefixing the character to be included with a backslash. The \fIfile\fR may itself contain additional @\fIfile\fR options; any such options will be processed recursively. .SH "SEE ALSO" .IX Header "SEE ALSO" the Info entries for \fIbinutils\fR. .SH "COPYRIGHT" .IX Header "COPYRIGHT" Copyright (c) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. .PP Permission is granted to copy, distribute and/or modify this document under the terms of the \s-1GNU\s0 Free Documentation License, Version 1.1 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. A copy of the license is included in the section entitled \*(L"\s-1GNU\s0 Free Documentation License\*(R". Index: projects/nand/gnu/usr.bin/binutils =================================================================== --- projects/nand/gnu/usr.bin/binutils (revision 235262) +++ projects/nand/gnu/usr.bin/binutils (revision 235263) Property changes on: projects/nand/gnu/usr.bin/binutils ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/gnu/usr.bin/binutils:r235157-235262 Index: projects/nand/gnu/usr.bin/cc/cc_tools =================================================================== --- projects/nand/gnu/usr.bin/cc/cc_tools (revision 235262) +++ projects/nand/gnu/usr.bin/cc/cc_tools (revision 235263) Property changes on: projects/nand/gnu/usr.bin/cc/cc_tools ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/gnu/usr.bin/cc/cc_tools:r235157-235262 Index: projects/nand/gnu/usr.bin/gdb/gdb/gdb.1 =================================================================== --- projects/nand/gnu/usr.bin/gdb/gdb/gdb.1 (revision 235262) +++ projects/nand/gnu/usr.bin/gdb/gdb/gdb.1 (revision 235263) @@ -1,371 +1,371 @@ .\" Copyright (c) 1991 Free Software Foundation .\" See section COPYING for conditions for redistribution .\" $FreeBSD$ .TH gdb 1 "4nov1991" "GNU Tools" "GNU Tools" .SH NAME gdb \- The GNU Debugger .SH SYNOPSIS .na .TP .B gdb .RB "[\|" \-help "\|]" .RB "[\|" \-nx "\|]" .RB "[\|" \-q "\|]" .RB "[\|" \-batch "\|]" .RB "[\|" \-cd=\c .I dir\c \|] .RB "[\|" \-f "\|]" .RB "[\|" "\-b\ "\c .IR bps "\|]" .RB "[\|" "\-tty="\c .IR dev "\|]" .RB "[\|" "\-s "\c .I symfile\c \&\|] .RB "[\|" "\-e "\c .I prog\c \&\|] .RB "[\|" "\-se "\c .I prog\c \&\|] .RB "[\|" "\-c "\c .I core\c \&\|] .RB "[\|" "\-x "\c .I cmds\c \&\|] .RB "[\|" "\-d "\c .I dir\c \&\|] .RB "[\|" \c .I prog\c .RB "[\|" \c .IR core \||\| procID\c \&\|]\&\|] .ad b 
.SH DESCRIPTION The purpose of a debugger such as GDB is to allow you to see what is going on ``inside'' another program while it executes\(em\&or what another program was doing at the moment it crashed. GDB can do four main kinds of things (plus other things in support of these) to help you catch bugs in the act: .TP \ \ \ \(bu Start your program, specifying anything that might affect its behavior. .TP \ \ \ \(bu Make your program stop on specified conditions. .TP \ \ \ \(bu Examine what has happened, when your program has stopped. .TP \ \ \ \(bu Change things in your program, so you can experiment with correcting the effects of one bug and go on to learn about another. .PP You can use GDB to debug programs written in C, C++, and Modula-2. Fortran support will be added when a GNU Fortran compiler is ready. GDB is invoked with the shell command \c .B gdb\c \&. Once started, it reads commands from the terminal until you tell it to exit with the GDB command \c .B quit\c \&. You can get online help from \c .B gdb\c \& itself by using the command \c .B help\c \&. You can run \c .B gdb\c \& with no arguments or options; but the most usual way to start GDB is with one argument or two, specifying an executable program as the argument: .sp .br gdb\ program .br .sp You can also start with both an executable program and a core file specified: .sp .br gdb\ program\ core .br .sp You can, instead, specify a process ID as a second argument, if you want to debug a running process: .sp .br gdb\ program\ 1234 .br .sp would attach GDB to process \c .B 1234\c \& (unless you also have a file named `\|\c .B 1234\c \&\|'; GDB does check for a core file first). Here are some of the most frequently needed GDB commands: .TP .B break \fR[\|\fIfile\fB:\fR\|]\fIfunction \& Set a breakpoint at \c .I function\c \& (in \c .I file\c \&). .TP .B run \fR[\|\fIarglist\fR\|] Start your program (with \c .I arglist\c \&, if specified). .TP .B bt Backtrace: display the program stack. .TP .BI print " expr"\c \& Display the value of an expression. .TP .B c Continue running your program (after stopping, e.g. at a breakpoint). .TP .B next Execute next program line (after stopping); step \c .I over\c \& any function calls in the line. .TP .B step Execute next program line (after stopping); step \c .I into\c \& any function calls in the line. .TP .B help \fR[\|\fIname\fR\|] Show information about GDB command \c .I name\c \&, or general information about using GDB. .TP .B quit Exit from GDB. .PP For full details on GDB, see \c .I Using GDB: A Guide to the GNU Source-Level Debugger\c \&, by Richard M. Stallman and Roland H. Pesch. The same text is available online as the \c .B gdb\c \& entry in the \c .B info\c \& program. .SH OPTIONS Any arguments other than options specify an executable file and core file (or process ID); that is, the first argument encountered with no associated option flag is equivalent to a `\|\c .B \-se\c \&\|' option, and the second, if any, is equivalent to a `\|\c .B \-c\c \&\|' option if it's the name of a file. Many options have both long and short forms; both are shown here. The long forms are also recognized if you truncate them, so long as enough of the option is present to be unambiguous. (If you prefer, you can flag option arguments with `\|\c .B +\c \&\|' rather than `\|\c .B \-\c \&\|', though we illustrate the more usual convention.) All the options and command line arguments you give are processed in sequential order. The order makes a difference when the `\|\c .B \-x\c \&\|' option is used. 
.TP .B \-help .TP .B \-h List all options, with brief explanations. .TP .BI "\-symbols=" "file"\c .TP .BI "\-s " "file"\c \& Read symbol table from file \c .I file\c \&. .TP .BI "\-exec=" "file"\c .TP .BI "\-e " "file"\c \& Use file \c .I file\c \& as the executable file to execute when appropriate, and for examining pure data in conjunction with a core dump. .TP .BI "\-se=" "file"\c \& Read symbol table from file \c .I file\c \& and use it as the executable file. .TP .BI "\-core=" "file"\c .TP .BI "\-c " "file"\c \& Use file \c .I file\c \& as a core dump to examine. .TP .BI "\-command=" "file"\c .TP .BI "\-x " "file"\c \& Execute GDB commands from file \c .I file\c \&. .TP .BI "\-directory=" "directory"\c .TP .BI "\-d " "directory"\c \& Add \c .I directory\c \& to the path to search for source files. .PP .TP .B \-nx .TP .B \-n Do not execute commands from any `\|\c .B .gdbinit\c \&\|' initialization files. Normally, the commands in these files are executed after all the command options and arguments have been processed. .TP .B \-quiet .TP .B \-q ``Quiet''. Do not print the introductory and copyright messages. These messages are also suppressed in batch mode. .TP .B \-batch Run in batch mode. Exit with status \c .B 0\c \& after processing all the command files specified with `\|\c .B \-x\c \&\|' (and `\|\c .B .gdbinit\c \&\|', if not inhibited). Exit with nonzero status if an error occurs in executing the GDB commands in the command files. Batch mode may be useful for running GDB as a filter, for example to download and run a program on another computer; in order to make this more useful, the message .sp .br Program\ exited\ normally. .br .sp (which is ordinarily issued whenever a program running under GDB control terminates) is not issued when running in batch mode. .TP .BI "\-cd=" "directory"\c \& Run GDB using \c .I directory\c \& as its working directory, instead of the current directory. .TP .B \-fullname .TP .B \-f Emacs sets this option when it runs GDB as a subprocess. It tells GDB to output the full file name and line number in a standard, recognizable fashion each time a stack frame is displayed (which includes each time the program stops). This recognizable format looks like two `\|\c -.B \032\c +.B \e032\c \&\|' characters, followed by the file name, line number and character position separated by colons, and a newline. The Emacs-to-GDB interface program uses the two `\|\c -.B \032\c +.B \e032\c \&\|' characters as a signal to display the source code for the frame. .TP .BI "\-b " "bps"\c \& Set the line speed (baud rate or bits per second) of any serial interface used by GDB for remote debugging. .TP .BI "\-tty=" "device"\c \& Run using \c .I device\c \& for your program's standard input and output. .PP .SH "SEE ALSO" .RB "`\|" gdb "\|'" entry in .B info\c \&; .I Using GDB: A Guide to the GNU Source-Level Debugger\c , Richard M. Stallman and Roland H. Pesch, July 1991. .SH COPYING Copyright (c) 1991 Free Software Foundation, Inc. .PP Permission is granted to make and distribute verbatim copies of this manual provided the copyright notice and this permission notice are preserved on all copies. .PP Permission is granted to copy and distribute modified versions of this manual under the conditions for verbatim copying, provided that the entire resulting derived work is distributed under the terms of a permission notice identical to this one. 
.PP Permission is granted to copy and distribute translations of this manual into another language, under the above conditions for modified versions, except that this permission notice may be included in translations approved by the Free Software Foundation instead of in the original English. Index: projects/nand/gnu/usr.bin/gdb =================================================================== --- projects/nand/gnu/usr.bin/gdb (revision 235262) +++ projects/nand/gnu/usr.bin/gdb (revision 235263) Property changes on: projects/nand/gnu/usr.bin/gdb ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/gnu/usr.bin/gdb:r235157-235262 Index: projects/nand/lib/libc/locale/toupper.c =================================================================== --- projects/nand/lib/libc/locale/toupper.c (revision 235262) +++ projects/nand/lib/libc/locale/toupper.c (revision 235263) @@ -1,81 +1,81 @@ /*- * Copyright (c) 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Paul Borman at Krystal Technologies. * * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * Portions of this software were developed by David Chisnall * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "mblocal.h" __ct_rune_t ___toupper_l(c, l) __ct_rune_t c; locale_t l; { size_t lim; FIX_LOCALE(l); - _RuneRange *rr = &XLOCALE_CTYPE(l)->runes->__maplower_ext; + _RuneRange *rr = &XLOCALE_CTYPE(l)->runes->__mapupper_ext; _RuneEntry *base, *re; if (c < 0 || c == EOF) return(c); /* Binary search -- see bsearch.c for explanation. 
*/ base = rr->__ranges; for (lim = rr->__nranges; lim != 0; lim >>= 1) { re = base + (lim >> 1); if (re->__min <= c && c <= re->__max) { return (re->__map + c - re->__min); } else if (c > re->__max) { base = re + 1; lim--; } } return(c); } __ct_rune_t ___toupper(c) __ct_rune_t c; { return ___toupper_l(c, __get_locale()); } Index: projects/nand/lib/libc/stdio/fgets.3 =================================================================== --- projects/nand/lib/libc/stdio/fgets.3 (revision 235262) +++ projects/nand/lib/libc/stdio/fgets.3 (revision 235263) @@ -1,158 +1,156 @@ .\" Copyright (c) 1990, 1991, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" This code is derived from software contributed to Berkeley by .\" Chris Torek and the American National Standards Committee X3, .\" on Information Processing Systems. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)fgets.3 8.1 (Berkeley) 6/4/93 .\" $FreeBSD$ .\" -.Dd June 4, 1993 +.Dd May 5, 2012 .Dt FGETS 3 .Os .Sh NAME .Nm fgets , .Nm gets .Nd get a line from a stream .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In stdio.h .Ft char * .Fn fgets "char * restrict str" "int size" "FILE * restrict stream" .Ft char * .Fn gets "char *str" .Sh DESCRIPTION The .Fn fgets function reads at most one less than the number of characters specified by .Fa size from the given .Fa stream and stores them in the string .Fa str . Reading stops when a newline character is found, at end-of-file or error. The newline, if any, is retained. If any characters are read and there is no error, a .Ql \e0 character is appended to end the string. .Pp The .Fn gets function is equivalent to .Fn fgets with an infinite .Fa size and a .Fa stream of .Dv stdin , except that the newline character (if any) is not stored in the string. It is the caller's responsibility to ensure that the input line, if any, is sufficiently short to fit in the string. .Sh RETURN VALUES Upon successful completion, .Fn fgets and .Fn gets return a pointer to the string. 
If end-of-file occurs before any characters are read, they return .Dv NULL and the buffer contents remain unchanged. If an error occurs, they return .Dv NULL and the buffer contents are indeterminate. The .Fn fgets and .Fn gets functions do not distinguish between end-of-file and error, and callers must use .Xr feof 3 and .Xr ferror 3 to determine which occurred. .Sh ERRORS .Bl -tag -width Er .It Bq Er EBADF The given .Fa stream is not a readable stream. .El .Pp The function .Fn fgets may also fail and set .Va errno for any of the errors specified for the routines .Xr fflush 3 , .Xr fstat 2 , .Xr read 2 , or .Xr malloc 3 . .Pp The function .Fn gets may also fail and set .Va errno for any of the errors specified for the routine .Xr getchar 3 . .Sh SEE ALSO .Xr feof 3 , .Xr ferror 3 , .Xr fgetln 3 , .Xr fgetws 3 , .Xr getline 3 .Sh STANDARDS The functions .Fn fgets and .Fn gets conform to .St -isoC-99 . .Sh SECURITY CONSIDERATIONS The .Fn gets function cannot be used securely. Because of its lack of bounds checking, and the inability for the calling program to reliably determine the length of the next incoming line, the use of this function enables malicious users to arbitrarily change a running program's functionality through a buffer overflow attack. It is strongly suggested that the .Fn fgets function be used in all cases. -(See -the FSA.) Index: projects/nand/lib/libc/stdtime =================================================================== --- projects/nand/lib/libc/stdtime (revision 235262) +++ projects/nand/lib/libc/stdtime (revision 235263) Property changes on: projects/nand/lib/libc/stdtime ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libc/stdtime:r235157-235262 Index: projects/nand/lib/libc =================================================================== --- projects/nand/lib/libc (revision 235262) +++ projects/nand/lib/libc (revision 235263) Property changes on: projects/nand/lib/libc ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libc:r235157-235262 Index: projects/nand/lib/libthr/thread/thr_sleepq.c =================================================================== --- projects/nand/lib/libthr/thread/thr_sleepq.c (revision 235262) +++ projects/nand/lib/libthr/thread/thr_sleepq.c (revision 235263) @@ -1,179 +1,183 @@ /* * Copyright (c) 2010 David Xu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include "thr_private.h" #define HASHSHIFT 9 #define HASHSIZE (1 << HASHSHIFT) #define SC_HASH(wchan) ((unsigned) \ ((((uintptr_t)(wchan) >> 3) \ ^ ((uintptr_t)(wchan) >> (HASHSHIFT + 3))) \ & (HASHSIZE - 1))) #define SC_LOOKUP(wc) &sc_table[SC_HASH(wc)] struct sleepqueue_chain { struct umutex sc_lock; int sc_enqcnt; LIST_HEAD(, sleepqueue) sc_queues; int sc_type; }; static struct sleepqueue_chain sc_table[HASHSIZE]; void _sleepq_init(void) { int i; for (i = 0; i < HASHSIZE; ++i) { LIST_INIT(&sc_table[i].sc_queues); _thr_umutex_init(&sc_table[i].sc_lock); } } struct sleepqueue * _sleepq_alloc(void) { struct sleepqueue *sq; sq = calloc(1, sizeof(struct sleepqueue)); TAILQ_INIT(&sq->sq_blocked); SLIST_INIT(&sq->sq_freeq); return (sq); } void _sleepq_free(struct sleepqueue *sq) { free(sq); } void _sleepq_lock(void *wchan) { struct pthread *curthread = _get_curthread(); struct sleepqueue_chain *sc; sc = SC_LOOKUP(wchan); THR_LOCK_ACQUIRE_SPIN(curthread, &sc->sc_lock); } void _sleepq_unlock(void *wchan) { struct sleepqueue_chain *sc; struct pthread *curthread = _get_curthread(); sc = SC_LOOKUP(wchan); THR_LOCK_RELEASE(curthread, &sc->sc_lock); } -struct sleepqueue * -_sleepq_lookup(void *wchan) +static inline struct sleepqueue * +lookup(struct sleepqueue_chain *sc, void *wchan) { - struct sleepqueue_chain *sc; struct sleepqueue *sq; - sc = SC_LOOKUP(wchan); LIST_FOREACH(sq, &sc->sc_queues, sq_hash) if (sq->sq_wchan == wchan) return (sq); return (NULL); } +struct sleepqueue * +_sleepq_lookup(void *wchan) +{ + return (lookup(SC_LOOKUP(wchan), wchan)); +} + void _sleepq_add(void *wchan, struct pthread *td) { struct sleepqueue_chain *sc; struct sleepqueue *sq; sc = SC_LOOKUP(wchan); - sq = _sleepq_lookup(wchan); + sq = lookup(sc, wchan); if (sq != NULL) { SLIST_INSERT_HEAD(&sq->sq_freeq, td->sleepqueue, sq_flink); } else { sq = td->sleepqueue; LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash); sq->sq_wchan = wchan; /* sq->sq_type = type; */ } td->sleepqueue = NULL; td->wchan = wchan; if (((++sc->sc_enqcnt << _thr_queuefifo) & 0xff) != 0) TAILQ_INSERT_HEAD(&sq->sq_blocked, td, wle); else TAILQ_INSERT_TAIL(&sq->sq_blocked, td, wle); } int _sleepq_remove(struct sleepqueue *sq, struct pthread *td) { int rc; TAILQ_REMOVE(&sq->sq_blocked, td, wle); if (TAILQ_EMPTY(&sq->sq_blocked)) { LIST_REMOVE(sq, sq_hash); td->sleepqueue = sq; rc = 0; } else { td->sleepqueue = SLIST_FIRST(&sq->sq_freeq); SLIST_REMOVE_HEAD(&sq->sq_freeq, sq_flink); rc = 1; } td->wchan = NULL; return (rc); } void _sleepq_drop(struct sleepqueue *sq, void (*cb)(struct pthread *, void *arg), void *arg) { struct pthread *td; struct sleepqueue *sq2; td = TAILQ_FIRST(&sq->sq_blocked); if (td == NULL) return; LIST_REMOVE(sq, sq_hash); TAILQ_REMOVE(&sq->sq_blocked, td, wle); if (cb != NULL) cb(td, arg); td->sleepqueue = sq; td->wchan = NULL; sq2 = SLIST_FIRST(&sq->sq_freeq); TAILQ_FOREACH(td, &sq->sq_blocked, wle) { if (cb != NULL) cb(td, arg); td->sleepqueue = sq2; td->wchan = NULL; sq2 = SLIST_NEXT(sq2, sq_flink); } 
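	/*
	 * Every woken thread leaves owning a spare sleepqueue: the first
	 * took sq itself, the others took entries donated to sq_freeq by
	 * _sleepq_add.  Reset both lists so sq is empty for its new owner.
	 */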
TAILQ_INIT(&sq->sq_blocked); SLIST_INIT(&sq->sq_freeq); } Index: projects/nand/lib/libutil =================================================================== --- projects/nand/lib/libutil (revision 235262) +++ projects/nand/lib/libutil (revision 235263) Property changes on: projects/nand/lib/libutil ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libutil:r235157-235262 Index: projects/nand/lib/libz =================================================================== --- projects/nand/lib/libz (revision 235262) +++ projects/nand/lib/libz (revision 235263) Property changes on: projects/nand/lib/libz ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libz:r235157-235262 Index: projects/nand/sbin/geom/class/eli/geli.8 =================================================================== --- projects/nand/sbin/geom/class/eli/geli.8 (revision 235262) +++ projects/nand/sbin/geom/class/eli/geli.8 (revision 235263) @@ -1,971 +1,974 @@ .\" Copyright (c) 2005-2011 Pawel Jakub Dawidek .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd April 28, 2012 .Dt GELI 8 .Os .Sh NAME .Nm geli .Nd "control utility for the cryptographic GEOM class" .Sh SYNOPSIS To compile GEOM_ELI into your kernel, add the following lines to your kernel configuration file: .Bd -ragged -offset indent .Cd "device crypto" .Cd "options GEOM_ELI" .Ed .Pp Alternatively, to load the GEOM_ELI module at boot time, add the following line to your .Xr loader.conf 5 : .Bd -literal -offset indent geom_eli_load="YES" .Ed .Pp Usage of the .Nm utility: .Pp .Nm .Cm init .Op Fl bPv .Op Fl a Ar aalgo .Op Fl B Ar backupfile .Op Fl e Ar ealgo .Op Fl i Ar iterations .Op Fl J Ar newpassfile .Op Fl K Ar newkeyfile .Op Fl l Ar keylen .Op Fl s Ar sectorsize .Op Fl V Ar version .Ar prov .Nm .Cm label - an alias for .Cm init .Nm .Cm attach .Op Fl dprv .Op Fl j Ar passfile .Op Fl k Ar keyfile .Ar prov .Nm .Cm detach .Op Fl fl .Ar prov ... .Nm .Cm stop - an alias for .Cm detach .Nm .Cm onetime .Op Fl d .Op Fl a Ar aalgo .Op Fl e Ar ealgo .Op Fl l Ar keylen .Op Fl s Ar sectorsize .Ar prov .Nm .Cm configure .Op Fl bB .Ar prov ... 
.Nm .Cm setkey .Op Fl pPv .Op Fl i Ar iterations .Op Fl j Ar passfile .Op Fl J Ar newpassfile .Op Fl k Ar keyfile .Op Fl K Ar newkeyfile .Op Fl n Ar keyno .Ar prov .Nm .Cm delkey .Op Fl afv .Op Fl n Ar keyno .Ar prov .Nm .Cm kill .Op Fl av .Op Ar prov ... .Nm .Cm backup .Op Fl v .Ar prov .Ar file .Nm .Cm restore .Op Fl fv .Ar file .Ar prov .Nm .Cm suspend .Op Fl v .Fl a | Ar prov ... .Nm .Cm resume .Op Fl pv .Op Fl j Ar passfile .Op Fl k Ar keyfile .Ar prov .Nm .Cm resize .Op Fl v .Fl s Ar oldsize .Ar prov .Nm .Cm version .Op Ar prov ... .Nm .Cm clear .Op Fl v .Ar prov ... .Nm .Cm dump .Op Fl v .Ar prov ... .Nm .Cm list .Nm .Cm status .Nm .Cm load .Nm .Cm unload .Sh DESCRIPTION The .Nm utility is used to configure encryption on GEOM providers. .Pp The following is a list of the most important features: .Pp .Bl -bullet -offset indent -compact .It Utilizes the .Xr crypto 9 framework, so when there is crypto hardware available, .Nm will make use of it automatically. .It Supports many cryptographic algorithms (currently .Nm AES-XTS , .Nm AES-CBC , .Nm Blowfish-CBC , .Nm Camellia-CBC and .Nm 3DES-CBC ) . .It Can optionally perform data authentication (integrity verification) utilizing one of the following algorithms: .Nm HMAC/MD5 , .Nm HMAC/SHA1 , .Nm HMAC/RIPEMD160 , .Nm HMAC/SHA256 , .Nm HMAC/SHA384 or .Nm HMAC/SHA512 . .It Can create a key from a couple of components (user entered passphrase, random bits from a file, etc.). .It Allows encryption of the root partition. The user will be asked for the passphrase before the root file system is mounted. .It The passphrase of the user is strengthened with: .Rs .%A B. Kaliski .%T "PKCS #5: Password-Based Cryptography Specification, Version 2.0." .%R RFC .%N 2898 .Re .It Allows the use of two independent keys (e.g., a .Qq "user key" and a .Qq "company key" ) . .It It is fast - .Nm performs simple sector-to-sector encryption. .It Allows Master Keys to be backed up and restored, so that if a user has to quickly destroy his keys, it is possible to get the data back by restoring keys from backup. .It Providers can be configured to automatically detach on last close (so users do not have to remember to detach providers after unmounting the file systems). .It Allows attaching a provider with a random, one-time key - useful for swap partitions and temporary file systems. .It Allows verification of data integrity (data authentication). .It Allows suspending and resuming encrypted devices. .El .Pp The first argument to .Nm indicates an action to be performed: .Bl -tag -width ".Cm configure" .It Cm init Initialize the provider which needs to be encrypted. Here you can set up the cryptographic algorithm to use, key length, etc. The last sector of the provider is used to store metadata. The .Cm init subcommand also automatically writes metadata backups to .Pa /var/backups/<prov>.eli file. The metadata can be recovered with the .Cm restore subcommand described below. .Pp Additional options include: .Bl -tag -width ".Fl J Ar newpassfile" .It Fl a Ar aalgo Enable data integrity verification (authentication) using the given algorithm. This will reduce the size of storage available and also reduce speed. For example, when using 4096 bytes sector and .Nm HMAC/SHA256 algorithm, 89% of the original provider storage will be available for use. Currently supported algorithms are: .Nm HMAC/MD5 , .Nm HMAC/SHA1 , .Nm HMAC/RIPEMD160 , .Nm HMAC/SHA256 , .Nm HMAC/SHA384 and .Nm HMAC/SHA512 . If the option is not given, there will be no authentication, only encryption.
The recommended algorithm is .Nm HMAC/SHA256 . .It Fl b Ask for the passphrase on boot, before the root partition is mounted. This makes it possible to use an encrypted root partition. One will still need bootable unencrypted storage with a .Pa /boot/ directory, which can be a CD-ROM disc or USB pen-drive, that can be removed after boot. .It Fl B Ar backupfile File name to use for metadata backup instead of the default .Pa /var/backups/<prov>.eli . To inhibit backups, you can use .Pa none as the .Ar backupfile . .It Fl e Ar ealgo Encryption algorithm to use. Currently supported algorithms are: .Nm AES-XTS , .Nm AES-CBC , .Nm Blowfish-CBC , .Nm Camellia-CBC and .Nm 3DES-CBC . The default and recommended algorithm is .Nm AES-XTS . .It Fl i Ar iterations Number of iterations to use with PKCS#5v2. If this option is not specified, .Nm will find the number of iterations which is equal to 2 seconds of crypto work. If 0 is given, PKCS#5v2 will not be used. .It Fl J Ar newpassfile Specifies a file which contains the passphrase or its part. If .Ar newpassfile is given as -, standard input will be used. Only the first line (excluding new-line character) is taken from the given file. This argument can be specified multiple times. .It Fl K Ar newkeyfile Specifies a file which contains part of the key. If .Ar newkeyfile is given as -, standard input will be used. This argument can be specified multiple times. .It Fl l Ar keylen Key length to use with the given cryptographic algorithm. If not given, the default key length for the given algorithm is used, which is: 128 for .Nm AES-XTS , .Nm AES-CBC , .Nm Blowfish-CBC and .Nm Camellia-CBC and 192 for .Nm 3DES-CBC . .It Fl P Do not use passphrase as the key component. .It Fl s Ar sectorsize Change decrypted provider's sector size. Increasing the sector size allows increased performance, because encryption/decryption which requires an initialization vector is done per sector; fewer sectors means less computational work. .It Fl V Ar version Metadata version to use. This option is helpful when creating a provider that may be used by older .Nm FreeBSD/GELI versions. Consult the .Sx HISTORY section to find which metadata version is supported by which FreeBSD version. Note that using an older metadata version may limit the number of features available. .El .It Cm attach Attach the given provider. The master key will be decrypted using the given passphrase/keyfile and a new GEOM provider will be created using the given provider's name with an .Qq .eli suffix. .Pp Additional options include: .Bl -tag -width ".Fl j Ar passfile" .It Fl d If specified, a decrypted provider will be detached automatically on last close. This can help with scarce memory so the user does not have to remember to detach the provider after unmounting the file system. It only works when the provider was opened for writing, so it will not work if the file system on the provider is mounted read-only. Probably a better choice is the .Fl l option for the .Cm detach subcommand. .It Fl j Ar passfile Specifies a file which contains the passphrase or its part. For more information see the description of the .Fl J option for the .Cm init subcommand. .It Fl k Ar keyfile Specifies a file which contains part of the key. For more information see the description of the .Fl K option for the .Cm init subcommand. .It Fl p Do not use passphrase as the key component. .It Fl r Attach read-only provider. It will not be opened for writing.
.El .It Cm detach Detach the given providers, which means remove the devfs entry and clear the keys from memory. .Pp Additional options include: .Bl -tag -width ".Fl f" .It Fl f Force detach - detach even if the provider is open. .It Fl l Mark provider to detach on last close. If this option is specified, the provider will not be detached while it is open, but will be automatically detached when it is closed for the last time even if it was only opened for reading. .El .It Cm onetime Attach the given providers with random, one-time keys. The command can be used to encrypt swap partitions or temporary file systems. .Pp Additional options include: .Bl -tag -width ".Fl a Ar sectorsize" .It Fl a Ar aalgo Enable data integrity verification (authentication). For more information, see the description of the .Cm init subcommand. .It Fl e Ar ealgo Encryption algorithm to use. For more information, see the description of the .Cm init subcommand. .It Fl d Detach on last close. Note: this option is not usable for temporary file systems as the provider will be detached after creating the file system on it. It still can (and should be) used for swap partitions. For more information, see the description of the .Cm attach subcommand. .It Fl l Ar keylen Key length to use with the given cryptographic algorithm. For more information, see the description of the .Cm init subcommand. .It Fl s Ar sectorsize Change decrypted provider's sector size. For more information, see the description of the .Cm init subcommand. .El .It Cm configure Change configuration of the given providers. .Pp Additional options include: .Bl -tag -width ".Fl b" .It Fl b Set the BOOT flag on the given providers. For more information, see the description of the .Cm init subcommand. .It Fl B Remove the BOOT flag from the given providers. .El .It Cm setkey Change or setup (if not yet initialized) selected key. There is one master key, which can be encrypted with two independent user keys. With the .Cm init subcommand, only key number 0 is initialized. The key can always be changed: for an attached provider, for a detached provider, or on the backup file. When a provider is attached, the user does not have to provide an old passphrase/keyfile. .Pp Additional options include: .Bl -tag -width ".Fl J Ar newpassfile" .It Fl i Ar iterations Number of iterations to use with PKCS#5v2. If 0 is given, PKCS#5v2 will not be used. To be able to use this option with the .Cm setkey subcommand, only one key has to be defined and this key must be changed. .It Fl j Ar passfile Specifies a file which contains the old passphrase or its part. .It Fl J Ar newpassfile Specifies a file which contains the new passphrase or its part. .It Fl k Ar keyfile Specifies a file which contains part of the old key. .It Fl K Ar newkeyfile Specifies a file which contains part of the new key. .It Fl n Ar keyno Specifies the number of the key to change (could be 0 or 1). If the provider is attached and no key number is given, the key used for attaching the provider will be changed. If the provider is detached (or we are operating on a backup file) and no key number is given, the key decrypted with the passphrase/keyfile will be changed. .It Fl p Do not use passphrase as the old key component. .It Fl P Do not use passphrase as the new key component. .El .It Cm delkey Destroy (overwrite with random data) the selected key. If one is destroying keys for an attached provider, the provider will not be detached even if all keys are destroyed. 
It can even be rescued with the .Cm setkey subcommand. .Pp Additional options include: .Bl -tag -width ".Fl a Ar keyno" .It Fl a Destroy all keys (does not need .Fl f option). .It Fl f Force key destruction. This option is needed to destroy the last key. .It Fl n Ar keyno Specifies the key number. If the provider is attached and no key number is given, the key used for attaching the provider will be destroyed. If provider is detached (or we are operating on a backup file) the key number has to be given. .El .It Cm kill This command should be used only in emergency situations. It will destroy all the keys on a given provider and will detach it forcibly (if it is attached). This is absolutely a one-way command - if you do not have a metadata backup, your data is gone for good. In case the provider was attached with the .Fl r flag, the keys will not be destroyed, only the provider will be detached. .Pp Additional options include: .Bl -tag -width ".Fl a" .It Fl a If specified, all currently attached providers will be killed. .El .It Cm backup Backup metadata from the given provider to the given file. .It Cm restore Restore metadata from the given file to the given provider. .Pp Additional options include: .Bl -tag -width ".Fl f" .It Fl f Metadata contains the size of the provider to ensure that the correct partition or slice is attached. If an attempt is made to restore metadata to a provider that has a different size, .Nm will refuse to restore the data unless the .Fl f switch is used. If the partition or slice has been grown, the .Cm resize subcommand should be used rather than attempting to relocate the metadata through .Cm backup and .Cm restore . .El .It Cm suspend Suspend device by waiting for all inflight requests to finish, clearing all sensitive information (like keys) from kernel memory, and blocking all further I/O requests until the .Cm resume subcommand is executed. This functionality is useful for laptops: when one wants to suspend a laptop, one does not want to leave an encrypted device attached. Instead of closing all files and directories opened from a file system located on an encrypted device, unmounting the file system, and detaching the device, the .Cm suspend subcommand can be used. Any access to the encrypted device will be blocked until the keys are recovered through the .Cm resume subcommand. Thus there is no need to close nor unmount anything. The .Cm suspend subcommand does not work with devices created with the .Cm onetime subcommand. Please note that sensitive data might still be present in memory after suspending an encrypted device due to the file system cache, etc. .Pp Additional options include: .Bl -tag -width ".Fl a" .It Fl a Suspend all .Nm devices. .El .It Cm resume Resume previously suspended device. The caller must ensure that executing this subcommand doesn't access the suspended device, leading to a deadlock. For example suspending a device which contains the file system where the .Nm utility is stored is bad idea. .Pp Additional options include: .Bl -tag -width ".Fl j Ar passfile" .It Fl j Ar passfile Specifies a file which contains the passphrase or its part. For more information see the description of the .Fl J option for the .Cm init subcommand. .It Fl k Ar keyfile Specifies a file which contains part of the key. For more information see the description of the .Fl K option for the .Cm init subcommand. .It Fl p Do not use passphrase as the key component. .El .It Cm resize Inform .Nm that the provider has been resized. 
The old metadata block is relocated to the correct position at the end of the provider and the provider size is updated. .Pp Additional options include: .Bl -tag -width ".Fl s Ar oldsize" .It Fl s Ar oldsize The size of the provider before it was resized. .El .It Cm version If no arguments are given, the .Cm version subcommand will print the version of .Nm userland utility as well as the version of the .Nm ELI GEOM class. .Pp If GEOM providers are specified, the .Cm version subcommand will print metadata version used by each of them. .It Cm clear Clear metadata from the given providers. .It Cm dump Dump metadata stored on the given providers. .It Cm list See .Xr geom 8 . .It Cm status See .Xr geom 8 . .It Cm load See .Xr geom 8 . .It Cm unload See .Xr geom 8 . .El .Pp Additional options include: .Bl -tag -width ".Fl v" .It Fl v Be more verbose. .El .Sh SYSCTL VARIABLES The following .Xr sysctl 8 variables can be used to control the behavior of the .Nm ELI GEOM class. The default value is shown next to each variable. Some variables can also be set in .Pa /boot/loader.conf . .Bl -tag -width indent .It Va kern.geom.eli.version Version number of the .Nm ELI GEOM class. .It Va kern.geom.eli.debug : No 0 Debug level of the .Nm ELI GEOM class. This can be set to a number between 0 and 3 inclusive. If set to 0, minimal debug information is printed. If set to 3, the maximum amount of debug information is printed. .It Va kern.geom.eli.tries : No 3 Number of times a user is asked for the passphrase. This is only used for providers which are attached on boot (before the root file system is mounted). If set to 0, attaching providers on boot will be disabled. This variable should be set in .Pa /boot/loader.conf . .It Va kern.geom.eli.overwrites : No 5 Specifies how many times the Master-Key will be overwritten with random values when it is destroyed. After this operation it is filled with zeros. .It Va kern.geom.eli.visible_passphrase : No 0 If set to 1, the passphrase entered on boot (before the root file system is mounted) will be visible. This alternative should be used with caution as the entered passphrase can be logged and exposed via .Xr dmesg 8 . This variable should be set in .Pa /boot/loader.conf . .It Va kern.geom.eli.threads : No 0 Specifies how many kernel threads should be used for doing software cryptography. Its purpose is to increase performance on SMP systems. If set to 0, a CPU-pinned thread will be started for every active CPU. .It Va kern.geom.eli.batch : No 0 When set to 1, can speed-up crypto operations by using batching. Batching reduces the number of interrupts by responding to a group of crypto requests with one interrupt. The crypto card and the driver has to support this feature. .It Va kern.geom.eli.key_cache_limit : No 8192 Specifies how many encryption keys to cache. The default limit (8192 keys) will allow caching of all keys for a 4TB provider with 512 byte sectors and will take around 1MB of memory. .It Va kern.geom.eli.key_cache_hits Reports how many times we were looking up a key and it was already in cache. This sysctl is not updated for providers that need less keys than the limit specified in .Va kern.geom.eli.key_cache_limit . .It Va kern.geom.eli.key_cache_misses Reports how many times we were looking up a key and it was not in cache. This sysctl is not updated for providers that need fewer keys than the limit specified in .Va kern.geom.eli.key_cache_limit . .El .Sh EXIT STATUS Exit status is 0 on success, and 1 if the command fails. 
.Sh EXAMPLES Initialize a provider which is going to be encrypted with a passphrase and random data from a file on the user's pen drive. Use 4kB sector size. Attach the provider, create a file system, and mount it. Do the work. Unmount the provider and detach it: .Bd -literal -offset indent # dd if=/dev/random of=/mnt/pendrive/da2.key bs=64 count=1 # geli init -s 4096 -K /mnt/pendrive/da2.key /dev/da2 Enter new passphrase: Reenter new passphrase: # geli attach -k /mnt/pendrive/da2.key /dev/da2 Enter passphrase: # dd if=/dev/random of=/dev/da2.eli bs=1m # newfs /dev/da2.eli # mount /dev/da2.eli /mnt/secret \&... # umount /mnt/secret # geli detach da2.eli .Ed .Pp Create an encrypted provider, but use two keys: one for your employee and one for you as the company's security officer (so it's not a tragedy if the employee .Qq accidentally forgets his passphrase): .Bd -literal -offset indent # geli init /dev/da2 Enter new passphrase: (enter security officer's passphrase) Reenter new passphrase: # geli setkey -n 1 /dev/da2 Enter passphrase: (enter security officer's passphrase) Enter new passphrase: (let your employee enter his passphrase ...) Reenter new passphrase: (... twice) .Ed .Pp You are the security officer in your company. Create an encrypted provider for use by the user, but remember that users forget their passphrases, so backup the Master Key with your own random key: .Bd -literal -offset indent # dd if=/dev/random of=/mnt/pendrive/keys/`hostname` bs=64 count=1 # geli init -P -K /mnt/pendrive/keys/`hostname` /dev/ad0s1e # geli backup /dev/ad0s1e /mnt/pendrive/backups/`hostname` (use key number 0, so the encrypted Master Key will be overwritten by this) # geli setkey -n 0 -k /mnt/pendrive/keys/`hostname` /dev/ad0s1e (allow the user to enter his passphrase) Enter new passphrase: Reenter new passphrase: .Ed .Pp Encrypted swap partition setup: .Bd -literal -offset indent # dd if=/dev/random of=/dev/ad0s1b bs=1m # geli onetime -d -e 3des ad0s1b # swapon /dev/ad0s1b.eli .Ed .Pp The example below shows how to configure two providers which will be attached on boot (before the root file system is mounted). One of them is using passphrase and three keyfiles and the other is using only a keyfile: .Bd -literal -offset indent # dd if=/dev/random of=/dev/da0 bs=1m # dd if=/dev/random of=/boot/keys/da0.key0 bs=32k count=1 # dd if=/dev/random of=/boot/keys/da0.key1 bs=32k count=1 # dd if=/dev/random of=/boot/keys/da0.key2 bs=32k count=1 # geli init -b -K /boot/keys/da0.key0 -K /boot/keys/da0.key1 -K /boot/keys/da0.key2 da0 Enter new passphrase: Reenter new passphrase: # dd if=/dev/random of=/dev/da1s3a bs=1m # dd if=/dev/random of=/boot/keys/da1s3a.key bs=128k count=1 # geli init -b -P -K /boot/keys/da1s3a.key da1s3a .Ed .Pp The providers are initialized, now we have to add these lines to .Pa /boot/loader.conf : .Bd -literal -offset indent geli_da0_keyfile0_load="YES" geli_da0_keyfile0_type="da0:geli_keyfile0" geli_da0_keyfile0_name="/boot/keys/da0.key0" geli_da0_keyfile1_load="YES" geli_da0_keyfile1_type="da0:geli_keyfile1" geli_da0_keyfile1_name="/boot/keys/da0.key1" geli_da0_keyfile2_load="YES" geli_da0_keyfile2_type="da0:geli_keyfile2" geli_da0_keyfile2_name="/boot/keys/da0.key2" geli_da1s3a_keyfile0_load="YES" geli_da1s3a_keyfile0_type="da1s3a:geli_keyfile0" geli_da1s3a_keyfile0_name="/boot/keys/da1s3a.key" .Ed .Pp Not only configure encryption, but also data integrity verification using .Nm HMAC/SHA256 . 
.Bd -literal -offset indent # geli init -a hmac/sha256 -s 4096 /dev/da0 Enter new passphrase: Reenter new passphrase: # geli attach /dev/da0 Enter passphrase: # dd if=/dev/random of=/dev/da0.eli bs=1m # newfs /dev/da0.eli # mount /dev/da0.eli /mnt/secret .Ed .Pp .Cm geli writes the metadata backup by default to the .Pa /var/backups/<prov>.eli file. If the metadata is lost in any way (e.g., by accidental overwrite), it can be restored. Consider the following situation: .Bd -literal -offset indent # geli init /dev/da0 Enter new passphrase: Reenter new passphrase: Metadata backup can be found in /var/backups/da0.eli and can be restored with the following command: # geli restore /var/backups/da0.eli /dev/da0 # geli clear /dev/da0 # geli attach /dev/da0 geli: Cannot read metadata from /dev/da0: Invalid argument. # geli restore /var/backups/da0.eli /dev/da0 # geli attach /dev/da0 Enter passphrase: .Ed .Pp If an encrypted file system is extended, it is necessary to relocate and update the metadata: .Bd -literal -offset indent # gpart create -s GPT ada0 # gpart add -s 1g -t freebsd-ufs -i 1 ada0 # geli init -K keyfile -P ada0p1 # gpart resize -s 2g -i 1 ada0 # geli resize -s 1g ada0p1 # geli attach -k keyfile -p ada0p1 .Ed .Pp Initialize a provider with the passphrase split into two files. The provider can be attached using those two files or by entering .Dq foobar as the passphrase at the .Nm prompt: .Bd -literal -offset indent # echo foo > da0.pass0 # echo bar > da0.pass1 # geli init -J da0.pass0 -J da0.pass1 da0 # geli attach -j da0.pass0 -j da0.pass1 da0 # geli detach da0 # geli attach da0 Enter passphrase: foobar .Ed .Pp Suspend all .Nm devices on a laptop, suspend the laptop, then resume devices one by one after resuming the laptop: .Bd -literal -offset indent # geli suspend -a # zzz # geli resume -p -k keyfile gpt/secret # geli resume gpt/private Enter passphrase: .Ed .Sh ENCRYPTION MODES .Nm supports two encryption modes: .Nm XTS , which was standardized as .Nm IEEE P1619 and .Nm CBC with unpredictable IV. The .Nm CBC mode used by .Nm is very similar to the mode .Nm ESSIV . .Sh DATA AUTHENTICATION .Nm can verify data integrity when an authentication algorithm is specified. When data corruption/modification is detected, .Nm will not return any data, but instead will return an error .Pq Er EINVAL . The offset and size of the corrupted data will be printed on the console. It is important to know against which attacks .Nm provides protection for your data. If data is modified in-place or copied from one place on the disk to another even without modification, .Nm should be able to detect such a change. If an attacker can remember the encrypted data, he can overwrite any future changes with the data he owns without it being noticed. In other words .Nm will not protect your data against replay attacks. .Pp It is recommended to write to the whole provider before first use, in order to make sure that all sectors and their corresponding checksums are properly initialized into a consistent state. +One can safely ignore data authentication errors that occur immediately +after the first time a provider is attached and before it is +initialized in this way. .Sh SEE ALSO .Xr crypto 4 , .Xr gbde 4 , .Xr geom 4 , .Xr loader.conf 5 , .Xr gbde 8 , .Xr geom 8 , .Xr crypto 9 .Sh HISTORY The .Nm utility appeared in .Fx 6.0 . Support for the .Nm Camellia block cipher was implemented by Yoshisato Yanagisawa in .Fx 7.0 .
.Pp Highest .Nm GELI metadata version supported by the given FreeBSD version: .Bl -column -offset indent ".Sy FreeBSD" ".Sy version" .It Sy FreeBSD Ta Sy GELI .It Sy version Ta Sy version .Pp .It Li 6.0 Ta 0 .It Li 6.1 Ta 0 .It Li 6.2 Ta 3 .It Li 6.3 Ta 3 .It Li 6.4 Ta 3 .Pp .It Li 7.0 Ta 3 .It Li 7.1 Ta 3 .It Li 7.2 Ta 3 .It Li 7.3 Ta 3 .It Li 7.4 Ta 3 .Pp .It Li 8.0 Ta 3 .It Li 8.1 Ta 3 .It Li 8.2 Ta 5 .Pp .It Li 9.0 Ta 6 .El .Sh AUTHORS .An Pawel Jakub Dawidek Aq pjd@FreeBSD.org Index: projects/nand/sbin/ipfw =================================================================== --- projects/nand/sbin/ipfw (revision 235262) +++ projects/nand/sbin/ipfw (revision 235263) Property changes on: projects/nand/sbin/ipfw ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sbin/ipfw:r235157-235262 Index: projects/nand/sbin/md5/md5.1 =================================================================== --- projects/nand/sbin/md5/md5.1 (revision 235262) +++ projects/nand/sbin/md5/md5.1 (revision 235263) @@ -1,149 +1,149 @@ .\" $FreeBSD$ .Dd September 7, 2008 .Dt MD5 1 .Os .Sh NAME -.Nm md5 , sha1 , sha256, rmd160 +.Nm md5 , sha1 , sha256 , rmd160 .Nd calculate a message-digest fingerprint (checksum) for a file .Sh SYNOPSIS .Nm md5 .Op Fl pqrtx .Op Fl c Ar string .Op Fl s Ar string .Op Ar .Nm sha1 .Op Fl pqrtx .Op Fl c Ar string .Op Fl s Ar string .Op Ar .Nm sha256 .Op Fl pqrtx .Op Fl c Ar string .Op Fl s Ar string .Op Ar .Nm rmd160 .Op Fl pqrtx .Op Fl c Ar string .Op Fl s Ar string .Op Ar .Sh DESCRIPTION The .Nm md5 , sha1 , sha256 and .Nm rmd160 utilities take as input a message of arbitrary length and produce as output a .Dq fingerprint or .Dq message digest of the input. It is conjectured that it is computationally infeasible to produce two messages having the same message digest, or to produce any message having a given prespecified target message digest. The .Tn MD5 , SHA-1 , SHA-256 and .Tn RIPEMD-160 algorithms are intended for digital signature applications, where a large file must be .Dq compressed in a secure manner before being encrypted with a private (secret) key under a public-key cryptosystem such as .Tn RSA . .Pp .Tn MD5 has been completely broken as far as finding collisions is concerned, and should not be relied upon to produce unique outputs. This also means that .Tn MD5 should not be used as part of a cryptographic signature scheme. At the current time (2009-01-06) there is no publicly known method to .Dq reverse MD5, i.e., to find an input given a hash value. .Pp .Tn SHA-1 currently (2009-01-06) has no known collisions, but an attack has been found which is faster than a brute-force search, placing the security of .Tn SHA-1 in doubt. .Pp It is recommended that all new applications use .Tn SHA-256 instead of one of the other hash functions. .Pp The following options may be used in any combination and must precede any files named on the command line. The hexadecimal checksum of each file listed on the command line is printed after the options are processed. .Bl -tag -width indent .It Fl c Ar string Compare files to this md5 string. (Note that this option is not yet useful if multiple files are specified.) .It Fl s Ar string Print a checksum of the given .Ar string . .It Fl p Echo stdin to stdout and append the checksum to stdout. .It Fl q Quiet mode - only the checksum is printed out. Overrides the .Fl r option. .It Fl r Reverses the format of the output. This helps with visual diffs. 
Does nothing when combined with the
.Fl ptx
options.
.It Fl t
Run a built-in time trial.
.It Fl x
Run a built-in test script.
.El
.Sh EXIT STATUS
The
.Nm md5 , sha1 , sha256
and
.Nm rmd160
utilities exit 0 on success, 1 if at least one of the input files could
not be read, and 2 if at least one file's digest does not match the
string given with the
.Fl c
option.
.Sh SEE ALSO
.Xr cksum 1 ,
.Xr md5 3 ,
.Xr ripemd 3 ,
.Xr sha 3 ,
.Xr sha256 3
.Rs
.%A R. Rivest
.%T The MD5 Message-Digest Algorithm
.%O RFC 1321
.Re
.Rs
.%A J. Burrows
.%T The Secure Hash Standard
.%O FIPS PUB 180-2
.Re
.Rs
.%A D. Eastlake and P. Jones
.%T US Secure Hash Algorithm 1
.%O RFC 3174
.Re
.Pp
RIPEMD-160 is part of the ISO draft standard
.Qq ISO/IEC DIS 10118-3
on dedicated hash functions.
.Pp
Secure Hash Standard (SHS):
.Pa http://csrc.nist.gov/cryptval/shs.html .
.Pp
The RIPEMD-160 page:
.Pa http://www.esat.kuleuven.ac.be/~bosselae/ripemd160.html .
.Sh ACKNOWLEDGMENTS
This program is placed in the public domain for free general use by
RSA Data Security.
.Pp
Support for SHA-1 and RIPEMD-160 has been added by
.An Oliver Eikemeier Aq eik@FreeBSD.org .
Index: projects/nand/sbin/md5/md5.c
===================================================================
--- projects/nand/sbin/md5/md5.c (revision 235262)
+++ projects/nand/sbin/md5/md5.c (revision 235263)
@@ -1,377 +1,377 @@
/*
 * Derived from:
 *
 * MDDRIVER.C - test driver for MD2, MD4 and MD5
 */

/*
 * Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All
 * rights reserved.
 *
 * RSA Data Security, Inc. makes no representations concerning either
 * the merchantability of this software or the suitability of this
 * software for any particular purpose. It is provided "as is"
 * without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this
 * documentation and/or software.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <err.h>
#include <md5.h>
#include <ripemd.h>
#include <sha.h>
#include <sha256.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/*
 * Length of test block, number of test blocks.
*/ #define TEST_BLOCK_LEN 10000 #define TEST_BLOCK_COUNT 100000 #define MDTESTCOUNT 8 int qflag; int rflag; int sflag; unsigned char* checkAgainst; int checksFailed; typedef void (DIGEST_Init)(void *); typedef void (DIGEST_Update)(void *, const unsigned char *, size_t); typedef char *(DIGEST_End)(void *, char *); extern const char *MD5TestOutput[MDTESTCOUNT]; extern const char *SHA1_TestOutput[MDTESTCOUNT]; extern const char *SHA256_TestOutput[MDTESTCOUNT]; extern const char *RIPEMD160_TestOutput[MDTESTCOUNT]; typedef struct Algorithm_t { const char *progname; const char *name; const char *(*TestOutput)[MDTESTCOUNT]; DIGEST_Init *Init; DIGEST_Update *Update; DIGEST_End *End; char *(*Data)(const void *, unsigned int, char *); char *(*File)(const char *, char *); } Algorithm_t; static void MD5_Update(MD5_CTX *, const unsigned char *, size_t); static void MDString(Algorithm_t *, const char *); static void MDTimeTrial(Algorithm_t *); static void MDTestSuite(Algorithm_t *); static void MDFilter(Algorithm_t *, int); static void usage(Algorithm_t *); typedef union { MD5_CTX md5; SHA1_CTX sha1; SHA256_CTX sha256; RIPEMD160_CTX ripemd160; } DIGEST_CTX; /* max(MD5_DIGEST_LENGTH, SHA_DIGEST_LENGTH, SHA256_DIGEST_LENGTH, RIPEMD160_DIGEST_LENGTH)*2+1 */ #define HEX_DIGEST_LENGTH 65 /* algorithm function table */ struct Algorithm_t Algorithm[] = { { "md5", "MD5", &MD5TestOutput, (DIGEST_Init*)&MD5Init, (DIGEST_Update*)&MD5_Update, (DIGEST_End*)&MD5End, &MD5Data, &MD5File }, { "sha1", "SHA1", &SHA1_TestOutput, (DIGEST_Init*)&SHA1_Init, (DIGEST_Update*)&SHA1_Update, (DIGEST_End*)&SHA1_End, &SHA1_Data, &SHA1_File }, { "sha256", "SHA256", &SHA256_TestOutput, (DIGEST_Init*)&SHA256_Init, (DIGEST_Update*)&SHA256_Update, (DIGEST_End*)&SHA256_End, &SHA256_Data, &SHA256_File }, { "rmd160", "RMD160", &RIPEMD160_TestOutput, (DIGEST_Init*)&RIPEMD160_Init, (DIGEST_Update*)&RIPEMD160_Update, (DIGEST_End*)&RIPEMD160_End, &RIPEMD160_Data, &RIPEMD160_File } }; static void MD5_Update(MD5_CTX *c, const unsigned char *data, size_t len) { MD5Update(c, data, len); } /* Main driver. 
Arguments (may be any combination): -sstring - digests string -t - runs time trial -x - runs test script filename - digests file (none) - digests standard input */ int main(int argc, char *argv[]) { int ch; char *p; char buf[HEX_DIGEST_LENGTH]; int failed; unsigned digest; const char* progname; if ((progname = strrchr(argv[0], '/')) == NULL) progname = argv[0]; else progname++; for (digest = 0; digest < sizeof(Algorithm)/sizeof(*Algorithm); digest++) if (strcasecmp(Algorithm[digest].progname, progname) == 0) break; if (digest == sizeof(Algorithm)/sizeof(*Algorithm)) digest = 0; failed = 0; checkAgainst = NULL; checksFailed = 0; while ((ch = getopt(argc, argv, "c:pqrs:tx")) != -1) switch (ch) { case 'c': checkAgainst = optarg; break; case 'p': MDFilter(&Algorithm[digest], 1); break; case 'q': qflag = 1; break; case 'r': rflag = 1; break; case 's': sflag = 1; MDString(&Algorithm[digest], optarg); break; case 't': MDTimeTrial(&Algorithm[digest]); break; case 'x': MDTestSuite(&Algorithm[digest]); break; default: usage(&Algorithm[digest]); } argc -= optind; argv += optind; if (*argv) { do { p = Algorithm[digest].File(*argv, buf); if (!p) { warn("%s", *argv); failed++; } else { if (qflag) printf("%s", p); else if (rflag) printf("%s %s", p, *argv); else printf("%s (%s) = %s", Algorithm[digest].name, *argv, p); if (checkAgainst && strcmp(checkAgainst,p)) { checksFailed++; if (!qflag) printf(" [ Failed ]"); } printf("\n"); } } while (*++argv); } else if (!sflag && (optind == 1 || qflag || rflag)) MDFilter(&Algorithm[digest], 0); if (failed != 0) return (1); if (checksFailed != 0) return (2); return (0); } /* * Digests a string and prints the result. */ static void MDString(Algorithm_t *alg, const char *string) { size_t len = strlen(string); char buf[HEX_DIGEST_LENGTH]; alg->Data(string,len,buf); if (qflag) printf("%s", buf); else if (rflag) printf("%s \"%s\"", buf, string); else printf("%s (\"%s\") = %s", alg->name, string, buf); if (checkAgainst && strcmp(buf,checkAgainst)) { checksFailed++; if (!qflag) printf(" [ failed ]"); } printf("\n"); } /* * Measures the time to digest TEST_BLOCK_COUNT TEST_BLOCK_LEN-byte blocks. */ static void MDTimeTrial(Algorithm_t *alg) { DIGEST_CTX context; struct rusage before, after; struct timeval total; float seconds; unsigned char block[TEST_BLOCK_LEN]; unsigned int i; char *p, buf[HEX_DIGEST_LENGTH]; printf("%s time trial. Digesting %d %d-byte blocks ...", alg->name, TEST_BLOCK_COUNT, TEST_BLOCK_LEN); fflush(stdout); /* Initialize block */ for (i = 0; i < TEST_BLOCK_LEN; i++) block[i] = (unsigned char) (i & 0xff); /* Start timer */ getrusage(RUSAGE_SELF, &before); /* Digest blocks */ alg->Init(&context); for (i = 0; i < TEST_BLOCK_COUNT; i++) alg->Update(&context, block, TEST_BLOCK_LEN); p = alg->End(&context, buf); /* Stop timer */ getrusage(RUSAGE_SELF, &after); timersub(&after.ru_utime, &before.ru_utime, &total); seconds = total.tv_sec + (float) total.tv_usec / 1000000; printf(" done\n"); printf("Digest = %s", p); printf("\nTime = %f seconds\n", seconds); printf("Speed = %f bytes/second\n", (float) TEST_BLOCK_LEN * (float) TEST_BLOCK_COUNT / seconds); } /* * Digests a reference suite of strings and prints the results. 
*/ const char *MDTestInput[MDTESTCOUNT] = { "", "a", "abc", "message digest", "abcdefghijklmnopqrstuvwxyz", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", "12345678901234567890123456789012345678901234567890123456789012345678901234567890", "MD5 has not yet (2001-09-03) been broken, but sufficient attacks have been made \ that its security is in some doubt" }; const char *MD5TestOutput[MDTESTCOUNT] = { "d41d8cd98f00b204e9800998ecf8427e", "0cc175b9c0f1b6a831c399e269772661", "900150983cd24fb0d6963f7d28e17f72", "f96b697d7cb7938d525a2f31aaf161d0", "c3fcd3d76192e4007dfb496cca67e13b", "d174ab98d277d9f5a5611c2c9f419d9f", "57edf4a22be3c955ac49da2e2107b67a", "b50663f41d44d92171cb9976bc118538" }; const char *SHA1_TestOutput[MDTESTCOUNT] = { "da39a3ee5e6b4b0d3255bfef95601890afd80709", "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8", "a9993e364706816aba3e25717850c26c9cd0d89d", "c12252ceda8be8994d5fa0290a47231c1d16aae3", "32d10c7b8cf96570ca04ce37f2a19d84240d3a89", "761c457bf73b14d27e9e9265c46f4b4dda11f940", "50abf5706a150990a08b2c5ea40fa0e585554732", "18eca4333979c4181199b7b4fab8786d16cf2846" }; const char *SHA256_TestOutput[MDTESTCOUNT] = { "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb", "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "f7846f55cf23e14eebeab5b4e1550cad5b509e3348fbc4efa3a1413d393cb650", "71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73", "db4bfcbd4da0cd85a60c3c37d3fbd8805c77f15fc6b1fdfe614ee0a7c8fdb4c0", "f371bc4a311f2b009eef952dd83ca80e2b60026c8e935592d0f9c308453c813e", "e6eae09f10ad4122a0e2a4075761d185a272ebd9f5aa489e998ff2f09cbfdd9f" }; const char *RIPEMD160_TestOutput[MDTESTCOUNT] = { "9c1185a5c5e9fc54612808977ee8f548b2258d31", "0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "5d0689ef49d2fae572b881b123a85ffa21595f36", "f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "b0e20b6e3116640286ed3a87a5713079b21f5189", "9b752e45573d4b39f4dbd3323cab82bf63326bfb", "5feb69c6bf7c29d95715ad55f57d8ac5b2b7dd32" }; static void MDTestSuite(Algorithm_t *alg) { int i; char buffer[HEX_DIGEST_LENGTH]; printf("%s test suite:\n", alg->name); for (i = 0; i < MDTESTCOUNT; i++) { (*alg->Data)(MDTestInput[i], strlen(MDTestInput[i]), buffer); printf("%s (\"%s\") = %s", alg->name, MDTestInput[i], buffer); if (strcmp(buffer, (*alg->TestOutput)[i]) == 0) printf(" - verified correct\n"); else printf(" - INCORRECT RESULT!\n"); } } /* * Digests the standard input and prints the result. 
*/ static void MDFilter(Algorithm_t *alg, int tee) { DIGEST_CTX context; unsigned int len; unsigned char buffer[BUFSIZ]; char buf[HEX_DIGEST_LENGTH]; alg->Init(&context); while ((len = fread(buffer, 1, BUFSIZ, stdin))) { if (tee && len != fwrite(buffer, 1, len, stdout)) err(1, "stdout"); alg->Update(&context, buffer, len); } printf("%s\n", alg->End(&context, buf)); } static void usage(Algorithm_t *alg) { - fprintf(stderr, "usage: %s [-pqrtx] [-s string] [files ...]\n", alg->progname); + fprintf(stderr, "usage: %s [-pqrtx] [-c string] [-s string] [files ...]\n", alg->progname); exit(1); } Index: projects/nand/sbin =================================================================== --- projects/nand/sbin (revision 235262) +++ projects/nand/sbin (revision 235263) Property changes on: projects/nand/sbin ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sbin:r235157-235262 Index: projects/nand/share/man/man4/ral.4 =================================================================== --- projects/nand/share/man/man4/ral.4 (revision 235262) +++ projects/nand/share/man/man4/ral.4 (revision 235263) @@ -1,259 +1,295 @@ -.\" Copyright (c) 2005, 2006 -.\" Damien Bergamini +.\" Copyright (c) 2005-2010 Damien Bergamini .\" .\" Permission to use, copy, modify, and distribute this software for any .\" purpose with or without fee is hereby granted, provided that the above .\" copyright notice and this permission notice appear in all copies. .\" .\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES .\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF .\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR .\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES .\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN .\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF .\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. .\" .\" $FreeBSD$ .\" -.Dd July 8, 2009 +.Dd May 10, 2012 .Dt RAL 4 .Os .Sh NAME .Nm ral -.Nd "Ralink Technology IEEE 802.11 wireless network driver" +.Nd "Ralink Technology IEEE 802.11a/g/n wireless network device" .Sh SYNOPSIS To compile this driver into the kernel, place the following lines in your kernel configuration file: .Bd -ragged -offset indent .Cd "device ral" .Cd "device ralfw" .Cd "device wlan" .Cd "device wlan_amrr" .Cd "device firmware" .Ed .Pp Alternatively, to load the driver as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent if_ral_load="YES" .Ed .Sh DESCRIPTION The .Nm -driver supports PCI/CardBus wireless adapters based on the Ralink Technology -RT2500, RT2501, and RT2600 chipsets. +driver supports PCI/PCIe/CardBus wireless adapters based on the Ralink RT2500, +RT2501, RT2600, RT2700, RT2800 and RT3090 chipsets. .Pp The RT2500 chipset is the first generation of 802.11b/g adapters from Ralink. -It consists of two integrated chips, a RT2560 MAC/BBP and a RT2525 radio +It consists of two integrated chips, an RT2560 MAC/BBP and an RT2525 radio transceiver. .Pp -The RT2501 chipset is the second generation of 802.11b/g adapters from Ralink. -It consists of two integrated chips, a RT2561 MAC/BBP and a RT2527 radio +The RT2501 chipset is the second generation of 802.11a/b/g adapters from +Ralink. +It consists of two integrated chips, an RT2561 MAC/BBP and an RT2527 radio transceiver. 
This chipset provides support for the IEEE 802.11e standard with multiple
hardware transmission queues and allows scatter/gather for efficient DMA
operations.
.Pp
-The RT2600 chipset consists of two integrated chips, a RT2661 MAC/BBP and a
+The RT2600 chipset consists of two integrated chips, an RT2661 MAC/BBP and an
RT2529 radio transceiver.
This chipset uses the MIMO (multiple-input multiple-output) technology with
-multiple antennas to extend the operating range of the adapter and to achieve
-higher throughput.
-MIMO is the basis of the forthcoming IEEE 802.11n standard.
+multiple radio transceivers to extend the operating range of the adapter and
+to achieve higher throughput.
+However, the RT2600 chipset does not support any of the 802.11n features.
.Pp
-The transmit speed is user-selectable or can be adapted automatically by the
-driver depending on the received signal strength and on the number of hardware
-transmission retries.
+The RT2700 chipset is a low-cost version of the RT2800 chipset.
+It supports a single transmit path and two receiver paths (1T2R).
+It consists of two integrated chips, an RT2760 or RT2790 (PCIe) MAC/BBP and
+an RT2720 (2.4GHz) or RT2750 (2.4GHz/5GHz) radio transceiver.
.Pp
+The RT2800 chipset is the first generation of 802.11n adapters from Ralink.
+It consists of two integrated chips, an RT2860 or RT2890 (PCIe) MAC/BBP and
+an RT2820 (2.4GHz) or RT2850 (2.4GHz/5GHz) radio transceiver.
+The RT2800 chipset supports two transmit paths and up to three receiver
+paths (2T2R/2T3R).
+It can achieve speeds up to 144Mbps (20MHz bandwidth) and 300Mbps (40MHz
+bandwidth).
+.Pp
+The RT3090 chipset is the first generation of single-chip 802.11n adapters
+from Ralink.
.Nm
supports
.Cm station ,
.Cm adhoc ,
.Cm hostap ,
.Cm mesh ,
.Cm wds ,
and
.Cm monitor
mode operation.
Only one
.Cm hostap
or
.Cm mesh
virtual interface may be configured at a time.
Any number of
.Cm wds
virtual interfaces may be configured together with a
.Cm hostap
interface.
Multiple
.Cm station
interfaces may be operated together with a
.Cm hostap
interface to construct a wireless repeater device.
+.Pp
+The transmit speed is user-selectable or can be adapted automatically by the
+driver depending on the number of hardware transmission retries.
For more information on configuring this device, see
.Xr ifconfig 8 .
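.Pp
As a minimal sketch, assuming the adapter attached as
.Li ral0
and a WPA-protected network, the matching
.Xr rc.conf 5
entries to create and configure a
.Xr wlan 4
interface at boot would be:
.Bd -literal -offset indent
wlans_ral0="wlan0"
ifconfig_wlan0="WPA DHCP"
.Ed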
.Sh HARDWARE The .Nm -driver supports PCI/CardBus wireless adapters based on the Ralink Technology -RT2500, RT2501, and RT2600 chipsets, including: +driver supports PCI/PCIe/CardBus wireless adapters based on Ralink Technology +chipsets, including: .Pp .Bl -column -compact ".Li Atlantis Land A02-PCM-W54" "RT2561S" "CardBus" .It Em Card Ta Em MAC/BBP Ta Em Bus .It "A-Link WL54H" Ta RT2560 Ta PCI .It "A-Link WL54PC" Ta RT2560 Ta CardBus .It "AirLink101 AWLC5025" Ta RT2661 Ta CardBus .It "AirLink101 AWLH5025" Ta RT2661 Ta PCI .It "Amigo AWI-914W" Ta RT2560 Ta CardBus .It "Amigo AWI-922W" Ta RT2560 Ta mini-PCI .It "Amigo AWI-926W" Ta RT2560 Ta PCI .It "AMIT WL531C" Ta RT2560 Ta CardBus .It "AMIT WL531P" Ta RT2560 Ta PCI .It "AOpen AOI-831" Ta RT2560 Ta PCI .It "ASUS WL-107G" Ta RT2560 Ta CardBus .It "ASUS WL-130g" Ta RT2560 Ta PCI .It "Atlantis Land A02-PCI-W54" Ta RT2560 Ta PCI .It "Atlantis Land A02-PCM-W54" Ta RT2560 Ta CardBus .It "Belkin F5D7000 v3" Ta RT2560 Ta PCI .It "Belkin F5D7010 v2" Ta RT2560 Ta CardBus .It "Billionton MIWLGRL" Ta RT2560 Ta mini-PCI .It "Canyon CN-WF511" Ta RT2560 Ta PCI .It "Canyon CN-WF513" Ta RT2560 Ta CardBus .It "CC&C WL-2102" Ta RT2560 Ta CardBus .It "CNet CWC-854" Ta RT2560 Ta CardBus .It "CNet CWP-854" Ta RT2560 Ta PCI .It "Compex WL54G" Ta RT2560 Ta CardBus .It "Compex WLP54G" Ta RT2560 Ta PCI .It "Conceptronic C54RC" Ta RT2560 Ta CardBus .It "Conceptronic C54Ri" Ta RT2560 Ta PCI .It "Digitus DN-7001G-RA" Ta RT2560 Ta CardBus .It "Digitus DN-7006G-RA" Ta RT2560 Ta PCI .It "E-Tech WGPC02" Ta RT2560 Ta CardBus .It "E-Tech WGPI02" Ta RT2560 Ta PCI .It "Edimax EW-7108PCg" Ta RT2560 Ta CardBus .It "Edimax EW-7128g" Ta RT2560 Ta PCI .It "Eminent EM3036" Ta RT2560 Ta CardBus .It "Eminent EM3037" Ta RT2560 Ta PCI .It "Encore ENLWI-G-RLAM" Ta RT2560 Ta PCI .It "Encore ENPWI-G-RLAM" Ta RT2560 Ta CardBus .It "Fiberline WL-400P" Ta RT2560 Ta PCI .It "Fibreline WL-400X" Ta RT2560 Ta CardBus .It "Gigabyte GN-WI01GS" Ta RT2561S Ta mini-PCI .It "Gigabyte GN-WIKG" Ta RT2560 Ta mini-PCI .It "Gigabyte GN-WMKG" Ta RT2560 Ta CardBus .It "Gigabyte GN-WP01GS" Ta RT2561S Ta PCI .It "Gigabyte GN-WPKG" Ta RT2560 Ta PCI .It "Hawking HWC54GR" Ta RT2560 Ta CardBus .It "Hawking HWP54GR" Ta RT2560 Ta PCI .It "iNexQ CR054g-009 (R03)" Ta RT2560 Ta PCI .It "JAHT WN-4054P" Ta RT2560 Ta CardBus .It "JAHT WN-4054PCI" Ta RT2560 Ta PCI .It "LevelOne WNC-0301 v2" Ta RT2560 Ta PCI .It "LevelOne WPC-0301 v2" Ta RT2560 Ta CardBus .It "Linksys WMP54G v4" Ta RT2560 Ta PCI .It "Micronet SP906GK" Ta RT2560 Ta PCI .It "Micronet SP908GK V3" Ta RT2560 Ta CardBus .It "Minitar MN54GCB-R" Ta RT2560 Ta CardBus .It "Minitar MN54GPC-R" Ta RT2560 Ta PCI .It "MSI CB54G2" Ta RT2560 Ta CardBus .It "MSI MP54G2" Ta RT2560 Ta mini-PCI .It "MSI PC54G2" Ta RT2560 Ta PCI .It "OvisLink EVO-W54PCI" Ta RT2560 Ta PCI .It "PheeNet HWL-PCIG/RA" Ta RT2560 Ta PCI .It "Pro-Nets CB80211G" Ta RT2560 Ta CardBus .It "Pro-Nets PC80211G" Ta RT2560 Ta PCI .It "Repotec RP-WB7108" Ta RT2560 Ta CardBus .It "Repotec RP-WP0854" Ta RT2560 Ta PCI .It "SATech SN-54C" Ta RT2560 Ta CardBus .It "SATech SN-54P" Ta RT2560 Ta PCI .It "Sitecom WL-112" Ta RT2560 Ta CardBus .It "Sitecom WL-115" Ta RT2560 Ta PCI .It "SMC SMCWCB-GM" Ta RT2661 Ta CardBus .It "SMC SMCWPCI-GM" Ta RT2661 Ta PCI .It "SparkLAN WL-685R" Ta RT2560 Ta CardBus .It "Surecom EP-9321-g" Ta RT2560 Ta PCI .It "Surecom EP-9321-g1" Ta RT2560 Ta PCI .It "Surecom EP-9428-g" Ta RT2560 Ta CardBus .It "Sweex LC500050" Ta RT2560 Ta CardBus .It "Sweex LC700030" Ta RT2560 Ta PCI .It "TekComm 
NE-9321-g" Ta RT2560 Ta PCI
.It "TekComm NE-9428-g" Ta RT2560 Ta CardBus
.It "Unex CR054g-R02" Ta RT2560 Ta PCI
.It "Unex MR054g-R02" Ta RT2560 Ta CardBus
.It "Zinwell ZWX-G160" Ta RT2560 Ta CardBus
.It "Zinwell ZWX-G360" Ta RT2560 Ta mini-PCI
.It "Zinwell ZWX-G361" Ta RT2560 Ta PCI
.It "Zonet ZEW1500" Ta RT2560 Ta CardBus
.It "Zonet ZEW1600" Ta RT2560 Ta PCI
.El
-.Pp
-An up to date list can be found at
-.Pa http://damien.bergamini.free.fr/ral/list.html .
.Sh EXAMPLES
Join an existing BSS network (i.e., connect to an access point):
.Pp
.Dl "ifconfig wlan create wlandev ral0 inet 192.168.0.20 netmask 0xffffff00"
.Pp
Join a specific BSS network with network name
.Dq Li my_net :
.Bd -literal -offset indent
ifconfig wlan create wlandev ral0 inet 192.168.0.20 \e
    netmask 0xffffff00 ssid my_net
.Ed
.Pp
Join a specific BSS network with 40-bit WEP encryption:
.Bd -literal -offset indent
ifconfig wlan create wlandev ral0 inet 192.168.0.20 \e
    netmask 0xffffff00 ssid my_net \e
    wepmode on wepkey 0x1234567890 weptxkey 1
.Ed
.Pp
Join a specific BSS network with 104-bit WEP encryption:
.Bd -literal -offset indent
ifconfig wlan create wlandev ral0 inet 192.168.0.20 \e
    netmask 0xffffff00 ssid my_net \e
    wepmode on wepkey 0x01020304050607080910111213 weptxkey 1
.Ed
.Sh DIAGNOSTICS
.Bl -diag
.It "ral%d: could not load 8051 microcode"
An error occurred while attempting to upload the microcode to the onboard 8051
microcontroller unit.
.It "ral%d: timeout waiting for MCU to initialize"
The onboard 8051 microcontroller unit failed to initialize in time.
.It "ral%d: device timeout"
A frame dispatched to the hardware for transmission did not complete in time.
The driver will reset the hardware.
This should not happen.
.El
.Sh SEE ALSO
.Xr intro 4 ,
.Xr cardbus 4 ,
.Xr wlan 4 ,
.Xr wlan_ccmp 4 ,
.Xr wlan_tkip 4 ,
.Xr wlan_wep 4 ,
.Xr wlan_xauth 4 ,
.Xr hostapd 8 ,
.Xr ifconfig 8 ,
-.Xr wpa_supplicant 8 .
+.Xr wpa_supplicant 8
.Rs
.%T "Ralink Technology"
.%U http://www.ralinktech.com/
.Re
.Sh HISTORY
The
.Nm
driver first appeared in
.Ox 3.7 .
+Support for the RT2501 and RT2600 chipsets was added in
+.Ox 3.9 .
+Support for the RT2800 chipset was added in
+.Ox 4.3 .
+Support for the RT2700 chipset was added in
+.Ox 4.4 .
+Support for the RT3090 chipset was added in
+.Ox 4.9 .
.Sh AUTHORS
The original
.Nm
driver was written by
-.An Damien Bergamini Aq damien@FreeBSD.org .
-.Sh BUGS
-Host AP mode doesn't support client power save.
-Clients using power save mode will experience
+.An Damien Bergamini Aq damien@openbsd.org .
+.Sh CAVEATS
+The
+.Nm
+driver does not make use of the hardware cryptographic engine.
+.Pp
+The
+.Nm
+driver does not support any of the 802.11n capabilities offered by
+the RT2700 and RT2800 chipsets.
+Additional work is required before those features can be supported.
+.Pp
+Host AP mode does not support power saving.
+Clients attempting to use power saving mode may experience
significant packet loss (disabling power saving on the client will fix this).
+.Pp
+Some PCI
+.Nm
+adapters seem to strictly require a system supporting PCI 2.2 or greater and
+will likely not work in systems based on older revisions of the PCI
+specification.
+Check the board's PCI version before purchasing the card.
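+.Pp
+On a system where the card is already installed,
+.Xr pciconf 8
+can list the PCI capabilities advertised by each device, which gives a
+rough indication of the board's PCI conformance; a usage sketch:
+.Bd -literal -offset indent
+# pciconf -lc
+.Ed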
Index: projects/nand/share/man/man4
===================================================================
--- projects/nand/share/man/man4 (revision 235262)
+++ projects/nand/share/man/man4 (revision 235263)
Property changes on: projects/nand/share/man/man4
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/share/man/man4:r235157-235262
Index: projects/nand/share/mk/bsd.arch.inc.mk
===================================================================
--- projects/nand/share/mk/bsd.arch.inc.mk (revision 235262)
+++ projects/nand/share/mk/bsd.arch.inc.mk (revision 235263)
Property changes on: projects/nand/share/mk/bsd.arch.inc.mk
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/share/mk/bsd.arch.inc.mk:r235157-235262
Index: projects/nand/share/syscons/keymaps/Makefile
===================================================================
--- projects/nand/share/syscons/keymaps/Makefile (revision 235262)
+++ projects/nand/share/syscons/keymaps/Makefile (revision 235263)
@@ -1,55 +1,56 @@
# $FreeBSD$

FILES= INDEX.keymaps \
	be.iso.kbd be.iso.acc.kbd \
	bg.bds.ctrlcaps.kbd bg.phonetic.ctrlcaps.kbd \
	br275.iso.kbd br275.iso.acc.kbd br275.cp850.kbd \
	by.cp1131.kbd by.cp1251.kbd by.iso5.kbd \
	ce.iso2.kbd \
	colemak.iso15.acc.kbd \
	cs.latin2.qwertz.kbd \
	cz.iso2.kbd \
	danish.iso.kbd danish.iso.acc.kbd danish.cp865.kbd \
	dutch.iso.acc.kbd \
	eee_nordic.kbd el.iso07.kbd \
	estonian.iso.kbd estonian.iso15.kbd estonian.cp850.kbd \
	finnish.iso.kbd finnish.cp850.kbd \
	fr.iso.kbd fr.iso.acc.kbd fr.dvorak.kbd fr.dvorak.acc.kbd \
	fr.macbook.acc.kbd \
	fr_CA.iso.acc.kbd \
	german.iso.kbd german.iso.acc.kbd german.cp850.kbd \
	gr.elot.acc.kbd gr.us101.acc.kbd \
	hr.iso.kbd \
	hu.iso2.101keys.kbd hu.iso2.102keys.kbd \
	hy.armscii-8.kbd \
	icelandic.iso.kbd icelandic.iso.acc.kbd \
	it.iso.kbd \
	iw.iso8.kbd \
	jp.106.kbd jp.106x.kbd jp.pc98.kbd jp.pc98.iso.kbd \
	kk.pt154.kst.kbd kk.pt154.io.kbd \
	latinamerican.kbd latinamerican.iso.acc.kbd \
	lt.iso4.kbd \
	norwegian.iso.kbd norwegian.dvorak.kbd \
	pl_PL.ISO8859-2.kbd pl_PL.dvorak.kbd \
	pt.iso.kbd pt.iso.acc.kbd \
	ru.koi8-r.kbd ru.koi8-r.shift.kbd ru.koi8-r.win.kbd \
	ru.cp866.kbd ru.iso5.kbd \
	si.iso.kbd \
	sk.iso2.kbd \
+	spanish.dvorak.kbd \
	spanish.iso.kbd spanish.iso.acc.kbd spanish.iso15.acc.kbd \
	swedish.iso.kbd swedish.cp850.kbd \
	swissfrench.iso.kbd swissfrench.iso.acc.kbd swissfrench.cp850.kbd \
	swissgerman.iso.kbd swissgerman.iso.acc.kbd swissgerman.cp850.kbd \
	swissgerman.macbook.acc.kbd \
	tr.iso9.q.kbd \
	ua.koi8-u.kbd ua.koi8-u.shift.alt.kbd ua.iso5.kbd \
	uk.iso.kbd uk.iso-ctrl.kbd uk.cp850.kbd uk.cp850-ctrl.kbd \
	uk.dvorak.kbd \
	us.iso.kbd us.dvorak.kbd us.dvorakl.kbd us.dvorakr.kbd us.dvorakx.kbd \
	us.emacs.kbd us.pc-ctrl.kbd us.unix.kbd us.iso.acc.kbd

FILESDIR= ${SHAREDIR}/syscons/keymaps
NO_OBJ=

.include <bsd.prog.mk>
Index: projects/nand/share/syscons/keymaps/spanish.dvorak.kbd
===================================================================
--- projects/nand/share/syscons/keymaps/spanish.dvorak.kbd (nonexistent)
+++ projects/nand/share/syscons/keymaps/spanish.dvorak.kbd (revision 235263)
@@ -0,0 +1,139 @@
+# $FreeBSD$
+#                                                       alt
+# scan                 cntrl        alt   alt    cntrl  lock
+# code base  shift cntrl shift alt  shift cntrl  shift  state
+# ------------------------------------------------------------------
+ 000 nop nop nop nop nop nop nop nop O
+ 001 esc esc esc esc esc esc debug esc O
+ 002 '1' '!' nop nop '|' '!'
nop nop O + 003 '2' '"' nop nop '@' '"' nop nop O + 004 '3' 183 nop nop '#' '#' nop nop O + 005 '4' '$' nop nop '~' '~' nop nop O + 006 '5' '%' nop nop '5' '%' nop nop O + 007 '6' '&' nop nop 172 172 nop nop O + 008 '7' '/' nop nop '7' '/' nop nop O + 009 '8' '(' nop nop '8' '(' nop nop O + 010 '9' ')' nop nop '9' ')' nop nop O + 011 '0' '=' nop nop '0' '=' nop nop O + 012 ''' '?' nop nop ''' '?' nop nop O + 013 161 191 nop nop 161 191 nop nop O + 014 bs bs del del bs bs del del O + 015 ht btab nop nop ht btab nop nop O + 016 '.' ':' nop nop '.' ':' nop nop O + 017 ',' ';' nop nop ',' ';' nop nop O + 018 241 209 nop nop '~' '~' nop nop O + 019 'p' 'P' dle dle 'p' 'P' dle dle C + 020 'y' 'Y' em em 'y' 'Y' em em C + 021 'f' 'F' ack ack 'f' 'F' ack ack C + 022 'g' 'G' bel bel 'g' 'G' bel bel C + 023 'c' 'C' etx etx 'c' 'C' etx etx C + 024 'h' 'H' bs bs 'h' 'H' bs bs C + 025 'l' 'L' ff ff 'l' 'L' ff ff C + 026 dgra dcir esc esc '[' '[' esc esc O + 027 '+' '*' gs gs ']' ']' gs gs O + 028 cr cr nl nl cr cr nl nl O + 029 lctrl lctrl lctrl lctrl lctrl lctrl lctrl lctrl O + 030 'a' 'A' soh soh 'a' 'A' soh soh C + 031 'o' 'O' si si 243 211 si si C + 032 'e' 'E' enq enq 164 164 enq enq C + 033 'u' 'U' nak nak 250 218 nak nak C + 034 'i' 'I' ht ht 237 205 ht ht C + 035 'd' 'D' eot eot 'd' 'D' eot eot C + 036 'r' 'R' dc2 dc2 'r' 'R' dc2 dc2 C + 037 't' 'T' dc4 dc4 't' 'T' dc4 dc4 C + 038 'n' 'N' so so 'n' 'N' so so C + 039 's' 'S' dc3 dc3 's' 'S' dc3 dc3 C + 040 dacu duml nop nop '{' '{' nop nop O + 041 '\' '|' fs fs '\' '|' fs fs O + 042 lshift lshift lshift lshift lshift lshift lshift lshift O + 043 231 199 rs rs '}' '}' rs rs O + 044 '-' '_' us us '-' '_' us us O + 045 'q' 'Q' dc1 dc1 'q' 'Q' dc1 dc1 C + 046 'j' 'J' nl nl 'j' 'J' nl nl C + 047 'k' 'K' vt vt 'k' 'K' vt vt C + 048 'x' 'X' can can 'x' 'X' can can C + 049 'b' 'B' stx stx 'b' 'B' stx stx C + 050 'm' 'M' cr cr 'm' 'M' cr cr C + 051 'w' 'W' etb etb 'w' 'W' etb etb C + 052 'v' 'V' syn syn 'v' 'V' syn syn C + 053 'z' 'Z' sub sub 'z' 'Z' sub sub C + 054 rshift rshift rshift rshift rshift rshift rshift rshift O + 055 '*' '*' '*' '*' '*' '*' '*' '*' O + 056 lalt lalt lalt lalt lalt lalt lalt lalt O + 057 ' ' ' ' nul ' ' ' ' ' ' susp ' ' O + 058 clock clock clock clock clock clock clock clock O + 059 fkey01 fkey13 fkey25 fkey37 scr01 scr11 scr01 scr11 O + 060 fkey02 fkey14 fkey26 fkey38 scr02 scr12 scr02 scr12 O + 061 fkey03 fkey15 fkey27 fkey39 scr03 scr13 scr03 scr13 O + 062 fkey04 fkey16 fkey28 fkey40 scr04 scr14 scr04 scr14 O + 063 fkey05 fkey17 fkey29 fkey41 scr05 scr15 scr05 scr15 O + 064 fkey06 fkey18 fkey30 fkey42 scr06 scr16 scr06 scr16 O + 065 fkey07 fkey19 fkey31 fkey43 scr07 scr07 scr07 scr07 O + 066 fkey08 fkey20 fkey32 fkey44 scr08 scr08 scr08 scr08 O + 067 fkey09 fkey21 fkey33 fkey45 scr09 scr09 scr09 scr09 O + 068 fkey10 fkey22 fkey34 fkey46 scr10 scr10 scr10 scr10 O + 069 nlock nlock nlock nlock nlock nlock nlock nlock O + 070 slock slock slock slock slock slock slock slock O + 071 fkey49 '7' '7' '7' '7' '7' '7' '7' N + 072 fkey50 '8' '8' '8' '8' '8' '8' '8' N + 073 fkey51 '9' '9' '9' '9' '9' '9' '9' N + 074 fkey52 '-' '-' '-' '-' '-' '-' '-' N + 075 fkey53 '4' '4' '4' '4' '4' '4' '4' N + 076 fkey54 '5' '5' '5' '5' '5' '5' '5' N + 077 fkey55 '6' '6' '6' '6' '6' '6' '6' N + 078 fkey56 '+' '+' '+' '+' '+' '+' '+' N + 079 fkey57 '1' '1' '1' '1' '1' '1' '1' N + 080 fkey58 '2' '2' '2' '2' '2' '2' '2' N + 081 fkey59 '3' '3' '3' '3' '3' '3' '3' N + 082 fkey60 '0' '0' '0' '0' '0' '0' '0' N + 083 del '.' '.' '.' '.' '.' 
boot boot N + 084 nop nop nop nop nop nop nop nop O + 085 nop nop nop nop nop nop nop nop O + 086 '<' '>' nop nop '<' '>' nop nop O + 087 fkey11 fkey23 fkey35 fkey47 scr11 scr11 scr11 scr11 O + 088 fkey12 fkey24 fkey36 fkey48 scr12 scr12 scr12 scr12 O + 089 cr cr nl nl cr cr nl nl O + 090 rctrl rctrl rctrl rctrl rctrl rctrl rctrl rctrl O + 091 '/' '/' '/' '/' '/' '/' '/' '/' N + 092 nscr pscr debug debug nop nop nop nop O + 093 ralt ralt ralt ralt ralt ralt ralt ralt O + 094 fkey49 fkey49 fkey49 fkey49 fkey49 fkey49 fkey49 fkey49 O + 095 fkey50 fkey50 fkey50 fkey50 fkey50 fkey50 fkey50 fkey50 O + 096 fkey51 fkey51 fkey51 fkey51 fkey51 fkey51 fkey51 fkey51 O + 097 fkey53 fkey53 fkey53 fkey53 fkey53 fkey53 fkey53 fkey53 O + 098 fkey55 fkey55 fkey55 fkey55 fkey55 fkey55 fkey55 fkey55 O + 099 fkey57 fkey57 fkey57 fkey57 fkey57 fkey57 fkey57 fkey57 O + 100 fkey58 fkey58 fkey58 fkey58 fkey58 fkey58 fkey58 fkey58 O + 101 fkey59 fkey59 fkey59 fkey59 fkey59 fkey59 fkey59 fkey59 O + 102 fkey60 paste fkey60 fkey60 fkey60 fkey60 fkey60 fkey60 O + 103 fkey61 fkey61 fkey61 fkey61 fkey61 fkey61 boot fkey61 O + 104 slock saver slock saver susp nop susp nop O + 105 fkey62 fkey62 fkey62 fkey62 fkey62 fkey62 fkey62 fkey62 O + 106 fkey63 fkey63 fkey63 fkey63 fkey63 fkey63 fkey63 fkey63 O + 107 fkey64 fkey64 fkey64 fkey64 fkey64 fkey64 fkey64 fkey64 O + 108 nop nop nop nop nop nop nop nop O + + dgra '`' ( 'a' 224 ) ( 'A' 192 ) ( 'e' 232 ) ( 'E' 200 ) + ( 'i' 236 ) ( 'I' 204 ) ( 'o' 242 ) ( 'O' 210 ) + ( 'u' 249 ) ( 'U' 217 ) + dacu 180 ( 'a' 225 ) ( 'A' 193 ) ( 'e' 233 ) ( 'E' 201 ) + ( 'i' 237 ) ( 'I' 205 ) ( 'o' 243 ) ( 'O' 211 ) + ( 'u' 250 ) ( 'U' 218 ) ( 'y' 253 ) ( 'Y' 221 ) + dcir '^' ( 'a' 226 ) ( 'A' 194 ) ( 'e' 234 ) ( 'E' 202 ) + ( 'i' 238 ) ( 'I' 206 ) ( 'o' 244 ) ( 'O' 212 ) + ( 'u' 251 ) ( 'U' 219 ) + dtil '~' ( 'a' 227 ) ( 'A' 195 ) ( 'n' 241 ) ( 'N' 209 ) + ( 'o' 245 ) ( 'O' 213 ) + dmac 000 + dbre 000 + ddot 000 + duml 168 ( 'a' 228 ) ( 'A' 196 ) ( 'e' 235 ) ( 'E' 203 ) + ( 'i' 239 ) ( 'I' 207 ) ( 'o' 246 ) ( 'O' 214 ) + ( 'u' 252 ) ( 'U' 220 ) ( 'y' 255 ) + dsla 000 + drin 176 ( 'a' 229 ) ( 'A' 197 ) + dced 184 ( 'c' 231 ) ( 'C' 199 ) + dapo 000 + ddac 000 + dogo 000 + dcar 000 Property changes on: projects/nand/share/syscons/keymaps/spanish.dvorak.kbd ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/nand/share/zoneinfo =================================================================== --- projects/nand/share/zoneinfo (revision 235262) +++ projects/nand/share/zoneinfo (revision 235263) Property changes on: projects/nand/share/zoneinfo ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/share/zoneinfo:r235157-235262 Index: projects/nand/sys/amd64/conf/GENERIC =================================================================== --- projects/nand/sys/amd64/conf/GENERIC (revision 235262) +++ projects/nand/sys/amd64/conf/GENERIC (revision 235263) @@ -1,335 +1,336 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/amd64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web 
server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu HAMMER ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options MD_ROOT # MD is a potential root device options NFSCL # New Network Filesystem Client options NFSD # New Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_PART_GPT # GUID Partition Tables. +options GEOM_RAID # Soft RAID functionality. options GEOM_LABEL # Provides labelization options COMPAT_FREEBSD32 # Compatible with i386 binaries options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel # Debugging support. Always need this: options KDB # Enable kernel debugger support. # For minimum debugger support (stable branch) use: #options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use this instead: options DDB # Support DDB. options GDB # Support remote GDB. options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel # CPU frequency control device cpufreq # Bus support. 
device acpi device pci # Floppy drives device fdc # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers options ATA_CAM # Handle legacy controllers with CAM options ATA_STATIC_ID # Static device numbering device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices options AHC_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~128k to driver. device ahd # AHA39320/29320 and onboard AIC79xx devices options AHD_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~215k to driver. device esp # AMD Am53C974 (Tekram DC-390(T)) device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 #device ncr # NCR/Symbios Logic device sym # NCR/Symbios Logic (newer chipsets + those of `ncr') device trm # Tekram DC395U/UW/F DC315U adapters device adv # Advansys SCSI adapters device adw # Advansys wide SCSI adapters device aic # Adaptec 15[012]x SCSI adapters, AIC-6[23]60. device bt # Buslogic/Mylex MultiMaster SCSI adapters device isci # Intel C600 SAS controller # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) device ses # Enclosure Services (SES and SAF-TE) device ctl # CAM Target Layer # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID #XXX it is not 64-bit clean, -scottl #device asr # DPT SmartRAID V, VI and Adaptec SCSI RAID device ciss # Compaq Smart RAID 5* device dpt # DPT Smartcache III, IV - See NOTES for options device hptmv # Highpoint RocketRAID 182x device hptrr # Highpoint RocketRAID 17xx, 22xx, 23xx, 25xx device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family #XXX pointer/int warnings #device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode device agp # support several AGP chipsets # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device plip # TCP/IP over parallel device ppi # Parallel port interface device #device vpo # Requires scbus and da device puc # Multi I/O cards and multi-channel UARTs # 
PCI Ethernet NICs. device bxe # Broadcom BCM57710/BCM57711/BCM57711E 10Gb Ethernet device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 Gigabit Ethernet Family device igb # Intel PRO/1000 PCIE Server Gigabit Family device ixgbe # Intel PRO/10GbE PCIE Ethernet Family device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device ae # Attansic/Atheros L2 FastEthernet device age # Attansic/Atheros L1 Gigabit Ethernet device alc # Atheros AR8131/AR8132 Ethernet device ale # Atheros AR8121/AR8113/AR8114 Ethernet device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn device dc # DEC/Intel 21143 and various workalikes device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device nfe # nVidia nForce MCP on-board Ethernet device nge # NatSemi DP83820 gigabit Ethernet #device nve # nVidia nForce MCP on-board Ethernet Networking device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sge # Silicon Integrated Systems SiS190/191 device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device stge # Sundance/Tamarack TC9021 gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # ISA Ethernet NICs. pccard NICs included. device cs # Crystal Semiconductor CS89x0 NIC # 'device ed' requires 'device miibus' device ed # NE[12]000, SMC Ultra, 3c503, DS8390 cards device ex # Intel EtherExpress Pro/10 and Pro/10+ device ep # Etherlink III based cards device fe # Fujitsu MB8696x based cards device sn # SMC's 9000 series of Ethernet chips device xe # Xircom pccard Ethernet # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit rate control algorithm device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros NIC's device ath_pci # Atheros pci/cardbus glue device ath_hal # pci/cardbus chip support options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors device ath_rate_sample # SampleRate tx rate control for ath #device bwi # Broadcom BCM430x/BCM431x wireless NICs. #device bwn # Broadcom BCM43xx wireless NICs. device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. 
device iwn # Intel 4965/1000/5000/6000 wireless NICs. device malo # Marvell Libertas wireless NICs. device mwl # Marvell 88W8363 802.11n wireless NICs. device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. device wpi # Intel 3945ABG wireless NICs. # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device faith # IPv6-to-IPv4 relaying (translation) device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # FireWire support device firewire # FireWire bus code # sbp(4) works for some systems but causes boot failure on others #device sbp # SCSI over FireWire (Requires scbus and da) device fwe # Ethernet over FireWire (non-standard!) device fwip # IP over FireWire (RFC 2734,3146) device dcons # Dumb console driver device dcons_crom # Configuration ROM for dcons # Sound support device sound # Generic sound driver (required) device snd_cmi # CMedia CMI8338/CMI8738 device snd_csa # Crystal Semiconductor CS461x/428x device snd_emu10kx # Creative SoundBlaster Live! and Audigy device snd_es137x # Ensoniq AudioPCI ES137x device snd_hda # Intel High Definition Audio device snd_ich # Intel, NVidia and other ICH AC'97 Audio device snd_via8233 # VIA VT8233x Audio # MMC/SD device mmc # MMC/SD bus device mmcsd # MMC/SD memory card device sdhci # Generic PCI SD Host Controller Index: projects/nand/sys/amd64/include/xen =================================================================== --- projects/nand/sys/amd64/include/xen (revision 235262) +++ projects/nand/sys/amd64/include/xen (revision 235263) Property changes on: projects/nand/sys/amd64/include/xen ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/amd64/include/xen:r235157-235262 Index: projects/nand/sys/arm/conf/genboardid.awk =================================================================== --- projects/nand/sys/arm/conf/genboardid.awk (nonexistent) +++ projects/nand/sys/arm/conf/genboardid.awk (revision 235263) @@ -0,0 +1,55 @@ +#!/bin/awk +# $FreeBSD$ + +#- +# Copyright (c) 2012 M. Warner Losh. All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+#
+# Generate FreeBSD's board ID defines from Linux's
+# arm board list.
+#
+# You can grab a new copy any time with:
+# fetch -o sys/arm/conf/mach-types http://www.arm.linux.org.uk/developer/machines/download.php
+#
+BEGIN { nr = 0; boardid[nr] = "ARM_BOARD_ID_NONE"; num[nr++] = 0; }
+/^#/ { next; }
+/^[ ]*$/ { next; }
+
+NF == 4 {
+	boardid[nr] = "ARM_BOARD_ID_"$3;
+	num[nr] = $4;
+	nr++
+}
+
+END {
+	printf("/* Arm board ID file generated automatically from Linux's mach-types file. */\n\n");
+	printf("#ifndef _SYS_ARM_ARM_BOARDID_H\n");
+	printf("#define _SYS_ARM_ARM_BOARDID_H\n\n");
+	for (i = 0; i < nr; i++) {
+		printf("#define %-30s %d\n", boardid[i], num[i]);
+	}
+	printf("\n#endif /* _SYS_ARM_ARM_BOARDID_H */\n");
+}
+
Property changes on: projects/nand/sys/arm/conf/genboardid.awk
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: projects/nand/sys/arm/conf/mach-types
===================================================================
--- projects/nand/sys/arm/conf/mach-types (nonexistent)
+++ projects/nand/sys/arm/conf/mach-types (revision 235263)
@@ -0,0 +1,4146 @@
+# Database of machine macros and numbers
+#
+# This file is linux/arch/arm/tools/mach-types
+#
+# Up to date versions of this file can be obtained from:
+#
+# http://www.arm.linux.org.uk/developer/machines/download.php
+#
+# Please do not send patches to this file; it is automatically generated!
+# To add an entry into this database, please see Documentation/arm/README, +# or visit: +# +# http://www.arm.linux.org.uk/developer/machines/?action=new +# +# Last update: Thu May 10 18:35:23 2012 +# +# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number +# +ebsa110 ARCH_EBSA110 EBSA110 0 +riscpc ARCH_RPC RISCPC 1 +nexuspci ARCH_NEXUSPCI NEXUSPCI 3 +ebsa285 ARCH_EBSA285 EBSA285 4 +netwinder ARCH_NETWINDER NETWINDER 5 +cats ARCH_CATS CATS 6 +tbox ARCH_TBOX TBOX 7 +co285 ARCH_CO285 CO285 8 +clps7110 ARCH_CLPS7110 CLPS7110 9 +archimedes ARCH_ARC ARCHIMEDES 10 +a5k ARCH_A5K A5K 11 +etoile ARCH_ETOILE ETOILE 12 +lacie_nas ARCH_LACIE_NAS LACIE_NAS 13 +clps7500 ARCH_CLPS7500 CLPS7500 14 +shark ARCH_SHARK SHARK 15 +brutus SA1100_BRUTUS BRUTUS 16 +personal_server ARCH_PERSONAL_SERVER PERSONAL_SERVER 17 +itsy SA1100_ITSY ITSY 18 +l7200 ARCH_L7200 L7200 19 +pleb SA1100_PLEB PLEB 20 +integrator ARCH_INTEGRATOR INTEGRATOR 21 +h3600 SA1100_H3600 H3600 22 +ixp1200 ARCH_IXP1200 IXP1200 23 +p720t ARCH_P720T P720T 24 +assabet SA1100_ASSABET ASSABET 25 +victor SA1100_VICTOR VICTOR 26 +lart SA1100_LART LART 27 +ranger SA1100_RANGER RANGER 28 +graphicsclient SA1100_GRAPHICSCLIENT GRAPHICSCLIENT 29 +xp860 SA1100_XP860 XP860 30 +cerf SA1100_CERF CERF 31 +nanoengine SA1100_NANOENGINE NANOENGINE 32 +fpic SA1100_FPIC FPIC 33 +extenex1 SA1100_EXTENEX1 EXTENEX1 34 +sherman SA1100_SHERMAN SHERMAN 35 +accelent_sa SA1100_ACCELENT ACCELENT_SA 36 +accelent_l7200 ARCH_L7200_ACCELENT ACCELENT_L7200 37 +netport SA1100_NETPORT NETPORT 38 +pangolin SA1100_PANGOLIN PANGOLIN 39 +yopy SA1100_YOPY YOPY 40 +coolidge SA1100_COOLIDGE COOLIDGE 41 +huw_webpanel SA1100_HUW_WEBPANEL HUW_WEBPANEL 42 +spotme ARCH_SPOTME SPOTME 43 +freebird ARCH_FREEBIRD FREEBIRD 44 +ti925 ARCH_TI925 TI925 45 +riscstation ARCH_RISCSTATION RISCSTATION 46 +cavy SA1100_CAVY CAVY 47 +jornada720 SA1100_JORNADA720 JORNADA720 48 +omnimeter SA1100_OMNIMETER OMNIMETER 49 +edb7211 ARCH_EDB7211 EDB7211 50 +citygo SA1100_CITYGO CITYGO 51 +pfs168 SA1100_PFS168 PFS168 52 +spot SA1100_SPOT SPOT 53 +flexanet SA1100_FLEXANET FLEXANET 54 +webpal ARCH_WEBPAL WEBPAL 55 +linpda SA1100_LINPDA LINPDA 56 +anakin ARCH_ANAKIN ANAKIN 57 +mvi SA1100_MVI MVI 58 +jupiter SA1100_JUPITER JUPITER 59 +psionw ARCH_PSIONW PSIONW 60 +aln SA1100_ALN ALN 61 +epxa ARCH_CAMELOT CAMELOT 62 +gds2200 SA1100_GDS2200 GDS2200 63 +netbook SA1100_PSION_SERIES7 PSION_SERIES7 64 +xfile SA1100_XFILE XFILE 65 +accelent_ep9312 ARCH_ACCELENT_EP9312 ACCELENT_EP9312 66 +ic200 ARCH_IC200 IC200 67 +creditlart SA1100_CREDITLART CREDITLART 68 +htm SA1100_HTM HTM 69 +iq80310 ARCH_IQ80310 IQ80310 70 +freebot SA1100_FREEBOT FREEBOT 71 +entel ARCH_ENTEL ENTEL 72 +enp3510 ARCH_ENP3510 ENP3510 73 +trizeps SA1100_TRIZEPS TRIZEPS 74 +nesa SA1100_NESA NESA 75 +venus ARCH_VENUS VENUS 76 +tardis ARCH_TARDIS TARDIS 77 +mercury ARCH_MERCURY MERCURY 78 +empeg SA1100_EMPEG EMPEG 79 +adi_evb ARCH_I80200FCC I80200FCC 80 +itt_cpb SA1100_ITT_CPB ITT_CPB 81 +svc SA1100_SVC SVC 82 +alpha2 SA1100_ALPHA2 ALPHA2 84 +alpha1 SA1100_ALPHA1 ALPHA1 85 +netarm ARCH_NETARM NETARM 86 +simpad SA1100_SIMPAD SIMPAD 87 +pda1 ARCH_PDA1 PDA1 88 +lubbock ARCH_LUBBOCK LUBBOCK 89 +aniko ARCH_ANIKO ANIKO 90 +clep7212 ARCH_CLEP7212 CLEP7212 91 +cs89712 ARCH_CS89712 CS89712 92 +weararm SA1100_WEARARM WEARARM 93 +possio_px SA1100_POSSIO_PX POSSIO_PX 94 +sidearm SA1100_SIDEARM SIDEARM 95 +stork SA1100_STORK STORK 96 +shannon SA1100_SHANNON SHANNON 97 +ace ARCH_ACE ACE 98 +ballyarm SA1100_BALLYARM BALLYARM 99 +simputer SA1100_SIMPUTER SIMPUTER 100 +nexterm 
SA1100_NEXTERM NEXTERM 101 +sa1100_elf SA1100_SA1100_ELF SA1100_ELF 102 +gator SA1100_GATOR GATOR 103 +granite ARCH_GRANITE GRANITE 104 +consus SA1100_CONSUS CONSUS 105 +aaed2000 ARCH_AAED2000 AAED2000 106 +cdb89712 ARCH_CDB89712 CDB89712 107 +graphicsmaster SA1100_GRAPHICSMASTER GRAPHICSMASTER 108 +adsbitsy SA1100_ADSBITSY ADSBITSY 109 +pxa_idp ARCH_PXA_IDP PXA_IDP 110 +plce ARCH_PLCE PLCE 111 +pt_system3 SA1100_PT_SYSTEM3 PT_SYSTEM3 112 +murphy ARCH_MEDALB MEDALB 113 +eagle ARCH_EAGLE EAGLE 114 +dsc21 ARCH_DSC21 DSC21 115 +dsc24 ARCH_DSC24 DSC24 116 +ti5472 ARCH_TI5472 TI5472 117 +autcpu12 ARCH_AUTCPU12 AUTCPU12 118 +uengine ARCH_UENGINE UENGINE 119 +bluestem SA1100_BLUESTEM BLUESTEM 120 +xingu8 ARCH_XINGU8 XINGU8 121 +bushstb ARCH_BUSHSTB BUSHSTB 122 +epsilon1 SA1100_EPSILON1 EPSILON1 123 +balloon SA1100_BALLOON BALLOON 124 +puppy ARCH_PUPPY PUPPY 125 +elroy SA1100_ELROY ELROY 126 +gms720 ARCH_GMS720 GMS720 127 +s24x ARCH_S24X S24X 128 +jtel_clep7312 ARCH_JTEL_CLEP7312 JTEL_CLEP7312 129 +cx821xx ARCH_CX821XX CX821XX 130 +edb7312 ARCH_EDB7312 EDB7312 131 +bsa1110 SA1100_BSA1110 BSA1110 132 +powerpin ARCH_POWERPIN POWERPIN 133 +openarm ARCH_OPENARM OPENARM 134 +whitechapel SA1100_WHITECHAPEL WHITECHAPEL 135 +h3100 SA1100_H3100 H3100 136 +h3800 SA1100_H3800 H3800 137 +blue_v1 ARCH_BLUE_V1 BLUE_V1 138 +pxa_cerf ARCH_PXA_CERF PXA_CERF 139 +arm7tevb ARCH_ARM7TEVB ARM7TEVB 140 +d7400 SA1100_D7400 D7400 141 +piranha ARCH_PIRANHA PIRANHA 142 +sbcamelot SA1100_SBCAMELOT SBCAMELOT 143 +kings SA1100_KINGS KINGS 144 +smdk2400 ARCH_SMDK2400 SMDK2400 145 +collie SA1100_COLLIE COLLIE 146 +idr ARCH_IDR IDR 147 +badge4 SA1100_BADGE4 BADGE4 148 +webnet ARCH_WEBNET WEBNET 149 +d7300 SA1100_D7300 D7300 150 +cep SA1100_CEP CEP 151 +fortunet ARCH_FORTUNET FORTUNET 152 +vc547x ARCH_VC547X VC547X 153 +filewalker SA1100_FILEWALKER FILEWALKER 154 +netgateway SA1100_NETGATEWAY NETGATEWAY 155 +symbol2800 SA1100_SYMBOL2800 SYMBOL2800 156 +suns SA1100_SUNS SUNS 157 +frodo SA1100_FRODO FRODO 158 +ms301 SA1100_MACH_TYTE_MS301 MACH_TYTE_MS301 159 +mx1ads ARCH_MX1ADS MX1ADS 160 +h7201 ARCH_H7201 H7201 161 +h7202 ARCH_H7202 H7202 162 +amico ARCH_AMICO AMICO 163 +iam SA1100_IAM IAM 164 +tt530 SA1100_TT530 TT530 165 +sam2400 ARCH_SAM2400 SAM2400 166 +jornada56x SA1100_JORNADA56X JORNADA56X 167 +active SA1100_ACTIVE ACTIVE 168 +iq80321 ARCH_IQ80321 IQ80321 169 +wid SA1100_WID WID 170 +sabinal ARCH_SABINAL SABINAL 171 +ixp425_matacumbe ARCH_IXP425_MATACUMBE IXP425_MATACUMBE 172 +miniprint SA1100_MINIPRINT MINIPRINT 173 +adm510x ARCH_ADM510X ADM510X 174 +svs200 SA1100_SVS200 SVS200 175 +atg_tcu ARCH_ATG_TCU ATG_TCU 176 +jornada820 SA1100_JORNADA820 JORNADA820 177 +s3c44b0 ARCH_S3C44B0 S3C44B0 178 +margis2 ARCH_MARGIS2 MARGIS2 179 +ks8695 ARCH_KS8695 KS8695 180 +brh ARCH_BRH BRH 181 +s3c2410 ARCH_S3C2410 S3C2410 182 +possio_px30 ARCH_POSSIO_PX30 POSSIO_PX30 183 +s3c2800 ARCH_S3C2800 S3C2800 184 +fleetwood SA1100_FLEETWOOD FLEETWOOD 185 +omaha ARCH_OMAHA OMAHA 186 +ta7 ARCH_TA7 TA7 187 +nova SA1100_NOVA NOVA 188 +hmk ARCH_HMK HMK 189 +karo ARCH_KARO KARO 190 +fester SA1100_FESTER FESTER 191 +gpi ARCH_GPI GPI 192 +smdk2410 ARCH_SMDK2410 SMDK2410 193 +i519 ARCH_I519 I519 194 +nexio SA1100_NEXIO NEXIO 195 +bitbox SA1100_BITBOX BITBOX 196 +g200 SA1100_G200 G200 197 +gill SA1100_GILL GILL 198 +pxa_mercury ARCH_PXA_MERCURY PXA_MERCURY 199 +ceiva ARCH_CEIVA CEIVA 200 +fret SA1100_FRET FRET 201 +emailphone SA1100_EMAILPHONE EMAILPHONE 202 +h3900 ARCH_H3900 H3900 203 +pxa1 ARCH_PXA1 PXA1 204 +koan369 SA1100_KOAN369 KOAN369 205 +cogent 
ARCH_COGENT COGENT 206 +esl_simputer ARCH_ESL_SIMPUTER ESL_SIMPUTER 207 +esl_simputer_clr ARCH_ESL_SIMPUTER_CLR ESL_SIMPUTER_CLR 208 +esl_simputer_bw ARCH_ESL_SIMPUTER_BW ESL_SIMPUTER_BW 209 +hhp_cradle ARCH_HHP_CRADLE HHP_CRADLE 210 +he500 ARCH_HE500 HE500 211 +inhandelf2 SA1100_INHANDELF2 INHANDELF2 212 +inhandftip SA1100_INHANDFTIP INHANDFTIP 213 +dnp1110 SA1100_DNP1110 DNP1110 214 +pnp1110 SA1100_PNP1110 PNP1110 215 +csb226 ARCH_CSB226 CSB226 216 +arnold SA1100_ARNOLD ARNOLD 217 +voiceblue MACH_VOICEBLUE VOICEBLUE 218 +jz8028 ARCH_JZ8028 JZ8028 219 +h5400 ARCH_H5400 H5400 220 +forte SA1100_FORTE FORTE 221 +acam SA1100_ACAM ACAM 222 +abox SA1100_ABOX ABOX 223 +atmel ARCH_ATMEL ATMEL 224 +sitsang ARCH_SITSANG SITSANG 225 +cpu1110lcdnet SA1100_CPU1110LCDNET CPU1110LCDNET 226 +mpl_vcma9 ARCH_MPL_VCMA9 MPL_VCMA9 227 +opus_a1 ARCH_OPUS_A1 OPUS_A1 228 +daytona ARCH_DAYTONA DAYTONA 229 +killbear SA1100_KILLBEAR KILLBEAR 230 +yoho ARCH_YOHO YOHO 231 +jasper ARCH_JASPER JASPER 232 +dsc25 ARCH_DSC25 DSC25 233 +omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234 +mnci ARCH_RAMSES RAMSES 235 +s28x ARCH_S28X S28X 236 +mport3 ARCH_MPORT3 MPORT3 237 +pxa_eagle250 ARCH_PXA_EAGLE250 PXA_EAGLE250 238 +pdb ARCH_PDB PDB 239 +blue_2g SA1100_BLUE_2G BLUE_2G 240 +bluearch SA1100_BLUEARCH BLUEARCH 241 +ixdp2400 ARCH_IXDP2400 IXDP2400 242 +ixdp2800 ARCH_IXDP2800 IXDP2800 243 +explorer SA1100_EXPLORER EXPLORER 244 +ixdp425 ARCH_IXDP425 IXDP425 245 +chimp ARCH_CHIMP CHIMP 246 +stork_nest ARCH_STORK_NEST STORK_NEST 247 +stork_egg ARCH_STORK_EGG STORK_EGG 248 +wismo SA1100_WISMO WISMO 249 +ezlinx ARCH_EZLINX EZLINX 250 +at91rm9200 ARCH_AT91RM9200 AT91RM9200 251 +adtech_orion ARCH_ADTECH_ORION ADTECH_ORION 252 +neptune ARCH_NEPTUNE NEPTUNE 253 +hackkit SA1100_HACKKIT HACKKIT 254 +pxa_wins30 ARCH_PXA_WINS30 PXA_WINS30 255 +lavinna SA1100_LAVINNA LAVINNA 256 +pxa_uengine ARCH_PXA_UENGINE PXA_UENGINE 257 +innokom ARCH_INNOKOM INNOKOM 258 +bms ARCH_BMS BMS 259 +ixcdp1100 ARCH_IXCDP1100 IXCDP1100 260 +prpmc1100 ARCH_PRPMC1100 PRPMC1100 261 +at91rm9200dk ARCH_AT91RM9200DK AT91RM9200DK 262 +armstick ARCH_ARMSTICK ARMSTICK 263 +armonie ARCH_ARMONIE ARMONIE 264 +mport1 ARCH_MPORT1 MPORT1 265 +s3c5410 ARCH_S3C5410 S3C5410 266 +zcp320a ARCH_ZCP320A ZCP320A 267 +i_box ARCH_I_BOX I_BOX 268 +stlc1502 ARCH_STLC1502 STLC1502 269 +siren ARCH_SIREN SIREN 270 +greenlake ARCH_GREENLAKE GREENLAKE 271 +argus ARCH_ARGUS ARGUS 272 +combadge SA1100_COMBADGE COMBADGE 273 +rokepxa ARCH_ROKEPXA ROKEPXA 274 +cintegrator ARCH_CINTEGRATOR CINTEGRATOR 275 +guidea07 ARCH_GUIDEA07 GUIDEA07 276 +tat257 ARCH_TAT257 TAT257 277 +igp2425 ARCH_IGP2425 IGP2425 278 +bluegrama ARCH_BLUEGRAMMA BLUEGRAMMA 279 +ipod ARCH_IPOD IPOD 280 +adsbitsyx ARCH_ADSBITSYX ADSBITSYX 281 +trizeps2 ARCH_TRIZEPS2 TRIZEPS2 282 +viper ARCH_VIPER VIPER 283 +adsbitsyplus SA1100_ADSBITSYPLUS ADSBITSYPLUS 284 +adsagc SA1100_ADSAGC ADSAGC 285 +stp7312 ARCH_STP7312 STP7312 286 +nx_phnx MACH_NX_PHNX NX_PHNX 287 +wep_ep250 ARCH_WEP_EP250 WEP_EP250 288 +inhandelf3 ARCH_INHANDELF3 INHANDELF3 289 +adi_coyote ARCH_ADI_COYOTE ADI_COYOTE 290 +iyonix ARCH_IYONIX IYONIX 291 +damicam1 ARCH_DAMICAM_SA1110 DAMICAM_SA1110 292 +meg03 ARCH_MEG03 MEG03 293 +pxa_whitechapel ARCH_PXA_WHITECHAPEL PXA_WHITECHAPEL 294 +nwsc ARCH_NWSC NWSC 295 +nwlarm ARCH_NWLARM NWLARM 296 +ixp425_mguard ARCH_IXP425_MGUARD IXP425_MGUARD 297 +pxa_netdcu4 ARCH_PXA_NETDCU4 PXA_NETDCU4 298 +ixdp2401 ARCH_IXDP2401 IXDP2401 299 +ixdp2801 ARCH_IXDP2801 IXDP2801 300 +zodiac ARCH_ZODIAC ZODIAC 301 +armmodul ARCH_ARMMODUL ARMMODUL 
+armmodul ARCH_ARMMODUL ARMMODUL 302
+ketop SA1100_KETOP KETOP 303
+av7200 ARCH_AV7200 AV7200 304
+arch_ti925 ARCH_ARCH_TI925 ARCH_TI925 305
+acq200 ARCH_ACQ200 ACQ200 306
+pt_dafit SA1100_PT_DAFIT PT_DAFIT 307
+ihba ARCH_IHBA IHBA 308
+quinque ARCH_QUINQUE QUINQUE 309
+nimbraone ARCH_NIMBRAONE NIMBRAONE 310
+nimbra29x ARCH_NIMBRA29X NIMBRA29X 311
+nimbra210 ARCH_NIMBRA210 NIMBRA210 312
+hhp_d95xx ARCH_HHP_D95XX HHP_D95XX 313
+labarm ARCH_LABARM LABARM 314
+m825xx ARCH_M825XX M825XX 315
+m7100 SA1100_M7100 M7100 316
+nipc2 ARCH_NIPC2 NIPC2 317
+fu7202 ARCH_FU7202 FU7202 318
+adsagx ARCH_ADSAGX ADSAGX 319
+pxa_pooh ARCH_PXA_POOH PXA_POOH 320
+bandon ARCH_BANDON BANDON 321
+pcm7210 ARCH_PCM7210 PCM7210 322
+nms9200 ARCH_NMS9200 NMS9200 323
+logodl ARCH_LOGODL LOGODL 324
+m7140 SA1100_M7140 M7140 325
+korebot ARCH_KOREBOT KOREBOT 326
+iq31244 ARCH_IQ31244 IQ31244 327
+koan393 SA1100_KOAN393 KOAN393 328
+inhandftip3 ARCH_INHANDFTIP3 INHANDFTIP3 329
+gonzo ARCH_GONZO GONZO 330
+bast ARCH_BAST BAST 331
+scanpass ARCH_SCANPASS SCANPASS 332
+ep7312_pooh ARCH_EP7312_POOH EP7312_POOH 333
+ta7s ARCH_TA7S TA7S 334
+ta7v ARCH_TA7V TA7V 335
+icarus SA1100_ICARUS ICARUS 336
+h1900 ARCH_H1900 H1900 337
+gemini SA1100_GEMINI GEMINI 338
+axim ARCH_AXIM AXIM 339
+audiotron ARCH_AUDIOTRON AUDIOTRON 340
+h2200 ARCH_H2200 H2200 341
+loox600 ARCH_LOOX600 LOOX600 342
+niop ARCH_NIOP NIOP 343
+dm310 ARCH_DM310 DM310 344
+seedpxa_c2 ARCH_SEEDPXA_C2 SEEDPXA_C2 345
+ixp4xx_mguardpci ARCH_IXP4XX_MGUARD_PCI IXP4XX_MGUARD_PCI 346
+h1940 ARCH_H1940 H1940 347
+scorpio ARCH_SCORPIO SCORPIO 348
+viva ARCH_VIVA VIVA 349
+pxa_xcard ARCH_PXA_XCARD PXA_XCARD 350
+csb335 ARCH_CSB335 CSB335 351
+ixrd425 ARCH_IXRD425 IXRD425 352
+iq80315 ARCH_IQ80315 IQ80315 353
+nmp7312 ARCH_NMP7312 NMP7312 354
+cx861xx ARCH_CX861XX CX861XX 355
+enp2611 ARCH_ENP2611 ENP2611 356
+xda SA1100_XDA XDA 357
+csir_ims ARCH_CSIR_IMS CSIR_IMS 358
+ixp421_dnaeeth ARCH_IXP421_DNAEETH IXP421_DNAEETH 359
+pocketserv9200 ARCH_POCKETSERV9200 POCKETSERV9200 360
+toto ARCH_TOTO TOTO 361
+s3c2440 ARCH_S3C2440 S3C2440 362
+ks8695p ARCH_KS8695P KS8695P 363
+se4000 ARCH_SE4000 SE4000 364
+quadriceps ARCH_QUADRICEPS QUADRICEPS 365
+bronco ARCH_BRONCO BRONCO 366
+esl_wireless_tab ARCH_ESL_WIRELESS_TAB ESL_WIRELESS_TAB 367
+esl_sofcomp ARCH_ESL_SOFCOMP ESL_SOFCOMP 368
+s5c7375 ARCH_S5C7375 S5C7375 369
+spearhead ARCH_SPEARHEAD SPEARHEAD 370
+pantera ARCH_PANTERA PANTERA 371
+prayoglite ARCH_PRAYOGLITE PRAYOGLITE 372
+gumstix ARCH_GUMSTIX GUMSTIX 373
+rcube ARCH_RCUBE RCUBE 374
+rea_olv ARCH_REA_OLV REA_OLV 375
+pxa_iphone ARCH_PXA_IPHONE PXA_IPHONE 376
+s3c3410 ARCH_S3C3410 S3C3410 377
+espd_4510b ARCH_ESPD_4510B ESPD_4510B 378
+mp1x ARCH_MP1X MP1X 379
+at91rm9200tb ARCH_AT91RM9200TB AT91RM9200TB 380
+adsvgx ARCH_ADSVGX ADSVGX 381
+omap_h2 MACH_OMAP_H2 OMAP_H2 382
+pelee ARCH_PELEE PELEE 383
+e740 MACH_E740 E740 384
+iq80331 ARCH_IQ80331 IQ80331 385
+versatile_pb ARCH_VERSATILE_PB VERSATILE_PB 387
+kev7a400 MACH_KEV7A400 KEV7A400 388
+lpd7a400 MACH_LPD7A400 LPD7A400 389
+lpd7a404 MACH_LPD7A404 LPD7A404 390
+fujitsu_camelot ARCH_FUJITSU_CAMELOT FUJITSU_CAMELOT 391
+janus2m ARCH_JANUS2M JANUS2M 392
+embtf MACH_EMBTF EMBTF 393
+hpm MACH_HPM HPM 394
+smdk2410tk MACH_SMDK2410TK SMDK2410TK 395
+smdk2410aj MACH_SMDK2410AJ SMDK2410AJ 396
+streetracer MACH_STREETRACER STREETRACER 397
+eframe MACH_EFRAME EFRAME 398
+csb337 MACH_CSB337 CSB337 399
+pxa_lark MACH_PXA_LARK PXA_LARK 400
+pxa_pnp2110 MACH_PNP2110 PNP2110 401
+tcc72x MACH_TCC72X TCC72X 402
+altair MACH_ALTAIR ALTAIR 403
+kc3 MACH_KC3 KC3 404
+sinteftd MACH_SINTEFTD SINTEFTD 405
+mainstone MACH_MAINSTONE MAINSTONE 406
+aday4x MACH_ADAY4X ADAY4X 407
+lite300 MACH_LITE300 LITE300 408
+s5c7376 MACH_S5C7376 S5C7376 409
+mt02 MACH_MT02 MT02 410
+mport3s MACH_MPORT3S MPORT3S 411
+ra_alpha MACH_RA_ALPHA RA_ALPHA 412
+xcep MACH_XCEP XCEP 413
+arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414
+stargate MACH_STARGATE STARGATE 415
+armadilloj MACH_ARMADILLOJ ARMADILLOJ 416
+elroy_jack MACH_ELROY_JACK ELROY_JACK 417
+backend MACH_BACKEND BACKEND 418
+s5linbox MACH_S5LINBOX S5LINBOX 419
+nomadik MACH_NOMADIK NOMADIK 420
+ia_cpu_9200 MACH_IA_CPU_9200 IA_CPU_9200 421
+at91_bja1 MACH_AT91_BJA1 AT91_BJA1 422
+corgi MACH_CORGI CORGI 423
+poodle MACH_POODLE POODLE 424
+ten MACH_TEN TEN 425
+roverp5p MACH_ROVERP5P ROVERP5P 426
+sc2700 MACH_SC2700 SC2700 427
+ex_eagle MACH_EX_EAGLE EX_EAGLE 428
+nx_pxa12 MACH_NX_PXA12 NX_PXA12 429
+nx_pxa5 MACH_NX_PXA5 NX_PXA5 430
+blackboard2 MACH_BLACKBOARD2 BLACKBOARD2 431
+i819 MACH_I819 I819 432
+ixmb995e MACH_IXMB995E IXMB995E 433
+skyrider MACH_SKYRIDER SKYRIDER 434
+skyhawk MACH_SKYHAWK SKYHAWK 435
+enterprise MACH_ENTERPRISE ENTERPRISE 436
+dep2410 MACH_DEP2410 DEP2410 437
+armcore MACH_ARMCORE ARMCORE 438
+hobbit MACH_HOBBIT HOBBIT 439
+h7210 MACH_H7210 H7210 440
+pxa_netdcu5 MACH_PXA_NETDCU5 PXA_NETDCU5 441
+acc MACH_ACC ACC 442
+esl_sarva MACH_ESL_SARVA ESL_SARVA 443
+xm250 MACH_XM250 XM250 444
+t6tc1xb MACH_T6TC1XB T6TC1XB 445
+ess710 MACH_ESS710 ESS710 446
+mx31ads MACH_MX31ADS MX31ADS 447
+himalaya MACH_HIMALAYA HIMALAYA 448
+bolfenk MACH_BOLFENK BOLFENK 449
+at91rm9200kr MACH_AT91RM9200KR AT91RM9200KR 450
+edb9312 MACH_EDB9312 EDB9312 451
+omap_generic MACH_OMAP_GENERIC OMAP_GENERIC 452
+aximx3 MACH_AXIMX3 AXIMX3 453
+eb67xdip MACH_EB67XDIP EB67XDIP 454
+webtxs MACH_WEBTXS WEBTXS 455
+hawk MACH_HAWK HAWK 456
+ccat91sbc001 MACH_CCAT91SBC001 CCAT91SBC001 457
+expresso MACH_EXPRESSO EXPRESSO 458
+h4000 MACH_H4000 H4000 459
+dino MACH_DINO DINO 460
+ml675k MACH_ML675K ML675K 461
+edb9301 MACH_EDB9301 EDB9301 462
+edb9315 MACH_EDB9315 EDB9315 463
+reciva_tt MACH_RECIVA_TT RECIVA_TT 464
+cstcb01 MACH_CSTCB01 CSTCB01 465
+cstcb1 MACH_CSTCB1 CSTCB1 466
+shadwell MACH_SHADWELL SHADWELL 467
+goepel263 MACH_GOEPEL263 GOEPEL263 468
+acq100 MACH_ACQ100 ACQ100 469
+mx1fs2 MACH_MX1FS2 MX1FS2 470
+hiptop_g1 MACH_HIPTOP_G1 HIPTOP_G1 471
+sparky MACH_SPARKY SPARKY 472
+ns9750 MACH_NS9750 NS9750 473
+phoenix MACH_PHOENIX PHOENIX 474
+vr1000 MACH_VR1000 VR1000 475
+deisterpxa MACH_DEISTERPXA DEISTERPXA 476
+bcm1160 MACH_BCM1160 BCM1160 477
+pcm022 MACH_PCM022 PCM022 478
+adsgcx MACH_ADSGCX ADSGCX 479
+dreadnaught MACH_DREADNAUGHT DREADNAUGHT 480
+dm320 MACH_DM320 DM320 481
+markov MACH_MARKOV MARKOV 482
+cos7a400 MACH_COS7A400 COS7A400 483
+milano MACH_MILANO MILANO 484
+ue9328 MACH_UE9328 UE9328 485
+uex255 MACH_UEX255 UEX255 486
+ue2410 MACH_UE2410 UE2410 487
+a620 MACH_A620 A620 488
+ocelot MACH_OCELOT OCELOT 489
+cheetah MACH_CHEETAH CHEETAH 490
+omap_perseus2 MACH_OMAP_PERSEUS2 OMAP_PERSEUS2 491
+zvue MACH_ZVUE ZVUE 492
+roverp1 MACH_ROVERP1 ROVERP1 493
+asidial2 MACH_ASIDIAL2 ASIDIAL2 494
+s3c24a0 MACH_S3C24A0 S3C24A0 495
+e800 MACH_E800 E800 496
+e750 MACH_E750 E750 497
+s3c5500 MACH_S3C5500 S3C5500 498
+smdk5500 MACH_SMDK5500 SMDK5500 499
+signalsync MACH_SIGNALSYNC SIGNALSYNC 500
+nbc MACH_NBC NBC 501
+kodiak MACH_KODIAK KODIAK 502
+netbookpro MACH_NETBOOKPRO NETBOOKPRO 503
+hw90200 MACH_HW90200 HW90200 504
+condor MACH_CONDOR CONDOR 505
+cup MACH_CUP CUP 506
+kite MACH_KITE KITE 507
+scb9328 MACH_SCB9328 SCB9328 508
+omap_h3 MACH_OMAP_H3 OMAP_H3 509
+omap_h4 MACH_OMAP_H4 OMAP_H4 510
+n10 MACH_N10 N10 511
+montejade MACH_MONTAJADE MONTAJADE 512
+sg560 MACH_SG560 SG560 513
+dp1000 MACH_DP1000 DP1000 514
+omap_osk MACH_OMAP_OSK OMAP_OSK 515
+rg100v3 MACH_RG100V3 RG100V3 516
+mx2ads MACH_MX2ADS MX2ADS 517
+pxa_kilo MACH_PXA_KILO PXA_KILO 518
+ixp4xx_eagle MACH_IXP4XX_EAGLE IXP4XX_EAGLE 519
+tosa MACH_TOSA TOSA 520
+mb2520f MACH_MB2520F MB2520F 521
+emc1000 MACH_EMC1000 EMC1000 522
+tidsc25 MACH_TIDSC25 TIDSC25 523
+akcpmxl MACH_AKCPMXL AKCPMXL 524
+av3xx MACH_AV3XX AV3XX 525
+avila MACH_AVILA AVILA 526
+pxa_mpm10 MACH_PXA_MPM10 PXA_MPM10 527
+pxa_kyanite MACH_PXA_KYANITE PXA_KYANITE 528
+sgold MACH_SGOLD SGOLD 529
+oscar MACH_OSCAR OSCAR 530
+epxa4usb2 MACH_EPXA4USB2 EPXA4USB2 531
+xsengine MACH_XSENGINE XSENGINE 532
+ip600 MACH_IP600 IP600 533
+mcan2 MACH_MCAN2 MCAN2 534
+ddi_blueridge MACH_DDI_BLUERIDGE DDI_BLUERIDGE 535
+skyminder MACH_SKYMINDER SKYMINDER 536
+lpd79520 MACH_LPD79520 LPD79520 537
+edb9302 MACH_EDB9302 EDB9302 538
+hw90340 MACH_HW90340 HW90340 539
+cip_box MACH_CIP_BOX CIP_BOX 540
+ivpn MACH_IVPN IVPN 541
+rsoc2 MACH_RSOC2 RSOC2 542
+husky MACH_HUSKY HUSKY 543
+boxer MACH_BOXER BOXER 544
+shepherd MACH_SHEPHERD SHEPHERD 545
+aml42800aa MACH_AML42800AA AML42800AA 546
+lpc2294 MACH_LPC2294 LPC2294 548
+switchgrass MACH_SWITCHGRASS SWITCHGRASS 549
+ens_cmu MACH_ENS_CMU ENS_CMU 550
+mm6_sdb MACH_MM6_SDB MM6_SDB 551
+saturn MACH_SATURN SATURN 552
+i30030evb MACH_I30030EVB I30030EVB 553
+mxc27530evb MACH_MXC27530EVB MXC27530EVB 554
+smdk2800 MACH_SMDK2800 SMDK2800 555
+mtwilson MACH_MTWILSON MTWILSON 556
+ziti MACH_ZITI ZITI 557
+grandfather MACH_GRANDFATHER GRANDFATHER 558
+tengine MACH_TENGINE TENGINE 559
+s3c2460 MACH_S3C2460 S3C2460 560
+pdm MACH_PDM PDM 561
+h4700 MACH_H4700 H4700 562
+h6300 MACH_H6300 H6300 563
+rz1700 MACH_RZ1700 RZ1700 564
+a716 MACH_A716 A716 565
+estk2440a MACH_ESTK2440A ESTK2440A 566
+atwixp425 MACH_ATWIXP425 ATWIXP425 567
+csb336 MACH_CSB336 CSB336 568
+rirm2 MACH_RIRM2 RIRM2 569
+cx23518 MACH_CX23518 CX23518 570
+cx2351x MACH_CX2351X CX2351X 571
+computime MACH_COMPUTIME COMPUTIME 572
+izarus MACH_IZARUS IZARUS 573
+pxa_rts MACH_RTS RTS 574
+se5100 MACH_SE5100 SE5100 575
+s3c2510 MACH_S3C2510 S3C2510 576
+csb437tl MACH_CSB437TL CSB437TL 577
+slauson MACH_SLAUSON SLAUSON 578
+pearlriver MACH_PEARLRIVER PEARLRIVER 579
+tdc_p210 MACH_TDC_P210 TDC_P210 580
+sg580 MACH_SG580 SG580 581
+wrsbcarm7 MACH_WRSBCARM7 WRSBCARM7 582
+ipd MACH_IPD IPD 583
+pxa_dnp2110 MACH_PXA_DNP2110 PXA_DNP2110 584
+xaeniax MACH_XAENIAX XAENIAX 585
+somn4250 MACH_SOMN4250 SOMN4250 586
+pleb2 MACH_PLEB2 PLEB2 587
+cornwallis MACH_CORNWALLIS CORNWALLIS 588
+gurney_drv MACH_GURNEY_DRV GURNEY_DRV 589
+chaffee MACH_CHAFFEE CHAFFEE 590
+rms101 MACH_RMS101 RMS101 591
+rx3715 MACH_RX3715 RX3715 592
+swift MACH_SWIFT SWIFT 593
+roverp7 MACH_ROVERP7 ROVERP7 594
+pr818s MACH_PR818S PR818S 595
+trxpro MACH_TRXPRO TRXPRO 596
+nslu2 MACH_NSLU2 NSLU2 597
+e400 MACH_E400 E400 598
+trab MACH_TRAB TRAB 599
+cmc_pu2 MACH_CMC_PU2 CMC_PU2 600
+fulcrum MACH_FULCRUM FULCRUM 601
+netgate42x MACH_NETGATE42X NETGATE42X 602
+str710 MACH_STR710 STR710 603
+ixdpg425 MACH_IXDPG425 IXDPG425 604
+tomtomgo MACH_TOMTOMGO TOMTOMGO 605
+versatile_ab MACH_VERSATILE_AB VERSATILE_AB 606
+edb9307 MACH_EDB9307 EDB9307 607
+sg565 MACH_SG565 SG565 608
+lpd79524 MACH_LPD79524 LPD79524 609
+lpd79525 MACH_LPD79525 LPD79525 610
+rms100 MACH_RMS100 RMS100 611
+kb9200 MACH_KB9200 KB9200 612
+sx1 MACH_SX1 SX1 613
+hms39c7092 MACH_HMS39C7092 HMS39C7092 614
+armadillo MACH_ARMADILLO ARMADILLO 615
+ipcu MACH_IPCU IPCU 616
+loox720 MACH_LOOX720 LOOX720 617
+ixdp465 MACH_IXDP465 IXDP465 618
+ixdp2351 MACH_IXDP2351 IXDP2351 619
+adsvix MACH_ADSVIX ADSVIX 620
+dm270 MACH_DM270 DM270 621
+socltplus MACH_SOCLTPLUS SOCLTPLUS 622
+ecia MACH_ECIA ECIA 623
+cm4008 MACH_CM4008 CM4008 624
+p2001 MACH_P2001 P2001 625
+twister MACH_TWISTER TWISTER 626
+mudshark MACH_MUDSHARK MUDSHARK 627
+hb2 MACH_HB2 HB2 628
+iq80332 MACH_IQ80332 IQ80332 629
+sendt MACH_SENDT SENDT 630
+mx2jazz MACH_MX2JAZZ MX2JAZZ 631
+multiio MACH_MULTIIO MULTIIO 632
+hrdisplay MACH_HRDISPLAY HRDISPLAY 633
+mxc27530ads MACH_MXC27530ADS MXC27530ADS 634
+trizeps3 MACH_TRIZEPS3 TRIZEPS3 635
+zefeerdza MACH_ZEFEERDZA ZEFEERDZA 636
+zefeerdzb MACH_ZEFEERDZB ZEFEERDZB 637
+zefeerdzg MACH_ZEFEERDZG ZEFEERDZG 638
+zefeerdzn MACH_ZEFEERDZN ZEFEERDZN 639
+zefeerdzq MACH_ZEFEERDZQ ZEFEERDZQ 640
+gtwx5715 MACH_GTWX5715 GTWX5715 641
+astro_jack MACH_ASTRO_JACK ASTRO_JACK 643
+tip03 MACH_TIP03 TIP03 644
+a9200ec MACH_A9200EC A9200EC 645
+pnx0105 MACH_PNX0105 PNX0105 646
+adcpoecpu MACH_ADCPOECPU ADCPOECPU 647
+csb637 MACH_CSB637 CSB637 648
+mb9200 MACH_MB9200 MB9200 650
+kulun MACH_KULUN KULUN 651
+snapper MACH_SNAPPER SNAPPER 652
+optima MACH_OPTIMA OPTIMA 653
+dlhsbc MACH_DLHSBC DLHSBC 654
+x30 MACH_X30 X30 655
+n30 MACH_N30 N30 656
+manga_ks8695 MACH_MANGA_KS8695 MANGA_KS8695 657
+ajax MACH_AJAX AJAX 658
+nec_mp900 MACH_NEC_MP900 NEC_MP900 659
+vvtk1000 MACH_VVTK1000 VVTK1000 661
+kafa MACH_KAFA KAFA 662
+vvtk3000 MACH_VVTK3000 VVTK3000 663
+pimx1 MACH_PIMX1 PIMX1 664
+ollie MACH_OLLIE OLLIE 665
+skymax MACH_SKYMAX SKYMAX 666
+jazz MACH_JAZZ JAZZ 667
+tel_t3 MACH_TEL_T3 TEL_T3 668
+aisino_fcr255 MACH_AISINO_FCR255 AISINO_FCR255 669
+btweb MACH_BTWEB BTWEB 670
+dbg_lh79520 MACH_DBG_LH79520 DBG_LH79520 671
+cm41xx MACH_CM41XX CM41XX 672
+ts72xx MACH_TS72XX TS72XX 673
+nggpxa MACH_NGGPXA NGGPXA 674
+csb535 MACH_CSB535 CSB535 675
+csb536 MACH_CSB536 CSB536 676
+pxa_trakpod MACH_PXA_TRAKPOD PXA_TRAKPOD 677
+praxis MACH_PRAXIS PRAXIS 678
+lh75411 MACH_LH75411 LH75411 679
+otom MACH_OTOM OTOM 680
+nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681
+loox410 MACH_LOOX410 LOOX410 682
+westlake MACH_WESTLAKE WESTLAKE 683
+nsb MACH_NSB NSB 684
+esl_sarva_stn MACH_ESL_SARVA_STN ESL_SARVA_STN 685
+esl_sarva_tft MACH_ESL_SARVA_TFT ESL_SARVA_TFT 686
+esl_sarva_iad MACH_ESL_SARVA_IAD ESL_SARVA_IAD 687
+esl_sarva_acc MACH_ESL_SARVA_ACC ESL_SARVA_ACC 688
+typhoon MACH_TYPHOON TYPHOON 689
+cnav MACH_CNAV CNAV 690
+a730 MACH_A730 A730 691
+netstar MACH_NETSTAR NETSTAR 692
+supercon MACH_PHASEFALE_SUPERCON PHASEFALE_SUPERCON 693
+shiva1100 MACH_SHIVA1100 SHIVA1100 694
+etexsc MACH_ETEXSC ETEXSC 695
+ixdpg465 MACH_IXDPG465 IXDPG465 696
+a9m2410 MACH_A9M2410 A9M2410 697
+a9m2440 MACH_A9M2440 A9M2440 698
+a9m9750 MACH_A9M9750 A9M9750 699
+a9m9360 MACH_A9M9360 A9M9360 700
+unc90 MACH_UNC90 UNC90 701
+eco920 MACH_ECO920 ECO920 702
+satview MACH_SATVIEW SATVIEW 703
+roadrunner MACH_ROADRUNNER ROADRUNNER 704
+at91rm9200ek MACH_AT91RM9200EK AT91RM9200EK 705
+gp32 MACH_GP32 GP32 706
+gem MACH_GEM GEM 707
+i858 MACH_I858 I858 708
+hx2750 MACH_HX2750 HX2750 709
+mxc91131evb MACH_MXC91131EVB MXC91131EVB 710
+p700 MACH_P700 P700 711
+cpe MACH_CPE CPE 712
+spitz MACH_SPITZ SPITZ 713
+nimbra340 MACH_NIMBRA340 NIMBRA340 714
+lpc22xx MACH_LPC22XX LPC22XX 715
+omap_comet3 MACH_COMET3 COMET3 716
+omap_comet4 MACH_COMET4 COMET4 717
+csb625 MACH_CSB625 CSB625 718
+fortunet2 MACH_FORTUNET2 FORTUNET2 719
+s5h2200 MACH_S5H2200 S5H2200 720
+optorm920 MACH_OPTORM920 OPTORM920 721
+adsbitsyxb MACH_ADSBITSYXB ADSBITSYXB 722
+adssphere MACH_ADSSPHERE ADSSPHERE 723
+adsportal MACH_ADSPORTAL ADSPORTAL 724
+ln2410sbc MACH_LN2410SBC LN2410SBC 725
+cb3rufc MACH_CB3RUFC CB3RUFC 726
+mp2usb MACH_MP2USB MP2USB 727
+ntnp425c MACH_NTNP425C NTNP425C 728
+colibri MACH_COLIBRI COLIBRI 729
+pcm7220 MACH_PCM7220 PCM7220 730
+gateway7001 MACH_GATEWAY7001 GATEWAY7001 731
+pcm027 MACH_PCM027 PCM027 732
+cmpxa MACH_CMPXA CMPXA 733
+anubis MACH_ANUBIS ANUBIS 734
+ite8152 MACH_ITE8152 ITE8152 735
+lpc3xxx MACH_LPC3XXX LPC3XXX 736
+puppeteer MACH_PUPPETEER PUPPETEER 737
+e570 MACH_E570 E570 739
+x50 MACH_X50 X50 740
+recon MACH_RECON RECON 741
+xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742
+fpic2 MACH_FPIC2 FPIC2 743
+akita MACH_AKITA AKITA 744
+a81 MACH_A81 A81 745
+svm_sc25x MACH_SVM_SC25X SVM_SC25X 746
+vt020 MACH_VADATECH020 VADATECH020 747
+tli MACH_TLI TLI 748
+edb9315lc MACH_EDB9315LC EDB9315LC 749
+passec MACH_PASSEC PASSEC 750
+ds_tiger MACH_DS_TIGER DS_TIGER 751
+e310 MACH_E310 E310 752
+e330 MACH_E330 E330 753
+rt3000 MACH_RT3000 RT3000 754
+nokia770 MACH_NOKIA770 NOKIA770 755
+pnx0106 MACH_PNX0106 PNX0106 756
+hx21xx MACH_HX21XX HX21XX 757
+faraday MACH_FARADAY FARADAY 758
+sbc9312 MACH_SBC9312 SBC9312 759
+batman MACH_BATMAN BATMAN 760
+jpd201 MACH_JPD201 JPD201 761
+mipsa MACH_MIPSA MIPSA 762
+kacom MACH_KACOM KACOM 763
+swarcocpu MACH_SWARCOCPU SWARCOCPU 764
+swarcodsl MACH_SWARCODSL SWARCODSL 765
+blueangel MACH_BLUEANGEL BLUEANGEL 766
+hairygrama MACH_HAIRYGRAMA HAIRYGRAMA 767
+banff MACH_BANFF BANFF 768
+carmeva MACH_CARMEVA CARMEVA 769
+sam255 MACH_SAM255 SAM255 770
+ppm10 MACH_PPM10 PPM10 771
+edb9315a MACH_EDB9315A EDB9315A 772
+sunset MACH_SUNSET SUNSET 773
+stargate2 MACH_STARGATE2 STARGATE2 774
+intelmote2 MACH_INTELMOTE2 INTELMOTE2 775
+trizeps4 MACH_TRIZEPS4 TRIZEPS4 776
+mainstone2 MACH_MAINSTONE2 MAINSTONE2 777
+ez_ixp42x MACH_EZ_IXP42X EZ_IXP42X 778
+tapwave_zodiac MACH_TAPWAVE_ZODIAC TAPWAVE_ZODIAC 779
+universalmeter MACH_UNIVERSALMETER UNIVERSALMETER 780
+hicoarm9 MACH_HICOARM9 HICOARM9 781
+pnx4008 MACH_PNX4008 PNX4008 782
+kws6000 MACH_KWS6000 KWS6000 783
+portux920t MACH_PORTUX920T PORTUX920T 784
+ez_x5 MACH_EZ_X5 EZ_X5 785
+omap_rudolph MACH_OMAP_RUDOLPH OMAP_RUDOLPH 786
+cpuat91 MACH_CPUAT91 CPUAT91 787
+rea9200 MACH_REA9200 REA9200 788
+acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789
+ixp425 MACH_IXP425 IXP425 790
+i30030ads MACH_I30030ADS I30030ADS 791
+perch MACH_PERCH PERCH 792
+eis05r1 MACH_EIS05R1 EIS05R1 793
+pepperpad MACH_PEPPERPAD PEPPERPAD 794
+sb3010 MACH_SB3010 SB3010 795
+rm9200 MACH_RM9200 RM9200 796
+dma03 MACH_DMA03 DMA03 797
+road_s101 MACH_ROAD_S101 ROAD_S101 798
+iq81340sc MACH_IQ81340SC IQ81340SC 799
+iq_nextgen_b MACH_IQ_NEXTGEN_B IQ_NEXTGEN_B 800
+iq81340mc MACH_IQ81340MC IQ81340MC 801
+iq_nextgen_d MACH_IQ_NEXTGEN_D IQ_NEXTGEN_D 802
+iq_nextgen_e MACH_IQ_NEXTGEN_E IQ_NEXTGEN_E 803
+mallow_at91 MACH_MALLOW_AT91 MALLOW_AT91 804
+cybertracker_i MACH_CYBERTRACKER_I CYBERTRACKER_I 805
+gesbc931x MACH_GESBC931X GESBC931X 806
+centipad MACH_CENTIPAD CENTIPAD 807
+armsoc MACH_ARMSOC ARMSOC 808
+se4200 MACH_SE4200 SE4200 809
+ems197a MACH_EMS197A EMS197A 810
+micro9 MACH_MICRO9 MICRO9 811
+micro9l MACH_MICRO9L MICRO9L 812
+uc5471dsp MACH_UC5471DSP UC5471DSP 813
+sj5471eng MACH_SJ5471ENG SJ5471ENG 814
+none MACH_CMPXA26X CMPXA26X 815
+nc1 MACH_NC NC 816
+omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817
+ajax52x MACH_AJAX52X AJAX52X 818
+siriustar MACH_SIRIUSTAR SIRIUSTAR 819
+iodata_hdlg MACH_IODATA_HDLG IODATA_HDLG 820
+at91rm9200utl MACH_AT91RM9200UTL AT91RM9200UTL 821
+biosafe MACH_BIOSAFE BIOSAFE 822
+mp1000 MACH_MP1000 MP1000 823
+parsy MACH_PARSY PARSY 824
+ccxp270 MACH_CCXP CCXP 825
+omap_gsample MACH_OMAP_GSAMPLE OMAP_GSAMPLE 826
+realview_eb MACH_REALVIEW_EB REALVIEW_EB 827
+samoa MACH_SAMOA SAMOA 828
+palmt3 MACH_PALMT3 PALMT3 829
+i878 MACH_I878 I878 830
+borzoi MACH_BORZOI BORZOI 831
+gecko MACH_GECKO GECKO 832
+ds101 MACH_DS101 DS101 833
+omap_palmtt2 MACH_OMAP_PALMTT2 OMAP_PALMTT2 834
+palmld MACH_PALMLD PALMLD 835
+cc9c MACH_CC9C CC9C 836
+sbc1670 MACH_SBC1670 SBC1670 837
+ixdp28x5 MACH_IXDP28X5 IXDP28X5 838
+omap_palmtt MACH_OMAP_PALMTT OMAP_PALMTT 839
+ml696k MACH_ML696K ML696K 840
+arcom_zeus MACH_ARCOM_ZEUS ARCOM_ZEUS 841
+osiris MACH_OSIRIS OSIRIS 842
+maestro MACH_MAESTRO MAESTRO 843
+palmte2 MACH_PALMTE2 PALMTE2 844
+ixbbm MACH_IXBBM IXBBM 845
+mx27ads MACH_MX27ADS MX27ADS 846
+ax8004 MACH_AX8004 AX8004 847
+at91sam9261ek MACH_AT91SAM9261EK AT91SAM9261EK 848
+loft MACH_LOFT LOFT 849
+magpie MACH_MAGPIE MAGPIE 850
+mx21ads MACH_MX21ADS MX21ADS 851
+mb87m3400 MACH_MB87M3400 MB87M3400 852
+mguard_delta MACH_MGUARD_DELTA MGUARD_DELTA 853
+davinci_dvdp MACH_DAVINCI_DVDP DAVINCI_DVDP 854
+htcuniversal MACH_HTCUNIVERSAL HTCUNIVERSAL 855
+tpad MACH_TPAD TPAD 856
+roverp3 MACH_ROVERP3 ROVERP3 857
+jornada928 MACH_JORNADA928 JORNADA928 858
+mv88fxx81 MACH_MV88FXX81 MV88FXX81 859
+stmp36xx MACH_STMP36XX STMP36XX 860
+sxni79524 MACH_SXNI79524 SXNI79524 861
+ams_delta MACH_AMS_DELTA AMS_DELTA 862
+uranium MACH_URANIUM URANIUM 863
+ucon MACH_UCON UCON 864
+nas100d MACH_NAS100D NAS100D 865
+l083 MACH_L083_1000 L083_1000 866
+ezx MACH_EZX EZX 867
+pnx5220 MACH_PNX5220 PNX5220 868
+butte MACH_BUTTE BUTTE 869
+srm2 MACH_SRM2 SRM2 870
+dsbr MACH_DSBR DSBR 871
+crystalball MACH_CRYSTALBALL CRYSTALBALL 872
+tinypxa27x MACH_TINYPXA27X TINYPXA27X 873
+herbie MACH_HERBIE HERBIE 874
+magician MACH_MAGICIAN MAGICIAN 875
+cm4002 MACH_CM4002 CM4002 876
+b4 MACH_B4 B4 877
+maui MACH_MAUI MAUI 878
+cybertracker_g MACH_CYBERTRACKER_G CYBERTRACKER_G 879
+nxdkn MACH_NXDKN NXDKN 880
+mio8390 MACH_MIO8390 MIO8390 881
+omi_board MACH_OMI_BOARD OMI_BOARD 882
+mx21civ MACH_MX21CIV MX21CIV 883
+mahi_cdac MACH_MAHI_CDAC MAHI_CDAC 884
+palmtx MACH_PALMTX PALMTX 885
+s3c2413 MACH_S3C2413 S3C2413 887
+samsys_ep0 MACH_SAMSYS_EP0 SAMSYS_EP0 888
+wg302v1 MACH_WG302V1 WG302V1 889
+wg302v2 MACH_WG302V2 WG302V2 890
+eb42x MACH_EB42X EB42X 891
+iq331es MACH_IQ331ES IQ331ES 892
+cosydsp MACH_COSYDSP COSYDSP 893
+uplat7d_proto MACH_UPLAT7D UPLAT7D 894
+ptdavinci MACH_PTDAVINCI PTDAVINCI 895
+mbus MACH_MBUS MBUS 896
+nadia2vb MACH_NADIA2VB NADIA2VB 897
+r1000 MACH_R1000 R1000 898
+hw90250 MACH_HW90250 HW90250 899
+omap_2430sdp MACH_OMAP_2430SDP OMAP_2430SDP 900
+davinci_evm MACH_DAVINCI_EVM DAVINCI_EVM 901
+omap_tornado MACH_OMAP_TORNADO OMAP_TORNADO 902
+olocreek MACH_OLOCREEK OLOCREEK 903
+palmz72 MACH_PALMZ72 PALMZ72 904
+nxdb500 MACH_NXDB500 NXDB500 905
+apf9328 MACH_APF9328 APF9328 906
+omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907
+omap_twip MACH_OMAP_TWIP OMAP_TWIP 908
+treo650 MACH_TREO650 TREO650 909
+acumen MACH_ACUMEN ACUMEN 910
+xp100 MACH_XP100 XP100 911
+fs2410 MACH_FS2410 FS2410 912
+pxa270_cerf MACH_PXA270_CERF PXA270_CERF 913
+sq2ftlpalm MACH_SQ2FTLPALM SQ2FTLPALM 914
+bsemserver MACH_BSEMSERVER BSEMSERVER 915
+netclient MACH_NETCLIENT NETCLIENT 916
+palmt5 MACH_PALMT5 PALMT5 917
+palmtc MACH_PALMTC PALMTC 918
+omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
+mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
+rea_cpu2 MACH_REA_2D REA_2D 921
+eti3e524 MACH_TI3E524 TI3E524 922
+ateb9200 MACH_ATEB9200 ATEB9200 923
+auckland MACH_AUCKLAND AUCKLAND 924
+ak3220m MACH_AK3320M AK3320M 925
+duramax MACH_DURAMAX DURAMAX 926
+n35 MACH_N35 N35 927
+pronghorn MACH_PRONGHORN PRONGHORN 928
+fundy MACH_FUNDY FUNDY 929
+logicpd_pxa270 MACH_LOGICPD_PXA270 LOGICPD_PXA270 930
+cpu777 MACH_CPU777 CPU777 931
+simicon9201 MACH_SIMICON9201 SIMICON9201 932
+leap2_hpm MACH_LEAP2_HPM LEAP2_HPM 933
+cm922txa10 MACH_CM922TXA10 CM922TXA10 934
+sandgate MACH_PXA PXA 935
+sandgate2 MACH_SANDGATE2 SANDGATE2 936
+sandgate2g MACH_SANDGATE2G SANDGATE2G 937
+sandgate2p MACH_SANDGATE2P SANDGATE2P 938
+fred_jack MACH_FRED_JACK FRED_JACK 939
+ttg_color1 MACH_TTG_COLOR1 TTG_COLOR1 940
+nxeb500hmi MACH_NXEB500HMI NXEB500HMI 941
+netdcu8 MACH_NETDCU8 NETDCU8 942
+ng_fvx538 MACH_NG_FVX538 NG_FVX538 944
+ng_fvs338 MACH_NG_FVS338 NG_FVS338 945
+pnx4103 MACH_PNX4103 PNX4103 946
+hesdb MACH_HESDB HESDB 947
+xsilo MACH_XSILO XSILO 948
+espresso MACH_ESPRESSO ESPRESSO 949
+emlc MACH_EMLC EMLC 950
+sisteron MACH_SISTERON SISTERON 951
+rx1950 MACH_RX1950 RX1950 952
+tsc_venus MACH_TSC_VENUS TSC_VENUS 953
+ds101j MACH_DS101J DS101J 954
+mxc30030ads MACH_MXC30030ADS MXC30030ADS 955
+fujitsu_wimaxsoc MACH_FUJITSU_WIMAXSOC FUJITSU_WIMAXSOC 956
+dualpcmodem MACH_DUALPCMODEM DUALPCMODEM 957
+gesbc9312 MACH_GESBC9312 GESBC9312 958
+htcapache MACH_HTCAPACHE HTCAPACHE 959
+ixdp435 MACH_IXDP435 IXDP435 960
+catprovt100 MACH_CATPROVT100 CATPROVT100 961
+picotux1xx MACH_PICOTUX1XX PICOTUX1XX 962
+picotux2xx MACH_PICOTUX2XX PICOTUX2XX 963
+dsmg600 MACH_DSMG600 DSMG600 964
+empc2 MACH_EMPC2 EMPC2 965
+ventura MACH_VENTURA VENTURA 966
+phidget_sbc MACH_PHIDGET_SBC PHIDGET_SBC 967
+ij3k MACH_IJ3K IJ3K 968
+pisgah MACH_PISGAH PISGAH 969
+omap_fsample MACH_OMAP_FSAMPLE OMAP_FSAMPLE 970
+sg720 MACH_SG720 SG720 971
+redfox MACH_REDFOX REDFOX 972
+mysh_ep9315_1 MACH_MYSH_EP9315_1 MYSH_EP9315_1 973
+tpf106 MACH_TPF106 TPF106 974
+at91rm9200kg MACH_AT91RM9200KG AT91RM9200KG 975
+rcmt2 MACH_SLEDB SLEDB 976
+ontrack MACH_ONTRACK ONTRACK 977
+pm1200 MACH_PM1200 PM1200 978
+ess24562 MACH_ESS24XXX ESS24XXX 979
+coremp7 MACH_COREMP7 COREMP7 980
+nexcoder_6446 MACH_NEXCODER_6446 NEXCODER_6446 981
+stvc8380 MACH_STVC8380 STVC8380 982
+teklynx MACH_TEKLYNX TEKLYNX 983
+carbonado MACH_CARBONADO CARBONADO 984
+sysmos_mp730 MACH_SYSMOS_MP730 SYSMOS_MP730 985
+snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986
+pgigim MACH_PGIGIM PGIGIM 987
+ptx9160p2 MACH_PTX9160P2 PTX9160P2 988
+dcore1 MACH_DCORE1 DCORE1 989
+victorpxa MACH_VICTORPXA VICTORPXA 990
+mx2dtb MACH_MX2DTB MX2DTB 991
+pxa_irex_er0100 MACH_PXA_IREX_ER0100 PXA_IREX_ER0100 992
+omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993
+bartec_deg MACH_BARTEC_DEG BARTEC_DEG 994
+hw50251 MACH_HW50251 HW50251 995
+ibox MACH_IBOX IBOX 996
+atlaslh7a404 MACH_ATLASLH7A404 ATLASLH7A404 997
+pt2026 MACH_PT2026 PT2026 998
+htcalpine MACH_HTCALPINE HTCALPINE 999
+bartec_vtu MACH_BARTEC_VTU BARTEC_VTU 1000
+vcoreii MACH_VCOREII VCOREII 1001
+pdnb3 MACH_PDNB3 PDNB3 1002
+htcbeetles MACH_HTCBEETLES HTCBEETLES 1003
+s3c6400 MACH_S3C6400 S3C6400 1004
+s3c2443 MACH_S3C2443 S3C2443 1005
+omap_ldk MACH_OMAP_LDK OMAP_LDK 1006
+smdk2460 MACH_SMDK2460 SMDK2460 1007
+smdk2440 MACH_SMDK2440 SMDK2440 1008
+smdk2412 MACH_SMDK2412 SMDK2412 1009
+webbox MACH_WEBBOX WEBBOX 1010
+cwwndp MACH_CWWNDP CWWNDP 1011
+i839 MACH_DRAGON DRAGON 1012
+opendo_cpu_board MACH_OPENDO_CPU_BOARD OPENDO_CPU_BOARD 1013
+ccm2200 MACH_CCM2200 CCM2200 1014
+etwarm MACH_ETWARM ETWARM 1015
+m93030 MACH_M93030 M93030 1016
+cc7u MACH_CC7U CC7U 1017
+mtt_ranger MACH_MTT_RANGER MTT_RANGER 1018
+nexus MACH_NEXUS NEXUS 1019
+desman MACH_DESMAN DESMAN 1020
+bkde303 MACH_BKDE303 BKDE303 1021
+smdk2413 MACH_SMDK2413 SMDK2413 1022
+aml_m7200 MACH_AML_M7200 AML_M7200 1023
+aml_m5900 MACH_AML_M5900 AML_M5900 1024
+sg640 MACH_SG640 SG640 1025
+edg79524 MACH_EDG79524 EDG79524 1026
+ai2410 MACH_AI2410 AI2410 1027
+ixp465 MACH_IXP465 IXP465 1028
+balloon3 MACH_BALLOON3 BALLOON3 1029
+heins MACH_HEINS HEINS 1030
+mpluseva MACH_MPLUSEVA MPLUSEVA 1031
+rt042 MACH_RT042 RT042 1032
+cwiem MACH_CWIEM CWIEM 1033
+cm_x270 MACH_CM_X270 CM_X270 1034
+cm_x255 MACH_CM_X255 CM_X255 1035
+esh_at91 MACH_ESH_AT91 ESH_AT91 1036
+sandgate3 MACH_SANDGATE3 SANDGATE3 1037
+primo MACH_PRIMO PRIMO 1038
+gemstone MACH_GEMSTONE GEMSTONE 1039
+pronghorn_metro MACH_PRONGHORNMETRO PRONGHORNMETRO 1040
+sidewinder MACH_SIDEWINDER SIDEWINDER 1041
+picomod1 MACH_PICOMOD1 PICOMOD1 1042
+sg590 MACH_SG590 SG590 1043
+akai9307 MACH_AKAI9307 AKAI9307 1044
+fontaine MACH_FONTAINE FONTAINE 1045
+wombat MACH_WOMBAT WOMBAT 1046
+acq300 MACH_ACQ300 ACQ300 1047
+mod272 MACH_MOD_270 MOD_270 1048
+vmc_vc0820 MACH_VC0820 VC0820 1049
+ani_aim MACH_ANI_AIM ANI_AIM 1050
+jellyfish MACH_JELLYFISH JELLYFISH 1051
+amanita MACH_AMANITA AMANITA 1052
+vlink MACH_VLINK VLINK 1053
+dexflex MACH_DEXFLEX DEXFLEX 1054
+eigen_ttq MACH_EIGEN_TTQ EIGEN_TTQ 1055
+arcom_titan MACH_ARCOM_TITAN ARCOM_TITAN 1056
+tabla MACH_TABLA TABLA 1057
+mdirac3 MACH_MDIRAC3 MDIRAC3 1058
+mrhfbp2 MACH_MRHFBP2 MRHFBP2 1059
+at91rm9200rb MACH_AT91RM9200RB AT91RM9200RB 1060
+ani_apm MACH_ANI_APM ANI_APM 1061
+ella1 MACH_ELLA1 ELLA1 1062
+inhand_pxa27x MACH_INHAND_PXA27X INHAND_PXA27X 1063
+inhand_pxa25x MACH_INHAND_PXA25X INHAND_PXA25X 1064
+empos_xm MACH_EMPOS_XM EMPOS_XM 1065
+empos MACH_EMPOS EMPOS 1066
+empos_tiny MACH_EMPOS_TINY EMPOS_TINY 1067
+empos_sm MACH_EMPOS_SM EMPOS_SM 1068
+egret MACH_EGRET EGRET 1069
+ostrich MACH_OSTRICH OSTRICH 1070
+n50 MACH_N50 N50 1071
+ecbat91 MACH_ECBAT91 ECBAT91 1072
+stareast MACH_STAREAST STAREAST 1073
+dspg_dw MACH_DSPG_DW DSPG_DW 1074
+onearm MACH_ONEARM ONEARM 1075
+mrg110_6 MACH_MRG110_6 MRG110_6 1076
+wrt300nv2 MACH_WRT300NV2 WRT300NV2 1077
+xm_bulverde MACH_XM_BULVERDE XM_BULVERDE 1078
+msm6100 MACH_MSM6100 MSM6100 1079
+eti_b1 MACH_ETI_B1 ETI_B1 1080
+za9l_series MACH_ZILOG_ZA9L ZILOG_ZA9L 1081
+bit2440 MACH_BIT2440 BIT2440 1082
+nbi MACH_NBI NBI 1083
+smdk2443 MACH_SMDK2443 SMDK2443 1084
+vdavinci MACH_VDAVINCI VDAVINCI 1085
+atc6 MACH_ATC6 ATC6 1086
+multmdw MACH_MULTMDW MULTMDW 1087
+mba2440 MACH_MBA2440 MBA2440 1088
+ecsd MACH_ECSD ECSD 1089
+palmz31 MACH_PALMZ31 PALMZ31 1090
+fsg MACH_FSG FSG 1091
+razor101 MACH_RAZOR101 RAZOR101 1092
+opera_tdm MACH_OPERA_TDM OPERA_TDM 1093
+comcerto MACH_COMCERTO COMCERTO 1094
+tb0319 MACH_TB0319 TB0319 1095
+kws8000 MACH_KWS8000 KWS8000 1096
+b2 MACH_B2 B2 1097
+lcl54 MACH_LCL54 LCL54 1098
+at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099
+glantank MACH_GLANTANK GLANTANK 1100
+n2100 MACH_N2100 N2100 1101
+n4100 MACH_N4100 N4100 1102
+rsc4 MACH_VERTICAL_RSC4 VERTICAL_RSC4 1103
+sg8100 MACH_SG8100 SG8100 1104
+im42xx MACH_IM42XX IM42XX 1105
+ftxx MACH_FTXX FTXX 1106
+lwfusion MACH_LWFUSION LWFUSION 1107
+qt2410 MACH_QT2410 QT2410 1108
+kixrp435 MACH_KIXRP435 KIXRP435 1109
+ccw9c MACH_CCW9C CCW9C 1110
+dabhs MACH_DABHS DABHS 1111
+gzmx MACH_GZMX GZMX 1112
+ipnw100ap MACH_IPNW100AP IPNW100AP 1113
+cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114
+cc9p9750dev MACH_CC9P9750DEV CC9P9750DEV 1115
+cc9p9360val MACH_CC9P9360VAL CC9P9360VAL 1116
+cc9p9750val MACH_CC9P9750VAL CC9P9750VAL 1117
+nx70v MACH_NX70V NX70V 1118
+at91rm9200df MACH_AT91RM9200DF AT91RM9200DF 1119
+se_pilot2 MACH_SE_PILOT2 SE_PILOT2 1120
+mtcn_t800 MACH_MTCN_T800 MTCN_T800 1121
+vcmx212 MACH_VCMX212 VCMX212 1122
+lynx MACH_LYNX LYNX 1123
+at91sam9260id MACH_AT91SAM9260ID AT91SAM9260ID 1124
+hw86052 MACH_HW86052 HW86052 1125
+pilz_pmi3 MACH_PILZ_PMI3 PILZ_PMI3 1126
+edb9302a MACH_EDB9302A EDB9302A 1127
+edb9307a MACH_EDB9307A EDB9307A 1128
+ct_dfs MACH_CT_DFS CT_DFS 1129
+pilz_pmi4 MACH_PILZ_PMI4 PILZ_PMI4 1130
+xceednp_ixp MACH_XCEEDNP_IXP XCEEDNP_IXP 1131
+smdk2442b MACH_SMDK2442B SMDK2442B 1132
+xnode MACH_XNODE XNODE 1133
+aidx270 MACH_AIDX270 AIDX270 1134
+rema MACH_REMA REMA 1135
+bps1000 MACH_BPS1000 BPS1000 1136
+hw90350 MACH_HW90350 HW90350 1137
+omap_3430sdp MACH_OMAP_3430SDP OMAP_3430SDP 1138
+bluetouch MACH_BLUETOUCH BLUETOUCH 1139
+vstms MACH_VSTMS VSTMS 1140
+xsbase270 MACH_XSBASE270 XSBASE270 1141
+at91sam9260ek_cn MACH_AT91SAM9260EK_CN AT91SAM9260EK_CN 1142
+adsturboxb MACH_ADSTURBOXB ADSTURBOXB 1143
+oti4110 MACH_OTI4110 OTI4110 1144
+hme_pxa MACH_HME_PXA HME_PXA 1145
+deisterdca MACH_DEISTERDCA DEISTERDCA 1146
+ces_ssem2 MACH_CES_SSEM2 CES_SSEM2 1147
+ces_mtr MACH_CES_MTR CES_MTR 1148
+tds_avng_sbc MACH_TDS_AVNG_SBC TDS_AVNG_SBC 1149
+everest MACH_EVEREST EVEREST 1150
+pnx4010 MACH_PNX4010 PNX4010 1151
+oxnas MACH_OXNAS OXNAS 1152
+fiori MACH_FIORI FIORI 1153
+ml1200 MACH_ML1200 ML1200 1154
+pecos MACH_PECOS PECOS 1155
+nb2xxx MACH_NB2XXX NB2XXX 1156
+hw6900 MACH_HW6900 HW6900 1157
+cdcs_quoll MACH_CDCS_QUOLL CDCS_QUOLL 1158
+quicksilver MACH_QUICKSILVER QUICKSILVER 1159
+uplat926 MACH_UPLAT926 UPLAT926 1160
+dep2410_dep2410 MACH_DEP2410_THOMAS DEP2410_THOMAS 1161
+dtk2410 MACH_DTK2410 DTK2410 1162
+chili MACH_CHILI CHILI 1163
+demeter MACH_DEMETER DEMETER 1164
+dionysus MACH_DIONYSUS DIONYSUS 1165
+as352x MACH_AS352X AS352X 1166
+service MACH_SERVICE SERVICE 1167
+cs_e9301 MACH_CS_E9301 CS_E9301 1168
+micro9m MACH_MICRO9M MICRO9M 1169
+ia_mospck MACH_IA_MOSPCK IA_MOSPCK 1170
+ql201b MACH_QL201B QL201B 1171
+bbm MACH_BBM BBM 1174
+exxx MACH_EXXX EXXX 1175
+wma11b MACH_WMA11B WMA11B 1176
+pelco_atlas MACH_PELCO_ATLAS PELCO_ATLAS 1177
+g500 MACH_G500 G500 1178
+bug MACH_BUG BUG 1179
+mx33ads MACH_MX33ADS MX33ADS 1180
+chub MACH_CHUB CHUB 1181
+neo1973_gta01 MACH_NEO1973_GTA01 NEO1973_GTA01 1182
+w90n740 MACH_W90N740 W90N740 1183
+medallion_sa2410 MACH_MEDALLION_SA2410 MEDALLION_SA2410 1184
+ia_cpu_9200_2 MACH_IA_CPU_9200_2 IA_CPU_9200_2 1185
+dimmrm9200 MACH_DIMMRM9200 DIMMRM9200 1186
+pm9261 MACH_PM9261 PM9261 1187
+ml7304 MACH_ML7304 ML7304 1189
+ucp250 MACH_UCP250 UCP250 1190
+intboard MACH_INTBOARD INTBOARD 1191
+gulfstream MACH_GULFSTREAM GULFSTREAM 1192
+labquest MACH_LABQUEST LABQUEST 1193
+vcmx313 MACH_VCMX313 VCMX313 1194
+urg200 MACH_URG200 URG200 1195
+cpux255lcdnet MACH_CPUX255LCDNET CPUX255LCDNET 1196
+netdcu9 MACH_NETDCU9 NETDCU9 1197
+netdcu10 MACH_NETDCU10 NETDCU10 1198
+dspg_dga MACH_DSPG_DGA DSPG_DGA 1199
+dspg_dvw MACH_DSPG_DVW DSPG_DVW 1200
+solos MACH_SOLOS SOLOS 1201
+at91sam9263ek MACH_AT91SAM9263EK AT91SAM9263EK 1202
+osstbox MACH_OSSTBOX OSSTBOX 1203
+kbat9261 MACH_KBAT9261 KBAT9261 1204
+ct1100 MACH_CT1100 CT1100 1205
+akcppxa MACH_AKCPPXA AKCPPXA 1206
+ochaya1020 MACH_OCHAYA1020 OCHAYA1020 1207
+hitrack MACH_HITRACK HITRACK 1208
+syme1 MACH_SYME1 SYME1 1209
+syhl1 MACH_SYHL1 SYHL1 1210
+empca400 MACH_EMPCA400 EMPCA400 1211
+em7210 MACH_EM7210 EM7210 1212
+htchermes MACH_HTCHERMES HTCHERMES 1213
+eti_c1 MACH_ETI_C1 ETI_C1 1214
+ac100 MACH_AC100 AC100 1216
+sneetch MACH_SNEETCH SNEETCH 1217
+studentmate MACH_STUDENTMATE STUDENTMATE 1218
+zir2410 MACH_ZIR2410 ZIR2410 1219
+zir2413 MACH_ZIR2413 ZIR2413 1220
+dlonip3 MACH_DLONIP3 DLONIP3 1221
+instream MACH_INSTREAM INSTREAM 1222
+ambarella MACH_AMBARELLA AMBARELLA 1223
+nevis MACH_NEVIS NEVIS 1224
+htc_trinity MACH_HTC_TRINITY HTC_TRINITY 1225
+ql202b MACH_QL202B QL202B 1226
+vpac270 MACH_VPAC270 VPAC270 1227
+rd129 MACH_RD129 RD129 1228
+htcwizard MACH_HTCWIZARD HTCWIZARD 1229
+treo680 MACH_TREO680 TREO680 1230
+tecon_tmezon MACH_TECON_TMEZON TECON_TMEZON 1231
+zylonite MACH_ZYLONITE ZYLONITE 1233
+gene1270 MACH_GENE1270 GENE1270 1234
+zir2412 MACH_ZIR2412 ZIR2412 1235
+mx31lite MACH_MX31LITE MX31LITE 1236
+t700wx MACH_T700WX T700WX 1237
+vf100 MACH_VF100 VF100 1238
+nsb2 MACH_NSB2 NSB2 1239
+nxhmi_bb MACH_NXHMI_BB NXHMI_BB 1240
+nxhmi_re MACH_NXHMI_RE NXHMI_RE 1241
+n4100pro MACH_N4100PRO N4100PRO 1242
+sam9260 MACH_SAM9260 SAM9260 1243
+omap_treo600 MACH_OMAP_TREO600 OMAP_TREO600 1244
+indy2410 MACH_INDY2410 INDY2410 1245
+nelt_a MACH_NELT_A NELT_A 1246
+n311 MACH_N311 N311 1248
+at91sam9260vgk MACH_AT91SAM9260VGK AT91SAM9260VGK 1249
+at91leppe MACH_AT91LEPPE AT91LEPPE 1250
+at91lepccn MACH_AT91LEPCCN AT91LEPCCN 1251
+apc7100 MACH_APC7100 APC7100 1252
+stargazer MACH_STARGAZER STARGAZER 1253
+sonata MACH_SONATA SONATA 1254
+schmoogie MACH_SCHMOOGIE SCHMOOGIE 1255
+aztool MACH_AZTOOL AZTOOL 1256
+mioa701 MACH_MIOA701 MIOA701 1257
+sxni9260 MACH_SXNI9260 SXNI9260 1258
+mxc27520evb MACH_MXC27520EVB MXC27520EVB 1259
+armadillo5x0 MACH_ARMADILLO5X0 ARMADILLO5X0 1260
+mb9260 MACH_MB9260 MB9260 1261
+mb9263 MACH_MB9263 MB9263 1262
+ipac9302 MACH_IPAC9302 IPAC9302 1263
+cc9p9360js MACH_CC9P9360JS CC9P9360JS 1264
+gallium MACH_GALLIUM GALLIUM 1265
+msc2410 MACH_MSC2410 MSC2410 1266
+ghi270 MACH_GHI270 GHI270 1267
+davinci_leonardo MACH_DAVINCI_LEONARDO DAVINCI_LEONARDO 1268
+oiab MACH_OIAB OIAB 1269
+smdk6400 MACH_SMDK6400 SMDK6400 1270
+nokia_n800 MACH_NOKIA_N800 NOKIA_N800 1271
+greenphone MACH_GREENPHONE GREENPHONE 1272
+compex42x MACH_COMPEXWP18 COMPEXWP18 1273
+xmate MACH_XMATE XMATE 1274
+energizer MACH_ENERGIZER ENERGIZER 1275
+ime1 MACH_IME1 IME1 1276
+sweda_tms MACH_SWEDATMS SWEDATMS 1277
+ntnp435c MACH_NTNP435C NTNP435C 1278
+spectro2 MACH_SPECTRO2 SPECTRO2 1279
+h6039 MACH_H6039 H6039 1280
+ep80219 MACH_EP80219 EP80219 1281
+samoa_ii MACH_SAMOA_II SAMOA_II 1282
+cwmxl MACH_CWMXL CWMXL 1283
+as9200 MACH_AS9200 AS9200 1284
+sfx1149 MACH_SFX1149 SFX1149 1285
+navi010 MACH_NAVI010 NAVI010 1286
+multmdp MACH_MULTMDP MULTMDP 1287
+scb9520 MACH_SCB9520 SCB9520 1288
+htcathena MACH_HTCATHENA HTCATHENA 1289
+xp179 MACH_XP179 XP179 1290
+h4300 MACH_H4300 H4300 1291
+goramo_mlr MACH_GORAMO_MLR GORAMO_MLR 1292
+mxc30020evb MACH_MXC30020EVB MXC30020EVB 1293
+adsbitsyg5 MACH_ADSBITSYG5 ADSBITSYG5 1294
+adsportalplus MACH_ADSPORTALPLUS ADSPORTALPLUS 1295
+mmsp2plus MACH_MMSP2PLUS MMSP2PLUS 1296
+em_x270 MACH_EM_X270 EM_X270 1297
+tpp302 MACH_TPP302 TPP302 1298
+tpp104 MACH_TPM104 TPM104 1299
+tpm102 MACH_TPM102 TPM102 1300
+tpm109 MACH_TPM109 TPM109 1301
+fbxo1 MACH_FBXO1 FBXO1 1302
+hxd8 MACH_HXD8 HXD8 1303
+neo1973_gta02 MACH_NEO1973_GTA02 NEO1973_GTA02 1304
+emtest MACH_EMTEST EMTEST 1305
+ad6900 MACH_AD6900 AD6900 1306
+europa MACH_EUROPA EUROPA 1307
+metroconnect MACH_METROCONNECT METROCONNECT 1308
+ez_s2410 MACH_EZ_S2410 EZ_S2410 1309
+ez_s2440 MACH_EZ_S2440 EZ_S2440 1310
+ez_ep9312 MACH_EZ_EP9312 EZ_EP9312 1311
+ez_ep9315 MACH_EZ_EP9315 EZ_EP9315 1312
+ez_x7 MACH_EZ_X7 EZ_X7 1313
+godotdb MACH_GODOTDB GODOTDB 1314
+mistral MACH_MISTRAL MISTRAL 1315
+msm MACH_MSM MSM 1316
+ct5910 MACH_CT5910 CT5910 1317
+ct5912 MACH_CT5912 CT5912 1318
+argonst_mp MACH_HYNET_INE HYNET_INE 1319
+hynet_app MACH_HYNET_APP HYNET_APP 1320
+msm7200 MACH_MSM7200 MSM7200 1321
+msm7600 MACH_MSM7600 MSM7600 1322
+ceb255 MACH_CEB255 CEB255 1323
+ciel MACH_CIEL CIEL 1324
+slm5650 MACH_SLM5650 SLM5650 1325
+at91sam9rlek MACH_AT91SAM9RLEK AT91SAM9RLEK 1326
+comtech_router MACH_COMTECH_ROUTER COMTECH_ROUTER 1327
+sbc2410x MACH_SBC2410X SBC2410X 1328
+at4x0bd MACH_AT4X0BD AT4X0BD 1329
+cbifr MACH_CBIFR CBIFR 1330
+arcom_quantum MACH_ARCOM_QUANTUM ARCOM_QUANTUM 1331
+matrix520 MACH_MATRIX520 MATRIX520 1332
+matrix510 MACH_MATRIX510 MATRIX510 1333
+matrix500 MACH_MATRIX500 MATRIX500 1334
+m501 MACH_M501 M501 1335
+aaeon1270 MACH_AAEON1270 AAEON1270 1336
+matrix500ev MACH_MATRIX500EV MATRIX500EV 1337
+pac500 MACH_PAC500 PAC500 1338
+pnx8181 MACH_PNX8181 PNX8181 1339
+colibri320 MACH_COLIBRI320 COLIBRI320 1340
+aztoolbb MACH_AZTOOLBB AZTOOLBB 1341
+aztoolg2 MACH_AZTOOLG2 AZTOOLG2 1342
+dvlhost MACH_DVLHOST DVLHOST 1343
+zir9200 MACH_ZIR9200 ZIR9200 1344
+zir9260 MACH_ZIR9260 ZIR9260 1345
+cocopah MACH_COCOPAH COCOPAH 1346
+nds MACH_NDS NDS 1347
+rosencrantz MACH_ROSENCRANTZ ROSENCRANTZ 1348
+fttx_odsc MACH_FTTX_ODSC FTTX_ODSC 1349
+classe_r6904 MACH_CLASSE_R6904 CLASSE_R6904 1350
+cam60 MACH_CAM60 CAM60 1351
+mxc30031ads MACH_MXC30031ADS MXC30031ADS 1352
+datacall MACH_DATACALL DATACALL 1353
+at91eb01 MACH_AT91EB01 AT91EB01 1354
+rty MACH_RTY RTY 1355
+dwl2100 MACH_DWL2100 DWL2100 1356
+vinsi MACH_VINSI VINSI 1357
+db88f5281 MACH_DB88F5281 DB88F5281 1358
+csb726 MACH_CSB726 CSB726 1359
+tik27 MACH_TIK27 TIK27 1360
+mx_uc7420 MACH_MX_UC7420 MX_UC7420 1361
+rirm3 MACH_RIRM3 RIRM3 1362
+pelco_odyssey MACH_PELCO_ODYSSEY PELCO_ODYSSEY 1363
+adx_abox MACH_ADX_ABOX ADX_ABOX 1365
+adx_tpid MACH_ADX_TPID ADX_TPID 1366
+minicheck MACH_MINICHECK MINICHECK 1367
+idam MACH_IDAM IDAM 1368
+mario_mx MACH_MARIO_MX MARIO_MX 1369
+vi1888 MACH_VI1888 VI1888 1370
+zr4230 MACH_ZR4230 ZR4230 1371
+t1_ix_blue MACH_T1_IX_BLUE T1_IX_BLUE 1372
+syhq2 MACH_SYHQ2 SYHQ2 1373
+computime_r3 MACH_COMPUTIME_R3 COMPUTIME_R3 1374
+oratis MACH_ORATIS ORATIS 1375
+mikko MACH_MIKKO MIKKO 1376
+holon MACH_HOLON HOLON 1377
+olip8 MACH_OLIP8 OLIP8 1378
+ghi270hg MACH_GHI270HG GHI270HG 1379
+davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380
+davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381
+blackriver MACH_BLACKRIVER BLACKRIVER 1383
+sandgate_wp MACH_SANDGATEWP SANDGATEWP 1384
+cdotbwsg MACH_CDOTBWSG CDOTBWSG 1385
+quark963 MACH_QUARK963 QUARK963 1386
+csb735 MACH_CSB735 CSB735 1387
+littleton MACH_LITTLETON LITTLETON 1388
+mio_p550 MACH_MIO_P550 MIO_P550 1389
+motion2440 MACH_MOTION2440 MOTION2440 1390
+imm500 MACH_IMM500 IMM500 1391
+homematic MACH_HOMEMATIC HOMEMATIC 1392
+ermine MACH_ERMINE ERMINE 1393
+kb9202b MACH_KB9202B KB9202B 1394
+hs1xx MACH_HS1XX HS1XX 1395
+studentmate2440 MACH_STUDENTMATE2440 STUDENTMATE2440 1396
+arvoo_l1_z1 MACH_ARVOO_L1_Z1 ARVOO_L1_Z1 1397
+dep2410k MACH_DEP2410K DEP2410K 1398
+xxsvideo MACH_XXSVIDEO XXSVIDEO 1399
+im4004 MACH_IM4004 IM4004 1400
+ochaya1050 MACH_OCHAYA1050 OCHAYA1050 1401
+lep9261 MACH_LEP9261 LEP9261 1402
+svenmeb MACH_SVENMEB SVENMEB 1403
+fortunet2ne MACH_FORTUNET2NE FORTUNET2NE 1404
+nxhx MACH_NXHX NXHX 1406
+realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407
+ids500 MACH_IDS500 IDS500 1408
+ors_n725 MACH_ORS_N725 ORS_N725 1409
+hsdarm MACH_HSDARM HSDARM 1410
+sha_pon003 MACH_SHA_PON003 SHA_PON003 1411
+sha_pon004 MACH_SHA_PON004 SHA_PON004 1412
+sha_pon007 MACH_SHA_PON007 SHA_PON007 1413
+sha_pon011 MACH_SHA_PON011 SHA_PON011 1414
+h6042 MACH_H6042 H6042 1415
+h6043 MACH_H6043 H6043 1416
+looxc550 MACH_LOOXC550 LOOXC550 1417
+cnty_titan MACH_CNTY_TITAN CNTY_TITAN 1418
+app3xx MACH_APP3XX APP3XX 1419
+sideoatsgrama MACH_SIDEOATSGRAMA SIDEOATSGRAMA 1420
+treo700p MACH_TREO700P TREO700P 1421
+treo700w MACH_TREO700W TREO700W 1422
+treo750 MACH_TREO750 TREO750 1423
+treo755p MACH_TREO755P TREO755P 1424
+ezreganut9200 MACH_EZREGANUT9200 EZREGANUT9200 1425
+sarge MACH_SARGE SARGE 1426
+a696 MACH_A696 A696 1427
+turtle1916 MACH_TURTLE TURTLE 1428
+mx27_3ds MACH_MX27_3DS MX27_3DS 1430
+bishop MACH_BISHOP BISHOP 1431
+pxx MACH_PXX PXX 1432
+redwood MACH_REDWOOD REDWOOD 1433
+omap_2430dlp MACH_OMAP_2430DLP OMAP_2430DLP 1436
+omap_2430osk MACH_OMAP_2430OSK OMAP_2430OSK 1437
+sardine MACH_SARDINE SARDINE 1438
+halibut MACH_HALIBUT HALIBUT 1439
+trout MACH_TROUT TROUT 1440
+goldfish MACH_GOLDFISH GOLDFISH 1441
+gesbc2440 MACH_GESBC2440 GESBC2440 1442
+nomad MACH_NOMAD NOMAD 1443
+rosalind MACH_ROSALIND ROSALIND 1444
+cc9p9215 MACH_CC9P9215 CC9P9215 1445
+cc9p9210 MACH_CC9P9210 CC9P9210 1446
+cc9p9215js MACH_CC9P9215JS CC9P9215JS 1447
+cc9p9210js MACH_CC9P9210JS CC9P9210JS 1448
+nasffe MACH_NASFFE NASFFE 1449
+tn2x0bd MACH_TN2X0BD TN2X0BD 1450
+gwmpxa MACH_GWMPXA GWMPXA 1451
+exyplus MACH_EXYPLUS EXYPLUS 1452
+jadoo21 MACH_JADOO21 JADOO21 1453
+looxn560 MACH_LOOXN560 LOOXN560 1454
+bonsai MACH_BONSAI BONSAI 1455
+adsmilgato MACH_ADSMILGATO ADSMILGATO 1456
+gba MACH_GBA GBA 1457
+h6044 MACH_H6044 H6044 1458
+app MACH_APP APP 1459
+tct_hammer MACH_TCT_HAMMER TCT_HAMMER 1460
+herald MACH_HERALD HERALD 1461
+artemis MACH_ARTEMIS ARTEMIS 1462
+htctitan MACH_HTCTITAN HTCTITAN 1463
+qranium MACH_QRANIUM QRANIUM 1464
+adx_wsc2 MACH_ADX_WSC2 ADX_WSC2 1465
+adx_medcom MACH_ADX_MEDCOM ADX_MEDCOM 1466
+bboard MACH_BBOARD BBOARD 1467
+cambria MACH_CAMBRIA CAMBRIA 1468
+mt7xxx MACH_MT7XXX MT7XXX 1469
+matrix512 MACH_MATRIX512 MATRIX512 1470
+matrix522 MACH_MATRIX522 MATRIX522 1471
+ipac5010 MACH_IPAC5010 IPAC5010 1472
+sakura MACH_SAKURA SAKURA 1473
+grocx MACH_GROCX GROCX 1474
+pm9263 MACH_PM9263 PM9263 1475
+sim_one MACH_SIM_ONE SIM_ONE 1476
+acq132 MACH_ACQ132 ACQ132 1477
+datr MACH_DATR DATR 1478
+actux1 MACH_ACTUX1 ACTUX1 1479
+actux2 MACH_ACTUX2 ACTUX2 1480
+actux3 MACH_ACTUX3 ACTUX3 1481
+flexit MACH_FLEXIT FLEXIT 1482
+bh2x0bd MACH_BH2X0BD BH2X0BD 1483
+atb2002 MACH_ATB2002 ATB2002 1484
+xenon MACH_XENON XENON 1485
+fm607 MACH_FM607 FM607 1486
+matrix514 MACH_MATRIX514 MATRIX514 1487
+matrix524 MACH_MATRIX524 MATRIX524 1488
+inpod MACH_INPOD INPOD 1489
+jive MACH_JIVE JIVE 1490
+tll_mx21 MACH_TLL_MX21 TLL_MX21 1491
+sbc2800 MACH_SBC2800 SBC2800 1492
+cc7ucamry MACH_CC7UCAMRY CC7UCAMRY 1493
+ubisys_p9_sc15 MACH_UBISYS_P9_SC15 UBISYS_P9_SC15 1494
+ubisys_p9_ssc2d10 MACH_UBISYS_P9_SSC2D10 UBISYS_P9_SSC2D10 1495
+ubisys_p9_rcu3 MACH_UBISYS_P9_RCU3 UBISYS_P9_RCU3 1496
+aml_m8000 MACH_AML_M8000 AML_M8000 1497
+snapper_270 MACH_SNAPPER_270 SNAPPER_270 1498
+omap_bbx MACH_OMAP_BBX OMAP_BBX 1499
+ucn2410 MACH_UCN2410 UCN2410 1500
+sam9_l9260 MACH_SAM9_L9260 SAM9_L9260 1501
+eti_c2 MACH_ETI_C2 ETI_C2 1502
+avalanche MACH_AVALANCHE AVALANCHE 1503
+realview_pb1176 MACH_REALVIEW_PB1176 REALVIEW_PB1176 1504
+dp1500 MACH_DP1500 DP1500 1505
+apple_iphone MACH_APPLE_IPHONE APPLE_IPHONE 1506
+yl9200 MACH_YL9200 YL9200 1507
+rd88f5182 MACH_RD88F5182 RD88F5182 1508
+kurobox_pro MACH_KUROBOX_PRO KUROBOX_PRO 1509
+se_poet MACH_SE_POET SE_POET 1510
+mx31_3ds MACH_MX31_3DS MX31_3DS 1511
+r270 MACH_R270 R270 1512
+armour21 MACH_ARMOUR21 ARMOUR21 1513
+dt2 MACH_DT2 DT2 1514
+vt4 MACH_VT4 VT4 1515
+tyco320 MACH_TYCO320 TYCO320 1516
+adma MACH_ADMA ADMA 1517
+wp188 MACH_WP188 WP188 1518
+corsica MACH_CORSICA CORSICA 1519
+bigeye MACH_BIGEYE BIGEYE 1520
+tll5000 MACH_TLL5000 TLL5000 1522
+bebot MACH_BEBOT BEBOT 1523
+qong MACH_QONG QONG 1524
+tcompact MACH_TCOMPACT TCOMPACT 1525
+puma5 MACH_PUMA5 PUMA5 1526
+elara MACH_ELARA ELARA 1527
+ellington MACH_ELLINGTON ELLINGTON 1528
+xda_atom MACH_XDA_ATOM XDA_ATOM 1529
+energizer2 MACH_ENERGIZER2 ENERGIZER2 1530
+odin MACH_ODIN ODIN 1531
+actux4 MACH_ACTUX4 ACTUX4 1532
+esl_omap MACH_ESL_OMAP ESL_OMAP 1533
+omap2evm MACH_OMAP2EVM OMAP2EVM 1534
+omap3evm MACH_OMAP3EVM OMAP3EVM 1535
+adx_pcu57 MACH_ADX_PCU57 ADX_PCU57 1536
+monaco MACH_MONACO MONACO 1537
+levante MACH_LEVANTE LEVANTE 1538
+tmxipx425 MACH_TMXIPX425 TMXIPX425 1539
+leep MACH_LEEP LEEP 1540
+raad MACH_RAAD RAAD 1541
+dns323 MACH_DNS323 DNS323 1542
+ap1000 MACH_AP1000 AP1000 1543
+a9sam6432 MACH_A9SAM6432 A9SAM6432 1544
+shiny MACH_SHINY SHINY 1545
+omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546
+csr_bdb2 MACH_CSR_BDB2 CSR_BDB2 1547
+nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548
+c270 MACH_C270 C270 1549
+sentry MACH_SENTRY SENTRY 1550
+pcm038 MACH_PCM038 PCM038 1551
+anc300 MACH_ANC300 ANC300 1552
+htckaiser MACH_HTCKAISER HTCKAISER 1553
+sbat100 MACH_SBAT100 SBAT100 1554
+modunorm MACH_MODUNORM MODUNORM 1555
+pelos_twarm MACH_PELOS_TWARM PELOS_TWARM 1556
+flank MACH_FLANK FLANK 1557
+sirloin MACH_SIRLOIN SIRLOIN 1558
+brisket MACH_BRISKET BRISKET 1559
+chuck MACH_CHUCK CHUCK 1560
+otter MACH_OTTER OTTER 1561
+davinci_ldk MACH_DAVINCI_LDK DAVINCI_LDK 1562
+phreedom MACH_PHREEDOM PHREEDOM 1563
+sg310 MACH_SG310 SG310 1564
+ts209 MACH_TS209 TS209 1565
+at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
+tion9315 MACH_TION9315 TION9315 1567
+mast MACH_MAST MAST 1568
+pfw MACH_PFW PFW 1569
+yl_p2440 MACH_YL_P2440 YL_P2440 1570
+zsbc32 MACH_ZSBC32 ZSBC32 1571
+omap_pace2 MACH_OMAP_PACE2 OMAP_PACE2 1572
+imx_pace2 MACH_IMX_PACE2 IMX_PACE2 1573
+mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
+mx37_3ds MACH_MX37_3DS MX37_3DS 1575
+rcc MACH_RCC RCC 1576
+dmp MACH_ARM9 ARM9 1577
+vision_ep9307 MACH_VISION_EP9307 VISION_EP9307 1578
+scly1000 MACH_SCLY1000 SCLY1000 1579
+fontel_ep MACH_FONTEL_EP FONTEL_EP 1580
+voiceblue3g MACH_VOICEBLUE3G VOICEBLUE3G 1581
+tt9200 MACH_TT9200 TT9200 1582
+digi2410 MACH_DIGI2410 DIGI2410 1583
+terastation_pro2 MACH_TERASTATION_PRO2 TERASTATION_PRO2 1584
+linkstation_pro MACH_LINKSTATION_PRO LINKSTATION_PRO 1585
+motorola_a780 MACH_MOTOROLA_A780 MOTOROLA_A780 1587
+motorola_e6 MACH_MOTOROLA_E6 MOTOROLA_E6 1588
+motorola_e2 MACH_MOTOROLA_E2 MOTOROLA_E2 1589
+motorola_e680 MACH_MOTOROLA_E680 MOTOROLA_E680 1590
+ur2410 MACH_UR2410 UR2410 1591
+tas9261 MACH_TAS9261 TAS9261 1592
+davinci_hermes_hd MACH_HERMES_HD HERMES_HD 1593
+davinci_perseo_hd MACH_PERSEO_HD PERSEO_HD 1594
+stargazer2 MACH_STARGAZER2 STARGAZER2 1595
+e350 MACH_E350 E350 1596
+wpcm450 MACH_WPCM450 WPCM450 1597
+cartesio MACH_CARTESIO CARTESIO 1598
+toybox MACH_TOYBOX TOYBOX 1599
+tx27 MACH_TX27 TX27 1600
+ts409 MACH_TS409 TS409 1601
+p300 MACH_P300 P300 1602
+xdacomet MACH_XDACOMET XDACOMET 1603
+dexflex2 MACH_DEXFLEX2 DEXFLEX2 1604
+ow MACH_OW OW 1605
+armebs3 MACH_ARMEBS3 ARMEBS3 1606
+u3 MACH_U3 U3 1607
+smdk2450 MACH_SMDK2450 SMDK2450 1608
+rsi_ews MACH_RSI_EWS RSI_EWS 1609
+tnb MACH_TNB TNB 1610
+toepath MACH_TOEPATH TOEPATH 1611
+kb9263 MACH_KB9263 KB9263 1612
+mt7108 MACH_MT7108 MT7108 1613
+smtr2440 MACH_SMTR2440 SMTR2440 1614
+manao MACH_MANAO MANAO 1615
+cm_x300 MACH_CM_X300 CM_X300 1616
+gulfstream_kp MACH_GULFSTREAM_KP GULFSTREAM_KP 1617
+lanreadyfn522 MACH_LANREADYFN522 LANREADYFN522 1618
+arma37 MACH_ARMA37 ARMA37 1619
+mendel MACH_MENDEL MENDEL 1620
+pelco_iliad MACH_PELCO_ILIAD PELCO_ILIAD 1621
+unit2p MACH_UNIT2P UNIT2P 1622
+inc20otter MACH_INC20OTTER INC20OTTER 1623
+at91sam9g20ek MACH_AT91SAM9G20EK AT91SAM9G20EK 1624
+sc_ge2 MACH_STORCENTER STORCENTER 1625
+smdk6410 MACH_SMDK6410 SMDK6410 1626
+u300 MACH_U300 U300 1627
+u500 MACH_U500 U500 1628
+ds9260 MACH_DS9260 DS9260 1629
+riverrock MACH_RIVERROCK RIVERROCK 1630
+scibath MACH_SCIBATH SCIBATH 1631
+at91sam7se MACH_AT91SAM7SE512EK AT91SAM7SE512EK 1632
+wrt350n_v2 MACH_WRT350N_V2 WRT350N_V2 1633
+multimedia MACH_MULTIMEDIA MULTIMEDIA 1634
+marvin MACH_MARVIN MARVIN 1635
+x500 MACH_X500 X500 1636
+awlug4lcu MACH_AWLUG4LCU AWLUG4LCU 1637
+palermoc MACH_PALERMOC PALERMOC 1638
+omap_ldp MACH_OMAP_LDP OMAP_LDP 1639
+ip500 MACH_IP500 IP500 1640
+ase2 MACH_ASE2 ASE2 1642
+mx35evb MACH_MX35EVB MX35EVB 1643
+aml_m8050 MACH_AML_M8050 AML_M8050 1644
+mx35_3ds MACH_MX35_3DS MX35_3DS 1645
+mars MACH_MARS MARS 1646
+neuros_osd2 MACH_NEUROS_OSD2 NEUROS_OSD2 1647
+badger MACH_BADGER BADGER 1648
+trizeps4wl MACH_TRIZEPS4WL TRIZEPS4WL 1649
+trizeps5 MACH_TRIZEPS5 TRIZEPS5 1650
+marlin MACH_MARLIN MARLIN 1651
+ts78xx MACH_TS78XX TS78XX 1652
+hpipaq214 MACH_HPIPAQ214 HPIPAQ214 1653
+at572d940dcm MACH_AT572D940DCM AT572D940DCM 1654
+ne1board MACH_NE1BOARD NE1BOARD 1655
+zante MACH_ZANTE ZANTE 1656
+sffsdr MACH_SFFSDR SFFSDR 1657
+tw2662 MACH_TW2662 TW2662 1658
+vf10xx MACH_VF10XX VF10XX 1659
+zoran43xx MACH_ZORAN43XX ZORAN43XX 1660
+sonix926 MACH_SONIX926 SONIX926 1661
+celestialsemi MACH_CELESTIALSEMI CELESTIALSEMI 1662
+cc9m2443js MACH_CC9M2443JS CC9M2443JS 1663
+tw5334 MACH_TW5334 TW5334 1664
+omap_htcartemis MACH_HTCARTEMIS HTCARTEMIS 1665
+nal_hlite MACH_NAL_HLITE NAL_HLITE 1666
+htcvogue MACH_HTCVOGUE HTCVOGUE 1667
+smartweb MACH_SMARTWEB SMARTWEB 1668
+mv86xx MACH_MV86XX MV86XX 1669
+mv87xx MACH_MV87XX MV87XX 1670
+songyoungho MACH_SONGYOUNGHO SONGYOUNGHO 1671
+younghotema MACH_YOUNGHOTEMA YOUNGHOTEMA 1672
+pcm037 MACH_PCM037 PCM037 1673
+mmvp MACH_MMVP MMVP 1674
+mmap MACH_MMAP MMAP 1675
+ptid2410 MACH_PTID2410 PTID2410 1676
+james_926 MACH_JAMES_926 JAMES_926 1677
+fm6000 MACH_FM6000 FM6000 1678
+db88f6281_bp MACH_DB88F6281_BP DB88F6281_BP 1680
+rd88f6192_nas MACH_RD88F6192_NAS RD88F6192_NAS 1681
+rd88f6281 MACH_RD88F6281 RD88F6281 1682
+db78x00_bp MACH_DB78X00_BP DB78X00_BP 1683
+smdk2416 MACH_SMDK2416 SMDK2416 1685
+oce_spider_si MACH_OCE_SPIDER_SI OCE_SPIDER_SI 1686
+oce_spider_sk MACH_OCE_SPIDER_SK OCE_SPIDER_SK 1687
+rovern6 MACH_ROVERN6 ROVERN6 1688
+pelco_evolution MACH_PELCO_EVOLUTION PELCO_EVOLUTION 1689
+wbd111 MACH_WBD111 WBD111 1690
+elaracpe MACH_ELARACPE ELARACPE 1691
+mabv3 MACH_MABV3 MABV3 1692
+mv2120 MACH_MV2120 MV2120 1693
+csb737 MACH_CSB737 CSB737 1695
+mx51_3ds MACH_MX51_3DS MX51_3DS 1696
+g900 MACH_G900 G900 1697
+apf27 MACH_APF27 APF27 1698
+ggus2000 MACH_GGUS2000 GGUS2000 1699
+omap_2430_mimic MACH_OMAP_2430_MIMIC OMAP_2430_MIMIC 1700
+imx27lite MACH_IMX27LITE IMX27LITE 1701
+almex MACH_ALMEX ALMEX 1702
+control MACH_CONTROL CONTROL 1703
+mba2410 MACH_MBA2410 MBA2410 1704
+volcano MACH_VOLCANO VOLCANO 1705
+zenith MACH_ZENITH ZENITH 1706
+muchip MACH_MUCHIP MUCHIP 1707
+magellan MACH_MAGELLAN MAGELLAN 1708
+usb_a9260 MACH_USB_A9260 USB_A9260 1709
+usb_a9263 MACH_USB_A9263 USB_A9263 1710
+qil_a9260 MACH_QIL_A9260 QIL_A9260 1711
+cme9210 MACH_CME9210 CME9210 1712
+hczh4 MACH_HCZH4 HCZH4 1713
+spearbasic MACH_SPEARBASIC SPEARBASIC 1714
+dep2440 MACH_DEP2440 DEP2440 1715
+hdl_gxr MACH_HDL_GXR HDL_GXR 1716
+hdl_gt MACH_HDL_GT HDL_GT 1717
+hdl_4g MACH_HDL_4G HDL_4G 1718
+s3c6000 MACH_S3C6000 S3C6000 1719
+mmsp2_mdk MACH_MMSP2_MDK MMSP2_MDK 1720
+mpx220 MACH_MPX220 MPX220 1721
+kzm_arm11_01 MACH_KZM_ARM11_01 KZM_ARM11_01 1722
+htc_polaris MACH_HTC_POLARIS HTC_POLARIS 1723
+htc_kaiser MACH_HTC_KAISER HTC_KAISER 1724
+lg_ks20 MACH_LG_KS20 LG_KS20 1725
+hhgps MACH_HHGPS HHGPS 1726
+nokia_n810_wimax MACH_NOKIA_N810_WIMAX NOKIA_N810_WIMAX 1727
+insight MACH_INSIGHT INSIGHT 1728
+sapphire MACH_SAPPHIRE SAPPHIRE 1729
+csb637xo MACH_CSB637XO CSB637XO 1730
+evisiong MACH_EVISIONG EVISIONG 1731
+stmp37xx MACH_STMP37XX STMP37XX 1732
+stmp378x MACH_STMP378X STMP378X 1733
+tnt MACH_TNT TNT 1734
+tbxt MACH_TBXT TBXT 1735
+playmate MACH_PLAYMATE PLAYMATE 1736
+pns10 MACH_PNS10 PNS10 1737
+eznavi MACH_EZNAVI EZNAVI 1738
+ps4000 MACH_PS4000 PS4000 1739
+ezx_a780 MACH_EZX_A780 EZX_A780 1740
+ezx_e680 MACH_EZX_E680 EZX_E680 1741
+ezx_a1200 MACH_EZX_A1200 EZX_A1200 1742
+ezx_e6 MACH_EZX_E6 EZX_E6 1743
+ezx_e2 MACH_EZX_E2 EZX_E2 1744
+ezx_a910 MACH_EZX_A910 EZX_A910 1745
+cwmx31 MACH_CWMX31 CWMX31 1746
+sl2312 MACH_SL2312 SL2312 1747
+blenny MACH_BLENNY BLENNY 1748
+ds107 MACH_DS107 DS107 1749
+dsx07 MACH_DSX07 DSX07 1750
+picocom1 MACH_PICOCOM1 PICOCOM1 1751
+lynx_wolverine MACH_LYNX_WOLVERINE LYNX_WOLVERINE 1752
+ubisys_p9_sc19 MACH_UBISYS_P9_SC19 UBISYS_P9_SC19 1753
+kratos_low MACH_KRATOS_LOW KRATOS_LOW 1754
+m700 MACH_M700 M700 1755
+edmini_v2 MACH_EDMINI_V2 EDMINI_V2 1756
+zipit2 MACH_ZIPIT2 ZIPIT2 1757
+hslfemtocell MACH_HSLFEMTOCELL HSLFEMTOCELL 1758
+daintree_at91 MACH_DAINTREE_AT91 DAINTREE_AT91 1759
+sg560usb MACH_SG560USB SG560USB 1760
+omap3_pandora MACH_OMAP3_PANDORA OMAP3_PANDORA 1761
+usr8200 MACH_USR8200 USR8200 1762
+s1s65k MACH_S1S65K S1S65K 1763
+s2s65a MACH_S2S65A S2S65A 1764
+icore MACH_ICORE ICORE 1765
+mss2 MACH_MSS2 MSS2 1766
+belmont MACH_BELMONT BELMONT 1767
+asusp525 MACH_ASUSP525 ASUSP525 1768
+lb88rc8480 MACH_LB88RC8480 LB88RC8480 1769
+hipxa MACH_HIPXA HIPXA 1770
+mx25_3ds MACH_MX25_3DS MX25_3DS 1771
+m800 MACH_M800 M800 1772
+omap3530_lv_som MACH_OMAP3530_LV_SOM OMAP3530_LV_SOM 1773
+prima_evb MACH_PRIMA_EVB PRIMA_EVB 1774
+mx31bt1 MACH_MX31BT1 MX31BT1 1775
+atlas4_evb MACH_ATLAS4_EVB ATLAS4_EVB 1776
+mx31cicada MACH_MX31CICADA MX31CICADA 1777
+mi424wr MACH_MI424WR MI424WR 1778
+axs_ultrax MACH_AXS_ULTRAX AXS_ULTRAX 1779
+at572d940deb MACH_AT572D940DEB AT572D940DEB 1780
+davinci_da830_evm MACH_DAVINCI_DA830_EVM DAVINCI_DA830_EVM 1781
+ep9302 MACH_EP9302 EP9302 1782
+at572d940hfek MACH_AT572D940HFEB AT572D940HFEB 1783
+cybook3 MACH_CYBOOK3 CYBOOK3 1784
+wdg002 MACH_WDG002 WDG002 1785
+sg560adsl MACH_SG560ADSL SG560ADSL 1786
+nextio_n2800_ica MACH_NEXTIO_N2800_ICA NEXTIO_N2800_ICA 1787
+dove_db MACH_DOVE_DB DOVE_DB 1788
+vandihud MACH_VANDIHUD VANDIHUD 1790
+magx_e8 MACH_MAGX_E8 MAGX_E8 1791
+magx_z6 MACH_MAGX_Z6 MAGX_Z6 1792
+magx_v8 MACH_MAGX_V8 MAGX_V8 1793
+magx_u9 MACH_MAGX_U9 MAGX_U9 1794
+toughcf08 MACH_TOUGHCF08 TOUGHCF08 1795
+zw4400 MACH_ZW4400 ZW4400 1796
+marat91 MACH_MARAT91 MARAT91 1797
+overo MACH_OVERO OVERO 1798
+at2440evb MACH_AT2440EVB AT2440EVB 1799
+neocore926 MACH_NEOCORE926 NEOCORE926 1800
+wnr854t MACH_WNR854T WNR854T 1801
+imx27 MACH_IMX27 IMX27 1802
+moose_db MACH_MOOSE_DB MOOSE_DB 1803
+fab4 MACH_FAB4 FAB4 1804
+htcdiamond MACH_HTCDIAMOND HTCDIAMOND 1805
+fiona MACH_FIONA FIONA 1806
+mxc30030_x MACH_MXC30030_X MXC30030_X 1807
+bmp1000 MACH_BMP1000 BMP1000 1808
+logi9200 MACH_LOGI9200 LOGI9200 1809
+tqma31 MACH_TQMA31 TQMA31 1810
+ccw9p9215js MACH_CCW9P9215JS CCW9P9215JS 1811
+rd88f5181l_ge MACH_RD88F5181L_GE RD88F5181L_GE 1812
+sifmain MACH_SIFMAIN SIFMAIN 1813
+sam9_l9261 MACH_SAM9_L9261 SAM9_L9261 1814
+cc9m2443 MACH_CC9M2443 CC9M2443 1815
+xaria300 MACH_XARIA300 XARIA300 1816
+it9200 MACH_IT9200 IT9200 1817
+rd88f5181l_fxo MACH_RD88F5181L_FXO RD88F5181L_FXO 1818
+kriss_sensor MACH_KRISS_SENSOR KRISS_SENSOR 1819
+pilz_pmi5 MACH_PILZ_PMI5 PILZ_PMI5 1820
+jade MACH_JADE JADE 1821
+ks8695_softplc MACH_KS8695_SOFTPLC KS8695_SOFTPLC 1822
+gprisc3 MACH_GPRISC3 GPRISC3 1823
+stamp9g20 MACH_STAMP9G20 STAMP9G20 1824
+smdk6430 MACH_SMDK6430 SMDK6430 1825
+smdkc100 MACH_SMDKC100 SMDKC100 1826
+tavorevb MACH_TAVOREVB TAVOREVB 1827
+saar MACH_SAAR SAAR 1828
+deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829
+at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830
+linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831
+hit_b0 MACH_HIT_B0 HIT_B0 1832
+adx_rmu MACH_ADX_RMU ADX_RMU 1833
+xg_cpe_main MACH_XG_CPE_MAIN XG_CPE_MAIN 1834
+edb9407a MACH_EDB9407A EDB9407A 1835
+dtb9608 MACH_DTB9608 DTB9608 1836
+em104v1 MACH_EM104V1 EM104V1 1837
+demo MACH_DEMO DEMO 1838
+logi9260 MACH_LOGI9260 LOGI9260 1839
+mx31_exm32 MACH_MX31_EXM32 MX31_EXM32 1840
+usb_a9g20 MACH_USB_A9G20 USB_A9G20 1841
+picproje2008 MACH_PICPROJE2008 PICPROJE2008 1842
+cs_e9315 MACH_CS_E9315 CS_E9315 1843
+qil_a9g20 MACH_QIL_A9G20 QIL_A9G20 1844
+sha_pon020 MACH_SHA_PON020 SHA_PON020 1845
+nad MACH_NAD NAD 1846
+sbc35_a9260 MACH_SBC35_A9260 SBC35_A9260 1847
+sbc35_a9g20 MACH_SBC35_A9G20 SBC35_A9G20 1848
+davinci_beginning MACH_DAVINCI_BEGINNING DAVINCI_BEGINNING 1849
+uwc MACH_UWC UWC 1850
+mxlads MACH_MXLADS MXLADS 1851
+htcnike MACH_HTCNIKE HTCNIKE 1852
+deister_pxa270 MACH_DEISTER_PXA270 DEISTER_PXA270 1853
+cme9210js MACH_CME9210JS CME9210JS 1854
+cc9p9360 MACH_CC9P9360 CC9P9360 1855
+mocha MACH_MOCHA MOCHA 1856
+wapd170ag MACH_WAPD170AG WAPD170AG 1857
+linkstation_mini MACH_LINKSTATION_MINI LINKSTATION_MINI 1858
+afeb9260 MACH_AFEB9260 AFEB9260 1859
+w90x900 MACH_W90X900 W90X900 1860
+w90x700 MACH_W90X700 W90X700 1861
+kt300ip MACH_KT300IP KT300IP 1862
+kt300ip_g20 MACH_KT300IP_G20 KT300IP_G20 1863
+srcm MACH_SRCM SRCM 1864
+wlnx_9260 MACH_WLNX_9260 WLNX_9260 1865
+openmoko_gta03 MACH_OPENMOKO_GTA03 OPENMOKO_GTA03 1866
+osprey2 MACH_OSPREY2 OSPREY2 1867
+kbio9260 MACH_KBIO9260 KBIO9260 1868
+ginza MACH_GINZA GINZA 1869
+a636n MACH_A636N A636N 1870
+imx27ipcam MACH_IMX27IPCAM IMX27IPCAM 1871
+nemoc MACH_NEMOC NEMOC 1872
+geneva MACH_GENEVA GENEVA 1873
+htcpharos MACH_HTCPHAROS HTCPHAROS 1874
+neonc MACH_NEONC NEONC 1875
+nas7100 MACH_NAS7100 NAS7100 1876
+teuphone MACH_TEUPHONE TEUPHONE 1877
+annax_eth2 MACH_ANNAX_ETH2 ANNAX_ETH2 1878
+csb733 MACH_CSB733 CSB733 1879
+bk3 MACH_BK3 BK3 1880
+omap_em32 MACH_OMAP_EM32 OMAP_EM32 1881
+et9261cp MACH_ET9261CP ET9261CP 1882
+jasperc MACH_JASPERC JASPERC 1883
+issi_arm9 MACH_ISSI_ARM9 ISSI_ARM9 1884
+ued MACH_UED UED 1885
+esiblade MACH_ESIBLADE ESIBLADE 1886
+eye02 MACH_EYE02 EYE02 1887
+imx27kbd MACH_IMX27KBD IMX27KBD 1888
+kixvp435 MACH_KIXVP435 KIXVP435 1890
+kixnp435 MACH_KIXNP435 KIXNP435 1891
+africa MACH_AFRICA AFRICA 1892
+nh233 MACH_NH233 NH233 1893
+rd88f6183ap_ge MACH_RD88F6183AP_GE RD88F6183AP_GE 1894
+bcm4760 MACH_BCM4760 BCM4760 1895
+eddy_v2 MACH_EDDY_V2 EDDY_V2 1896
+realview_pba8 MACH_REALVIEW_PBA8 REALVIEW_PBA8 1897
+hid_a7 MACH_HID_A7 HID_A7 1898
+hero MACH_HERO HERO 1899
+omap_poseidon MACH_OMAP_POSEIDON OMAP_POSEIDON 1900
+realview_pbx MACH_REALVIEW_PBX REALVIEW_PBX 1901
+micro9s MACH_MICRO9S MICRO9S 1902
+mako MACH_MAKO MAKO 1903
+xdaflame MACH_XDAFLAME XDAFLAME 1904
+phidget_sbc2 MACH_PHIDGET_SBC2 PHIDGET_SBC2 1905
+limestone MACH_LIMESTONE LIMESTONE 1906
+iprobe_c32 MACH_IPROBE_C32 IPROBE_C32 1907
+rut100 MACH_RUT100 RUT100 1908
+asusp535 MACH_ASUSP535 ASUSP535 1909
+htcraphael MACH_HTCRAPHAEL HTCRAPHAEL 1910
+sygdg1 MACH_SYGDG1 SYGDG1 1911
+sygdg2 MACH_SYGDG2 SYGDG2 1912
+seoul MACH_SEOUL SEOUL 1913
+salerno MACH_SALERNO SALERNO 1914
+ucn_s3c64xx MACH_UCN_S3C64XX UCN_S3C64XX 1915
+msm7201a MACH_MSM7201A MSM7201A 1916
+lpr1 MACH_LPR1 LPR1 1917
+armadillo500fx MACH_ARMADILLO500FX ARMADILLO500FX 1918
+g3evm MACH_G3EVM G3EVM 1919
+z3_dm355 MACH_Z3_DM355 Z3_DM355 1920
+w90p910evb MACH_W90P910EVB W90P910EVB 1921
+w90p920evb MACH_W90P920EVB W90P920EVB 1922
+w90p950evb MACH_W90P950EVB W90P950EVB 1923
+w90n960evb MACH_W90N960EVB W90N960EVB 1924
+camhd MACH_CAMHD CAMHD 1925
+mvc100 MACH_MVC100 MVC100 1926
+electrum_200 MACH_ELECTRUM_200 ELECTRUM_200 1927
+htcjade MACH_HTCJADE HTCJADE 1928
+memphis MACH_MEMPHIS MEMPHIS 1929
+imx27sbc MACH_IMX27SBC IMX27SBC 1930
+lextar MACH_LEXTAR LEXTAR 1931
+mv88f6281gtw_ge MACH_MV88F6281GTW_GE MV88F6281GTW_GE 1932
+ncp MACH_NCP NCP 1933
+z32an_series MACH_Z32AN Z32AN 1934
+tmq_capd MACH_TMQ_CAPD TMQ_CAPD 1935
+omap3_wl MACH_OMAP3_WL OMAP3_WL 1936
+chumby MACH_CHUMBY CHUMBY 1937
+atsarm9 MACH_ATSARM9 ATSARM9 1938
+davinci_dm365_evm MACH_DAVINCI_DM365_EVM DAVINCI_DM365_EVM 1939
+bahamas MACH_BAHAMAS BAHAMAS 1940
+das MACH_DAS DAS 1941
+minidas MACH_MINIDAS MINIDAS 1942
+vk1000 MACH_VK1000 VK1000 1943
+centro MACH_CENTRO CENTRO 1944
+ctera_2bay MACH_CTERA_2BAY CTERA_2BAY 1945
+edgeconnect MACH_EDGECONNECT EDGECONNECT 1946
+nd27000 MACH_ND27000 ND27000 1947
+cobra MACH_GEMALTO_COBRA GEMALTO_COBRA 1948
+ingelabs_comet MACH_INGELABS_COMET INGELABS_COMET 1949
+pollux_wiz MACH_POLLUX_WIZ POLLUX_WIZ 1950
+blackstone MACH_BLACKSTONE BLACKSTONE 1951
+topaz MACH_TOPAZ TOPAZ 1952
+aixle MACH_AIXLE AIXLE 1953
+mw998 MACH_MW998 MW998 1954
+nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955
+vsc5605ev MACH_VSC5605EV VSC5605EV 1956
+nt98700dk MACH_NT98700DK NT98700DK 1957
+icontact MACH_ICONTACT ICONTACT 1958
+swarco_frcpu MACH_SWARCO_FRCPU SWARCO_FRCPU 1959
+swarco_scpu MACH_SWARCO_SCPU SWARCO_SCPU 1960
+bbox_p16 MACH_BBOX_P16 BBOX_P16 1961
+bstd MACH_BSTD BSTD 1962
+sbc2440ii MACH_SBC2440II SBC2440II 1963
+pcm034 MACH_PCM034 PCM034 1964
+neso MACH_NESO NESO 1965
+wlnx_9g20 MACH_WLNX_9G20 WLNX_9G20 1966
+omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967
+totemnova MACH_TOTEMNOVA TOTEMNOVA 1968
+c5000 MACH_C5000 C5000 1969
+unipo_at91sam9263 MACH_UNIPO_AT91SAM9263 UNIPO_AT91SAM9263 1970
+ethernut5 MACH_ETHERNUT5 ETHERNUT5 1971
+arm11 MACH_ARM11 ARM11 1972
+cpuat9260 MACH_CPUAT9260 CPUAT9260 1973
+cpupxa255 MACH_CPUPXA255 CPUPXA255 1974
+eukrea_cpuimx27 MACH_EUKREA_CPUIMX27 EUKREA_CPUIMX27 1975
+cheflux MACH_CHEFLUX CHEFLUX 1976
+eb_cpux9k2 MACH_EB_CPUX9K2 EB_CPUX9K2 1977
+opcotec MACH_OPCOTEC OPCOTEC 1978
+yt MACH_YT YT 1979
+motoq MACH_MOTOQ MOTOQ 1980
+bsb1 MACH_BSB1 BSB1 1981
+acs5k MACH_ACS5K ACS5K 1982
+milan MACH_MILAN MILAN 1983
+quartzv2 MACH_QUARTZV2 QUARTZV2 1984
+rsvp MACH_RSVP RSVP 1985
+rmp200 MACH_RMP200 RMP200 1986
+snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987
+dsm320 MACH_DSM320 DSM320 1988
+adsgcm MACH_ADSGCM ADSGCM 1989
+ase2_400 MACH_ASE2_400 ASE2_400 1990
+pizza MACH_PIZZA PIZZA 1991
+spot_ngpl MACH_SPOT_NGPL SPOT_NGPL 1992
+armata MACH_ARMATA ARMATA 1993
+exeda MACH_EXEDA EXEDA 1994
+mx31sf005 MACH_MX31SF005 MX31SF005 1995
+f5d8231_4_v2 MACH_F5D8231_4_V2 F5D8231_4_V2 1996
+q2440 MACH_Q2440 Q2440 1997
+qq2440 MACH_QQ2440 QQ2440 1998
+mini2440 MACH_MINI2440 MINI2440 1999
+colibri300 MACH_COLIBRI300 COLIBRI300 2000
+jades MACH_JADES JADES 2001
+spark MACH_SPARK SPARK 2002
+benzina MACH_BENZINA BENZINA 2003
+blaze MACH_BLAZE BLAZE 2004
+linkstation_ls_hgl MACH_LINKSTATION_LS_HGL LINKSTATION_LS_HGL 2005
+htckovsky MACH_HTCKOVSKY HTCKOVSKY 2006
+sony_prs505 MACH_SONY_PRS505 SONY_PRS505 2007
+hanlin_v3 MACH_HANLIN_V3 HANLIN_V3 2008
+sapphira MACH_SAPPHIRA SAPPHIRA 2009
+dack_sda_01 MACH_DACK_SDA_01 DACK_SDA_01 2010
+armbox MACH_ARMBOX ARMBOX 2011
+harris_rvp MACH_HARRIS_RVP HARRIS_RVP 2012
+ribaldo MACH_RIBALDO RIBALDO 2013
+agora MACH_AGORA AGORA 2014
+omap3_mini MACH_OMAP3_MINI OMAP3_MINI 2015
+a9sam6432_b MACH_A9SAM6432_B A9SAM6432_B 2016
+usg2410 MACH_USG2410 USG2410 2017
+pc72052_i10_revb MACH_PC72052_I10_REVB PC72052_I10_REVB 2018
+mx35_exm32 MACH_MX35_EXM32 MX35_EXM32 2019
+topas910 MACH_TOPAS910 TOPAS910 2020
+hyena MACH_HYENA HYENA 2021
+pospax MACH_POSPAX POSPAX 2022
+hdl_gx MACH_HDL_GX HDL_GX 2023
+ctera_4bay MACH_CTERA_4BAY CTERA_4BAY 2024
+ctera_plug_c MACH_CTERA_PLUG_C CTERA_PLUG_C 2025
+crwea_plug_i MACH_CRWEA_PLUG_I CRWEA_PLUG_I 2026
+egauge2 MACH_EGAUGE2 EGAUGE2 2027
+didj MACH_DIDJ DIDJ 2028
+m_s3c2443 MACH_MEISTER MEISTER 2029
+htcblackstone MACH_HTCBLACKSTONE HTCBLACKSTONE 2030
+cpuat9g20 MACH_CPUAT9G20 CPUAT9G20 2031
+smdk6440 MACH_SMDK6440 SMDK6440 2032
+omap_35xx_mvp MACH_OMAP_35XX_MVP OMAP_35XX_MVP 2033
+ctera_plug_i MACH_CTERA_PLUG_I CTERA_PLUG_I 2034
+pvg610_100 MACH_PVG610 PVG610 2035
+hprw6815 MACH_HPRW6815 HPRW6815 2036
+omap3_oswald MACH_OMAP3_OSWALD OMAP3_OSWALD 2037
+nas4220b MACH_NAS4220B NAS4220B 2038
+htcraphael_cdma MACH_HTCRAPHAEL_CDMA HTCRAPHAEL_CDMA 2039
+htcdiamond_cdma MACH_HTCDIAMOND_CDMA HTCDIAMOND_CDMA 2040
+scaler MACH_SCALER SCALER 2041
+zylonite2 MACH_ZYLONITE2 ZYLONITE2 2042
+aspenite MACH_ASPENITE ASPENITE 2043
+teton MACH_TETON TETON 2044
+ttc_dkb MACH_TTC_DKB TTC_DKB 2045
+bishop2 MACH_BISHOP2 BISHOP2 2046
+ippv5 MACH_IPPV5 IPPV5 2047
+farm926 MACH_FARM926 FARM926 2048
+mmccpu MACH_MMCCPU MMCCPU 2049
+sgmsfl MACH_SGMSFL SGMSFL 2050
+tt8000 MACH_TT8000 TT8000 2051
+zrn4300lp MACH_ZRN4300LP ZRN4300LP 2052
+mptc MACH_MPTC MPTC 2053
+h6051 MACH_H6051 H6051 2054
+pvg610_101 MACH_PVG610_101 PVG610_101 2055
+stamp9261_pc_evb MACH_STAMP9261_PC_EVB STAMP9261_PC_EVB 2056
+pelco_odysseus MACH_PELCO_ODYSSEUS PELCO_ODYSSEUS 2057
+tny_a9260 MACH_TNY_A9260 TNY_A9260 2058
+tny_a9g20 MACH_TNY_A9G20 TNY_A9G20 2059
+aesop_mp2530f MACH_AESOP_MP2530F AESOP_MP2530F 2060
+dx900 MACH_DX900 DX900 2061
+cpodc2 MACH_CPODC2 CPODC2 2062
+tilt_8925 MACH_TILT_8925 TILT_8925 2063
+davinci_dm357_evm MACH_DAVINCI_DM357_EVM DAVINCI_DM357_EVM 2064
+swordfish MACH_SWORDFISH SWORDFISH 2065
+corvus MACH_CORVUS CORVUS 2066
+taurus MACH_TAURUS TAURUS 2067
+axm MACH_AXM AXM 2068
+axc MACH_AXC AXC 2069
+baby MACH_BABY BABY 2070
+mp200 MACH_MP200 MP200 2071
+pcm043 MACH_PCM043 PCM043 2072
+hanlin_v3c MACH_HANLIN_V3C HANLIN_V3C 2073
+kbk9g20 MACH_KBK9G20 KBK9G20 2074
+adsturbog5 MACH_ADSTURBOG5 ADSTURBOG5 2075
+avenger_lite1 MACH_AVENGER_LITE1 AVENGER_LITE1 2076
+suc82x MACH_SUC SUC 2077
+at91sam7s256 MACH_AT91SAM7S256 AT91SAM7S256 2078
+mendoza MACH_MENDOZA MENDOZA 2079
+kira MACH_KIRA KIRA 2080
+mx1hbm MACH_MX1HBM MX1HBM 2081
+quatro43xx MACH_QUATRO43XX QUATRO43XX 2082
+quatro4230 MACH_QUATRO4230 QUATRO4230 2083
+nsb400 MACH_NSB400 NSB400 2084
+drp255 MACH_DRP255 DRP255 2085
+thoth MACH_THOTH THOTH 2086
+firestone MACH_FIRESTONE FIRESTONE 2087
+asusp750 MACH_ASUSP750 ASUSP750 2088
+ctera_dl MACH_CTERA_DL CTERA_DL 2089
+socr MACH_SOCR SOCR 2090
+htcoxygen MACH_HTCOXYGEN HTCOXYGEN 2091
+heroc MACH_HEROC HEROC 2092
+zeno6800 MACH_ZENO6800 ZENO6800 2093
+sc2mcs MACH_SC2MCS SC2MCS 2094
+gene100 MACH_GENE100 GENE100 2095
+as353x MACH_AS353X AS353X 2096
+sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
+at91sam9g20 MACH_AT91SAM9G20 AT91SAM9G20 2098
+mv88f6192gtw_fe MACH_MV88F6192GTW_FE MV88F6192GTW_FE 2099
+cc9200 MACH_CC9200 CC9200 2100
+sm9200 MACH_SM9200 SM9200 2101
+tp9200 MACH_TP9200 TP9200 2102
+snapperdv MACH_SNAPPERDV SNAPPERDV 2103
+avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
+avengers_lite1 MACH_AVENGERS_LITE1 AVENGERS_LITE1 2105
+omap3axon MACH_OMAP3AXON OMAP3AXON 2106
+ma8xx MACH_MA8XX MA8XX 2107
+mp201ek MACH_MP201EK MP201EK 2108
+davinci_tux MACH_DAVINCI_TUX DAVINCI_TUX 2109
+mpa1600 MACH_MPA1600 MPA1600 2110
+pelco_troy MACH_PELCO_TROY PELCO_TROY 2111
+nsb667 MACH_NSB667 NSB667 2112
+rovers5_4mpix MACH_ROVERS5_4MPIX ROVERS5_4MPIX 2113
+twocom MACH_TWOCOM TWOCOM 2114
+ubisys_p9_rcu3r2 MACH_UBISYS_P9_RCU3R2 UBISYS_P9_RCU3R2 2115
+hero_espresso MACH_HERO_ESPRESSO HERO_ESPRESSO 2116
+afeusb MACH_AFEUSB AFEUSB 2117
+t830 MACH_T830 T830 2118
+spd8020_cc MACH_SPD8020_CC SPD8020_CC 2119
+om_3d7k MACH_OM_3D7K OM_3D7K 2120
+picocom2 MACH_PICOCOM2 PICOCOM2 2121
+uwg4mx27 MACH_UWG4MX27 UWG4MX27 2122
+uwg4mx31 MACH_UWG4MX31 UWG4MX31 2123
+cherry MACH_CHERRY CHERRY 2124
+mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
+s3c2440turkiye MACH_S3C2440TURKIYE S3C2440TURKIYE 2126
+tx37 MACH_TX37 TX37 2127
+sbc2800_9g20 MACH_SBC2800_9G20 SBC2800_9G20 2128
+benzglb MACH_BENZGLB BENZGLB 2129
+benztd MACH_BENZTD BENZTD 2130
+cartesio_plus MACH_CARTESIO_PLUS CARTESIO_PLUS 2131
+solrad_g20 MACH_SOLRAD_G20 SOLRAD_G20 2132
+mx27wallace MACH_MX27WALLACE MX27WALLACE 2133
+fmzwebmodul MACH_FMZWEBMODUL FMZWEBMODUL 2134
+rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
+smallogger MACH_SMALLOGGER SMALLOGGER 2136
+ccw9p9215 MACH_CCW9P9215 CCW9P9215 2137
+dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
+ts219 MACH_TS219 TS219 2139
+tny_a9263 MACH_TNY_A9263 TNY_A9263 2140
+apollo MACH_APOLLO APOLLO 2141
+at91cap9stk MACH_AT91CAP9STK AT91CAP9STK 2142
+spc300 MACH_SPC300 SPC300 2143
+eko MACH_EKO EKO 2144
+ccw9m2443 MACH_CCW9M2443 CCW9M2443 2145
+ccw9m2443js MACH_CCW9M2443JS CCW9M2443JS 2146
+m2m_router_device MACH_M2M_ROUTER_DEVICE M2M_ROUTER_DEVICE 2147
+str9104nas MACH_STAR9104NAS STAR9104NAS 2148
+pca100 MACH_PCA100 PCA100 2149
+z3_dm365_mod_01 MACH_Z3_DM365_MOD_01 Z3_DM365_MOD_01 2150
+hipox MACH_HIPOX HIPOX 2151
+omap3_piteds MACH_OMAP3_PITEDS OMAP3_PITEDS 2152
+bm150r MACH_BM150R BM150R 2153
+tbone MACH_TBONE TBONE 2154
+merlin MACH_MERLIN MERLIN 2155
+falcon MACH_FALCON FALCON 2156
+davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157
+s5p6440 MACH_S5P6440 S5P6440 2158
+at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159
+omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
+lpc313x MACH_LPC313X LPC313X 2161
+magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
+magx_em30 MACH_MAGX_EM30 MAGX_EM30 2163
+magx_ve66 MACH_MAGX_VE66 MAGX_VE66 2164
+meesc MACH_MEESC MEESC 2165
+otc570 MACH_OTC570 OTC570 2166
+bcu2412 MACH_BCU2412 BCU2412 2167
+beacon MACH_BEACON BEACON 2168
+actia_tgw MACH_ACTIA_TGW ACTIA_TGW 2169
+e4430 MACH_E4430 E4430 2170
+ql300 MACH_QL300 QL300 2171
+btmavb101 MACH_BTMAVB101 BTMAVB101 2172
+btmawb101 MACH_BTMAWB101 BTMAWB101 2173
+sq201 MACH_SQ201 SQ201 2174
+quatro45xx MACH_QUATRO45XX QUATRO45XX 2175
+openpad MACH_OPENPAD OPENPAD 2176
+tx25 MACH_TX25 TX25 2177
+omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
+htcraphael_k MACH_HTCRAPHAEL_K HTCRAPHAEL_K 2179
+lal43 MACH_LAL43 LAL43 2181
+htcraphael_cdma500 MACH_HTCRAPHAEL_CDMA500 HTCRAPHAEL_CDMA500 2182
+anw6410 MACH_ANW6410 ANW6410 2183
+htcprophet MACH_HTCPROPHET HTCPROPHET 2185
+cfa_10022 MACH_CFA_10022 CFA_10022 2186
+imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
+px2imx27 MACH_PX2IMX27 PX2IMX27 2188
+stm3210e_eval MACH_STM3210E_EVAL STM3210E_EVAL 2189
+dvs10 MACH_DVS10 DVS10 2190
+portuxg20 MACH_PORTUXG20 PORTUXG20 2191
+arm_spv MACH_ARM_SPV ARM_SPV 2192
+smdkc110 MACH_SMDKC110 SMDKC110 2193
+cabespresso MACH_CABESPRESSO CABESPRESSO 2194
+hmc800 MACH_HMC800 HMC800 2195
+sholes MACH_SHOLES SHOLES 2196
+btmxc31 MACH_BTMXC31 BTMXC31 2197
+dt501 MACH_DT501 DT501 2198
+ktx MACH_KTX KTX 2199
+omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200
+netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201
+netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202
+d2net_v2 MACH_D2NET_V2 D2NET_V2 2203
+net2big_v2 MACH_NET2BIG_V2 NET2BIG_V2 2204
+net4big_v2 MACH_NET4BIG_V2 NET4BIG_V2 2205
+net5big_v2 MACH_NET5BIG_V2 NET5BIG_V2 2206
+endb2443 MACH_ENDB2443 ENDB2443 2207
+inetspace_v2 MACH_INETSPACE_V2 INETSPACE_V2 2208
+tros MACH_TROS TROS 2209
+pelco_homer MACH_PELCO_HOMER PELCO_HOMER 2210
+ofsp8 MACH_OFSP8 OFSP8 2211
+at91sam9g45ekes MACH_AT91SAM9G45EKES AT91SAM9G45EKES 2212
+guf_cupid MACH_GUF_CUPID GUF_CUPID 2213
+eab1r MACH_EAB1R EAB1R 2214
+desirec MACH_DESIREC DESIREC 2215
+cordoba MACH_CORDOBA CORDOBA 2216
+irvine MACH_IRVINE IRVINE 2217
+sff772 MACH_SFF772 SFF772 2218
+pelco_milano MACH_PELCO_MILANO PELCO_MILANO 2219
+pc7302 MACH_PC7302 PC7302 2220
+bip6000 MACH_BIP6000 BIP6000 2221
+silvermoon MACH_SILVERMOON SILVERMOON 2222
+vc0830 MACH_VC0830 VC0830 2223
+dt430 MACH_DT430 DT430 2224
+ji42pf MACH_JI42PF JI42PF 2225
+gnet_ksm MACH_GNET_KSM GNET_KSM 2226
+gnet_sgm MACH_GNET_SGM GNET_SGM 2227
+gnet_sgr MACH_GNET_SGR GNET_SGR 2228
+omap3_icetekevm MACH_OMAP3_ICETEKEVM OMAP3_ICETEKEVM 2229
+pnp MACH_PNP PNP 2230
+ctera_2bay_k MACH_CTERA_2BAY_K CTERA_2BAY_K 2231
+ctera_2bay_u MACH_CTERA_2BAY_U CTERA_2BAY_U 2232
+sas_c MACH_SAS_C SAS_C 2233
+vma2315 MACH_VMA2315 VMA2315 2234
+vcs MACH_VCS VCS 2235
+spear600 MACH_SPEAR600 SPEAR600 2236
+spear300 MACH_SPEAR300 SPEAR300 2237
+spear1300 MACH_SPEAR1300 SPEAR1300 2238
+lilly1131 MACH_LILLY1131 LILLY1131 2239
+arvoo_ax301 MACH_ARVOO_AX301 ARVOO_AX301 2240
+mapphone MACH_MAPPHONE MAPPHONE 2241
+legend MACH_LEGEND LEGEND 2242
+salsa MACH_SALSA SALSA 2243
+lounge MACH_LOUNGE LOUNGE 2244
+vision MACH_VISION VISION 2245
+vmb20 MACH_VMB20 VMB20 2246
+hy2410 MACH_HY2410 HY2410 2247
+hy9315 MACH_HY9315 HY9315 2248
+bullwinkle MACH_BULLWINKLE BULLWINKLE 2249
+arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250
+vs_v210 MACH_VS_V210 VS_V210 2252
+vs_v212 MACH_VS_V212 VS_V212 2253
+hmt MACH_HMT HMT 2254
+km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255
+vesper MACH_VESPER VESPER 2256
+str9 MACH_STR9 STR9 2257
+omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
+simcom MACH_SIMCOM SIMCOM 2259
+mcwebio MACH_MCWEBIO MCWEBIO 2260
+omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_PHRAZER 2261
+darwin MACH_DARWIN DARWIN 2262
+oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
+rtsbc20 MACH_RTSBC20 RTSBC20 2264
+sgh_i780 MACH_I780 I780 2265
+gemini324 MACH_GEMINI324 GEMINI324 2266
+oratislan MACH_ORATISLAN ORATISLAN 2267
+oratisalog MACH_ORATISALOG ORATISALOG 2268
+oratismadi MACH_ORATISMADI ORATISMADI 2269
+oratisot16 MACH_ORATISOT16 ORATISOT16 2270
+oratisdesk MACH_ORATISDESK ORATISDESK 2271
+vexpress MACH_VEXPRESS VEXPRESS 2272
+sintexo MACH_SINTEXO SINTEXO 2273
+cm3389 MACH_CM3389 CM3389 2274
+omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
+sgh_i900 MACH_SGH_I900 SGH_I900 2276
+bst100 MACH_BST100 BST100 2277
+passion MACH_PASSION PASSION 2278
+indesign_at91sam MACH_INDESIGN_AT91SAM INDESIGN_AT91SAM 2279
+c4_badger MACH_C4_BADGER C4_BADGER 2280
+c4_viper MACH_C4_VIPER C4_VIPER 2281
+d2net MACH_D2NET D2NET 2282
+bigdisk MACH_BIGDISK BIGDISK 2283
+notalvision MACH_NOTALVISION NOTALVISION 2284
+omap3_kboc MACH_OMAP3_KBOC OMAP3_KBOC 2285
+cyclone MACH_CYCLONE CYCLONE 2286
+ninja MACH_NINJA NINJA 2287
+at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
+bcmring MACH_BCMRING BCMRING 2289
+resol_dl2 MACH_RESOL_DL2 RESOL_DL2 2290
+ifosw MACH_IFOSW IFOSW 2291
+htcrhodium MACH_HTCRHODIUM HTCRHODIUM 2292
+htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293
+matrix504 MACH_MATRIX504 MATRIX504 2294
+mrfsa MACH_MRFSA MRFSA 2295
+sc_p270 MACH_SC_P270 SC_P270 2296
+atlas5_evb MACH_ATLAS5_EVB ATLAS5_EVB 2297
+pelco_lobox MACH_PELCO_LOBOX PELCO_LOBOX 2298
+dilax_pcu200 MACH_DILAX_PCU200 DILAX_PCU200 2299
+leonardo MACH_LEONARDO LEONARDO 2300
+zoran_approach7 MACH_ZORAN_APPROACH7 ZORAN_APPROACH7 2301
+dp6xx MACH_DP6XX DP6XX 2302
+bcm2153_vesper MACH_BCM2153_VESPER BCM2153_VESPER 2303
+mahimahi MACH_MAHIMAHI MAHIMAHI 2304
+clickc MACH_CLICKC CLICKC 2305
+zb_gateway MACH_ZB_GATEWAY ZB_GATEWAY 2306
+tazcard MACH_TAZCARD TAZCARD 2307
+tazdev MACH_TAZDEV TAZDEV 2308
+annax_cb_arm MACH_ANNAX_CB_ARM ANNAX_CB_ARM 2309
+annax_dm3 MACH_ANNAX_DM3 ANNAX_DM3 2310
+cerebric MACH_CEREBRIC CEREBRIC 2311
+orca MACH_ORCA ORCA 2312
+pc9260 MACH_PC9260 PC9260 2313
+ems285a MACH_EMS285A EMS285A 2314
+gec2410 MACH_GEC2410 GEC2410 2315
+gec2440 MACH_GEC2440 GEC2440 2316
+mw903 MACH_ARCH_MW903 ARCH_MW903 2317
+mw2440 MACH_MW2440 MW2440 2318
+ecac2378 MACH_ECAC2378 ECAC2378 2319
+tazkiosk MACH_TAZKIOSK TAZKIOSK 2320
+whiterabbit_mch MACH_WHITERABBIT_MCH WHITERABBIT_MCH 2321
+sbox9263 MACH_SBOX9263 SBOX9263 2322
+smdk6442 MACH_SMDK6442 SMDK6442 2324
+openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325
+incredible MACH_INCREDIBLE INCREDIBLE 2326
+incrediblec MACH_INCREDIBLEC INCREDIBLEC 2327
+heroct MACH_HEROCT HEROCT 2328
+mmnet1000 MACH_MMNET1000 MMNET1000 2329
+devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330
+devkit9000 MACH_DEVKIT9000 DEVKIT9000 2331
+mx31txtr MACH_MX31TXTR MX31TXTR 2332
+u380 MACH_U380 U380 2333
+oamp3_hualu MACH_HUALU_BOARD HUALU_BOARD 2334
+npcmx50 MACH_NPCMX50 NPCMX50 2335
+mx51_efikamx MACH_MX51_EFIKAMX MX51_EFIKAMX 2336
+mx51_lange52 MACH_MX51_LANGE52 MX51_LANGE52 2337
+riom MACH_RIOM RIOM 2338
+comcas MACH_COMCAS COMCAS 2339
+wsi_mx27 MACH_WSI_MX27 WSI_MX27 2340
+cm_t35 MACH_CM_T35 CM_T35 2341
+net2big MACH_NET2BIG NET2BIG 2342
+motorola_a1600 MACH_MOTOROLA_A1600 MOTOROLA_A1600 2343
+igep0020 MACH_IGEP0020 IGEP0020 2344
+igep0010 MACH_IGEP0010 IGEP0010 2345
+mv6281gtwge2 MACH_MV6281GTWGE2 MV6281GTWGE2 2346
+scat100 MACH_SCAT100 SCAT100 2347
+sanmina MACH_SANMINA SANMINA 2348
+momento MACH_MOMENTO MOMENTO 2349
+nuc9xx MACH_NUC9XX NUC9XX 2350
+nuc910evb MACH_NUC910EVB NUC910EVB 2351
+nuc920evb MACH_NUC920EVB NUC920EVB 2352
+nuc950evb MACH_NUC950EVB NUC950EVB 2353
+nuc945evb MACH_NUC945EVB NUC945EVB 2354
+nuc960evb MACH_NUC960EVB NUC960EVB 2355
+nuc932evb MACH_NUC932EVB NUC932EVB 2356
+nuc900 MACH_NUC900 NUC900 2357
+sd1soc MACH_SD1SOC SD1SOC 2358
+ln2440bc MACH_LN2440BC LN2440BC 2359
+rsbc MACH_RSBC RSBC 2360
+openrd_client MACH_OPENRD_CLIENT OPENRD_CLIENT 2361
+hpipaq11x MACH_HPIPAQ11X HPIPAQ11X 2362
+wayland MACH_WAYLAND WAYLAND 2363
+acnbsx102 MACH_ACNBSX102 ACNBSX102 2364
+hwat91 MACH_HWAT91 HWAT91 2365
+at91sam9263cs MACH_AT91SAM9263CS AT91SAM9263CS 2366
+csb732 MACH_CSB732 CSB732 2367
+u8500 MACH_U8500 U8500 2368
+huqiu MACH_HUQIU HUQIU 2369
+mx51_efikasb MACH_MX51_EFIKASB MX51_EFIKASB 2370
+pmt1g MACH_PMT1G PMT1G 2371
+htcelf MACH_HTCELF HTCELF 2372
+armadillo420 MACH_ARMADILLO420 ARMADILLO420 2373
+armadillo440 MACH_ARMADILLO440 ARMADILLO440 2374
+u_chip_dual_arm MACH_U_CHIP_DUAL_ARM U_CHIP_DUAL_ARM 2375
+csr_bdb3 MACH_CSR_BDB3 CSR_BDB3 2376
+dolby_cat1018 MACH_DOLBY_CAT1018 DOLBY_CAT1018 2377
+hy9307 MACH_HY9307 HY9307 2378
+aspire_easystore MACH_A_ES A_ES 2379
+davinci_irif MACH_DAVINCI_IRIF DAVINCI_IRIF 2380
+agama9263 MACH_AGAMA9263 AGAMA9263 2381
+marvell_jasper MACH_MARVELL_JASPER MARVELL_JASPER 2382
+flint MACH_FLINT FLINT 2383
+tavorevb3 MACH_TAVOREVB3 TAVOREVB3 2384
+sch_m490 MACH_SCH_M490 SCH_M490 2386
+rbl01 MACH_RBL01 RBL01 2387
+omnifi MACH_OMNIFI OMNIFI 2388
+otavalo MACH_OTAVALO OTAVALO 2389
+htc_excalibur_s620 MACH_HTC_EXCALIBUR_S620 HTC_EXCALIBUR_S620 2391
+htc_opal MACH_HTC_OPAL HTC_OPAL 2392
+touchbook MACH_TOUCHBOOK TOUCHBOOK 2393
+latte MACH_LATTE LATTE 2394
+xa200 MACH_XA200 XA200 2395
+nimrod MACH_NIMROD NIMROD 2396
+cc9p9215_3g MACH_CC9P9215_3G CC9P9215_3G 2397
+cc9p9215_3gjs MACH_CC9P9215_3GJS CC9P9215_3GJS 2398
+tk71 MACH_TK71 TK71 2399
+comham3525 MACH_COMHAM3525 COMHAM3525 2400
+mx31erebus MACH_MX31EREBUS MX31EREBUS 2401
+mcardmx27 MACH_MCARDMX27 MCARDMX27 2402
+paradise MACH_PARADISE PARADISE 2403
+tide MACH_TIDE TIDE 2404
+wzl2440 MACH_WZL2440 WZL2440 2405
+sdrdemo MACH_SDRDEMO SDRDEMO 2406
+ethercan2 MACH_ETHERCAN2 ETHERCAN2 2407
+ecmimg20 MACH_ECMIMG20 ECMIMG20 2408
+omap_dragon MACH_OMAP_DRAGON OMAP_DRAGON 2409
+halo MACH_HALO HALO 2410
+huangshan MACH_HUANGSHAN HUANGSHAN 2411
+vl_ma2sc MACH_VL_MA2SC VL_MA2SC 2412
+raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
+raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
+raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
+multibus_master MACH_MULTIBUS_MASTER MULTIBUS_MASTER 2416
+multibus_pbk MACH_MULTIBUS_PBK MULTIBUS_PBK 2417
+tnetv107x MACH_TNETV107X TNETV107X 2418
+snake MACH_SNAKE SNAKE 2419
+cwmx27 MACH_CWMX27 CWMX27 2420
+sch_m480 MACH_SCH_M480 SCH_M480 2421
+platypus MACH_PLATYPUS PLATYPUS 2422
+pss2 MACH_PSS2 PSS2 2423
+davinci_apm150 MACH_DAVINCI_APM150 DAVINCI_APM150 2424
+str9100 MACH_STR9100 STR9100 2425
+net5big MACH_NET5BIG NET5BIG 2426
+seabed9263 MACH_SEABED9263 SEABED9263 2427
+mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
+octvocplus_eb MACH_OCTVOCPLUS_EB OCTVOCPLUS_EB 2429
+klk_firefox MACH_KLK_FIREFOX KLK_FIREFOX 2430
+klk_wirma_module MACH_KLK_WIRMA_MODULE KLK_WIRMA_MODULE 2431
+klk_wirma_mmi MACH_KLK_WIRMA_MMI KLK_WIRMA_MMI 2432
+supersonic MACH_SUPERSONIC SUPERSONIC 2433
+liberty MACH_LIBERTY LIBERTY 2434
+mh355 MACH_MH355 MH355 2435
+pc7802 MACH_PC7802 PC7802 2436
+gnet_sgc MACH_GNET_SGC GNET_SGC 2437
+einstein15 MACH_EINSTEIN15 EINSTEIN15 2438
+cmpd MACH_CMPD CMPD 2439
+davinci_hase1 MACH_DAVINCI_HASE1 DAVINCI_HASE1 2440
+lgeincitephone MACH_LGEINCITEPHONE LGEINCITEPHONE 2441
+ea313x MACH_EA313X EA313X 2442
+fwbd_39064 MACH_FWBD_39064 FWBD_39064 2443
+fwbd_390128 MACH_FWBD_390128 FWBD_390128 2444
+pelco_moe MACH_PELCO_MOE PELCO_MOE 2445
+minimix27 MACH_MINIMIX27 MINIMIX27 2446
+omap3_thunder MACH_OMAP3_THUNDER OMAP3_THUNDER 2447
+passionc MACH_PASSIONC PASSIONC 2448
+mx27amata MACH_MX27AMATA MX27AMATA 2449
+bgat1 MACH_BGAT1 BGAT1 2450
+buzz MACH_BUZZ BUZZ 2451
+mb9g20 MACH_MB9G20 MB9G20 2452
+yushan MACH_YUSHAN YUSHAN 2453
+lizard MACH_LIZARD LIZARD 2454
+omap3polycom MACH_OMAP3POLYCOM OMAP3POLYCOM 2455
+smdkv210 MACH_SMDKV210 SMDKV210 2456
+bravo MACH_BRAVO BRAVO 2457
+siogentoo1 MACH_SIOGENTOO1 SIOGENTOO1 2458
+siogentoo2 MACH_SIOGENTOO2 SIOGENTOO2 2459
+sm3k MACH_SM3K SM3K 2460
+acer_tempo_f900 MACH_ACER_TEMPO_F900 ACER_TEMPO_F900 2461
+glittertind MACH_GLITTERTIND GLITTERTIND 2463
+omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
+omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
+cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
+torino_s MACH_TORINO_S TORINO_S 2467
+havana MACH_HAVANA HAVANA 2468
+beaumont_11 MACH_BEAUMONT_11 BEAUMONT_11 2469
+vanguard MACH_VANGUARD VANGUARD 2470
+s5pc110_draco MACH_S5PC110_DRACO S5PC110_DRACO 2471
+cartesio_two MACH_CARTESIO_TWO CARTESIO_TWO 2472
+aster MACH_ASTER ASTER 2473
+voguesv210 MACH_VOGUESV210 VOGUESV210 2474
+acm500x MACH_ACM500X ACM500X 2475
+km9260 MACH_KM9260 KM9260 2476
+nideflexg1 MACH_NIDEFLEXG1 NIDEFLEXG1 2477
+ctera_plug_io MACH_CTERA_PLUG_IO CTERA_PLUG_IO 2478
+smartq7 MACH_SMARTQ7 SMARTQ7 2479
+at91sam9g10ek2 MACH_AT91SAM9G10EK2 AT91SAM9G10EK2 2480
+asusp527 MACH_ASUSP527 ASUSP527 2481
+at91sam9g20mpm2 MACH_AT91SAM9G20MPM2 AT91SAM9G20MPM2 2482
+topasa900 MACH_TOPASA900 TOPASA900 2483
+electrum_100 MACH_ELECTRUM_100 ELECTRUM_100 2484
+mx51grb MACH_MX51GRB MX51GRB 2485
+xea300 MACH_XEA300 XEA300 2486
+htcstartrek MACH_HTCSTARTREK HTCSTARTREK 2487
+lima MACH_LIMA LIMA 2488
+csb740 MACH_CSB740 CSB740 2489
+usb_s8815 MACH_USB_S8815 USB_S8815 2490
+watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
+milkyway MACH_MILKYWAY MILKYWAY 2492
+g4evm MACH_G4EVM G4EVM 2493
+picomod6 MACH_PICOMOD6 PICOMOD6 2494
+omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
+ip6000 MACH_IP6000 IP6000 2496
+ip6010 MACH_IP6010 IP6010 2497
+utm400 MACH_UTM400 UTM400 2498
+omap3_zybex MACH_OMAP3_ZYBEX OMAP3_ZYBEX 2499
+wireless_space MACH_WIRELESS_SPACE WIRELESS_SPACE 2500
+sx560 MACH_SX560 SX560 2501
+ts41x MACH_TS41X TS41X 2502
+elphel10373 MACH_ELPHEL10373 ELPHEL10373 2503
+rhobot MACH_RHOBOT RHOBOT 2504
+mx51_refresh MACH_MX51_REFRESH MX51_REFRESH 2505
+ls9260 MACH_LS9260 LS9260 2506
+shank MACH_SHANK SHANK 2507
+qsd8x50_st1 MACH_QSD8X50_ST1 QSD8X50_ST1 2508
+at91sam9m10ekes MACH_AT91SAM9M10EKES AT91SAM9M10EKES 2509
+hiram MACH_HIRAM HIRAM 2510
+phy3250 MACH_PHY3250 PHY3250 2511
+ea3250 MACH_EA3250 EA3250 2512
+fdi3250 MACH_FDI3250 FDI3250 2513
+at91sam9263nit MACH_AT91SAM9263NIT AT91SAM9263NIT 2515
+ccmx51 MACH_CCMX51 CCMX51 2516
+ccmx51js MACH_CCMX51JS CCMX51JS 2517
+ccwmx51 MACH_CCWMX51 CCWMX51 2518
+ccwmx51js MACH_CCWMX51JS CCWMX51JS 2519
+mini6410 MACH_MINI6410 MINI6410 2520
+tiny6410 MACH_TINY6410 TINY6410 2521
+nano6410 MACH_NANO6410 NANO6410 2522
+at572d940hfnldb MACH_AT572D940HFNLDB AT572D940HFNLDB 2523
+htcleo MACH_HTCLEO HTCLEO 2524
+avp13 MACH_AVP13 AVP13 2525
+xxsvideod MACH_XXSVIDEOD XXSVIDEOD 2526
+vpnext MACH_VPNEXT VPNEXT 2527
+swarco_itc3 MACH_SWARCO_ITC3 SWARCO_ITC3 2528
+tx51 MACH_TX51 TX51 2529
+dolby_cat1021 MACH_DOLBY_CAT1021 DOLBY_CAT1021 2530
+mx28evk MACH_MX28EVK MX28EVK 2531
+phoenix260 MACH_PHOENIX260 PHOENIX260 2532
+uvaca_stork MACH_UVACA_STORK UVACA_STORK 2533
+smartq5 MACH_SMARTQ5 SMARTQ5 2534
+all3078 MACH_ALL3078 ALL3078 2535
+ctera_2bay_ds MACH_CTERA_2BAY_DS CTERA_2BAY_DS 2536
+siogentoo3 MACH_SIOGENTOO3 SIOGENTOO3 2537
+epb5000 MACH_EPB5000 EPB5000 2538
+hy9263 MACH_HY9263 HY9263 2539
+acer_tempo_m900 MACH_ACER_TEMPO_M900 ACER_TEMPO_M900 2540
+acer_tempo_dx650 MACH_ACER_TEMPO_DX900 ACER_TEMPO_DX900 2541
+acer_tempo_x960 MACH_ACER_TEMPO_X960 ACER_TEMPO_X960 2542
+acer_eten_v900 MACH_ACER_ETEN_V900 ACER_ETEN_V900 2543
+acer_eten_x900 MACH_ACER_ETEN_X900 ACER_ETEN_X900 2544
+bonnell MACH_BONNELL BONNELL 2545
+oht_mx27 MACH_OHT_MX27 OHT_MX27 2546
+htcquartz MACH_HTCQUARTZ HTCQUARTZ 2547
+davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
+c3ax03 MACH_C3AX03 C3AX03 2549
+mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
+esyx MACH_ESYX ESYX 2551
+dove_db2 MACH_DOVE_DB2 DOVE_DB2 2552
+bulldog MACH_BULLDOG BULLDOG 2553
+derell_me2000 MACH_DERELL_ME2000 DERELL_ME2000 2554
+bcmring_base MACH_BCMRING_BASE BCMRING_BASE 2555
+bcmring_evm MACH_BCMRING_EVM BCMRING_EVM 2556
+bcmring_evm_jazz MACH_BCMRING_EVM_JAZZ BCMRING_EVM_JAZZ 2557
+bcmring_sp MACH_BCMRING_SP BCMRING_SP 2558
+bcmring_sv MACH_BCMRING_SV BCMRING_SV 2559
+bcmring_sv_jazz MACH_BCMRING_SV_JAZZ BCMRING_SV_JAZZ 2560
+bcmring_tablet MACH_BCMRING_TABLET BCMRING_TABLET 2561
+bcmring_vp MACH_BCMRING_VP BCMRING_VP 2562
+bcmring_evm_seikor MACH_BCMRING_EVM_SEIKOR BCMRING_EVM_SEIKOR 2563
+bcmring_sp_wqvga MACH_BCMRING_SP_WQVGA BCMRING_SP_WQVGA 2564
+bcmring_custom MACH_BCMRING_CUSTOM BCMRING_CUSTOM 2565
+acer_s200 MACH_ACER_S200 ACER_S200 2566
+bt270 MACH_BT270 BT270 2567
+iseo MACH_ISEO ISEO 2568
+cezanne MACH_CEZANNE CEZANNE 2569
+lucca MACH_LUCCA LUCCA 2570
+supersmart MACH_SUPERSMART SUPERSMART 2571
+arm11_board MACH_CS_MISANO CS_MISANO 2572
+magnolia2 MACH_MAGNOLIA2 MAGNOLIA2 2573
+emxx MACH_EMXX EMXX 2574
+outlaw MACH_OUTLAW OUTLAW 2575
+riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576
+riot_gx2 MACH_RIOT_VOX RIOT_VOX 2577
+riot_x37 MACH_RIOT_X37 RIOT_X37 2578
+mega25mx MACH_MEGA25MX MEGA25MX 2579
+benzina2 MACH_BENZINA2 BENZINA2 2580
+ignite MACH_IGNITE IGNITE 2581
+foggia MACH_FOGGIA FOGGIA 2582
+arezzo MACH_AREZZO AREZZO 2583
+leica_skywalker MACH_LEICA_SKYWALKER LEICA_SKYWALKER 2584
+jacinto2_jamr MACH_JACINTO2_JAMR JACINTO2_JAMR 2585
+gts_nova MACH_GTS_NOVA GTS_NOVA 2586
+p3600 MACH_P3600 P3600 2587
+dlt2 MACH_DLT2 DLT2 2588
+df3120 MACH_DF3120 DF3120 2589
+ecucore_9g20 MACH_ECUCORE_9G20 ECUCORE_9G20 2590
+nautel_am35xx MACH_NAUTEL_LPC3240 NAUTEL_LPC3240 2591
+glacier MACH_GLACIER GLACIER 2592
+phrazer_bulldog MACH_PHRAZER_BULLDOG PHRAZER_BULLDOG 2593
+omap3_bulldog MACH_OMAP3_BULLDOG OMAP3_BULLDOG 2594
+pca101 MACH_PCA101 PCA101 2595
+buzzc MACH_BUZZC BUZZC 2596
+sasie2 MACH_SASIE2 SASIE2 2597
+smartmeter_dl MACH_SMARTMETER_DL SMARTMETER_DL 2599
+wzl6410 MACH_WZL6410 WZL6410 2600
+wzl6410m MACH_WZL6410M WZL6410M 2601
+wzl6410f MACH_WZL6410F WZL6410F 2602
+wzl6410i MACH_WZL6410I WZL6410I 2603
+spacecom1 MACH_SPACECOM1 SPACECOM1 2604
+pingu920 MACH_PINGU920 PINGU920 2605
+bravoc MACH_BRAVOC BRAVOC 2606
+vdssw MACH_VDSSW VDSSW 2608
+romulus MACH_ROMULUS ROMULUS 2609
+omap_magic MACH_OMAP_MAGIC OMAP_MAGIC 2610
+eltd100 MACH_ELTD100 ELTD100 2611
+capc7117 MACH_CAPC7117 CAPC7117 2612
+swan MACH_SWAN SWAN 2613
+veu MACH_VEU VEU 2614
+rm2 MACH_RM2 RM2 2615
+tt2100 MACH_TT2100 TT2100 2616
+venice MACH_VENICE VENICE 2617
+pc7323 MACH_PC7323 PC7323 2618
+masp MACH_MASP MASP 2619
+fujitsu_tvstbsoc0 MACH_FUJITSU_TVSTBSOC FUJITSU_TVSTBSOC 2620
+fujitsu_tvstbsoc1 MACH_FUJITSU_TVSTBSOC1 FUJITSU_TVSTBSOC1 2621
+lexikon MACH_LEXIKON LEXIKON 2622
+mini2440v2 MACH_MINI2440V2 MINI2440V2 2623
+icontrol MACH_ICONTROL ICONTROL 2624
+gplugd MACH_GPLUGD GPLUGD 2625
+qsd8x50a_st1_1 MACH_QSD8X50A_ST1_1 QSD8X50A_ST1_1 2626
+qsd8x50a_st1_5 MACH_QSD8X50A_ST1_5 QSD8X50A_ST1_5 2627
+bee MACH_BEE BEE 2628
+mx23evk MACH_MX23EVK MX23EVK 2629
+ap4evb MACH_AP4EVB AP4EVB 2630
+stockholm MACH_STOCKHOLM STOCKHOLM 2631
+lpc_h3131 MACH_LPC_H3131 LPC_H3131 2632
+stingray MACH_STINGRAY STINGRAY 2633
+kraken MACH_KRAKEN KRAKEN 2634
+gw2388 MACH_GW2388 GW2388 2635
+jadecpu MACH_JADECPU JADECPU 2636
+carlisle MACH_CARLISLE CARLISLE 2637
+lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
+nemid_tb MACH_NEMID_TB NEMID_TB 2639
+terrier MACH_TERRIER TERRIER 2640
+turbot MACH_TURBOT TURBOT 2641
+sanddab MACH_SANDDAB SANDDAB 2642
+mx35_cicada MACH_MX35_CICADA MX35_CICADA 2643
+ghi2703d MACH_GHI2703D GHI2703D 2644
+lux_sfx9 MACH_LUX_SFX9 LUX_SFX9 2645
+lux_sf9g MACH_LUX_SF9G LUX_SF9G 2646
+lux_edk9 MACH_LUX_EDK9 LUX_EDK9 2647
+hw90240 MACH_HW90240 HW90240 2648
+dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
+mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
+scat110 MACH_SCAT110 SCAT110 2651
+acer_a1 MACH_ACER_A1 ACER_A1 2652
+cmcontrol MACH_CMCONTROL CMCONTROL 2653
+pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
+rfp43 MACH_RFP43 RFP43 2655
+sk86r0301 MACH_SK86R0301 SK86R0301 2656
+ctpxa MACH_CTPXA CTPXA 2657
+epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
+guruplug MACH_GURUPLUG GURUPLUG 2659
+spear310 MACH_SPEAR310 SPEAR310 2660
+spear320 MACH_SPEAR320 SPEAR320 2661
+robotx MACH_ROBOTX ROBOTX 2662
+lsxhl MACH_LSXHL LSXHL 2663
+smartlite MACH_SMARTLITE SMARTLITE 2664
+cws2 MACH_CWS2 CWS2 2665
+m619 MACH_M619 M619 2666
+smartview MACH_SMARTVIEW SMARTVIEW 2667
+lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
+kizbox MACH_KIZBOX KIZBOX 2669
+htccharmer MACH_HTCCHARMER HTCCHARMER 2670
+guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
+pm9g45 MACH_PM9G45 PM9G45 2672
+htcpanther MACH_HTCPANTHER HTCPANTHER 2673
+htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
+reb01 MACH_REB01 REB01 2675
+aquila MACH_AQUILA AQUILA 2676
+spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
+esata_sheevaplug MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
+msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679
+micro2440 MACH_MICRO2440 MICRO2440 2680
+am2440 MACH_AM2440 AM2440 2681
+tq2440 MACH_TQ2440 TQ2440 2682
+ea2478devkit MACH_EA2478DEVKIT EA2478DEVKIT 2683
+ak880x MACH_AK880X AK880X 2684
+cobra3530 MACH_COBRA3530 COBRA3530 2685
+pmppb MACH_PMPPB PMPPB 2686
+u6715 MACH_U6715 U6715 2687
+axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
+g30_dvb MACH_G30_DVB G30_DVB 2689
+vc088x MACH_VC088X VC088X 2690
+mioa702 MACH_MIOA702 MIOA702 2691
+hpmin MACH_HPMIN HPMIN 2692
+ak880xak MACH_AK880XAK AK880XAK 2693
+arm926tomap850 MACH_ARM926TOMAP850 ARM926TOMAP850 2694
+lkevm MACH_LKEVM LKEVM 2695
+mw6410 MACH_MW6410 MW6410 2696
+terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697
+cpu8000e MACH_CPU8000E CPU8000E 2698
+tokyo MACH_TOKYO TOKYO 2700
+msm7201a_surf MACH_MSM7201A_SURF MSM7201A_SURF 2701
+msm7201a_ffa MACH_MSM7201A_FFA MSM7201A_FFA 2702
+msm7x25_surf MACH_MSM7X25_SURF MSM7X25_SURF 2703
+msm7x25_ffa MACH_MSM7X25_FFA MSM7X25_FFA 2704
+msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705
+msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706
+msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707
+qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708
+qsd8x50_comet MACH_QSD8X50_COMET QSD8X50_COMET 2709
+qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710
+qsd8x50a_surf MACH_QSD8X50A_SURF QSD8X50A_SURF 2711
+qsd8x50a_ffa MACH_QSD8X50A_FFA QSD8X50A_FFA 2712
+adx_xgcp10 MACH_ADX_XGCP10 ADX_XGCP10 2713
+mcgwumts2a MACH_MCGWUMTS2A MCGWUMTS2A 2714
+mobikt MACH_MOBIKT MOBIKT 2715
+mx53_evk MACH_MX53_EVK MX53_EVK 2716
+igep0030 MACH_IGEP0030 IGEP0030 2717
+axell_h40_h50_ctrl MACH_AXELL_H40_H50_CTRL AXELL_H40_H50_CTRL 2718
+dtcommod MACH_DTCOMMOD DTCOMMOD 2719
+gould MACH_GOULD GOULD 2720
+siberia MACH_SIBERIA SIBERIA 2721
+sbc3530 MACH_SBC3530 SBC3530 2722
+qarm MACH_QARM QARM 2723
+mips MACH_MIPS MIPS 2724
+mx27grb MACH_MX27GRB MX27GRB 2725
+sbc8100 MACH_SBC8100 SBC8100 2726
+saarb MACH_SAARB SAARB 2727
+omap3mini MACH_OMAP3MINI OMAP3MINI 2728
+cnmbook7se MACH_CNMBOOK7SE CNMBOOK7SE 2729
+catan MACH_CATAN CATAN 2730
+harmony MACH_HARMONY HARMONY 2731
+tonga MACH_TONGA TONGA 2732
+cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733
+htcrhodiumcdma MACH_HTCRHODIUMCDMA HTCRHODIUMCDMA 2734
+epc_g45 MACH_EPC_G45 EPC_G45 2735
+epc_lpc3250 MACH_EPC_LPC3250 EPC_LPC3250 2736
+mxc91341evb MACH_MXC91341EVB MXC91341EVB 2737
+rtw1000 MACH_RTW1000 RTW1000 2738
+bobcat MACH_BOBCAT BOBCAT 2739
+trizeps6 MACH_TRIZEPS6 TRIZEPS6 2740
+msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741
+nedap9263 MACH_NEDAP9263 NEDAP9263 2742
+netgear_ms2110 MACH_NETGEAR_MS2110 NETGEAR_MS2110 2743
+bmx MACH_BMX BMX 2744
+netstream MACH_NETSTREAM NETSTREAM 2745
+vpnext_rcu MACH_VPNEXT_RCU VPNEXT_RCU 2746
+vpnext_mpu MACH_VPNEXT_MPU VPNEXT_MPU 2747
+bcmring_tablet_v1 MACH_BCMRING_TABLET_V1 BCMRING_TABLET_V1 2748
+sgarm10 MACH_SGARM10 SGARM10 2749
+cm_t3517 MACH_CM_T3517 CM_T3517 2750
+dig297 MACH_OMAP3_CPS OMAP3_CPS 2751
+axar1500_receiver MACH_AXAR1500_RECEIVER AXAR1500_RECEIVER 2752
+wbd222 MACH_WBD222 WBD222 2753
+mt65xx MACH_MT65XX MT65XX 2754
+msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755
+msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756
+tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758
+nanos MACH_NANOS NANOS 2759
+stamp9g10 MACH_STAMP9G10 STAMP9G10 2760
+stamp9g45 MACH_STAMP9G45 STAMP9G45 2761
+h6053 MACH_H6053 H6053 2762
+smint01 MACH_SMINT01 SMINT01 2763
+prtlvt2 MACH_PRTLVT2 PRTLVT2 2764
+ap420 MACH_AP420 AP420 2765
+davinci_dm365_fc MACH_DAVINCI_DM365_FC DAVINCI_DM365_FC 2767
+msm8x55_surf MACH_MSM8X55_SURF MSM8X55_SURF 2768
+msm8x55_ffa MACH_MSM8X55_FFA MSM8X55_FFA 2769
+esl_vamana MACH_ESL_VAMANA ESL_VAMANA 2770
+sbc35 MACH_SBC35 SBC35 2771
+mpx6446 MACH_MPX6446 MPX6446 2772
+oreo_controller MACH_OREO_CONTROLLER OREO_CONTROLLER 2773
+kopin_models MACH_KOPIN_MODELS KOPIN_MODELS 2774
+ttc_vision2 MACH_TTC_VISION2 TTC_VISION2 2775
+cns3420vb MACH_CNS3420VB CNS3420VB 2776
+lpc_evo MACH_LPC2 LPC2 2777
+olympus MACH_OLYMPUS OLYMPUS 2778
+vortex MACH_VORTEX VORTEX 2779
+s5pc200 MACH_S5PC200 S5PC200 2780
+ecucore_9263 MACH_ECUCORE_9263 ECUCORE_9263 2781
+smdkc200 MACH_SMDKC200 SMDKC200 2782
+emsiso_sx27 MACH_EMSISO_SX27 EMSISO_SX27 2783
+apx_som9g45_ek MACH_APX_SOM9G45_EK APX_SOM9G45_EK 2784
+songshan MACH_SONGSHAN SONGSHAN 2785
+tianshan MACH_TIANSHAN TIANSHAN 2786
+vpx500 MACH_VPX500 VPX500 2787
+am3517sam MACH_AM3517SAM AM3517SAM 2788
+skat91_sim508 MACH_SKAT91_SIM508 SKAT91_SIM508 2789
+skat91_s3e MACH_SKAT91_S3E SKAT91_S3E 2790
+omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791
+df7220 MACH_DF7220 DF7220 2792
+nemini MACH_NEMINI NEMINI 2793
+t8200 MACH_T8200 T8200 2794
+apf51 MACH_APF51 APF51 2795
+dr_rc_unit MACH_DR_RC_UNIT DR_RC_UNIT 2796
+bordeaux MACH_BORDEAUX BORDEAUX 2797
+catania_b MACH_CATANIA_B CATANIA_B 2798
+mx51_ocean MACH_MX51_OCEAN MX51_OCEAN 2799
+ti8168evm MACH_TI8168EVM TI8168EVM 2800
+neocoreomap MACH_NEOCOREOMAP NEOCOREOMAP 2801
+withings_wbp MACH_WITHINGS_WBP WITHINGS_WBP 2802
+dbps MACH_DBPS DBPS 2803
+pcbfp0001 MACH_PCBFP0001 PCBFP0001 2805
+speedy MACH_SPEEDY SPEEDY 2806
+chrysaor MACH_CHRYSAOR CHRYSAOR 2807
+tango MACH_TANGO TANGO 2808
+synology_dsx11 MACH_SYNOLOGY_DSX11 SYNOLOGY_DSX11 2809
+hanlin_v3ext MACH_HANLIN_V3EXT HANLIN_V3EXT 2810
+hanlin_v5 MACH_HANLIN_V5 HANLIN_V5 2811
+hanlin_v3plus MACH_HANLIN_V3PLUS HANLIN_V3PLUS 2812
+iriver_story MACH_IRIVER_STORY IRIVER_STORY 2813
+irex_iliad MACH_IREX_ILIAD IREX_ILIAD 2814
+irex_dr1000 MACH_IREX_DR1000 IREX_DR1000 2815
+teton_bga MACH_TETON_BGA TETON_BGA 2816
+snapper9g45 MACH_SNAPPER9G45 SNAPPER9G45 2817
+tam3517 MACH_TAM3517 TAM3517 2818
+pdc100 MACH_PDC100 PDC100 2819
+eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25SD EUKREA_CPUIMX25SD 2820
+eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
+eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
+eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
+p565 MACH_P565 P565 2824
+acer_a4 MACH_ACER_A4 ACER_A4 2825
+davinci_dm368_bip MACH_DAVINCI_DM368_BIP DAVINCI_DM368_BIP 2826
+eshare MACH_ESHARE ESHARE 2827
+wlbargn MACH_WLBARGN WLBARGN 2829
+bm170 MACH_BM170 BM170 2830
+netspace_mini_v2 MACH_NETSPACE_MINI_V2 NETSPACE_MINI_V2 2831
+netspace_plug_v2 MACH_NETSPACE_PLUG_V2 NETSPACE_PLUG_V2 2832
+siemens_l1 MACH_SIEMENS_L1 SIEMENS_L1 2833
+elv_lcu1 MACH_ELV_LCU1 ELV_LCU1 2834
+mcu1 MACH_MCU1 MCU1 2835
+omap3_tao3530 MACH_OMAP3_TAO3530 OMAP3_TAO3530 2836
+omap3_pcutouch MACH_OMAP3_PCUTOUCH OMAP3_PCUTOUCH 2837
+smdkc210 MACH_SMDKC210 SMDKC210 2838
+omap3_braillo MACH_OMAP3_BRAILLO OMAP3_BRAILLO 2839
+spyplug MACH_SPYPLUG SPYPLUG 2840
+ginger MACH_GINGER GINGER 2841
+tny_t3530 MACH_TNY_T3530 TNY_T3530 2842
+pcaal1 MACH_PCAAL1 PCAAL1 2843
+spade MACH_SPADE SPADE 2844
+mxc25_topaz MACH_MXC25_TOPAZ MXC25_TOPAZ 2845
+t5325 MACH_T5325 T5325 2846
+gw2361 MACH_GW2361 GW2361 2847
+elog MACH_ELOG ELOG 2848
+income MACH_INCOME INCOME 2849
+bcm589x MACH_BCM589X BCM589X 2850
+etna MACH_ETNA ETNA 2851
+hawks MACH_HAWKS HAWKS 2852
+meson MACH_MESON MESON 2853
+xsbase255 MACH_XSBASE255 XSBASE255 2854
+pvm2030 MACH_PVM2030 PVM2030 2855
+mioa502 MACH_MIOA502 MIOA502 2856
+vvbox_sdorig2 MACH_VVBOX_SDORIG2 VVBOX_SDORIG2 2857
+vvbox_sdlite2 MACH_VVBOX_SDLITE2 VVBOX_SDLITE2 2858
+vvbox_sdpro4 MACH_VVBOX_SDPRO4 VVBOX_SDPRO4 2859
+htc_spv_m700 MACH_HTC_SPV_M700 HTC_SPV_M700 2860
+mx257sx MACH_MX257SX MX257SX 2861
+goni MACH_GONI GONI 2862
+msm8x55_svlte_ffa MACH_MSM8X55_SVLTE_FFA MSM8X55_SVLTE_FFA 2863
+msm8x55_svlte_surf MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF 2864
+quickstep MACH_QUICKSTEP QUICKSTEP 2865
+dmw96 MACH_DMW96 DMW96 2866
+hammerhead MACH_HAMMERHEAD HAMMERHEAD 2867
+trident MACH_TRIDENT TRIDENT 2868
+lightning MACH_LIGHTNING LIGHTNING 2869
+iconnect MACH_ICONNECT ICONNECT 2870
+autobot MACH_AUTOBOT AUTOBOT 2871
+coconut MACH_COCONUT COCONUT 2872
+durian MACH_DURIAN DURIAN 2873
+cayenne MACH_CAYENNE CAYENNE 2874
+fuji MACH_FUJI FUJI 2875
+synology_6282 MACH_SYNOLOGY_6282 SYNOLOGY_6282 2876
+em1sy MACH_EM1SY EM1SY 2877
+m502 MACH_M502 M502 2878
+matrix518 MACH_MATRIX518 MATRIX518 2879
+tiny_gurnard MACH_TINY_GURNARD TINY_GURNARD 2880
+spear1310 MACH_SPEAR1310 SPEAR1310 2881
+bv07 MACH_BV07 BV07 2882
+mxt_td61 MACH_MXT_TD61 MXT_TD61 2883
+openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884
+devixp MACH_DEVIXP DEVIXP 2885
+miccpt MACH_MICCPT MICCPT 2886
+mic256 MACH_MIC256 MIC256 2887
+as1167 MACH_AS1167 AS1167 2888
+omap3_ibiza MACH_OMAP3_IBIZA OMAP3_IBIZA 2889
+u5500 MACH_U5500 U5500 2890
+davinci_picto MACH_DAVINCI_PICTO DAVINCI_PICTO 2891
+mecha MACH_MECHA MECHA 2892
+bubba3 MACH_BUBBA3 BUBBA3 2893
+pupitre MACH_PUPITRE PUPITRE 2894
+tegra_vogue MACH_TEGRA_VOGUE TEGRA_VOGUE 2896
+tegra_e1165 MACH_TEGRA_E1165 TEGRA_E1165 2897
+simplenet MACH_SIMPLENET SIMPLENET 2898
+ec4350tbm MACH_EC4350TBM EC4350TBM 2899
+pec_tc MACH_PEC_TC PEC_TC 2900
+pec_hc2 MACH_PEC_HC2 PEC_HC2 2901
+esl_mobilis_a MACH_ESL_MOBILIS_A ESL_MOBILIS_A 2902
+esl_mobilis_b MACH_ESL_MOBILIS_B ESL_MOBILIS_B 2903
+esl_wave_a MACH_ESL_WAVE_A ESL_WAVE_A 2904
+esl_wave_b MACH_ESL_WAVE_B ESL_WAVE_B 2905
+unisense_mmm MACH_UNISENSE_MMM UNISENSE_MMM 2906
+blueshark MACH_BLUESHARK BLUESHARK 2907
+e10 MACH_E10 E10 2908
+app3k_robin MACH_APP3K_ROBIN APP3K_ROBIN 2909
+pov15hd MACH_POV15HD POV15HD 2910
+stella MACH_STELLA STELLA 2911
+linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913
+netwalker MACH_NETWALKER NETWALKER 2914
+acsx106 MACH_ACSX106 ACSX106 2915
+atlas5_c1 MACH_ATLAS5_C1 ATLAS5_C1 2916
+nsb3ast MACH_NSB3AST NSB3AST 2917
+gnet_slc MACH_GNET_SLC GNET_SLC 2918
+af4000 MACH_AF4000 AF4000 2919
+ark9431 MACH_ARK9431 ARK9431 2920
+fs_s5pc100 MACH_FS_S5PC100 FS_S5PC100 2921
+omap3505nova8 MACH_OMAP3505NOVA8 OMAP3505NOVA8 2922
+omap3621_edp1 MACH_OMAP3621_EDP1 OMAP3621_EDP1 2923
+oratisaes MACH_ORATISAES ORATISAES 2924
+smdkv310 MACH_SMDKV310 SMDKV310 2925
+siemens_l0 MACH_SIEMENS_L0 SIEMENS_L0 2926
+ventana MACH_VENTANA VENTANA 2927
+wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928
+ec4350sdb MACH_EC4350SDB EC4350SDB 2929
+mimas MACH_MIMAS MIMAS 2930
+titan MACH_TITAN TITAN 2931
+craneboard MACH_CRANEBOARD CRANEBOARD 2932
+es2440 MACH_ES2440 ES2440 2933
+najay_a9263 MACH_NAJAY_A9263 NAJAY_A9263 2934
+htctornado MACH_HTCTORNADO HTCTORNADO 2935
+dimm_mx257 MACH_DIMM_MX257 DIMM_MX257 2936
+jigen301 MACH_JIGEN JIGEN 2937
+smdk6450 MACH_SMDK6450 SMDK6450 2938
+meno_qng MACH_MENO_QNG MENO_QNG 2939
+ns2416 MACH_NS2416 NS2416 2940
+rpc353 MACH_RPC353 RPC353 2941
+tq6410 MACH_TQ6410 TQ6410 2942
+sky6410 MACH_SKY6410 SKY6410 2943
+dynasty MACH_DYNASTY DYNASTY 2944
+vivo MACH_VIVO VIVO 2945
+bury_bl7582 MACH_BURY_BL7582 BURY_BL7582 2946
+bury_bps5270 MACH_BURY_BPS5270 BURY_BPS5270 2947
+basi MACH_BASI BASI 2948
+tn200 MACH_TN200 TN200 2949
+c2mmi MACH_C2MMI C2MMI 2950
+meson_6236m MACH_MESON_6236M MESON_6236M 2951
+meson_8626m MACH_MESON_8626M MESON_8626M 2952
+tube MACH_TUBE TUBE 2953
+messina MACH_MESSINA MESSINA 2954
+mx50_arm2 MACH_MX50_ARM2 MX50_ARM2 2955
+cetus9263 MACH_CETUS9263 CETUS9263 2956
+brownstone MACH_BROWNSTONE BROWNSTONE 2957
+vmx25 MACH_VMX25 VMX25 2958
+vmx51 MACH_VMX51 VMX51 2959
+abacus MACH_ABACUS ABACUS 2960
+cm4745 MACH_CM4745 CM4745 2961
+oratislink MACH_ORATISLINK ORATISLINK 2962
+davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
+netviz MACH_NETVIZ NETVIZ 2964
+flexibity MACH_FLEXIBITY FLEXIBITY 2965
+wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
+lpc24xx MACH_LPC24XX LPC24XX 2967
+spica MACH_SPICA SPICA 2968
+gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
+bipnet MACH_BIPNET BIPNET 2970
+overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
+davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
+pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
+ptx7545 MACH_PTX7545 PTX7545 2974
+tm_efdc MACH_TM_EFDC TM_EFDC 2975
+omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
+flyer MACH_FLYER FLYER 2978
+tornado3240 MACH_TORNADO3240 TORNADO3240 2979
+soli_01 MACH_SOLI_01 SOLI_01 2980
+omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
+helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
+netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
+ssc MACH_SSC SSC 2984
+premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
+wasabi MACH_WASABI WASABI 2986
+mx50_rdp MACH_MX50_RDP MX50_RDP 2988
+universal_c210 MACH_UNIVERSAL_C210 UNIVERSAL_C210 2989
+real6410 MACH_REAL6410 REAL6410 2990
+spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
+ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
+omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
+thebe MACH_THEBE THEBE 2994
+rv082 MACH_RV082 RV082 2995
+armlguest MACH_ARMLGUEST ARMLGUEST 2996
+tjinc1000 MACH_TJINC1000 TJINC1000 2997
+dockstar MACH_DOCKSTAR DOCKSTAR 2998
+ax8008 MACH_AX8008 AX8008 2999
+gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
+pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
+ea20 MACH_EA20 EA20 3002
+awm2 MACH_AWM2 AWM2 3003
+ti8148evm MACH_TI8148EVM TI8148EVM 3004
+seaboard MACH_SEABOARD SEABOARD 3005
+linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
+tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
+rubys MACH_RUBYS RUBYS 3008
+aquarius MACH_AQUARIUS AQUARIUS 3009
+mx53_ard MACH_MX53_ARD MX53_ARD 3010
+mx53_smd MACH_MX53_SMD MX53_SMD 3011
+lswxl MACH_LSWXL LSWXL 3012
+dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
+sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
+jocpu550 MACH_JOCPU550 JOCPU550 3015
+msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
+msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
+yanomami MACH_YANOMAMI YANOMAMI 3018
+gta04 MACH_GTA04 GTA04 3019
+cm_a510 MACH_CM_A510 CM_A510 3020
+omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
+kx33xx MACH_KX33XX KX33XX 3022
+ptx7510 MACH_PTX7510 PTX7510 3023
+top9000 MACH_TOP9000 TOP9000 3024
+teenote MACH_TEENOTE TEENOTE 3025
+ts3 MACH_TS3 TS3 3026
+a0 MACH_A0 A0 3027
+fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
+fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
+frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
+remus MACH_REMUS REMUS 3031
+at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
+at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
+kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
+armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
+spdm MACH_SPDM SPDM 3037
+gtib MACH_GTIB GTIB 3038
+dgm3240 MACH_DGM3240 DGM3240 3039
+htcmega MACH_HTCMEGA HTCMEGA 3041
+tricorder MACH_TRICORDER TRICORDER 3042
+tx28 MACH_TX28 TX28 3043
+bstbrd MACH_BSTBRD BSTBRD 3044
+pwb3090 MACH_PWB3090 PWB3090 3045
+idea6410 MACH_IDEA6410 IDEA6410 3046
+qbc9263 MACH_QBC9263 QBC9263 3047
+borabora MACH_BORABORA BORABORA 3048
+valdez MACH_VALDEZ VALDEZ 3049
+ls9g20 MACH_LS9G20 LS9G20 3050
+mios_v1 MACH_MIOS_V1 MIOS_V1 3051
+s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
+controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
+tin307 MACH_TIN307 TIN307 3054
+tin510 MACH_TIN510 TIN510 3055
+ep3505 MACH_EP3517 EP3517 3056
+bluecheese MACH_BLUECHEESE BLUECHEESE 3057
+tem3x30 MACH_TEM3X30 TEM3X30 3058
+harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
+msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
+spear900 MACH_SPEAR900 SPEAR900 3061
+pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
+rdstor MACH_RDSTOR RDSTOR 3063
+usdloader MACH_USDLOADER USDLOADER 3064
+tsoploader MACH_TSOPLOADER TSOPLOADER 3065
+kronos MACH_KRONOS KRONOS 3066
+ffcore MACH_FFCORE FFCORE 3067
+mone MACH_MONE MONE 3068
+unit2s MACH_UNIT2S UNIT2S 3069
+acer_a5 MACH_ACER_A5 ACER_A5 3070
+etherpro_isp MACH_ETHERPRO_ISP ETHERPRO_ISP 3071
+stretchs7000 MACH_STRETCHS7000 STRETCHS7000 3072
+p87_smartsim MACH_P87_SMARTSIM P87_SMARTSIM 3073
+tulip MACH_TULIP TULIP 3074
+sunflower MACH_SUNFLOWER SUNFLOWER 3075
+rib MACH_RIB RIB 3076
+clod MACH_CLOD CLOD 3077
+rump MACH_RUMP RUMP 3078
+tenderloin MACH_TENDERLOIN TENDERLOIN 3079
+shortloin MACH_SHORTLOIN SHORTLOIN 3080
+antares MACH_ANTARES ANTARES 3082
+wb40n MACH_WB40N WB40N 3083
+herring MACH_HERRING HERRING 3084
+naxy400 MACH_NAXY400 NAXY400 3085
+naxy1200 MACH_NAXY1200 NAXY1200 3086
+vpr200 MACH_VPR200 VPR200 3087
+bug20 MACH_BUG20 BUG20 3088
+goflexnet MACH_GOFLEXNET GOFLEXNET 3089
+torbreck MACH_TORBRECK TORBRECK 3090
+saarb_mg1 MACH_SAARB_MG1 SAARB_MG1 3091
+callisto MACH_CALLISTO CALLISTO 3092
+multhsu MACH_MULTHSU MULTHSU 3093
+saluda MACH_SALUDA SALUDA 3094
+pemp_omap3_apollo MACH_PEMP_OMAP3_APOLLO PEMP_OMAP3_APOLLO 3095
+vc0718 MACH_VC0718 VC0718 3096
+mvblx MACH_MVBLX MVBLX 3097
+inhand_apeiron MACH_INHAND_APEIRON INHAND_APEIRON 3098
+inhand_fury MACH_INHAND_FURY INHAND_FURY 3099
+inhand_siren MACH_INHAND_SIREN INHAND_SIREN 3100
+hdnvp MACH_HDNVP HDNVP 3101
+softwinner MACH_SOFTWINNER SOFTWINNER 3102
+prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103
+nas6210 MACH_NAS6210 NAS6210 3104
+unisdev MACH_UNISDEV UNISDEV 3105
+sbca11 MACH_SBCA11 SBCA11 3106
+saga MACH_SAGA SAGA 3107
+ns_k330 MACH_NS_K330 NS_K330 3108
+tanna MACH_TANNA TANNA 3109
+imate8502 MACH_IMATE8502 IMATE8502 3110
+aspen MACH_ASPEN ASPEN 3111
+daintree_cwac MACH_DAINTREE_CWAC DAINTREE_CWAC 3112
+zmx25 MACH_ZMX25 ZMX25 3113
+maple1 MACH_MAPLE1 MAPLE1 3114
+qsd8x72_surf MACH_QSD8X72_SURF QSD8X72_SURF 3115
+qsd8x72_ffa MACH_QSD8X72_FFA QSD8X72_FFA 3116
+abilene MACH_ABILENE ABILENE 3117
+eigen_ttr MACH_EIGEN_TTR EIGEN_TTR 3118
+iomega_ix2_200 MACH_IOMEGA_IX2_200 IOMEGA_IX2_200 3119
+coretec_vcx7400 MACH_CORETEC_VCX7400 CORETEC_VCX7400 3120
+santiago MACH_SANTIAGO SANTIAGO 3121
+mx257sol MACH_MX257SOL MX257SOL 3122
+strasbourg MACH_STRASBOURG STRASBOURG 3123
+msm8x60_fluid MACH_MSM8X60_FLUID MSM8X60_FLUID 3124
+smartqv5 MACH_SMARTQV5 SMARTQV5 3125
+smartqv3 MACH_SMARTQV3 SMARTQV3 3126
+smartqv7 MACH_SMARTQV7 SMARTQV7 3127
+paz00 MACH_PAZ00 PAZ00 3128
+acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129
+fwbd_0404 MACH_FWBD_0404 FWBD_0404 3131
+hdgu MACH_HDGU HDGU 3132
+pyramid MACH_PYRAMID PYRAMID 3133
+epiphan MACH_EPIPHAN EPIPHAN 3134
+omap_bender MACH_OMAP_BENDER OMAP_BENDER 3135
+gurnard MACH_GURNARD GURNARD 3136
+gtl_it5100 MACH_GTL_IT5100 GTL_IT5100 3137
+bcm2708 MACH_BCM2708 BCM2708 3138
+mx51_ggc MACH_MX51_GGC MX51_GGC 3139
+sharespace MACH_SHARESPACE SHARESPACE 3140
+haba_knx_explorer MACH_HABA_KNX_EXPLORER HABA_KNX_EXPLORER 3141
+simtec_kirkmod MACH_SIMTEC_KIRKMOD SIMTEC_KIRKMOD 3142
+crux MACH_CRUX CRUX 3143
+mx51_bravo MACH_MX51_BRAVO MX51_BRAVO 3144
+charon MACH_CHARON CHARON 3145
+picocom3 MACH_PICOCOM3 PICOCOM3 3146
+picocom4 MACH_PICOCOM4 PICOCOM4 3147
+serrano MACH_SERRANO SERRANO 3148
+doubleshot MACH_DOUBLESHOT DOUBLESHOT 3149
+evsy MACH_EVSY EVSY 3150
+huashan MACH_HUASHAN HUASHAN 3151
+lausanne MACH_LAUSANNE LAUSANNE 3152
+emerald MACH_EMERALD EMERALD 3153
+tqma35 MACH_TQMA35 TQMA35 3154
+marvel MACH_MARVEL MARVEL 3155
+manuae MACH_MANUAE MANUAE 3156
+chacha MACH_CHACHA CHACHA 3157
+lemon MACH_LEMON LEMON 3158
+csc MACH_CSC CSC 3159
+gira_knxip_router MACH_GIRA_KNXIP_ROUTER GIRA_KNXIP_ROUTER 3160
+t20 MACH_T20 T20 3161
+hdmini MACH_HDMINI HDMINI 3162
+sciphone_g2 MACH_SCIPHONE_G2 SCIPHONE_G2 3163
+express MACH_EXPRESS EXPRESS 3164
+express_kt MACH_EXPRESS_KT EXPRESS_KT 3165
+maximasp MACH_MAXIMASP MAXIMASP 3166
+nitrogen_imx51 MACH_NITROGEN_IMX51 NITROGEN_IMX51 3167
+nitrogen_imx53 MACH_NITROGEN_IMX53 NITROGEN_IMX53 3168
+sunfire MACH_SUNFIRE SUNFIRE 3169
+arowana MACH_AROWANA AROWANA 3170
+tegra_daytona MACH_TEGRA_DAYTONA TEGRA_DAYTONA 3171
+tegra_swordfish MACH_TEGRA_SWORDFISH TEGRA_SWORDFISH 3172
+edison MACH_EDISON EDISON 3173
+svp8500v1 MACH_SVP8500V1 SVP8500V1 3174
+svp8500v2 MACH_SVP8500V2 SVP8500V2 3175
+svp5500 MACH_SVP5500 SVP5500 3176
+b5500 MACH_B5500 B5500 3177
+s5500 MACH_S5500 S5500 3178
+icon MACH_ICON ICON 3179
+elephant MACH_ELEPHANT ELEPHANT 3180
+shooter MACH_SHOOTER SHOOTER 3182
+spade_lte MACH_SPADE_LTE SPADE_LTE 3183
+philhwani MACH_PHILHWANI PHILHWANI 3184
+gsncomm MACH_GSNCOMM GSNCOMM 3185
+strasbourg_a2 MACH_STRASBOURG_A2 STRASBOURG_A2 3186
+mmm MACH_MMM MMM 3187
+davinci_dm365_bv MACH_DAVINCI_DM365_BV DAVINCI_DM365_BV 3188
+ag5evm MACH_AG5EVM AG5EVM 3189
+sc575plc MACH_SC575PLC SC575PLC 3190
+sc575hmi MACH_SC575IPC SC575IPC 3191
+omap3_tdm3730 MACH_OMAP3_TDM3730 OMAP3_TDM3730 3192
+top9000_eval MACH_TOP9000_EVAL TOP9000_EVAL 3194
+top9000_su MACH_TOP9000_SU TOP9000_SU 3195
+utm300 MACH_UTM300 UTM300 3196
+tsunagi MACH_TSUNAGI TSUNAGI 3197
+ts75xx MACH_TS75XX TS75XX 3198
+ts47xx MACH_TS47XX TS47XX 3200
+da850_k5 MACH_DA850_K5 DA850_K5 3201
+ax502 MACH_AX502 AX502 3202
+igep0032 MACH_IGEP0032 IGEP0032 3203
+antero MACH_ANTERO ANTERO 3204
+synergy MACH_SYNERGY SYNERGY 3205
+ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
+wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
+punica MACH_PUNICA PUNICA 3208
+trimslice MACH_TRIMSLICE TRIMSLICE 3209
+mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210
+mackerel MACH_MACKEREL MACKEREL 3211
+fa9x27 MACH_FA9X27 FA9X27 3213
+ns2816tb MACH_NS2816TB NS2816TB 3214
+ns2816_ntpad MACH_NS2816_NTPAD NS2816_NTPAD 3215
+ns2816_ntnb MACH_NS2816_NTNB NS2816_NTNB 3216
+kaen MACH_KAEN KAEN 3217
+nv1000 MACH_NV1000 NV1000 3218
+nuc950ts MACH_NUC950TS NUC950TS 3219
+nokia_rm680 MACH_NOKIA_RM680 NOKIA_RM680 3220
+ast2200 MACH_AST2200 AST2200 3221
+lead MACH_LEAD LEAD 3222
+unino1 MACH_UNINO1 UNINO1 3223
+greeco MACH_GREECO GREECO 3224
+verdi MACH_VERDI VERDI 3225
+dm6446_adbox MACH_DM6446_ADBOX DM6446_ADBOX 3226
+quad_salsa MACH_QUAD_SALSA QUAD_SALSA 3227
+abb_gma_1_1 MACH_ABB_GMA_1_1 ABB_GMA_1_1 3228
+svcid MACH_SVCID SVCID 3229
+msm8960_sim MACH_MSM8960_SIM MSM8960_SIM 3230
+msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231
+icon_g MACH_ICON_G ICON_G 3232
+mb3 MACH_MB3 MB3 3233
+gsia18s MACH_GSIA18S GSIA18S 3234
+pivicc MACH_PIVICC PIVICC 3235
+pcm048 MACH_PCM048 PCM048 3236
+dds MACH_DDS DDS 3237
+chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238
+ts48xx MACH_TS48XX TS48XX 3239
+tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240
+whistler MACH_WHISTLER WHISTLER 3241
+asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242
+at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243
+ddplug MACH_DDPLUG DDPLUG 3244
+d2plug MACH_D2PLUG D2PLUG 3245
+kzm9d MACH_KZM9D KZM9D 3246
+verdi_lte MACH_VERDI_LTE VERDI_LTE 3247
+nanozoom MACH_NANOZOOM NANOZOOM 3248
+dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249
+dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250
+anchovy MACH_ANCHOVY ANCHOVY 3251
+re2rev20 MACH_RE2REV20 RE2REV20 3253
+re2rev21 MACH_RE2REV21 RE2REV21 3254
+cns21xx MACH_CNS21XX CNS21XX 3255
+rider MACH_RIDER RIDER 3257
+nsk330 MACH_NSK330 NSK330 3258
+cns2133evb MACH_CNS2133EVB CNS2133EVB 3259
+z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260
+z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261
+beect MACH_BEECT BEECT 3262
+dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263
+omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264
+mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265
+mione MACH_MIONE MIONE 3266
+top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267
+top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268
+kingdom MACH_KINGDOM KINGDOM 3269
+armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270
+lq2 MACH_LQ2 LQ2 3271
+sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272
+mx53_loco MACH_MX53_LOCO MX53_LOCO 3273
+acer_a8 MACH_ACER_A8 ACER_A8 3275
+acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276
+guppy MACH_GUPPY GUPPY 3277
+mx61_ard MACH_MX61_ARD MX61_ARD 3278
+tx53 MACH_TX53 TX53 3279
+omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280
+uemd MACH_UEMD UEMD 3281
+ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282
+rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283
+encore MACH_ENCORE ENCORE 3284
+hkdkc100 MACH_HKDKC100 HKDKC100 3285
+ts42xx MACH_TS42XX TS42XX 3286
+aebl MACH_AEBL AEBL 3287
+wario MACH_WARIO WARIO 3288
+gfs_spm MACH_GFS_SPM GFS_SPM 3289
+cm_t3730 MACH_CM_T3730 CM_T3730 3290
+isc3 MACH_ISC3 ISC3 3291
+rascal MACH_RASCAL RASCAL 3292
+hrefv60 MACH_HREFV60 HREFV60 3293
+tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294
+pydtd MACH_PYRAMID_TD PYRAMID_TD 3295
+splendor MACH_SPLENDOR SPLENDOR 3296
+guf_vincell MACH_GUF_PLANET GUF_PLANET 3297
+msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298
+htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299
+athene MACH_ATHENE ATHENE 3300
+deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301
+vivow_ct MACH_VIVOW_CT VIVOW_CT 3302
+nery_1000 MACH_NERY_1000 NERY_1000 3303
+rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304
+nmh MACH_NMH NMH 3305
+wn802t MACH_WN802T WN802T 3306
+dragonet MACH_DRAGONET DRAGONET 3307
+geneva_b4 MACH_GENEVA_B GENEVA_B 3308
+at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309
+bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310
+bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311
+koi MACH_KOI KOI 3312
+ts4800 MACH_TS4800 TS4800 3313
+tqma9263 MACH_TQMA9263 TQMA9263 3314
+holiday MACH_HOLIDAY HOLIDAY 3315
+dma_6410 MACH_DMA6410 DMA6410 3316
+pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317
+hwgw6410 MACH_HWGW6410 HWGW6410 3318
+shenzhou MACH_SHENZHOU SHENZHOU 3319
+cwme9210 MACH_CWME9210 CWME9210 3320
+cwme9210js MACH_CWME9210JS CWME9210JS 3321
+pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322
+colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323
+w21 MACH_W21 W21 3324
+polysat1 MACH_POLYSAT1 POLYSAT1 3325
+dataway MACH_DATAWAY DATAWAY 3326
+cobral138 MACH_COBRAL138 COBRAL138 3327
+roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328
+marvelc MACH_MARVELC MARVELC 3329
+navefihid MACH_NAVEFIHID NAVEFIHID 3330
+dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331
+able MACH_ABLE ABLE 3332
+legacy MACH_LEGACY LEGACY 3333
+icong MACH_ICONG ICONG 3334
+rover_g8 MACH_ROVER_G8 ROVER_G8 3335
+t5388p MACH_T5388P T5388P 3336
+dingo MACH_DINGO DINGO 3337
+goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338
+lanreadyfn511 MACH_LANREADYFN511 LANREADYFN511 3340
+omap3_baia MACH_OMAP3_BAIA OMAP3_BAIA 3341
+omap3smartdisplay MACH_OMAP3SMARTDISPLAY OMAP3SMARTDISPLAY 3342
+xilinx MACH_XILINX XILINX 3343
+a2f MACH_A2F A2F 3344
+sky25 MACH_SKY25 SKY25 3345
+ccmx53 MACH_CCMX53 CCMX53 3346
+ccmx53js MACH_CCMX53JS CCMX53JS 3347
+ccwmx53 MACH_CCWMX53 CCWMX53 3348
+ccwmx53js MACH_CCWMX53JS CCWMX53JS 3349
+frisms MACH_FRISMS FRISMS 3350
+msm7x27a_ffa MACH_MSM7X27A_FFA MSM7X27A_FFA 3351
+msm7x27a_surf MACH_MSM7X27A_SURF MSM7X27A_SURF 3352
+msm7x27a_rumi3 MACH_MSM7X27A_RUMI3 MSM7X27A_RUMI3 3353
+dimmsam9g20 MACH_DIMMSAM9G20 DIMMSAM9G20 3354
+dimm_imx28 MACH_DIMM_IMX28 DIMM_IMX28 3355
+amk_a4 MACH_AMK_A4 AMK_A4 3356
+gnet_sgme MACH_GNET_SGME GNET_SGME 3357
+shooter_u MACH_SHOOTER_U SHOOTER_U 3358
+vmx53 MACH_VMX53 VMX53 3359
+rhino MACH_RHINO RHINO 3360
+armlex4210 MACH_ARMLEX4210 ARMLEX4210 3361
+swarcoextmodem MACH_SWARCOEXTMODEM SWARCOEXTMODEM 3362
+snowball MACH_SNOWBALL SNOWBALL 3363
+pcm049 MACH_PCM049 PCM049 3364
+vigor MACH_VIGOR VIGOR 3365
+oslo_amundsen MACH_OSLO_AMUNDSEN OSLO_AMUNDSEN 3366
+gsl_diamond MACH_GSL_DIAMOND GSL_DIAMOND 3367
+cv2201 MACH_CV2201 CV2201 3368
+cv2202 MACH_CV2202 CV2202 3369
+cv2203 MACH_CV2203 CV2203 3370
+vit_ibox MACH_VIT_IBOX VIT_IBOX 3371
+dm6441_esp MACH_DM6441_ESP DM6441_ESP 3372
+at91sam9x5ek MACH_AT91SAM9X5EK AT91SAM9X5EK 3373
+libra MACH_LIBRA LIBRA 3374
+easycrrh MACH_EASYCRRH EASYCRRH 3375
+tripel MACH_TRIPEL TRIPEL 3376
+endian_mini MACH_ENDIAN_MINI ENDIAN_MINI 3377
+xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378
+nuri MACH_NURI NURI 3379
+janus MACH_JANUS JANUS 3380
+ddnas MACH_DDNAS DDNAS 3381
+tag MACH_TAG TAG 3382
+tagw MACH_TAGW TAGW 3383
+nitrogen_vm_imx51 MACH_NITROGEN_VM_IMX51 NITROGEN_VM_IMX51 3384
+viprinet MACH_VIPRINET VIPRINET 3385
+bockw MACH_BOCKW BOCKW 3386
+eva2000 MACH_EVA2000 EVA2000 3387
+steelyard MACH_STEELYARD STEELYARD 3388
+ea2468devkit MACH_LPC2468OEM LPC2468OEM 3389
+sdh001 MACH_MACH_SDH001 MACH_SDH001 3390
+fe2478mblox MACH_LPC2478MICROBLOX LPC2478MICROBLOX 3391
+nsslsboard MACH_NSSLSBOARD NSSLSBOARD 3392
+geneva_b5 MACH_GENEVA_B5 GENEVA_B5 3393
+spear1340 MACH_SPEAR1340 SPEAR1340 3394
+rexmas MACH_REXMAS REXMAS 3395
+msm8960_cdp MACH_MSM8960_CDP MSM8960_CDP 3396
+msm8960_mtp MACH_MSM8960_MDP MSM8960_MDP 3397
+msm8960_fluid MACH_MSM8960_FLUID MSM8960_FLUID 3398
+msm8960_apq MACH_MSM8960_APQ MSM8960_APQ 3399
+helios_v2 MACH_HELIOS_V2 HELIOS_V2 3400
+mif10p MACH_MIF10P MIF10P 3401
+iam28 MACH_IAM28 IAM28 3402
+picasso MACH_PICASSO PICASSO 3403
+mr301a MACH_MR301A MR301A 3404
+notle MACH_NOTLE NOTLE 3405
+eelx2 MACH_EELX2 EELX2 3406
+moon MACH_MOON MOON 3407
+ruby MACH_RUBY RUBY 3408
+goldengate MACH_GOLDENGATE GOLDENGATE 3409
+ctbu_gen2 MACH_CTBU_GEN2 CTBU_GEN2 3410
+kmp_am17_01 MACH_KMP_AM17_01 KMP_AM17_01 3411
+wtplug MACH_WTPLUG WTPLUG 3412
+mx27su2 MACH_MX27SU2 MX27SU2 3413
+nb31 MACH_NB31 NB31 3414
+hjsdu MACH_HJSDU HJSDU 3415
+td3_rev1 MACH_TD3_REV1 TD3_REV1 3416
+eag_ci4000 MACH_EAG_CI4000 EAG_CI4000 3417
+net5big_nand_v2 MACH_NET5BIG_NAND_V2 NET5BIG_NAND_V2 3418
+cpx2 MACH_CPX2 CPX2 3419
+net2big_nand_v2 MACH_NET2BIG_NAND_V2 NET2BIG_NAND_V2 3420
+ecuv5 MACH_ECUV5 ECUV5 3421
+hsgx6d MACH_HSGX6D HSGX6D 3422
+dawad7 MACH_DAWAD7 DAWAD7 3423
+sam9repeater MACH_SAM9REPEATER SAM9REPEATER 3424
+gt_i5700 MACH_GT_I5700 GT_I5700 3425
+ctera_plug_c2 MACH_CTERA_PLUG_C2 CTERA_PLUG_C2 3426
+marvelct MACH_MARVELCT MARVELCT 3427
+ag11005 MACH_AG11005 AG11005 3428
+omap_tabletblaze MACH_OMAP_BLAZE OMAP_BLAZE 3429
+vangogh MACH_VANGOGH VANGOGH 3430
+matrix505 MACH_MATRIX505 MATRIX505 3431
+oce_nigma MACH_OCE_NIGMA OCE_NIGMA 3432
+t55 MACH_T55 T55 3433
+bio3k MACH_BIO3K BIO3K 3434
+expressct MACH_EXPRESSCT EXPRESSCT 3435
+cardhu MACH_CARDHU CARDHU 3436
+aruba MACH_ARUBA ARUBA 3437
+bonaire MACH_BONAIRE BONAIRE 3438
+nuc700evb MACH_NUC700EVB NUC700EVB 3439
+nuc710evb MACH_NUC710EVB NUC710EVB 3440
+nuc740evb MACH_NUC740EVB NUC740EVB 3441
+nuc745evb MACH_NUC745EVB NUC745EVB 3442
+transcede MACH_TRANSCEDE TRANSCEDE 3443
+mora MACH_MORA MORA 3444
+nda_evm MACH_NDA_EVM NDA_EVM 3445
+timu MACH_TIMU TIMU 3446
+expressh MACH_EXPRESSH EXPRESSH 3447
+veridis_a300 MACH_VERIDIS_A300 VERIDIS_A300 3448
+dm368_leopard MACH_DM368_LEOPARD DM368_LEOPARD 3449
+omap_mcop MACH_OMAP_MCOP OMAP_MCOP 3450
+tritip MACH_TRITIP TRITIP 3451
+sm1k MACH_SM1K SM1K 3452
+monch MACH_MONCH MONCH 3453
+curacao MACH_CURACAO CURACAO 3454
+origen MACH_ORIGEN ORIGEN 3455
+epc10 MACH_EPC10 EPC10 3456
+sgh_i740 MACH_SGH_I740 SGH_I740 3457
+tuna MACH_TUNA TUNA 3458
+mx51_tulip MACH_MX51_TULIP MX51_TULIP 3459
+mx51_aster7 MACH_MX51_ASTER7 MX51_ASTER7 3460
+acro37xbrd MACH_ACRO37XBRD ACRO37XBRD 3461
+elke MACH_ELKE ELKE 3462
+sbc6000x MACH_SBC6000X SBC6000X 3463
+r1801e MACH_R1801E R1801E 3464
+h1600 MACH_H1600 H1600 3465
+mini210 MACH_MINI210 MINI210 3466
+mini8168 MACH_MINI8168 MINI8168 3467
+pc7308 MACH_PC7308 PC7308 3468
+ge863_pro3_evk MACH_GE863 GE863 3469
+kmm2m01 MACH_KMM2M01 KMM2M01 3470
+mx51erebus MACH_MX51EREBUS MX51EREBUS 3471
+wm8650refboard MACH_WM8650REFBOARD WM8650REFBOARD 3472
+tuxrail MACH_TUXRAIL TUXRAIL 3473
+arthur MACH_ARTHUR ARTHUR 3474
+doorboy MACH_DOORBOY DOORBOY 3475
+xarina MACH_XARINA XARINA 3476
+roverx7 MACH_ROVERX7 ROVERX7 3477
+sdvr MACH_SDVR SDVR 3478
+acer_maya MACH_ACER_MAYA ACER_MAYA 3479
+pico MACH_PICO PICO 3480
+cwmx233 MACH_CWMX233 CWMX233 3481
+cwam1808 MACH_CWAM1808 CWAM1808 3482
+cwdm365 MACH_CWDM365 CWDM365 3483
+mx51_moray MACH_MX51_MORAY MX51_MORAY 3484
+thales_cbc MACH_THALES_CBC THALES_CBC 3485
+bluepoint MACH_BLUEPOINT BLUEPOINT 3486
+dir665 MACH_DIR665 DIR665 3487
+acmerover1 MACH_ACMEROVER1 ACMEROVER1 3488
+shooter_ct MACH_SHOOTER_CT SHOOTER_CT 3489
+bliss MACH_BLISS BLISS 3490
+blissc MACH_BLISSC BLISSC 3491
+thales_adc MACH_THALES_ADC THALES_ADC 3492
+ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
+atdgp318 MACH_ATDGP318 ATDGP318 3494
+dma210u MACH_DMA210U DMA210U 3495
+em_t3 MACH_EM_T3 EM_T3 3496
+htx3250 MACH_HTX3250 HTX3250 3497
+g50 MACH_G50 G50 3498
+eco5 MACH_ECO5 ECO5 3499
+wintergrasp MACH_WINTERGRASP WINTERGRASP 3500
+puro MACH_PURO PURO 3501
+shooter_k MACH_SHOOTER_K SHOOTER_K 3502
+nspire MACH_NSPIRE NSPIRE 3503
+mickxx MACH_MICKXX MICKXX 3504
+lxmb MACH_LXMB LXMB 3505
+tmdxscbp6618x MACH_TMDXSCBP6616X TMDXSCBP6616X 3506
+adam MACH_ADAM ADAM 3507
+b1004 MACH_B1004 B1004 3508
+oboea MACH_OBOEA OBOEA 3509
+a1015 MACH_A1015 A1015 3510
+robin_vbdt30 MACH_ROBIN_VBDT30 ROBIN_VBDT30 3511
+tegra_enterprise MACH_TEGRA_ENTERPRISE TEGRA_ENTERPRISE 3512
+rfl108200_mk10 MACH_RFL108200_MK10 RFL108200_MK10 3513
+rfl108300_mk16 MACH_RFL108300_MK16 RFL108300_MK16 3514
+rover_v7 MACH_ROVER_V7 ROVER_V7 3515
+miphone MACH_MIPHONE MIPHONE 3516
+femtobts MACH_FEMTOBTS FEMTOBTS 3517
+monopoli MACH_MONOPOLI MONOPOLI 3518
+boss MACH_BOSS BOSS 3519
+davinci_dm368_vtam MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM 3520
+clcon MACH_CLCON CLCON 3521
+nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522
+tahiti MACH_TAHITI TAHITI 3523
+fighter MACH_FIGHTER FIGHTER 3524
+sgh_i710 MACH_SGH_I710 SGH_I710 3525
+integreproscb MACH_INTEGREPROSCB INTEGREPROSCB 3526
+monza MACH_MONZA MONZA 3527
+calimain MACH_CALIMAIN CALIMAIN 3528
+mx6q_sabreauto MACH_MX6Q_SABREAUTO MX6Q_SABREAUTO 3529
+gma01x MACH_GMA01X GMA01X 3530
+sbc51 MACH_SBC51 SBC51 3531
+fit MACH_FIT FIT 3532
+steelhead MACH_STEELHEAD STEELHEAD 3533
+panther MACH_PANTHER PANTHER 3534
+msm8960_liquid MACH_MSM8960_LIQUID MSM8960_LIQUID 3535
+lexikonct MACH_LEXIKONCT LEXIKONCT 3536
+ns2816_stb MACH_NS2816_STB NS2816_STB 3537
+sei_mm2_lpc3250 MACH_SEI_MM2_LPC3250 SEI_MM2_LPC3250 3538
+cmimx53 MACH_CMIMX53 CMIMX53 3539
MACH_CMIMX53 CMIMX53 3539 +sandwich MACH_SANDWICH SANDWICH 3540 +chief MACH_CHIEF CHIEF 3541 +pogo_e02 MACH_POGO_E02 POGO_E02 3542 +mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543 +htcmozart MACH_HTCMOZART HTCMOZART 3544 +htcgold MACH_HTCGOLD HTCGOLD 3545 +mt72xx MACH_MT72XX MT72XX 3546 +mx51_ivy MACH_MX51_IVY MX51_IVY 3547 +mx51_lvd MACH_MX51_LVD MX51_LVD 3548 +omap3_wiser2 MACH_OMAP3_WISER2 OMAP3_WISER2 3549 +dreamplug MACH_DREAMPLUG DREAMPLUG 3550 +cobas_c_111 MACH_COBAS_C_111 COBAS_C_111 3551 +cobas_u_411 MACH_COBAS_U_411 COBAS_U_411 3552 +hssd MACH_HSSD HSSD 3553 +iom35x MACH_IOM35X IOM35X 3554 +psom_omap MACH_PSOM_OMAP PSOM_OMAP 3555 +iphone_2g MACH_IPHONE_2G IPHONE_2G 3556 +iphone_3g MACH_IPHONE_3G IPHONE_3G 3557 +ipod_touch_1g MACH_IPOD_TOUCH_1G IPOD_TOUCH_1G 3558 +pharos_tpc MACH_PHAROS_TPC PHAROS_TPC 3559 +mx53_hydra MACH_MX53_HYDRA MX53_HYDRA 3560 +ns2816_dev_board MACH_NS2816_DEV_BOARD NS2816_DEV_BOARD 3561 +iphone_3gs MACH_IPHONE_3GS IPHONE_3GS 3562 +iphone_4 MACH_IPHONE_4 IPHONE_4 3563 +ipod_touch_4g MACH_IPOD_TOUCH_4G IPOD_TOUCH_4G 3564 +dragon_e1100 MACH_DRAGON_E1100 DRAGON_E1100 3565 +topside MACH_TOPSIDE TOPSIDE 3566 +irisiii MACH_IRISIII IRISIII 3567 +deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568 +eti_d1 MACH_ETI_D1 ETI_D1 3569 +som3530sdk MACH_SOM3530SDK SOM3530SDK 3570 +oc_engine MACH_OC_ENGINE OC_ENGINE 3571 +apq8064_sim MACH_APQ8064_SIM APQ8064_SIM 3572 +alps MACH_ALPS ALPS 3575 +tny_t3730 MACH_TNY_T3730 TNY_T3730 3576 +geryon_nfe MACH_GERYON_NFE GERYON_NFE 3577 +ns2816_ref_board MACH_NS2816_REF_BOARD NS2816_REF_BOARD 3578 +silverstone MACH_SILVERSTONE SILVERSTONE 3579 +mtt2440 MACH_MTT2440 MTT2440 3580 +ynicdb MACH_YNICDB YNICDB 3581 +bct MACH_BCT BCT 3582 +tuscan MACH_TUSCAN TUSCAN 3583 +xbt_sam9g45 MACH_XBT_SAM9G45 XBT_SAM9G45 3584 +enbw_cmc MACH_ENBW_CMC ENBW_CMC 3585 +msm8x60_dragon MACH_APQ8060_DRAGON APQ8060_DRAGON 3586 +ch104mx257 MACH_CH104MX257 CH104MX257 3587 +openpri MACH_OPENPRI OPENPRI 3588 +am335xevm MACH_AM335XEVM AM335XEVM 3589 +picodmb MACH_PICODMB PICODMB 3590 +waluigi MACH_WALUIGI WALUIGI 3591 +punicag7 MACH_PUNICAG7 PUNICAG7 3592 +ipad_1g MACH_IPAD_1G IPAD_1G 3593 +appletv_2g MACH_APPLETV_2G APPLETV_2G 3594 +mach_ecog45 MACH_MACH_ECOG45 MACH_ECOG45 3595 +ait_cam_enc_4xx MACH_AIT_CAM_ENC_4XX AIT_CAM_ENC_4XX 3596 +runnymede MACH_RUNNYMEDE RUNNYMEDE 3597 +play MACH_PLAY PLAY 3598 +hw90260 MACH_HW90260 HW90260 3599 +tagh MACH_TAGH TAGH 3600 +filbert MACH_FILBERT FILBERT 3601 +getinge_netcomv3 MACH_GETINGE_NETCOMV3 GETINGE_NETCOMV3 3602 +cw20 MACH_CW20 CW20 3603 +cinema MACH_CINEMA CINEMA 3604 +cinema_tea MACH_CINEMA_TEA CINEMA_TEA 3605 +cinema_coffee MACH_CINEMA_COFFEE CINEMA_COFFEE 3606 +cinema_juice MACH_CINEMA_JUICE CINEMA_JUICE 3607 +linux_pad MACH_THEPAD THEPAD 3608 +mx53_mirage2 MACH_MX53_MIRAGE2 MX53_MIRAGE2 3609 +mx53_efikasb MACH_MX53_EFIKASB MX53_EFIKASB 3610 +stm_b2000 MACH_STM_B2000 STM_B2000 3612 +m28evk MACH_M28EVK M28EVK 3613 +pda MACH_PDA PDA 3614 +meraki_mr58 MACH_MERAKI_MR58 MERAKI_MR58 3615 +kota2 MACH_KOTA2 KOTA2 3616 +letcool MACH_LETCOOL LETCOOL 3617 +mx27iat MACH_MX27IAT MX27IAT 3618 +apollo_td MACH_APOLLO_TD APOLLO_TD 3619 +arena MACH_ARENA ARENA 3620 +gsngateway MACH_GSNGATEWAY GSNGATEWAY 3621 +lf2000 MACH_LF2000 LF2000 3622 +bonito MACH_BONITO BONITO 3623 +asymptote MACH_ASYMPTOTE ASYMPTOTE 3624 +bst2brd MACH_BST2BRD BST2BRD 3625 +tx335s MACH_TX335S TX335S 3626 +pelco_tesla MACH_PELCO_TESLA PELCO_TESLA 3627 +rrhtestplat MACH_RRHTESTPLAT RRHTESTPLAT 3628 +vidtonic_pro MACH_VIDTONIC_PRO VIDTONIC_PRO 3629 +pl_apollo 
MACH_PL_APOLLO PL_APOLLO 3630 +pl_phoenix MACH_PL_PHOENIX PL_PHOENIX 3631 +m28cu3 MACH_M28CU3 M28CU3 3632 +vvbox_hd MACH_VVBOX_HD VVBOX_HD 3633 +coreware_sam9260_ MACH_COREWARE_SAM9260_ COREWARE_SAM9260_ 3634 +marmaduke MACH_MARMADUKE MARMADUKE 3635 +amg_xlcore_camera MACH_AMG_XLCORE_CAMERA AMG_XLCORE_CAMERA 3636 +omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637 +smdk4212 MACH_SMDK4212 SMDK4212 3638 +dnp9200 MACH_DNP9200 DNP9200 3639 +tf101 MACH_TF101 TF101 3640 +omap3silvio MACH_OMAP3SILVIO OMAP3SILVIO 3641 +picasso2 MACH_PICASSO2 PICASSO2 3642 +vangogh2 MACH_VANGOGH2 VANGOGH2 3643 +olpc_xo_1_75 MACH_OLPC_XO_1_75 OLPC_XO_1_75 3644 +gx400 MACH_GX400 GX400 3645 +gs300 MACH_GS300 GS300 3646 +acer_a9 MACH_ACER_A9 ACER_A9 3647 +vivow_evm MACH_VIVOW_EVM VIVOW_EVM 3648 +veloce_cxq MACH_VELOCE_CXQ VELOCE_CXQ 3649 +veloce_cxm MACH_VELOCE_CXM VELOCE_CXM 3650 +p1852 MACH_P1852 P1852 3651 +naxy100 MACH_NAXY100 NAXY100 3652 +taishan MACH_TAISHAN TAISHAN 3653 +touchlink MACH_TOUCHLINK TOUCHLINK 3654 +stm32f103ze MACH_STM32F103ZE STM32F103ZE 3655 +mcx MACH_MCX MCX 3656 +stm_nmhdk_fli7610 MACH_STM_NMHDK_FLI7610 STM_NMHDK_FLI7610 3657 +top28x MACH_TOP28X TOP28X 3658 +okl4vp_microvisor MACH_OKL4VP_MICROVISOR OKL4VP_MICROVISOR 3659 +pop MACH_POP POP 3660 +layer MACH_LAYER LAYER 3661 +trondheim MACH_TRONDHEIM TRONDHEIM 3662 +eva MACH_EVA EVA 3663 +trust_taurus MACH_TRUST_TAURUS TRUST_TAURUS 3664 +ns2816_huashan MACH_NS2816_HUASHAN NS2816_HUASHAN 3665 +ns2816_yangcheng MACH_NS2816_YANGCHENG NS2816_YANGCHENG 3666 +p852 MACH_P852 P852 3667 +flea3 MACH_FLEA3 FLEA3 3668 +bowfin MACH_BOWFIN BOWFIN 3669 +mv88de3100 MACH_MV88DE3100 MV88DE3100 3670 +pia_am35x MACH_PIA_AM35X PIA_AM35X 3671 +cedar MACH_CEDAR CEDAR 3672 +picasso_e MACH_PICASSO_E PICASSO_E 3673 +samsung_e60 MACH_SAMSUNG_E60 SAMSUNG_E60 3674 +msm9615_cdp MACH_MDM9615 MDM9615 3675 +sdvr_mini MACH_SDVR_MINI SDVR_MINI 3676 +omap3_ij3k MACH_OMAP3_IJ3K OMAP3_IJ3K 3677 +modasmc1 MACH_MODASMC1 MODASMC1 3678 +apq8064_rumi3 MACH_APQ8064_RUMI3 APQ8064_RUMI3 3679 +matrix506 MACH_MATRIX506 MATRIX506 3680 +msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681 +dm36x_spawndc MACH_DM36X_SPAWNDC DM36X_SPAWNDC 3682 +sff792 MACH_SFF792 SFF792 3683 +am335xiaevm MACH_AM335XIAEVM AM335XIAEVM 3684 +g3c2440 MACH_G3C2440 G3C2440 3685 +tion270 MACH_TION270 TION270 3686 +w22q7arm02 MACH_W22Q7ARM02 W22Q7ARM02 3687 +omap_cat MACH_OMAP_CAT OMAP_CAT 3688 +at91sam9n12ek MACH_AT91SAM9N12EK AT91SAM9N12EK 3689 +morrison MACH_MORRISON MORRISON 3690 +svdu MACH_SVDU SVDU 3691 +lpp01 MACH_LPP01 LPP01 3692 +ubc283 MACH_UBC283 UBC283 3693 +zeppelin MACH_ZEPPELIN ZEPPELIN 3694 +motus MACH_MOTUS MOTUS 3695 +neomainboard MACH_NEOMAINBOARD NEOMAINBOARD 3696 +devkit3250 MACH_DEVKIT3250 DEVKIT3250 3697 +devkit7000 MACH_DEVKIT7000 DEVKIT7000 3698 +fmc_uic MACH_FMC_UIC FMC_UIC 3699 +fmc_dcm MACH_FMC_DCM FMC_DCM 3700 +batwm MACH_BATWM BATWM 3701 +atlas6cb MACH_ATLAS6CB ATLAS6CB 3702 +quattro_f MACH_QUATTROF QUATTROF 3703 +quattro_u MACH_QUATTROU QUATTROU 3704 +blue MACH_BLUE BLUE 3705 +colorado MACH_COLORADO COLORADO 3706 +popc MACH_POPC POPC 3707 +promwad_jade MACH_PROMWAD_JADE PROMWAD_JADE 3708 +amp MACH_AMP AMP 3709 +gnet_amp MACH_GNET_AMP GNET_AMP 3710 +toques MACH_TOQUES TOQUES 3711 +apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712 +dct_storm MACH_DCT_STORM DCT_STORM 3713 +dm8168z3 MACH_Z3 Z3 3714 +owl MACH_OWL OWL 3715 +cogent_csb1741 MACH_COGENT_CSB1741 COGENT_CSB1741 3716 +omap3_kiko MACH_OMAP3 OMAP3 3717 +adillustra610 MACH_ADILLUSTRA610 ADILLUSTRA610 3718 +ecafe_na04 MACH_ECAFE_NA04 ECAFE_NA04 3719 +popct MACH_POPCT 
POPCT 3720 +omap3_helena MACH_OMAP3_HELENA OMAP3_HELENA 3721 +ach MACH_ACH ACH 3722 +module_dtb MACH_MODULE_DTB MODULE_DTB 3723 +ratebox MACH_RACKBOX RACKBOX 3724 +oslo_elisabeth MACH_OSLO_ELISABETH OSLO_ELISABETH 3725 +tt01 MACH_TT01 TT01 3726 +msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727 +msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728 +msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729 +ltu11 MACH_LTU11 LTU11 3730 +am1808_spawnco MACH_AM1808_SPAWNCO AM1808_SPAWNCO 3731 +flx6410 MACH_FLX6410 FLX6410 3732 +mx6q_qsb MACH_MX6Q_QSB MX6Q_QSB 3733 +mx53_plt424 MACH_MX53_PLT424 MX53_PLT424 3734 +jasmine MACH_JASMINE JASMINE 3735 +l138_owlboard_plus MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS 3736 +wr21 MACH_WR21 WR21 3737 +peaboy MACH_PEABOY PEABOY 3739 +mx28_plato MACH_MX28_PLATO MX28_PLATO 3740 +kacom2 MACH_KACOM2 KACOM2 3741 +slco MACH_SLCO SLCO 3742 +imx51pico MACH_IMX51PICO IMX51PICO 3743 +glink1 MACH_GLINK1 GLINK1 3744 +diamond MACH_DIAMOND DIAMOND 3745 +d9000 MACH_D9000 D9000 3746 +w5300e01 MACH_W5300E01 W5300E01 3747 +im6000 MACH_IM6000 IM6000 3748 +mx51_fred51 MACH_MX51_FRED51 MX51_FRED51 3749 +stm32f2 MACH_STM32F2 STM32F2 3750 +ville MACH_VILLE VILLE 3751 +ptip_murnau MACH_PTIP_MURNAU PTIP_MURNAU 3752 +ptip_classic MACH_PTIP_CLASSIC PTIP_CLASSIC 3753 +mx53grb MACH_MX53GRB MX53GRB 3754 +gagarin MACH_GAGARIN GAGARIN 3755 +msm7627a_qrd1 MACH_MSM7X27A_QRD1 MSM7X27A_QRD1 3756 +nas2big MACH_NAS2BIG NAS2BIG 3757 +superfemto MACH_SUPERFEMTO SUPERFEMTO 3758 +teufel MACH_TEUFEL TEUFEL 3759 +dinara MACH_DINARA DINARA 3760 +vanquish MACH_VANQUISH VANQUISH 3761 +zipabox1 MACH_ZIPABOX1 ZIPABOX1 3762 +u9540 MACH_U9540 U9540 3763 +jet MACH_JET JET 3764 +smdk4412 MACH_SMDK4412 SMDK4412 3765 +elite MACH_ELITE ELITE 3766 +spear320_hmi MACH_SPEAR320_HMI SPEAR320_HMI 3767 +ontario MACH_ONTARIO ONTARIO 3768 +mx6q_sabrelite MACH_MX6Q_SABRELITE MX6Q_SABRELITE 3769 +vc200 MACH_VC200 VC200 3770 +msm7625a_ffa MACH_MSM7625A_FFA MSM7625A_FFA 3771 +msm7625a_surf MACH_MSM7625A_SURF MSM7625A_SURF 3772 +benthossbp MACH_BENTHOSSBP BENTHOSSBP 3773 +smdk5210 MACH_SMDK5210 SMDK5210 3774 +empq2300 MACH_EMPQ2300 EMPQ2300 3775 +minipos MACH_MINIPOS MINIPOS 3776 +omap5_sevm MACH_OMAP5_SEVM OMAP5_SEVM 3777 +shelter MACH_SHELTER SHELTER 3778 +omap3_devkit8500 MACH_OMAP3_DEVKIT8500 OMAP3_DEVKIT8500 3779 +edgetd MACH_EDGETD EDGETD 3780 +copperyard MACH_COPPERYARD COPPERYARD 3781 +edge_test MACH_EDGE EDGE 3782 +edge_u MACH_EDGE_U EDGE_U 3783 +edge_td MACH_EDGE_TD EDGE_TD 3784 +wdss MACH_WDSS WDSS 3785 +dl_pb25 MACH_DL_PB25 DL_PB25 3786 +dss11 MACH_DSS11 DSS11 3787 +cpa MACH_CPA CPA 3788 +aptp2000 MACH_APTP2000 APTP2000 3789 +marzen MACH_MARZEN MARZEN 3790 +st_turbine MACH_ST_TURBINE ST_TURBINE 3791 +gtl_it3300 MACH_GTL_IT3300 GTL_IT3300 3792 +mx6_mule MACH_MX6_MULE MX6_MULE 3793 +v7pxa_dt MACH_V7PXA_DT V7PXA_DT 3794 +v7mmp_dt MACH_V7MMP_DT V7MMP_DT 3795 +dragon7 MACH_DRAGON7 DRAGON7 3796 +krome MACH_KROME KROME 3797 +oratisdante MACH_ORATISDANTE ORATISDANTE 3798 +fathom MACH_FATHOM FATHOM 3799 +dns325 MACH_DNS325 DNS325 3800 +sarnen MACH_SARNEN SARNEN 3801 +ubisys_g1 MACH_UBISYS_G1 UBISYS_G1 3802 +mx53_pf1 MACH_MX53_PF1 MX53_PF1 3803 +asanti MACH_ASANTI ASANTI 3804 +volta MACH_VOLTA VOLTA 3805 +potenza MACH_S5P6450 S5P6450 3806 +knight MACH_KNIGHT KNIGHT 3807 +beaglebone MACH_BEAGLEBONE BEAGLEBONE 3808 +becker MACH_BECKER BECKER 3809 +fc360 MACH_FC360 FC360 3810 +pmi2_xls MACH_PMI2_XLS PMI2_XLS 3811 +taranto MACH_TARANTO TARANTO 3812 +plutux MACH_PLUTUX PLUTUX 3813 +ipmp_medcom MACH_IPMP_MEDCOM IPMP_MEDCOM 3814 +absolut 
MACH_ABSOLUT ABSOLUT 3815 +awpb3 MACH_AWPB3 AWPB3 3816 +nfp32xx_dt MACH_NFP32XX_DT NFP32XX_DT 3817 +dl_pb53 MACH_DL_PB53 DL_PB53 3818 +acu_ii MACH_ACU_II ACU_II 3819 +avalon MACH_AVALON AVALON 3820 +sphinx MACH_SPHINX SPHINX 3821 +titan_t MACH_TITAN_T TITAN_T 3822 +harvest_boris MACH_HARVEST_BORIS HARVEST_BORIS 3823 +mach_msm7x30_m3s MACH_MACH_MSM7X30_M3S MACH_MSM7X30_M3S 3824 +smdk5250 MACH_SMDK5250 SMDK5250 3825 +imxt_lite MACH_IMXT_LITE IMXT_LITE 3826 +imxt_std MACH_IMXT_STD IMXT_STD 3827 +imxt_log MACH_IMXT_LOG IMXT_LOG 3828 +imxt_nav MACH_IMXT_NAV IMXT_NAV 3829 +imxt_full MACH_IMXT_FULL IMXT_FULL 3830 +ag09015 MACH_AG09015 AG09015 3831 +am3517_mt_ventoux MACH_AM3517_MT_VENTOUX AM3517_MT_VENTOUX 3832 +dp1arm9 MACH_DP1ARM9 DP1ARM9 3833 +picasso_m MACH_PICASSO_M PICASSO_M 3834 +video_gadget MACH_VIDEO_GADGET VIDEO_GADGET 3835 +mtt_om3x MACH_MTT_OM3X MTT_OM3X 3836 +mx6q_arm2 MACH_MX6Q_ARM2 MX6Q_ARM2 3837 +picosam9g45 MACH_PICOSAM9G45 PICOSAM9G45 3838 +vpm_dm365 MACH_VPM_DM365 VPM_DM365 3839 +bonfire MACH_BONFIRE BONFIRE 3840 +mt2p2d MACH_MT2P2D MT2P2D 3841 +sigpda01 MACH_SIGPDA01 SIGPDA01 3842 +cn27 MACH_CN27 CN27 3843 +mx25_cwtap MACH_MX25_CWTAP MX25_CWTAP 3844 +apf28 MACH_APF28 APF28 3845 +pelco_maxwell MACH_PELCO_MAXWELL PELCO_MAXWELL 3846 +ge_phoenix MACH_GE_PHOENIX GE_PHOENIX 3847 +empc_a500 MACH_EMPC_A500 EMPC_A500 3848 +ims_arm9 MACH_IMS_ARM9 IMS_ARM9 3849 +mini2416 MACH_MINI2416 MINI2416 3850 +mini2450 MACH_MINI2450 MINI2450 3851 +mini310 MACH_MINI310 MINI310 3852 +spear_hurricane MACH_SPEAR_HURRICANE SPEAR_HURRICANE 3853 +mt7208 MACH_MT7208 MT7208 3854 +lpc178x MACH_LPC178X LPC178X 3855 +farleys MACH_FARLEYS FARLEYS 3856 +efm32gg_dk3750 MACH_EFM32GG_DK3750 EFM32GG_DK3750 3857 +zeus_board MACH_ZEUS_BOARD ZEUS_BOARD 3858 +cc51 MACH_CC51 CC51 3859 +fxi_c210 MACH_FXI_C210 FXI_C210 3860 +msm8627_cdp MACH_MSM8627_CDP MSM8627_CDP 3861 +msm8627_mtp MACH_MSM8627_MTP MSM8627_MTP 3862 +armadillo800eva MACH_ARMADILLO800EVA ARMADILLO800EVA 3863 +primou MACH_PRIMOU PRIMOU 3864 +primoc MACH_PRIMOC PRIMOC 3865 +primoct MACH_PRIMOCT PRIMOCT 3866 +a9500 MACH_A9500 A9500 3867 +pue_td MACH_PULSE_TD PULSE_TD 3868 +pluto MACH_PLUTO PLUTO 3869 +acfx100 MACH_ACFX100 ACFX100 3870 +msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871 +valente MACH_VALENTE VALENTE 3872 +crfs_rfeye MACH_CRFS_RFEYE CRFS_RFEYE 3873 +rfeye MACH_RFEYE RFEYE 3874 +phidget_sbc3 MACH_PHIDGET_SBC3 PHIDGET_SBC3 3875 +tcw_mika MACH_TCW_MIKA TCW_MIKA 3876 +imx28_egf MACH_IMX28_EGF IMX28_EGF 3877 +valente_wx MACH_VALENTE_WX VALENTE_WX 3878 +huangshans MACH_HUANGSHANS HUANGSHANS 3879 +bosphorus1 MACH_BOSPHORUS1 BOSPHORUS1 3880 +prima MACH_PRIMA PRIMA 3881 +meson3_skt MACH_M3_SKT M3_SKT 3882 +meson3_ref MACH_M3_REF M3_REF 3883 +evita_ulk MACH_EVITA_ULK EVITA_ULK 3884 +merisc600 MACH_MERISC600 MERISC600 3885 +dolak MACH_DOLAK DOLAK 3886 +sbc53 MACH_SBC53 SBC53 3887 +elite_ulk MACH_ELITE_ULK ELITE_ULK 3888 +pov2 MACH_POV2 POV2 3889 +ipod_touch_2g MACH_IPOD_TOUCH_2G IPOD_TOUCH_2G 3890 +da850_pqab MACH_DA850_PQAB DA850_PQAB 3891 +fermi MACH_FERMI FERMI 3892 +ccardwmx28 MACH_CCARDWMX28 CCARDWMX28 3893 +ccardmx28 MACH_CCARDMX28 CCARDMX28 3894 +fs20_fcm2050 MACH_FS20_FCM2050 FS20_FCM2050 3895 +kinetis MACH_KINETIS KINETIS 3896 +kai MACH_KAI KAI 3897 +bcthb2 MACH_BCTHB2 BCTHB2 3898 +inels3_cu MACH_INELS3_CU INELS3_CU 3899 +da850_juniper MACH_JUNIPER JUNIPER 3900 +da850_apollo MACH_DA850_APOLLO DA850_APOLLO 3901 +tracnas MACH_TRACNAS TRACNAS 3902 +mityarm335x MACH_MITYARM335X MITYARM335X 3903 +xcgz7x MACH_XCGZ7X XCGZ7X 3904 +cubox MACH_CUBOX CUBOX 
3905 +terminator MACH_TERMINATOR TERMINATOR 3906 +eye03 MACH_EYE03 EYE03 3907 +kota3 MACH_KOTA3 KOTA3 3908 +mx53_nitrogen_k MACH_MX5 MX5 3909 +pscpe MACH_PSCPE PSCPE 3910 +akt1100 MACH_AKT1100 AKT1100 3911 +pcaaxl2 MACH_PCAAXL2 PCAAXL2 3912 +primodd_ct MACH_PRIMODD_CT PRIMODD_CT 3913 +nsbc MACH_NSBC NSBC 3914 +meson2_skt MACH_MESON2_SKT MESON2_SKT 3915 +meson2_ref MACH_MESON2_REF MESON2_REF 3916 +ccardwmx28js MACH_CCARDWMX28JS CCARDWMX28JS 3917 +ccardmx28js MACH_CCARDMX28JS CCARDMX28JS 3918 +indico MACH_INDICO INDICO 3919 +msm8960dt MACH_MSM8960DT MSM8960DT 3920 +primods MACH_PRIMODS PRIMODS 3921 +beluga_m1388 MACH_BELUGA_M1388 BELUGA_M1388 3922 +primotd MACH_PRIMOTD PRIMOTD 3923 +varan_master MACH_VARAN_MASTER VARAN_MASTER 3924 +primodd MACH_PRIMODD PRIMODD 3925 +jetduo MACH_JETDUO JETDUO 3926 +mx53_umobo MACH_MX53_UMOBO MX53_UMOBO 3927 +trats MACH_TRATS TRATS 3928 +starcraft MACH_STARCRAFT STARCRAFT 3929 +qseven_tegra2 MACH_QSEVEN_TEGRA2 QSEVEN_TEGRA2 3930 +lichee_sun4i_devbd MACH_LICHEE_SUN4I_DEVBD LICHEE_SUN4I_DEVBD 3931 +movenow MACH_MOVENOW MOVENOW 3932 +golf_u MACH_GOLF_U GOLF_U 3933 +msm7627a_evb MACH_MSM7627A_EVB MSM7627A_EVB 3934 +rambo MACH_RAMBO RAMBO 3935 +golfu MACH_GOLFU GOLFU 3936 +mango310 MACH_MANGO310 MANGO310 3937 +dns343 MACH_DNS343 DNS343 3938 +var_som_om44 MACH_VAR_SOM_OM44 VAR_SOM_OM44 3939 +naon MACH_NAON NAON 3940 +vp4000 MACH_VP4000 VP4000 3941 +impcard MACH_IMPCARD IMPCARD 3942 +smoovcam MACH_SMOOVCAM SMOOVCAM 3943 +cobham3725 MACH_COBHAM3725 COBHAM3725 3944 +cobham3730 MACH_COBHAM3730 COBHAM3730 3945 +cobham3703 MACH_COBHAM3703 COBHAM3703 3946 +quetzal MACH_QUETZAL QUETZAL 3947 +apq8064_cdp MACH_APQ8064_CDP APQ8064_CDP 3948 +apq8064_mtp MACH_APQ8064_MTP APQ8064_MTP 3949 +apq8064_fluid MACH_APQ8064_FLUID APQ8064_FLUID 3950 +apq8064_liquid MACH_APQ8064_LIQUID APQ8064_LIQUID 3951 +mango210 MACH_MANGO210 MANGO210 3952 +mango100 MACH_MANGO100 MANGO100 3953 +mango24 MACH_MANGO24 MANGO24 3954 +mango64 MACH_MANGO64 MANGO64 3955 +nsa320 MACH_NSA320 NSA320 3956 +elv_ccu2 MACH_ELV_CCU2 ELV_CCU2 3957 +triton_x00 MACH_TRITON_X00 TRITON_X00 3958 +triton_1500_2000 MACH_TRITON_1500_2000 TRITON_1500_2000 3959 +pogoplugv4 MACH_POGOPLUGV4 POGOPLUGV4 3960 +venus_cl MACH_VENUS_CL VENUS_CL 3961 +vulcano_g20 MACH_VULCANO_G20 VULCANO_G20 3962 +sgs_i9100 MACH_SGS_I9100 SGS_I9100 3963 +stsv2 MACH_STSV2 STSV2 3964 +csb1724 MACH_CSB1724 CSB1724 3965 +omapl138_lcdk MACH_OMAPL138_LCDK OMAPL138_LCDK 3966 +jel_dd MACH_JEWEL_DD JEWEL_DD 3967 +pvd_mx25 MACH_PVD_MX25 PVD_MX25 3968 +meson6_skt MACH_MESON6_SKT MESON6_SKT 3969 +meson6_ref MACH_MESON6_REF MESON6_REF 3970 +pxm MACH_PXM PXM 3971 +stuttgart MACH_S3 S3 3972 +pogoplugv3 MACH_POGOPLUGV3 POGOPLUGV3 3973 +mlp89626 MACH_MLP89626 MLP89626 3974 +iomegahmndce MACH_IOMEGAHMNDCE IOMEGAHMNDCE 3975 +pogoplugv3pci MACH_POGOPLUGV3PCI POGOPLUGV3PCI 3976 +bntv250 MACH_BNTV250 BNTV250 3977 +mx53_qseven MACH_MX53_QSEVEN MX53_QSEVEN 3978 +gtl_it1100 MACH_GTL_IT1100 GTL_IT1100 3979 +mx6q_sabresd MACH_MX6Q_SABRESD MX6Q_SABRESD 3980 +mt4 MACH_MT4 MT4 3981 +jumbo_d MACH_JUMBO_D JUMBO_D 3982 +jumbo_i MACH_JUMBO_I JUMBO_I 3983 +fs20_dmp MACH_FS20_DMP FS20_DMP 3984 +dns320 MACH_DNS320 DNS320 3985 +mx28bacos MACH_MX28BACOS MX28BACOS 3986 +tl80 MACH_TL80 TL80 3987 +polatis_nic_1001 MACH_POLATIS_NIC_1001 POLATIS_NIC_1001 3988 +tely MACH_TELY TELY 3989 +u8520 MACH_U8520 U8520 3990 +manta MACH_MANTA MANTA 3991 +spear1340_lcad MACH_SPEAR_EM_S900 SPEAR_EM_S900 3992 +mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993 +mpq8064_hrd MACH_MPQ8064_STB MPQ8064_STB 3994 
+mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995 +dm368som MACH_DM368SOM DM368SOM 3996 +gprisb2 MACH_GPRISB2 GPRISB2 3997 +chammid MACH_CHAMMID CHAMMID 3998 +seoul2 MACH_SEOUL2 SEOUL2 3999 +omap4_nooktablet MACH_OMAP4_NOOKTABLET OMAP4_NOOKTABLET 4000 +aalto MACH_AALTO AALTO 4001 +metro MACH_METRO METRO 4002 +cydm3730 MACH_CYDM3730 CYDM3730 4003 +tqma53 MACH_TQMA53 TQMA53 4004 +msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005 +mx28_canby MACH_MX28_CANBY MX28_CANBY 4006 +tiger MACH_TIGER TIGER 4007 +pcats_9307_type_a MACH_PCATS_9307_TYPE_A PCATS_9307_TYPE_A 4008 +pcats_9307_type_o MACH_PCATS_9307_TYPE_O PCATS_9307_TYPE_O 4009 +pcats_9307_type_r MACH_PCATS_9307_TYPE_R PCATS_9307_TYPE_R 4010 +streamplug MACH_STREAMPLUG STREAMPLUG 4011 +icechicken_dev MACH_ICECHICKEN_DEV ICECHICKEN_DEV 4012 +hedgehog MACH_HEDGEHOG HEDGEHOG 4013 +yusend_obc MACH_YUSEND_OBC YUSEND_OBC 4014 +imxninja MACH_IMXNINJA IMXNINJA 4015 +omap4_jarod MACH_OMAP4_JAROD OMAP4_JAROD 4016 +eco5_pk MACH_ECO5_PK ECO5_PK 4017 +qj2440 MACH_QJ2440 QJ2440 4018 +mx6q_mercury MACH_MX6Q_MERCURY MX6Q_MERCURY 4019 +cm6810 MACH_CM6810 CM6810 4020 +omap4_torpedo MACH_OMAP4_TORPEDO OMAP4_TORPEDO 4021 +nsa310 MACH_NSA310 NSA310 4022 +tmx536 MACH_TMX536 TMX536 4023 +ktt20 MACH_KTT20 KTT20 4024 +dragonix MACH_DRAGONIX DRAGONIX 4025 +lungching MACH_LUNGCHING LUNGCHING 4026 +bulogics MACH_BULOGICS BULOGICS 4027 +mx535_sx MACH_MX535_SX MX535_SX 4028 +ngui3250 MACH_NGUI3250 NGUI3250 4029 +salutec_dac MACH_SALUTEC_DAC SALUTEC_DAC 4030 +loco MACH_LOCO LOCO 4031 +ctera_plug_usi MACH_CTERA_PLUG_USI CTERA_PLUG_USI 4032 +scepter MACH_SCEPTER SCEPTER 4033 +sga MACH_SGA SGA 4034 +p_81_j5 MACH_P_81_J5 P_81_J5 4035 +p_81_o4 MACH_P_81_O4 P_81_O4 4036 +msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037 +carallon_shark MACH_CARALLON_SHARK CARALLON_SHARK 4038 +lsgc_icam MACH_LSGCICAM LSGCICAM 4039 +ordog MACH_ORDOG ORDOG 4040 +puente_io MACH_PUENTE_IO PUENTE_IO 4041 +msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042 +ev_am1707 MACH_EV_AM1707 EV_AM1707 4043 +ev_am1707e2 MACH_EV_AM1707E2 EV_AM1707E2 4044 +ev_am3517e2 MACH_EV_AM3517E2 EV_AM3517E2 4045 +calabria MACH_CALABRIA CALABRIA 4046 +ev_imx287 MACH_EV_IMX287 EV_IMX287 4047 +erau MACH_ERAU ERAU 4048 +sichuan MACH_SICHUAN SICHUAN 4049 +sopdm MACH_WIRMA3 WIRMA3 4050 +davinci_da850 MACH_DAVINCI_DA850 DAVINCI_DA850 4051 +omap138_trunarc MACH_OMAP138_TRUNARC OMAP138_TRUNARC 4052 +bcm4761 MACH_BCM4761 BCM4761 4053 +picasso_e2 MACH_PICASSO_E2 PICASSO_E2 4054 +picasso_mf MACH_PICASSO_MF PICASSO_MF 4055 +miro MACH_MIRO MIRO 4056 +at91sam9g20ewon3 MACH_AT91SAM9G20EWON3 AT91SAM9G20EWON3 4057 +yoyo MACH_YOYO YOYO 4058 +windjkl MACH_WINDJKL WINDJKL 4059 +monarudo MACH_MONARUDO MONARUDO 4060 +batan MACH_BATAN BATAN 4061 +tadao MACH_TADAO TADAO 4062 +baso MACH_BASO BASO 4063 +mahon MACH_MAHON MAHON 4064 +villec2 MACH_VILLEC2 VILLEC2 4065 +asi1230 MACH_ASI1230 ASI1230 4066 +alaska MACH_ALASKA ALASKA 4067 +swarco_shdsl2 MACH_SWARCO_SHDSL2 SWARCO_SHDSL2 4068 +oxrtu MACH_OXRTU OXRTU 4069 +omap5_panda MACH_OMAP5_PANDA OMAP5_PANDA 4070 +imx286 MACH_MX28XDI MX28XDI 4071 +c8000 MACH_C8000 C8000 4072 +bje_display3_5 MACH_BJE_DISPLAY3_5 BJE_DISPLAY3_5 4073 +picomod7 MACH_PICOMOD7 PICOMOD7 4074 +picocom5 MACH_PICOCOM5 PICOCOM5 4075 +qblissa8 MACH_QBLISSA8 QBLISSA8 4076 +armstonea8 MACH_ARMSTONEA8 ARMSTONEA8 4077 +netdcu14 MACH_NETDCU14 NETDCU14 4078 +at91sam9x5_epiphan MACH_AT91SAM9X5_EPIPHAN AT91SAM9X5_EPIPHAN 4079 +p2u MACH_P2U P2U 4080 +doris MACH_DORIS DORIS 4081 +j49 MACH_J49 J49 4082 +vdss2e MACH_VDSS2E VDSS2E 4083 +vc300 MACH_VC300 
VC300 4084 +ns115_pad_test MACH_NS115_PAD_TEST NS115_PAD_TEST 4085 +ns115_pad_ref MACH_NS115_PAD_REF NS115_PAD_REF 4086 +ns115_phone_test MACH_NS115_PHONE_TEST NS115_PHONE_TEST 4087 +ns115_phone_ref MACH_NS115_PHONE_REF NS115_PHONE_REF 4088 +golfc MACH_GOLFC GOLFC 4089 +xerox_olympus MACH_XEROX_OLYMPUS XEROX_OLYMPUS 4090 +mx6sl_arm2 MACH_MX6SL_ARM2 MX6SL_ARM2 4091 +csb1701_csb1726 MACH_CSB1701_CSB1726 CSB1701_CSB1726 4092 +at91sam9xeek MACH_AT91SAM9XEEK AT91SAM9XEEK 4093 +ebv210 MACH_EBV210 EBV210 4094 +msm7627a_qrd7 MACH_MSM7627A_QRD7 MSM7627A_QRD7 4095 +svthin MACH_SVTHIN SVTHIN 4096 +duovero MACH_DUOVERO DUOVERO 4097 +chupacabra MACH_CHUPACABRA CHUPACABRA 4098 +scorpion MACH_SCORPION SCORPION 4099 +davinci_he_hmi10 MACH_DAVINCI_HE_HMI10 DAVINCI_HE_HMI10 4100 +topkick MACH_TOPKICK TOPKICK 4101 +m3_auguestrush MACH_M3_AUGUESTRUSH M3_AUGUESTRUSH 4102 +ipc335x MACH_IPC335X IPC335X 4103 +sun4i MACH_SUN4I SUN4I 4104 +imx233_olinuxino MACH_IMX233_OLINUXINO IMX233_OLINUXINO 4105 +k2_wl MACH_K2_WL K2_WL 4106 +k2_ul MACH_K2_UL K2_UL 4107 +k2_cl MACH_K2_CL K2_CL 4108 +minbari_w MACH_MINBARI_W MINBARI_W 4109 +minbari_m MACH_MINBARI_M MINBARI_M 4110 +k035 MACH_K035 K035 4111 +ariel MACH_ARIEL ARIEL 4112 +arielsaarc MACH_ARIELSAARC ARIELSAARC 4113 +arieldkb MACH_ARIELDKB ARIELDKB 4114 +armadillo810 MACH_ARMADILLO810 ARMADILLO810 4115 +tam335x MACH_TAM335X TAM335X 4116 +grouper MACH_GROUPER GROUPER 4117 +mpcsa21_9g20 MACH_MPCSA21_9G20 MPCSA21_9G20 4118 +m6u_cpu MACH_M6U_CPU M6U_CPU 4119 +davinci_dp10 MACH_DAVINCI_DP10 DAVINCI_DP10 4120 +ginkgo MACH_GINKGO GINKGO 4121 +cgt_qmx6 MACH_CGT_QMX6 CGT_QMX6 4122 +profpga MACH_PROFPGA PROFPGA 4123 +acfx100oc MACH_ACFX100OC ACFX100OC 4124 +acfx100nb MACH_ACFX100NB ACFX100NB 4125 +capricorn MACH_CAPRICORN CAPRICORN 4126 +pisces MACH_PISCES PISCES 4127 +aries MACH_ARIES ARIES 4128 +cancer MACH_CANCER CANCER 4129 +leo MACH_LEO LEO 4130 +virgo MACH_VIRGO VIRGO 4131 +sagittarius MACH_SAGITTARIUS SAGITTARIUS 4132 +devil MACH_DEVIL DEVIL 4133 +ballantines MACH_BALLANTINES BALLANTINES 4134 +omap3_procerusvpu MACH_OMAP3_PROCERUSVPU OMAP3_PROCERUSVPU 4135 +my27 MACH_MY27 MY27 4136 +sun6i MACH_SUN6I SUN6I 4137 +sun5i MACH_SUN5I SUN5I 4138 +mx512_mx MACH_MX512_MX MX512_MX 4139 +kzm9g MACH_KZM9G KZM9G 4140 +vdstbn MACH_VDSTBN VDSTBN 4141 +cfa10036 MACH_CFA10036 CFA10036 4142 +cfa10049 MACH_CFA10049 CFA10049 4143 +pcm051 MACH_PCM051 PCM051 4144 +vybrid_vf7xx MACH_VYBRID_VF7XX VYBRID_VF7XX 4145 +vybrid_vf6xx MACH_VYBRID_VF6XX VYBRID_VF6XX 4146 +vybrid_vf5xx MACH_VYBRID_VF5XX VYBRID_VF5XX 4147 +vybrid_vf4xx MACH_VYBRID_VF4XX VYBRID_VF4XX 4148 +aria_g25 MACH_ARIA_G25 ARIA_G25 4149 +bcm21553 MACH_BCM21553 BCM21553 4150 +smdk5410 MACH_SMDK5410 SMDK5410 4151 +lpc18xx MACH_LPC18XX LPC18XX 4152 +oratisparty MACH_ORATISPARTY ORATISPARTY 4153 +qseven MACH_QSEVEN QSEVEN 4154 +gmv_generic MACH_GMV_GENERIC GMV_GENERIC 4155 +th_link_eth MACH_TH_LINK_ETH TH_LINK_ETH 4156 +tn_muninn MACH_TN_MUNINN TN_MUNINN 4157 +rampage MACH_RAMPAGE RAMPAGE 4158 +visstrim_mv10 MACH_VISSTRIM_MV10 VISSTRIM_MV10 4159 +monacotdu MACH_MONACO_TDU MONACO_TDU 4160 +monacoul MACH_MONACO_UL MONACO_UL 4161 +enrc2u MACH_ENRC2_U ENRC2_U 4162 +evitareul MACH_EVITA_UL EVITA_UL 4163 +mx28_wilma MACH_MX28_WILMA MX28_WILMA 4164 +monacou MACH_MONACO_U MONACO_U 4165 +msm8625_ffa MACH_MSM8625_FFA MSM8625_FFA 4166 +vpu101 MACH_VPU101 VPU101 4167 +operaul MACH_OPERA_UL OPERA_UL 4168 +baileys MACH_BAILEYS BAILEYS 4169 +familybox MACH_FAMILYBOX FAMILYBOX 4170 +ensemble_mx35 MACH_ENSEMBLE_MX35 ENSEMBLE_MX35 4171 +sc_sps_1 
MACH_SC_SPS_1 SC_SPS_1 4172 +ucsimply_sam9260 MACH_UCSIMPLY_SAM9260 UCSIMPLY_SAM9260 4173 +unicorn MACH_UNICORN UNICORN 4174 +m9g45a MACH_M9G45A M9G45A 4175 +mtwebif MACH_MTWEBIF MTWEBIF 4176 +playstone MACH_PLAYSTONE PLAYSTONE 4177 +chelsea MACH_CHELSEA CHELSEA 4178 +bayern MACH_BAYERN BAYERN 4179 +mitwo MACH_MITWO MITWO 4180 +mx25_noah MACH_MX25_NOAH MX25_NOAH 4181 +stm_b2020 MACH_STM_B2020 STM_B2020 4182 +annax_src MACH_ANNAX_SRC ANNAX_SRC 4183 +ionics_stratus MACH_IONICS_STRATUS IONICS_STRATUS 4184 +hugo MACH_HUGO HUGO 4185 +em300 MACH_EM300 EM300 4186 +mmp3_qseven MACH_MMP3_QSEVEN MMP3_QSEVEN 4187 +bosphorus2 MACH_BOSPHORUS2 BOSPHORUS2 4188 +tt2200 MACH_TT2200 TT2200 4189 +ocelot3 MACH_OCELOT3 OCELOT3 4190 +tek_cobra MACH_TEK_COBRA TEK_COBRA 4191 +protou MACH_PROTOU PROTOU 4192 Property changes on: projects/nand/sys/arm/conf/mach-types ___________________________________________________________________ Added: fbsd:nokeywords ## -0,0 +1 ## +t \ No newline at end of property Index: projects/nand/sys/boot/fdt/fdt_loader_cmd.c =================================================================== --- projects/nand/sys/boot/fdt/fdt_loader_cmd.c (revision 235262) +++ projects/nand/sys/boot/fdt/fdt_loader_cmd.c (revision 235263) @@ -1,1426 +1,1428 @@ /*- * Copyright (c) 2009-2010 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include "bootstrap.h" #include "glue.h" #define DEBUG #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
#endif #define FDT_CWD_LEN 256 #define FDT_MAX_DEPTH 6 #define FDT_PROP_SEP " = " #define STR(number) #number #define STRINGIFY(number) STR(number) #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l) #define FDT_STATIC_DTB_SYMBOL "fdt_static_dtb" static struct fdt_header *fdtp = NULL; static int fdt_cmd_nyi(int argc, char *argv[]); static int fdt_cmd_mkprop(int argc, char *argv[]); static int fdt_cmd_cd(int argc, char *argv[]); static int fdt_cmd_hdr(int argc, char *argv[]); static int fdt_cmd_ls(int argc, char *argv[]); static int fdt_cmd_prop(int argc, char *argv[]); static int fdt_cmd_pwd(int argc, char *argv[]); static int fdt_cmd_rm(int argc, char *argv[]); static int fdt_cmd_mknode(int argc, char *argv[]); typedef int cmdf_t(int, char *[]); struct cmdtab { char *name; cmdf_t *handler; }; static const struct cmdtab commands[] = { { "alias", &fdt_cmd_nyi }, { "cd", &fdt_cmd_cd }, { "header", &fdt_cmd_hdr }, { "ls", &fdt_cmd_ls }, { "mknode", &fdt_cmd_mknode }, { "mkprop", &fdt_cmd_mkprop }, { "mres", &fdt_cmd_nyi }, { "prop", &fdt_cmd_prop }, { "pwd", &fdt_cmd_pwd }, { "rm", &fdt_cmd_rm }, { NULL, NULL } }; static char cwd[FDT_CWD_LEN] = "/"; static vm_offset_t fdt_find_static_dtb(void) { Elf_Sym sym; vm_offset_t dyntab, esym; uint64_t offs; struct preloaded_file *kfp; struct file_metadata *md; Elf_Sym *symtab; Elf_Dyn *dyn; char *strtab, *strp; int i, sym_count; symtab = NULL; dyntab = esym = 0; strtab = strp = NULL; offs = __elfN(relocation_offset); kfp = file_findfile(NULL, NULL); if (kfp == NULL) return (0); md = file_findmetadata(kfp, MODINFOMD_ESYM); if (md == NULL) return (0); COPYOUT(md->md_data, &esym, sizeof(esym)); md = file_findmetadata(kfp, MODINFOMD_DYNAMIC); if (md == NULL) return (0); COPYOUT(md->md_data, &dyntab, sizeof(dyntab)); dyntab += offs; /* Locate STRTAB and DYNTAB */ for (dyn = (Elf_Dyn *)dyntab; dyn->d_tag != DT_NULL; dyn++) { if (dyn->d_tag == DT_STRTAB) { strtab = (char *)(uintptr_t)(dyn->d_un.d_ptr + offs); continue; } else if (dyn->d_tag == DT_SYMTAB) { symtab = (Elf_Sym *)(uintptr_t) (dyn->d_un.d_ptr + offs); continue; } } if (symtab == NULL || strtab == NULL) { /* * No symtab? No strtab? That should not happen here, * and should have been verified during __elfN(loadimage). * This must be some kind of a bug. */ return (0); } /* Pointer subtraction already yields an element count. */ sym_count = (int)((Elf_Sym *)esym - symtab); /* * The most efficient way to find a symbol would be to calculate a * hash, find the proper bucket and chain, and look the symbol up * there. However, that would involve code duplication (e.g. of the * hash function), so we use a simpler and slightly slower approach: * iterate through the symbols, searching for the one whose name is * 'fdt_static_dtb'. To speed the scan up a little, we skip symbols * whose type is not STT_NOTYPE or whose binding is not STB_GLOBAL. */ for (i = 0; i < sym_count; i++) { COPYOUT(symtab + i, &sym, sizeof(sym)); if (ELF_ST_BIND(sym.st_info) != STB_GLOBAL || ELF_ST_TYPE(sym.st_info) != STT_NOTYPE) continue; strp = strdupout((vm_offset_t)(strtab + sym.st_name)); if (strcmp(strp, FDT_STATIC_DTB_SYMBOL) == 0) { /* Found a match! */ free(strp); return ((vm_offset_t)(sym.st_value + offs)); } free(strp); } return (0); }
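The hash-based lookup the comment above mentions would compute the standard System V ELF hash of the symbol name and use it to index the DT_HASH bucket array, then walk the chain. A minimal sketch of that hash function, for illustration only (the loader deliberately avoids duplicating it):

	/*
	 * Standard System V ELF hash (see the ELF gABI); a hash-based
	 * symbol lookup would use this to pick a DT_HASH bucket.
	 */
	static unsigned long
	elf_hash(const unsigned char *name)
	{
		unsigned long h = 0, g;

		while (*name != '\0') {
			h = (h << 4) + *name++;
			if ((g = h & 0xf0000000) != 0)
				h ^= g >> 24;
			h &= ~g;
		}
		return (h);
	}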
static int fdt_setup_fdtp(void) { struct preloaded_file *bfp; int err; /* * Find the device tree blob. */ bfp = file_findfile(NULL, "dtb"); if (bfp == NULL) { if ((fdtp = (struct fdt_header *)fdt_find_static_dtb()) == NULL) { command_errmsg = "no device tree blob found!"; return (CMD_ERROR); } } else { /* Dynamic blob has precedence over static. */ fdtp = (struct fdt_header *)bfp->f_addr; } /* * Validate the blob. */ err = fdt_check_header(fdtp); if (err < 0) { if (err == -FDT_ERR_BADVERSION) sprintf(command_errbuf, "incompatible blob version: %d, should be: %d", fdt_version(fdtp), FDT_LAST_SUPPORTED_VERSION); else sprintf(command_errbuf, "error validating blob: %s", fdt_strerror(err)); return (CMD_ERROR); } return (CMD_OK); } #define fdt_strtovect(str, cellbuf, lim, cellsize) _fdt_strtovect((str), \ (cellbuf), (lim), (cellsize), 0) /* Force using base 16 */ #define fdt_strtovectx(str, cellbuf, lim, cellsize) _fdt_strtovect((str), \ (cellbuf), (lim), (cellsize), 16) static int _fdt_strtovect(char *str, void *cellbuf, int lim, unsigned char cellsize, uint8_t base) { char *buf = str; char *end = str + strlen(str) - 2; uint32_t *u32buf = NULL; uint8_t *u8buf = NULL; int cnt = 0; if (cellsize == sizeof(uint32_t)) u32buf = (uint32_t *)cellbuf; else u8buf = (uint8_t *)cellbuf; if (lim == 0) return (0); while (buf < end) { /* Skip whitespace/separators */ while (!isxdigit(*buf) && buf < end) buf++; if (u32buf != NULL) u32buf[cnt] = cpu_to_fdt32((uint32_t)strtol(buf, NULL, base)); else u8buf[cnt] = (uint8_t)strtol(buf, NULL, base); if (cnt + 1 <= lim - 1) cnt++; else break; buf++; /* Find another number */ while ((isxdigit(*buf) || *buf == 'x') && buf < end) buf++; } return (cnt); }
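As a usage sketch (hypothetical values), the base-16 variant is what fixup_ethernet() below uses to turn a U-Boot MAC address string into the six bytes stored in a node's local-mac-address property:

	/* Hypothetical illustration of the parser above. */
	char macstr[] = "00:11:22:33:44:55";
	uint8_t mac[6];

	fdt_strtovectx(macstr, &mac, 6, sizeof(uint8_t));
	/* mac[] now holds { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }. */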
#define TMP_MAX_ETH 8 void fixup_ethernet(const char *env, char *ethstr, int *eth_no, int len) { char *end, *str; uint8_t tmp_addr[6]; int i, n; /* Extract interface number */ i = strtol(env + 3, &end, 10); if (end == (env + 3)) /* 'ethaddr' means interface 0 address */ n = 0; else n = i; if (n > TMP_MAX_ETH) return; str = ub_env_get(env); /* Convert the MAC address string into a vector of bytes */ fdt_strtovectx(str, &tmp_addr, 6, sizeof(uint8_t)); if (n != 0) { i = strlen(env) - 7; strncpy(ethstr + 8, env + 3, i); } /* Set the actual property to the value from the vector */ fdt_setprop(fdtp, fdt_path_offset(fdtp, ethstr), "local-mac-address", &tmp_addr, 6 * sizeof(uint8_t)); /* Clear the interface number in the ethernetXXXX string */ bzero(ethstr + 8, len - 8); if (n + 1 > *eth_no) *eth_no = n + 1; } void fixup_cpubusfreqs(unsigned long cpufreq, unsigned long busfreq) { int lo, o = 0, o2, maxo = 0, depth; const uint32_t zero = 0; /* We want to modify every subnode of /cpus */ o = fdt_path_offset(fdtp, "/cpus"); + if (o < 0) + return; /* maxo should contain offset of node next to /cpus */ depth = 0; maxo = o; while (depth != -1) maxo = fdt_next_node(fdtp, maxo, &depth); /* Find CPU frequency properties */ o = fdt_node_offset_by_prop_value(fdtp, o, "clock-frequency", &zero, sizeof(uint32_t)); o2 = fdt_node_offset_by_prop_value(fdtp, o, "bus-frequency", &zero, sizeof(uint32_t)); lo = MIN(o, o2); while (o != -FDT_ERR_NOTFOUND && o2 != -FDT_ERR_NOTFOUND) { o = fdt_node_offset_by_prop_value(fdtp, lo, "clock-frequency", &zero, sizeof(uint32_t)); o2 = fdt_node_offset_by_prop_value(fdtp, lo, "bus-frequency", &zero, sizeof(uint32_t)); /* We're only interested in /cpus subnode(s) */ if (lo > maxo) break; fdt_setprop_inplace_cell(fdtp, lo, "clock-frequency", (uint32_t)cpufreq); fdt_setprop_inplace_cell(fdtp, lo, "bus-frequency", (uint32_t)busfreq); lo = MIN(o, o2); } } int fdt_reg_valid(uint32_t *reg, int len, int addr_cells, int size_cells) { int cells_in_tuple, i, tuples, tuple_size; uint32_t cur_start, cur_size; cells_in_tuple = (addr_cells + size_cells); tuple_size = cells_in_tuple * sizeof(uint32_t); tuples = len / tuple_size; if (tuples == 0) return (EINVAL); for (i = 0; i < tuples; i++) { if (addr_cells == 2) cur_start = fdt64_to_cpu(reg[i * cells_in_tuple]); else cur_start = fdt32_to_cpu(reg[i * cells_in_tuple]); if (size_cells == 2) cur_size = fdt64_to_cpu(reg[i * cells_in_tuple + 2]); else cur_size = fdt32_to_cpu(reg[i * cells_in_tuple + 1]); if (cur_size == 0) return (EINVAL); debugf(" reg#%d (start: 0x%0x size: 0x%0x) valid!\n", i, cur_start, cur_size); } return (0); }
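For example, with #address-cells = 1 and #size-cells = 1 each tuple is two cells, and a 'reg' describing 1 GiB of DRAM at 0x80000000 validates cleanly. A minimal sketch (hypothetical values, cells stored big-endian as in the blob):

	/* Hypothetical: one (address, size) tuple, cells in blob byte order. */
	uint32_t reg[2] = {
		cpu_to_fdt32(0x80000000),	/* start */
		cpu_to_fdt32(0x40000000)	/* size: 1 GiB, non-zero */
	};

	if (fdt_reg_valid(reg, sizeof(reg), 1, 1) == 0) {
		/* Tuple accepted. */
	}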
void fixup_memory(struct sys_info *si) { struct mem_region *curmr; uint32_t addr_cells, size_cells; uint32_t *addr_cellsp, *reg, *size_cellsp; int err, i, len, memory, realmrno, root; uint8_t *buf, *sb; root = fdt_path_offset(fdtp, "/"); if (root < 0) { sprintf(command_errbuf, "Could not find root node!"); return; } memory = fdt_path_offset(fdtp, "/memory"); if (memory <= 0) { /* Create proper '/memory' node. */ memory = fdt_add_subnode(fdtp, root, "memory"); if (memory <= 0) { sprintf(command_errbuf, "Could not fixup '/memory' " "node, error code: %d!\n", memory); return; } err = fdt_setprop(fdtp, memory, "device_type", "memory", sizeof("memory")); if (err < 0) return; } addr_cellsp = (uint32_t *)fdt_getprop(fdtp, root, "#address-cells", NULL); size_cellsp = (uint32_t *)fdt_getprop(fdtp, root, "#size-cells", NULL); if (addr_cellsp == NULL || size_cellsp == NULL) { sprintf(command_errbuf, "Could not fixup '/memory' node: " "%s %s property not found in root node!\n", (!addr_cellsp) ? "#address-cells" : "", (!size_cellsp) ? "#size-cells" : ""); return; } addr_cells = fdt32_to_cpu(*addr_cellsp); size_cells = fdt32_to_cpu(*size_cellsp); /* Count valid memory region entries in sysinfo. */ realmrno = si->mr_no; for (i = 0; i < si->mr_no; i++) if (si->mr[i].start == 0 && si->mr[i].size == 0) realmrno--; if (realmrno == 0) { sprintf(command_errbuf, "Could not fixup '/memory' node: " "sysinfo doesn't contain valid memory regions info!\n"); return; } if ((reg = (uint32_t *)fdt_getprop(fdtp, memory, "reg", &len)) != NULL) { if (fdt_reg_valid(reg, len, addr_cells, size_cells) == 0) /* * Do not apply the fixup if the existing 'reg' * property seems to be valid. */ return; } len = (addr_cells + size_cells) * realmrno * sizeof(uint32_t); sb = buf = (uint8_t *)malloc(len); if (!buf) return; bzero(buf, len); for (i = 0; i < si->mr_no; i++) { curmr = &si->mr[i]; if (curmr->size != 0) { /* Ensure endianness, and put cells into a buffer */ if (addr_cells == 2) *(uint64_t *)buf = cpu_to_fdt64(curmr->start); else *(uint32_t *)buf = cpu_to_fdt32(curmr->start); buf += sizeof(uint32_t) * addr_cells; if (size_cells == 2) *(uint64_t *)buf = cpu_to_fdt64(curmr->size); else *(uint32_t *)buf = cpu_to_fdt32(curmr->size); buf += sizeof(uint32_t) * size_cells; } } /* Set property */ if ((err = fdt_setprop(fdtp, memory, "reg", sb, len)) < 0) sprintf(command_errbuf, "Could not fixup '/memory' node.\n"); }
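A hypothetical illustration of the packing above for a 64-bit layout (#address-cells = 2, #size-cells = 2): each value is byte-swapped to blob (big-endian) order and occupies two cells in the buffer handed to fdt_setprop():

	/*
	 * Hypothetical: one region starting above 4 GiB (hence two address
	 * cells), 1 GiB in size; values byte-swapped to blob order.
	 */
	uint8_t *buf, *p;

	buf = p = (uint8_t *)malloc(4 * sizeof(uint32_t));
	if (buf != NULL) {
		*(uint64_t *)p = cpu_to_fdt64(0x880000000ULL);	/* start */
		p += 2 * sizeof(uint32_t);
		*(uint64_t *)p = cpu_to_fdt64(0x40000000ULL);	/* size */
		/* fdt_setprop(fdtp, memory, "reg", buf, 16); free(buf); */
	}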
void fixup_stdout(const char *env) { const char *str; char *ptr; int serialno; int len, no, sero; const struct fdt_property *prop; char tmp[10]; str = ub_env_get(env); ptr = (char *)str + strlen(str) - 1; while (ptr > str && isdigit(*(ptr - 1))) ptr--; if (ptr == str) return; serialno = (int)strtol(ptr, NULL, 0); no = fdt_path_offset(fdtp, "/chosen"); if (no < 0) return; prop = fdt_get_property(fdtp, no, "stdout", &len); /* If /chosen/stdout does not exist, create it */ if (prop == NULL || len == 0) { bzero(tmp, sizeof(tmp)); strcpy(tmp, "serial"); if (strlen(ptr) > 3) /* Serial number too long */ return; strncpy(tmp + 6, ptr, 3); sero = fdt_path_offset(fdtp, tmp); if (sero < 0) /* * If the serial device we're trying to assign * stdout to doesn't exist in the DT -- return. */ return; fdt_setprop(fdtp, no, "stdout", tmp, strlen(tmp) + 1); fdt_setprop(fdtp, no, "stdin", tmp, strlen(tmp) + 1); } } /* * Locate the blob, fix it up and return its location. */ void * fdt_fixup(void) { const char *env; char *ethstr; int chosen, err, eth_no, len; struct sys_info *si; env = NULL; eth_no = 0; ethstr = NULL; len = 0; err = fdt_setup_fdtp(); if (err) { sprintf(command_errbuf, "No valid device tree blob found!"); return (NULL); } /* Create the /chosen node (if it does not exist) */ if ((chosen = fdt_subnode_offset(fdtp, 0, "chosen")) == -FDT_ERR_NOTFOUND) chosen = fdt_add_subnode(fdtp, 0, "chosen"); /* Value assigned to fixup-applied does not matter. */ if (fdt_getprop(fdtp, chosen, "fixup-applied", NULL)) goto success; /* Acquire sys_info */ si = ub_get_sys_info(); while ((env = ub_env_enum(env)) != NULL) { if (strncmp(env, "eth", 3) == 0 && strncmp(env + (strlen(env) - 4), "addr", 4) == 0) { /* * Handle Ethernet addrs: parse uboot env eth%daddr */ if (!eth_no) { /* * Check how many chars we will need to store * the maximal eth iface number. */ len = strlen(STRINGIFY(TMP_MAX_ETH)) + strlen("ethernet"); /* * Reserve memory for the string "ethernet", * len digits for the iface number and a * terminating NUL. */ ethstr = (char *)malloc(len + 1); if (ethstr == NULL) return (NULL); bzero(ethstr, len + 1); strcpy(ethstr, "ethernet0"); } /* Modify blob */ fixup_ethernet(env, ethstr, &eth_no, len); } else if (strcmp(env, "consoledev") == 0) fixup_stdout(env); } /* Modify cpu(s) and bus clock frequencies in the /cpus node [Hz] */ fixup_cpubusfreqs(si->clk_cpu, si->clk_bus); /* Fixup memory regions */ fixup_memory(si); fdt_setprop(fdtp, chosen, "fixup-applied", NULL, 0); success: return (fdtp); } int command_fdt_internal(int argc, char *argv[]) { cmdf_t *cmdh; char *cmd; int i, err; if (argc < 2) { command_errmsg = "usage is 'fdt <command> [<args>]'"; return (CMD_ERROR); } /* * Check if uboot env vars were parsed already. If not, do it now. */ if (fdt_fixup() == NULL) return (CMD_ERROR); /* * Validate fdt <command>. */ cmd = strdup(argv[1]); i = 0; cmdh = NULL; while (commands[i].name != NULL) { if (strcmp(cmd, commands[i].name) == 0) { /* found it */ cmdh = commands[i].handler; break; } i++; } if (cmdh == NULL) { command_errmsg = "unknown command"; return (CMD_ERROR); } /* * Call command handler. */ err = (*cmdh)(argc, argv); return (err); } static int fdt_cmd_cd(int argc, char *argv[]) { char *path; char tmp[FDT_CWD_LEN]; int len, o; path = (argc > 2) ? argv[2] : "/"; if (path[0] == '/') { len = strlen(path); if (len >= FDT_CWD_LEN) goto fail; } else { /* Handle path specification relative to cwd */ len = strlen(cwd) + strlen(path) + 1; if (len >= FDT_CWD_LEN) goto fail; strcpy(tmp, cwd); strcat(tmp, "/"); strcat(tmp, path); path = tmp; } o = fdt_path_offset(fdtp, path); if (o < 0) { sprintf(command_errbuf, "could not find node: '%s'", path); return (CMD_ERROR); } strcpy(cwd, path); return (CMD_OK); fail: sprintf(command_errbuf, "path too long: %d, max allowed: %d", len, FDT_CWD_LEN - 1); return (CMD_ERROR); } static int fdt_cmd_hdr(int argc __unused, char *argv[] __unused) { char line[80]; int ver; if (fdtp == NULL) { command_errmsg = "no device tree blob pointer?!"; return (CMD_ERROR); } ver = fdt_version(fdtp); pager_open(); sprintf(line, "\nFlattened device tree header (%p):\n", fdtp); pager_output(line); sprintf(line, " magic = 0x%08x\n", fdt_magic(fdtp)); pager_output(line); sprintf(line, " size = %d\n", fdt_totalsize(fdtp)); pager_output(line); sprintf(line, " off_dt_struct = 0x%08x\n", fdt_off_dt_struct(fdtp)); pager_output(line); sprintf(line, " off_dt_strings = 0x%08x\n", fdt_off_dt_strings(fdtp)); pager_output(line); sprintf(line, " off_mem_rsvmap = 0x%08x\n", fdt_off_mem_rsvmap(fdtp)); pager_output(line); sprintf(line, " version = %d\n", ver); pager_output(line); sprintf(line, " last compatible version = %d\n", fdt_last_comp_version(fdtp)); pager_output(line); if (ver >= 2) { sprintf(line, " boot_cpuid = %d\n", fdt_boot_cpuid_phys(fdtp)); pager_output(line); } if (ver >= 3) { sprintf(line, " size_dt_strings = %d\n", fdt_size_dt_strings(fdtp)); pager_output(line); } if (ver >= 17) { sprintf(line, " size_dt_struct = %d\n", fdt_size_dt_struct(fdtp)); pager_output(line); } pager_close(); return (CMD_OK); } static int fdt_cmd_ls(int argc, char *argv[]) { const char *prevname[FDT_MAX_DEPTH] = { NULL }; const char *name; char *path; int i, o, depth, len; path = (argc > 2) ?
argv[2] : NULL; if (path == NULL) path = cwd; o = fdt_path_offset(fdtp, path); if (o < 0) { sprintf(command_errbuf, "could not find node: '%s'", path); return (CMD_ERROR); } for (depth = 0; (o >= 0) && (depth >= 0); o = fdt_next_node(fdtp, o, &depth)) { name = fdt_get_name(fdtp, o, &len); if (depth > FDT_MAX_DEPTH) { printf("max depth exceeded: %d\n", depth); continue; } prevname[depth] = name; /* Skip root (i == 1) when printing devices */ for (i = 1; i <= depth; i++) { if (prevname[i] == NULL) break; if (strcmp(cwd, "/") == 0) printf("/"); printf("%s", prevname[i]); } printf("\n"); } return (CMD_OK); } static __inline int isprint(int c) { return (c >= ' ' && c <= 0x7e); } static int fdt_isprint(const void *data, int len, int *count) { const char *d; char ch; int yesno, i; if (len == 0) return (0); d = (const char *)data; if (d[len - 1] != '\0') return (0); *count = 0; yesno = 1; for (i = 0; i < len; i++) { ch = *(d + i); if (isprint(ch) || (ch == '\0' && i > 0)) { /* Count strings */ if (ch == '\0') (*count)++; continue; } yesno = 0; break; } return (yesno); } static int fdt_data_str(const void *data, int len, int count, char **buf) { char *b, *tmp; const char *d; int buf_len, i, l; /* * Calculate the length for the string and allocate memory. * * Note that 'len' already includes at least one terminator. */ buf_len = len; if (count > 1) { /* * Each token already had a terminator buried in 'len', but we * only need one eventually; don't count space for the rest. */ buf_len -= count - 1; /* Each consecutive token requires a ", " separator. */ buf_len += count * 2; } /* Add some space for surrounding double quotes. */ buf_len += count * 2; /* Note that string being put in 'tmp' may be as big as 'buf_len'. */ b = (char *)malloc(buf_len); tmp = (char *)malloc(buf_len); if (b == NULL) goto error; if (tmp == NULL) { free(b); goto error; } b[0] = '\0'; /* * Now that we have space, format the string. */ i = 0; do { d = (const char *)data + i; l = strlen(d) + 1; sprintf(tmp, "\"%s\"%s", d, (i + l) < len ? ", " : ""); strcat(b, tmp); i += l; } while (i < len); *buf = b; free(tmp); return (0); error: return (1); } static int fdt_data_cell(const void *data, int len, char **buf) { char *b, *tmp; const uint32_t *c; int count, i, l; /* Number of cells */ count = len / 4; /* * Calculate the length for the string and allocate memory. */ /* Each byte translates to 2 output characters */ l = len * 2; if (count > 1) { /* Each consecutive cell requires a " " separator. */ l += (count - 1) * 1; } /* Each cell will have a "0x" prefix */ l += count * 2; /* Space for surrounding <> and terminator */ l += 3; b = (char *)malloc(l); tmp = (char *)malloc(l); if (b == NULL) goto error; if (tmp == NULL) { free(b); goto error; } b[0] = '\0'; strcat(b, "<"); for (i = 0; i < len; i += 4) { c = (const uint32_t *)((const uint8_t *)data + i); sprintf(tmp, "0x%08x%s", fdt32_to_cpu(*c), i < (len - 4) ? " " : ""); strcat(b, tmp); } strcat(b, ">"); *buf = b; free(tmp); return (0); error: return (1); }
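For illustration, fdt_data_str() and fdt_data_cell() above and fdt_data_bytes() below render property data by shape roughly as follows (hypothetical values):

	/*
	 * Hypothetical renderings:
	 *   "uart0\0"          ->  "uart0"            (fdt_data_str)
	 *   "uart0\0uart1\0"   ->  "uart0", "uart1"   (fdt_data_str, count 2)
	 *   { 00 00 00 01 }    ->  <0x00000001>       (fdt_data_cell, len % 4 == 0)
	 *   { de ad be }       ->  [0xde 0xad 0xbe]   (fdt_data_bytes)
	 */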
static int fdt_data_bytes(const void *data, int len, char **buf) { char *b, *tmp; const char *d; int i, l; /* * Calculate the length for the string and allocate memory. */ /* Each byte translates to 2 output characters */ l = len * 2; if (len > 1) /* Each consecutive byte requires a " " separator. */ l += (len - 1) * 1; /* Each byte will have a "0x" prefix */ l += len * 2; /* Space for surrounding [] and terminator. */ l += 3; b = (char *)malloc(l); tmp = (char *)malloc(l); if (b == NULL) goto error; if (tmp == NULL) { free(b); goto error; } b[0] = '\0'; strcat(b, "["); for (i = 0, d = data; i < len; i++) { sprintf(tmp, "0x%02x%s", d[i], i < len - 1 ? " " : ""); strcat(b, tmp); } strcat(b, "]"); *buf = b; free(tmp); return (0); error: return (1); } static int fdt_data_fmt(const void *data, int len, char **buf) { int count; if (len == 0) { *buf = NULL; return (1); } if (fdt_isprint(data, len, &count)) return (fdt_data_str(data, len, count, buf)); else if ((len % 4) == 0) return (fdt_data_cell(data, len, buf)); else return (fdt_data_bytes(data, len, buf)); } static int fdt_prop(int offset) { char *line, *buf; const struct fdt_property *prop; const char *name; const void *data; int len, rv; line = NULL; prop = fdt_offset_ptr(fdtp, offset, sizeof(*prop)); if (prop == NULL) return (1); name = fdt_string(fdtp, fdt32_to_cpu(prop->nameoff)); len = fdt32_to_cpu(prop->len); rv = 0; buf = NULL; if (len == 0) { /* Property without value */ line = (char *)malloc(strlen(name) + 2); if (line == NULL) { rv = 2; goto out2; } sprintf(line, "%s\n", name); goto out1; } /* * Process property with value */ data = prop->data; if (fdt_data_fmt(data, len, &buf) != 0) { rv = 3; goto out2; } line = (char *)malloc(strlen(name) + strlen(FDT_PROP_SEP) + strlen(buf) + 2); if (line == NULL) { sprintf(command_errbuf, "could not allocate space for string"); rv = 4; goto out2; } sprintf(line, "%s" FDT_PROP_SEP "%s\n", name, buf); out1: pager_open(); pager_output(line); pager_close(); out2: if (buf) free(buf); if (line) free(line); return (rv); } static int fdt_modprop(int nodeoff, char *propname, void *value, char mode) { uint32_t cells[100]; char *buf; int len, rv; const struct fdt_property *p; p = fdt_get_property(fdtp, nodeoff, propname, NULL); if (p != NULL) { if (mode == 1) { /* Mode 1 (create) must not overwrite an existing property */ sprintf(command_errbuf, "property already exists!"); return (CMD_ERROR); } } else if (mode == 0) { sprintf(command_errbuf, "property does not exist!"); return (CMD_ERROR); } len = strlen(value); rv = 0; buf = (char *)value; switch (*buf) { case '&': /* phandles */ break; case '<': /* Data cells */ len = fdt_strtovect(buf, (void *)&cells, 100, sizeof(uint32_t)); rv = fdt_setprop(fdtp, nodeoff, propname, &cells, len * sizeof(uint32_t)); break; case '[': /* Data bytes */ len = fdt_strtovect(buf, (void *)&cells, 100, sizeof(uint8_t)); rv = fdt_setprop(fdtp, nodeoff, propname, &cells, len * sizeof(uint8_t)); break; case '"': default: /* Default -- string */ rv = fdt_setprop_string(fdtp, nodeoff, propname, value); break; } if (rv != 0) { if (rv == -FDT_ERR_NOSPACE) sprintf(command_errbuf, "Device tree blob is too small!\n"); else sprintf(command_errbuf, "Could not add/modify property!\n"); } return (rv); }
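A hypothetical loader-prompt session (node and property names invented) showing how the first character of the merged value string selects the encoding in fdt_modprop(): '<' yields 32-bit cells, '[' a byte stream, and anything else (conventionally quoted) a string:

	OK fdt cd /cpus/cpu@0
	OK fdt prop clock-frequency <0x1dcd6500>
	OK fdt mkprop mac-address [00 11 22 33 44 55]
	OK fdt mkprop model "Hypothetical CPU"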
/* Merge strings from argv into a single string */ static int fdt_merge_strings(int argc, char *argv[], int start, char **buffer) { char *buf; int i, idx, sz; *buffer = NULL; sz = 0; for (i = start; i < argc; i++) sz += strlen(argv[i]); /* Additional bytes for whitespace between args */ sz += argc - start; buf = (char *)malloc(sizeof(char) * sz); if (buf == NULL) { sprintf(command_errbuf, "could not allocate space " "for string"); return (1); } bzero(buf, sizeof(char) * sz); for (i = start, idx = 0; i < argc; i++) { strcpy(buf + idx, argv[i]); idx += strlen(argv[i]); buf[idx] = ' '; idx++; } buf[sz - 1] = '\0'; *buffer = buf; return (0); } /* Extract offset and name of node/property from a given path */ static int fdt_extract_nameloc(char **pathp, char **namep, int *nodeoff) { int o; char *path = *pathp, *name = NULL, *subpath = NULL; subpath = strrchr(path, '/'); if (subpath == NULL) { o = fdt_path_offset(fdtp, cwd); name = path; path = (char *)&cwd; } else { *subpath = '\0'; if (strlen(path) == 0) path = cwd; name = subpath + 1; o = fdt_path_offset(fdtp, path); } if (strlen(name) == 0) { sprintf(command_errbuf, "name not specified"); return (1); } if (o < 0) { sprintf(command_errbuf, "could not find node: '%s'", path); return (1); } *namep = name; *nodeoff = o; *pathp = path; return (0); } static int fdt_cmd_prop(int argc, char *argv[]) { char *path, *propname, *value; int o, next, depth, rv; uint32_t tag; path = (argc > 2) ? argv[2] : NULL; value = NULL; if (argc > 3) { /* Merge property value strings into one */ if (fdt_merge_strings(argc, argv, 3, &value) != 0) return (CMD_ERROR); } if (path == NULL) path = cwd; rv = CMD_OK; if (value) { /* If a value is specified -- try to modify the prop. */ if (fdt_extract_nameloc(&path, &propname, &o) != 0) return (CMD_ERROR); rv = fdt_modprop(o, propname, value, 0); if (rv) return (CMD_ERROR); return (CMD_OK); } /* User wants to display properties */ o = fdt_path_offset(fdtp, path); if (o < 0) { sprintf(command_errbuf, "could not find node: '%s'", path); rv = CMD_ERROR; goto out; } depth = 0; while (depth >= 0) { tag = fdt_next_tag(fdtp, o, &next); switch (tag) { case FDT_NOP: break; case FDT_PROP: if (depth > 1) /* Don't process properties of nested nodes */ break; if (fdt_prop(o) != 0) { sprintf(command_errbuf, "could not process " "property"); rv = CMD_ERROR; goto out; } break; case FDT_BEGIN_NODE: depth++; if (depth > FDT_MAX_DEPTH) { printf("warning: nesting too deep: %d\n", depth); goto out; } break; case FDT_END_NODE: depth--; if (depth == 0) /* * This is the end of our starting node; force * the loop to finish. */ depth--; break; } o = next; } out: return (rv); } static int fdt_cmd_mkprop(int argc, char *argv[]) { int o; char *path, *propname, *value; path = (argc > 2) ? argv[2] : NULL; value = NULL; if (argc > 3) { /* Merge property value strings into one */ if (fdt_merge_strings(argc, argv, 3, &value) != 0) return (CMD_ERROR); } if (fdt_extract_nameloc(&path, &propname, &o) != 0) return (CMD_ERROR); if (fdt_modprop(o, propname, value, 1)) return (CMD_ERROR); return (CMD_OK); } static int fdt_cmd_rm(int argc, char *argv[]) { int o, rv; char *path = NULL, *propname; if (argc > 2) path = argv[2]; else { sprintf(command_errbuf, "no node/property name specified"); return (CMD_ERROR); } o = fdt_path_offset(fdtp, path); if (o < 0) { /* If node not found -- try to find & delete property */ if (fdt_extract_nameloc(&path, &propname, &o) != 0) return (CMD_ERROR); if ((rv = fdt_delprop(fdtp, o, propname)) != 0) { sprintf(command_errbuf, "could not delete %s\n", (rv == -FDT_ERR_NOTFOUND) ?
"(property/node does not exist)" : ""); return (CMD_ERROR); } else return (CMD_OK); } /* If node exists -- remove node */ rv = fdt_del_node(fdtp, o); if (rv) { sprintf(command_errbuf, "could not delete node"); return (CMD_ERROR); } return (CMD_OK); } static int fdt_cmd_mknode(int argc, char *argv[]) { int o, rv; char *path = NULL, *nodename = NULL; if (argc > 2) path = argv[2]; else { sprintf(command_errbuf, "no node name specified"); return (CMD_ERROR); } if (fdt_extract_nameloc(&path, &nodename, &o) != 0) return (CMD_ERROR); rv = fdt_add_subnode(fdtp, o, nodename); if (rv < 0) { if (rv == -FDT_ERR_NOSPACE) sprintf(command_errbuf, "Device tree blob is too small!\n"); else sprintf(command_errbuf, "Could not add node!\n"); return (CMD_ERROR); } return (CMD_OK); } static int fdt_cmd_pwd(int argc, char *argv[]) { char line[FDT_CWD_LEN]; pager_open(); sprintf(line, "%s\n", cwd); pager_output(line); pager_close(); return (CMD_OK); } static int fdt_cmd_nyi(int argc, char *argv[]) { printf("command not yet implemented\n"); return (CMD_ERROR); } Index: projects/nand/sys/boot/i386/cdboot/cdboot.s =================================================================== --- projects/nand/sys/boot/i386/cdboot/cdboot.s (revision 235262) +++ projects/nand/sys/boot/i386/cdboot/cdboot.s (nonexistent) @@ -1,600 +0,0 @@ -# -# Copyright (c) 2001 John Baldwin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. Neither the name of the author nor the names of any co-contributors -# may be used to endorse or promote products derived from this software -# without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -# SUCH DAMAGE. -# - -# $FreeBSD$ - -# -# This program is a freestanding boot program to load an a.out binary -# from a CD-ROM booted with no emulation mode as described by the El -# Torito standard. Due to broken BIOSen that do not load the desired -# number of sectors, we try to fit this in as small a space as possible. -# -# Basically, we first create a set of boot arguments to pass to the loaded -# binary. Then we attempt to load /boot/loader from the CD we were booted -# off of. -# - -# -# Memory locations. 
-# - .set MEM_PAGE_SIZE,0x1000 # memory page size, 4k - .set MEM_ARG,0x900 # Arguments at start - .set MEM_ARG_BTX,0xa100 # Where we move them to so the - # BTX client can see them - .set MEM_ARG_SIZE,0x18 # Size of the arguments - .set MEM_BTX_ADDRESS,0x9000 # where BTX lives - .set MEM_BTX_ENTRY,0x9010 # where BTX starts to execute - .set MEM_BTX_OFFSET,MEM_PAGE_SIZE # offset of BTX in the loader - .set MEM_BTX_CLIENT,0xa000 # where BTX clients live -# -# a.out header fields -# - .set AOUT_TEXT,0x04 # text segment size - .set AOUT_DATA,0x08 # data segment size - .set AOUT_BSS,0x0c # zero'd BSS size - .set AOUT_SYMBOLS,0x10 # symbol table - .set AOUT_ENTRY,0x14 # entry point - .set AOUT_HEADER,MEM_PAGE_SIZE # size of the a.out header -# -# Flags for kargs->bootflags -# - .set KARGS_FLAGS_CD,0x1 # flag to indicate booting from - # CD loader -# -# Segment selectors. -# - .set SEL_SDATA,0x8 # Supervisor data - .set SEL_RDATA,0x10 # Real mode data - .set SEL_SCODE,0x18 # PM-32 code - .set SEL_SCODE16,0x20 # PM-16 code -# -# BTX constants -# - .set INT_SYS,0x30 # BTX syscall interrupt -# -# Constants for reading from the CD. -# - .set ERROR_TIMEOUT,0x80 # BIOS timeout on read - .set NUM_RETRIES,3 # Num times to retry - .set SECTOR_SIZE,0x800 # size of a sector - .set SECTOR_SHIFT,11 # number of place to shift - .set BUFFER_LEN,0x100 # number of sectors in buffer - .set MAX_READ,0x10000 # max we can read at a time - .set MAX_READ_SEC,MAX_READ >> SECTOR_SHIFT - .set MEM_READ_BUFFER,0x9000 # buffer to read from CD - .set MEM_VOLDESC,MEM_READ_BUFFER # volume descriptor - .set MEM_DIR,MEM_VOLDESC+SECTOR_SIZE # Lookup buffer - .set VOLDESC_LBA,0x10 # LBA of vol descriptor - .set VD_PRIMARY,1 # Primary VD - .set VD_END,255 # VD Terminator - .set VD_ROOTDIR,156 # Offset of Root Dir Record - .set DIR_LEN,0 # Offset of Dir Record length - .set DIR_EA_LEN,1 # Offset of EA length - .set DIR_EXTENT,2 # Offset of 64-bit LBA - .set DIR_SIZE,10 # Offset of 64-bit length - .set DIR_NAMELEN,32 # Offset of 8-bit name len - .set DIR_NAME,33 # Offset of dir name -# -# We expect to be loaded by the BIOS at 0x7c00 (standard boot loader entry -# point) -# - .code16 - .globl start - .org 0x0, 0x0 -# -# Program start. -# -start: cld # string ops inc - xor %ax,%ax # zero %ax - mov %ax,%ss # setup the - mov $start,%sp # stack - mov %ax,%ds # setup the - mov %ax,%es # data segments - mov %dl,drive # Save BIOS boot device - mov $msg_welcome,%si # %ds:(%si) -> welcome message - call putstr # display the welcome message -# -# Setup the arguments that the loader is expecting from boot[12] -# - mov $msg_bootinfo,%si # %ds:(%si) -> boot args message - call putstr # display the message - mov $MEM_ARG,%bx # %ds:(%bx) -> boot args - mov %bx,%di # %es:(%di) -> boot args - xor %eax,%eax # zero %eax - mov $(MEM_ARG_SIZE/4),%cx # Size of arguments in 32-bit - # dwords - rep # Clear the arguments - stosl # to zero - mov drive,%dl # Store BIOS boot device - mov %dl,0x4(%bx) # in kargs->bootdev - or $KARGS_FLAGS_CD,0x8(%bx) # kargs->bootflags |= - # KARGS_FLAGS_CD -# -# Load Volume Descriptor -# - mov $VOLDESC_LBA,%eax # Set LBA of first VD -load_vd: push %eax # Save %eax - mov $1,%dh # One sector - mov $MEM_VOLDESC,%ebx # Destination - call read # Read it in - cmpb $VD_PRIMARY,(%bx) # Primary VD? - je have_vd # Yes - pop %eax # Prepare to - inc %eax # try next - cmpb $VD_END,(%bx) # Last VD? 
- jne load_vd # No, read next - mov $msg_novd,%si # No VD - jmp error # Halt -have_vd: # Have Primary VD -# -# Try to look up the loader binary using the paths in the loader_paths -# array. -# - mov $loader_paths,%si # Point to start of array -lookup_path: push %si # Save file name pointer - call lookup # Try to find file - pop %di # Restore file name pointer - jnc lookup_found # Found this file - xor %al,%al # Look for next - mov $0xffff,%cx # path name by - repnz # scanning for - scasb # nul char - mov %di,%si # Point %si at next path - mov (%si),%al # Get first char of next path - or %al,%al # Is it double nul? - jnz lookup_path # No, try it. - mov $msg_failed,%si # Failed message - jmp error # Halt -lookup_found: # Found a loader file -# -# Load the binary into the buffer. Due to real mode addressing limitations -# we have to read it in 64k chunks. -# - mov DIR_SIZE(%bx),%eax # Read file length - add $SECTOR_SIZE-1,%eax # Convert length to sectors - shr $SECTOR_SHIFT,%eax - cmp $BUFFER_LEN,%eax - jbe load_sizeok - mov $msg_load2big,%si # Error message - call error -load_sizeok: movzbw %al,%cx # Num sectors to read - mov DIR_EXTENT(%bx),%eax # Load extent - xor %edx,%edx - mov DIR_EA_LEN(%bx),%dl - add %edx,%eax # Skip extended - mov $MEM_READ_BUFFER,%ebx # Read into the buffer -load_loop: mov %cl,%dh - cmp $MAX_READ_SEC,%cl # Truncate to max read size - jbe load_notrunc - mov $MAX_READ_SEC,%dh -load_notrunc: sub %dh,%cl # Update count - push %eax # Save - call read # Read it in - pop %eax # Restore - add $MAX_READ_SEC,%eax # Update LBA - add $MAX_READ,%ebx # Update dest addr - jcxz load_done # Done? - jmp load_loop # Keep going -load_done: -# -# Turn on the A20 address line -# - call seta20 # Turn A20 on -# -# Relocate the loader and BTX using a very lazy protected mode -# - mov $msg_relocate,%si # Display the - call putstr # relocation message - mov MEM_READ_BUFFER+AOUT_ENTRY,%edi # %edi is the destination - mov $(MEM_READ_BUFFER+AOUT_HEADER),%esi # %esi is - # the start of the text - # segment - mov MEM_READ_BUFFER+AOUT_TEXT,%ecx # %ecx = length of the text - # segment - push %edi # Save entry point for later - lgdt gdtdesc # setup our own gdt - cli # turn off interrupts - mov %cr0,%eax # Turn on - or $0x1,%al # protected - mov %eax,%cr0 # mode - ljmp $SEL_SCODE,$pm_start # long jump to clear the - # instruction pre-fetch queue - .code32 -pm_start: mov $SEL_SDATA,%ax # Initialize - mov %ax,%ds # %ds and - mov %ax,%es # %es to a flat selector - rep # Relocate the - movsb # text segment - add $(MEM_PAGE_SIZE - 1),%edi # pad %edi out to a new page - and $~(MEM_PAGE_SIZE - 1),%edi # for the data segment - mov MEM_READ_BUFFER+AOUT_DATA,%ecx # size of the data segment - rep # Relocate the - movsb # data segment - mov MEM_READ_BUFFER+AOUT_BSS,%ecx # size of the bss - xor %eax,%eax # zero %eax - add $3,%cl # round %ecx up to - shr $2,%ecx # a multiple of 4 - rep # zero the - stosl # bss - mov MEM_READ_BUFFER+AOUT_ENTRY,%esi # %esi -> relocated loader - add $MEM_BTX_OFFSET,%esi # %esi -> BTX in the loader - mov $MEM_BTX_ADDRESS,%edi # %edi -> where BTX needs to go - movzwl 0xa(%esi),%ecx # %ecx -> length of BTX - rep # Relocate - movsb # BTX - ljmp $SEL_SCODE16,$pm_16 # Jump to 16-bit PM - .code16 -pm_16: mov $SEL_RDATA,%ax # Initialize - mov %ax,%ds # %ds and - mov %ax,%es # %es to a real mode selector - mov %cr0,%eax # Turn off - and $~0x1,%al # protected - mov %eax,%cr0 # mode - ljmp $0,$pm_end # Long jump to clear the - # instruction pre-fetch queue -pm_end: sti # Turn interrupts back on 
now -# -# Copy the BTX client to MEM_BTX_CLIENT -# - xor %ax,%ax # zero %ax and set - mov %ax,%ds # %ds and %es - mov %ax,%es # to segment 0 - mov $MEM_BTX_CLIENT,%di # Prepare to relocate - mov $btx_client,%si # the simple btx client - mov $(btx_client_end-btx_client),%cx # length of btx client - rep # Relocate the - movsb # simple BTX client -# -# Copy the boot[12] args to where the BTX client can see them -# - mov $MEM_ARG,%si # where the args are at now - mov $MEM_ARG_BTX,%di # where the args are moving to - mov $(MEM_ARG_SIZE/4),%cx # size of the arguments in longs - rep # Relocate - movsl # the words -# -# Save the entry point so the client can get to it later on -# - pop %eax # Restore saved entry point - stosl # and add it to the end of - # the arguments -# -# Now we just start up BTX and let it do the rest -# - mov $msg_jump,%si # Display the - call putstr # jump message - ljmp $0,$MEM_BTX_ENTRY # Jump to the BTX entry point - -# -# Lookup the file in the path at [SI] from the root directory. -# -# Trashes: All but BX -# Returns: CF = 0 (success), BX = pointer to record -# CF = 1 (not found) -# -lookup: mov $VD_ROOTDIR+MEM_VOLDESC,%bx # Root directory record - push %si - mov $msg_lookup,%si # Display lookup message - call putstr - pop %si - push %si - call putstr - mov $msg_lookup2,%si - call putstr - pop %si -lookup_dir: lodsb # Get first char of path - cmp $0,%al # Are we done? - je lookup_done # Yes - cmp $'/',%al # Skip path separator. - je lookup_dir - dec %si # Undo lodsb side effect - call find_file # Lookup first path item - jnc lookup_dir # Try next component - mov $msg_lookupfail,%si # Not found message - call putstr - stc # Set carry - ret - jmp error -lookup_done: mov $msg_lookupok,%si # Success message - call putstr - clc # Clear carry - ret - -# -# Lookup file at [SI] in directory whose record is at [BX]. -# -# Trashes: All but returns -# Returns: CF = 0 (success), BX = pointer to record, SI = next path item -# CF = 1 (not found), SI = preserved -# -find_file: mov DIR_EXTENT(%bx),%eax # Load extent - xor %edx,%edx - mov DIR_EA_LEN(%bx),%dl - add %edx,%eax # Skip extended attributes - mov %eax,rec_lba # Save LBA - mov DIR_SIZE(%bx),%eax # Save size - mov %eax,rec_size - xor %cl,%cl # Zero length - push %si # Save -ff.namelen: inc %cl # Update length - lodsb # Read char - cmp $0,%al # Nul? - je ff.namedone # Yes - cmp $'/',%al # Path separator? - jnz ff.namelen # No, keep going -ff.namedone: dec %cl # Adjust length and save - mov %cl,name_len - pop %si # Restore -ff.load: mov rec_lba,%eax # Load LBA - mov $MEM_DIR,%ebx # Address buffer - mov $1,%dh # One sector - call read # Read directory block - incl rec_lba # Update LBA to next block -ff.scan: mov %ebx,%edx # Check for EOF - sub $MEM_DIR,%edx - cmp %edx,rec_size - ja ff.scan.1 - stc # EOF reached - ret -ff.scan.1: cmpb $0,DIR_LEN(%bx) # Last record in block? - je ff.nextblock - push %si # Save - movzbw DIR_NAMELEN(%bx),%si # Find end of string -ff.checkver: cmpb $'0',DIR_NAME-1(%bx,%si) # Less than '0'? - jb ff.checkver.1 - cmpb $'9',DIR_NAME-1(%bx,%si) # Greater than '9'? - ja ff.checkver.1 - dec %si # Next char - jnz ff.checkver - jmp ff.checklen # All numbers in name, so - # no version -ff.checkver.1: movzbw DIR_NAMELEN(%bx),%cx - cmp %cx,%si # Did we find any digits? 
- je ff.checkdot # No - cmpb $';',DIR_NAME-1(%bx,%si) # Check for semicolon - jne ff.checkver.2 - dec %si # Skip semicolon - mov %si,%cx - mov %cl,DIR_NAMELEN(%bx) # Adjust length - jmp ff.checkdot -ff.checkver.2: mov %cx,%si # Restore %si to end of string -ff.checkdot: cmpb $'.',DIR_NAME-1(%bx,%si) # Trailing dot? - jne ff.checklen # No - decb DIR_NAMELEN(%bx) # Adjust length -ff.checklen: pop %si # Restore - movzbw name_len,%cx # Load length of name - cmp %cl,DIR_NAMELEN(%bx) # Does length match? - je ff.checkname # Yes, check name -ff.nextrec: add DIR_LEN(%bx),%bl # Next record - adc $0,%bh - jmp ff.scan -ff.nextblock: subl $SECTOR_SIZE,rec_size # Adjust size - jnc ff.load # If subtract ok, keep going - ret # End of file, so not found -ff.checkname: lea DIR_NAME(%bx),%di # Address name in record - push %si # Save - repe cmpsb # Compare name - je ff.match # We have a winner! - pop %si # Restore - jmp ff.nextrec # Keep looking. -ff.match: add $2,%sp # Discard saved %si - clc # Clear carry - ret - -# -# Load DH sectors starting at LBA EAX into [EBX]. -# -# Trashes: EAX -# -read: push %si # Save - push %cx # Save since some BIOSs trash - mov %eax,edd_lba # LBA to read from - mov %ebx,%eax # Convert address - shr $4,%eax # to segment - mov %ax,edd_addr+0x2 # and store -read.retry: call twiddle # Entertain the user - push %dx # Save - mov $edd_packet,%si # Address Packet - mov %dh,edd_len # Set length - mov drive,%dl # BIOS Device - mov $0x42,%ah # BIOS: Extended Read - int $0x13 # Call BIOS - pop %dx # Restore - jc read.fail # Worked? - pop %cx # Restore - pop %si - ret # Return -read.fail: cmp $ERROR_TIMEOUT,%ah # Timeout? - je read.retry # Yes, Retry. -read.error: mov %ah,%al # Save error - mov $hex_error,%di # Format it - call hex8 # as hex - mov $msg_badread,%si # Display Read error message - -# -# Display error message at [SI] and halt. -# -error: call putstr # Display message -halt: hlt - jmp halt # Spin - -# -# Display a null-terminated string. -# -# Trashes: AX, SI -# -putstr: push %bx # Save -putstr.load: lodsb # load %al from %ds:(%si) - test %al,%al # stop at null - jnz putstr.putc # if the char != null, output it - pop %bx # Restore - ret # return when null is hit -putstr.putc: call putc # output char - jmp putstr.load # next char - -# -# Display a single char. -# -putc: mov $0x7,%bx # attribute for output - mov $0xe,%ah # BIOS: put_char - int $0x10 # call BIOS, print char in %al - ret # Return to caller - -# -# Output the "twiddle" -# -twiddle: push %ax # Save - push %bx # Save - mov twiddle_index,%al # Load index - mov $twiddle_chars,%bx # Address table - inc %al # Next - and $3,%al # char - mov %al,twiddle_index # Save index for next call - xlat # Get char - call putc # Output it - mov $8,%al # Backspace - call putc # Output it - pop %bx # Restore - pop %ax # Restore - ret - -# -# Enable A20. Put an upper limit on the amount of time we wait for the -# keyboard controller to get ready (65K x ISA access time). If -# we wait more than that amount, the hardware is probably -# legacy-free and simply doesn't have a keyboard controller. -# Thus, the A20 line is already enabled. -# -seta20: cli # Disable interrupts - xor %cx,%cx # Clear -seta20.1: inc %cx # Increment, overflow? - jz seta20.3 # Yes - in $0x64,%al # Get status - test $0x2,%al # Busy? - jnz seta20.1 # Yes - mov $0xd1,%al # Command: Write - out %al,$0x64 # output port -seta20.2: in $0x64,%al # Get status - test $0x2,%al # Busy? 
- jnz seta20.2 # Yes - mov $0xdf,%al # Enable - out %al,$0x60 # A20 -seta20.3: sti # Enable interrupts - ret # To caller - -# -# Convert AL to hex, saving the result to [EDI]. -# -hex8: pushl %eax # Save - shrb $0x4,%al # Do upper - call hex8.1 # 4 - popl %eax # Restore -hex8.1: andb $0xf,%al # Get lower 4 - cmpb $0xa,%al # Convert - sbbb $0x69,%al # to hex - das # digit - orb $0x20,%al # To lower case - stosb # Save char - ret # (Recursive) - -# -# BTX client to start btxldr -# - .code32 -btx_client: mov $(MEM_ARG_BTX-MEM_BTX_CLIENT+MEM_ARG_SIZE-4), %esi - # %ds:(%esi) -> end - # of boot[12] args - mov $(MEM_ARG_SIZE/4),%ecx # Number of words to push - std # Go backwards -push_arg: lodsl # Read argument - push %eax # Push it onto the stack - loop push_arg # Push all of the arguments - cld # In case anyone depends on this - pushl MEM_ARG_BTX-MEM_BTX_CLIENT+MEM_ARG_SIZE # Entry point of - # the loader - push %eax # Emulate a near call - mov $0x1,%eax # 'exec' system call - int $INT_SYS # BTX system call -btx_client_end: - .code16 - - .p2align 4 -# -# Global descriptor table. -# -gdt: .word 0x0,0x0,0x0,0x0 # Null entry - .word 0xffff,0x0,0x9200,0xcf # SEL_SDATA - .word 0xffff,0x0,0x9200,0x0 # SEL_RDATA - .word 0xffff,0x0,0x9a00,0xcf # SEL_SCODE (32-bit) - .word 0xffff,0x0,0x9a00,0x8f # SEL_SCODE16 (16-bit) -gdt.1: -# -# Pseudo-descriptors. -# -gdtdesc: .word gdt.1-gdt-1 # Limit - .long gdt # Base -# -# EDD Packet -# -edd_packet: .byte 0x10 # Length - .byte 0 # Reserved -edd_len: .byte 0x0 # Num to read - .byte 0 # Reserved -edd_addr: .word 0x0,0x0 # Seg:Off -edd_lba: .quad 0x0 # LBA - -drive: .byte 0 - -# -# State for searching dir -# -rec_lba: .long 0x0 # LBA (adjusted for EA) -rec_size: .long 0x0 # File size -name_len: .byte 0x0 # Length of current name - -twiddle_index: .byte 0x0 - -msg_welcome: .asciz "CD Loader 1.2\r\n\n" -msg_bootinfo: .asciz "Building the boot loader arguments\r\n" -msg_relocate: .asciz "Relocating the loader and the BTX\r\n" -msg_jump: .asciz "Starting the BTX loader\r\n" -msg_badread: .ascii "Read Error: 0x" -hex_error: .asciz "00\r\n" -msg_novd: .asciz "Could not find Primary Volume Descriptor\r\n" -msg_lookup: .asciz "Looking up " -msg_lookup2: .asciz "... " -msg_lookupok: .asciz "Found\r\n" -msg_lookupfail: .asciz "File not found\r\n" -msg_load2big: .asciz "File too big\r\n" -msg_failed: .asciz "Boot failed\r\n" -twiddle_chars: .ascii "|/-\\" -loader_paths: .asciz "/BOOT/LOADER" - .asciz "/boot/loader" - .byte 0 - Property changes on: projects/nand/sys/boot/i386/cdboot/cdboot.s ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: projects/nand/sys/boot/i386/cdboot/Makefile =================================================================== --- projects/nand/sys/boot/i386/cdboot/Makefile (revision 235262) +++ projects/nand/sys/boot/i386/cdboot/Makefile (revision 235263) @@ -1,13 +1,15 @@ # $FreeBSD$ PROG= cdboot STRIP= BINMODE=${NOBINMODE} NO_MAN= -SRCS= ${PROG}.s +SRCS= ${PROG}.S + +CFLAGS+=-I${.CURDIR}/../common ORG= 0x7c00 LDFLAGS=-e start -Ttext ${ORG} -Wl,-N,-S,--oformat,binary .include Index: projects/nand/sys/boot/i386/cdboot/cdboot.S =================================================================== --- projects/nand/sys/boot/i386/cdboot/cdboot.S (nonexistent) +++ projects/nand/sys/boot/i386/cdboot/cdboot.S (revision 235263) @@ -0,0 +1,597 @@ +# +# Copyright (c) 2001 John Baldwin +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of the author nor the names of any co-contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# + +# $FreeBSD$ + +# +# This program is a freestanding boot program to load an a.out binary +# from a CD-ROM booted with no emulation mode as described by the El +# Torito standard. Due to broken BIOSen that do not load the desired +# number of sectors, we try to fit this in as small a space as possible. +# +# Basically, we first create a set of boot arguments to pass to the loaded +# binary. Then we attempt to load /boot/loader from the CD we were booted +# off of. +# + +#include + +# +# Memory locations. +# + .set MEM_PAGE_SIZE,0x1000 # memory page size, 4k + .set MEM_ARG,0x900 # Arguments at start + .set MEM_ARG_BTX,0xa100 # Where we move them to so the + # BTX client can see them + .set MEM_ARG_SIZE,0x18 # Size of the arguments + .set MEM_BTX_ADDRESS,0x9000 # where BTX lives + .set MEM_BTX_ENTRY,0x9010 # where BTX starts to execute + .set MEM_BTX_OFFSET,MEM_PAGE_SIZE # offset of BTX in the loader + .set MEM_BTX_CLIENT,0xa000 # where BTX clients live +# +# a.out header fields +# + .set AOUT_TEXT,0x04 # text segment size + .set AOUT_DATA,0x08 # data segment size + .set AOUT_BSS,0x0c # zero'd BSS size + .set AOUT_SYMBOLS,0x10 # symbol table + .set AOUT_ENTRY,0x14 # entry point + .set AOUT_HEADER,MEM_PAGE_SIZE # size of the a.out header +# +# Segment selectors. +# + .set SEL_SDATA,0x8 # Supervisor data + .set SEL_RDATA,0x10 # Real mode data + .set SEL_SCODE,0x18 # PM-32 code + .set SEL_SCODE16,0x20 # PM-16 code +# +# BTX constants +# + .set INT_SYS,0x30 # BTX syscall interrupt +# +# Constants for reading from the CD. 
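# Background for the ISO 9660 values below: CD sectors are 2048 bytes,
# volume descriptors start at LBA 16 (VOLDESC_LBA), descriptor type 1
# is the Primary Volume Descriptor, and type 255 terminates the
# descriptor set.  The root directory record sits at offset 156 of the
# PVD, and the DIR_* values are byte offsets into a directory record.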
+# + .set ERROR_TIMEOUT,0x80 # BIOS timeout on read + .set NUM_RETRIES,3 # Num times to retry + .set SECTOR_SIZE,0x800 # size of a sector + .set SECTOR_SHIFT,11 # number of place to shift + .set BUFFER_LEN,0x100 # number of sectors in buffer + .set MAX_READ,0x10000 # max we can read at a time + .set MAX_READ_SEC,MAX_READ >> SECTOR_SHIFT + .set MEM_READ_BUFFER,0x9000 # buffer to read from CD + .set MEM_VOLDESC,MEM_READ_BUFFER # volume descriptor + .set MEM_DIR,MEM_VOLDESC+SECTOR_SIZE # Lookup buffer + .set VOLDESC_LBA,0x10 # LBA of vol descriptor + .set VD_PRIMARY,1 # Primary VD + .set VD_END,255 # VD Terminator + .set VD_ROOTDIR,156 # Offset of Root Dir Record + .set DIR_LEN,0 # Offset of Dir Record length + .set DIR_EA_LEN,1 # Offset of EA length + .set DIR_EXTENT,2 # Offset of 64-bit LBA + .set DIR_SIZE,10 # Offset of 64-bit length + .set DIR_NAMELEN,32 # Offset of 8-bit name len + .set DIR_NAME,33 # Offset of dir name +# +# We expect to be loaded by the BIOS at 0x7c00 (standard boot loader entry +# point) +# + .code16 + .globl start + .org 0x0, 0x0 +# +# Program start. +# +start: cld # string ops inc + xor %ax,%ax # zero %ax + mov %ax,%ss # setup the + mov $start,%sp # stack + mov %ax,%ds # setup the + mov %ax,%es # data segments + mov %dl,drive # Save BIOS boot device + mov $msg_welcome,%si # %ds:(%si) -> welcome message + call putstr # display the welcome message +# +# Setup the arguments that the loader is expecting from boot[12] +# + mov $msg_bootinfo,%si # %ds:(%si) -> boot args message + call putstr # display the message + mov $MEM_ARG,%bx # %ds:(%bx) -> boot args + mov %bx,%di # %es:(%di) -> boot args + xor %eax,%eax # zero %eax + mov $(MEM_ARG_SIZE/4),%cx # Size of arguments in 32-bit + # dwords + rep # Clear the arguments + stosl # to zero + mov drive,%dl # Store BIOS boot device + mov %dl,0x4(%bx) # in kargs->bootdev + or $KARGS_FLAGS_CD,0x8(%bx) # kargs->bootflags |= + # KARGS_FLAGS_CD +# +# Load Volume Descriptor +# + mov $VOLDESC_LBA,%eax # Set LBA of first VD +load_vd: push %eax # Save %eax + mov $1,%dh # One sector + mov $MEM_VOLDESC,%ebx # Destination + call read # Read it in + cmpb $VD_PRIMARY,(%bx) # Primary VD? + je have_vd # Yes + pop %eax # Prepare to + inc %eax # try next + cmpb $VD_END,(%bx) # Last VD? + jne load_vd # No, read next + mov $msg_novd,%si # No VD + jmp error # Halt +have_vd: # Have Primary VD +# +# Try to look up the loader binary using the paths in the loader_paths +# array. +# + mov $loader_paths,%si # Point to start of array +lookup_path: push %si # Save file name pointer + call lookup # Try to find file + pop %di # Restore file name pointer + jnc lookup_found # Found this file + xor %al,%al # Look for next + mov $0xffff,%cx # path name by + repnz # scanning for + scasb # nul char + mov %di,%si # Point %si at next path + mov (%si),%al # Get first char of next path + or %al,%al # Is it double nul? + jnz lookup_path # No, try it. + mov $msg_failed,%si # Failed message + jmp error # Halt +lookup_found: # Found a loader file +# +# Load the binary into the buffer. Due to real mode addressing limitations +# we have to read it in 64k chunks. 
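# (A 16-bit offset can address at most 64 KiB within one real-mode
# segment, hence MAX_READ of 0x10000 bytes, i.e. MAX_READ_SEC = 32
# sectors per pass; the destination address is advanced by MAX_READ
# between passes and converted to a segment inside read.)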
+# + mov DIR_SIZE(%bx),%eax # Read file length + add $SECTOR_SIZE-1,%eax # Convert length to sectors + shr $SECTOR_SHIFT,%eax + cmp $BUFFER_LEN,%eax + jbe load_sizeok + mov $msg_load2big,%si # Error message + call error +load_sizeok: movzbw %al,%cx # Num sectors to read + mov DIR_EXTENT(%bx),%eax # Load extent + xor %edx,%edx + mov DIR_EA_LEN(%bx),%dl + add %edx,%eax # Skip extended + mov $MEM_READ_BUFFER,%ebx # Read into the buffer +load_loop: mov %cl,%dh + cmp $MAX_READ_SEC,%cl # Truncate to max read size + jbe load_notrunc + mov $MAX_READ_SEC,%dh +load_notrunc: sub %dh,%cl # Update count + push %eax # Save + call read # Read it in + pop %eax # Restore + add $MAX_READ_SEC,%eax # Update LBA + add $MAX_READ,%ebx # Update dest addr + jcxz load_done # Done? + jmp load_loop # Keep going +load_done: +# +# Turn on the A20 address line +# + call seta20 # Turn A20 on +# +# Relocate the loader and BTX using a very lazy protected mode +# + mov $msg_relocate,%si # Display the + call putstr # relocation message + mov MEM_READ_BUFFER+AOUT_ENTRY,%edi # %edi is the destination + mov $(MEM_READ_BUFFER+AOUT_HEADER),%esi # %esi is + # the start of the text + # segment + mov MEM_READ_BUFFER+AOUT_TEXT,%ecx # %ecx = length of the text + # segment + push %edi # Save entry point for later + lgdt gdtdesc # setup our own gdt + cli # turn off interrupts + mov %cr0,%eax # Turn on + or $0x1,%al # protected + mov %eax,%cr0 # mode + ljmp $SEL_SCODE,$pm_start # long jump to clear the + # instruction pre-fetch queue + .code32 +pm_start: mov $SEL_SDATA,%ax # Initialize + mov %ax,%ds # %ds and + mov %ax,%es # %es to a flat selector + rep # Relocate the + movsb # text segment + add $(MEM_PAGE_SIZE - 1),%edi # pad %edi out to a new page + and $~(MEM_PAGE_SIZE - 1),%edi # for the data segment + mov MEM_READ_BUFFER+AOUT_DATA,%ecx # size of the data segment + rep # Relocate the + movsb # data segment + mov MEM_READ_BUFFER+AOUT_BSS,%ecx # size of the bss + xor %eax,%eax # zero %eax + add $3,%cl # round %ecx up to + shr $2,%ecx # a multiple of 4 + rep # zero the + stosl # bss + mov MEM_READ_BUFFER+AOUT_ENTRY,%esi # %esi -> relocated loader + add $MEM_BTX_OFFSET,%esi # %esi -> BTX in the loader + mov $MEM_BTX_ADDRESS,%edi # %edi -> where BTX needs to go + movzwl 0xa(%esi),%ecx # %ecx -> length of BTX + rep # Relocate + movsb # BTX + ljmp $SEL_SCODE16,$pm_16 # Jump to 16-bit PM + .code16 +pm_16: mov $SEL_RDATA,%ax # Initialize + mov %ax,%ds # %ds and + mov %ax,%es # %es to a real mode selector + mov %cr0,%eax # Turn off + and $~0x1,%al # protected + mov %eax,%cr0 # mode + ljmp $0,$pm_end # Long jump to clear the + # instruction pre-fetch queue +pm_end: sti # Turn interrupts back on now +# +# Copy the BTX client to MEM_BTX_CLIENT +# + xor %ax,%ax # zero %ax and set + mov %ax,%ds # %ds and %es + mov %ax,%es # to segment 0 + mov $MEM_BTX_CLIENT,%di # Prepare to relocate + mov $btx_client,%si # the simple btx client + mov $(btx_client_end-btx_client),%cx # length of btx client + rep # Relocate the + movsb # simple BTX client +# +# Copy the boot[12] args to where the BTX client can see them +# + mov $MEM_ARG,%si # where the args are at now + mov $MEM_ARG_BTX,%di # where the args are moving to + mov $(MEM_ARG_SIZE/4),%cx # size of the arguments in longs + rep # Relocate + movsl # the words +# +# Save the entry point so the client can get to it later on +# + pop %eax # Restore saved entry point + stosl # and add it to the end of + # the arguments +# +# Now we just start up BTX and let it do the rest +# + mov $msg_jump,%si # Display the + 
call putstr # jump message + ljmp $0,$MEM_BTX_ENTRY # Jump to the BTX entry point + +# +# Lookup the file in the path at [SI] from the root directory. +# +# Trashes: All but BX +# Returns: CF = 0 (success), BX = pointer to record +# CF = 1 (not found) +# +lookup: mov $VD_ROOTDIR+MEM_VOLDESC,%bx # Root directory record + push %si + mov $msg_lookup,%si # Display lookup message + call putstr + pop %si + push %si + call putstr + mov $msg_lookup2,%si + call putstr + pop %si +lookup_dir: lodsb # Get first char of path + cmp $0,%al # Are we done? + je lookup_done # Yes + cmp $'/',%al # Skip path separator. + je lookup_dir + dec %si # Undo lodsb side effect + call find_file # Lookup first path item + jnc lookup_dir # Try next component + mov $msg_lookupfail,%si # Not found message + call putstr + stc # Set carry + ret + jmp error +lookup_done: mov $msg_lookupok,%si # Success message + call putstr + clc # Clear carry + ret + +# +# Lookup file at [SI] in directory whose record is at [BX]. +# +# Trashes: All but returns +# Returns: CF = 0 (success), BX = pointer to record, SI = next path item +# CF = 1 (not found), SI = preserved +# +find_file: mov DIR_EXTENT(%bx),%eax # Load extent + xor %edx,%edx + mov DIR_EA_LEN(%bx),%dl + add %edx,%eax # Skip extended attributes + mov %eax,rec_lba # Save LBA + mov DIR_SIZE(%bx),%eax # Save size + mov %eax,rec_size + xor %cl,%cl # Zero length + push %si # Save +ff.namelen: inc %cl # Update length + lodsb # Read char + cmp $0,%al # Nul? + je ff.namedone # Yes + cmp $'/',%al # Path separator? + jnz ff.namelen # No, keep going +ff.namedone: dec %cl # Adjust length and save + mov %cl,name_len + pop %si # Restore +ff.load: mov rec_lba,%eax # Load LBA + mov $MEM_DIR,%ebx # Address buffer + mov $1,%dh # One sector + call read # Read directory block + incl rec_lba # Update LBA to next block +ff.scan: mov %ebx,%edx # Check for EOF + sub $MEM_DIR,%edx + cmp %edx,rec_size + ja ff.scan.1 + stc # EOF reached + ret +ff.scan.1: cmpb $0,DIR_LEN(%bx) # Last record in block? + je ff.nextblock + push %si # Save + movzbw DIR_NAMELEN(%bx),%si # Find end of string +ff.checkver: cmpb $'0',DIR_NAME-1(%bx,%si) # Less than '0'? + jb ff.checkver.1 + cmpb $'9',DIR_NAME-1(%bx,%si) # Greater than '9'? + ja ff.checkver.1 + dec %si # Next char + jnz ff.checkver + jmp ff.checklen # All numbers in name, so + # no version +ff.checkver.1: movzbw DIR_NAMELEN(%bx),%cx + cmp %cx,%si # Did we find any digits? + je ff.checkdot # No + cmpb $';',DIR_NAME-1(%bx,%si) # Check for semicolon + jne ff.checkver.2 + dec %si # Skip semicolon + mov %si,%cx + mov %cl,DIR_NAMELEN(%bx) # Adjust length + jmp ff.checkdot +ff.checkver.2: mov %cx,%si # Restore %si to end of string +ff.checkdot: cmpb $'.',DIR_NAME-1(%bx,%si) # Trailing dot? + jne ff.checklen # No + decb DIR_NAMELEN(%bx) # Adjust length +ff.checklen: pop %si # Restore + movzbw name_len,%cx # Load length of name + cmp %cl,DIR_NAMELEN(%bx) # Does length match? + je ff.checkname # Yes, check name +ff.nextrec: add DIR_LEN(%bx),%bl # Next record + adc $0,%bh + jmp ff.scan +ff.nextblock: subl $SECTOR_SIZE,rec_size # Adjust size + jnc ff.load # If subtract ok, keep going + ret # End of file, so not found +ff.checkname: lea DIR_NAME(%bx),%di # Address name in record + push %si # Save + repe cmpsb # Compare name + je ff.match # We have a winner! + pop %si # Restore + jmp ff.nextrec # Keep looking. +ff.match: add $2,%sp # Discard saved %si + clc # Clear carry + ret + +# +# Load DH sectors starting at LBA EAX into [EBX]. 
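# The transfer uses the BIOS Enhanced Disk Drive interface (INT 13h,
# AH=0x42) with the 16-byte disk address packet defined at edd_packet
# below: size byte (0x10), reserved, sector count, reserved, transfer
# buffer address (offset word, then segment word), 64-bit starting LBA.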
+# +# Trashes: EAX +# +read: push %si # Save + push %cx # Save since some BIOSs trash + mov %eax,edd_lba # LBA to read from + mov %ebx,%eax # Convert address + shr $4,%eax # to segment + mov %ax,edd_addr+0x2 # and store +read.retry: call twiddle # Entertain the user + push %dx # Save + mov $edd_packet,%si # Address Packet + mov %dh,edd_len # Set length + mov drive,%dl # BIOS Device + mov $0x42,%ah # BIOS: Extended Read + int $0x13 # Call BIOS + pop %dx # Restore + jc read.fail # Worked? + pop %cx # Restore + pop %si + ret # Return +read.fail: cmp $ERROR_TIMEOUT,%ah # Timeout? + je read.retry # Yes, Retry. +read.error: mov %ah,%al # Save error + mov $hex_error,%di # Format it + call hex8 # as hex + mov $msg_badread,%si # Display Read error message + +# +# Display error message at [SI] and halt. +# +error: call putstr # Display message +halt: hlt + jmp halt # Spin + +# +# Display a null-terminated string. +# +# Trashes: AX, SI +# +putstr: push %bx # Save +putstr.load: lodsb # load %al from %ds:(%si) + test %al,%al # stop at null + jnz putstr.putc # if the char != null, output it + pop %bx # Restore + ret # return when null is hit +putstr.putc: call putc # output char + jmp putstr.load # next char + +# +# Display a single char. +# +putc: mov $0x7,%bx # attribute for output + mov $0xe,%ah # BIOS: put_char + int $0x10 # call BIOS, print char in %al + ret # Return to caller + +# +# Output the "twiddle" +# +twiddle: push %ax # Save + push %bx # Save + mov twiddle_index,%al # Load index + mov $twiddle_chars,%bx # Address table + inc %al # Next + and $3,%al # char + mov %al,twiddle_index # Save index for next call + xlat # Get char + call putc # Output it + mov $8,%al # Backspace + call putc # Output it + pop %bx # Restore + pop %ax # Restore + ret + +# +# Enable A20. Put an upper limit on the amount of time we wait for the +# keyboard controller to get ready (65K x ISA access time). If +# we wait more than that amount, the hardware is probably +# legacy-free and simply doesn't have a keyboard controller. +# Thus, the A20 line is already enabled. +# +seta20: cli # Disable interrupts + xor %cx,%cx # Clear +seta20.1: inc %cx # Increment, overflow? + jz seta20.3 # Yes + in $0x64,%al # Get status + test $0x2,%al # Busy? + jnz seta20.1 # Yes + mov $0xd1,%al # Command: Write + out %al,$0x64 # output port +seta20.2: in $0x64,%al # Get status + test $0x2,%al # Busy? + jnz seta20.2 # Yes + mov $0xdf,%al # Enable + out %al,$0x60 # A20 +seta20.3: sti # Enable interrupts + ret # To caller + +# +# Convert AL to hex, saving the result to [EDI]. +# +hex8: pushl %eax # Save + shrb $0x4,%al # Do upper + call hex8.1 # 4 + popl %eax # Restore +hex8.1: andb $0xf,%al # Get lower 4 + cmpb $0xa,%al # Convert + sbbb $0x69,%al # to hex + das # digit + orb $0x20,%al # To lower case + stosb # Save char + ret # (Recursive) + +# +# BTX client to start btxldr +# + .code32 +btx_client: mov $(MEM_ARG_BTX-MEM_BTX_CLIENT+MEM_ARG_SIZE-4), %esi + # %ds:(%esi) -> end + # of boot[12] args + mov $(MEM_ARG_SIZE/4),%ecx # Number of words to push + std # Go backwards +push_arg: lodsl # Read argument + push %eax # Push it onto the stack + loop push_arg # Push all of the arguments + cld # In case anyone depends on this + pushl MEM_ARG_BTX-MEM_BTX_CLIENT+MEM_ARG_SIZE # Entry point of + # the loader + push %eax # Emulate a near call + mov $0x1,%eax # 'exec' system call + int $INT_SYS # BTX system call +btx_client_end: + .code16 + + .p2align 4 +# +# Global descriptor table. 
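# Each descriptor below is 8 bytes.  SEL_SDATA and SEL_SCODE are flat
# 32-bit 4 GiB segments (0xcf granularity/size byte); SEL_SCODE16 keeps
# page granularity but a 16-bit default operand size (0x8f), and
# SEL_RDATA is a byte-granular 64 KiB 16-bit data segment that matches
# real-mode semantics for the switch back out of protected mode.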
+# +gdt: .word 0x0,0x0,0x0,0x0 # Null entry + .word 0xffff,0x0,0x9200,0xcf # SEL_SDATA + .word 0xffff,0x0,0x9200,0x0 # SEL_RDATA + .word 0xffff,0x0,0x9a00,0xcf # SEL_SCODE (32-bit) + .word 0xffff,0x0,0x9a00,0x8f # SEL_SCODE16 (16-bit) +gdt.1: +# +# Pseudo-descriptors. +# +gdtdesc: .word gdt.1-gdt-1 # Limit + .long gdt # Base +# +# EDD Packet +# +edd_packet: .byte 0x10 # Length + .byte 0 # Reserved +edd_len: .byte 0x0 # Num to read + .byte 0 # Reserved +edd_addr: .word 0x0,0x0 # Seg:Off +edd_lba: .quad 0x0 # LBA + +drive: .byte 0 + +# +# State for searching dir +# +rec_lba: .long 0x0 # LBA (adjusted for EA) +rec_size: .long 0x0 # File size +name_len: .byte 0x0 # Length of current name + +twiddle_index: .byte 0x0 + +msg_welcome: .asciz "CD Loader 1.2\r\n\n" +msg_bootinfo: .asciz "Building the boot loader arguments\r\n" +msg_relocate: .asciz "Relocating the loader and the BTX\r\n" +msg_jump: .asciz "Starting the BTX loader\r\n" +msg_badread: .ascii "Read Error: 0x" +hex_error: .asciz "00\r\n" +msg_novd: .asciz "Could not find Primary Volume Descriptor\r\n" +msg_lookup: .asciz "Looking up " +msg_lookup2: .asciz "... " +msg_lookupok: .asciz "Found\r\n" +msg_lookupfail: .asciz "File not found\r\n" +msg_load2big: .asciz "File too big\r\n" +msg_failed: .asciz "Boot failed\r\n" +twiddle_chars: .ascii "|/-\\" +loader_paths: .asciz "/BOOT/LOADER" + .asciz "/boot/loader" + .byte 0 + Property changes on: projects/nand/sys/boot/i386/cdboot/cdboot.S ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/nand/sys/boot/i386/efi =================================================================== --- projects/nand/sys/boot/i386/efi (revision 235262) +++ projects/nand/sys/boot/i386/efi (revision 235263) Property changes on: projects/nand/sys/boot/i386/efi ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot/i386/efi:r235157-235262 Index: projects/nand/sys/boot/i386/loader/main.c =================================================================== --- projects/nand/sys/boot/i386/loader/main.c (revision 235262) +++ projects/nand/sys/boot/i386/loader/main.c (revision 235263) @@ -1,343 +1,344 @@ /*- * Copyright (c) 1998 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * MD bootstrap main() and assorted miscellaneous * commands. */ #include +#include #include #include #include #include #include "bootstrap.h" #include "common/bootargs.h" #include "libi386/libi386.h" #include "btxv86.h" CTASSERT(sizeof(struct bootargs) == BOOTARGS_SIZE); CTASSERT(offsetof(struct bootargs, bootinfo) == BA_BOOTINFO); CTASSERT(offsetof(struct bootargs, bootflags) == BA_BOOTFLAGS); CTASSERT(offsetof(struct bootinfo, bi_size) == BI_SIZE); /* Arguments passed in from the boot1/boot2 loader */ static struct bootargs *kargs; static u_int32_t initial_howto; static u_int32_t initial_bootdev; static struct bootinfo *initial_bootinfo; struct arch_switch archsw; /* MI/MD interface boundary */ static void extract_currdev(void); static int isa_inb(int port); static void isa_outb(int port, int value); void exit(int code); /* from vers.c */ extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[]; /* XXX debugging */ extern char end[]; static void *heap_top; static void *heap_bottom; int main(void) { int i; /* Pick up arguments */ kargs = (void *)__args; initial_howto = kargs->howto; initial_bootdev = kargs->bootdev; initial_bootinfo = kargs->bootinfo ? (struct bootinfo *)PTOV(kargs->bootinfo) : NULL; /* Initialize the v86 register set to a known-good state. */ bzero(&v86, sizeof(v86)); v86.efl = PSL_RESERVED_DEFAULT | PSL_I; /* * Initialise the heap as early as possible. Once this is done, malloc() is usable. */ bios_getmem(); #if defined(LOADER_BZIP2_SUPPORT) || defined(LOADER_FIREWIRE_SUPPORT) || \ defined(LOADER_GPT_SUPPORT) || defined(LOADER_ZFS_SUPPORT) if (high_heap_size > 0) { heap_top = PTOV(high_heap_base + high_heap_size); heap_bottom = PTOV(high_heap_base); if (high_heap_base < memtop_copyin) memtop_copyin = high_heap_base; } else #endif { heap_top = (void *)PTOV(bios_basemem); heap_bottom = (void *)end; } setheap(heap_bottom, heap_top); /* * XXX Chicken-and-egg problem; we want to have console output early, but some * console attributes may depend on reading from eg. the boot device, which we * can't do yet. * * We can use printf() etc. once this is done. * If the previous boot stage has requested a serial console, prefer that. */ bi_setboothowto(initial_howto); if (initial_howto & RB_MULTIPLE) { if (initial_howto & RB_SERIAL) setenv("console", "comconsole vidconsole", 1); else setenv("console", "vidconsole comconsole", 1); } else if (initial_howto & RB_SERIAL) setenv("console", "comconsole", 1); else if (initial_howto & RB_MUTE) setenv("console", "nullconsole", 1); cons_probe(); /* * Initialise the block cache */ bcache_init(32, 512); /* 16k cache XXX tune this */ /* * Special handling for PXE and CD booting. */ if (kargs->bootinfo == 0) { /* * We only want the PXE disk to try to init itself in the below * walk through devsw if we actually booted off of PXE. */ if (kargs->bootflags & KARGS_FLAGS_PXE) pxe_enable(kargs->pxeinfo ? 
PTOV(kargs->pxeinfo) : NULL); else if (kargs->bootflags & KARGS_FLAGS_CD) bc_add(initial_bootdev); } archsw.arch_autoload = i386_autoload; archsw.arch_getdev = i386_getdev; archsw.arch_copyin = i386_copyin; archsw.arch_copyout = i386_copyout; archsw.arch_readin = i386_readin; archsw.arch_isainb = isa_inb; archsw.arch_isaoutb = isa_outb; /* * March through the device switch probing for things. */ for (i = 0; devsw[i] != NULL; i++) if (devsw[i]->dv_init != NULL) (devsw[i]->dv_init)(); printf("BIOS %dkB/%dkB available memory\n", bios_basemem / 1024, bios_extmem / 1024); if (initial_bootinfo != NULL) { initial_bootinfo->bi_basemem = bios_basemem / 1024; initial_bootinfo->bi_extmem = bios_extmem / 1024; } /* detect ACPI for future reference */ biosacpi_detect(); /* detect SMBIOS for future reference */ smbios_detect(); printf("\n"); printf("%s, Revision %s\n", bootprog_name, bootprog_rev); printf("(%s, %s)\n", bootprog_maker, bootprog_date); extract_currdev(); /* set $currdev and $loaddev */ setenv("LINES", "24", 1); /* optional */ bios_getsmap(); interact(); /* doesn't return */ /* if we ever get here, it is an error */ return (1); } /* * Set the 'current device' by (if possible) recovering the boot device as * supplied by the initial bootstrap. * * XXX should be extended for netbooting. */ static void extract_currdev(void) { struct i386_devdesc new_currdev; int biosdev = -1; /* Assume we are booting from a BIOS disk by default */ new_currdev.d_dev = &biosdisk; /* new-style boot loaders such as pxeldr and cdldr */ if (kargs->bootinfo == 0) { if ((kargs->bootflags & KARGS_FLAGS_CD) != 0) { /* we are booting from a CD with cdboot */ new_currdev.d_dev = &bioscd; new_currdev.d_unit = bc_bios2unit(initial_bootdev); } else if ((kargs->bootflags & KARGS_FLAGS_PXE) != 0) { /* we are booting from pxeldr */ new_currdev.d_dev = &pxedisk; new_currdev.d_unit = 0; } else { /* we don't know what our boot device is */ new_currdev.d_kind.biosdisk.slice = -1; new_currdev.d_kind.biosdisk.partition = 0; biosdev = -1; } } else if ((initial_bootdev & B_MAGICMASK) != B_DEVMAGIC) { /* The passed-in boot device is bad */ new_currdev.d_kind.biosdisk.slice = -1; new_currdev.d_kind.biosdisk.partition = 0; biosdev = -1; } else { new_currdev.d_kind.biosdisk.slice = B_SLICE(initial_bootdev) - 1; new_currdev.d_kind.biosdisk.partition = B_PARTITION(initial_bootdev); biosdev = initial_bootinfo->bi_bios_dev; /* * If we are booted by an old bootstrap, we have to guess at the BIOS * unit number. We will lose if there is more than one disk type * and we are not booting from the lowest-numbered disk type * (ie. SCSI when IDE also exists). */ if ((biosdev == 0) && (B_TYPE(initial_bootdev) != 2)) /* biosdev doesn't match major */ biosdev = 0x80 + B_UNIT(initial_bootdev); /* assume harddisk */ } new_currdev.d_type = new_currdev.d_dev->dv_type; /* * If we are booting off of a BIOS disk and we didn't succeed in determining * which one we booted off of, just use disk0: as a reasonable default. 
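 * (BIOS INT 13h numbers hard disks from unit 0x80 upward, which is why
 * the fallback above guesses 0x80 + B_UNIT(initial_bootdev).)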
*/ if ((new_currdev.d_type == biosdisk.dv_type) && ((new_currdev.d_unit = bd_bios2unit(biosdev)) == -1)) { printf("Can't work out which disk we are booting from.\n" "Guessed BIOS device 0x%x not found by probes, defaulting to disk0:\n", biosdev); new_currdev.d_unit = 0; } env_setenv("currdev", EV_VOLATILE, i386_fmtdev(&new_currdev), i386_setcurrdev, env_nounset); env_setenv("loaddev", EV_VOLATILE, i386_fmtdev(&new_currdev), env_noset, env_nounset); #ifdef LOADER_ZFS_SUPPORT /* * If we were started from a ZFS-aware boot2, we can work out * which ZFS pool we are booting from. */ if (kargs->bootflags & KARGS_FLAGS_ZFS) { /* * Dig out the pool guid and convert it to a 'unit number' */ uint64_t guid; int unit; char devname[32]; extern int zfs_guid_to_unit(uint64_t); guid = kargs->zfspool; unit = zfs_guid_to_unit(guid); if (unit >= 0) { sprintf(devname, "zfs%d", unit); setenv("currdev", devname, 1); } } #endif } COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot); static int command_reboot(int argc, char *argv[]) { int i; for (i = 0; devsw[i] != NULL; ++i) if (devsw[i]->dv_cleanup != NULL) (devsw[i]->dv_cleanup)(); printf("Rebooting...\n"); delay(1000000); __exit(0); } /* provide this for panic, as it's not in the startup code */ void exit(int code) { __exit(code); } COMMAND_SET(heap, "heap", "show heap usage", command_heap); static int command_heap(int argc, char *argv[]) { mallocstats(); printf("heap base at %p, top at %p, upper limit at %p\n", heap_bottom, sbrk(0), heap_top); return(CMD_OK); } /* ISA bus access functions for PnP, derived from */ static int isa_inb(int port) { u_char data; if (__builtin_constant_p(port) && (((port) & 0xffff) < 0x100) && ((port) < 0x10000)) { __asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port))); } else { __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); } return(data); } static void isa_outb(int port, int value) { u_char al = value; if (__builtin_constant_p(port) && (((port) & 0xffff) < 0x100) && ((port) < 0x10000)) { __asm __volatile("outb %0,%1" : : "a" (al), "id" ((u_short)(port))); } else { __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); } } Index: projects/nand/sys/boot/i386/pxeldr/Makefile =================================================================== --- projects/nand/sys/boot/i386/pxeldr/Makefile (revision 235262) +++ projects/nand/sys/boot/i386/pxeldr/Makefile (revision 235263) @@ -1,47 +1,49 @@ # $FreeBSD$ # Pick up ../Makefile.inc early. 
.include PROG= ${LDR} INTERNALPROG= FILES= ${BOOT} MAN= ${BOOT}.8 SRCS= ${LDR}.S CLEANFILES= ${BOOT} BOOT= pxeboot LDR= pxeldr ORG= 0x7c00 LOADER= loader .if defined(BOOT_PXELDR_PROBE_KEYBOARD) CFLAGS+=-DPROBE_KEYBOARD .endif .if defined(BOOT_PXELDR_ALWAYS_SERIAL) CFLAGS+=-DALWAYS_SERIAL .endif +CFLAGS+=-I${.CURDIR}/../common + LOADERBIN= ${.OBJDIR}/../loader/loader.bin CLEANFILES+= ${BOOT}.tmp ${BOOT}: ${LDR} ${LOADER} cat ${LDR} ${LOADER} > ${.TARGET}.tmp dd if=${.TARGET}.tmp of=${.TARGET} obs=2k conv=osync rm ${.TARGET}.tmp LDFLAGS+=-e start -Ttext ${ORG} -Wl,-N,-S,--oformat,binary CLEANFILES+= ${LOADER} ${LOADER}: ${LOADERBIN} ${BTXLDR} ${BTXKERN} btxld -v -f aout -e ${LOADER_ADDRESS} -o ${.TARGET} -l ${BTXLDR} \ -b ${BTXKERN} ${LOADERBIN} .include # XXX: clang integrated-as doesn't grok .codeNN directives yet CFLAGS.pxeldr.S= ${CLANG_NO_IAS} CFLAGS+= ${CFLAGS.${.IMPSRC:T}} Index: projects/nand/sys/boot/i386/pxeldr/pxeldr.S =================================================================== --- projects/nand/sys/boot/i386/pxeldr/pxeldr.S (revision 235262) +++ projects/nand/sys/boot/i386/pxeldr/pxeldr.S (revision 235263) @@ -1,294 +1,290 @@ /* * Copyright (c) 2000 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms are freely * permitted provided that the above copyright notice and this * paragraph and the following disclaimer are duplicated in all * such forms. * * This software is provided "AS IS" and without any express or * implied warranties, including, without limitation, the implied * warranties of merchantability and fitness for a particular * purpose. * * $FreeBSD$ */ /* * This simple program is a preloader for the normal boot3 loader. It is simply * prepended to the beginning of a fully built and btxld'd loader. It then * copies the loader to the address boot2 normally loads it, emulates the * boot[12] environment (protected mode, a bootinfo struct, etc.), and then jumps * to the start of btxldr to start the boot process. This method allows a stock * /boot/loader to be booted over the network via PXE w/o having to write a * separate PXE-aware client just to load the loader. */ #include +#include /* * Memory locations. */ .set MEM_PAGE_SIZE,0x1000 # memory page size, 4k .set MEM_ARG,0x900 # Arguments at start .set MEM_ARG_BTX,0xa100 # Where we move them to so the # BTX client can see them .set MEM_ARG_SIZE,0x18 # Size of the arguments .set MEM_BTX_ADDRESS,0x9000 # where BTX lives .set MEM_BTX_ENTRY,0x9010 # where BTX starts to execute .set MEM_BTX_OFFSET,MEM_PAGE_SIZE # offset of BTX in the loader .set MEM_BTX_CLIENT,0xa000 # where BTX clients live .set MEM_BIOS_KEYBOARD,0x496 # BDA byte with keyboard bit /* * a.out header fields */ .set AOUT_TEXT,0x04 # text segment size .set AOUT_DATA,0x08 # data segment size .set AOUT_BSS,0x0c # zero'd BSS size .set AOUT_SYMBOLS,0x10 # symbol table .set AOUT_ENTRY,0x14 # entry point .set AOUT_HEADER,MEM_PAGE_SIZE # size of the a.out header -/* - * Flags for kargs->bootflags - */ - .set KARGS_FLAGS_PXE,0x2 # flag to indicate booting from - # PXE loader /* * Segment selectors. 
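 * (Selector values are byte offsets into the GDT defined at the end of
 * this file: 0x8, 0x10, 0x18 and 0x20 select entries 1 through 4.)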
*/ .set SEL_SDATA,0x8 # Supervisor data .set SEL_RDATA,0x10 # Real mode data .set SEL_SCODE,0x18 # PM-32 code .set SEL_SCODE16,0x20 # PM-16 code /* * BTX constants */ .set INT_SYS,0x30 # BTX syscall interrupt /* * Bit in MEM_BIOS_KEYBOARD that is set if an enhanced keyboard is present */ .set KEYBOARD_BIT,0x10 /* * We expect to be loaded by the BIOS at 0x7c00 (standard boot loader entry * point) */ .code16 .globl start .org 0x0, 0x0 /* * BTX program loader for PXE network booting */ start: cld # string ops inc xorw %ax, %ax # zero %ax movw %ax, %ss # setup the movw $start, %sp # stack movw %es, %cx # save PXENV+ segment movw %ax, %ds # setup the movw %ax, %es # data segments andl $0xffff, %ecx # clear upper words andl $0xffff, %ebx # of %ebx and %ecx shll $4, %ecx # calculate the offset of addl %ebx, %ecx # the PXENV+ struct and pushl %ecx # save it on the stack movw $welcome_msg, %si # %ds:(%si) -> welcome message callw putstr # display the welcome message /* * Setup the arguments that the loader is expecting from boot[12] */ movw $bootinfo_msg, %si # %ds:(%si) -> boot args message callw putstr # display the message movw $MEM_ARG, %bx # %ds:(%bx) -> boot args movw %bx, %di # %es:(%di) -> boot args xorl %eax, %eax # zero %eax movw $(MEM_ARG_SIZE/4), %cx # Size of arguments in 32-bit # dwords rep # Clear the arguments stosl # to zero orb $KARGS_FLAGS_PXE, 0x8(%bx) # kargs->bootflags |= # KARGS_FLAGS_PXE popl 0xc(%bx) # kargs->pxeinfo = *PXENV+ #ifdef ALWAYS_SERIAL /* * set the RBX_SERIAL bit in the howto byte. */ orl $RB_SERIAL, (%bx) # enable serial console #endif #ifdef PROBE_KEYBOARD /* * Look at the BIOS data area to see if we have an enhanced keyboard. If not, * set the RBX_DUAL and RBX_SERIAL bits in the howto byte. */ testb $KEYBOARD_BIT, MEM_BIOS_KEYBOARD # keyboard present? 
jnz keyb # yes, so skip orl $(RB_MULTIPLE | RB_SERIAL), (%bx) # enable serial console keyb: #endif /* * Turn on the A20 address line */ callw seta20 # Turn A20 on /* * Relocate the loader and BTX using a very lazy protected mode */ movw $relocate_msg, %si # Display the callw putstr # relocation message movl end+AOUT_ENTRY, %edi # %edi is the destination movl $(end+AOUT_HEADER), %esi # %esi is # the start of the text # segment movl end+AOUT_TEXT, %ecx # %ecx = length of the text # segment lgdt gdtdesc # setup our own gdt cli # turn off interrupts movl %cr0, %eax # Turn on orb $0x1, %al # protected movl %eax, %cr0 # mode ljmp $SEL_SCODE,$pm_start # long jump to clear the # instruction pre-fetch queue .code32 pm_start: movw $SEL_SDATA, %ax # Initialize movw %ax, %ds # %ds and movw %ax, %es # %es to a flat selector rep # Relocate the movsb # text segment addl $(MEM_PAGE_SIZE - 1), %edi # pad %edi out to a new page andl $~(MEM_PAGE_SIZE - 1), %edi # for the data segment movl end+AOUT_DATA, %ecx # size of the data segment rep # Relocate the movsb # data segment movl end+AOUT_BSS, %ecx # size of the bss xorl %eax, %eax # zero %eax addb $3, %cl # round %ecx up to shrl $2, %ecx # a multiple of 4 rep # zero the stosl # bss movl end+AOUT_ENTRY, %esi # %esi -> relocated loader addl $MEM_BTX_OFFSET, %esi # %esi -> BTX in the loader movl $MEM_BTX_ADDRESS, %edi # %edi -> where BTX needs to go movzwl 0xa(%esi), %ecx # %ecx -> length of BTX rep # Relocate movsb # BTX ljmp $SEL_SCODE16,$pm_16 # Jump to 16-bit PM .code16 pm_16: movw $SEL_RDATA, %ax # Initialize movw %ax, %ds # %ds and movw %ax, %es # %es to a real mode selector movl %cr0, %eax # Turn off andb $~0x1, %al # protected movl %eax, %cr0 # mode ljmp $0,$pm_end # Long jump to clear the # instruction pre-fetch queue pm_end: sti # Turn interrupts back on now /* * Copy the BTX client to MEM_BTX_CLIENT */ xorw %ax, %ax # zero %ax and set movw %ax, %ds # %ds and %es movw %ax, %es # to segment 0 movw $MEM_BTX_CLIENT, %di # Prepare to relocate movw $btx_client, %si # the simple btx client movw $(btx_client_end-btx_client), %cx # length of btx client rep # Relocate the movsb # simple BTX client /* * Copy the boot[12] args to where the BTX client can see them */ movw $MEM_ARG, %si # where the args are at now movw $MEM_ARG_BTX, %di # where the args are moving to movw $(MEM_ARG_SIZE/4), %cx # size of the arguments in longs rep # Relocate movsl # the words /* * Save the entry point so the client can get to it later on */ movl end+AOUT_ENTRY, %eax # load the entry point stosl # add it to the end of the # arguments /* * Now we just start up BTX and let it do the rest */ movw $jump_message, %si # Display the callw putstr # jump message ljmp $0,$MEM_BTX_ENTRY # Jump to the BTX entry point /* * Display a null-terminated string */ putstr: lodsb # load %al from %ds:(%si) testb %al,%al # stop at null jnz putc # if the char != null, output it retw # return when null is hit putc: movw $0x7,%bx # attribute for output movb $0xe,%ah # BIOS: put_char int $0x10 # call BIOS, print char in %al jmp putstr # keep looping /* * Enable A20. Put an upper limit on the amount of time we wait for the * keyboard controller to get ready (65K x ISA access time). If * we wait more than that amount, the hardware is probably * legacy-free and simply doesn't have a keyboard controller. * Thus, the A20 line is already enabled. */ seta20: cli # Disable interrupts xor %cx,%cx # Clear seta20.1: inc %cx # Increment, overflow? jz seta20.3 # Yes inb $0x64,%al # Get status testb $0x2,%al # Busy? 
jnz seta20.1 # Yes movb $0xd1,%al # Command: Write outb %al,$0x64 # output port seta20.2: inb $0x64,%al # Get status testb $0x2,%al # Busy? jnz seta20.2 # Yes movb $0xdf,%al # Enable outb %al,$0x60 # A20 seta20.3: sti # Enable interrupts retw # To caller /* * BTX client to start btxldr */ .code32 btx_client: movl $(MEM_ARG_BTX-MEM_BTX_CLIENT+MEM_ARG_SIZE-4), %esi # %ds:(%esi) -> end # of boot[12] args movl $(MEM_ARG_SIZE/4), %ecx # Number of words to push std # Go backwards push_arg: lodsl # Read argument pushl %eax # Push it onto the stack loop push_arg # Push all of the arguments cld # In case anyone depends on this pushl MEM_ARG_BTX-MEM_BTX_CLIENT+MEM_ARG_SIZE # Entry point of # the loader pushl %eax # Emulate a near call movl $0x1, %eax # 'exec' system call int $INT_SYS # BTX system call btx_client_end: .code16 .p2align 4 /* * Global descriptor table. */ gdt: .word 0x0,0x0,0x0,0x0 # Null entry .word 0xffff,0x0,0x9200,0xcf # SEL_SDATA .word 0xffff,0x0,0x9200,0x0 # SEL_RDATA .word 0xffff,0x0,0x9a00,0xcf # SEL_SCODE (32-bit) .word 0xffff,0x0,0x9a00,0x8f # SEL_SCODE16 (16-bit) gdt.1: /* * Pseudo-descriptors. */ gdtdesc: .word gdt.1-gdt-1 # Limit .long gdt # Base welcome_msg: .asciz "PXE Loader 1.00\r\n\n" bootinfo_msg: .asciz "Building the boot loader arguments\r\n" relocate_msg: .asciz "Relocating the loader and the BTX\r\n" jump_message: .asciz "Starting the BTX loader\r\n" .p2align 4 end: Index: projects/nand/sys/boot/ia64/efi =================================================================== --- projects/nand/sys/boot/ia64/efi (revision 235262) +++ projects/nand/sys/boot/ia64/efi (revision 235263) Property changes on: projects/nand/sys/boot/ia64/efi ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot/ia64/efi:r235157-235262 Index: projects/nand/sys/boot/ia64/ski =================================================================== --- projects/nand/sys/boot/ia64/ski (revision 235262) +++ projects/nand/sys/boot/ia64/ski (revision 235263) Property changes on: projects/nand/sys/boot/ia64/ski ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot/ia64/ski:r235157-235262 Index: projects/nand/sys/boot/powerpc/boot1.chrp =================================================================== --- projects/nand/sys/boot/powerpc/boot1.chrp (revision 235262) +++ projects/nand/sys/boot/powerpc/boot1.chrp (revision 235263) Property changes on: projects/nand/sys/boot/powerpc/boot1.chrp ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot/powerpc/boot1.chrp:r235157-235262 Index: projects/nand/sys/boot/powerpc/ofw =================================================================== --- projects/nand/sys/boot/powerpc/ofw (revision 235262) +++ projects/nand/sys/boot/powerpc/ofw (revision 235263) Property changes on: projects/nand/sys/boot/powerpc/ofw ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot/powerpc/ofw:r235157-235262 Index: projects/nand/sys/boot/sparc64/boot1/Makefile =================================================================== --- projects/nand/sys/boot/sparc64/boot1/Makefile (revision 235262) +++ projects/nand/sys/boot/sparc64/boot1/Makefile (revision 235263) @@ -1,28 +1,28 @@ # $FreeBSD$ PROG= boot1.elf INTERNALPROG= NO_MAN= FILES?= boot1 SRCS= _start.s boot1.c -CLEANFILES+=boot1 
boot1.aout +CLEANFILES=${FILES} boot1.aout BOOTBLOCKBASE= 0x4000 CFLAGS+=-mcmodel=medlow -Os -I${.CURDIR}/../../common LDFLAGS=-Ttext ${BOOTBLOCKBASE} -Wl,-N # Construct boot1. sunlabel expects it to contain zeroed-out space for the # label, and to be of the correct size. -boot1: boot1.aout +${FILES}: boot1.aout @set -- `ls -l boot1.aout`; x=$$((7680-$$5)); \ echo "$$x bytes available"; test $$x -ge 0 dd if=/dev/zero of=${.TARGET} bs=512 count=16 dd if=boot1.aout of=${.TARGET} bs=512 oseek=1 conv=notrunc boot1.aout: boot1.elf elf2aout -o ${.TARGET} ${.ALLSRC} boot1.o: ${.CURDIR}/../../common/ufsread.c .include Index: projects/nand/sys/boot/sparc64/zfsboot/Makefile =================================================================== --- projects/nand/sys/boot/sparc64/zfsboot/Makefile (revision 235262) +++ projects/nand/sys/boot/sparc64/zfsboot/Makefile (revision 235263) @@ -1,13 +1,9 @@ # $FreeBSD$ .PATH: ${.CURDIR}/../boot1 PROGNAME= zfsboot CFLAGS+= -DZFSBOOT FILES= zfsboot -CLEANFILES+= zfsboot - -zfsboot: boot1 - ln -s ${.ALLSRC} ${.TARGET} .include "${.CURDIR}/../boot1/Makefile" Index: projects/nand/sys/boot =================================================================== --- projects/nand/sys/boot (revision 235262) +++ projects/nand/sys/boot (revision 235263) Property changes on: projects/nand/sys/boot ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot:r235157-235262 Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c (revision 235263) @@ -1,1722 +1,1727 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. */ /* * Copyright 2011 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. + * Copyright (c) 2012, Martin Matuska . All rights reserved. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */ int zfs_send_corrupt_data = B_FALSE; static char *dmu_recv_tag = "dmu_recv_tag"; -/* - * The list of data whose inclusion in a send stream can be pending from - * one call to backup_cb to another. 
Multiple calls to dump_free() and - * dump_freeobjects() can be aggregated into a single DRR_FREE or - * DRR_FREEOBJECTS replay record. - */ -typedef enum { - PENDING_NONE, - PENDING_FREE, - PENDING_FREEOBJECTS -} pendop_t; - -struct backuparg { - dmu_replay_record_t *drr; - kthread_t *td; - struct file *fp; - offset_t *off; - objset_t *os; - zio_cksum_t zc; - uint64_t toguid; - int err; - pendop_t pending_op; -}; - static int -dump_bytes(struct backuparg *ba, void *buf, int len) +dump_bytes(dmu_sendarg_t *dsp, void *buf, int len) { + dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset; struct uio auio; struct iovec aiov; ASSERT3U(len % 8, ==, 0); - fletcher_4_incremental_native(buf, len, &ba->zc); + fletcher_4_incremental_native(buf, len, &dsp->dsa_zc); aiov.iov_base = buf; aiov.iov_len = len; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_resid = len; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_WRITE; auio.uio_offset = (off_t)-1; - auio.uio_td = ba->td; + auio.uio_td = dsp->dsa_td; #ifdef _KERNEL - if (ba->fp->f_type == DTYPE_VNODE) + if (dsp->dsa_fp->f_type == DTYPE_VNODE) bwillwrite(); - ba->err = fo_write(ba->fp, &auio, ba->td->td_ucred, 0, ba->td); + dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0, + dsp->dsa_td); #else fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__); - ba->err = EOPNOTSUPP; + dsp->dsa_err = EOPNOTSUPP; #endif - *ba->off += len; - return (ba->err); + mutex_enter(&ds->ds_sendstream_lock); + *dsp->dsa_off += len; + mutex_exit(&ds->ds_sendstream_lock); + + return (dsp->dsa_err); } static int -dump_free(struct backuparg *ba, uint64_t object, uint64_t offset, +dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset, uint64_t length) { - struct drr_free *drrf = &(ba->drr->drr_u.drr_free); + struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free); /* * If there is a pending op, but it's not PENDING_FREE, push it out, * since free block aggregation can only be done for blocks of the * same type (i.e., DRR_FREE records can only be aggregated with * other DRR_FREE records. DRR_FREEOBJECTS records can only be * aggregated with other DRR_FREEOBJECTS records.) */ - if (ba->pending_op != PENDING_NONE && ba->pending_op != PENDING_FREE) { - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dsp->dsa_pending_op != PENDING_NONE && + dsp->dsa_pending_op != PENDING_FREE) { + if (dump_bytes(dsp, dsp->dsa_drr, + sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } - if (ba->pending_op == PENDING_FREE) { + if (dsp->dsa_pending_op == PENDING_FREE) { /* * There should never be a PENDING_FREE if length is -1 * (because dump_dnode is the only place where this * function is called with a -1, and only after flushing * any pending record). */ ASSERT(length != -1ULL); /* * Check to see whether this free block can be aggregated * with the pending one. */ if (drrf->drr_object == object && drrf->drr_offset + drrf->drr_length == offset) { drrf->drr_length += length; return (0); } else { /* not a continuation.
Push out pending record */ - if (dump_bytes(ba, ba->drr, + if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } } /* create a FREE record and make it pending */ - bzero(ba->drr, sizeof (dmu_replay_record_t)); - ba->drr->drr_type = DRR_FREE; + bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); + dsp->dsa_drr->drr_type = DRR_FREE; drrf->drr_object = object; drrf->drr_offset = offset; drrf->drr_length = length; - drrf->drr_toguid = ba->toguid; + drrf->drr_toguid = dsp->dsa_toguid; if (length == -1ULL) { - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dump_bytes(dsp, dsp->dsa_drr, + sizeof (dmu_replay_record_t)) != 0) return (EINTR); } else { - ba->pending_op = PENDING_FREE; + dsp->dsa_pending_op = PENDING_FREE; } return (0); } static int -dump_data(struct backuparg *ba, dmu_object_type_t type, +dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data) { - struct drr_write *drrw = &(ba->drr->drr_u.drr_write); + struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write); /* * If there is any kind of pending aggregation (currently either * a grouping of free objects or free blocks), push it out to * the stream, since aggregation can't be done across operations * of different types. */ - if (ba->pending_op != PENDING_NONE) { - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dsp->dsa_pending_op != PENDING_NONE) { + if (dump_bytes(dsp, dsp->dsa_drr, + sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } /* write a DATA record */ - bzero(ba->drr, sizeof (dmu_replay_record_t)); - ba->drr->drr_type = DRR_WRITE; + bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); + dsp->dsa_drr->drr_type = DRR_WRITE; drrw->drr_object = object; drrw->drr_type = type; drrw->drr_offset = offset; drrw->drr_length = blksz; - drrw->drr_toguid = ba->toguid; + drrw->drr_toguid = dsp->dsa_toguid; drrw->drr_checksumtype = BP_GET_CHECKSUM(bp); if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup) drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP; DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp)); DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp)); DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp)); drrw->drr_key.ddk_cksum = bp->blk_cksum; - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0) return (EINTR); - if (dump_bytes(ba, data, blksz) != 0) + if (dump_bytes(dsp, data, blksz) != 0) return (EINTR); return (0); } static int -dump_spill(struct backuparg *ba, uint64_t object, int blksz, void *data) +dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data) { - struct drr_spill *drrs = &(ba->drr->drr_u.drr_spill); + struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill); - if (ba->pending_op != PENDING_NONE) { - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dsp->dsa_pending_op != PENDING_NONE) { + if (dump_bytes(dsp, dsp->dsa_drr, + sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } /* write a SPILL record */ - bzero(ba->drr, sizeof (dmu_replay_record_t)); - ba->drr->drr_type = DRR_SPILL; + bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); + dsp->dsa_drr->drr_type = DRR_SPILL; drrs->drr_object = object; drrs->drr_length = blksz; - 
drrs->drr_toguid = ba->toguid; + drrs->drr_toguid = dsp->dsa_toguid; - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) + if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t))) return (EINTR); - if (dump_bytes(ba, data, blksz)) + if (dump_bytes(dsp, data, blksz)) return (EINTR); return (0); } static int -dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs) +dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs) { - struct drr_freeobjects *drrfo = &(ba->drr->drr_u.drr_freeobjects); + struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects); /* * If there is a pending op, but it's not PENDING_FREEOBJECTS, * push it out, since free block aggregation can only be done for * blocks of the same type (i.e., DRR_FREE records can only be * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records * can only be aggregated with other DRR_FREEOBJECTS records.) */ - if (ba->pending_op != PENDING_NONE && - ba->pending_op != PENDING_FREEOBJECTS) { - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dsp->dsa_pending_op != PENDING_NONE && + dsp->dsa_pending_op != PENDING_FREEOBJECTS) { + if (dump_bytes(dsp, dsp->dsa_drr, + sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } - if (ba->pending_op == PENDING_FREEOBJECTS) { + if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) { /* * See whether this free object array can be aggregated * with the pending one. */ if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) { drrfo->drr_numobjs += numobjs; return (0); } else { /* can't be aggregated. Push out pending record */ - if (dump_bytes(ba, ba->drr, + if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } } /* write a FREEOBJECTS record */ - bzero(ba->drr, sizeof (dmu_replay_record_t)); - ba->drr->drr_type = DRR_FREEOBJECTS; + bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); + dsp->dsa_drr->drr_type = DRR_FREEOBJECTS; drrfo->drr_firstobj = firstobj; drrfo->drr_numobjs = numobjs; - drrfo->drr_toguid = ba->toguid; + drrfo->drr_toguid = dsp->dsa_toguid; - ba->pending_op = PENDING_FREEOBJECTS; + dsp->dsa_pending_op = PENDING_FREEOBJECTS; return (0); } static int -dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp) +dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp) { - struct drr_object *drro = &(ba->drr->drr_u.drr_object); + struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object); if (dnp == NULL || dnp->dn_type == DMU_OT_NONE) - return (dump_freeobjects(ba, object, 1)); + return (dump_freeobjects(dsp, object, 1)); - if (ba->pending_op != PENDING_NONE) { - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dsp->dsa_pending_op != PENDING_NONE) { + if (dump_bytes(dsp, dsp->dsa_drr, + sizeof (dmu_replay_record_t)) != 0) return (EINTR); - ba->pending_op = PENDING_NONE; + dsp->dsa_pending_op = PENDING_NONE; } /* write an OBJECT record */ - bzero(ba->drr, sizeof (dmu_replay_record_t)); - ba->drr->drr_type = DRR_OBJECT; + bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); + dsp->dsa_drr->drr_type = DRR_OBJECT; drro->drr_object = object; drro->drr_type = dnp->dn_type; drro->drr_bonustype = dnp->dn_bonustype; drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; drro->drr_bonuslen = dnp->dn_bonuslen; drro->drr_checksumtype = dnp->dn_checksum; drro->drr_compress = dnp->dn_compress; -
drro->drr_toguid = ba->toguid; + drro->drr_toguid = dsp->dsa_toguid; - if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0) + if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0) return (EINTR); - if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) + if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) return (EINTR); /* free anything past the end of the file */ - if (dump_free(ba, object, (dnp->dn_maxblkid + 1) * + if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) * (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL)) return (EINTR); - if (ba->err) + if (dsp->dsa_err) return (EINTR); return (0); } #define BP_SPAN(dnp, level) \ (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \ (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) /* ARGSUSED */ static int backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) { - struct backuparg *ba = arg; + dmu_sendarg_t *dsp = arg; dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE; int err = 0; if (issig(JUSTLOOKING) && issig(FORREAL)) return (EINTR); if (zb->zb_object != DMU_META_DNODE_OBJECT && DMU_OBJECT_IS_SPECIAL(zb->zb_object)) { return (0); } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) { uint64_t span = BP_SPAN(dnp, zb->zb_level); uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT; - err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT); + err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT); } else if (bp == NULL) { uint64_t span = BP_SPAN(dnp, zb->zb_level); - err = dump_free(ba, zb->zb_object, zb->zb_blkid * span, span); + err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span); } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) { return (0); } else if (type == DMU_OT_DNODE) { dnode_phys_t *blk; int i; int blksz = BP_GET_LSIZE(bp); uint32_t aflags = ARC_WAIT; arc_buf_t *abuf; if (dsl_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb) != 0) return (EIO); blk = abuf->b_data; for (i = 0; i < blksz >> DNODE_SHIFT; i++) { uint64_t dnobj = (zb->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i; - err = dump_dnode(ba, dnobj, blk+i); + err = dump_dnode(dsp, dnobj, blk+i); if (err) break; } (void) arc_buf_remove_ref(abuf, &abuf); } else if (type == DMU_OT_SA) { uint32_t aflags = ARC_WAIT; arc_buf_t *abuf; int blksz = BP_GET_LSIZE(bp); if (arc_read_nolock(NULL, spa, bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb) != 0) return (EIO); - err = dump_spill(ba, zb->zb_object, blksz, abuf->b_data); + err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data); (void) arc_buf_remove_ref(abuf, &abuf); } else { /* it's a level-0 block of a regular object */ uint32_t aflags = ARC_WAIT; arc_buf_t *abuf; int blksz = BP_GET_LSIZE(bp); if (dsl_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb) != 0) { if (zfs_send_corrupt_data) { /* Send a block filled with 0x"zfs badd bloc" */ abuf = arc_buf_alloc(spa, blksz, &abuf, ARC_BUFC_DATA); uint64_t *ptr; for (ptr = abuf->b_data; (char *)ptr < (char *)abuf->b_data + blksz; ptr++) *ptr = 0x2f5baddb10c; } else { return (EIO); } } - err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz, + err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz, blksz, bp, abuf->b_data); (void) arc_buf_remove_ref(abuf, &abuf); } ASSERT(err == 0 || err == EINTR); return (err); } int 
-dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, - struct file *fp, offset_t *off) +dmu_send(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, + int outfd, struct file *fp, offset_t *off) { dsl_dataset_t *ds = tosnap->os_dsl_dataset; dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL; dmu_replay_record_t *drr; - struct backuparg ba; + dmu_sendarg_t *dsp; int err; uint64_t fromtxg = 0; /* tosnap must be a snapshot */ if (ds->ds_phys->ds_next_snap_obj == 0) return (EINVAL); /* fromsnap must be an earlier snapshot from the same fs as tosnap */ if (fromds && (ds->ds_dir != fromds->ds_dir || fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg)) return (EXDEV); if (fromorigin) { dsl_pool_t *dp = ds->ds_dir->dd_pool; if (fromsnap) return (EINVAL); if (dsl_dir_is_clone(ds->ds_dir)) { rw_enter(&dp->dp_config_rwlock, RW_READER); err = dsl_dataset_hold_obj(dp, ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds); rw_exit(&dp->dp_config_rwlock); if (err) return (err); } else { fromorigin = B_FALSE; } } drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); drr->drr_type = DRR_BEGIN; drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo, DMU_SUBSTREAM); #ifdef _KERNEL if (dmu_objset_type(tosnap) == DMU_OST_ZFS) { uint64_t version; - if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) + if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) { + kmem_free(drr, sizeof (dmu_replay_record_t)); return (EINVAL); + } if (version == ZPL_VERSION_SA) { DMU_SET_FEATUREFLAGS( drr->drr_u.drr_begin.drr_versioninfo, DMU_BACKUP_FEATURE_SA_SPILL); } } #endif drr->drr_u.drr_begin.drr_creation_time = ds->ds_phys->ds_creation_time; drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type; if (fromorigin) drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE; drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid; if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA; if (fromds) drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid; dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname); if (fromds) fromtxg = fromds->ds_phys->ds_creation_txg; if (fromorigin) dsl_dataset_rele(fromds, FTAG); - ba.drr = drr; - ba.td = curthread; - ba.fp = fp; - ba.os = tosnap; - ba.off = off; - ba.toguid = ds->ds_phys->ds_guid; - ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0); - ba.pending_op = PENDING_NONE; + dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP); - if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) { - kmem_free(drr, sizeof (dmu_replay_record_t)); - return (ba.err); + dsp->dsa_drr = drr; + dsp->dsa_outfd = outfd; + dsp->dsa_proc = curproc; + dsp->dsa_td = curthread; + dsp->dsa_fp = fp; + dsp->dsa_os = tosnap; + dsp->dsa_off = off; + dsp->dsa_toguid = ds->ds_phys->ds_guid; + ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0); + dsp->dsa_pending_op = PENDING_NONE; + + mutex_enter(&ds->ds_sendstream_lock); + list_insert_head(&ds->ds_sendstreams, dsp); + mutex_exit(&ds->ds_sendstream_lock); + + if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) { + err = dsp->dsa_err; + goto out; } err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH, - backup_cb, &ba); + backup_cb, dsp); - if (ba.pending_op != PENDING_NONE) - if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) + if (dsp->dsa_pending_op != PENDING_NONE) + if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) err = EINTR; if (err) { - if (err == EINTR && ba.err) - 
err = ba.err; - kmem_free(drr, sizeof (dmu_replay_record_t)); - return (err); + if (err == EINTR && dsp->dsa_err) + err = dsp->dsa_err; + goto out; } bzero(drr, sizeof (dmu_replay_record_t)); drr->drr_type = DRR_END; - drr->drr_u.drr_end.drr_checksum = ba.zc; - drr->drr_u.drr_end.drr_toguid = ba.toguid; + drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc; + drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid; - if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) { - kmem_free(drr, sizeof (dmu_replay_record_t)); - return (ba.err); + if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) { + err = dsp->dsa_err; + goto out; } +out: + mutex_enter(&ds->ds_sendstream_lock); + list_remove(&ds->ds_sendstreams, dsp); + mutex_exit(&ds->ds_sendstream_lock); + kmem_free(drr, sizeof (dmu_replay_record_t)); + kmem_free(dsp, sizeof (dmu_sendarg_t)); - return (0); + return (err); } int dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, uint64_t *sizep) { dsl_dataset_t *ds = tosnap->os_dsl_dataset; dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL; dsl_pool_t *dp = ds->ds_dir->dd_pool; int err; uint64_t size; /* tosnap must be a snapshot */ if (ds->ds_phys->ds_next_snap_obj == 0) return (EINVAL); /* fromsnap must be an earlier snapshot from the same fs as tosnap */ if (fromds && (ds->ds_dir != fromds->ds_dir || fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg)) return (EXDEV); if (fromorigin) { if (fromsnap) return (EINVAL); if (dsl_dir_is_clone(ds->ds_dir)) { rw_enter(&dp->dp_config_rwlock, RW_READER); err = dsl_dataset_hold_obj(dp, ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds); rw_exit(&dp->dp_config_rwlock); if (err) return (err); } else { fromorigin = B_FALSE; } } /* Get uncompressed size estimate of changed data. */ if (fromds == NULL) { size = ds->ds_phys->ds_uncompressed_bytes; } else { uint64_t used, comp; err = dsl_dataset_space_written(fromds, ds, &used, &comp, &size); if (fromorigin) dsl_dataset_rele(fromds, FTAG); if (err) return (err); } /* * Assume that space (both on-disk and in-stream) is dominated by * data. We will adjust for indirect blocks and the copies property, * but ignore per-object space used (eg, dnodes and DRR_OBJECT records). */ /* * Subtract out approximate space used by indirect blocks. * Assume most space is used by data blocks (non-indirect, non-dnode). * Assume all blocks are recordsize. Assume ditto blocks and * internal fragmentation counter out compression. * * Therefore, space used by indirect blocks is sizeof(blkptr_t) per * block, which we observe in practice. */ uint64_t recordsize; rw_enter(&dp->dp_config_rwlock, RW_READER); err = dsl_prop_get_ds(ds, "recordsize", sizeof (recordsize), 1, &recordsize, NULL); rw_exit(&dp->dp_config_rwlock); if (err) return (err); size -= size / recordsize * sizeof (blkptr_t); /* Add in the space for the record associated with each block. 
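* As a rough illustration (numbers assumed for this note, not taken from the * change itself): at the default 128K recordsize, 1 GB of estimated data is * 8192 blocks, so the subtraction above removes 8192 * sizeof (blkptr_t) = * 8192 * 128 bytes = 1 MB of indirect-block overhead, while the line below * charges one dmu_replay_record_t header back per block.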
*/ size += size / recordsize * sizeof (dmu_replay_record_t); *sizep = size; return (0); } struct recvbeginsyncarg { const char *tofs; const char *tosnap; dsl_dataset_t *origin; uint64_t fromguid; dmu_objset_type_t type; void *tag; boolean_t force; uint64_t dsflags; char clonelastname[MAXNAMELEN]; dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */ cred_t *cr; }; /* ARGSUSED */ static int recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dir_t *dd = arg1; struct recvbeginsyncarg *rbsa = arg2; objset_t *mos = dd->dd_pool->dp_meta_objset; uint64_t val; int err; err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj, strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val); if (err != ENOENT) return (err ? err : EEXIST); if (rbsa->origin) { /* make sure it's a snap in the same pool */ if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool) return (EXDEV); if (!dsl_dataset_is_snapshot(rbsa->origin)) return (EINVAL); if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid) return (ENODEV); } return (0); } static void recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dir_t *dd = arg1; struct recvbeginsyncarg *rbsa = arg2; uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags; uint64_t dsobj; /* Create and open new dataset. */ dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1, rbsa->origin, flags, rbsa->cr, tx); VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj, B_TRUE, dmu_recv_tag, &rbsa->ds)); if (rbsa->origin == NULL) { (void) dmu_objset_create_impl(dd->dd_pool->dp_spa, rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx); } spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC, dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj); } /* ARGSUSED */ static int recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; struct recvbeginsyncarg *rbsa = arg2; int err; uint64_t val; /* must not have any changes since most recent snapshot */ if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds)) return (ETXTBSY); /* new snapshot name must not exist */ err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val); if (err == 0) return (EEXIST); if (err != ENOENT) return (err); if (rbsa->fromguid) { /* if incremental, most recent snapshot must match fromguid */ if (ds->ds_prev == NULL) return (ENODEV); /* * most recent snapshot must match fromguid, or there are no * changes since the fromguid one */ if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) { uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth; uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj; while (obj != 0) { dsl_dataset_t *snap; err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool, obj, FTAG, &snap); if (err) return (ENODEV); if (snap->ds_phys->ds_creation_txg < birth) { dsl_dataset_rele(snap, FTAG); return (ENODEV); } if (snap->ds_phys->ds_guid == rbsa->fromguid) { dsl_dataset_rele(snap, FTAG); break; /* it's ok */ } obj = snap->ds_phys->ds_prev_snap_obj; dsl_dataset_rele(snap, FTAG); } if (obj == 0) return (ENODEV); } } else { /* if full, most recent snapshot must be $ORIGIN */ if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL) return (ENODEV); } /* temporary clone name must not exist */ err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_dir->dd_phys->dd_child_dir_zapobj, rbsa->clonelastname, 8, 1, &val); if (err == 0) return (EEXIST); if (err != ENOENT) return (err); return (0); } /* ARGSUSED */ static void recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ohds = arg1; struct 
recvbeginsyncarg *rbsa = arg2; dsl_pool_t *dp = ohds->ds_dir->dd_pool; dsl_dataset_t *cds; uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags; uint64_t dsobj; /* create and open the temporary clone */ dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname, ohds->ds_prev, flags, rbsa->cr, tx); VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds)); /* * If we actually created a non-clone, we need to create the * objset in our new dataset. */ if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) { (void) dmu_objset_create_impl(dp->dp_spa, cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx); } rbsa->ds = cds; spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC, dp->dp_spa, tx, "dataset = %lld", dsobj); } static boolean_t dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb) { int featureflags; featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); /* Verify pool version supports SA if SA_SPILL feature set */ return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) && (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA)); } /* * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() * succeeds; otherwise we will leak the holds on the datasets. */ int dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb, boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc) { int err = 0; boolean_t byteswap; struct recvbeginsyncarg rbsa = { 0 }; uint64_t versioninfo; int flags; dsl_dataset_t *ds; if (drrb->drr_magic == DMU_BACKUP_MAGIC) byteswap = FALSE; else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) byteswap = TRUE; else return (EINVAL); rbsa.tofs = tofs; rbsa.tosnap = tosnap; rbsa.origin = origin ? origin->os_dsl_dataset : NULL; rbsa.fromguid = drrb->drr_fromguid; rbsa.type = drrb->drr_type; rbsa.tag = FTAG; rbsa.dsflags = 0; rbsa.cr = CRED(); versioninfo = drrb->drr_versioninfo; flags = drrb->drr_flags; if (byteswap) { rbsa.type = BSWAP_32(rbsa.type); rbsa.fromguid = BSWAP_64(rbsa.fromguid); versioninfo = BSWAP_64(versioninfo); flags = BSWAP_32(flags); } if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM || rbsa.type >= DMU_OST_NUMTYPES || ((flags & DRR_FLAG_CLONE) && origin == NULL)) return (EINVAL); if (flags & DRR_FLAG_CI_DATA) rbsa.dsflags = DS_FLAG_CI_DATASET; bzero(drc, sizeof (dmu_recv_cookie_t)); drc->drc_drrb = drrb; drc->drc_tosnap = tosnap; drc->drc_top_ds = top_ds; drc->drc_force = force; /* * Process the begin in syncing context. 
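* There are two paths below: if tofs already exists, we receive into a * hidden temporary clone (named tofs/%tosnap) that dmu_recv_end() later * swaps into place; if the hold fails with ENOENT, we instead create a * brand-new dataset for a full stream or clone.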
*/ /* open the dataset we are logically receiving into */ err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds); if (err == 0) { if (dmu_recv_verify_features(ds, drrb)) { dsl_dataset_rele(ds, dmu_recv_tag); return (ENOTSUP); } /* target fs already exists; recv into temp clone */ /* Can't recv a clone into an existing fs */ if (flags & DRR_FLAG_CLONE) { dsl_dataset_rele(ds, dmu_recv_tag); return (EINVAL); } /* must not have an incremental recv already in progress */ if (!mutex_tryenter(&ds->ds_recvlock)) { dsl_dataset_rele(ds, dmu_recv_tag); return (EBUSY); } /* tmp clone name is: tofs/%tosnap */ (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname), "%%%s", tosnap); rbsa.force = force; err = dsl_sync_task_do(ds->ds_dir->dd_pool, recv_existing_check, recv_existing_sync, ds, &rbsa, 5); if (err) { mutex_exit(&ds->ds_recvlock); dsl_dataset_rele(ds, dmu_recv_tag); return (err); } drc->drc_logical_ds = ds; drc->drc_real_ds = rbsa.ds; } else if (err == ENOENT) { /* target fs does not exist; must be a full backup or clone */ char *cp; /* * If it's a non-clone incremental, we are missing the * target fs, so fail the recv. */ if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE)) return (ENOENT); /* Open the parent of tofs */ cp = strrchr(tofs, '/'); *cp = '\0'; err = dsl_dataset_hold(tofs, FTAG, &ds); *cp = '/'; if (err) return (err); if (dmu_recv_verify_features(ds, drrb)) { dsl_dataset_rele(ds, FTAG); return (ENOTSUP); } err = dsl_sync_task_do(ds->ds_dir->dd_pool, recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5); dsl_dataset_rele(ds, FTAG); if (err) return (err); drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds; drc->drc_newfs = B_TRUE; } return (err); } struct restorearg { int err; int byteswap; kthread_t *td; struct file *fp; char *buf; uint64_t voff; int bufsize; /* amount of memory allocated for buf */ zio_cksum_t cksum; avl_tree_t *guid_to_ds_map; }; typedef struct guid_map_entry { uint64_t guid; dsl_dataset_t *gme_ds; avl_node_t avlnode; } guid_map_entry_t; static int guid_compare(const void *arg1, const void *arg2) { const guid_map_entry_t *gmep1 = arg1; const guid_map_entry_t *gmep2 = arg2; if (gmep1->guid < gmep2->guid) return (-1); else if (gmep1->guid > gmep2->guid) return (1); return (0); } static void free_guid_map_onexit(void *arg) { avl_tree_t *ca = arg; void *cookie = NULL; guid_map_entry_t *gmep; while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) { dsl_dataset_rele(gmep->gme_ds, ca); kmem_free(gmep, sizeof (guid_map_entry_t)); } avl_destroy(ca); kmem_free(ca, sizeof (avl_tree_t)); } static int restore_bytes(struct restorearg *ra, void *buf, int len, off_t off, ssize_t *resid) { struct uio auio; struct iovec aiov; int error; aiov.iov_base = buf; aiov.iov_len = len; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_resid = len; auio.uio_segflg = UIO_SYSSPACE; auio.uio_rw = UIO_READ; auio.uio_offset = off; auio.uio_td = ra->td; #ifdef _KERNEL error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td); #else fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__); error = EOPNOTSUPP; #endif *resid = auio.uio_resid; return (error); } static void * restore_read(struct restorearg *ra, int len) { void *rv; int done = 0; /* some things will require 8-byte alignment, so everything must be 8-byte aligned */ ASSERT3U(len % 8, ==, 0); while (done < len) { ssize_t resid; ra->err = restore_bytes(ra, (caddr_t)ra->buf + done, len - done, ra->voff, &resid); if (resid == len - done) ra->err = EINVAL; ra->voff += len - done - resid; done = len - resid; if (ra->err) return (NULL); } ASSERT3U(done,
==, len); rv = ra->buf; if (ra->byteswap) fletcher_4_incremental_byteswap(rv, len, &ra->cksum); else fletcher_4_incremental_native(rv, len, &ra->cksum); return (rv); } static void backup_byteswap(dmu_replay_record_t *drr) { #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) drr->drr_type = BSWAP_32(drr->drr_type); drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); switch (drr->drr_type) { case DRR_BEGIN: DO64(drr_begin.drr_magic); DO64(drr_begin.drr_versioninfo); DO64(drr_begin.drr_creation_time); DO32(drr_begin.drr_type); DO32(drr_begin.drr_flags); DO64(drr_begin.drr_toguid); DO64(drr_begin.drr_fromguid); break; case DRR_OBJECT: DO64(drr_object.drr_object); /* DO64(drr_object.drr_allocation_txg); */ DO32(drr_object.drr_type); DO32(drr_object.drr_bonustype); DO32(drr_object.drr_blksz); DO32(drr_object.drr_bonuslen); DO64(drr_object.drr_toguid); break; case DRR_FREEOBJECTS: DO64(drr_freeobjects.drr_firstobj); DO64(drr_freeobjects.drr_numobjs); DO64(drr_freeobjects.drr_toguid); break; case DRR_WRITE: DO64(drr_write.drr_object); DO32(drr_write.drr_type); DO64(drr_write.drr_offset); DO64(drr_write.drr_length); DO64(drr_write.drr_toguid); DO64(drr_write.drr_key.ddk_cksum.zc_word[0]); DO64(drr_write.drr_key.ddk_cksum.zc_word[1]); DO64(drr_write.drr_key.ddk_cksum.zc_word[2]); DO64(drr_write.drr_key.ddk_cksum.zc_word[3]); DO64(drr_write.drr_key.ddk_prop); break; case DRR_WRITE_BYREF: DO64(drr_write_byref.drr_object); DO64(drr_write_byref.drr_offset); DO64(drr_write_byref.drr_length); DO64(drr_write_byref.drr_toguid); DO64(drr_write_byref.drr_refguid); DO64(drr_write_byref.drr_refobject); DO64(drr_write_byref.drr_refoffset); DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]); DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]); DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]); DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]); DO64(drr_write_byref.drr_key.ddk_prop); break; case DRR_FREE: DO64(drr_free.drr_object); DO64(drr_free.drr_offset); DO64(drr_free.drr_length); DO64(drr_free.drr_toguid); break; case DRR_SPILL: DO64(drr_spill.drr_object); DO64(drr_spill.drr_length); DO64(drr_spill.drr_toguid); break; case DRR_END: DO64(drr_end.drr_checksum.zc_word[0]); DO64(drr_end.drr_checksum.zc_word[1]); DO64(drr_end.drr_checksum.zc_word[2]); DO64(drr_end.drr_checksum.zc_word[3]); DO64(drr_end.drr_toguid); break; } #undef DO64 #undef DO32 } static int restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro) { int err; dmu_tx_t *tx; void *data = NULL; if (drro->drr_type == DMU_OT_NONE || drro->drr_type >= DMU_OT_NUMTYPES || drro->drr_bonustype >= DMU_OT_NUMTYPES || drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || drro->drr_blksz < SPA_MINBLOCKSIZE || drro->drr_blksz > SPA_MAXBLOCKSIZE || drro->drr_bonuslen > DN_MAX_BONUSLEN) { return (EINVAL); } err = dmu_object_info(os, drro->drr_object, NULL); if (err != 0 && err != ENOENT) return (EINVAL); if (drro->drr_bonuslen) { data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8)); if (ra->err) return (ra->err); } if (err == ENOENT) { /* currently free, want to be allocated */ tx = dmu_tx_create(os); dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); err = dmu_tx_assign(tx, TXG_WAIT); if (err) { dmu_tx_abort(tx); return (err); } err = dmu_object_claim(os, drro->drr_object, drro->drr_type, drro->drr_blksz, drro->drr_bonustype, drro->drr_bonuslen, tx); dmu_tx_commit(tx); } else { /* currently 
allocated, want to be allocated */ err = dmu_object_reclaim(os, drro->drr_object, drro->drr_type, drro->drr_blksz, drro->drr_bonustype, drro->drr_bonuslen); } if (err) { return (EINVAL); } tx = dmu_tx_create(os); dmu_tx_hold_bonus(tx, drro->drr_object); err = dmu_tx_assign(tx, TXG_WAIT); if (err) { dmu_tx_abort(tx); return (err); } dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype, tx); dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx); if (data != NULL) { dmu_buf_t *db; VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db)); dmu_buf_will_dirty(db, tx); ASSERT3U(db->db_size, >=, drro->drr_bonuslen); bcopy(data, db->db_data, drro->drr_bonuslen); if (ra->byteswap) { dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data, drro->drr_bonuslen); } dmu_buf_rele(db, FTAG); } dmu_tx_commit(tx); return (0); } /* ARGSUSED */ static int restore_freeobjects(struct restorearg *ra, objset_t *os, struct drr_freeobjects *drrfo) { uint64_t obj; if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) return (EINVAL); for (obj = drrfo->drr_firstobj; obj < drrfo->drr_firstobj + drrfo->drr_numobjs; (void) dmu_object_next(os, &obj, FALSE, 0)) { int err; if (dmu_object_info(os, obj, NULL) != 0) continue; err = dmu_free_object(os, obj); if (err) return (err); } return (0); } static int restore_write(struct restorearg *ra, objset_t *os, struct drr_write *drrw) { dmu_tx_t *tx; void *data; int err; if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset || drrw->drr_type >= DMU_OT_NUMTYPES) return (EINVAL); data = restore_read(ra, drrw->drr_length); if (data == NULL) return (ra->err); if (dmu_object_info(os, drrw->drr_object, NULL) != 0) return (EINVAL); tx = dmu_tx_create(os); dmu_tx_hold_write(tx, drrw->drr_object, drrw->drr_offset, drrw->drr_length); err = dmu_tx_assign(tx, TXG_WAIT); if (err) { dmu_tx_abort(tx); return (err); } if (ra->byteswap) dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length); dmu_write(os, drrw->drr_object, drrw->drr_offset, drrw->drr_length, data, tx); dmu_tx_commit(tx); return (0); } /* * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed * streams to refer to a copy of the data that is already on the * system because it came in earlier in the stream. This function * finds the earlier copy of the data, and uses that copy instead of * data from the stream to fulfill this write. */ static int restore_write_byref(struct restorearg *ra, objset_t *os, struct drr_write_byref *drrwbr) { dmu_tx_t *tx; int err; guid_map_entry_t gmesrch; guid_map_entry_t *gmep; avl_index_t where; objset_t *ref_os = NULL; dmu_buf_t *dbp; if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset) return (EINVAL); /* * If the GUID of the referenced dataset is different from the * GUID of the target dataset, find the referenced dataset. 
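* The guid_to_ds_map AVL tree searched below is populated as earlier * snapshots of the same compound stream finish (see add_ds_to_guidmap()), * so a WRITE_BYREF record can only reference data that arrived earlier in * this receive session.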
*/ if (drrwbr->drr_toguid != drrwbr->drr_refguid) { gmesrch.guid = drrwbr->drr_refguid; if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch, &where)) == NULL) { return (EINVAL); } if (dmu_objset_from_ds(gmep->gme_ds, &ref_os)) return (EINVAL); } else { ref_os = os; } if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject, drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH)) return (err); tx = dmu_tx_create(os); dmu_tx_hold_write(tx, drrwbr->drr_object, drrwbr->drr_offset, drrwbr->drr_length); err = dmu_tx_assign(tx, TXG_WAIT); if (err) { dmu_tx_abort(tx); return (err); } dmu_write(os, drrwbr->drr_object, drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx); dmu_buf_rele(dbp, FTAG); dmu_tx_commit(tx); return (0); } static int restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs) { dmu_tx_t *tx; void *data; dmu_buf_t *db, *db_spill; int err; if (drrs->drr_length < SPA_MINBLOCKSIZE || drrs->drr_length > SPA_MAXBLOCKSIZE) return (EINVAL); data = restore_read(ra, drrs->drr_length); if (data == NULL) return (ra->err); if (dmu_object_info(os, drrs->drr_object, NULL) != 0) return (EINVAL); VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db)); if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) { dmu_buf_rele(db, FTAG); return (err); } tx = dmu_tx_create(os); dmu_tx_hold_spill(tx, db->db_object); err = dmu_tx_assign(tx, TXG_WAIT); if (err) { dmu_buf_rele(db, FTAG); dmu_buf_rele(db_spill, FTAG); dmu_tx_abort(tx); return (err); } dmu_buf_will_dirty(db_spill, tx); if (db_spill->db_size < drrs->drr_length) VERIFY(0 == dbuf_spill_set_blksz(db_spill, drrs->drr_length, tx)); bcopy(data, db_spill->db_data, drrs->drr_length); dmu_buf_rele(db, FTAG); dmu_buf_rele(db_spill, FTAG); dmu_tx_commit(tx); return (0); } /* ARGSUSED */ static int restore_free(struct restorearg *ra, objset_t *os, struct drr_free *drrf) { int err; if (drrf->drr_length != -1ULL && drrf->drr_offset + drrf->drr_length < drrf->drr_offset) return (EINVAL); if (dmu_object_info(os, drrf->drr_object, NULL) != 0) return (EINVAL); err = dmu_free_long_range(os, drrf->drr_object, drrf->drr_offset, drrf->drr_length); return (err); } /* * NB: callers *must* call dmu_recv_end() if this succeeds. 
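* dmu_recv_end() is what snapshots the received data, swaps the temporary * clone into place for an existing filesystem, and clears * DS_FLAG_INCONSISTENT; until then the dataset remains marked inconsistent.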
*/ int dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp, int cleanup_fd, uint64_t *action_handlep) { struct restorearg ra = { 0 }; dmu_replay_record_t *drr; objset_t *os; zio_cksum_t pcksum; int featureflags; if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) ra.byteswap = TRUE; { /* compute checksum of drr_begin record */ dmu_replay_record_t *drr; drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); drr->drr_type = DRR_BEGIN; drr->drr_u.drr_begin = *drc->drc_drrb; if (ra.byteswap) { fletcher_4_incremental_byteswap(drr, sizeof (dmu_replay_record_t), &ra.cksum); } else { fletcher_4_incremental_native(drr, sizeof (dmu_replay_record_t), &ra.cksum); } kmem_free(drr, sizeof (dmu_replay_record_t)); } if (ra.byteswap) { struct drr_begin *drrb = drc->drc_drrb; drrb->drr_magic = BSWAP_64(drrb->drr_magic); drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo); drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time); drrb->drr_type = BSWAP_32(drrb->drr_type); drrb->drr_toguid = BSWAP_64(drrb->drr_toguid); drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid); } ra.td = curthread; ra.fp = fp; ra.voff = *voffp; ra.bufsize = 1<<20; ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP); /* these were verified in dmu_recv_begin */ ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) == DMU_SUBSTREAM); ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES); /* * Open the objset we are modifying. */ VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0); ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT); featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo); /* if this stream is dedup'ed, set up the avl tree for guid mapping */ if (featureflags & DMU_BACKUP_FEATURE_DEDUP) { minor_t minor; if (cleanup_fd == -1) { ra.err = EBADF; goto out; } ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor); if (ra.err) { cleanup_fd = -1; goto out; } if (*action_handlep == 0) { ra.guid_to_ds_map = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP); avl_create(ra.guid_to_ds_map, guid_compare, sizeof (guid_map_entry_t), offsetof(guid_map_entry_t, avlnode)); ra.err = zfs_onexit_add_cb(minor, free_guid_map_onexit, ra.guid_to_ds_map, action_handlep); if (ra.err) goto out; } else { ra.err = zfs_onexit_cb_data(minor, *action_handlep, (void **)&ra.guid_to_ds_map); if (ra.err) goto out; } drc->drc_guid_to_ds_map = ra.guid_to_ds_map; } /* * Read records and process them. */ pcksum = ra.cksum; while (ra.err == 0 && NULL != (drr = restore_read(&ra, sizeof (*drr)))) { if (issig(JUSTLOOKING) && issig(FORREAL)) { ra.err = EINTR; goto out; } if (ra.byteswap) backup_byteswap(drr); switch (drr->drr_type) { case DRR_OBJECT: { /* * We need to make a copy of the record header, * because restore_{object,write} may need to * restore_read(), which will invalidate drr. 
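* (restore_read() returns a pointer into the shared ra->buf, which the * next call reuses, so the current record header would be overwritten in * place.)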
*/ struct drr_object drro = drr->drr_u.drr_object; ra.err = restore_object(&ra, os, &drro); break; } case DRR_FREEOBJECTS: { struct drr_freeobjects drrfo = drr->drr_u.drr_freeobjects; ra.err = restore_freeobjects(&ra, os, &drrfo); break; } case DRR_WRITE: { struct drr_write drrw = drr->drr_u.drr_write; ra.err = restore_write(&ra, os, &drrw); break; } case DRR_WRITE_BYREF: { struct drr_write_byref drrwbr = drr->drr_u.drr_write_byref; ra.err = restore_write_byref(&ra, os, &drrwbr); break; } case DRR_FREE: { struct drr_free drrf = drr->drr_u.drr_free; ra.err = restore_free(&ra, os, &drrf); break; } case DRR_END: { struct drr_end drre = drr->drr_u.drr_end; /* * We compare against the *previous* checksum * value, because the stored checksum is of * everything before the DRR_END record. */ if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum)) ra.err = ECKSUM; goto out; } case DRR_SPILL: { struct drr_spill drrs = drr->drr_u.drr_spill; ra.err = restore_spill(&ra, os, &drrs); break; } default: ra.err = EINVAL; goto out; } pcksum = ra.cksum; } ASSERT(ra.err != 0); out: if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) zfs_onexit_fd_rele(cleanup_fd); if (ra.err != 0) { /* * destroy what we created, so we don't leave it in the * inconsistent restoring state. */ txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0); (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE); if (drc->drc_real_ds != drc->drc_logical_ds) { mutex_exit(&drc->drc_logical_ds->ds_recvlock); dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag); } } kmem_free(ra.buf, ra.bufsize); *voffp = ra.voff; return (ra.err); } struct recvendsyncarg { char *tosnap; uint64_t creation_time; uint64_t toguid; }; static int recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; struct recvendsyncarg *resa = arg2; return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx)); } static void recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; struct recvendsyncarg *resa = arg2; dsl_dataset_snapshot_sync(ds, resa->tosnap, tx); /* set snapshot's creation time and guid */ dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time; ds->ds_prev->ds_phys->ds_guid = resa->toguid; ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT; dmu_buf_will_dirty(ds->ds_dbuf, tx); ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT; } static int add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds) { dsl_pool_t *dp = ds->ds_dir->dd_pool; uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj; dsl_dataset_t *snapds; guid_map_entry_t *gmep; int err; ASSERT(guid_map != NULL); rw_enter(&dp->dp_config_rwlock, RW_READER); err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds); if (err == 0) { gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP); gmep->guid = snapds->ds_phys->ds_guid; gmep->gme_ds = snapds; avl_add(guid_map, gmep); } rw_exit(&dp->dp_config_rwlock); return (err); } static int dmu_recv_existing_end(dmu_recv_cookie_t *drc) { struct recvendsyncarg resa; dsl_dataset_t *ds = drc->drc_logical_ds; int err, myerr; /* * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean() * expects it to have a ds_user_ptr (and zil), but clone_swap() * can close it. 
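* Waiting for the current txg to sync below flushes that dirty state out * before the swap is attempted.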
*/ txg_wait_synced(ds->ds_dir->dd_pool, 0); if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) { err = dsl_dataset_clone_swap(drc->drc_real_ds, ds, drc->drc_force); if (err) goto out; } else { mutex_exit(&ds->ds_recvlock); dsl_dataset_rele(ds, dmu_recv_tag); (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE); return (EBUSY); } resa.creation_time = drc->drc_drrb->drr_creation_time; resa.toguid = drc->drc_drrb->drr_toguid; resa.tosnap = drc->drc_tosnap; err = dsl_sync_task_do(ds->ds_dir->dd_pool, recv_end_check, recv_end_sync, ds, &resa, 3); if (err) { /* swap back */ (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE); } out: mutex_exit(&ds->ds_recvlock); if (err == 0 && drc->drc_guid_to_ds_map != NULL) (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds); dsl_dataset_disown(ds, dmu_recv_tag); myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE); ASSERT3U(myerr, ==, 0); return (err); } static int dmu_recv_new_end(dmu_recv_cookie_t *drc) { struct recvendsyncarg resa; dsl_dataset_t *ds = drc->drc_logical_ds; int err; /* * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean() * expects it to have a ds_user_ptr (and zil), but clone_swap() * can close it. */ txg_wait_synced(ds->ds_dir->dd_pool, 0); resa.creation_time = drc->drc_drrb->drr_creation_time; resa.toguid = drc->drc_drrb->drr_toguid; resa.tosnap = drc->drc_tosnap; err = dsl_sync_task_do(ds->ds_dir->dd_pool, recv_end_check, recv_end_sync, ds, &resa, 3); if (err) { /* clean up the fs we just recv'd into */ (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE); } else { if (drc->drc_guid_to_ds_map != NULL) (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds); /* release the hold from dmu_recv_begin */ dsl_dataset_disown(ds, dmu_recv_tag); } return (err); } int dmu_recv_end(dmu_recv_cookie_t *drc) { if (drc->drc_logical_ds != drc->drc_real_ds) return (dmu_recv_existing_end(drc)); else return (dmu_recv_new_end(drc)); } Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c (revision 235263) @@ -1,4292 +1,4299 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. * Copyright (c) 2011 Pawel Jakub Dawidek . * All rights reserved. 
* Portions Copyright (c) 2011 Martin Matuska */ #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include static char *dsl_reaper = "the grim reaper"; static dsl_checkfunc_t dsl_dataset_destroy_begin_check; static dsl_syncfunc_t dsl_dataset_destroy_begin_sync; static dsl_syncfunc_t dsl_dataset_set_reservation_sync; #define SWITCH64(x, y) \ { \ uint64_t __tmp = (x); \ (x) = (y); \ (y) = __tmp; \ } #define DS_REF_MAX (1ULL << 62) #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper) /* * Figure out how much of this delta should be propagated to the dsl_dir * layer. If there's a refreservation, that space has already been * partially accounted for in our ancestors. */ static int64_t parent_delta(dsl_dataset_t *ds, int64_t delta) { uint64_t old_bytes, new_bytes; if (ds->ds_reserved == 0) return (delta); old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved); ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta)); return (new_bytes - old_bytes); } void dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) { int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); int compressed = BP_GET_PSIZE(bp); int uncompressed = BP_GET_UCSIZE(bp); int64_t delta; dprintf_bp(bp, "ds=%p", ds); ASSERT(dmu_tx_is_syncing(tx)); /* It could have been compressed away to nothing */ if (BP_IS_HOLE(bp)) return; ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE); ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES); if (ds == NULL) { /* * Account for the meta-objset space in its placeholder * dsl_dir. */ ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */ dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, used, compressed, uncompressed, tx); dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); return; } dmu_buf_will_dirty(ds->ds_dbuf, tx); mutex_enter(&ds->ds_dir->dd_lock); mutex_enter(&ds->ds_lock); delta = parent_delta(ds, used); ds->ds_phys->ds_used_bytes += used; ds->ds_phys->ds_compressed_bytes += compressed; ds->ds_phys->ds_uncompressed_bytes += uncompressed; ds->ds_phys->ds_unique_bytes += used; mutex_exit(&ds->ds_lock); dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta, compressed, uncompressed, tx); dsl_dir_transfer_space(ds->ds_dir, used - delta, DD_USED_REFRSRV, DD_USED_HEAD, tx); mutex_exit(&ds->ds_dir->dd_lock); } int dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, boolean_t async) { if (BP_IS_HOLE(bp)) return (0); ASSERT(dmu_tx_is_syncing(tx)); ASSERT(bp->blk_birth <= tx->tx_txg); int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); int compressed = BP_GET_PSIZE(bp); int uncompressed = BP_GET_UCSIZE(bp); ASSERT(used > 0); if (ds == NULL) { /* * Account for the meta-objset space in its placeholder * dataset.
*/ dsl_free(tx->tx_pool, tx->tx_txg, bp); dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, -used, -compressed, -uncompressed, tx); dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); return (used); } ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool); ASSERT(!dsl_dataset_is_snapshot(ds)); dmu_buf_will_dirty(ds->ds_dbuf, tx); if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) { int64_t delta; dprintf_bp(bp, "freeing ds=%llu", ds->ds_object); dsl_free(tx->tx_pool, tx->tx_txg, bp); mutex_enter(&ds->ds_dir->dd_lock); mutex_enter(&ds->ds_lock); ASSERT(ds->ds_phys->ds_unique_bytes >= used || !DS_UNIQUE_IS_ACCURATE(ds)); delta = parent_delta(ds, -used); ds->ds_phys->ds_unique_bytes -= used; mutex_exit(&ds->ds_lock); dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta, -compressed, -uncompressed, tx); dsl_dir_transfer_space(ds->ds_dir, -used - delta, DD_USED_REFRSRV, DD_USED_HEAD, tx); mutex_exit(&ds->ds_dir->dd_lock); } else { dprintf_bp(bp, "putting on dead list: %s", ""); if (async) { /* * We are here as part of zio's write done callback, * which means we're a zio interrupt thread. We can't * call dsl_deadlist_insert() now because it may block * waiting for I/O. Instead, put bp on the deferred * queue and let dsl_pool_sync() finish the job. */ bplist_append(&ds->ds_pending_deadlist, bp); } else { dsl_deadlist_insert(&ds->ds_deadlist, bp, tx); } ASSERT3U(ds->ds_prev->ds_object, ==, ds->ds_phys->ds_prev_snap_obj); ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0); /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */ if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object && bp->blk_birth > ds->ds_prev->ds_phys->ds_prev_snap_txg) { dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); mutex_enter(&ds->ds_prev->ds_lock); ds->ds_prev->ds_phys->ds_unique_bytes += used; mutex_exit(&ds->ds_prev->ds_lock); } if (bp->blk_birth > ds->ds_dir->dd_origin_txg) { dsl_dir_transfer_space(ds->ds_dir, used, DD_USED_HEAD, DD_USED_SNAP, tx); } } mutex_enter(&ds->ds_lock); ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used); ds->ds_phys->ds_used_bytes -= used; ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed); ds->ds_phys->ds_compressed_bytes -= compressed; ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed); ds->ds_phys->ds_uncompressed_bytes -= uncompressed; mutex_exit(&ds->ds_lock); return (used); } uint64_t dsl_dataset_prev_snap_txg(dsl_dataset_t *ds) { uint64_t trysnap = 0; if (ds == NULL) return (0); /* * The snapshot creation could fail, but that would cause an * incorrect FALSE return, which would only result in an * overestimation of the amount of space that an operation would * consume, which is OK. * * There's also a small window where we could miss a pending * snapshot, because we could set the sync task in the quiescing * phase. So this should only be used as a guess. 
*/ if (ds->ds_trysnap_txg > spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa)) trysnap = ds->ds_trysnap_txg; return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap)); } boolean_t dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp, uint64_t blk_birth) { if (blk_birth <= dsl_dataset_prev_snap_txg(ds)) return (B_FALSE); ddt_prefetch(dsl_dataset_get_spa(ds), bp); return (B_TRUE); } /* ARGSUSED */ static void dsl_dataset_evict(dmu_buf_t *db, void *dsv) { dsl_dataset_t *ds = dsv; ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds)); unique_remove(ds->ds_fsid_guid); if (ds->ds_objset != NULL) dmu_objset_evict(ds->ds_objset); if (ds->ds_prev) { dsl_dataset_drop_ref(ds->ds_prev, ds); ds->ds_prev = NULL; } bplist_destroy(&ds->ds_pending_deadlist); if (db != NULL) { dsl_deadlist_close(&ds->ds_deadlist); } else { ASSERT(ds->ds_deadlist.dl_dbuf == NULL); ASSERT(!ds->ds_deadlist.dl_oldfmt); } if (ds->ds_dir) dsl_dir_close(ds->ds_dir, ds); ASSERT(!list_link_active(&ds->ds_synced_link)); if (mutex_owned(&ds->ds_lock)) mutex_exit(&ds->ds_lock); mutex_destroy(&ds->ds_lock); mutex_destroy(&ds->ds_recvlock); if (mutex_owned(&ds->ds_opening_lock)) mutex_exit(&ds->ds_opening_lock); mutex_destroy(&ds->ds_opening_lock); rw_destroy(&ds->ds_rwlock); cv_destroy(&ds->ds_exclusive_cv); kmem_free(ds, sizeof (dsl_dataset_t)); } static int dsl_dataset_get_snapname(dsl_dataset_t *ds) { dsl_dataset_phys_t *headphys; int err; dmu_buf_t *headdbuf; dsl_pool_t *dp = ds->ds_dir->dd_pool; objset_t *mos = dp->dp_meta_objset; if (ds->ds_snapname[0]) return (0); if (ds->ds_phys->ds_next_snap_obj == 0) return (0); err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &headdbuf); if (err) return (err); headphys = headdbuf->db_data; err = zap_value_search(dp->dp_meta_objset, headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname); dmu_buf_rele(headdbuf, FTAG); return (err); } static int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value) { objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; matchtype_t mt; int err; if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) mt = MT_FIRST; else mt = MT_EXACT; err = zap_lookup_norm(mos, snapobj, name, 8, 1, value, mt, NULL, 0, NULL); if (err == ENOTSUP && mt == MT_FIRST) err = zap_lookup(mos, snapobj, name, 8, 1, value); return (err); } static int dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx) { objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; matchtype_t mt; int err; dsl_dir_snap_cmtime_update(ds->ds_dir); if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) mt = MT_FIRST; else mt = MT_EXACT; err = zap_remove_norm(mos, snapobj, name, mt, tx); if (err == ENOTSUP && mt == MT_FIRST) err = zap_remove(mos, snapobj, name, tx); return (err); } static int dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag, dsl_dataset_t **dsp) { objset_t *mos = dp->dp_meta_objset; dmu_buf_t *dbuf; dsl_dataset_t *ds; int err; dmu_object_info_t doi; ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || dsl_pool_sync_context(dp)); err = dmu_bonus_hold(mos, dsobj, tag, &dbuf); if (err) return (err); /* Make sure dsobj has the correct object type. 
*/ dmu_object_info_from_db(dbuf, &doi); if (doi.doi_type != DMU_OT_DSL_DATASET) return (EINVAL); ds = dmu_buf_get_user(dbuf); if (ds == NULL) { dsl_dataset_t *winner; ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP); ds->ds_dbuf = dbuf; ds->ds_object = dsobj; ds->ds_phys = dbuf->db_data; mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL); + mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL); + rw_init(&ds->ds_rwlock, 0, 0, 0); cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL); bplist_create(&ds->ds_pending_deadlist); dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj); + + list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t), + offsetof(dmu_sendarg_t, dsa_link)); if (err == 0) { err = dsl_dir_open_obj(dp, ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir); } if (err) { mutex_destroy(&ds->ds_lock); mutex_destroy(&ds->ds_recvlock); mutex_destroy(&ds->ds_opening_lock); rw_destroy(&ds->ds_rwlock); cv_destroy(&ds->ds_exclusive_cv); bplist_destroy(&ds->ds_pending_deadlist); dsl_deadlist_close(&ds->ds_deadlist); kmem_free(ds, sizeof (dsl_dataset_t)); dmu_buf_rele(dbuf, tag); return (err); } if (!dsl_dataset_is_snapshot(ds)) { ds->ds_snapname[0] = '\0'; if (ds->ds_phys->ds_prev_snap_obj) { err = dsl_dataset_get_ref(dp, ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev); } } else { if (zfs_flags & ZFS_DEBUG_SNAPNAMES) err = dsl_dataset_get_snapname(ds); if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) { err = zap_count( ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_phys->ds_userrefs_obj, &ds->ds_userrefs); } } if (err == 0 && !dsl_dataset_is_snapshot(ds)) { /* * In sync context, we're called with either no lock * or with the write lock. If we're not syncing, * we're always called with the read lock held. 
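* Consequently need_lock below is true only in the syncing-context case * where the write lock is not already held, and only then do we take the * read lock ourselves.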
*/ boolean_t need_lock = !RW_WRITE_HELD(&dp->dp_config_rwlock) && dsl_pool_sync_context(dp); if (need_lock) rw_enter(&dp->dp_config_rwlock, RW_READER); err = dsl_prop_get_ds(ds, "refreservation", sizeof (uint64_t), 1, &ds->ds_reserved, NULL); if (err == 0) { err = dsl_prop_get_ds(ds, "refquota", sizeof (uint64_t), 1, &ds->ds_quota, NULL); } if (need_lock) rw_exit(&dp->dp_config_rwlock); } else { ds->ds_reserved = ds->ds_quota = 0; } if (err == 0) { winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys, dsl_dataset_evict); } if (err || winner) { bplist_destroy(&ds->ds_pending_deadlist); dsl_deadlist_close(&ds->ds_deadlist); if (ds->ds_prev) dsl_dataset_drop_ref(ds->ds_prev, ds); dsl_dir_close(ds->ds_dir, ds); mutex_destroy(&ds->ds_lock); mutex_destroy(&ds->ds_recvlock); mutex_destroy(&ds->ds_opening_lock); rw_destroy(&ds->ds_rwlock); cv_destroy(&ds->ds_exclusive_cv); kmem_free(ds, sizeof (dsl_dataset_t)); if (err) { dmu_buf_rele(dbuf, tag); return (err); } ds = winner; } else { ds->ds_fsid_guid = unique_insert(ds->ds_phys->ds_fsid_guid); } } ASSERT3P(ds->ds_dbuf, ==, dbuf); ASSERT3P(ds->ds_phys, ==, dbuf->db_data); ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 || spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN || dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap); mutex_enter(&ds->ds_lock); if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) { mutex_exit(&ds->ds_lock); dmu_buf_rele(ds->ds_dbuf, tag); return (ENOENT); } mutex_exit(&ds->ds_lock); *dsp = ds; return (0); } static int dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag) { dsl_pool_t *dp = ds->ds_dir->dd_pool; /* * In syncing context we don't want the rwlock lock: there * may be an existing writer waiting for sync phase to * finish. We don't need to worry about such writers, since * sync phase is single-threaded, so the writer can't be * doing anything while we are active. */ if (dsl_pool_sync_context(dp)) { ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); return (0); } /* * Normal users will hold the ds_rwlock as a READER until they * are finished (i.e., call dsl_dataset_rele()). "Owners" will * drop their READER lock after they set the ds_owner field. * * If the dataset is being destroyed, the destroy thread will * obtain a WRITER lock for exclusive access after it's done its * open-context work and then change the ds_owner to * dsl_reaper once destruction is assured. So threads * may block here temporarily, until the "destructability" of * the dataset is determined. */ ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock)); mutex_enter(&ds->ds_lock); while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) { rw_exit(&dp->dp_config_rwlock); cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock); if (DSL_DATASET_IS_DESTROYED(ds)) { mutex_exit(&ds->ds_lock); dsl_dataset_drop_ref(ds, tag); rw_enter(&dp->dp_config_rwlock, RW_READER); return (ENOENT); } /* * The dp_config_rwlock lives above the ds_lock. And * we need to check DSL_DATASET_IS_DESTROYED() while * holding the ds_lock, so we have to drop and reacquire * the ds_lock here. 
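 *
 * Illustrative ordering (inferred from this loop alone):
 *
 *	dp_config_rwlock	outer, taken first
 *	ds_lock			inner, taken second
 *
 * hence the sequence below: drop ds_lock, re-enter
 * dp_config_rwlock as READER, then re-enter ds_lock.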
*/ mutex_exit(&ds->ds_lock); rw_enter(&dp->dp_config_rwlock, RW_READER); mutex_enter(&ds->ds_lock); } mutex_exit(&ds->ds_lock); return (0); } int dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag, dsl_dataset_t **dsp) { int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp); if (err) return (err); return (dsl_dataset_hold_ref(*dsp, tag)); } int dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok, void *tag, dsl_dataset_t **dsp) { int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp); if (err) return (err); if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) { dsl_dataset_rele(*dsp, tag); *dsp = NULL; return (EBUSY); } return (0); } int dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp) { dsl_dir_t *dd; dsl_pool_t *dp; const char *snapname; uint64_t obj; int err = 0; err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname); if (err) return (err); dp = dd->dd_pool; obj = dd->dd_phys->dd_head_dataset_obj; rw_enter(&dp->dp_config_rwlock, RW_READER); if (obj) err = dsl_dataset_get_ref(dp, obj, tag, dsp); else err = ENOENT; if (err) goto out; err = dsl_dataset_hold_ref(*dsp, tag); /* we may be looking for a snapshot */ if (err == 0 && snapname != NULL) { dsl_dataset_t *ds = NULL; if (*snapname++ != '@') { dsl_dataset_rele(*dsp, tag); err = ENOENT; goto out; } dprintf("looking for snapshot '%s'\n", snapname); err = dsl_dataset_snap_lookup(*dsp, snapname, &obj); if (err == 0) err = dsl_dataset_get_ref(dp, obj, tag, &ds); dsl_dataset_rele(*dsp, tag); ASSERT3U((err == 0), ==, (ds != NULL)); if (ds) { mutex_enter(&ds->ds_lock); if (ds->ds_snapname[0] == 0) (void) strlcpy(ds->ds_snapname, snapname, sizeof (ds->ds_snapname)); mutex_exit(&ds->ds_lock); err = dsl_dataset_hold_ref(ds, tag); *dsp = err ? NULL : ds; } } out: rw_exit(&dp->dp_config_rwlock); dsl_dir_close(dd, FTAG); return (err); } int dsl_dataset_own(const char *name, boolean_t inconsistentok, void *tag, dsl_dataset_t **dsp) { int err = dsl_dataset_hold(name, tag, dsp); if (err) return (err); if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) { dsl_dataset_rele(*dsp, tag); return (EBUSY); } return (0); } void dsl_dataset_name(dsl_dataset_t *ds, char *name) { if (ds == NULL) { (void) strcpy(name, "mos"); } else { dsl_dir_name(ds->ds_dir, name); VERIFY(0 == dsl_dataset_get_snapname(ds)); if (ds->ds_snapname[0]) { (void) strcat(name, "@"); /* * We use a "recursive" mutex so that we * can call dprintf_ds() with ds_lock held. 
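 * Strictly speaking, no actually-recursive mutex is involved
 * here: the code below simply tests MUTEX_HELD() and skips
 * mutex_enter() when the caller already owns ds_lock, which
 * gives the same effect.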
*/ if (!MUTEX_HELD(&ds->ds_lock)) { mutex_enter(&ds->ds_lock); (void) strcat(name, ds->ds_snapname); mutex_exit(&ds->ds_lock); } else { (void) strcat(name, ds->ds_snapname); } } } } static int dsl_dataset_namelen(dsl_dataset_t *ds) { int result; if (ds == NULL) { result = 3; /* "mos" */ } else { result = dsl_dir_namelen(ds->ds_dir); VERIFY(0 == dsl_dataset_get_snapname(ds)); if (ds->ds_snapname[0]) { ++result; /* adding one for the @-sign */ if (!MUTEX_HELD(&ds->ds_lock)) { mutex_enter(&ds->ds_lock); result += strlen(ds->ds_snapname); mutex_exit(&ds->ds_lock); } else { result += strlen(ds->ds_snapname); } } } return (result); } void dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag) { dmu_buf_rele(ds->ds_dbuf, tag); } void dsl_dataset_rele(dsl_dataset_t *ds, void *tag) { if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) { rw_exit(&ds->ds_rwlock); } dsl_dataset_drop_ref(ds, tag); } void dsl_dataset_disown(dsl_dataset_t *ds, void *tag) { ASSERT((ds->ds_owner == tag && ds->ds_dbuf) || (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL)); mutex_enter(&ds->ds_lock); ds->ds_owner = NULL; if (RW_WRITE_HELD(&ds->ds_rwlock)) { rw_exit(&ds->ds_rwlock); cv_broadcast(&ds->ds_exclusive_cv); } mutex_exit(&ds->ds_lock); if (ds->ds_dbuf) dsl_dataset_drop_ref(ds, tag); else dsl_dataset_evict(NULL, ds); } boolean_t dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag) { boolean_t gotit = FALSE; mutex_enter(&ds->ds_lock); if (ds->ds_owner == NULL && (!DS_IS_INCONSISTENT(ds) || inconsistentok)) { ds->ds_owner = tag; if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) rw_exit(&ds->ds_rwlock); gotit = TRUE; } mutex_exit(&ds->ds_lock); return (gotit); } void dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner) { ASSERT3P(owner, ==, ds->ds_owner); if (!RW_WRITE_HELD(&ds->ds_rwlock)) rw_enter(&ds->ds_rwlock, RW_WRITER); } uint64_t dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, uint64_t flags, dmu_tx_t *tx) { dsl_pool_t *dp = dd->dd_pool; dmu_buf_t *dbuf; dsl_dataset_phys_t *dsphys; uint64_t dsobj; objset_t *mos = dp->dp_meta_objset; if (origin == NULL) origin = dp->dp_origin_snap; ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp); ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0); ASSERT(dmu_tx_is_syncing(tx)); ASSERT(dd->dd_phys->dd_head_dataset_obj == 0); dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); dmu_buf_will_dirty(dbuf, tx); dsphys = dbuf->db_data; bzero(dsphys, sizeof (dsl_dataset_phys_t)); dsphys->ds_dir_obj = dd->dd_object; dsphys->ds_flags = flags; dsphys->ds_fsid_guid = unique_create(); (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, sizeof (dsphys->ds_guid)); dsphys->ds_snapnames_zapobj = zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP, DMU_OT_NONE, 0, tx); dsphys->ds_creation_time = gethrestime_sec(); dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 
1 : tx->tx_txg; if (origin == NULL) { dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx); } else { dsl_dataset_t *ohds; dsphys->ds_prev_snap_obj = origin->ds_object; dsphys->ds_prev_snap_txg = origin->ds_phys->ds_creation_txg; dsphys->ds_used_bytes = origin->ds_phys->ds_used_bytes; dsphys->ds_compressed_bytes = origin->ds_phys->ds_compressed_bytes; dsphys->ds_uncompressed_bytes = origin->ds_phys->ds_uncompressed_bytes; dsphys->ds_bp = origin->ds_phys->ds_bp; dsphys->ds_flags |= origin->ds_phys->ds_flags; dmu_buf_will_dirty(origin->ds_dbuf, tx); origin->ds_phys->ds_num_children++; VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds)); dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist, dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx); dsl_dataset_rele(ohds, FTAG); if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) { if (origin->ds_phys->ds_next_clones_obj == 0) { origin->ds_phys->ds_next_clones_obj = zap_create(mos, DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx); } VERIFY(0 == zap_add_int(mos, origin->ds_phys->ds_next_clones_obj, dsobj, tx)); } dmu_buf_will_dirty(dd->dd_dbuf, tx); dd->dd_phys->dd_origin_obj = origin->ds_object; if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { if (origin->ds_dir->dd_phys->dd_clones == 0) { dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx); origin->ds_dir->dd_phys->dd_clones = zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx); } VERIFY3U(0, ==, zap_add_int(mos, origin->ds_dir->dd_phys->dd_clones, dsobj, tx)); } } if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; dmu_buf_rele(dbuf, FTAG); dmu_buf_will_dirty(dd->dd_dbuf, tx); dd->dd_phys->dd_head_dataset_obj = dsobj; return (dsobj); } uint64_t dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname, dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx) { dsl_pool_t *dp = pdd->dd_pool; uint64_t dsobj, ddobj; dsl_dir_t *dd; ASSERT(lastname[0] != '@'); ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx); VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd)); dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx); dsl_deleg_set_create_perms(dd, tx, cr); dsl_dir_close(dd, FTAG); /* * If we are creating a clone, make sure we zero out any stale * data from the origin snapshots zil header. */ if (origin != NULL) { dsl_dataset_t *ds; objset_t *os; VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os)); bzero(&os->os_zil_header, sizeof (os->os_zil_header)); dsl_dataset_dirty(ds, tx); dsl_dataset_rele(ds, FTAG); } return (dsobj); } #ifdef __FreeBSD__ /* FreeBSD ioctl compat begin */ struct destroyarg { nvlist_t *nvl; const char *snapname; }; static int dsl_check_snap_cb(const char *name, void *arg) { struct destroyarg *da = arg; dsl_dataset_t *ds; char *dsname; dsname = kmem_asprintf("%s@%s", name, da->snapname); VERIFY(nvlist_add_boolean(da->nvl, dsname) == 0); return (0); } int dmu_get_recursive_snaps_nvl(const char *fsname, const char *snapname, nvlist_t *snaps) { struct destroyarg *da; int err; da = kmem_zalloc(sizeof (struct destroyarg), KM_SLEEP); da->nvl = snaps; da->snapname = snapname; err = dmu_objset_find(fsname, dsl_check_snap_cb, da, DS_FIND_CHILDREN); kmem_free(da, sizeof (struct destroyarg)); return (err); } /* FreeBSD ioctl compat end */ #endif /* __FreeBSD__ */ /* * The snapshots must all be in the same pool. 
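 *
 * Hedged usage sketch (hypothetical caller; the pool, dataset and
 * snapshot names below are invented for illustration):
 *
 *	nvlist_t *snaps;
 *	char failed[MAXPATHLEN];
 *
 *	VERIFY(nvlist_alloc(&snaps, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_boolean(snaps, "tank/fs@monday") == 0);
 *	VERIFY(nvlist_add_boolean(snaps, "tank/fs@tuesday") == 0);
 *	err = dmu_snapshots_destroy_nvl(snaps, B_FALSE, failed);
 *
 * On failure, "failed" is filled in with the name of the snapshot
 * that triggered the error.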
*/ int dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed) { int err; dsl_sync_task_t *dst; spa_t *spa; nvpair_t *pair; dsl_sync_task_group_t *dstg; pair = nvlist_next_nvpair(snaps, NULL); if (pair == NULL) return (0); err = spa_open(nvpair_name(pair), &spa, FTAG); if (err) return (err); dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) { dsl_dataset_t *ds; int err; err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds); if (err == 0) { struct dsl_ds_destroyarg *dsda; dsl_dataset_make_exclusive(ds, dstg); dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP); dsda->ds = ds; dsda->defer = defer; dsl_sync_task_create(dstg, dsl_dataset_destroy_check, dsl_dataset_destroy_sync, dsda, dstg, 0); } else if (err == ENOENT) { err = 0; } else { (void) strcpy(failed, nvpair_name(pair)); break; } } if (err == 0) err = dsl_sync_task_group_wait(dstg); for (dst = list_head(&dstg->dstg_tasks); dst; dst = list_next(&dstg->dstg_tasks, dst)) { struct dsl_ds_destroyarg *dsda = dst->dst_arg1; dsl_dataset_t *ds = dsda->ds; /* * Return the file system name that triggered the error */ if (dst->dst_err) { dsl_dataset_name(ds, failed); } ASSERT3P(dsda->rm_origin, ==, NULL); dsl_dataset_disown(ds, dstg); kmem_free(dsda, sizeof (struct dsl_ds_destroyarg)); } dsl_sync_task_group_destroy(dstg); spa_close(spa, FTAG); return (err); } static boolean_t dsl_dataset_might_destroy_origin(dsl_dataset_t *ds) { boolean_t might_destroy = B_FALSE; mutex_enter(&ds->ds_lock); if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 && DS_IS_DEFER_DESTROY(ds)) might_destroy = B_TRUE; mutex_exit(&ds->ds_lock); return (might_destroy); } /* * If we're removing a clone, and these three conditions are true: * 1) the clone's origin has no other children * 2) the clone's origin has no user references * 3) the clone's origin has been marked for deferred destruction * Then, prepare to remove the origin as part of this sync task group. */ static int dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag) { dsl_dataset_t *ds = dsda->ds; dsl_dataset_t *origin = ds->ds_prev; if (dsl_dataset_might_destroy_origin(origin)) { char *name; int namelen; int error; namelen = dsl_dataset_namelen(origin) + 1; name = kmem_alloc(namelen, KM_SLEEP); dsl_dataset_name(origin, name); #ifdef _KERNEL error = zfs_unmount_snap(name, NULL); if (error) { kmem_free(name, namelen); return (error); } #endif error = dsl_dataset_own(name, B_TRUE, tag, &origin); kmem_free(name, namelen); if (error) return (error); dsda->rm_origin = origin; dsl_dataset_make_exclusive(origin, tag); } return (0); } /* * ds must be opened as OWNER. On return (whether successful or not), * ds will be closed and caller can no longer dereference it. 
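 *
 * Illustrative calling pattern (an assumption based on this
 * contract, not copied from an actual caller):
 *
 *	err = dsl_dataset_own(name, B_FALSE, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG, B_FALSE);
 *
 * and ds must not be dereferenced past this point, even on error.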
*/ int dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer) { int err; dsl_sync_task_group_t *dstg; objset_t *os; dsl_dir_t *dd; uint64_t obj; struct dsl_ds_destroyarg dsda = { 0 }; dsl_dataset_t dummy_ds = { 0 }; dsda.ds = ds; if (dsl_dataset_is_snapshot(ds)) { /* Destroying a snapshot is simpler */ dsl_dataset_make_exclusive(ds, tag); dsda.defer = defer; err = dsl_sync_task_do(ds->ds_dir->dd_pool, dsl_dataset_destroy_check, dsl_dataset_destroy_sync, &dsda, tag, 0); ASSERT3P(dsda.rm_origin, ==, NULL); goto out; } else if (defer) { err = EINVAL; goto out; } dd = ds->ds_dir; dummy_ds.ds_dir = dd; dummy_ds.ds_object = ds->ds_object; /* * Check for errors and mark this ds as inconsistent, in * case we crash while freeing the objects. */ err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check, dsl_dataset_destroy_begin_sync, ds, NULL, 0); if (err) goto out; err = dmu_objset_from_ds(ds, &os); if (err) goto out; /* * remove the objects in open context, so that we won't * have too much to do in syncing context. */ for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, ds->ds_phys->ds_prev_snap_txg)) { /* * Ignore errors, if there is not enough disk space * we will deal with it in dsl_dataset_destroy_sync(). */ (void) dmu_free_object(os, obj); } if (err != ESRCH) goto out; /* * Only the ZIL knows how to free log blocks. */ zil_destroy(dmu_objset_zil(os), B_FALSE); /* * Sync out all in-flight IO. */ txg_wait_synced(dd->dd_pool, 0); /* * If we managed to free all the objects in open * context, the user space accounting should be zero. */ if (ds->ds_phys->ds_bp.blk_fill == 0 && dmu_objset_userused_enabled(os)) { uint64_t count; ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 || count == 0); ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 || count == 0); } rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd); rw_exit(&dd->dd_pool->dp_config_rwlock); if (err) goto out; /* * Blow away the dsl_dir + head dataset. */ dsl_dataset_make_exclusive(ds, tag); /* * If we're removing a clone, we might also need to remove its * origin. */ do { dsda.need_prep = B_FALSE; if (dsl_dir_is_clone(dd)) { err = dsl_dataset_origin_rm_prep(&dsda, tag); if (err) { dsl_dir_close(dd, FTAG); goto out; } } dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool); dsl_sync_task_create(dstg, dsl_dataset_destroy_check, dsl_dataset_destroy_sync, &dsda, tag, 0); dsl_sync_task_create(dstg, dsl_dir_destroy_check, dsl_dir_destroy_sync, &dummy_ds, FTAG, 0); err = dsl_sync_task_group_wait(dstg); dsl_sync_task_group_destroy(dstg); /* * We could be racing against 'zfs release' or 'zfs destroy -d' * on the origin snap, in which case we can get EBUSY if we * needed to destroy the origin snap but were not ready to * do so. 
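 *
 * In that case dsl_dataset_origin_check() sets need_prep and the
 * task group fails with EBUSY; the do/while loop below then
 * re-runs dsl_dataset_origin_rm_prep() so the origin is owned
 * before the sync tasks are attempted again.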
*/ if (dsda.need_prep) { ASSERT(err == EBUSY); ASSERT(dsl_dir_is_clone(dd)); ASSERT(dsda.rm_origin == NULL); } } while (dsda.need_prep); if (dsda.rm_origin != NULL) dsl_dataset_disown(dsda.rm_origin, tag); /* if it is successful, dsl_dir_destroy_sync will close the dd */ if (err) dsl_dir_close(dd, FTAG); out: dsl_dataset_disown(ds, tag); return (err); } blkptr_t * dsl_dataset_get_blkptr(dsl_dataset_t *ds) { return (&ds->ds_phys->ds_bp); } void dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx) { ASSERT(dmu_tx_is_syncing(tx)); /* If it's the meta-objset, set dp_meta_rootbp */ if (ds == NULL) { tx->tx_pool->dp_meta_rootbp = *bp; } else { dmu_buf_will_dirty(ds->ds_dbuf, tx); ds->ds_phys->ds_bp = *bp; } } spa_t * dsl_dataset_get_spa(dsl_dataset_t *ds) { return (ds->ds_dir->dd_pool->dp_spa); } void dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx) { dsl_pool_t *dp; if (ds == NULL) /* this is the meta-objset */ return; ASSERT(ds->ds_objset != NULL); if (ds->ds_phys->ds_next_snap_obj != 0) panic("dirtying snapshot!"); dp = ds->ds_dir->dd_pool; if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) { /* up the hold count until we can be written out */ dmu_buf_add_ref(ds->ds_dbuf, ds); } } /* * The unique space in the head dataset can be calculated by subtracting * the space used in the most recent snapshot, that is still being used * in this file system, from the space currently in use. To figure out * the space in the most recent snapshot still in use, we need to take * the total space used in the snapshot and subtract out the space that * has been freed up since the snapshot was taken. */ static void dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds) { uint64_t mrs_used; uint64_t dlused, dlcomp, dluncomp; ASSERT(!dsl_dataset_is_snapshot(ds)); if (ds->ds_phys->ds_prev_snap_obj != 0) mrs_used = ds->ds_prev->ds_phys->ds_used_bytes; else mrs_used = 0; dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp); ASSERT3U(dlused, <=, mrs_used); ds->ds_phys->ds_unique_bytes = ds->ds_phys->ds_used_bytes - (mrs_used - dlused); if (spa_version(ds->ds_dir->dd_pool->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; } struct killarg { dsl_dataset_t *ds; dmu_tx_t *tx; }; /* ARGSUSED */ static int kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) { struct killarg *ka = arg; dmu_tx_t *tx = ka->tx; if (bp == NULL) return (0); if (zb->zb_level == ZB_ZIL_LEVEL) { ASSERT(zilog != NULL); /* * It's a block in the intent log. It has no * accounting, so just free it. */ dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp); } else { ASSERT(zilog == NULL); ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg); (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE); } return (0); } /* ARGSUSED */ static int dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; uint64_t count; int err; /* * Can't delete a head dataset if there are snapshots of it. * (Except if the only snapshots are from the branch we cloned * from.) */ if (ds->ds_prev != NULL && ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) return (EBUSY); /* * This is really a dsl_dir thing, but check it here so that * we'll be less likely to leave this dataset inconsistent & * nearly destroyed. 
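 *
 * Concretely: a non-zero count in dd_child_dir_zapobj means this
 * dsl_dir still has child datasets, so we fail with EEXIST below
 * before anything is marked DS_FLAG_INCONSISTENT.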
*/ err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count); if (err) return (err); if (count != 0) return (EEXIST); return (0); } /* ARGSUSED */ static void dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; dsl_pool_t *dp = ds->ds_dir->dd_pool; /* Mark it as inconsistent on-disk, in case we crash */ dmu_buf_will_dirty(ds->ds_dbuf, tx); ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx, "dataset = %llu", ds->ds_object); } static int dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag, dmu_tx_t *tx) { dsl_dataset_t *ds = dsda->ds; dsl_dataset_t *ds_prev = ds->ds_prev; if (dsl_dataset_might_destroy_origin(ds_prev)) { struct dsl_ds_destroyarg ndsda = {0}; /* * If we're not prepared to remove the origin, don't remove * the clone either. */ if (dsda->rm_origin == NULL) { dsda->need_prep = B_TRUE; return (EBUSY); } ndsda.ds = ds_prev; ndsda.is_origin_rm = B_TRUE; return (dsl_dataset_destroy_check(&ndsda, tag, tx)); } /* * If we're not going to remove the origin after all, * undo the open context setup. */ if (dsda->rm_origin != NULL) { dsl_dataset_disown(dsda->rm_origin, tag); dsda->rm_origin = NULL; } return (0); } /* * If you add new checks here, you may need to add * additional checks to the "temporary" case in * snapshot_check() in dmu_objset.c. */ /* ARGSUSED */ int dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx) { struct dsl_ds_destroyarg *dsda = arg1; dsl_dataset_t *ds = dsda->ds; /* we have an owner hold, so noone else can destroy us */ ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); /* * Only allow deferred destroy on pools that support it. * NOTE: deferred destroy is only supported on snapshots. */ if (dsda->defer) { if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) return (ENOTSUP); ASSERT(dsl_dataset_is_snapshot(ds)); return (0); } /* * Can't delete a head dataset if there are snapshots of it. * (Except if the only snapshots are from the branch we cloned * from.) */ if (ds->ds_prev != NULL && ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) return (EBUSY); /* * If we made changes this txg, traverse_dsl_dataset won't find * them. Try again. */ if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg) return (EAGAIN); if (dsl_dataset_is_snapshot(ds)) { /* * If this snapshot has an elevated user reference count, * we can't destroy it yet. */ if (ds->ds_userrefs > 0 && !dsda->releasing) return (EBUSY); mutex_enter(&ds->ds_lock); /* * Can't delete a branch point. However, if we're destroying * a clone and removing its origin due to it having a user * hold count of 0 and having been marked for deferred destroy, * it's OK for the origin to have a single clone. */ if (ds->ds_phys->ds_num_children > (dsda->is_origin_rm ? 2 : 1)) { mutex_exit(&ds->ds_lock); return (EEXIST); } mutex_exit(&ds->ds_lock); } else if (dsl_dir_is_clone(ds->ds_dir)) { return (dsl_dataset_origin_check(dsda, arg2, tx)); } /* XXX we should do some i/o error checking... 
*/ return (0); } struct refsarg { kmutex_t lock; boolean_t gone; kcondvar_t cv; }; /* ARGSUSED */ static void dsl_dataset_refs_gone(dmu_buf_t *db, void *argv) { struct refsarg *arg = argv; mutex_enter(&arg->lock); arg->gone = TRUE; cv_signal(&arg->cv); mutex_exit(&arg->lock); } static void dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag) { struct refsarg arg; bzero(&arg, sizeof(arg)); mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&arg.cv, NULL, CV_DEFAULT, NULL); arg.gone = FALSE; (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys, dsl_dataset_refs_gone); dmu_buf_rele(ds->ds_dbuf, tag); mutex_enter(&arg.lock); while (!arg.gone) cv_wait(&arg.cv, &arg.lock); ASSERT(arg.gone); mutex_exit(&arg.lock); ds->ds_dbuf = NULL; ds->ds_phys = NULL; mutex_destroy(&arg.lock); cv_destroy(&arg.cv); } static void remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx) { objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; uint64_t count; int err; ASSERT(ds->ds_phys->ds_num_children >= 2); err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx); /* * The err should not be ENOENT, but a bug in a previous version * of the code could cause upgrade_clones_cb() to not set * ds_next_snap_obj when it should, leading to a missing entry. * If we knew that the pool was created after * SPA_VERSION_NEXT_CLONES, we could assert that it isn't * ENOENT. However, at least we can check that we don't have * too many entries in the next_clones_obj even after failing to * remove this one. */ if (err != ENOENT) { VERIFY3U(err, ==, 0); } ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj, &count)); ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2); } static void dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx) { objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; zap_cursor_t zc; zap_attribute_t za; /* * If it is the old version, dd_clones doesn't exist so we can't * find the clones, but deadlist_remove_key() is a no-op so it * doesn't matter. 
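 *
 * Otherwise we walk dd_clones below and, for each clone born
 * after mintxg (dd_origin_txg > mintxg), remove the key from its
 * deadlist and recurse into its own clones, keeping the whole
 * clone subtree consistent.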
*/ if (ds->ds_dir->dd_phys->dd_clones == 0) return; for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { dsl_dataset_t *clone; VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool, za.za_first_integer, FTAG, &clone)); if (clone->ds_dir->dd_origin_txg > mintxg) { dsl_deadlist_remove_key(&clone->ds_deadlist, mintxg, tx); dsl_dataset_remove_clones_key(clone, mintxg, tx); } dsl_dataset_rele(clone, FTAG); } zap_cursor_fini(&zc); } struct process_old_arg { dsl_dataset_t *ds; dsl_dataset_t *ds_prev; boolean_t after_branch_point; zio_t *pio; uint64_t used, comp, uncomp; }; static int process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) { struct process_old_arg *poa = arg; dsl_pool_t *dp = poa->ds->ds_dir->dd_pool; if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) { dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx); if (poa->ds_prev && !poa->after_branch_point && bp->blk_birth > poa->ds_prev->ds_phys->ds_prev_snap_txg) { poa->ds_prev->ds_phys->ds_unique_bytes += bp_get_dsize_sync(dp->dp_spa, bp); } } else { poa->used += bp_get_dsize_sync(dp->dp_spa, bp); poa->comp += BP_GET_PSIZE(bp); poa->uncomp += BP_GET_UCSIZE(bp); dsl_free_sync(poa->pio, dp, tx->tx_txg, bp); } return (0); } static void process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev, dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx) { struct process_old_arg poa = { 0 }; dsl_pool_t *dp = ds->ds_dir->dd_pool; objset_t *mos = dp->dp_meta_objset; ASSERT(ds->ds_deadlist.dl_oldfmt); ASSERT(ds_next->ds_deadlist.dl_oldfmt); poa.ds = ds; poa.ds_prev = ds_prev; poa.after_branch_point = after_branch_point; poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj, process_old_cb, &poa, tx)); VERIFY3U(zio_wait(poa.pio), ==, 0); ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes); /* change snapused */ dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, -poa.used, -poa.comp, -poa.uncomp, tx); /* swap next's deadlist to our deadlist */ dsl_deadlist_close(&ds->ds_deadlist); dsl_deadlist_close(&ds_next->ds_deadlist); SWITCH64(ds_next->ds_phys->ds_deadlist_obj, ds->ds_phys->ds_deadlist_obj); dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj); dsl_deadlist_open(&ds_next->ds_deadlist, mos, ds_next->ds_phys->ds_deadlist_obj); } void dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx) { struct dsl_ds_destroyarg *dsda = arg1; dsl_dataset_t *ds = dsda->ds; int err; int after_branch_point = FALSE; dsl_pool_t *dp = ds->ds_dir->dd_pool; objset_t *mos = dp->dp_meta_objset; dsl_dataset_t *ds_prev = NULL; boolean_t wont_destroy; uint64_t obj; wont_destroy = (dsda->defer && (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)); ASSERT(ds->ds_owner || wont_destroy); ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1); ASSERT(ds->ds_prev == NULL || ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object); ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg); if (wont_destroy) { ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS); dmu_buf_will_dirty(ds->ds_dbuf, tx); ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY; return; } /* signal any waiters that this dataset is going away */ mutex_enter(&ds->ds_lock); ds->ds_owner = dsl_reaper; cv_broadcast(&ds->ds_exclusive_cv); mutex_exit(&ds->ds_lock); /* Remove our reservation */ if (ds->ds_reserved != 0) { dsl_prop_setarg_t psa; uint64_t value = 0; dsl_prop_setarg_init_uint64(&psa, "refreservation", 
(ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED), &value); psa.psa_effective_value = 0; /* predict default value */ dsl_dataset_set_reservation_sync(ds, &psa, tx); ASSERT3U(ds->ds_reserved, ==, 0); } ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); dsl_scan_ds_destroyed(ds, tx); obj = ds->ds_object; if (ds->ds_phys->ds_prev_snap_obj != 0) { if (ds->ds_prev) { ds_prev = ds->ds_prev; } else { VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev)); } after_branch_point = (ds_prev->ds_phys->ds_next_snap_obj != obj); dmu_buf_will_dirty(ds_prev->ds_dbuf, tx); if (after_branch_point && ds_prev->ds_phys->ds_next_clones_obj != 0) { remove_from_next_clones(ds_prev, obj, tx); if (ds->ds_phys->ds_next_snap_obj != 0) { VERIFY(0 == zap_add_int(mos, ds_prev->ds_phys->ds_next_clones_obj, ds->ds_phys->ds_next_snap_obj, tx)); } } if (after_branch_point && ds->ds_phys->ds_next_snap_obj == 0) { /* This clone is toast. */ ASSERT(ds_prev->ds_phys->ds_num_children > 1); ds_prev->ds_phys->ds_num_children--; /* * If the clone's origin has no other clones, no * user holds, and has been marked for deferred * deletion, then we should have done the necessary * destroy setup for it. */ if (ds_prev->ds_phys->ds_num_children == 1 && ds_prev->ds_userrefs == 0 && DS_IS_DEFER_DESTROY(ds_prev)) { ASSERT3P(dsda->rm_origin, !=, NULL); } else { ASSERT3P(dsda->rm_origin, ==, NULL); } } else if (!after_branch_point) { ds_prev->ds_phys->ds_next_snap_obj = ds->ds_phys->ds_next_snap_obj; } } if (dsl_dataset_is_snapshot(ds)) { dsl_dataset_t *ds_next; uint64_t old_unique; uint64_t used = 0, comp = 0, uncomp = 0; VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next)); ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj); old_unique = ds_next->ds_phys->ds_unique_bytes; dmu_buf_will_dirty(ds_next->ds_dbuf, tx); ds_next->ds_phys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj; ds_next->ds_phys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg; ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0); if (ds_next->ds_deadlist.dl_oldfmt) { process_old_deadlist(ds, ds_prev, ds_next, after_branch_point, tx); } else { /* Adjust prev's unique space. */ if (ds_prev && !after_branch_point) { dsl_deadlist_space_range(&ds_next->ds_deadlist, ds_prev->ds_phys->ds_prev_snap_txg, ds->ds_phys->ds_prev_snap_txg, &used, &comp, &uncomp); ds_prev->ds_phys->ds_unique_bytes += used; } /* Adjust snapused. */ dsl_deadlist_space_range(&ds_next->ds_deadlist, ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &used, &comp, &uncomp); dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, -used, -comp, -uncomp, tx); /* Move blocks to be freed to pool's free list. */ dsl_deadlist_move_bpobj(&ds_next->ds_deadlist, &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg, tx); dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, used, comp, uncomp, tx); dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx); /* Merge our deadlist into next's and free it. */ dsl_deadlist_merge(&ds_next->ds_deadlist, ds->ds_phys->ds_deadlist_obj, tx); } dsl_deadlist_close(&ds->ds_deadlist); dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx); /* Collapse range in clone heads */ dsl_dataset_remove_clones_key(ds, ds->ds_phys->ds_creation_txg, tx); if (dsl_dataset_is_snapshot(ds_next)) { dsl_dataset_t *ds_nextnext; /* * Update next's unique to include blocks which * were previously shared by only this snapshot * and it. 
Those blocks will be born after the * prev snap and before this snap, and will have * died after the next snap and before the one * after that (ie. be on the snap after next's * deadlist). */ VERIFY(0 == dsl_dataset_hold_obj(dp, ds_next->ds_phys->ds_next_snap_obj, FTAG, &ds_nextnext)); dsl_deadlist_space_range(&ds_nextnext->ds_deadlist, ds->ds_phys->ds_prev_snap_txg, ds->ds_phys->ds_creation_txg, &used, &comp, &uncomp); ds_next->ds_phys->ds_unique_bytes += used; dsl_dataset_rele(ds_nextnext, FTAG); ASSERT3P(ds_next->ds_prev, ==, NULL); /* Collapse range in this head. */ dsl_dataset_t *hds; VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds)); dsl_deadlist_remove_key(&hds->ds_deadlist, ds->ds_phys->ds_creation_txg, tx); dsl_dataset_rele(hds, FTAG); } else { ASSERT3P(ds_next->ds_prev, ==, ds); dsl_dataset_drop_ref(ds_next->ds_prev, ds_next); ds_next->ds_prev = NULL; if (ds_prev) { VERIFY(0 == dsl_dataset_get_ref(dp, ds->ds_phys->ds_prev_snap_obj, ds_next, &ds_next->ds_prev)); } dsl_dataset_recalc_head_uniq(ds_next); /* * Reduce the amount of our unconsmed refreservation * being charged to our parent by the amount of * new unique data we have gained. */ if (old_unique < ds_next->ds_reserved) { int64_t mrsdelta; uint64_t new_unique = ds_next->ds_phys->ds_unique_bytes; ASSERT(old_unique <= new_unique); mrsdelta = MIN(new_unique - old_unique, ds_next->ds_reserved - old_unique); dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, -mrsdelta, 0, 0, tx); } } dsl_dataset_rele(ds_next, FTAG); } else { /* * There's no next snapshot, so this is a head dataset. * Destroy the deadlist. Unless it's a clone, the * deadlist should be empty. (If it's a clone, it's * safe to ignore the deadlist contents.) */ struct killarg ka; dsl_deadlist_close(&ds->ds_deadlist); dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx); ds->ds_phys->ds_deadlist_obj = 0; /* * Free everything that we point to (that's born after * the previous snapshot, if we are a clone) * * NB: this should be very quick, because we already * freed all the objects in open context. */ ka.ds = ds; ka.tx = tx; err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST, kill_blkptr, &ka); ASSERT3U(err, ==, 0); ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0); if (ds->ds_prev != NULL) { if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { VERIFY3U(0, ==, zap_remove_int(mos, ds->ds_prev->ds_dir->dd_phys->dd_clones, ds->ds_object, tx)); } dsl_dataset_rele(ds->ds_prev, ds); ds->ds_prev = ds_prev = NULL; } } /* * This must be done after the dsl_traverse(), because it will * re-open the objset. 
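 *
 * Roadmap for the cleanup that follows (a hedged restatement): a
 * head dataset is unlinked from its dsl_dir and its snapshot-name
 * ZAP destroyed, while a snapshot is instead removed from its
 * head's snapshot namespace.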
 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}

static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}

int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg. If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for a conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the dataset's name is not too long.
Name consists * of the dataset's length + 1 for the @-sign + snapshot name's length */ if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN) return (ENAMETOOLONG); err = dsl_dataset_snapshot_reserve_space(ds, tx); if (err) return (err); ds->ds_trysnap_txg = tx->tx_txg; return (0); } void dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; const char *snapname = arg2; dsl_pool_t *dp = ds->ds_dir->dd_pool; dmu_buf_t *dbuf; dsl_dataset_phys_t *dsphys; uint64_t dsobj, crtxg; objset_t *mos = dp->dp_meta_objset; int err; ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); /* * The origin's ds_creation_txg has to be < TXG_INITIAL */ if (strcmp(snapname, ORIGIN_DIR_NAME) == 0) crtxg = 1; else crtxg = tx->tx_txg; dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); dmu_buf_will_dirty(dbuf, tx); dsphys = dbuf->db_data; bzero(dsphys, sizeof (dsl_dataset_phys_t)); dsphys->ds_dir_obj = ds->ds_dir->dd_object; dsphys->ds_fsid_guid = unique_create(); (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, sizeof (dsphys->ds_guid)); dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj; dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg; dsphys->ds_next_snap_obj = ds->ds_object; dsphys->ds_num_children = 1; dsphys->ds_creation_time = gethrestime_sec(); dsphys->ds_creation_txg = crtxg; dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj; dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes; dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes; dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes; dsphys->ds_flags = ds->ds_phys->ds_flags; dsphys->ds_bp = ds->ds_phys->ds_bp; dmu_buf_rele(dbuf, FTAG); ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0); if (ds->ds_prev) { uint64_t next_clones_obj = ds->ds_prev->ds_phys->ds_next_clones_obj; ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object || ds->ds_prev->ds_phys->ds_num_children > 1); if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, ds->ds_prev->ds_phys->ds_creation_txg); ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj; } else if (next_clones_obj != 0) { remove_from_next_clones(ds->ds_prev, dsphys->ds_next_snap_obj, tx); VERIFY3U(0, ==, zap_add_int(mos, next_clones_obj, dsobj, tx)); } } /* * If we have a reference-reservation on this dataset, we will * need to increase the amount of refreservation being charged * since our unique space is going to zero. 
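 *
 * Worked example (numbers invented for illustration): with
 * refreservation = 10G and ds_unique_bytes = 3G, the snapshot
 * absorbs that 3G of unique space, so delta = MIN(3G, 10G) = 3G
 * is charged to DD_USED_REFRSRV to keep the full 10G reserved.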
 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}

void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_objset, zio, tx);
}

static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t count = 0;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	nvlist_t *propval;
	nvlist_t *val;

	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * There may be missing entries in ds_next_clones_obj
	 * due to a bug in a previous version of the code.
	 * Only trust it if it has the right number of entries.
*/ if (ds->ds_phys->ds_next_clones_obj != 0) { ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj, &count)); } if (count != ds->ds_phys->ds_num_children - 1) { goto fail; } for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { dsl_dataset_t *clone; char buf[ZFS_MAXNAMELEN]; if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool, za.za_first_integer, FTAG, &clone) != 0) { goto fail; } dsl_dir_name(clone->ds_dir, buf); VERIFY(nvlist_add_boolean(val, buf) == 0); dsl_dataset_rele(clone, FTAG); } zap_cursor_fini(&zc); VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0); VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES), propval) == 0); fail: nvlist_free(val); nvlist_free(propval); rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); } void dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv) { uint64_t refd, avail, uobjs, aobjs, ratio; dsl_dir_stats(ds->ds_dir, nv); dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION, ds->ds_phys->ds_creation_time); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG, ds->ds_phys->ds_creation_txg); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA, ds->ds_quota); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION, ds->ds_reserved); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID, ds->ds_phys->ds_guid); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE, ds->ds_phys->ds_unique_bytes); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID, ds->ds_object); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS, ds->ds_userrefs); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY, DS_IS_DEFER_DESTROY(ds) ? 1 : 0); if (ds->ds_phys->ds_prev_snap_obj != 0) { uint64_t written, comp, uncomp; dsl_pool_t *dp = ds->ds_dir->dd_pool; dsl_dataset_t *prev; rw_enter(&dp->dp_config_rwlock, RW_READER); int err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj, FTAG, &prev); rw_exit(&dp->dp_config_rwlock); if (err == 0) { err = dsl_dataset_space_written(prev, ds, &written, &comp, &uncomp); dsl_dataset_rele(prev, FTAG); if (err == 0) { dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN, written); } } } ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 : (ds->ds_phys->ds_uncompressed_bytes * 100 / ds->ds_phys->ds_compressed_bytes); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio); if (ds->ds_phys->ds_next_snap_obj) { /* * This is a snapshot; override the dd's space used with * our unique space and compression ratio. */ dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, ds->ds_phys->ds_unique_bytes); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio); get_clones_stat(ds, nv); } } void dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat) { stat->dds_creation_txg = ds->ds_phys->ds_creation_txg; stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT; stat->dds_guid = ds->ds_phys->ds_guid; if (ds->ds_phys->ds_next_snap_obj) { stat->dds_is_snapshot = B_TRUE; stat->dds_num_clones = ds->ds_phys->ds_num_children - 1; } else { stat->dds_is_snapshot = B_FALSE; stat->dds_num_clones = 0; } /* clone origin is really a dsl_dir thing... 
*/ rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); if (dsl_dir_is_clone(ds->ds_dir)) { dsl_dataset_t *ods; VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool, ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods)); dsl_dataset_name(ods, stat->dds_origin); dsl_dataset_drop_ref(ods, FTAG); } else { stat->dds_origin[0] = '\0'; } rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); } uint64_t dsl_dataset_fsid_guid(dsl_dataset_t *ds) { return (ds->ds_fsid_guid); } void dsl_dataset_space(dsl_dataset_t *ds, uint64_t *refdbytesp, uint64_t *availbytesp, uint64_t *usedobjsp, uint64_t *availobjsp) { *refdbytesp = ds->ds_phys->ds_used_bytes; *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE); if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes; if (ds->ds_quota != 0) { /* * Adjust available bytes according to refquota */ if (*refdbytesp < ds->ds_quota) *availbytesp = MIN(*availbytesp, ds->ds_quota - *refdbytesp); else *availbytesp = 0; } *usedobjsp = ds->ds_phys->ds_bp.blk_fill; *availobjsp = DN_MAX_OBJECT - *usedobjsp; } boolean_t dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds) { dsl_pool_t *dp = ds->ds_dir->dd_pool; ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || dsl_pool_sync_context(dp)); if (ds->ds_prev == NULL) return (B_FALSE); if (ds->ds_phys->ds_bp.blk_birth > ds->ds_prev->ds_phys->ds_creation_txg) { objset_t *os, *os_prev; /* * It may be that only the ZIL differs, because it was * reset in the head. Don't count that as being * modified. */ if (dmu_objset_from_ds(ds, &os) != 0) return (B_TRUE); if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0) return (B_TRUE); return (bcmp(&os->os_phys->os_meta_dnode, &os_prev->os_phys->os_meta_dnode, sizeof (os->os_phys->os_meta_dnode)) != 0); } return (B_FALSE); } /* ARGSUSED */ static int dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; char *newsnapname = arg2; dsl_dir_t *dd = ds->ds_dir; dsl_dataset_t *hds; uint64_t val; int err; err = dsl_dataset_hold_obj(dd->dd_pool, dd->dd_phys->dd_head_dataset_obj, FTAG, &hds); if (err) return (err); /* new name better not be in use */ err = dsl_dataset_snap_lookup(hds, newsnapname, &val); dsl_dataset_rele(hds, FTAG); if (err == 0) err = EEXIST; else if (err == ENOENT) err = 0; /* dataset name + 1 for the "@" + the new snapshot name must fit */ if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN) err = ENAMETOOLONG; return (err); } static void dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx) { char oldname[MAXPATHLEN], newname[MAXPATHLEN]; dsl_dataset_t *ds = arg1; const char *newsnapname = arg2; dsl_dir_t *dd = ds->ds_dir; objset_t *mos = dd->dd_pool->dp_meta_objset; dsl_dataset_t *hds; int err; ASSERT(ds->ds_phys->ds_next_snap_obj != 0); VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dd->dd_phys->dd_head_dataset_obj, FTAG, &hds)); VERIFY(0 == dsl_dataset_get_snapname(ds)); err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx); ASSERT3U(err, ==, 0); dsl_dataset_name(ds, oldname); mutex_enter(&ds->ds_lock); (void) strcpy(ds->ds_snapname, newsnapname); mutex_exit(&ds->ds_lock); err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname, 8, 1, &ds->ds_object, tx); ASSERT3U(err, ==, 0); dsl_dataset_name(ds, newname); #ifdef _KERNEL zvol_rename_minors(oldname, newname); #endif spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx, "dataset = %llu", ds->ds_object); dsl_dataset_rele(hds, FTAG); } struct renamesnaparg { 
dsl_sync_task_group_t *dstg; char failed[MAXPATHLEN]; char *oldsnap; char *newsnap; }; static int dsl_snapshot_rename_one(const char *name, void *arg) { struct renamesnaparg *ra = arg; dsl_dataset_t *ds = NULL; char *snapname; int err; snapname = kmem_asprintf("%s@%s", name, ra->oldsnap); (void) strlcpy(ra->failed, snapname, sizeof (ra->failed)); /* * For recursive snapshot renames the parent won't be changing * so we just pass name for both the to/from argument. */ err = zfs_secpolicy_rename_perms(snapname, snapname, CRED()); if (err != 0) { strfree(snapname); return (err == ENOENT ? 0 : err); } #ifdef _KERNEL /* * For all filesystems undergoing rename, we'll need to unmount it. */ (void) zfs_unmount_snap(snapname, NULL); #endif err = dsl_dataset_hold(snapname, ra->dstg, &ds); strfree(snapname); if (err != 0) return (err == ENOENT ? 0 : err); dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check, dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0); return (0); } static int dsl_recursive_rename(char *oldname, const char *newname) { int err; struct renamesnaparg *ra; dsl_sync_task_t *dst; spa_t *spa; char *cp, *fsname = spa_strdup(oldname); int len = strlen(oldname) + 1; /* truncate the snapshot name to get the fsname */ cp = strchr(fsname, '@'); *cp = '\0'; err = spa_open(fsname, &spa, FTAG); if (err) { kmem_free(fsname, len); return (err); } ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP); ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); ra->oldsnap = strchr(oldname, '@') + 1; ra->newsnap = strchr(newname, '@') + 1; *ra->failed = '\0'; err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra, DS_FIND_CHILDREN); kmem_free(fsname, len); if (err == 0) { err = dsl_sync_task_group_wait(ra->dstg); } for (dst = list_head(&ra->dstg->dstg_tasks); dst; dst = list_next(&ra->dstg->dstg_tasks, dst)) { dsl_dataset_t *ds = dst->dst_arg1; if (dst->dst_err) { dsl_dir_name(ds->ds_dir, ra->failed); (void) strlcat(ra->failed, "@", sizeof (ra->failed)); (void) strlcat(ra->failed, ra->newsnap, sizeof (ra->failed)); } dsl_dataset_rele(ds, ra->dstg); } if (err) (void) strlcpy(oldname, ra->failed, sizeof (ra->failed)); dsl_sync_task_group_destroy(ra->dstg); kmem_free(ra, sizeof (struct renamesnaparg)); spa_close(spa, FTAG); return (err); } static int dsl_valid_rename(const char *oldname, void *arg) { int delta = *(int *)arg; if (strlen(oldname) + delta >= MAXNAMELEN) return (ENAMETOOLONG); return (0); } #pragma weak dmu_objset_rename = dsl_dataset_rename int dsl_dataset_rename(char *oldname, const char *newname, int flags) { dsl_dir_t *dd; dsl_dataset_t *ds; const char *tail; int err; err = dsl_dir_open(oldname, FTAG, &dd, &tail); if (err) return (err); if (tail == NULL) { int delta = strlen(newname) - strlen(oldname); /* if we're growing, validate child name lengths */ if (delta > 0) err = dmu_objset_find(oldname, dsl_valid_rename, &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); if (err == 0) err = dsl_dir_rename(dd, newname, flags); dsl_dir_close(dd, FTAG); return (err); } if (tail[0] != '@') { /* the name ended in a nonexistent component */ dsl_dir_close(dd, FTAG); return (ENOENT); } dsl_dir_close(dd, FTAG); /* new name must be snapshot in same filesystem */ tail = strchr(newname, '@'); if (tail == NULL) return (EINVAL); tail++; if (strncmp(oldname, newname, tail - newname) != 0) return (EXDEV); if (flags & ZFS_RENAME_RECURSIVE) { err = dsl_recursive_rename(oldname, newname); } else { err = dsl_dataset_hold(oldname, FTAG, &ds); if (err) return (err); err = 
dsl_sync_task_do(ds->ds_dir->dd_pool, dsl_dataset_snapshot_rename_check, dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1); dsl_dataset_rele(ds, FTAG); } return (err); } struct promotenode { list_node_t link; dsl_dataset_t *ds; }; struct promotearg { list_t shared_snaps, origin_snaps, clone_snaps; dsl_dataset_t *origin_origin; uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap; char *err_ds; }; static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep); static boolean_t snaplist_unstable(list_t *l); static int dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *hds = arg1; struct promotearg *pa = arg2; struct promotenode *snap = list_head(&pa->shared_snaps); dsl_dataset_t *origin_ds = snap->ds; int err; uint64_t unused; /* Check that it is a real clone */ if (!dsl_dir_is_clone(hds->ds_dir)) return (EINVAL); /* Since this is so expensive, don't do the preliminary check */ if (!dmu_tx_is_syncing(tx)) return (0); if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) return (EXDEV); /* compute origin's new unique space */ snap = list_tail(&pa->clone_snaps); ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); dsl_deadlist_space_range(&snap->ds->ds_deadlist, origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique, &unused, &unused); /* * Walk the snapshots that we are moving * * Compute space to transfer. Consider the incremental changes * to used for each snapshot: * (my used) = (prev's used) + (blocks born) - (blocks killed) * So each snapshot gave birth to: * (blocks born) = (my used) - (prev's used) + (blocks killed) * So a sequence would look like: * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0) * Which simplifies to: * uN + kN + kN-1 + ... + k1 + k0 * Note however, if we stop before we reach the ORIGIN we get: * uN + kN + kN-1 + ... + kM - uM-1 */ pa->used = origin_ds->ds_phys->ds_used_bytes; pa->comp = origin_ds->ds_phys->ds_compressed_bytes; pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes; for (snap = list_head(&pa->shared_snaps); snap; snap = list_next(&pa->shared_snaps, snap)) { uint64_t val, dlused, dlcomp, dluncomp; dsl_dataset_t *ds = snap->ds; /* Check that the snapshot name does not conflict */ VERIFY(0 == dsl_dataset_get_snapname(ds)); err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val); if (err == 0) { err = EEXIST; goto out; } if (err != ENOENT) goto out; /* The very first snapshot does not have a deadlist */ if (ds->ds_phys->ds_prev_snap_obj == 0) continue; dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp); pa->used += dlused; pa->comp += dlcomp; pa->uncomp += dluncomp; } /* * If we are a clone of a clone then we never reached ORIGIN, * so we need to subtract out the clone origin's used space. */ if (pa->origin_origin) { pa->used -= pa->origin_origin->ds_phys->ds_used_bytes; pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes; pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes; } /* Check that there is enough space here */ err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir, pa->used); if (err) return (err); /* * Compute the amounts of space that will be used by snapshots * after the promotion (for both origin and clone). For each, * it is the amount of space that will be on all of their * deadlists (that was not born before their new origin). 
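 *
 * Hedged sketch of the bookkeeping below: for the clone, deadlist
 * space in shared_snaps and clone_snaps born after dd_origin_txg
 * accumulates into cloneusedsnap; for the origin, space in
 * origin_snaps born after the origin's creation txg goes to
 * originusedsnap. The resulting deltas are applied later in
 * dsl_dataset_promote_sync().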
*/ if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { uint64_t space; /* * Note, typically this will not be a clone of a clone, * so dd_origin_txg will be < TXG_INITIAL, so * these snaplist_space() -> dsl_deadlist_space_range() * calls will be fast because they do not have to * iterate over all bps. */ snap = list_head(&pa->origin_snaps); err = snaplist_space(&pa->shared_snaps, snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap); if (err) return (err); err = snaplist_space(&pa->clone_snaps, snap->ds->ds_dir->dd_origin_txg, &space); if (err) return (err); pa->cloneusedsnap += space; } if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { err = snaplist_space(&pa->origin_snaps, origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap); if (err) return (err); } return (0); out: pa->err_ds = snap->ds->ds_snapname; return (err); } static void dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *hds = arg1; struct promotearg *pa = arg2; struct promotenode *snap = list_head(&pa->shared_snaps); dsl_dataset_t *origin_ds = snap->ds; dsl_dataset_t *origin_head; dsl_dir_t *dd = hds->ds_dir; dsl_pool_t *dp = hds->ds_dir->dd_pool; dsl_dir_t *odd = NULL; uint64_t oldnext_obj; int64_t delta; ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)); snap = list_head(&pa->origin_snaps); origin_head = snap->ds; /* * We need to explicitly open odd, since origin_ds's dd will be * changing. */ VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object, NULL, FTAG, &odd)); /* change origin's next snap */ dmu_buf_will_dirty(origin_ds->ds_dbuf, tx); oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj; snap = list_tail(&pa->clone_snaps); ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object; /* change the origin's next clone */ if (origin_ds->ds_phys->ds_next_clones_obj) { remove_from_next_clones(origin_ds, snap->ds->ds_object, tx); VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, origin_ds->ds_phys->ds_next_clones_obj, oldnext_obj, tx)); } /* change origin */ dmu_buf_will_dirty(dd->dd_dbuf, tx); ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object); dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj; dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg; dmu_buf_will_dirty(odd->dd_dbuf, tx); odd->dd_phys->dd_origin_obj = origin_ds->ds_object; origin_head->ds_dir->dd_origin_txg = origin_ds->ds_phys->ds_creation_txg; /* change dd_clone entries */ if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, odd->dd_phys->dd_clones, hds->ds_object, tx)); VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, pa->origin_origin->ds_dir->dd_phys->dd_clones, hds->ds_object, tx)); VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, pa->origin_origin->ds_dir->dd_phys->dd_clones, origin_head->ds_object, tx)); if (dd->dd_phys->dd_clones == 0) { dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx); } VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, dd->dd_phys->dd_clones, origin_head->ds_object, tx)); } /* move snapshots to this dir */ for (snap = list_head(&pa->shared_snaps); snap; snap = list_next(&pa->shared_snaps, snap)) { dsl_dataset_t *ds = snap->ds; /* unregister props as dsl_dir is changing */ if (ds->ds_objset) { dmu_objset_evict(ds->ds_objset); ds->ds_objset = NULL; } /* move snap name entry */ VERIFY(0 == dsl_dataset_get_snapname(ds)); VERIFY(0 == dsl_dataset_snap_remove(origin_head, 
ds->ds_snapname, tx)); VERIFY(0 == zap_add(dp->dp_meta_objset, hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname, 8, 1, &ds->ds_object, tx)); /* change containing dsl_dir */ dmu_buf_will_dirty(ds->ds_dbuf, tx); ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object); ds->ds_phys->ds_dir_obj = dd->dd_object; ASSERT3P(ds->ds_dir, ==, odd); dsl_dir_close(ds->ds_dir, ds); VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object, NULL, ds, &ds->ds_dir)); /* move any clone references */ if (ds->ds_phys->ds_next_clones_obj && spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { zap_cursor_t zc; zap_attribute_t za; for (zap_cursor_init(&zc, dp->dp_meta_objset, ds->ds_phys->ds_next_clones_obj); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { dsl_dataset_t *cnds; uint64_t o; if (za.za_first_integer == oldnext_obj) { /* * We've already moved the * origin's reference. */ continue; } VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, za.za_first_integer, FTAG, &cnds)); o = cnds->ds_dir->dd_phys->dd_head_dataset_obj; VERIFY3U(zap_remove_int(dp->dp_meta_objset, odd->dd_phys->dd_clones, o, tx), ==, 0); VERIFY3U(zap_add_int(dp->dp_meta_objset, dd->dd_phys->dd_clones, o, tx), ==, 0); dsl_dataset_rele(cnds, FTAG); } zap_cursor_fini(&zc); } ASSERT3U(dsl_prop_numcb(ds), ==, 0); } /* * Change space accounting. * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either * both be valid, or both be 0 (resulting in delta == 0). This * is true for each of {clone,origin} independently. */ delta = pa->cloneusedsnap - dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; ASSERT3S(delta, >=, 0); ASSERT3U(pa->used, >=, delta); dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx); dsl_dir_diduse_space(dd, DD_USED_HEAD, pa->used - delta, pa->comp, pa->uncomp, tx); delta = pa->originusedsnap - odd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; ASSERT3S(delta, <=, 0); ASSERT3U(pa->used, >=, -delta); dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx); dsl_dir_diduse_space(odd, DD_USED_HEAD, -pa->used - delta, -pa->comp, -pa->uncomp, tx); origin_ds->ds_phys->ds_unique_bytes = pa->unique; /* log history record */ spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx, "dataset = %llu", hds->ds_object); dsl_dir_close(odd, FTAG); } static char *snaplist_tag = "snaplist"; /* * Make a list of dsl_dataset_t's for the snapshots between first_obj * (exclusive) and last_obj (inclusive). The list will be in reverse * order (last_obj will be the list_head()). If first_obj == 0, do all * snapshots back to this dataset's origin. 
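 * For example, dsl_dataset_promote() below builds its shared_snaps list
 * with snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
 * &pa.shared_snaps): every snapshot from the clone's origin back to the
 * ORIGIN, each owned for the duration of the promote.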
*/ static int snaplist_make(dsl_pool_t *dp, boolean_t own, uint64_t first_obj, uint64_t last_obj, list_t *l) { uint64_t obj = last_obj; ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock)); list_create(l, sizeof (struct promotenode), offsetof(struct promotenode, link)); while (obj != first_obj) { dsl_dataset_t *ds; struct promotenode *snap; int err; if (own) { err = dsl_dataset_own_obj(dp, obj, 0, snaplist_tag, &ds); if (err == 0) dsl_dataset_make_exclusive(ds, snaplist_tag); } else { err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds); } if (err == ENOENT) { /* lost race with snapshot destroy */ struct promotenode *last = list_tail(l); ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj); obj = last->ds->ds_phys->ds_prev_snap_obj; continue; } else if (err) { return (err); } if (first_obj == 0) first_obj = ds->ds_dir->dd_phys->dd_origin_obj; snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP); snap->ds = ds; list_insert_tail(l, snap); obj = ds->ds_phys->ds_prev_snap_obj; } return (0); } static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep) { struct promotenode *snap; *spacep = 0; for (snap = list_head(l); snap; snap = list_next(l, snap)) { uint64_t used, comp, uncomp; dsl_deadlist_space_range(&snap->ds->ds_deadlist, mintxg, UINT64_MAX, &used, &comp, &uncomp); *spacep += used; } return (0); } static void snaplist_destroy(list_t *l, boolean_t own) { struct promotenode *snap; if (!l || !list_link_active(&l->list_head)) return; while ((snap = list_tail(l)) != NULL) { list_remove(l, snap); if (own) dsl_dataset_disown(snap->ds, snaplist_tag); else dsl_dataset_rele(snap->ds, snaplist_tag); kmem_free(snap, sizeof (struct promotenode)); } list_destroy(l); } /* * Promote a clone. Nomenclature note: * "clone" or "cds": the original clone which is being promoted * "origin" or "ods": the snapshot which is originally clone's origin * "origin head" or "ohds": the dataset which is the head * (filesystem/volume) for the origin * "origin origin": the origin of the origin's filesystem (typically * NULL, indicating that the clone is not a clone of a clone). */ int dsl_dataset_promote(const char *name, char *conflsnap) { dsl_dataset_t *ds; dsl_dir_t *dd; dsl_pool_t *dp; dmu_object_info_t doi; struct promotearg pa = { 0 }; struct promotenode *snap; int err; err = dsl_dataset_hold(name, FTAG, &ds); if (err) return (err); dd = ds->ds_dir; dp = dd->dd_pool; err = dmu_object_info(dp->dp_meta_objset, ds->ds_phys->ds_snapnames_zapobj, &doi); if (err) { dsl_dataset_rele(ds, FTAG); return (err); } if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) { dsl_dataset_rele(ds, FTAG); return (EINVAL); } /* * We are going to inherit all the snapshots taken before our * origin (i.e., our new origin will be our parent's origin). * Take ownership of them so that we can rename them into our * namespace. 
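 * For example (hypothetical names): promoting pool/clone, a clone of
 * pool/fs@s2, moves pool/fs@s1 and pool/fs@s2 under pool/clone, after
 * which pool/fs is itself a clone of pool/clone@s2.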
*/ rw_enter(&dp->dp_config_rwlock, RW_READER); err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj, &pa.shared_snaps); if (err != 0) goto out; err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps); if (err != 0) goto out; snap = list_head(&pa.shared_snaps); ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj); err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj, snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps); if (err != 0) goto out; if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) { err = dsl_dataset_hold_obj(dp, snap->ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &pa.origin_origin); if (err != 0) goto out; } out: rw_exit(&dp->dp_config_rwlock); /* * Add in 128x the snapnames zapobj size, since we will be moving * a bunch of snapnames to the promoted ds, and dirtying their * bonus buffers. */ if (err == 0) { err = dsl_sync_task_do(dp, dsl_dataset_promote_check, dsl_dataset_promote_sync, ds, &pa, 2 + 2 * doi.doi_physical_blocks_512); if (err && pa.err_ds && conflsnap) (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN); } snaplist_destroy(&pa.shared_snaps, B_TRUE); snaplist_destroy(&pa.clone_snaps, B_FALSE); snaplist_destroy(&pa.origin_snaps, B_FALSE); if (pa.origin_origin) dsl_dataset_rele(pa.origin_origin, FTAG); dsl_dataset_rele(ds, FTAG); return (err); } struct cloneswaparg { dsl_dataset_t *cds; /* clone dataset */ dsl_dataset_t *ohds; /* origin's head dataset */ boolean_t force; int64_t unused_refres_delta; /* change in unconsumed refreservation */ }; /* ARGSUSED */ static int dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx) { struct cloneswaparg *csa = arg1; /* they should both be heads */ if (dsl_dataset_is_snapshot(csa->cds) || dsl_dataset_is_snapshot(csa->ohds)) return (EINVAL); /* the branch point should be just before them */ if (csa->cds->ds_prev != csa->ohds->ds_prev) return (EINVAL); /* cds should be the clone (unless they are unrelated) */ if (csa->cds->ds_prev != NULL && csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap && csa->ohds->ds_object != csa->cds->ds_prev->ds_phys->ds_next_snap_obj) return (EINVAL); /* the clone should be a child of the origin */ if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir) return (EINVAL); /* ohds shouldn't be modified unless 'force' */ if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds)) return (ETXTBSY); /* adjust amount of any unconsumed refreservation */ csa->unused_refres_delta = (int64_t)MIN(csa->ohds->ds_reserved, csa->ohds->ds_phys->ds_unique_bytes) - (int64_t)MIN(csa->ohds->ds_reserved, csa->cds->ds_phys->ds_unique_bytes); if (csa->unused_refres_delta > 0 && csa->unused_refres_delta > dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE)) return (ENOSPC); if (csa->ohds->ds_quota != 0 && csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota) return (EDQUOT); return (0); } /* ARGSUSED */ static void dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx) { struct cloneswaparg *csa = arg1; dsl_pool_t *dp = csa->cds->ds_dir->dd_pool; ASSERT(csa->cds->ds_reserved == 0); ASSERT(csa->ohds->ds_quota == 0 || csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota); dmu_buf_will_dirty(csa->cds->ds_dbuf, tx); dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx); if (csa->cds->ds_objset != NULL) { dmu_objset_evict(csa->cds->ds_objset); csa->cds->ds_objset = NULL; } if (csa->ohds->ds_objset != NULL) { dmu_objset_evict(csa->ohds->ds_objset); csa->ohds->ds_objset = NULL; } /* * Reset origin's unique bytes, if it exists. 
*/ if (csa->cds->ds_prev) { dsl_dataset_t *origin = csa->cds->ds_prev; uint64_t comp, uncomp; dmu_buf_will_dirty(origin->ds_dbuf, tx); dsl_deadlist_space_range(&csa->cds->ds_deadlist, origin->ds_phys->ds_prev_snap_txg, UINT64_MAX, &origin->ds_phys->ds_unique_bytes, &comp, &uncomp); } /* swap blkptrs */ { blkptr_t tmp; tmp = csa->ohds->ds_phys->ds_bp; csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp; csa->cds->ds_phys->ds_bp = tmp; } /* set dd_*_bytes */ { int64_t dused, dcomp, duncomp; uint64_t cdl_used, cdl_comp, cdl_uncomp; uint64_t odl_used, odl_comp, odl_uncomp; ASSERT3U(csa->cds->ds_dir->dd_phys-> dd_used_breakdown[DD_USED_SNAP], ==, 0); dsl_deadlist_space(&csa->cds->ds_deadlist, &cdl_used, &cdl_comp, &cdl_uncomp); dsl_deadlist_space(&csa->ohds->ds_deadlist, &odl_used, &odl_comp, &odl_uncomp); dused = csa->cds->ds_phys->ds_used_bytes + cdl_used - (csa->ohds->ds_phys->ds_used_bytes + odl_used); dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp - (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp); duncomp = csa->cds->ds_phys->ds_uncompressed_bytes + cdl_uncomp - (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp); dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD, dused, dcomp, duncomp, tx); dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD, -dused, -dcomp, -duncomp, tx); /* * The difference in the space used by snapshots is the * difference in snapshot space due to the head's * deadlist (since that's the only thing that's * changing that affects the snapused). */ dsl_deadlist_space_range(&csa->cds->ds_deadlist, csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, &cdl_used, &cdl_comp, &cdl_uncomp); dsl_deadlist_space_range(&csa->ohds->ds_deadlist, csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, &odl_used, &odl_comp, &odl_uncomp); dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used, DD_USED_HEAD, DD_USED_SNAP, tx); } /* swap ds_*_bytes */ SWITCH64(csa->ohds->ds_phys->ds_used_bytes, csa->cds->ds_phys->ds_used_bytes); SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes, csa->cds->ds_phys->ds_compressed_bytes); SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes, csa->cds->ds_phys->ds_uncompressed_bytes); SWITCH64(csa->ohds->ds_phys->ds_unique_bytes, csa->cds->ds_phys->ds_unique_bytes); /* apply any parent delta for change in unconsumed refreservation */ dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV, csa->unused_refres_delta, 0, 0, tx); /* * Swap deadlists. */ dsl_deadlist_close(&csa->cds->ds_deadlist); dsl_deadlist_close(&csa->ohds->ds_deadlist); SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj, csa->cds->ds_phys->ds_deadlist_obj); dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset, csa->cds->ds_phys->ds_deadlist_obj); dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset, csa->ohds->ds_phys->ds_deadlist_obj); dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx); } /* * Swap 'clone' with its origin head datasets. Used at the end of "zfs * recv" into an existing fs to swizzle the file system to the new * version, and by "zfs rollback". Can also be used to swap two * independent head datasets if neither has any snapshots. */ int dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, boolean_t force) { struct cloneswaparg csa; int error; ASSERT(clone->ds_owner); ASSERT(origin_head->ds_owner); retry: /* * Need exclusive access for the swap. If we're swapping these * datasets back after an error, we already hold the locks. 
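 * (The tryenter/back-off loop below avoids deadlocking against another
 * thread that takes the same two ds_rwlocks in the opposite order.)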
*/ if (!RW_WRITE_HELD(&clone->ds_rwlock)) rw_enter(&clone->ds_rwlock, RW_WRITER); if (!RW_WRITE_HELD(&origin_head->ds_rwlock) && !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) { rw_exit(&clone->ds_rwlock); rw_enter(&origin_head->ds_rwlock, RW_WRITER); if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) { rw_exit(&origin_head->ds_rwlock); goto retry; } } csa.cds = clone; csa.ohds = origin_head; csa.force = force; error = dsl_sync_task_do(clone->ds_dir->dd_pool, dsl_dataset_clone_swap_check, dsl_dataset_clone_swap_sync, &csa, NULL, 9); return (error); } /* * Given a pool name and a dataset object number in that pool, * return the name of that dataset. */ int dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf) { spa_t *spa; dsl_pool_t *dp; dsl_dataset_t *ds; int error; if ((error = spa_open(pname, &spa, FTAG)) != 0) return (error); dp = spa_get_dsl(spa); rw_enter(&dp->dp_config_rwlock, RW_READER); if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) { dsl_dataset_name(ds, buf); dsl_dataset_rele(ds, FTAG); } rw_exit(&dp->dp_config_rwlock); spa_close(spa, FTAG); return (error); } int dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv) { int error = 0; ASSERT3S(asize, >, 0); /* * *ref_rsrv is the portion of asize that will come from any * unconsumed refreservation space. */ *ref_rsrv = 0; mutex_enter(&ds->ds_lock); /* * Make a space adjustment for reserved bytes. */ if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) { ASSERT3U(*used, >=, ds->ds_reserved - ds->ds_phys->ds_unique_bytes); *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes); *ref_rsrv = asize - MIN(asize, parent_delta(ds, asize + inflight)); } if (!check_quota || ds->ds_quota == 0) { mutex_exit(&ds->ds_lock); return (0); } /* * If they are requesting more space, and our current estimate * is over quota, they get to try again unless the actual * on-disk is over quota and there are no pending changes (which * may free up space for us). 
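 * For example (hypothetical numbers): with a 10G refquota, 9.5G on disk
 * and 1G of dirty data in flight, the 10.5G estimate is over quota but
 * the on-disk figure is not, so ERESTART lets the caller wait for
 * pending frees and retry; with 10G on disk and nothing in flight,
 * EDQUOT is final.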
*/ if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) { if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota) error = ERESTART; else error = EDQUOT; } mutex_exit(&ds->ds_lock); return (error); } /* ARGSUSED */ static int dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; dsl_prop_setarg_t *psa = arg2; int err; if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA) return (ENOTSUP); if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0) return (err); if (psa->psa_effective_value == 0) return (0); if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes || psa->psa_effective_value < ds->ds_reserved) return (ENOSPC); return (0); } extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *); void dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; dsl_prop_setarg_t *psa = arg2; uint64_t effective_value = psa->psa_effective_value; dsl_prop_set_sync(ds, psa, tx); DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa); if (ds->ds_quota != effective_value) { dmu_buf_will_dirty(ds->ds_dbuf, tx); ds->ds_quota = effective_value; spa_history_log_internal(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ", (longlong_t)ds->ds_quota, ds->ds_object); } } int dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota) { dsl_dataset_t *ds; dsl_prop_setarg_t psa; int err; dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota); err = dsl_dataset_hold(dsname, FTAG, &ds); if (err) return (err); /* * If someone removes a file, then tries to set the quota, we * want to make sure the file freeing takes effect. */ txg_wait_open(ds->ds_dir->dd_pool, 0); err = dsl_sync_task_do(ds->ds_dir->dd_pool, dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync, ds, &psa, 0); dsl_dataset_rele(ds, FTAG); return (err); } static int dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; dsl_prop_setarg_t *psa = arg2; uint64_t effective_value; uint64_t unique; int err; if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFRESERVATION) return (ENOTSUP); if (dsl_dataset_is_snapshot(ds)) return (EINVAL); if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0) return (err); effective_value = psa->psa_effective_value; /* * If we are doing the preliminary check in open context, the * space estimates may be inaccurate. 
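 * For example (hypothetical numbers): raising refreservation from 0 to
 * 5G on a dataset with 1G of unique data asks the parent to absorb
 * MAX(1G, 5G) - MAX(1G, 0) = 4G, so the check below returns ENOSPC if
 * less than 4G is available.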
*/ if (!dmu_tx_is_syncing(tx)) return (0); mutex_enter(&ds->ds_lock); if (!DS_UNIQUE_IS_ACCURATE(ds)) dsl_dataset_recalc_head_uniq(ds); unique = ds->ds_phys->ds_unique_bytes; mutex_exit(&ds->ds_lock); if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) { uint64_t delta = MAX(unique, effective_value) - MAX(unique, ds->ds_reserved); if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) return (ENOSPC); if (ds->ds_quota > 0 && effective_value > ds->ds_quota) return (ENOSPC); } return (0); } static void dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; dsl_prop_setarg_t *psa = arg2; uint64_t effective_value = psa->psa_effective_value; uint64_t unique; int64_t delta; dsl_prop_set_sync(ds, psa, tx); DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa); dmu_buf_will_dirty(ds->ds_dbuf, tx); mutex_enter(&ds->ds_dir->dd_lock); mutex_enter(&ds->ds_lock); ASSERT(DS_UNIQUE_IS_ACCURATE(ds)); unique = ds->ds_phys->ds_unique_bytes; delta = MAX(0, (int64_t)(effective_value - unique)) - MAX(0, (int64_t)(ds->ds_reserved - unique)); ds->ds_reserved = effective_value; mutex_exit(&ds->ds_lock); dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx); mutex_exit(&ds->ds_dir->dd_lock); spa_history_log_internal(LOG_DS_REFRESERV, ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu", (longlong_t)effective_value, ds->ds_object); } int dsl_dataset_set_reservation(const char *dsname, zprop_source_t source, uint64_t reservation) { dsl_dataset_t *ds; dsl_prop_setarg_t psa; int err; dsl_prop_setarg_init_uint64(&psa, "refreservation", source, &reservation); err = dsl_dataset_hold(dsname, FTAG, &ds); if (err) return (err); err = dsl_sync_task_do(ds->ds_dir->dd_pool, dsl_dataset_set_reservation_check, dsl_dataset_set_reservation_sync, ds, &psa, 0); dsl_dataset_rele(ds, FTAG); return (err); } typedef struct zfs_hold_cleanup_arg { dsl_pool_t *dp; uint64_t dsobj; char htag[MAXNAMELEN]; } zfs_hold_cleanup_arg_t; static void dsl_dataset_user_release_onexit(void *arg) { zfs_hold_cleanup_arg_t *ca = arg; (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag, B_TRUE); kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t)); } void dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag, minor_t minor) { zfs_hold_cleanup_arg_t *ca; ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP); ca->dp = ds->ds_dir->dd_pool; ca->dsobj = ds->ds_object; (void) strlcpy(ca->htag, htag, sizeof (ca->htag)); VERIFY3U(0, ==, zfs_onexit_add_cb(minor, dsl_dataset_user_release_onexit, ca, NULL)); } /* * If you add new checks here, you may need to add * additional checks to the "temporary" case in * snapshot_check() in dmu_objset.c. 
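 * (For illustration: "zfs hold mytag pool/fs@snap" arrives here with
 * htag == "mytag"; taking the same tag twice on one snapshot fails the
 * uniqueness check below with EEXIST.)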
*/ static int dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; struct dsl_ds_holdarg *ha = arg2; char *htag = ha->htag; objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; int error = 0; if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) return (ENOTSUP); if (!dsl_dataset_is_snapshot(ds)) return (EINVAL); /* tags must be unique */ mutex_enter(&ds->ds_lock); if (ds->ds_phys->ds_userrefs_obj) { error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag, 8, 1, tx); if (error == 0) error = EEXIST; else if (error == ENOENT) error = 0; } mutex_exit(&ds->ds_lock); if (error == 0 && ha->temphold && strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN) error = E2BIG; return (error); } void dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx) { dsl_dataset_t *ds = arg1; struct dsl_ds_holdarg *ha = arg2; char *htag = ha->htag; dsl_pool_t *dp = ds->ds_dir->dd_pool; objset_t *mos = dp->dp_meta_objset; uint64_t now = gethrestime_sec(); uint64_t zapobj; mutex_enter(&ds->ds_lock); if (ds->ds_phys->ds_userrefs_obj == 0) { /* * This is the first user hold for this dataset. Create * the userrefs zap object. */ dmu_buf_will_dirty(ds->ds_dbuf, tx); zapobj = ds->ds_phys->ds_userrefs_obj = zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx); } else { zapobj = ds->ds_phys->ds_userrefs_obj; } ds->ds_userrefs++; mutex_exit(&ds->ds_lock); VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx)); if (ha->temphold) { VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object, htag, &now, tx)); } spa_history_log_internal(LOG_DS_USER_HOLD, dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag, (int)ha->temphold, ds->ds_object); } static int dsl_dataset_user_hold_one(const char *dsname, void *arg) { struct dsl_ds_holdarg *ha = arg; dsl_dataset_t *ds; int error; char *name; /* alloc a buffer to hold dsname@snapname plus terminating NULL */ name = kmem_asprintf("%s@%s", dsname, ha->snapname); error = dsl_dataset_hold(name, ha->dstg, &ds); strfree(name); if (error == 0) { ha->gotone = B_TRUE; dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync, ds, ha, 0); } else if (error == ENOENT && ha->recursive) { error = 0; } else { (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); } return (error); } int dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag, boolean_t temphold) { struct dsl_ds_holdarg *ha; int error; ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); ha->htag = htag; ha->temphold = temphold; error = dsl_sync_task_do(ds->ds_dir->dd_pool, dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync, ds, ha, 0); kmem_free(ha, sizeof (struct dsl_ds_holdarg)); return (error); } int dsl_dataset_user_hold(char *dsname, char *snapname, char *htag, boolean_t recursive, boolean_t temphold, int cleanup_fd) { struct dsl_ds_holdarg *ha; dsl_sync_task_t *dst; spa_t *spa; int error; minor_t minor = 0; if (cleanup_fd != -1) { /* Currently we only support cleanup-on-exit of tempholds. 
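 * (A cleanup_fd of -1 requests no automatic release; otherwise the hold
 * is registered against the fd's minor and released through
 * dsl_dataset_user_release_onexit() when that minor goes away.)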
*/ if (!temphold) return (EINVAL); error = zfs_onexit_fd_hold(cleanup_fd, &minor); if (error) return (error); } ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); error = spa_open(dsname, &spa, FTAG); if (error) { kmem_free(ha, sizeof (struct dsl_ds_holdarg)); if (cleanup_fd != -1) zfs_onexit_fd_rele(cleanup_fd); return (error); } ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); ha->htag = htag; ha->snapname = snapname; ha->recursive = recursive; ha->temphold = temphold; if (recursive) { error = dmu_objset_find(dsname, dsl_dataset_user_hold_one, ha, DS_FIND_CHILDREN); } else { error = dsl_dataset_user_hold_one(dsname, ha); } if (error == 0) error = dsl_sync_task_group_wait(ha->dstg); for (dst = list_head(&ha->dstg->dstg_tasks); dst; dst = list_next(&ha->dstg->dstg_tasks, dst)) { dsl_dataset_t *ds = dst->dst_arg1; if (dst->dst_err) { dsl_dataset_name(ds, ha->failed); *strchr(ha->failed, '@') = '\0'; } else if (error == 0 && minor != 0 && temphold) { /* * If this hold is to be released upon process exit, * register that action now. */ dsl_register_onexit_hold_cleanup(ds, htag, minor); } dsl_dataset_rele(ds, ha->dstg); } if (error == 0 && recursive && !ha->gotone) error = ENOENT; if (error) (void) strlcpy(dsname, ha->failed, sizeof (ha->failed)); dsl_sync_task_group_destroy(ha->dstg); kmem_free(ha, sizeof (struct dsl_ds_holdarg)); spa_close(spa, FTAG); if (cleanup_fd != -1) zfs_onexit_fd_rele(cleanup_fd); return (error); } struct dsl_ds_releasearg { dsl_dataset_t *ds; const char *htag; boolean_t own; /* do we own or just hold ds? */ }; static int dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag, boolean_t *might_destroy) { objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; uint64_t zapobj; uint64_t tmp; int error; *might_destroy = B_FALSE; mutex_enter(&ds->ds_lock); zapobj = ds->ds_phys->ds_userrefs_obj; if (zapobj == 0) { /* The tag can't possibly exist */ mutex_exit(&ds->ds_lock); return (ESRCH); } /* Make sure the tag exists */ error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp); if (error) { mutex_exit(&ds->ds_lock); if (error == ENOENT) error = ESRCH; return (error); } if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 && DS_IS_DEFER_DESTROY(ds)) *might_destroy = B_TRUE; mutex_exit(&ds->ds_lock); return (0); } static int dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx) { struct dsl_ds_releasearg *ra = arg1; dsl_dataset_t *ds = ra->ds; boolean_t might_destroy; int error; if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) return (ENOTSUP); error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy); if (error) return (error); if (might_destroy) { struct dsl_ds_destroyarg dsda = {0}; if (dmu_tx_is_syncing(tx)) { /* * If we're not prepared to remove the snapshot, * we can't allow the release to happen right now. 
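 * (dsl_dataset_user_release_one() takes ownership up front when it sees
 * the release might destroy the snapshot; reaching this point in
 * syncing context without ownership therefore fails with EBUSY.)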
*/ if (!ra->own) return (EBUSY); } dsda.ds = ds; dsda.releasing = B_TRUE; return (dsl_dataset_destroy_check(&dsda, tag, tx)); } return (0); } static void dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx) { struct dsl_ds_releasearg *ra = arg1; dsl_dataset_t *ds = ra->ds; dsl_pool_t *dp = ds->ds_dir->dd_pool; objset_t *mos = dp->dp_meta_objset; uint64_t zapobj; uint64_t dsobj = ds->ds_object; uint64_t refs; int error; mutex_enter(&ds->ds_lock); ds->ds_userrefs--; refs = ds->ds_userrefs; mutex_exit(&ds->ds_lock); error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx); VERIFY(error == 0 || error == ENOENT); zapobj = ds->ds_phys->ds_userrefs_obj; VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx)); if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 && DS_IS_DEFER_DESTROY(ds)) { struct dsl_ds_destroyarg dsda = {0}; ASSERT(ra->own); dsda.ds = ds; dsda.releasing = B_TRUE; /* We already did the destroy_check */ dsl_dataset_destroy_sync(&dsda, tag, tx); } spa_history_log_internal(LOG_DS_USER_RELEASE, dp->dp_spa, tx, "<%s> %lld dataset = %llu", ra->htag, (longlong_t)refs, dsobj); } static int dsl_dataset_user_release_one(const char *dsname, void *arg) { struct dsl_ds_holdarg *ha = arg; struct dsl_ds_releasearg *ra; dsl_dataset_t *ds; int error; void *dtag = ha->dstg; char *name; boolean_t own = B_FALSE; boolean_t might_destroy; /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */ name = kmem_asprintf("%s@%s", dsname, ha->snapname); error = dsl_dataset_hold(name, dtag, &ds); strfree(name); if (error == ENOENT && ha->recursive) return (0); (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); if (error) return (error); ha->gotone = B_TRUE; ASSERT(dsl_dataset_is_snapshot(ds)); error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy); if (error) { dsl_dataset_rele(ds, dtag); return (error); } if (might_destroy) { #ifdef _KERNEL name = kmem_asprintf("%s@%s", dsname, ha->snapname); error = zfs_unmount_snap(name, NULL); strfree(name); if (error) { dsl_dataset_rele(ds, dtag); return (error); } #endif if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) { dsl_dataset_rele(ds, dtag); return (EBUSY); } else { own = B_TRUE; dsl_dataset_make_exclusive(ds, dtag); } } ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP); ra->ds = ds; ra->htag = ha->htag; ra->own = own; dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check, dsl_dataset_user_release_sync, ra, dtag, 0); return (0); } int dsl_dataset_user_release(char *dsname, char *snapname, char *htag, boolean_t recursive) { struct dsl_ds_holdarg *ha; dsl_sync_task_t *dst; spa_t *spa; int error; top: ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); error = spa_open(dsname, &spa, FTAG); if (error) { kmem_free(ha, sizeof (struct dsl_ds_holdarg)); return (error); } ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); ha->htag = htag; ha->snapname = snapname; ha->recursive = recursive; if (recursive) { error = dmu_objset_find(dsname, dsl_dataset_user_release_one, ha, DS_FIND_CHILDREN); } else { error = dsl_dataset_user_release_one(dsname, ha); } if (error == 0) error = dsl_sync_task_group_wait(ha->dstg); for (dst = list_head(&ha->dstg->dstg_tasks); dst; dst = list_next(&ha->dstg->dstg_tasks, dst)) { struct dsl_ds_releasearg *ra = dst->dst_arg1; dsl_dataset_t *ds = ra->ds; if (dst->dst_err) dsl_dataset_name(ds, ha->failed); if (ra->own) dsl_dataset_disown(ds, ha->dstg); else dsl_dataset_rele(ds, ha->dstg); kmem_free(ra, 
sizeof (struct dsl_ds_releasearg)); } if (error == 0 && recursive && !ha->gotone) error = ENOENT; if (error && error != EBUSY) (void) strlcpy(dsname, ha->failed, sizeof (ha->failed)); dsl_sync_task_group_destroy(ha->dstg); kmem_free(ha, sizeof (struct dsl_ds_holdarg)); spa_close(spa, FTAG); /* * We can get EBUSY if we were racing with deferred destroy and * dsl_dataset_user_release_check() hadn't done the necessary * open context setup. We can also get EBUSY if we're racing * with destroy and that thread is the ds_owner. Either way * the busy condition should be transient, and we should retry * the release operation. */ if (error == EBUSY) goto top; return (error); } /* * Called at spa_load time (with retry == B_FALSE) to release a stale * temporary user hold. Also called by the onexit code (with retry == B_TRUE). */ int dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag, boolean_t retry) { dsl_dataset_t *ds; char *snap; char *name; int namelen; int error; do { rw_enter(&dp->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds); rw_exit(&dp->dp_config_rwlock); if (error) return (error); namelen = dsl_dataset_namelen(ds)+1; name = kmem_alloc(namelen, KM_SLEEP); dsl_dataset_name(ds, name); dsl_dataset_rele(ds, FTAG); snap = strchr(name, '@'); *snap = '\0'; ++snap; error = dsl_dataset_user_release(name, snap, htag, B_FALSE); kmem_free(name, namelen); /* * The object can't have been destroyed because we have a hold, * but it might have been renamed, resulting in ENOENT. Retry * if we've been requested to do so. * * It would be nice if we could use the dsobj all the way * through and avoid ENOENT entirely. But we might need to * unmount the snapshot, and there's currently no way to lookup * a vfsp using a ZFS object id. */ } while ((error == ENOENT) && retry); return (error); } int dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp) { dsl_dataset_t *ds; int err; err = dsl_dataset_hold(dsname, FTAG, &ds); if (err) return (err); VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP)); if (ds->ds_phys->ds_userrefs_obj != 0) { zap_attribute_t *za; zap_cursor_t zc; za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_phys->ds_userrefs_obj); zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) { VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name, za->za_first_integer)); } zap_cursor_fini(&zc); kmem_free(za, sizeof (zap_attribute_t)); } dsl_dataset_rele(ds, FTAG); return (0); } /* * Note, this function is used as the callback for dmu_objset_find(). We * always return 0 so that we will continue to find and process * inconsistent datasets, even if we encounter an error trying to * process one of them. */ /* ARGSUSED */ int dsl_destroy_inconsistent(const char *dsname, void *arg) { dsl_dataset_t *ds; if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) { if (DS_IS_INCONSISTENT(ds)) (void) dsl_dataset_destroy(ds, FTAG, B_FALSE); else dsl_dataset_disown(ds, FTAG); } return (0); } /* * Return (in *usedp) the amount of space written in new that is not * present in oldsnap. New may be a snapshot or the head. Old must be * a snapshot before new, in new's filesystem (or its origin). If not then * fail and return EINVAL. * * The written space is calculated by considering two components: First, we * ignore any freed space, and calculate the written as new's used space * minus old's used space. 
Next, we add in the amount of space that was freed * between the two snapshots, thus reducing new's used space relative to old's. * Specifically, this is the space that was born before old->ds_creation_txg, * and freed before new (ie. on new's deadlist or a previous deadlist). * * space freed [---------------------] * snapshots ---O-------O--------O-------O------ * oldsnap new */ int dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp) { int err = 0; uint64_t snapobj; dsl_pool_t *dp = new->ds_dir->dd_pool; *usedp = 0; *usedp += new->ds_phys->ds_used_bytes; *usedp -= oldsnap->ds_phys->ds_used_bytes; *compp = 0; *compp += new->ds_phys->ds_compressed_bytes; *compp -= oldsnap->ds_phys->ds_compressed_bytes; *uncompp = 0; *uncompp += new->ds_phys->ds_uncompressed_bytes; *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes; rw_enter(&dp->dp_config_rwlock, RW_READER); snapobj = new->ds_object; while (snapobj != oldsnap->ds_object) { dsl_dataset_t *snap; uint64_t used, comp, uncomp; err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap); if (err != 0) break; if (snap->ds_phys->ds_prev_snap_txg == oldsnap->ds_phys->ds_creation_txg) { /* * The blocks in the deadlist can not be born after * ds_prev_snap_txg, so get the whole deadlist space, * which is more efficient (especially for old-format * deadlists). Unfortunately the deadlist code * doesn't have enough information to make this * optimization itself. */ dsl_deadlist_space(&snap->ds_deadlist, &used, &comp, &uncomp); } else { dsl_deadlist_space_range(&snap->ds_deadlist, 0, oldsnap->ds_phys->ds_creation_txg, &used, &comp, &uncomp); } *usedp += used; *compp += comp; *uncompp += uncomp; /* * If we get to the beginning of the chain of snapshots * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap * was not a snapshot of/before new. */ snapobj = snap->ds_phys->ds_prev_snap_obj; dsl_dataset_rele(snap, FTAG); if (snapobj == 0) { err = EINVAL; break; } } rw_exit(&dp->dp_config_rwlock); return (err); } /* * Return (in *usedp) the amount of space that will be reclaimed if firstsnap, * lastsnap, and all snapshots in between are deleted. * * blocks that would be freed [---------------------------] * snapshots ---O-------O--------O-------O--------O * firstsnap lastsnap * * This is the set of blocks that were born after the snap before firstsnap, * (birth > firstsnap->prev_snap_txg) and died before the snap after the * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist). * We calculate this by iterating over the relevant deadlists (from the snap * after lastsnap, backward to the snap after firstsnap), summing up the * space on the deadlist that was born after the snap before firstsnap. */ int dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap, dsl_dataset_t *lastsnap, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp) { int err = 0; uint64_t snapobj; dsl_pool_t *dp = firstsnap->ds_dir->dd_pool; ASSERT(dsl_dataset_is_snapshot(firstsnap)); ASSERT(dsl_dataset_is_snapshot(lastsnap)); /* * Check that the snapshots are in the same dsl_dir, and firstsnap * is before lastsnap. 
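 * (For example, passing snapshots from two different filesystems, or
 * passing firstsnap and lastsnap in reverse order, fails the check
 * below with EINVAL.)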
*/ if (firstsnap->ds_dir != lastsnap->ds_dir || firstsnap->ds_phys->ds_creation_txg > lastsnap->ds_phys->ds_creation_txg) return (EINVAL); *usedp = *compp = *uncompp = 0; rw_enter(&dp->dp_config_rwlock, RW_READER); snapobj = lastsnap->ds_phys->ds_next_snap_obj; while (snapobj != firstsnap->ds_object) { dsl_dataset_t *ds; uint64_t used, comp, uncomp; err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds); if (err != 0) break; dsl_deadlist_space_range(&ds->ds_deadlist, firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX, &used, &comp, &uncomp); *usedp += used; *compp += comp; *uncompp += uncomp; snapobj = ds->ds_phys->ds_prev_snap_obj; ASSERT3U(snapobj, !=, 0); dsl_dataset_rele(ds, FTAG); } rw_exit(&dp->dp_config_rwlock); return (err); } Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h (revision 235263) @@ -1,749 +1,750 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. */ /* * Copyright 2011 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. */ /* Portions Copyright 2010 Robert Milkowski */ #ifndef _SYS_DMU_H #define _SYS_DMU_H /* * This file describes the interface that the DMU provides for its * consumers. * * The DMU also interacts with the SPA. That interface is described in * dmu_spa.h. 
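 * A typical consumer (a sketch, not a prescribed sequence) holds an
 * objset with dmu_objset_hold(), creates and assigns a transaction with
 * dmu_tx_create()/dmu_tx_assign(), reads or dirties buffers, and then
 * calls dmu_tx_commit(); the declarations below are grouped roughly
 * along those lines.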
*/ #include #include #include #include #ifdef __cplusplus extern "C" { #endif struct uio; struct xuio; struct page; struct vnode; struct spa; struct zilog; struct zio; struct blkptr; struct zap_cursor; struct dsl_dataset; struct dsl_pool; struct dnode; struct drr_begin; struct drr_end; struct zbookmark; struct spa; struct nvlist; struct arc_buf; struct zio_prop; struct sa_handle; struct file; typedef struct objset objset_t; typedef struct dmu_tx dmu_tx_t; typedef struct dsl_dir dsl_dir_t; typedef enum dmu_object_type { DMU_OT_NONE, /* general: */ DMU_OT_OBJECT_DIRECTORY, /* ZAP */ DMU_OT_OBJECT_ARRAY, /* UINT64 */ DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */ DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */ DMU_OT_BPOBJ, /* UINT64 */ DMU_OT_BPOBJ_HDR, /* UINT64 */ /* spa: */ DMU_OT_SPACE_MAP_HEADER, /* UINT64 */ DMU_OT_SPACE_MAP, /* UINT64 */ /* zil: */ DMU_OT_INTENT_LOG, /* UINT64 */ /* dmu: */ DMU_OT_DNODE, /* DNODE */ DMU_OT_OBJSET, /* OBJSET */ /* dsl: */ DMU_OT_DSL_DIR, /* UINT64 */ DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */ DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */ DMU_OT_DSL_PROPS, /* ZAP */ DMU_OT_DSL_DATASET, /* UINT64 */ /* zpl: */ DMU_OT_ZNODE, /* ZNODE */ DMU_OT_OLDACL, /* Old ACL */ DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */ DMU_OT_DIRECTORY_CONTENTS, /* ZAP */ DMU_OT_MASTER_NODE, /* ZAP */ DMU_OT_UNLINKED_SET, /* ZAP */ /* zvol: */ DMU_OT_ZVOL, /* UINT8 */ DMU_OT_ZVOL_PROP, /* ZAP */ /* other; for testing only! */ DMU_OT_PLAIN_OTHER, /* UINT8 */ DMU_OT_UINT64_OTHER, /* UINT64 */ DMU_OT_ZAP_OTHER, /* ZAP */ /* new object types: */ DMU_OT_ERROR_LOG, /* ZAP */ DMU_OT_SPA_HISTORY, /* UINT8 */ DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */ DMU_OT_POOL_PROPS, /* ZAP */ DMU_OT_DSL_PERMS, /* ZAP */ DMU_OT_ACL, /* ACL */ DMU_OT_SYSACL, /* SYSACL */ DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */ DMU_OT_FUID_SIZE, /* FUID table size UINT64 */ DMU_OT_NEXT_CLONES, /* ZAP */ DMU_OT_SCAN_QUEUE, /* ZAP */ DMU_OT_USERGROUP_USED, /* ZAP */ DMU_OT_USERGROUP_QUOTA, /* ZAP */ DMU_OT_USERREFS, /* ZAP */ DMU_OT_DDT_ZAP, /* ZAP */ DMU_OT_DDT_STATS, /* ZAP */ DMU_OT_SA, /* System attr */ DMU_OT_SA_MASTER_NODE, /* ZAP */ DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */ DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */ DMU_OT_SCAN_XLATE, /* ZAP */ DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */ DMU_OT_DEADLIST, /* ZAP */ DMU_OT_DEADLIST_HDR, /* UINT64 */ DMU_OT_DSL_CLONES, /* ZAP */ DMU_OT_BPOBJ_SUBOBJ, /* UINT64 */ DMU_OT_NUMTYPES } dmu_object_type_t; typedef enum dmu_objset_type { DMU_OST_NONE, DMU_OST_META, DMU_OST_ZFS, DMU_OST_ZVOL, DMU_OST_OTHER, /* For testing only! */ DMU_OST_ANY, /* Be careful! */ DMU_OST_NUMTYPES } dmu_objset_type_t; void byteswap_uint64_array(void *buf, size_t size); void byteswap_uint32_array(void *buf, size_t size); void byteswap_uint16_array(void *buf, size_t size); void byteswap_uint8_array(void *buf, size_t size); void zap_byteswap(void *buf, size_t size); void zfs_oldacl_byteswap(void *buf, size_t size); void zfs_acl_byteswap(void *buf, size_t size); void zfs_znode_byteswap(void *buf, size_t size); #define DS_FIND_SNAPSHOTS (1<<0) #define DS_FIND_CHILDREN (1<<1) /* * The maximum number of bytes that can be accessed as part of one * operation, including metadata. 
*/ #define DMU_MAX_ACCESS (10<<20) /* 10MB */ #define DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */ #define DMU_USERUSED_OBJECT (-1ULL) #define DMU_GROUPUSED_OBJECT (-2ULL) #define DMU_DEADLIST_OBJECT (-3ULL) /* * artificial blkids for bonus buffer and spill blocks */ #define DMU_BONUS_BLKID (-1ULL) #define DMU_SPILL_BLKID (-2ULL) /* * Public routines to create, destroy, open, and close objsets. */ int dmu_objset_hold(const char *name, void *tag, objset_t **osp); int dmu_objset_own(const char *name, dmu_objset_type_t type, boolean_t readonly, void *tag, objset_t **osp); void dmu_objset_rele(objset_t *os, void *tag); void dmu_objset_disown(objset_t *os, void *tag); int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp); int dmu_objset_evict_dbufs(objset_t *os); int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg); int dmu_objset_clone(const char *name, struct dsl_dataset *clone_origin, uint64_t flags); int dmu_objset_destroy(const char *name, boolean_t defer); int dmu_get_recursive_snaps_nvl(const char *fsname, const char *snapname, struct nvlist *snaps); int dmu_snapshots_destroy_nvl(struct nvlist *snaps, boolean_t defer, char *); int dmu_objset_snapshot(char *fsname, char *snapname, char *tag, struct nvlist *props, boolean_t recursive, boolean_t temporary, int fd); int dmu_objset_rename(const char *name, const char *newname, boolean_t recursive); int dmu_objset_find(const char *name, int func(const char *, void *), void *arg, int flags); void dmu_objset_byteswap(void *buf, size_t size); typedef struct dmu_buf { uint64_t db_object; /* object that this buffer is part of */ uint64_t db_offset; /* byte offset in this object */ uint64_t db_size; /* size of buffer in bytes */ void *db_data; /* data in buffer */ } dmu_buf_t; typedef void dmu_buf_evict_func_t(struct dmu_buf *db, void *user_ptr); /* * The names of zap entries in the DIRECTORY_OBJECT of the MOS. */ #define DMU_POOL_DIRECTORY_OBJECT 1 #define DMU_POOL_CONFIG "config" #define DMU_POOL_ROOT_DATASET "root_dataset" #define DMU_POOL_SYNC_BPOBJ "sync_bplist" #define DMU_POOL_ERRLOG_SCRUB "errlog_scrub" #define DMU_POOL_ERRLOG_LAST "errlog_last" #define DMU_POOL_SPARES "spares" #define DMU_POOL_DEFLATE "deflate" #define DMU_POOL_HISTORY "history" #define DMU_POOL_PROPS "pool_props" #define DMU_POOL_L2CACHE "l2cache" #define DMU_POOL_TMP_USERREFS "tmp_userrefs" #define DMU_POOL_DDT "DDT-%s-%s-%s" #define DMU_POOL_DDT_STATS "DDT-statistics" #define DMU_POOL_CREATION_VERSION "creation_version" #define DMU_POOL_SCAN "scan" #define DMU_POOL_FREE_BPOBJ "free_bpobj" /* * Allocate an object from this objset. The range of object numbers * available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode. * * The transaction must be assigned to a txg. The newly allocated * object will be "held" in the transaction (ie. you can modify the * newly allocated object in this transaction). * * dmu_object_alloc() chooses an object and returns it in *objectp. * * dmu_object_claim() allocates a specific object number. If that * number is already allocated, it fails and returns EEXIST. * * Return 0 on success, or ENOSPC or EEXIST as specified above. 
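 * An illustrative allocation (a sketch only; error handling is omitted
 * and the object type is arbitrary):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	(void) dmu_tx_assign(tx, TXG_WAIT);
 *	object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
 *	    DMU_OT_NONE, 0, tx);
 *	dmu_tx_commit(tx);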
*/ uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx); int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot, int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx); int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot, int blocksize, dmu_object_type_t bonustype, int bonuslen); /* * Free an object from this objset. * * The object's data will be freed as well (ie. you don't need to call * dmu_free(object, 0, -1, tx)). * * The object need not be held in the transaction. * * If there are any holds on this object's buffers (via dmu_buf_hold()), * or tx holds on the object (via dmu_tx_hold_object()), you can not * free it; it fails and returns EBUSY. * * If the object is not allocated, it fails and returns ENOENT. * * Return 0 on success, or EBUSY or ENOENT as specified above. */ int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx); /* * Find the next allocated or free object. * * The objectp parameter is in-out. It will be updated to be the next * object which is allocated. Ignore objects which have not been * modified since txg. * * XXX Can only be called on an objset with no dirty data. * * Returns 0 on success, or ENOENT if there are no more objects. */ int dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg); /* * Set the data blocksize for an object. * * The object cannot have any blocks allocated beyond the first. If * the first block is allocated already, the new size must be greater * than the current block size. If these conditions are not met, * ENOTSUP will be returned. * * Returns 0 on success, or EBUSY if there are any holds on the object * contents, or ENOTSUP as described above. */ int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs, dmu_tx_t *tx); /* * Set the checksum property on a dnode. The new checksum algorithm will * apply to all newly written blocks; existing blocks will not be affected. */ void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, dmu_tx_t *tx); /* * Set the compress property on a dnode. The new compression algorithm will * apply to all newly written blocks; existing blocks will not be affected. */ void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, dmu_tx_t *tx); /* * Decide how to write a block: checksum, compression, number of copies, etc. */ #define WP_NOFILL 0x1 #define WP_DMU_SYNC 0x2 #define WP_SPILL 0x4 void dmu_write_policy(objset_t *os, struct dnode *dn, int level, int wp, struct zio_prop *zp); /* * The bonus data is accessed more or less like a regular buffer. * You must dmu_bonus_hold() to get the buffer, which will give you a * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus * data. As with any normal buffer, you must call dmu_buf_read() to * read db_data, dmu_buf_will_dirty() before modifying it, and the * object must be held in an assigned transaction before calling * dmu_buf_will_dirty. You may use dmu_buf_set_user() on the bonus * buffer as well. You must release your hold with dmu_buf_rele(). 
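 * An illustrative bonus update (a sketch; assumes tx is already
 * assigned, object is valid, and phys is a hypothetical caller-side
 * structure sized to fit the bonus buffer):
 *
 *	dmu_buf_t *db;
 *	VERIFY(0 == dmu_bonus_hold(os, object, FTAG, &db));
 *	dmu_buf_will_dirty(db, tx);
 *	bcopy(&phys, db->db_data, sizeof (phys));
 *	dmu_buf_rele(db, FTAG);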
*/ int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **); int dmu_bonus_max(void); int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *); int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *); dmu_object_type_t dmu_get_bonustype(dmu_buf_t *); int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *); /* * Special spill buffer support used by "SA" framework */ int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp); int dmu_spill_hold_by_dnode(struct dnode *dn, uint32_t flags, void *tag, dmu_buf_t **dbp); int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp); /* * Obtain the DMU buffer from the specified object which contains the * specified offset. dmu_buf_hold() puts a "hold" on the buffer, so * that it will remain in memory. You must release the hold with * dmu_buf_rele(). You mustn't access the dmu_buf_t after releasing your * hold. You must have a hold on any dmu_buf_t* you pass to the DMU. * * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill * on the returned buffer before reading or writing the buffer's * db_data. The comments for those routines describe what particular * operations are valid after calling them. * * The object number must be a valid, allocated object number. */ int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset, void *tag, dmu_buf_t **, int flags); void dmu_buf_add_ref(dmu_buf_t *db, void* tag); void dmu_buf_rele(dmu_buf_t *db, void *tag); uint64_t dmu_buf_refcount(dmu_buf_t *db); /* * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a * range of an object. A pointer to an array of dmu_buf_t*'s is * returned (in *dbpp). * * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and * frees the array. The hold on the array of buffers MUST be released * with dmu_buf_rele_array. You can NOT release the hold on each buffer * individually with dmu_buf_rele. */ int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset, uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp); void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag); /* * Returns NULL on success, or the existing user ptr if it's already * been set. * * user_ptr is for use by the user and can be obtained via dmu_buf_get_user(). * * user_data_ptr_ptr should be NULL, or a pointer to a pointer which * will be set to db->db_data when you are allowed to access it. Note * that db->db_data (the pointer) can change when you do dmu_buf_read(), * dmu_buf_tryupgrade(), dmu_buf_will_dirty(), or dmu_buf_will_fill(). * *user_data_ptr_ptr will be set to the new value when it changes. * * If non-NULL, pageout func will be called when this buffer is being * excised from the cache, so that you can clean up the data structure * pointed to by user_ptr. * * dmu_evict_user() will call the pageout func for all buffers in an * objset with a given pageout func. */ void *dmu_buf_set_user(dmu_buf_t *db, void *user_ptr, void *user_data_ptr_ptr, dmu_buf_evict_func_t *pageout_func); /* * set_user_ie is the same as set_user, but requests immediate eviction * when hold count goes to zero. */ void *dmu_buf_set_user_ie(dmu_buf_t *db, void *user_ptr, void *user_data_ptr_ptr, dmu_buf_evict_func_t *pageout_func); void *dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr, void *user_data_ptr_ptr, dmu_buf_evict_func_t *pageout_func); void dmu_evict_user(objset_t *os, dmu_buf_evict_func_t *func); /* * Returns the user_ptr set with dmu_buf_set_user(), or NULL if not set. 
*/ void *dmu_buf_get_user(dmu_buf_t *db); /* * Indicate that you are going to modify the buffer's data (db_data). * * The transaction (tx) must be assigned to a txg (ie. you've called * dmu_tx_assign()). The buffer's object must be held in the tx * (ie. you've called dmu_tx_hold_object(tx, db->db_object)). */ void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx); /* * Tells if the given dbuf is freeable. */ boolean_t dmu_buf_freeable(dmu_buf_t *); /* * You must create a transaction, then hold the objects which you will * (or might) modify as part of this transaction. Then you must assign * the transaction to a transaction group. Once the transaction has * been assigned, you can modify buffers which belong to held objects as * part of this transaction. You can't modify buffers before the * transaction has been assigned; you can't modify buffers which don't * belong to objects which this transaction holds; you can't hold * objects once the transaction has been assigned. You may hold an * object which you are going to free (with dmu_object_free()), but you * don't have to. * * You can abort the transaction before it has been assigned. * * Note that you may hold buffers (with dmu_buf_hold) at any time, * regardless of transaction state. */ #define DMU_NEW_OBJECT (-1ULL) #define DMU_OBJECT_END (-1ULL) dmu_tx_t *dmu_tx_create(objset_t *os); void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len); void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len); void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name); void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object); void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object); void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow); void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size); void dmu_tx_abort(dmu_tx_t *tx); int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how); void dmu_tx_wait(dmu_tx_t *tx); void dmu_tx_commit(dmu_tx_t *tx); /* * To register a commit callback, dmu_tx_callback_register() must be called. * * dcb_data is a pointer to caller private data that is passed on as a * callback parameter. The caller is responsible for properly allocating and * freeing it. * * When registering a callback, the transaction must be already created, but * it cannot be committed or aborted. It can be assigned to a txg or not. * * The callback will be called after the transaction has been safely written * to stable storage and will also be called if the dmu_tx is aborted. * If there is any error which prevents the transaction from being committed to * disk, the callback will be called with a value of error != 0. */ typedef void dmu_tx_callback_func_t(void *dcb_data, int error); void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func, void *dcb_data); /* * Free up the data blocks for a defined range of a file. If size is * zero, the range from offset to end-of-file is freed. */ int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, dmu_tx_t *tx); int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset, uint64_t size); int dmu_free_object(objset_t *os, uint64_t object); /* * Convenience functions. * * Canfail routines will return 0 on success, or an errno if there is a * nonrecoverable I/O error. 
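 * For example (a sketch; os, object and tx are assumed to be set up):
 *
 *	uint64_t vals[16];
 *	int error = dmu_read(os, object, 0, sizeof (vals), vals,
 *	    DMU_READ_PREFETCH);
 *	if (error == 0)
 *		dmu_write(os, object, 0, sizeof (vals), vals, tx);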
*/ #define DMU_READ_PREFETCH 0 /* prefetch */ #define DMU_READ_NO_PREFETCH 1 /* don't prefetch */ int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, void *buf, uint32_t flags); void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, const void *buf, dmu_tx_t *tx); void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, dmu_tx_t *tx); int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size); int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size, dmu_tx_t *tx); int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size, dmu_tx_t *tx); int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size, struct page *pp, dmu_tx_t *tx); struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size); void dmu_return_arcbuf(struct arc_buf *buf); void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf, dmu_tx_t *tx); int dmu_xuio_init(struct xuio *uio, int niov); void dmu_xuio_fini(struct xuio *uio); int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off, size_t n); int dmu_xuio_cnt(struct xuio *uio); struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i); void dmu_xuio_clear(struct xuio *uio, int i); void xuio_stat_wbuf_copied(); void xuio_stat_wbuf_nocopy(); extern int zfs_prefetch_disable; /* * Asynchronously try to read in the data. */ void dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len); typedef struct dmu_object_info { /* All sizes are in bytes unless otherwise indicated. */ uint32_t doi_data_block_size; uint32_t doi_metadata_block_size; dmu_object_type_t doi_type; dmu_object_type_t doi_bonus_type; uint64_t doi_bonus_size; uint8_t doi_indirection; /* 2 = dnode->indirect->data */ uint8_t doi_checksum; uint8_t doi_compress; uint8_t doi_pad[5]; uint64_t doi_physical_blocks_512; /* data + metadata, 512b blks */ uint64_t doi_max_offset; uint64_t doi_fill_count; /* number of non-empty blocks */ } dmu_object_info_t; typedef void arc_byteswap_func_t(void *buf, size_t size); typedef struct dmu_object_type_info { arc_byteswap_func_t *ot_byteswap; boolean_t ot_metadata; char *ot_name; } dmu_object_type_info_t; extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES]; /* * Get information on a DMU object. * * Return 0 on success or ENOENT if object is not allocated. * * If doi is NULL, just indicates whether the object exists. */ int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi); void dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi); void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi); void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512); typedef struct dmu_objset_stats { uint64_t dds_num_clones; /* number of clones of this */ uint64_t dds_creation_txg; uint64_t dds_guid; dmu_objset_type_t dds_type; uint8_t dds_is_snapshot; uint8_t dds_inconsistent; char dds_origin[MAXNAMELEN]; } dmu_objset_stats_t; /* * Get stats on a dataset. */ void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat); /* * Add entries to the nvlist for all the objset's properties. See * zfs_prop_table[] and zfs(1m) for details on the properties. */ void dmu_objset_stats(objset_t *os, struct nvlist *nv); /* * Get the space usage statistics for statvfs(). * * refdbytes is the amount of space "referenced" by this objset. 
* availbytes is the amount of space available to this objset, taking * into account quotas & reservations, assuming that no other objsets * use the space first. These values correspond to the 'referenced' and * 'available' properties, described in the zfs(1m) manpage. * * usedobjs and availobjs are the number of objects currently allocated * and available. */ void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, uint64_t *usedobjsp, uint64_t *availobjsp); /* * The fsid_guid is a 56-bit ID that can change to avoid collisions. * (Contrast with the ds_guid which is a 64-bit ID that will never * change, so there is a small probability that it will collide.) */ uint64_t dmu_objset_fsid_guid(objset_t *os); /* * Get the [cm]time for an objset's snapshot dir. */ timestruc_t dmu_objset_snap_cmtime(objset_t *os); int dmu_objset_is_snapshot(objset_t *os); extern struct spa *dmu_objset_spa(objset_t *os); extern struct zilog *dmu_objset_zil(objset_t *os); extern struct dsl_pool *dmu_objset_pool(objset_t *os); extern struct dsl_dataset *dmu_objset_ds(objset_t *os); extern void dmu_objset_name(objset_t *os, char *buf); extern dmu_objset_type_t dmu_objset_type(objset_t *os); extern uint64_t dmu_objset_id(objset_t *os); extern uint64_t dmu_objset_syncprop(objset_t *os); extern uint64_t dmu_objset_logbias(objset_t *os); extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name, uint64_t *id, uint64_t *offp, boolean_t *case_conflict); extern int dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen, boolean_t *conflict); extern int dmu_dir_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp); typedef int objset_used_cb_t(dmu_object_type_t bonustype, void *bonus, uint64_t *userp, uint64_t *groupp); extern void dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb); extern void dmu_objset_set_user(objset_t *os, void *user_ptr); extern void *dmu_objset_get_user(objset_t *os); /* * Return the txg number for the given assigned transaction. */ uint64_t dmu_tx_get_txg(dmu_tx_t *tx); /* * Synchronous write. * If a parent zio is provided, this function initiates a write on the * provided buffer as a child of the parent zio. * In the absence of a parent zio, the write is completed synchronously. * At write completion, blk is filled with the bp of the written block. * Note that while the data covered by this function will be on stable * storage when the write completes, this new data does not become a * permanent part of the file until the associated transaction commits. */ /* * {zfs,zvol,ztest}_get_done() args */ typedef struct zgd { struct zilog *zgd_zilog; struct blkptr *zgd_bp; dmu_buf_t *zgd_db; struct rl *zgd_rl; void *zgd_private; } zgd_t; typedef void dmu_sync_cb_t(zgd_t *arg, int error); int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd); /* * Find the next hole or data block in the file, starting at *off. * Return the found offset in *off. Return ESRCH for end of file. */ int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off); /* * Initial setup and final teardown.
*/ extern void dmu_init(void); extern void dmu_fini(void); typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp, uint64_t object, uint64_t offset, int len); void dmu_traverse_objset(objset_t *os, uint64_t txg_start, dmu_traverse_cb_t cb, void *arg); -int dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, - struct file *fp, offset_t *off); +int dmu_send(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, + int outfd, struct file *fp, offset_t *off); int dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, uint64_t *sizep); typedef struct dmu_recv_cookie { /* * This structure is opaque! * * If logical and real are different, we are receiving the stream * into the "real" temporary clone, and then switching it with * the "logical" target. */ struct dsl_dataset *drc_logical_ds; struct dsl_dataset *drc_real_ds; struct drr_begin *drc_drrb; char *drc_tosnap; char *drc_top_ds; boolean_t drc_newfs; boolean_t drc_force; struct avl_tree *drc_guid_to_ds_map; } dmu_recv_cookie_t; int dmu_recv_begin(char *tofs, char *tosnap, char *topds, struct drr_begin *, boolean_t force, objset_t *origin, dmu_recv_cookie_t *); int dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp, int cleanup_fd, uint64_t *action_handlep); int dmu_recv_end(dmu_recv_cookie_t *drc); int dmu_diff(objset_t *tosnap, objset_t *fromsnap, struct file *fp, offset_t *off); /* CRC64 table */ #define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */ extern uint64_t zfs_crc64_table[256]; #ifdef __cplusplus } #endif #endif /* _SYS_DMU_H */ Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_impl.h =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_impl.h (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_impl.h (revision 235263) @@ -1,273 +1,303 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2010 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. + * Copyright (c) 2012, Martin Matuska . All rights reserved. */ #ifndef _SYS_DMU_IMPL_H #define _SYS_DMU_IMPL_H #include #include #include #include #include +#include #ifdef __cplusplus extern "C" { #endif /* * This is the locking strategy for the DMU. Numbers in parentheses are * cases that use that lock order, referenced below: * * ARC is self-contained * bplist is self-contained * refcount is self-contained * txg is self-contained (hopefully!) * zst_lock * zf_rwlock * * XXX try to improve evicting path?
* * dp_config_rwlock > os_obj_lock > dn_struct_rwlock > * dn_dbufs_mtx > hash_mutexes > db_mtx > dd_lock > leafs * * dp_config_rwlock * must be held before: everything * protects dd namespace changes * protects property changes globally * held from: * dsl_dir_open/r: * dsl_dir_create_sync/w: * dsl_dir_sync_destroy/w: * dsl_dir_rename_sync/w: * dsl_prop_changed_notify/r: * * os_obj_lock * must be held before: * everything except dp_config_rwlock * protects os_obj_next * held from: * dmu_object_alloc: dn_dbufs_mtx, db_mtx, hash_mutexes, dn_struct_rwlock * * dn_struct_rwlock * must be held before: * everything except dp_config_rwlock and os_obj_lock * protects structure of dnode (eg. nlevels) * db_blkptr can change when syncing out change to nlevels * dn_maxblkid * dn_nlevels * dn_*blksz* * phys nlevels, maxblkid, physical blkptr_t's (?) * held from: * callers of dbuf_read_impl, dbuf_hold[_impl], dbuf_prefetch * dmu_object_info_from_dnode: dn_dirty_mtx (dn_datablksz) * dmu_tx_count_free: * dbuf_read_impl: db_mtx, dmu_zfetch() * dmu_zfetch: zf_rwlock/r, zst_lock, dbuf_prefetch() * dbuf_new_size: db_mtx * dbuf_dirty: db_mtx * dbuf_findbp: (callers, phys? - the real need) * dbuf_create: dn_dbufs_mtx, hash_mutexes, db_mtx (phys?) * dbuf_prefetch: dn_dirty_mtx, hash_mutexes, db_mtx, dn_dbufs_mtx * dbuf_hold_impl: hash_mutexes, db_mtx, dn_dbufs_mtx, dbuf_findbp() * dnode_sync/w (increase_indirection): db_mtx (phys) * dnode_set_blksz/w: dn_dbufs_mtx (dn_*blksz*) * dnode_new_blkid/w: (dn_maxblkid) * dnode_free_range/w: dn_dirty_mtx (dn_maxblkid) * dnode_next_offset: (phys) * * dn_dbufs_mtx * must be held before: * db_mtx, hash_mutexes * protects: * dn_dbufs * dn_evicted * held from: * dmu_evict_user: db_mtx (dn_dbufs) * dbuf_free_range: db_mtx (dn_dbufs) * dbuf_remove_ref: db_mtx, callees: * dbuf_hash_remove: hash_mutexes, db_mtx * dbuf_create: hash_mutexes, db_mtx (dn_dbufs) * dnode_set_blksz: (dn_dbufs) * * hash_mutexes (global) * must be held before: * db_mtx * protects dbuf_hash_table (global) and db_hash_next * held from: * dbuf_find: db_mtx * dbuf_hash_insert: db_mtx * dbuf_hash_remove: db_mtx * * db_mtx (meta-leaf) * must be held before: * dn_mtx, dn_dirty_mtx, dd_lock (leaf mutexes) * protects: * db_state * db_holds * db_buf * db_changed * db_data_pending * db_dirtied * db_link * db_dirty_node (??) * db_dirtycnt * db_d.* * db.* * held from: * dbuf_dirty: dn_mtx, dn_dirty_mtx * dbuf_dirty->dsl_dir_willuse_space: dd_lock * dbuf_dirty->dbuf_new_block->dsl_dataset_block_freeable: dd_lock * dbuf_undirty: dn_dirty_mtx (db_d) * dbuf_write_done: dn_dirty_mtx (db_state) * dbuf_* * dmu_buf_update_user: none (db_d) * dmu_evict_user: none (db_d) (maybe can eliminate) * dbuf_find: none (db_holds) * dbuf_hash_insert: none (db_holds) * dmu_buf_read_array_impl: none (db_state, db_changed) * dmu_sync: none (db_dirty_node, db_d) * dnode_reallocate: none (db) * * dn_mtx (leaf) * protects: * dn_dirty_dbufs * dn_ranges * phys accounting * dn_allocated_txg * dn_free_txg * dn_assigned_txg * dd_assigned_tx * dn_notxholds * dn_dirtyctx * dn_dirtyctx_firstset * (dn_phys copy fields?) * (dn_phys contents?) 
* held from: * dnode_* * dbuf_dirty: none * dbuf_sync: none (phys accounting) * dbuf_undirty: none (dn_ranges, dn_dirty_dbufs) * dbuf_write_done: none (phys accounting) * dmu_object_info_from_dnode: none (accounting) * dmu_tx_commit: none * dmu_tx_hold_object_impl: none * dmu_tx_try_assign: dn_notxholds(cv) * dmu_tx_unassign: none * * dd_lock * must be held before: * ds_lock * ancestors' dd_lock * protects: * dd_prop_cbs * dd_sync_* * dd_used_bytes * dd_tempreserved * dd_space_towrite * dd_myname * dd_phys accounting? * held from: * dsl_dir_* * dsl_prop_changed_notify: none (dd_prop_cbs) * dsl_prop_register: none (dd_prop_cbs) * dsl_prop_unregister: none (dd_prop_cbs) * dsl_dataset_block_freeable: none (dd_sync_*) * * os_lock (leaf) * protects: * os_dirty_dnodes * os_free_dnodes * os_dnodes * os_downgraded_dbufs * dn_dirtyblksz * dn_dirty_link * held from: * dnode_create: none (os_dnodes) * dnode_destroy: none (os_dnodes) * dnode_setdirty: none (dn_dirtyblksz, os_*_dnodes) * dnode_free: none (dn_dirtyblksz, os_*_dnodes) * * ds_lock * protects: * ds_objset * ds_open_refcount * ds_snapname * ds_phys accounting * ds_phys userrefs zapobj * ds_reserved * held from: * dsl_dataset_* * * dr_mtx (leaf) * protects: * dr_children * held from: * dbuf_dirty * dbuf_undirty * dbuf_sync_indirect * dnode_new_blkid */ struct objset; struct dmu_pool; typedef struct dmu_xuio { int next; int cnt; struct arc_buf **bufs; iovec_t *iovp; } dmu_xuio_t; typedef struct xuio_stats { /* loaned yet not returned arc_buf */ kstat_named_t xuiostat_onloan_rbuf; kstat_named_t xuiostat_onloan_wbuf; /* whether a copy is made when loaning out a read buffer */ kstat_named_t xuiostat_rbuf_copied; kstat_named_t xuiostat_rbuf_nocopy; /* whether a copy is made when assigning a write buffer */ kstat_named_t xuiostat_wbuf_copied; kstat_named_t xuiostat_wbuf_nocopy; } xuio_stats_t; static xuio_stats_t xuio_stats = { { "onloan_read_buf", KSTAT_DATA_UINT64 }, { "onloan_write_buf", KSTAT_DATA_UINT64 }, { "read_buf_copied", KSTAT_DATA_UINT64 }, { "read_buf_nocopy", KSTAT_DATA_UINT64 }, { "write_buf_copied", KSTAT_DATA_UINT64 }, { "write_buf_nocopy", KSTAT_DATA_UINT64 } }; #define XUIOSTAT_INCR(stat, val) \ atomic_add_64(&xuio_stats.stat.value.ui64, (val)) #define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1) + +/* + * The list of data whose inclusion in a send stream can be pending from + * one call to backup_cb to another. Multiple calls to dump_free() and + * dump_freeobjects() can be aggregated into a single DRR_FREE or + * DRR_FREEOBJECTS replay record. + */ +typedef enum { + PENDING_NONE, + PENDING_FREE, + PENDING_FREEOBJECTS +} dmu_pendop_t; + +typedef struct dmu_sendarg { + list_node_t dsa_link; + dmu_replay_record_t *dsa_drr; + kthread_t *dsa_td; + struct file *dsa_fp; + int dsa_outfd; + struct proc *dsa_proc; + offset_t *dsa_off; + objset_t *dsa_os; + zio_cksum_t dsa_zc; + uint64_t dsa_toguid; + int dsa_err; + dmu_pendop_t dsa_pending_op; +} dmu_sendarg_t; #ifdef __cplusplus } #endif #endif /* _SYS_DMU_IMPL_H */ Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h (revision 235263) @@ -1,296 +1,300 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). 
* You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011 Pawel Jakub Dawidek . * All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. */ #ifndef _SYS_DSL_DATASET_H #define _SYS_DSL_DATASET_H #include #include #include #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif struct dsl_dataset; struct dsl_dir; struct dsl_pool; #define DS_FLAG_INCONSISTENT (1ULL<<0) #define DS_IS_INCONSISTENT(ds) \ ((ds)->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) /* * NB: nopromote can not yet be set, but we want support for it in this * on-disk version, so that we don't need to upgrade for it later. It * will be needed when we implement 'zfs split' (where the split off * clone should not be promoted). */ #define DS_FLAG_NOPROMOTE (1ULL<<1) /* * DS_FLAG_UNIQUE_ACCURATE is set if ds_unique_bytes has been correctly * calculated for head datasets (starting with SPA_VERSION_UNIQUE_ACCURATE, * refquota/refreservations). */ #define DS_FLAG_UNIQUE_ACCURATE (1ULL<<2) /* * DS_FLAG_DEFER_DESTROY is set after 'zfs destroy -d' has been called * on a dataset. This allows the dataset to be destroyed using 'zfs release'. */ #define DS_FLAG_DEFER_DESTROY (1ULL<<3) #define DS_IS_DEFER_DESTROY(ds) \ ((ds)->ds_phys->ds_flags & DS_FLAG_DEFER_DESTROY) /* * DS_FLAG_CI_DATASET is set if the dataset contains a file system whose * name lookups should be performed case-insensitively. */ #define DS_FLAG_CI_DATASET (1ULL<<16) typedef struct dsl_dataset_phys { uint64_t ds_dir_obj; /* DMU_OT_DSL_DIR */ uint64_t ds_prev_snap_obj; /* DMU_OT_DSL_DATASET */ uint64_t ds_prev_snap_txg; uint64_t ds_next_snap_obj; /* DMU_OT_DSL_DATASET */ uint64_t ds_snapnames_zapobj; /* DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */ uint64_t ds_num_children; /* clone/snap children; ==0 for head */ uint64_t ds_creation_time; /* seconds since 1970 */ uint64_t ds_creation_txg; uint64_t ds_deadlist_obj; /* DMU_OT_DEADLIST */ uint64_t ds_used_bytes; uint64_t ds_compressed_bytes; uint64_t ds_uncompressed_bytes; uint64_t ds_unique_bytes; /* only relevant to snapshots */ /* * The ds_fsid_guid is a 56-bit ID that can change to avoid * collisions. The ds_guid is a 64-bit ID that will never * change, so there is a small probability that it will collide. 
*/ uint64_t ds_fsid_guid; uint64_t ds_guid; uint64_t ds_flags; /* DS_FLAG_* */ blkptr_t ds_bp; uint64_t ds_next_clones_obj; /* DMU_OT_DSL_CLONES */ uint64_t ds_props_obj; /* DMU_OT_DSL_PROPS for snaps */ uint64_t ds_userrefs_obj; /* DMU_OT_USERREFS */ uint64_t ds_pad[5]; /* pad out to 320 bytes for good measure */ } dsl_dataset_phys_t; typedef struct dsl_dataset { /* Immutable: */ struct dsl_dir *ds_dir; dsl_dataset_phys_t *ds_phys; dmu_buf_t *ds_dbuf; uint64_t ds_object; uint64_t ds_fsid_guid; /* only used in syncing context, only valid for non-snapshots: */ struct dsl_dataset *ds_prev; /* has internal locking: */ dsl_deadlist_t ds_deadlist; bplist_t ds_pending_deadlist; /* to protect against multiple concurrent incremental recv */ kmutex_t ds_recvlock; /* protected by lock on pool's dp_dirty_datasets list */ txg_node_t ds_dirty_link; list_node_t ds_synced_link; /* * ds_phys->ds_ is also protected by ds_lock. * Protected by ds_lock: */ kmutex_t ds_lock; objset_t *ds_objset; uint64_t ds_userrefs; /* * ds_owner is protected by the ds_rwlock and the ds_lock */ krwlock_t ds_rwlock; kcondvar_t ds_exclusive_cv; void *ds_owner; /* no locking; only for making guesses */ uint64_t ds_trysnap_txg; /* for objset_open() */ kmutex_t ds_opening_lock; uint64_t ds_reserved; /* cached refreservation */ uint64_t ds_quota; /* cached refquota */ + + kmutex_t ds_sendstream_lock; + list_t ds_sendstreams; /* Protected by ds_lock; keep at end of struct for better locality */ char ds_snapname[MAXNAMELEN]; } dsl_dataset_t; struct dsl_ds_destroyarg { dsl_dataset_t *ds; /* ds to destroy */ dsl_dataset_t *rm_origin; /* also remove our origin? */ boolean_t is_origin_rm; /* set if removing origin snap */ boolean_t defer; /* destroy -d requested? */ boolean_t releasing; /* destroying due to release? */ boolean_t need_prep; /* do we need to retry due to EBUSY? */ }; /* * The max length of a temporary tag prefix is the number of hex digits * required to express UINT64_MAX plus one for the hyphen. */ #define MAX_TAG_PREFIX_LEN 17 struct dsl_ds_holdarg { dsl_sync_task_group_t *dstg; char *htag; char *snapname; boolean_t recursive; boolean_t gotone; boolean_t temphold; char failed[MAXPATHLEN]; }; /* * Flags for dsl_dataset_rename(). 
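 *
 * For example (illustrative only, using the flags defined below), a
 * recursive rename that tolerates mounted datasets would combine both:
 *
 *	error = dsl_dataset_rename(oldname, newname,
 *	    ZFS_RENAME_RECURSIVE | ZFS_RENAME_ALLOW_MOUNTED);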
*/ #define ZFS_RENAME_RECURSIVE 0x01 #define ZFS_RENAME_ALLOW_MOUNTED 0x02 #define dsl_dataset_is_snapshot(ds) \ ((ds)->ds_phys->ds_num_children != 0) #define DS_UNIQUE_IS_ACCURATE(ds) \ (((ds)->ds_phys->ds_flags & DS_FLAG_UNIQUE_ACCURATE) != 0) int dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp); int dsl_dataset_hold_obj(struct dsl_pool *dp, uint64_t dsobj, void *tag, dsl_dataset_t **); int dsl_dataset_own(const char *name, boolean_t inconsistentok, void *tag, dsl_dataset_t **dsp); int dsl_dataset_own_obj(struct dsl_pool *dp, uint64_t dsobj, boolean_t inconsistentok, void *tag, dsl_dataset_t **dsp); void dsl_dataset_name(dsl_dataset_t *ds, char *name); void dsl_dataset_rele(dsl_dataset_t *ds, void *tag); void dsl_dataset_disown(dsl_dataset_t *ds, void *tag); void dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag); boolean_t dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag); void dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *tag); void dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag, minor_t minor); uint64_t dsl_dataset_create_sync(dsl_dir_t *pds, const char *lastname, dsl_dataset_t *origin, uint64_t flags, cred_t *, dmu_tx_t *); uint64_t dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, uint64_t flags, dmu_tx_t *tx); int dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer); int dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer); dsl_checkfunc_t dsl_dataset_destroy_check; dsl_syncfunc_t dsl_dataset_destroy_sync; dsl_checkfunc_t dsl_dataset_snapshot_check; dsl_syncfunc_t dsl_dataset_snapshot_sync; dsl_syncfunc_t dsl_dataset_user_hold_sync; int dsl_dataset_rename(char *name, const char *newname, int flags); int dsl_dataset_promote(const char *name, char *conflsnap); int dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, boolean_t force); int dsl_dataset_user_hold(char *dsname, char *snapname, char *htag, boolean_t recursive, boolean_t temphold, int cleanup_fd); int dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag, boolean_t temphold); int dsl_dataset_user_release(char *dsname, char *snapname, char *htag, boolean_t recursive); int dsl_dataset_user_release_tmp(struct dsl_pool *dp, uint64_t dsobj, char *htag, boolean_t retry); int dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp); blkptr_t *dsl_dataset_get_blkptr(dsl_dataset_t *ds); void dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx); spa_t *dsl_dataset_get_spa(dsl_dataset_t *ds); boolean_t dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds); void dsl_dataset_sync(dsl_dataset_t *os, zio_t *zio, dmu_tx_t *tx); void dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx); int dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, boolean_t async); boolean_t dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp, uint64_t blk_birth); uint64_t dsl_dataset_prev_snap_txg(dsl_dataset_t *ds); void dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx); void dsl_dataset_stats(dsl_dataset_t *os, nvlist_t *nv); void dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat); void dsl_dataset_space(dsl_dataset_t *ds, uint64_t *refdbytesp, uint64_t *availbytesp, uint64_t *usedobjsp, uint64_t *availobjsp); uint64_t dsl_dataset_fsid_guid(dsl_dataset_t *ds); int dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp); int 
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap, dsl_dataset_t *last, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp); int dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf); int dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv); int dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota); dsl_syncfunc_t dsl_dataset_set_quota_sync; int dsl_dataset_set_reservation(const char *dsname, zprop_source_t source, uint64_t reservation); int dsl_destroy_inconsistent(const char *dsname, void *arg); #ifdef ZFS_DEBUG #define dprintf_ds(ds, fmt, ...) do { \ if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ char *__ds_name = kmem_alloc(MAXNAMELEN, KM_SLEEP); \ dsl_dataset_name(ds, __ds_name); \ dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \ kmem_free(__ds_name, MAXNAMELEN); \ } \ _NOTE(CONSTCOND) } while (0) #else #define dprintf_ds(dd, fmt, ...) #endif #ifdef __cplusplus } #endif #endif /* _SYS_DSL_DATASET_H */ Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c (revision 235263) @@ -1,5426 +1,5474 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011-2012 Pawel Jakub Dawidek . * All rights reserved. * Portions Copyright 2011 Martin Matuska * Copyright 2011 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_namecheck.h" #include "zfs_prop.h" #include "zfs_deleg.h" #include "zfs_comutil.h" #include "zfs_ioctl_compat.h" CTASSERT(sizeof(zfs_cmd_t) < IOCPARM_MAX); static int snapshot_list_prefetch; SYSCTL_DECL(_vfs_zfs); TUNABLE_INT("vfs.zfs.snapshot_list_prefetch", &snapshot_list_prefetch); SYSCTL_INT(_vfs_zfs, OID_AUTO, snapshot_list_prefetch, CTLFLAG_RW, &snapshot_list_prefetch, 0, "Prefetch data when listing snapshots"); static struct cdev *zfsdev; extern void zfs_init(void); extern void zfs_fini(void); typedef int zfs_ioc_func_t(zfs_cmd_t *); typedef int zfs_secpolicy_func_t(zfs_cmd_t *, cred_t *); typedef enum { NO_NAME, POOL_NAME, DATASET_NAME } zfs_ioc_namecheck_t; typedef struct zfs_ioc_vec { zfs_ioc_func_t *zvec_func; zfs_secpolicy_func_t *zvec_secpolicy; zfs_ioc_namecheck_t zvec_namecheck; boolean_t zvec_his_log; boolean_t zvec_pool_check; } zfs_ioc_vec_t; /* This array is indexed by zfs_userquota_prop_t */ static const char *userquota_perms[] = { ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_PERM_GROUPQUOTA, }; static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc); static int zfs_check_settable(const char *name, nvpair_t *property, cred_t *cr); static int zfs_check_clearable(char *dataset, nvlist_t *props, nvlist_t **errors); static int zfs_fill_zplprops_root(uint64_t, nvlist_t *, nvlist_t *, boolean_t *); int zfs_set_prop_nvlist(const char *, zprop_source_t, nvlist_t *, nvlist_t **); static void zfsdev_close(void *data); /* _NOTE(PRINTFLIKE(4)) - this is printf-like, but lint is too whiney */ void __dprintf(const char *file, const char *func, int line, const char *fmt, ...) { const char *newfile; char buf[512]; va_list adx; /* * Get rid of annoying "../common/" prefix to filename. 
*/ newfile = strrchr(file, '/'); if (newfile != NULL) { newfile = newfile + 1; /* Get rid of leading / */ } else { newfile = file; } va_start(adx, fmt); (void) vsnprintf(buf, sizeof (buf), fmt, adx); va_end(adx); /* * To get this data, use the zfs-dprintf probe as so: * dtrace -q -n 'zfs-dprintf \ * /stringof(arg0) == "dbuf.c"/ \ * {printf("%s: %s", stringof(arg1), stringof(arg3))}' * arg0 = file name * arg1 = function name * arg2 = line number * arg3 = message */ DTRACE_PROBE4(zfs__dprintf, char *, newfile, char *, func, int, line, char *, buf); } static void history_str_free(char *buf) { kmem_free(buf, HIS_MAX_RECORD_LEN); } static char * history_str_get(zfs_cmd_t *zc) { char *buf; if (zc->zc_history == 0) return (NULL); buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP); if (copyinstr((void *)(uintptr_t)zc->zc_history, buf, HIS_MAX_RECORD_LEN, NULL) != 0) { history_str_free(buf); return (NULL); } buf[HIS_MAX_RECORD_LEN -1] = '\0'; return (buf); } /* * Check to see if the named dataset is currently defined as bootable */ static boolean_t zfs_is_bootfs(const char *name) { objset_t *os; if (dmu_objset_hold(name, FTAG, &os) == 0) { boolean_t ret; ret = (dmu_objset_id(os) == spa_bootfs(dmu_objset_spa(os))); dmu_objset_rele(os, FTAG); return (ret); } return (B_FALSE); } /* * zfs_earlier_version * * Return non-zero if the spa version is less than requested version. */ static int zfs_earlier_version(const char *name, int version) { spa_t *spa; if (spa_open(name, &spa, FTAG) == 0) { if (spa_version(spa) < version) { spa_close(spa, FTAG); return (1); } spa_close(spa, FTAG); } return (0); } /* * zpl_earlier_version * * Return TRUE if the ZPL version is less than requested version. */ static boolean_t zpl_earlier_version(const char *name, int version) { objset_t *os; boolean_t rc = B_TRUE; if (dmu_objset_hold(name, FTAG, &os) == 0) { uint64_t zplversion; if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele(os, FTAG); return (B_TRUE); } /* XXX reading from non-owned objset */ if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplversion) == 0) rc = zplversion < version; dmu_objset_rele(os, FTAG); } return (rc); } static void zfs_log_history(zfs_cmd_t *zc) { spa_t *spa; char *buf; if ((buf = history_str_get(zc)) == NULL) return; if (spa_open(zc->zc_name, &spa, FTAG) == 0) { if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY) (void) spa_history_log(spa, buf, LOG_CMD_NORMAL); spa_close(spa, FTAG); } history_str_free(buf); } /* * Policy for top-level read operations (list pools). Requires no privileges, * and can be used in the local zone, as there is no associated dataset. */ /* ARGSUSED */ static int zfs_secpolicy_none(zfs_cmd_t *zc, cred_t *cr) { return (0); } /* * Policy for dataset read operations (list children, get statistics). Requires * no privileges, but must be visible in the local zone. */ /* ARGSUSED */ static int zfs_secpolicy_read(zfs_cmd_t *zc, cred_t *cr) { if (INGLOBALZONE(curthread) || zone_dataset_visible(zc->zc_name, NULL)) return (0); return (ENOENT); } static int zfs_dozonecheck_impl(const char *dataset, uint64_t zoned, cred_t *cr) { int writable = 1; /* * The dataset must be visible by this zone -- check this first * so they don't see EPERM on something they shouldn't know about. */ if (!INGLOBALZONE(curthread) && !zone_dataset_visible(dataset, &writable)) return (ENOENT); if (INGLOBALZONE(curthread)) { /* * If the fs is zoned, only root can access it from the * global zone. 
*/ if (secpolicy_zfs(cr) && zoned) return (EPERM); } else { /* * If we are in a local zone, the 'zoned' property must be set. */ if (!zoned) return (EPERM); /* must be writable by this zone */ if (!writable) return (EPERM); } return (0); } static int zfs_dozonecheck(const char *dataset, cred_t *cr) { uint64_t zoned; if (dsl_prop_get_integer(dataset, "jailed", &zoned, NULL)) return (ENOENT); return (zfs_dozonecheck_impl(dataset, zoned, cr)); } static int zfs_dozonecheck_ds(const char *dataset, dsl_dataset_t *ds, cred_t *cr) { uint64_t zoned; rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); if (dsl_prop_get_ds(ds, "jailed", 8, 1, &zoned, NULL)) { rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); return (ENOENT); } rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); return (zfs_dozonecheck_impl(dataset, zoned, cr)); } /* * If name ends in a '@', then require recursive permissions. */ int zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr) { int error; boolean_t descendent = B_FALSE; dsl_dataset_t *ds; char *at; at = strchr(name, '@'); if (at != NULL && at[1] == '\0') { *at = '\0'; descendent = B_TRUE; } error = dsl_dataset_hold(name, FTAG, &ds); if (at != NULL) *at = '@'; if (error != 0) return (error); error = zfs_dozonecheck_ds(name, ds, cr); if (error == 0) { error = secpolicy_zfs(cr); if (error) error = dsl_deleg_access_impl(ds, descendent, perm, cr); } dsl_dataset_rele(ds, FTAG); return (error); } int zfs_secpolicy_write_perms_ds(const char *name, dsl_dataset_t *ds, const char *perm, cred_t *cr) { int error; error = zfs_dozonecheck_ds(name, ds, cr); if (error == 0) { error = secpolicy_zfs(cr); if (error) error = dsl_deleg_access_impl(ds, B_FALSE, perm, cr); } return (error); } #ifdef SECLABEL /* * Policy for setting the security label property. * * Returns 0 for success, non-zero for access and other errors. */ static int zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr) { char ds_hexsl[MAXNAMELEN]; bslabel_t ds_sl, new_sl; boolean_t new_default = FALSE; uint64_t zoned; int needed_priv = -1; int error; /* First get the existing dataset label. */ error = dsl_prop_get(name, zfs_prop_to_name(ZFS_PROP_MLSLABEL), 1, sizeof (ds_hexsl), &ds_hexsl, NULL); if (error) return (EPERM); if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0) new_default = TRUE; /* The label must be translatable */ if (!new_default && (hexstr_to_label(strval, &new_sl) != 0)) return (EINVAL); /* * In a non-global zone, disallow attempts to set a label that * doesn't match that of the zone; otherwise no other checks * are needed. */ if (!INGLOBALZONE(curproc)) { if (new_default || !blequal(&new_sl, CR_SL(CRED()))) return (EPERM); return (0); } /* * For global-zone datasets (i.e., those whose zoned property is * "off"), verify that the specified new label is valid for the * global zone. */ if (dsl_prop_get_integer(name, zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL)) return (EPERM); if (!zoned) { if (zfs_check_global_label(name, strval) != 0) return (EPERM); } /* * If the existing dataset label is nondefault, check if the * dataset is mounted (label cannot be changed while mounted). * Get the zfsvfs; if there isn't one, then the dataset isn't * mounted (or isn't a dataset, doesn't exist, ...). */ if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) { objset_t *os; static char *setsl_tag = "setsl_tag"; /* * Try to own the dataset; abort if there is any error * (e.g., already mounted, in use, or other error).
*/ error = dmu_objset_own(name, DMU_OST_ZFS, B_TRUE, setsl_tag, &os); if (error) return (EPERM); dmu_objset_disown(os, setsl_tag); if (new_default) { needed_priv = PRIV_FILE_DOWNGRADE_SL; goto out_check; } if (hexstr_to_label(strval, &new_sl) != 0) return (EPERM); if (blstrictdom(&ds_sl, &new_sl)) needed_priv = PRIV_FILE_DOWNGRADE_SL; else if (blstrictdom(&new_sl, &ds_sl)) needed_priv = PRIV_FILE_UPGRADE_SL; } else { /* dataset currently has a default label */ if (!new_default) needed_priv = PRIV_FILE_UPGRADE_SL; } out_check: if (needed_priv != -1) return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL)); return (0); } #endif /* SECLABEL */ static int zfs_secpolicy_setprop(const char *dsname, zfs_prop_t prop, nvpair_t *propval, cred_t *cr) { char *strval; /* * Check permissions for special properties. */ switch (prop) { case ZFS_PROP_ZONED: /* * Disallow setting of 'zoned' from within a local zone. */ if (!INGLOBALZONE(curthread)) return (EPERM); break; case ZFS_PROP_QUOTA: if (!INGLOBALZONE(curthread)) { uint64_t zoned; char setpoint[MAXNAMELEN]; /* * Unprivileged users are allowed to modify the * quota on things *under* (ie. contained by) * the thing they own. */ if (dsl_prop_get_integer(dsname, "jailed", &zoned, setpoint)) return (EPERM); if (!zoned || strlen(dsname) <= strlen(setpoint)) return (EPERM); } break; case ZFS_PROP_MLSLABEL: #ifdef SECLABEL if (!is_system_labeled()) return (EPERM); if (nvpair_value_string(propval, &strval) == 0) { int err; err = zfs_set_slabel_policy(dsname, strval, CRED()); if (err != 0) return (err); } #else return (EOPNOTSUPP); #endif break; } return (zfs_secpolicy_write_perms(dsname, zfs_prop_to_name(prop), cr)); } int zfs_secpolicy_fsacl(zfs_cmd_t *zc, cred_t *cr) { int error; error = zfs_dozonecheck(zc->zc_name, cr); if (error) return (error); /* * permission to set permissions will be evaluated later in * dsl_deleg_can_allow() */ return (0); } int zfs_secpolicy_rollback(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_ROLLBACK, cr)); } int zfs_secpolicy_send(zfs_cmd_t *zc, cred_t *cr) { spa_t *spa; dsl_pool_t *dp; dsl_dataset_t *ds; char *cp; int error; /* * Generate the current snapshot name from the given objsetid, then * use that name for the secpolicy/zone checks. 
*/ cp = strchr(zc->zc_name, '@'); if (cp == NULL) return (EINVAL); error = spa_open(zc->zc_name, &spa, FTAG); if (error) return (error); dp = spa_get_dsl(spa); rw_enter(&dp->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds); rw_exit(&dp->dp_config_rwlock); spa_close(spa, FTAG); if (error) return (error); dsl_dataset_name(ds, zc->zc_name); error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds, ZFS_DELEG_PERM_SEND, cr); dsl_dataset_rele(ds, FTAG); return (error); } static int zfs_secpolicy_deleg_share(zfs_cmd_t *zc, cred_t *cr) { vnode_t *vp; int error; if ((error = lookupname(zc->zc_value, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp)) != 0) return (error); /* Now make sure mntpnt and dataset are ZFS */ if (strcmp(vp->v_vfsp->mnt_stat.f_fstypename, "zfs") != 0 || (strcmp((char *)refstr_value(vp->v_vfsp->vfs_resource), zc->zc_name) != 0)) { VN_RELE(vp); return (EPERM); } VN_RELE(vp); return (dsl_deleg_access(zc->zc_name, ZFS_DELEG_PERM_SHARE, cr)); } int zfs_secpolicy_share(zfs_cmd_t *zc, cred_t *cr) { if (!INGLOBALZONE(curthread)) return (EPERM); if (secpolicy_nfs(cr) == 0) { return (0); } else { return (zfs_secpolicy_deleg_share(zc, cr)); } } int zfs_secpolicy_smb_acl(zfs_cmd_t *zc, cred_t *cr) { if (!INGLOBALZONE(curthread)) return (EPERM); if (secpolicy_smb(cr) == 0) { return (0); } else { return (zfs_secpolicy_deleg_share(zc, cr)); } } static int zfs_get_parent(const char *datasetname, char *parent, int parentsize) { char *cp; /* * Remove the @bla or /bla from the end of the name to get the parent. */ (void) strncpy(parent, datasetname, parentsize); cp = strrchr(parent, '@'); if (cp != NULL) { cp[0] = '\0'; } else { cp = strrchr(parent, '/'); if (cp == NULL) return (ENOENT); cp[0] = '\0'; } return (0); } int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr) { int error; if ((error = zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr)); } static int zfs_secpolicy_destroy(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_destroy_perms(zc->zc_name, cr)); } /* * Destroying snapshots with delegated permissions requires * descendent mount and destroy permissions. 
*/ static int zfs_secpolicy_destroy_recursive(zfs_cmd_t *zc, cred_t *cr) { int error; char *dsname; dsname = kmem_asprintf("%s@", zc->zc_name); error = zfs_secpolicy_destroy_perms(dsname, cr); if (error == ENOENT) error = zfs_secpolicy_destroy_perms(zc->zc_name, cr); strfree(dsname); return (error); } int zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr) { char parentname[MAXNAMELEN]; int error; if ((error = zfs_secpolicy_write_perms(from, ZFS_DELEG_PERM_RENAME, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(from, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); if ((error = zfs_get_parent(to, parentname, sizeof (parentname))) != 0) return (error); if ((error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_CREATE, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); return (error); } static int zfs_secpolicy_rename(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_rename_perms(zc->zc_name, zc->zc_value, cr)); } static int zfs_secpolicy_promote(zfs_cmd_t *zc, cred_t *cr) { char parentname[MAXNAMELEN]; objset_t *clone; int error; error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_PROMOTE, cr); if (error) return (error); error = dmu_objset_hold(zc->zc_name, FTAG, &clone); if (error == 0) { dsl_dataset_t *pclone = NULL; dsl_dir_t *dd; dd = clone->os_dsl_dataset->ds_dir; rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dd->dd_pool, dd->dd_phys->dd_origin_obj, FTAG, &pclone); rw_exit(&dd->dd_pool->dp_config_rwlock); if (error) { dmu_objset_rele(clone, FTAG); return (error); } error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_MOUNT, cr); dsl_dataset_name(pclone, parentname); dmu_objset_rele(clone, FTAG); dsl_dataset_rele(pclone, FTAG); if (error == 0) error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_PROMOTE, cr); } return (error); } static int zfs_secpolicy_receive(zfs_cmd_t *zc, cred_t *cr) { int error; if ((error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_RECEIVE, cr)) != 0) return (error); if ((error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_MOUNT, cr)) != 0) return (error); return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_CREATE, cr)); } int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr) { return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_SNAPSHOT, cr)); } static int zfs_secpolicy_snapshot(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_snapshot_perms(zc->zc_name, cr)); } static int zfs_secpolicy_create(zfs_cmd_t *zc, cred_t *cr) { char parentname[MAXNAMELEN]; int error; if ((error = zfs_get_parent(zc->zc_name, parentname, sizeof (parentname))) != 0) return (error); if (zc->zc_value[0] != '\0') { if ((error = zfs_secpolicy_write_perms(zc->zc_value, ZFS_DELEG_PERM_CLONE, cr)) != 0) return (error); } if ((error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_CREATE, cr)) != 0) return (error); error = zfs_secpolicy_write_perms(parentname, ZFS_DELEG_PERM_MOUNT, cr); return (error); } static int zfs_secpolicy_umount(zfs_cmd_t *zc, cred_t *cr) { int error; error = secpolicy_fs_unmount(cr, NULL); if (error) { error = dsl_deleg_access(zc->zc_name, ZFS_DELEG_PERM_MOUNT, cr); } return (error); } /* * Policy for pool operations - create/destroy pools, add vdevs, etc. Requires * SYS_CONFIG privilege, which is not available in a local zone. 
*/ /* ARGSUSED */ static int zfs_secpolicy_config(zfs_cmd_t *zc, cred_t *cr) { if (secpolicy_sys_config(cr, B_FALSE) != 0) return (EPERM); return (0); } /* * Policy for object to name lookups. */ /* ARGSUSED */ static int zfs_secpolicy_diff(zfs_cmd_t *zc, cred_t *cr) { int error; if ((error = secpolicy_sys_config(cr, B_FALSE)) == 0) return (0); error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr); return (error); } /* * Policy for fault injection. Requires all privileges. */ /* ARGSUSED */ static int zfs_secpolicy_inject(zfs_cmd_t *zc, cred_t *cr) { return (secpolicy_zinject(cr)); } static int zfs_secpolicy_inherit(zfs_cmd_t *zc, cred_t *cr) { zfs_prop_t prop = zfs_name_to_prop(zc->zc_value); if (prop == ZPROP_INVAL) { if (!zfs_prop_user(zc->zc_value)) return (EINVAL); return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_USERPROP, cr)); } else { return (zfs_secpolicy_setprop(zc->zc_name, prop, NULL, cr)); } } static int zfs_secpolicy_userspace_one(zfs_cmd_t *zc, cred_t *cr) { int err = zfs_secpolicy_read(zc, cr); if (err) return (err); if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (EINVAL); if (zc->zc_value[0] == 0) { /* * They are asking about a posix uid/gid. If it's * themself, allow it. */ if (zc->zc_objset_type == ZFS_PROP_USERUSED || zc->zc_objset_type == ZFS_PROP_USERQUOTA) { if (zc->zc_guid == crgetuid(cr)) return (0); } else { if (groupmember(zc->zc_guid, cr)) return (0); } } return (zfs_secpolicy_write_perms(zc->zc_name, userquota_perms[zc->zc_objset_type], cr)); } static int zfs_secpolicy_userspace_many(zfs_cmd_t *zc, cred_t *cr) { int err = zfs_secpolicy_read(zc, cr); if (err) return (err); if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (EINVAL); return (zfs_secpolicy_write_perms(zc->zc_name, userquota_perms[zc->zc_objset_type], cr)); } static int zfs_secpolicy_userspace_upgrade(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_setprop(zc->zc_name, ZFS_PROP_VERSION, NULL, cr)); } static int zfs_secpolicy_hold(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_HOLD, cr)); } static int zfs_secpolicy_release(zfs_cmd_t *zc, cred_t *cr) { return (zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_RELEASE, cr)); } /* * Policy for allowing temporary snapshots to be taken or released */ static int zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc, cred_t *cr) { /* * A temporary snapshot is the same as a snapshot, * hold, destroy and release all rolled into one. * Delegated diff alone is sufficient that we allow this. */ int error; if ((error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr)) == 0) return (0); error = zfs_secpolicy_snapshot(zc, cr); if (!error) error = zfs_secpolicy_hold(zc, cr); if (!error) error = zfs_secpolicy_release(zc, cr); if (!error) error = zfs_secpolicy_destroy(zc, cr); return (error); } /* * Returns the nvlist as specified by the user in the zfs_cmd_t. */ static int get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp) { char *packed; int error; nvlist_t *list = NULL; /* * Read in and unpack the user-supplied nvlist. 
*/ if (size == 0) return (EINVAL); packed = kmem_alloc(size, KM_SLEEP); if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size, iflag)) != 0) { kmem_free(packed, size); return (error); } if ((error = nvlist_unpack(packed, size, &list, 0)) != 0) { kmem_free(packed, size); return (error); } kmem_free(packed, size); *nvp = list; return (0); } static int fit_error_list(zfs_cmd_t *zc, nvlist_t **errors) { size_t size; VERIFY(nvlist_size(*errors, &size, NV_ENCODE_NATIVE) == 0); if (size > zc->zc_nvlist_dst_size) { nvpair_t *more_errors; int n = 0; if (zc->zc_nvlist_dst_size < 1024) return (ENOMEM); VERIFY(nvlist_add_int32(*errors, ZPROP_N_MORE_ERRORS, 0) == 0); more_errors = nvlist_prev_nvpair(*errors, NULL); do { nvpair_t *pair = nvlist_prev_nvpair(*errors, more_errors); VERIFY(nvlist_remove_nvpair(*errors, pair) == 0); n++; VERIFY(nvlist_size(*errors, &size, NV_ENCODE_NATIVE) == 0); } while (size > zc->zc_nvlist_dst_size); VERIFY(nvlist_remove_nvpair(*errors, more_errors) == 0); VERIFY(nvlist_add_int32(*errors, ZPROP_N_MORE_ERRORS, n) == 0); ASSERT(nvlist_size(*errors, &size, NV_ENCODE_NATIVE) == 0); ASSERT(size <= zc->zc_nvlist_dst_size); } return (0); } static int put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl) { char *packed = NULL; int error = 0; size_t size; VERIFY(nvlist_size(nvl, &size, NV_ENCODE_NATIVE) == 0); if (size > zc->zc_nvlist_dst_size) { /* * Solaris returns ENOMEM here, because even if an error is * returned from an ioctl(2), new zc_nvlist_dst_size will be * passed to the userland. This is not the case for FreeBSD. * We need to return 0, so the kernel will copy the * zc_nvlist_dst_size back and the userland can discover that a * bigger buffer is needed. */ error = 0; } else { packed = kmem_alloc(size, KM_SLEEP); VERIFY(nvlist_pack(nvl, &packed, &size, NV_ENCODE_NATIVE, KM_SLEEP) == 0); if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst, size, zc->zc_iflags) != 0) error = EFAULT; kmem_free(packed, size); } zc->zc_nvlist_dst_size = size; return (error); } static int getzfsvfs(const char *dsname, zfsvfs_t **zfvp) { objset_t *os; int error; error = dmu_objset_hold(dsname, FTAG, &os); if (error) return (error); if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele(os, FTAG); return (EINVAL); } mutex_enter(&os->os_user_ptr_lock); *zfvp = dmu_objset_get_user(os); if (*zfvp) { VFS_HOLD((*zfvp)->z_vfs); } else { error = ESRCH; } mutex_exit(&os->os_user_ptr_lock); dmu_objset_rele(os, FTAG); return (error); } /* * Find a zfsvfs_t for a mounted filesystem, or create our own, in which * case its z_vfs will be NULL, and it will be opened as the owner. */ static int zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer) { int error = 0; if (getzfsvfs(name, zfvp) != 0) error = zfsvfs_create(name, zfvp); if (error == 0) { rrw_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER : RW_READER, tag); if ((*zfvp)->z_unmounted) { /* * XXX we could probably try again, since the unmounting * thread should be just about to disassociate the * objset from the zfsvfs. 
*/ rrw_exit(&(*zfvp)->z_teardown_lock, tag); return (EBUSY); } } return (error); } static void zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag) { rrw_exit(&zfsvfs->z_teardown_lock, tag); if (zfsvfs->z_vfs) { VFS_RELE(zfsvfs->z_vfs); } else { dmu_objset_disown(zfsvfs->z_os, zfsvfs); zfsvfs_free(zfsvfs); } } static int zfs_ioc_pool_create(zfs_cmd_t *zc) { int error; nvlist_t *config, *props = NULL; nvlist_t *rootprops = NULL; nvlist_t *zplprops = NULL; char *buf; if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config)) return (error); if (zc->zc_nvlist_src_size != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) { nvlist_free(config); return (error); } if (props) { nvlist_t *nvl = NULL; uint64_t version = SPA_VERSION; (void) nvlist_lookup_uint64(props, zpool_prop_to_name(ZPOOL_PROP_VERSION), &version); if (version < SPA_VERSION_INITIAL || version > SPA_VERSION) { error = EINVAL; goto pool_props_bad; } (void) nvlist_lookup_nvlist(props, ZPOOL_ROOTFS_PROPS, &nvl); if (nvl) { error = nvlist_dup(nvl, &rootprops, KM_SLEEP); if (error != 0) { nvlist_free(config); nvlist_free(props); return (error); } (void) nvlist_remove_all(props, ZPOOL_ROOTFS_PROPS); } VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); error = zfs_fill_zplprops_root(version, rootprops, zplprops, NULL); if (error) goto pool_props_bad; } buf = history_str_get(zc); error = spa_create(zc->zc_name, config, props, buf, zplprops); /* * Set the remaining root properties */ if (!error && (error = zfs_set_prop_nvlist(zc->zc_name, ZPROP_SRC_LOCAL, rootprops, NULL)) != 0) (void) spa_destroy(zc->zc_name); if (buf != NULL) history_str_free(buf); pool_props_bad: nvlist_free(rootprops); nvlist_free(zplprops); nvlist_free(config); nvlist_free(props); return (error); } static int zfs_ioc_pool_destroy(zfs_cmd_t *zc) { int error; zfs_log_history(zc); error = spa_destroy(zc->zc_name); if (error == 0) zvol_remove_minors(zc->zc_name); return (error); } static int zfs_ioc_pool_import(zfs_cmd_t *zc) { nvlist_t *config, *props = NULL; uint64_t guid; int error; if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config)) != 0) return (error); if (zc->zc_nvlist_src_size != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) { nvlist_free(config); return (error); } if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 || guid != zc->zc_guid) error = EINVAL; else error = spa_import(zc->zc_name, config, props, zc->zc_cookie); if (zc->zc_nvlist_dst != 0) { int err; if ((err = put_nvlist(zc, config)) != 0) error = err; } nvlist_free(config); if (props) nvlist_free(props); return (error); } static int zfs_ioc_pool_export(zfs_cmd_t *zc) { int error; boolean_t force = (boolean_t)zc->zc_cookie; boolean_t hardforce = (boolean_t)zc->zc_guid; zfs_log_history(zc); error = spa_export(zc->zc_name, NULL, force, hardforce); if (error == 0) zvol_remove_minors(zc->zc_name); return (error); } static int zfs_ioc_pool_configs(zfs_cmd_t *zc) { nvlist_t *configs; int error; if ((configs = spa_all_configs(&zc->zc_cookie)) == NULL) return (EEXIST); error = put_nvlist(zc, configs); nvlist_free(configs); return (error); } static int zfs_ioc_pool_stats(zfs_cmd_t *zc) { nvlist_t *config; int error; int ret = 0; error = spa_get_stats(zc->zc_name, &config, zc->zc_value, sizeof (zc->zc_value)); if (config != NULL) { ret = put_nvlist(zc, config); nvlist_free(config); /* * The config may be present even if 'error' is 
non-zero. * In this case we return success, and preserve the real errno * in 'zc_cookie'. */ zc->zc_cookie = error; } else { ret = error; } return (ret); } /* * Try to import the given pool, returning pool stats as appropriate so that * user land knows which devices are available and overall pool health. */ static int zfs_ioc_pool_tryimport(zfs_cmd_t *zc) { nvlist_t *tryconfig, *config; int error; if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &tryconfig)) != 0) return (error); config = spa_tryimport(tryconfig); nvlist_free(tryconfig); if (config == NULL) return (EINVAL); error = put_nvlist(zc, config); nvlist_free(config); return (error); } /* * inputs: * zc_name name of the pool * zc_cookie scan func (pool_scan_func_t) */ static int zfs_ioc_pool_scan(zfs_cmd_t *zc) { spa_t *spa; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (zc->zc_cookie == POOL_SCAN_NONE) error = spa_scan_stop(spa); else error = spa_scan(spa, zc->zc_cookie); spa_close(spa, FTAG); return (error); } static int zfs_ioc_pool_freeze(zfs_cmd_t *zc) { spa_t *spa; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error == 0) { spa_freeze(spa); spa_close(spa, FTAG); } return (error); } static int zfs_ioc_pool_upgrade(zfs_cmd_t *zc) { spa_t *spa; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (zc->zc_cookie < spa_version(spa) || zc->zc_cookie > SPA_VERSION) { spa_close(spa, FTAG); return (EINVAL); } spa_upgrade(spa, zc->zc_cookie); spa_close(spa, FTAG); return (error); } static int zfs_ioc_pool_get_history(zfs_cmd_t *zc) { spa_t *spa; char *hist_buf; uint64_t size; int error; if ((size = zc->zc_history_len) == 0) return (EINVAL); if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) { spa_close(spa, FTAG); return (ENOTSUP); } hist_buf = kmem_alloc(size, KM_SLEEP); if ((error = spa_history_get(spa, &zc->zc_history_offset, &zc->zc_history_len, hist_buf)) == 0) { error = ddi_copyout(hist_buf, (void *)(uintptr_t)zc->zc_history, zc->zc_history_len, zc->zc_iflags); } spa_close(spa, FTAG); kmem_free(hist_buf, size); return (error); } static int zfs_ioc_pool_reguid(zfs_cmd_t *zc) { spa_t *spa; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error == 0) { error = spa_change_guid(spa); spa_close(spa, FTAG); } return (error); } static int zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc) { int error; if (error = dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value)) return (error); return (0); } /* * inputs: * zc_name name of filesystem * zc_obj object to find * * outputs: * zc_value name of object */ static int zfs_ioc_obj_to_path(zfs_cmd_t *zc) { objset_t *os; int error; /* XXX reading from objset not owned */ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)) != 0) return (error); if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele(os, FTAG); return (EINVAL); } error = zfs_obj_to_path(os, zc->zc_obj, zc->zc_value, sizeof (zc->zc_value)); dmu_objset_rele(os, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_obj object to find * * outputs: * zc_stat stats on object * zc_value path to object */ static int zfs_ioc_obj_to_stats(zfs_cmd_t *zc) { objset_t *os; int error; /* XXX reading from objset not owned */ if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)) != 0) return (error); if (dmu_objset_type(os) != DMU_OST_ZFS) { dmu_objset_rele(os, FTAG); return (EINVAL); } error = zfs_obj_to_stats(os, zc->zc_obj, &zc->zc_stat, 
zc->zc_value, sizeof (zc->zc_value)); dmu_objset_rele(os, FTAG); return (error); } static int zfs_ioc_vdev_add(zfs_cmd_t *zc) { spa_t *spa; int error; nvlist_t *config, **l2cache, **spares; uint_t nl2cache = 0, nspares = 0; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config); (void) nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache); (void) nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_SPARES, &spares, &nspares); /* * A root pool with concatenated devices is not supported. * Thus, a device cannot be added to a root pool. * * An intent log device cannot be added to a root pool either, * because the ZIL is replayed during mountroot and a separate * log device cannot be accessed at that time. * * l2cache and spare devices are ok to be added to a root pool. */ if (spa_bootfs(spa) != 0 && nl2cache == 0 && nspares == 0) { nvlist_free(config); spa_close(spa, FTAG); return (EDOM); } if (error == 0) { error = spa_vdev_add(spa, config); nvlist_free(config); } spa_close(spa, FTAG); return (error); } /* * inputs: * zc_name name of the pool * zc_nvlist_conf nvlist of devices to remove * zc_cookie to stop the remove? */ static int zfs_ioc_vdev_remove(zfs_cmd_t *zc) { spa_t *spa; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE); spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_set_state(zfs_cmd_t *zc) { spa_t *spa; int error; vdev_state_t newstate = VDEV_STATE_UNKNOWN; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); switch (zc->zc_cookie) { case VDEV_STATE_ONLINE: error = vdev_online(spa, zc->zc_guid, zc->zc_obj, &newstate); break; case VDEV_STATE_OFFLINE: error = vdev_offline(spa, zc->zc_guid, zc->zc_obj); break; case VDEV_STATE_FAULTED: if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED && zc->zc_obj != VDEV_AUX_EXTERNAL) zc->zc_obj = VDEV_AUX_ERR_EXCEEDED; error = vdev_fault(spa, zc->zc_guid, zc->zc_obj); break; case VDEV_STATE_DEGRADED: if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED && zc->zc_obj != VDEV_AUX_EXTERNAL) zc->zc_obj = VDEV_AUX_ERR_EXCEEDED; error = vdev_degrade(spa, zc->zc_guid, zc->zc_obj); break; default: error = EINVAL; } zc->zc_cookie = newstate; spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_attach(zfs_cmd_t *zc) { spa_t *spa; int replacing = zc->zc_cookie; nvlist_t *config; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config)) == 0) { error = spa_vdev_attach(spa, zc->zc_guid, config, replacing); nvlist_free(config); } spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_detach(zfs_cmd_t *zc) { spa_t *spa; int error; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); error = spa_vdev_detach(spa, zc->zc_guid, 0, B_FALSE); spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_split(zfs_cmd_t *zc) { spa_t *spa; nvlist_t *config, *props = NULL; int error; boolean_t exp = !!(zc->zc_cookie & ZPOOL_EXPORT_AFTER_SPLIT); if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size, zc->zc_iflags, &config)) { spa_close(spa, FTAG); return (error); } if (zc->zc_nvlist_src_size != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props))) { spa_close(spa, FTAG); 
nvlist_free(config); return (error); } error = spa_vdev_split_mirror(spa, zc->zc_string, config, props, exp); spa_close(spa, FTAG); nvlist_free(config); nvlist_free(props); return (error); } static int zfs_ioc_vdev_setpath(zfs_cmd_t *zc) { spa_t *spa; char *path = zc->zc_value; uint64_t guid = zc->zc_guid; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = spa_vdev_setpath(spa, guid, path); spa_close(spa, FTAG); return (error); } static int zfs_ioc_vdev_setfru(zfs_cmd_t *zc) { spa_t *spa; char *fru = zc->zc_value; uint64_t guid = zc->zc_guid; int error; error = spa_open(zc->zc_name, &spa, FTAG); if (error != 0) return (error); error = spa_vdev_setfru(spa, guid, fru); spa_close(spa, FTAG); return (error); } static int zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os) { int error = 0; nvlist_t *nv; dmu_objset_fast_stat(os, &zc->zc_objset_stats); if (zc->zc_nvlist_dst != 0 && (error = dsl_prop_get_all(os, &nv)) == 0) { dmu_objset_stats(os, nv); /* * NB: zvol_get_stats() will read the objset contents, * which we aren't supposed to do with a * DS_MODE_USER hold, because it could be * inconsistent. So this is a bit of a workaround... * XXX reading without owning */ if (!zc->zc_objset_stats.dds_inconsistent && dmu_objset_type(os) == DMU_OST_ZVOL) { error = zvol_get_stats(os, nv); if (error == EIO) return (error); VERIFY3S(error, ==, 0); } error = put_nvlist(zc, nv); nvlist_free(nv); } return (error); } /* * inputs: * zc_name name of filesystem * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_objset_stats stats * zc_nvlist_dst property nvlist * zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_objset_stats(zfs_cmd_t *zc) { objset_t *os = NULL; int error; if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) return (error); error = zfs_ioc_objset_stats_impl(zc, os); dmu_objset_rele(os, FTAG); if (error == ENOMEM) error = 0; return (error); } /* * inputs: * zc_name name of filesystem * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_nvlist_dst received property nvlist * zc_nvlist_dst_size size of received property nvlist * * Gets received properties (distinct from local properties on or after * SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from * local property values. */ static int zfs_ioc_objset_recvd_props(zfs_cmd_t *zc) { objset_t *os = NULL; int error; nvlist_t *nv; if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) return (error); /* * Without this check, we would return local property values if the * caller has not already received properties on or after * SPA_VERSION_RECVD_PROPS. */ if (!dsl_prop_get_hasrecvd(os)) { dmu_objset_rele(os, FTAG); return (ENOTSUP); } if (zc->zc_nvlist_dst != 0 && (error = dsl_prop_get_received(os, &nv)) == 0) { error = put_nvlist(zc, nv); nvlist_free(nv); } dmu_objset_rele(os, FTAG); return (error); } static int nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop) { uint64_t value; int error; /* * zfs_get_zplprop() will either find a value or give us * the default value (if there is one). 
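 * If zfs_get_zplprop() fails instead (no value and no default), that
 * error is returned and no entry is added; zfs_ioc_objset_zplprops()
 * below relies on this to abort its VERSION/NORMALIZE/UTF8ONLY/CASE
 * chain on the first failure.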
*/ if ((error = zfs_get_zplprop(os, prop, &value)) != 0) return (error); VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0); return (0); } /* * inputs: * zc_name name of filesystem * zc_nvlist_dst_size size of buffer for zpl property nvlist * * outputs: * zc_nvlist_dst zpl property nvlist * zc_nvlist_dst_size size of zpl property nvlist */ static int zfs_ioc_objset_zplprops(zfs_cmd_t *zc) { objset_t *os; int err; /* XXX reading without owning */ if (err = dmu_objset_hold(zc->zc_name, FTAG, &os)) return (err); dmu_objset_fast_stat(os, &zc->zc_objset_stats); /* * NB: nvl_add_zplprop() will read the objset contents, * which we aren't supposed to do with a DS_MODE_USER * hold, because it could be inconsistent. */ if (zc->zc_nvlist_dst != 0 && !zc->zc_objset_stats.dds_inconsistent && dmu_objset_type(os) == DMU_OST_ZFS) { nvlist_t *nv; VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 && (err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 && (err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 && (err = nvl_add_zplprop(os, nv, ZFS_PROP_CASE)) == 0) err = put_nvlist(zc, nv); nvlist_free(nv); } else { err = ENOENT; } dmu_objset_rele(os, FTAG); return (err); } boolean_t dataset_name_hidden(const char *name) { /* * Skip over datasets that are not visible in this zone, * internal datasets (which have a $ in their name), and * temporary datasets (which have a % in their name). */ if (strchr(name, '$') != NULL) return (B_TRUE); if (strchr(name, '%') != NULL) return (B_TRUE); if (!INGLOBALZONE(curthread) && !zone_dataset_visible(name, NULL)) return (B_TRUE); return (B_FALSE); } /* * inputs: * zc_name name of filesystem * zc_cookie zap cursor * zc_nvlist_dst_size size of buffer for property nvlist * * outputs: * zc_name name of next filesystem * zc_cookie zap cursor * zc_objset_stats stats * zc_nvlist_dst property nvlist * zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_dataset_list_next(zfs_cmd_t *zc) { objset_t *os; int error; char *p; size_t orig_len = strlen(zc->zc_name); top: if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) { if (error == ENOENT) error = ESRCH; return (error); } p = strrchr(zc->zc_name, '/'); if (p == NULL || p[1] != '\0') (void) strlcat(zc->zc_name, "/", sizeof (zc->zc_name)); p = zc->zc_name + strlen(zc->zc_name); /* * Pre-fetch the datasets. dmu_objset_prefetch() always returns 0 * but is not declared void because it's called by dmu_objset_find(). */ if (zc->zc_cookie == 0) { uint64_t cookie = 0; int len = sizeof (zc->zc_name) - (p - zc->zc_name); while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) { if (!dataset_name_hidden(zc->zc_name)) (void) dmu_objset_prefetch(zc->zc_name, NULL); } } do { error = dmu_dir_list_next(os, sizeof (zc->zc_name) - (p - zc->zc_name), p, NULL, &zc->zc_cookie); if (error == ENOENT) error = ESRCH; } while (error == 0 && dataset_name_hidden(zc->zc_name)); dmu_objset_rele(os, FTAG); /* * If it's an internal dataset (i.e. with a '$' in its name), * don't try to get stats for it, otherwise we'll return ENOENT. */ if (error == 0 && strchr(zc->zc_name, '$') == NULL) { error = zfs_ioc_objset_stats(zc); /* fill in the stats */ if (error == ENOENT) { /* We lost a race with destroy, get the next one. 
*/ zc->zc_name[orig_len] = '\0'; goto top; } } return (error); } /* * inputs: * zc_name name of filesystem * zc_cookie zap cursor * zc_nvlist_dst_size size of buffer for property nvlist * zc_simple when set, only name is requested * * outputs: * zc_name name of next snapshot * zc_objset_stats stats * zc_nvlist_dst property nvlist * zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_snapshot_list_next(zfs_cmd_t *zc) { objset_t *os; int error; top: if (snapshot_list_prefetch && zc->zc_cookie == 0 && !zc->zc_simple) (void) dmu_objset_find(zc->zc_name, dmu_objset_prefetch, NULL, DS_FIND_SNAPSHOTS); error = dmu_objset_hold(zc->zc_name, FTAG, &os); if (error) return (error == ENOENT ? ESRCH : error); /* * A dataset name of maximum length cannot have any snapshots, * so exit immediately. */ if (strlcat(zc->zc_name, "@", sizeof (zc->zc_name)) >= MAXNAMELEN) { dmu_objset_rele(os, FTAG); return (ESRCH); } error = dmu_snapshot_list_next(os, sizeof (zc->zc_name) - strlen(zc->zc_name), zc->zc_name + strlen(zc->zc_name), &zc->zc_obj, &zc->zc_cookie, NULL); if (error == 0 && !zc->zc_simple) { dsl_dataset_t *ds; dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool; /* * Since we probably don't have a hold on this snapshot, * it's possible that the objsetid could have been destroyed * and reused for a new objset. It's OK if this happens during * a zfs send operation, since the new createtxg will be * beyond the range we're interested in. */ rw_enter(&dp->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dp, zc->zc_obj, FTAG, &ds); rw_exit(&dp->dp_config_rwlock); if (error) { if (error == ENOENT) { /* Racing with destroy, get the next one. */ *strchr(zc->zc_name, '@') = '\0'; dmu_objset_rele(os, FTAG); goto top; } } else { objset_t *ossnap; error = dmu_objset_from_ds(ds, &ossnap); if (error == 0) error = zfs_ioc_objset_stats_impl(zc, ossnap); dsl_dataset_rele(ds, FTAG); } } else if (error == ENOENT) { error = ESRCH; } dmu_objset_rele(os, FTAG); /* if we failed, undo the @ that we tacked on to zc_name */ if (error) *strchr(zc->zc_name, '@') = '\0'; return (error); } static int zfs_prop_set_userquota(const char *dsname, nvpair_t *pair) { const char *propname = nvpair_name(pair); uint64_t *valary; unsigned int vallen; const char *domain; char *dash; zfs_userquota_prop_t type; uint64_t rid; uint64_t quota; zfsvfs_t *zfsvfs; int err; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair) != 0) return (EINVAL); } /* * A correctly constructed propname is encoded as * userquota@<rid>-<domain>. */ if ((dash = strchr(propname, '-')) == NULL || nvpair_value_uint64_array(pair, &valary, &vallen) != 0 || vallen != 3) return (EINVAL); domain = dash + 1; type = valary[0]; rid = valary[1]; quota = valary[2]; err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE); if (err == 0) { err = zfs_set_userquota(zfsvfs, type, domain, rid, quota); zfsvfs_rele(zfsvfs, FTAG); } return (err); } /* * If the named property is one that has a special function to set its value, * return 0 on success and a positive error code on failure; otherwise if it is * not one of the special properties handled by this function, return -1. * * XXX: It would be better for callers of the property interface if we handled * these special cases in dsl_prop.c (in the dsl layer). 
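 *
 * A caller is expected to dispatch on the return value; a minimal
 * sketch of the pattern (mirroring what zfs_set_prop_nvlist() below
 * actually does):
 *
 *	err = zfs_prop_set_special(dsname, source, pair);
 *	if (err == -1) {
 *		// not special: fall back to the generic
 *		// dsl_prop_set()/dsl_props_set() path
 *	} else if (err != 0) {
 *		// special property, but setting it failed
 *	}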
*/ static int zfs_prop_set_special(const char *dsname, zprop_source_t source, nvpair_t *pair) { const char *propname = nvpair_name(pair); zfs_prop_t prop = zfs_name_to_prop(propname); uint64_t intval; int err; if (prop == ZPROP_INVAL) { if (zfs_prop_userquota(propname)) return (zfs_prop_set_userquota(dsname, pair)); return (-1); } if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair) == 0); } if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) return (-1); VERIFY(0 == nvpair_value_uint64(pair, &intval)); switch (prop) { case ZFS_PROP_QUOTA: err = dsl_dir_set_quota(dsname, source, intval); break; case ZFS_PROP_REFQUOTA: err = dsl_dataset_set_quota(dsname, source, intval); break; case ZFS_PROP_RESERVATION: err = dsl_dir_set_reservation(dsname, source, intval); break; case ZFS_PROP_REFRESERVATION: err = dsl_dataset_set_reservation(dsname, source, intval); break; case ZFS_PROP_VOLSIZE: err = zvol_set_volsize(dsname, ddi_driver_major(zfs_dip), intval); break; case ZFS_PROP_VERSION: { zfsvfs_t *zfsvfs; if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0) break; err = zfs_set_version(zfsvfs, intval); zfsvfs_rele(zfsvfs, FTAG); if (err == 0 && intval >= ZPL_VERSION_USERSPACE) { zfs_cmd_t *zc; zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP); (void) strcpy(zc->zc_name, dsname); (void) zfs_ioc_userspace_upgrade(zc); kmem_free(zc, sizeof (zfs_cmd_t)); } break; } default: err = -1; } return (err); } /* * This function is best effort. If it fails to set any of the given properties, * it continues to set as many as it can and returns the first error * encountered. If the caller provides a non-NULL errlist, it also gives the * complete list of names of all the properties it failed to set along with the * corresponding error numbers. The caller is responsible for freeing the * returned errlist. * * If every property is set successfully, zero is returned and the list pointed * at by errlist is NULL. 
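 *
 * A minimal caller sketch (hypothetical dataset name and quota value,
 * error handling elided):
 *
 *	int rv;
 *	nvlist_t *props, *errs = NULL;
 *	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(props,
 *	    zfs_prop_to_name(ZFS_PROP_QUOTA), 10ULL << 30) == 0);
 *	rv = zfs_set_prop_nvlist("tank/home", ZPROP_SRC_LOCAL,
 *	    props, &errs);
 *	if (errs != NULL) {
 *		// walk the per-property errno values with
 *		// nvlist_next_nvpair() and report them
 *		nvlist_free(errs);
 *	}
 *	nvlist_free(props);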
*/ int zfs_set_prop_nvlist(const char *dsname, zprop_source_t source, nvlist_t *nvl, nvlist_t **errlist) { nvpair_t *pair; nvpair_t *propval; int rv = 0; uint64_t intval; char *strval; nvlist_t *genericnvl; nvlist_t *errors; nvlist_t *retrynvl; VERIFY(nvlist_alloc(&genericnvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0); VERIFY(nvlist_alloc(&retrynvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); retry: pair = NULL; while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) { const char *propname = nvpair_name(pair); zfs_prop_t prop = zfs_name_to_prop(propname); int err = 0; /* decode the property value */ propval = pair; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &propval) != 0) err = EINVAL; } /* Validate value type */ if (err == 0 && prop == ZPROP_INVAL) { if (zfs_prop_user(propname)) { if (nvpair_type(propval) != DATA_TYPE_STRING) err = EINVAL; } else if (zfs_prop_userquota(propname)) { if (nvpair_type(propval) != DATA_TYPE_UINT64_ARRAY) err = EINVAL; } else { err = EINVAL; } } else if (err == 0) { if (nvpair_type(propval) == DATA_TYPE_STRING) { if (zfs_prop_get_type(prop) != PROP_TYPE_STRING) err = EINVAL; } else if (nvpair_type(propval) == DATA_TYPE_UINT64) { const char *unused; VERIFY(nvpair_value_uint64(propval, &intval) == 0); switch (zfs_prop_get_type(prop)) { case PROP_TYPE_NUMBER: break; case PROP_TYPE_STRING: err = EINVAL; break; case PROP_TYPE_INDEX: if (zfs_prop_index_to_string(prop, intval, &unused) != 0) err = EINVAL; break; default: cmn_err(CE_PANIC, "unknown property type"); } } else { err = EINVAL; } } /* Validate permissions */ if (err == 0) err = zfs_check_settable(dsname, pair, CRED()); if (err == 0) { err = zfs_prop_set_special(dsname, source, pair); if (err == -1) { /* * For better performance we build up a list of * properties to set in a single transaction. */ err = nvlist_add_nvpair(genericnvl, pair); } else if (err != 0 && nvl != retrynvl) { /* * This may be a spurious error caused by * receiving quota and reservation out of order. * Try again in a second pass. */ err = nvlist_add_nvpair(retrynvl, pair); } } if (err != 0) VERIFY(nvlist_add_int32(errors, propname, err) == 0); } if (nvl != retrynvl && !nvlist_empty(retrynvl)) { nvl = retrynvl; goto retry; } if (!nvlist_empty(genericnvl) && dsl_props_set(dsname, source, genericnvl) != 0) { /* * If this fails, we still want to set as many properties as we * can, so try setting them individually. 
*/ pair = NULL; while ((pair = nvlist_next_nvpair(genericnvl, pair)) != NULL) { const char *propname = nvpair_name(pair); int err = 0; propval = pair; if (nvpair_type(pair) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &propval) == 0); } if (nvpair_type(propval) == DATA_TYPE_STRING) { VERIFY(nvpair_value_string(propval, &strval) == 0); err = dsl_prop_set(dsname, propname, source, 1, strlen(strval) + 1, strval); } else { VERIFY(nvpair_value_uint64(propval, &intval) == 0); err = dsl_prop_set(dsname, propname, source, 8, 1, &intval); } if (err != 0) { VERIFY(nvlist_add_int32(errors, propname, err) == 0); } } } nvlist_free(genericnvl); nvlist_free(retrynvl); if ((pair = nvlist_next_nvpair(errors, NULL)) == NULL) { nvlist_free(errors); errors = NULL; } else { VERIFY(nvpair_value_int32(pair, &rv) == 0); } if (errlist == NULL) nvlist_free(errors); else *errlist = errors; return (rv); } /* * Check that all the properties are valid user properties. */ static int zfs_check_userprops(char *fsname, nvlist_t *nvl) { nvpair_t *pair = NULL; int error = 0; while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) { const char *propname = nvpair_name(pair); char *valstr; if (!zfs_prop_user(propname) || nvpair_type(pair) != DATA_TYPE_STRING) return (EINVAL); if (error = zfs_secpolicy_write_perms(fsname, ZFS_DELEG_PERM_USERPROP, CRED())) return (error); if (strlen(propname) >= ZAP_MAXNAMELEN) return (ENAMETOOLONG); VERIFY(nvpair_value_string(pair, &valstr) == 0); if (strlen(valstr) >= ZAP_MAXVALUELEN) return (E2BIG); } return (0); } static void props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops) { nvpair_t *pair; VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); pair = NULL; while ((pair = nvlist_next_nvpair(props, pair)) != NULL) { if (nvlist_exists(skipped, nvpair_name(pair))) continue; VERIFY(nvlist_add_nvpair(*newprops, pair) == 0); } } static int clear_received_props(objset_t *os, const char *fs, nvlist_t *props, nvlist_t *skipped) { int err = 0; nvlist_t *cleared_props = NULL; props_skip(props, skipped, &cleared_props); if (!nvlist_empty(cleared_props)) { /* * Acts on local properties until the dataset has received * properties at least once on or after SPA_VERSION_RECVD_PROPS. */ zprop_source_t flags = (ZPROP_SRC_NONE | (dsl_prop_get_hasrecvd(os) ? ZPROP_SRC_RECEIVED : 0)); err = zfs_set_prop_nvlist(fs, flags, cleared_props, NULL); } nvlist_free(cleared_props); return (err); } /* * inputs: * zc_name name of filesystem * zc_value name of property to set * zc_nvlist_src{_size} nvlist of properties to apply * zc_cookie received properties flag * * outputs: * zc_nvlist_dst{_size} error for each unapplied received property */ static int zfs_ioc_set_prop(zfs_cmd_t *zc) { nvlist_t *nvl; boolean_t received = zc->zc_cookie; zprop_source_t source = (received ? 
ZPROP_SRC_RECEIVED : ZPROP_SRC_LOCAL); nvlist_t *errors = NULL; int error; if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &nvl)) != 0) return (error); if (received) { nvlist_t *origprops; objset_t *os; if (dmu_objset_hold(zc->zc_name, FTAG, &os) == 0) { if (dsl_prop_get_received(os, &origprops) == 0) { (void) clear_received_props(os, zc->zc_name, origprops, nvl); nvlist_free(origprops); } dsl_prop_set_hasrecvd(os); dmu_objset_rele(os, FTAG); } } error = zfs_set_prop_nvlist(zc->zc_name, source, nvl, &errors); if (zc->zc_nvlist_dst != 0 && errors != NULL) { (void) put_nvlist(zc, errors); } nvlist_free(errors); nvlist_free(nvl); return (error); } /* * inputs: * zc_name name of filesystem * zc_value name of property to inherit * zc_cookie revert to received value if TRUE * * outputs: none */ static int zfs_ioc_inherit_prop(zfs_cmd_t *zc) { const char *propname = zc->zc_value; zfs_prop_t prop = zfs_name_to_prop(propname); boolean_t received = zc->zc_cookie; zprop_source_t source = (received ? ZPROP_SRC_NONE /* revert to received value, if any */ : ZPROP_SRC_INHERITED); /* explicitly inherit */ if (received) { nvlist_t *dummy; nvpair_t *pair; zprop_type_t type; int err; /* * zfs_prop_set_special() expects properties in the form of an * nvpair with type info. */ if (prop == ZPROP_INVAL) { if (!zfs_prop_user(propname)) return (EINVAL); type = PROP_TYPE_STRING; } else if (prop == ZFS_PROP_VOLSIZE || prop == ZFS_PROP_VERSION) { return (EINVAL); } else { type = zfs_prop_get_type(prop); } VERIFY(nvlist_alloc(&dummy, NV_UNIQUE_NAME, KM_SLEEP) == 0); switch (type) { case PROP_TYPE_STRING: VERIFY(0 == nvlist_add_string(dummy, propname, "")); break; case PROP_TYPE_NUMBER: case PROP_TYPE_INDEX: VERIFY(0 == nvlist_add_uint64(dummy, propname, 0)); break; default: nvlist_free(dummy); return (EINVAL); } pair = nvlist_next_nvpair(dummy, NULL); err = zfs_prop_set_special(zc->zc_name, source, pair); nvlist_free(dummy); if (err != -1) return (err); /* special property already handled */ } else { /* * Only check this in the non-received case. We want to allow * 'inherit -S' to revert non-inheritable properties like quota * and reservation to the received or default values even though * they are not considered inheritable. */ if (prop != ZPROP_INVAL && !zfs_prop_inheritable(prop)) return (EINVAL); } /* the property name has been validated by zfs_secpolicy_inherit() */ return (dsl_prop_set(zc->zc_name, zc->zc_value, source, 0, 0, NULL)); } static int zfs_ioc_pool_set_props(zfs_cmd_t *zc) { nvlist_t *props; spa_t *spa; int error; nvpair_t *pair; if (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props)) return (error); /* * If the only property is the configfile, then just do a spa_lookup() * to handle the faulted case. 
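 * (This path is presumably what lets something like
 * "zpool set cachefile=none <pool>" take effect even on a faulted
 * pool that spa_open() would refuse to open.)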
*/ pair = nvlist_next_nvpair(props, NULL); if (pair != NULL && strcmp(nvpair_name(pair), zpool_prop_to_name(ZPOOL_PROP_CACHEFILE)) == 0 && nvlist_next_nvpair(props, pair) == NULL) { mutex_enter(&spa_namespace_lock); if ((spa = spa_lookup(zc->zc_name)) != NULL) { spa_configfile_set(spa, props, B_FALSE); spa_config_sync(spa, B_FALSE, B_TRUE); } mutex_exit(&spa_namespace_lock); if (spa != NULL) { nvlist_free(props); return (0); } } if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) { nvlist_free(props); return (error); } error = spa_prop_set(spa, props); nvlist_free(props); spa_close(spa, FTAG); return (error); } static int zfs_ioc_pool_get_props(zfs_cmd_t *zc) { spa_t *spa; int error; nvlist_t *nvp = NULL; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) { /* * If the pool is faulted, there may be properties we can still * get (such as altroot and cachefile), so attempt to get them * anyway. */ mutex_enter(&spa_namespace_lock); if ((spa = spa_lookup(zc->zc_name)) != NULL) error = spa_prop_get(spa, &nvp); mutex_exit(&spa_namespace_lock); } else { error = spa_prop_get(spa, &nvp); spa_close(spa, FTAG); } if (error == 0 && zc->zc_nvlist_dst != 0) error = put_nvlist(zc, nvp); else error = EFAULT; nvlist_free(nvp); return (error); } /* * inputs: * zc_name name of filesystem * zc_nvlist_src{_size} nvlist of delegated permissions * zc_perm_action allow/unallow flag * * outputs: none */ static int zfs_ioc_set_fsacl(zfs_cmd_t *zc) { int error; nvlist_t *fsaclnv = NULL; if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &fsaclnv)) != 0) return (error); /* * Verify nvlist is constructed correctly */ if ((error = zfs_deleg_verify_nvlist(fsaclnv)) != 0) { nvlist_free(fsaclnv); return (EINVAL); } /* * If we don't have PRIV_SYS_MOUNT, then validate * that user is allowed to hand out each permission in * the nvlist(s) */ error = secpolicy_zfs(CRED()); if (error) { if (zc->zc_perm_action == B_FALSE) { error = dsl_deleg_can_allow(zc->zc_name, fsaclnv, CRED()); } else { error = dsl_deleg_can_unallow(zc->zc_name, fsaclnv, CRED()); } } if (error == 0) error = dsl_deleg_set(zc->zc_name, fsaclnv, zc->zc_perm_action); nvlist_free(fsaclnv); return (error); } /* * inputs: * zc_name name of filesystem * * outputs: * zc_nvlist_src{_size} nvlist of delegated permissions */ static int zfs_ioc_get_fsacl(zfs_cmd_t *zc) { nvlist_t *nvp; int error; if ((error = dsl_deleg_get(zc->zc_name, &nvp)) == 0) { error = put_nvlist(zc, nvp); nvlist_free(nvp); } return (error); } /* * Search the vfs list for a specified resource. Returns a pointer to it * or NULL if no suitable entry is found. The caller of this routine * is responsible for releasing the returned vfs pointer. */ static vfs_t * zfs_get_vfs(const char *resource) { vfs_t *vfsp; mtx_lock(&mountlist_mtx); TAILQ_FOREACH(vfsp, &mountlist, mnt_list) { if (strcmp(refstr_value(vfsp->vfs_resource), resource) == 0) { VFS_HOLD(vfsp); break; } } mtx_unlock(&mountlist_mtx); return (vfsp); } /* ARGSUSED */ static void zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) { zfs_creat_t *zct = arg; zfs_create_fs(os, cr, zct->zct_zplprops, tx); } #define ZFS_PROP_UNDEFINED ((uint64_t)-1) /* * inputs: * createprops list of properties requested by creator * default_zplver zpl version to use if unspecified in createprops * fuids_ok fuids allowed in this version of the spa? 
* os parent objset pointer (NULL if root fs) * * outputs: * zplprops values for the zplprops we attach to the master node object * is_ci true if requested file system will be purely case-insensitive * * Determine the settings for utf8only, normalization and * casesensitivity. Specific values may have been requested by the * creator and/or we can inherit values from the parent dataset. If * the file system is of too early a vintage, a creator cannot * request settings for these properties, even if the requested * setting is the default value. We don't actually want to create dsl * properties for these, so remove them from the source nvlist after * processing. */ static int zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver, boolean_t fuids_ok, boolean_t sa_ok, nvlist_t *createprops, nvlist_t *zplprops, boolean_t *is_ci) { uint64_t sense = ZFS_PROP_UNDEFINED; uint64_t norm = ZFS_PROP_UNDEFINED; uint64_t u8 = ZFS_PROP_UNDEFINED; ASSERT(zplprops != NULL); /* * Pull out creator prop choices, if any. */ if (createprops) { (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_VERSION), &zplver); (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_NORMALIZE), &norm); (void) nvlist_remove_all(createprops, zfs_prop_to_name(ZFS_PROP_NORMALIZE)); (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_UTF8ONLY), &u8); (void) nvlist_remove_all(createprops, zfs_prop_to_name(ZFS_PROP_UTF8ONLY)); (void) nvlist_lookup_uint64(createprops, zfs_prop_to_name(ZFS_PROP_CASE), &sense); (void) nvlist_remove_all(createprops, zfs_prop_to_name(ZFS_PROP_CASE)); } /* * If the zpl version requested is whacky or the file system * or pool version is too "young" to support normalization * and the creator tried to set a value for one of the props, * error out. */ if ((zplver < ZPL_VERSION_INITIAL || zplver > ZPL_VERSION) || (zplver >= ZPL_VERSION_FUID && !fuids_ok) || (zplver >= ZPL_VERSION_SA && !sa_ok) || (zplver < ZPL_VERSION_NORMALIZATION && (norm != ZFS_PROP_UNDEFINED || u8 != ZFS_PROP_UNDEFINED || sense != ZFS_PROP_UNDEFINED))) return (ENOTSUP); /* * Put the version in the zplprops */ VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0); if (norm == ZFS_PROP_UNDEFINED) VERIFY(zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm) == 0); VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0); /* * If we're normalizing, names must always be valid UTF-8 strings. 
*/ if (norm) u8 = 1; if (u8 == ZFS_PROP_UNDEFINED) VERIFY(zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8) == 0); VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0); if (sense == ZFS_PROP_UNDEFINED) VERIFY(zfs_get_zplprop(os, ZFS_PROP_CASE, &sense) == 0); VERIFY(nvlist_add_uint64(zplprops, zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0); if (is_ci) *is_ci = (sense == ZFS_CASE_INSENSITIVE); return (0); } static int zfs_fill_zplprops(const char *dataset, nvlist_t *createprops, nvlist_t *zplprops, boolean_t *is_ci) { boolean_t fuids_ok, sa_ok; uint64_t zplver = ZPL_VERSION; objset_t *os = NULL; char parentname[MAXNAMELEN]; char *cp; spa_t *spa; uint64_t spa_vers; int error; (void) strlcpy(parentname, dataset, sizeof (parentname)); cp = strrchr(parentname, '/'); ASSERT(cp != NULL); cp[0] = '\0'; if ((error = spa_open(dataset, &spa, FTAG)) != 0) return (error); spa_vers = spa_version(spa); spa_close(spa, FTAG); zplver = zfs_zpl_version_map(spa_vers); fuids_ok = (zplver >= ZPL_VERSION_FUID); sa_ok = (zplver >= ZPL_VERSION_SA); /* * Open parent object set so we can inherit zplprop values. */ if ((error = dmu_objset_hold(parentname, FTAG, &os)) != 0) return (error); error = zfs_fill_zplprops_impl(os, zplver, fuids_ok, sa_ok, createprops, zplprops, is_ci); dmu_objset_rele(os, FTAG); return (error); } static int zfs_fill_zplprops_root(uint64_t spa_vers, nvlist_t *createprops, nvlist_t *zplprops, boolean_t *is_ci) { boolean_t fuids_ok; boolean_t sa_ok; uint64_t zplver = ZPL_VERSION; int error; zplver = zfs_zpl_version_map(spa_vers); fuids_ok = (zplver >= ZPL_VERSION_FUID); sa_ok = (zplver >= ZPL_VERSION_SA); error = zfs_fill_zplprops_impl(NULL, zplver, fuids_ok, sa_ok, createprops, zplprops, is_ci); return (error); } /* * inputs: * zc_objset_type type of objset to create (fs vs zvol) * zc_name name of new objset * zc_value name of snapshot to clone from (may be empty) * zc_nvlist_src{_size} nvlist of properties to apply * * outputs: none */ static int zfs_ioc_create(zfs_cmd_t *zc) { objset_t *clone; int error = 0; zfs_creat_t zct; nvlist_t *nvprops = NULL; void (*cbfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); dmu_objset_type_t type = zc->zc_objset_type; switch (type) { case DMU_OST_ZFS: cbfunc = zfs_create_cb; break; case DMU_OST_ZVOL: cbfunc = zvol_create_cb; break; default: cbfunc = NULL; break; } if (strchr(zc->zc_name, '@') || strchr(zc->zc_name, '%')) return (EINVAL); if (zc->zc_nvlist_src != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &nvprops)) != 0) return (error); zct.zct_zplprops = NULL; zct.zct_props = nvprops; if (zc->zc_value[0] != '\0') { /* * We're creating a clone of an existing snapshot. 
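 * (i.e. the path taken for something like "zfs clone tank/fs@snap
 * tank/clone", where zc_value names the origin snapshot and zc_name
 * the new clone; the dataset names here are hypothetical).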
*/ zc->zc_value[sizeof (zc->zc_value) - 1] = '\0'; if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0) { nvlist_free(nvprops); return (EINVAL); } error = dmu_objset_hold(zc->zc_value, FTAG, &clone); if (error) { nvlist_free(nvprops); return (error); } error = dmu_objset_clone(zc->zc_name, dmu_objset_ds(clone), 0); dmu_objset_rele(clone, FTAG); if (error) { nvlist_free(nvprops); return (error); } } else { boolean_t is_insensitive = B_FALSE; if (cbfunc == NULL) { nvlist_free(nvprops); return (EINVAL); } if (type == DMU_OST_ZVOL) { uint64_t volsize, volblocksize; if (nvprops == NULL || nvlist_lookup_uint64(nvprops, zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) != 0) { nvlist_free(nvprops); return (EINVAL); } if ((error = nvlist_lookup_uint64(nvprops, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize)) != 0 && error != ENOENT) { nvlist_free(nvprops); return (EINVAL); } if (error != 0) volblocksize = zfs_prop_default_numeric( ZFS_PROP_VOLBLOCKSIZE); if ((error = zvol_check_volblocksize( volblocksize)) != 0 || (error = zvol_check_volsize(volsize, volblocksize)) != 0) { nvlist_free(nvprops); return (error); } } else if (type == DMU_OST_ZFS) { int error; /* * We have to have normalization and * case-folding flags correct when we do the * file system creation, so go figure them out * now. */ VERIFY(nvlist_alloc(&zct.zct_zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0); error = zfs_fill_zplprops(zc->zc_name, nvprops, zct.zct_zplprops, &is_insensitive); if (error != 0) { nvlist_free(nvprops); nvlist_free(zct.zct_zplprops); return (error); } } error = dmu_objset_create(zc->zc_name, type, is_insensitive ? DS_FLAG_CI_DATASET : 0, cbfunc, &zct); nvlist_free(zct.zct_zplprops); } /* * It would be nice to do this atomically. */ if (error == 0) { error = zfs_set_prop_nvlist(zc->zc_name, ZPROP_SRC_LOCAL, nvprops, NULL); if (error != 0) (void) dmu_objset_destroy(zc->zc_name, B_FALSE); } nvlist_free(nvprops); #ifdef __FreeBSD__ if (error == 0 && type == DMU_OST_ZVOL) zvol_create_minors(zc->zc_name); #endif return (error); } /* * inputs: * zc_name name of filesystem * zc_value short name of snapshot * zc_cookie recursive flag * zc_nvlist_src[_size] property list * * outputs: * zc_value short snapname (i.e. part after the '@') */ static int zfs_ioc_snapshot(zfs_cmd_t *zc) { nvlist_t *nvprops = NULL; int error; boolean_t recursive = zc->zc_cookie; if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0) return (EINVAL); if (zc->zc_nvlist_src != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &nvprops)) != 0) return (error); error = zfs_check_userprops(zc->zc_name, nvprops); if (error) goto out; if (!nvlist_empty(nvprops) && zfs_earlier_version(zc->zc_name, SPA_VERSION_SNAP_PROPS)) { error = ENOTSUP; goto out; } error = dmu_objset_snapshot(zc->zc_name, zc->zc_value, NULL, nvprops, recursive, B_FALSE, -1); out: nvlist_free(nvprops); return (error); } int zfs_unmount_snap(const char *name, void *arg) { vfs_t *vfsp = NULL; if (arg) { char *snapname = arg; char *fullname = kmem_asprintf("%s@%s", name, snapname); vfsp = zfs_get_vfs(fullname); strfree(fullname); } else if (strchr(name, '@')) { vfsp = zfs_get_vfs(name); } if (vfsp) { /* * Always force the unmount for snapshots. 
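 * (Snapshots are mounted read-only, so a forced unmount cannot
 * discard dirty user data; it only revokes existing references,
 * which would otherwise make destroy and rename fail with EBUSY.)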
*/ int flag = MS_FORCE; int err; if ((err = vn_vfswlock(vfsp->vfs_vnodecovered)) != 0) { VFS_RELE(vfsp); return (err); } VFS_RELE(vfsp); mtx_lock(&Giant); /* dounmount() */ dounmount(vfsp, flag, curthread); mtx_unlock(&Giant); /* dounmount() */ } return (0); } /* * inputs: * zc_name name of filesystem, snaps must be under it * zc_nvlist_src[_size] full names of snapshots to destroy * zc_defer_destroy mark for deferred destroy * * outputs: * zc_name on failure, name of failed snapshot */ static int zfs_ioc_destroy_snaps_nvl(zfs_cmd_t *zc) { int err, len; nvlist_t *nvl; nvpair_t *pair; if ((err = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &nvl)) != 0) { #ifndef __FreeBSD__ return (err); #else /* * We are probably called by older binaries, * allocate and populate nvlist with recursive snapshots */ if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0) return (EINVAL); VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); err = dmu_get_recursive_snaps_nvl(zc->zc_name, zc->zc_value, nvl); if (err) { nvlist_free(nvl); return (err); } #endif /* __FreeBSD__ */ } len = strlen(zc->zc_name); for (pair = nvlist_next_nvpair(nvl, NULL); pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) { const char *name = nvpair_name(pair); /* * The snap name must be underneath the zc_name. This ensures * that our permission checks were legitimate. */ if (strncmp(zc->zc_name, name, len) != 0 || (name[len] != '@' && name[len] != '/')) { nvlist_free(nvl); return (EINVAL); } (void) zfs_unmount_snap(name, NULL); } err = dmu_snapshots_destroy_nvl(nvl, zc->zc_defer_destroy, zc->zc_name); nvlist_free(nvl); return (err); } /* * inputs: * zc_name name of dataset to destroy * zc_objset_type type of objset * zc_defer_destroy mark for deferred destroy * * outputs: none */ static int zfs_ioc_destroy(zfs_cmd_t *zc) { int err; if (strchr(zc->zc_name, '@') && zc->zc_objset_type == DMU_OST_ZFS) { err = zfs_unmount_snap(zc->zc_name, NULL); if (err) return (err); } err = dmu_objset_destroy(zc->zc_name, zc->zc_defer_destroy); if (zc->zc_objset_type == DMU_OST_ZVOL && err == 0) (void) zvol_remove_minor(zc->zc_name); return (err); } /* * inputs: * zc_name name of dataset to rollback (to most recent snapshot) * * outputs: none */ static int zfs_ioc_rollback(zfs_cmd_t *zc) { dsl_dataset_t *ds, *clone; int error; zfsvfs_t *zfsvfs; char *clone_name; error = dsl_dataset_hold(zc->zc_name, FTAG, &ds); if (error) return (error); /* must not be a snapshot */ if (dsl_dataset_is_snapshot(ds)) { dsl_dataset_rele(ds, FTAG); return (EINVAL); } /* must have a most recent snapshot */ if (ds->ds_phys->ds_prev_snap_txg < TXG_INITIAL) { dsl_dataset_rele(ds, FTAG); return (EINVAL); } /* * Create clone of most recent snapshot. */ clone_name = kmem_asprintf("%s/%%rollback", zc->zc_name); error = dmu_objset_clone(clone_name, ds->ds_prev, DS_FLAG_INCONSISTENT); if (error) goto out; error = dsl_dataset_own(clone_name, B_TRUE, FTAG, &clone); if (error) goto out; /* * Do clone swap. */ if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) { error = zfs_suspend_fs(zfsvfs); if (error == 0) { int resume_err; if (dsl_dataset_tryown(ds, B_FALSE, FTAG)) { error = dsl_dataset_clone_swap(clone, ds, B_TRUE); dsl_dataset_disown(ds, FTAG); ds = NULL; } else { error = EBUSY; } resume_err = zfs_resume_fs(zfsvfs, zc->zc_name); error = error ? 
error : resume_err; } VFS_RELE(zfsvfs->z_vfs); } else { if (dsl_dataset_tryown(ds, B_FALSE, FTAG)) { error = dsl_dataset_clone_swap(clone, ds, B_TRUE); dsl_dataset_disown(ds, FTAG); ds = NULL; } else { error = EBUSY; } } /* * Destroy clone (which also closes it). */ (void) dsl_dataset_destroy(clone, FTAG, B_FALSE); out: strfree(clone_name); if (ds) dsl_dataset_rele(ds, FTAG); return (error); } /* * inputs: * zc_name old name of dataset * zc_value new name of dataset * zc_cookie recursive flag (only valid for snapshots) * * outputs: none */ static int zfs_ioc_rename(zfs_cmd_t *zc) { int flags = 0; if (zc->zc_cookie & 1) flags |= ZFS_RENAME_RECURSIVE; if (zc->zc_cookie & 2) flags |= ZFS_RENAME_ALLOW_MOUNTED; zc->zc_value[sizeof (zc->zc_value) - 1] = '\0'; if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 || strchr(zc->zc_value, '%')) return (EINVAL); /* * Unmount snapshot unless we're doing a recursive rename, * in which case the dataset code figures out which snapshots * to unmount. */ if (!(flags & ZFS_RENAME_RECURSIVE) && strchr(zc->zc_name, '@') != NULL && zc->zc_objset_type == DMU_OST_ZFS) { int err = zfs_unmount_snap(zc->zc_name, NULL); if (err) return (err); } return (dmu_objset_rename(zc->zc_name, zc->zc_value, flags)); } static int zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr) { const char *propname = nvpair_name(pair); boolean_t issnap = (strchr(dsname, '@') != NULL); zfs_prop_t prop = zfs_name_to_prop(propname); uint64_t intval; int err; if (prop == ZPROP_INVAL) { if (zfs_prop_user(propname)) { if (err = zfs_secpolicy_write_perms(dsname, ZFS_DELEG_PERM_USERPROP, cr)) return (err); return (0); } if (!issnap && zfs_prop_userquota(propname)) { const char *perm = NULL; const char *uq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA]; const char *gq_prefix = zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA]; if (strncmp(propname, uq_prefix, strlen(uq_prefix)) == 0) { perm = ZFS_DELEG_PERM_USERQUOTA; } else if (strncmp(propname, gq_prefix, strlen(gq_prefix)) == 0) { perm = ZFS_DELEG_PERM_GROUPQUOTA; } else { /* USERUSED and GROUPUSED are read-only */ return (EINVAL); } if (err = zfs_secpolicy_write_perms(dsname, perm, cr)) return (err); return (0); } return (EINVAL); } if (issnap) return (EINVAL); if (nvpair_type(pair) == DATA_TYPE_NVLIST) { /* * dsl_prop_get_all_impl() returns properties in this * format. */ nvlist_t *attrs; VERIFY(nvpair_value_nvlist(pair, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair) == 0); } /* * Check that this value is valid for this pool version */ switch (prop) { case ZFS_PROP_COMPRESSION: /* * If the user specified gzip compression, make sure * the SPA supports it. We ignore any errors here since * we'll catch them later. */ if (nvpair_type(pair) == DATA_TYPE_UINT64 && nvpair_value_uint64(pair, &intval) == 0) { if (intval >= ZIO_COMPRESS_GZIP_1 && intval <= ZIO_COMPRESS_GZIP_9 && zfs_earlier_version(dsname, SPA_VERSION_GZIP_COMPRESSION)) { return (ENOTSUP); } if (intval == ZIO_COMPRESS_ZLE && zfs_earlier_version(dsname, SPA_VERSION_ZLE_COMPRESSION)) return (ENOTSUP); /* * If this is a bootable dataset then * verify that the compression algorithm * is supported for booting. We must return * something other than ENOTSUP since it * implies a downrev pool version. 
*/ if (zfs_is_bootfs(dsname) && !BOOTFS_COMPRESS_VALID(intval)) { return (ERANGE); } } break; case ZFS_PROP_COPIES: if (zfs_earlier_version(dsname, SPA_VERSION_DITTO_BLOCKS)) return (ENOTSUP); break; case ZFS_PROP_DEDUP: if (zfs_earlier_version(dsname, SPA_VERSION_DEDUP)) return (ENOTSUP); break; case ZFS_PROP_SHARESMB: if (zpl_earlier_version(dsname, ZPL_VERSION_FUID)) return (ENOTSUP); break; case ZFS_PROP_ACLINHERIT: if (nvpair_type(pair) == DATA_TYPE_UINT64 && nvpair_value_uint64(pair, &intval) == 0) { if (intval == ZFS_ACL_PASSTHROUGH_X && zfs_earlier_version(dsname, SPA_VERSION_PASSTHROUGH_X)) return (ENOTSUP); } break; } return (zfs_secpolicy_setprop(dsname, prop, pair, CRED())); } /* * Removes properties from the given props list that fail permission checks * needed to clear them and to restore them in case of a receive error. For each * property, make sure we have both set and inherit permissions. * * Returns the first error encountered if any permission checks fail. If the * caller provides a non-NULL errlist, it also gives the complete list of names * of all the properties that failed a permission check along with the * corresponding error numbers. The caller is responsible for freeing the * returned errlist. * * If every property checks out successfully, zero is returned and the list * pointed at by errlist is NULL. */ static int zfs_check_clearable(char *dataset, nvlist_t *props, nvlist_t **errlist) { zfs_cmd_t *zc; nvpair_t *pair, *next_pair; nvlist_t *errors; int err, rv = 0; if (props == NULL) return (0); VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0); zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP); (void) strcpy(zc->zc_name, dataset); pair = nvlist_next_nvpair(props, NULL); while (pair != NULL) { next_pair = nvlist_next_nvpair(props, pair); (void) strcpy(zc->zc_value, nvpair_name(pair)); if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 || (err = zfs_secpolicy_inherit(zc, CRED())) != 0) { VERIFY(nvlist_remove_nvpair(props, pair) == 0); VERIFY(nvlist_add_int32(errors, zc->zc_value, err) == 0); } pair = next_pair; } kmem_free(zc, sizeof (zfs_cmd_t)); if ((pair = nvlist_next_nvpair(errors, NULL)) == NULL) { nvlist_free(errors); errors = NULL; } else { VERIFY(nvpair_value_int32(pair, &rv) == 0); } if (errlist == NULL) nvlist_free(errors); else *errlist = errors; return (rv); } static boolean_t propval_equals(nvpair_t *p1, nvpair_t *p2) { if (nvpair_type(p1) == DATA_TYPE_NVLIST) { /* dsl_prop_get_all_impl() format */ nvlist_t *attrs; VERIFY(nvpair_value_nvlist(p1, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p1) == 0); } if (nvpair_type(p2) == DATA_TYPE_NVLIST) { nvlist_t *attrs; VERIFY(nvpair_value_nvlist(p2, &attrs) == 0); VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p2) == 0); } if (nvpair_type(p1) != nvpair_type(p2)) return (B_FALSE); if (nvpair_type(p1) == DATA_TYPE_STRING) { char *valstr1, *valstr2; VERIFY(nvpair_value_string(p1, (char **)&valstr1) == 0); VERIFY(nvpair_value_string(p2, (char **)&valstr2) == 0); return (strcmp(valstr1, valstr2) == 0); } else { uint64_t intval1, intval2; VERIFY(nvpair_value_uint64(p1, &intval1) == 0); VERIFY(nvpair_value_uint64(p2, &intval2) == 0); return (intval1 == intval2); } } /* * Remove properties from props if they are not going to change (as determined * by comparison with origprops). Remove them from origprops as well, since we * do not need to clear or restore properties that won't change. 
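 *
 * For example, if props = { compression=on, atime=off } and origprops
 * records a received compression=on, then compression is dropped from
 * both lists: there is nothing to receive for it and nothing to clear
 * or restore. atime stays in props because its value differs.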
*/ static void props_reduce(nvlist_t *props, nvlist_t *origprops) { nvpair_t *pair, *next_pair; if (origprops == NULL) return; /* all props need to be received */ pair = nvlist_next_nvpair(props, NULL); while (pair != NULL) { const char *propname = nvpair_name(pair); nvpair_t *match; next_pair = nvlist_next_nvpair(props, pair); if ((nvlist_lookup_nvpair(origprops, propname, &match) != 0) || !propval_equals(pair, match)) goto next; /* need to set received value */ /* don't clear the existing received value */ (void) nvlist_remove_nvpair(origprops, match); /* don't bother receiving the property */ (void) nvlist_remove_nvpair(props, pair); next: pair = next_pair; } } #ifdef DEBUG static boolean_t zfs_ioc_recv_inject_err; #endif /* * inputs: * zc_name name of containing filesystem * zc_nvlist_src{_size} nvlist of properties to apply * zc_value name of snapshot to create * zc_string name of clone origin (if DRR_FLAG_CLONE) * zc_cookie file descriptor to recv from * zc_begin_record the BEGIN record of the stream (not byteswapped) * zc_guid force flag * zc_cleanup_fd cleanup-on-exit file descriptor * zc_action_handle handle for this guid/ds mapping (or zero on first call) * * outputs: * zc_cookie number of bytes read * zc_nvlist_dst{_size} error for each unapplied received property * zc_obj zprop_errflags_t * zc_action_handle handle for this guid/ds mapping */ static int zfs_ioc_recv(zfs_cmd_t *zc) { file_t *fp; objset_t *os; dmu_recv_cookie_t drc; boolean_t force = (boolean_t)zc->zc_guid; int fd; int error = 0; int props_error = 0; nvlist_t *errors; offset_t off; nvlist_t *props = NULL; /* sent properties */ nvlist_t *origprops = NULL; /* existing properties */ objset_t *origin = NULL; char *tosnap; char tofs[ZFS_MAXNAMELEN]; boolean_t first_recvd_props = B_FALSE; if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 || strchr(zc->zc_value, '@') == NULL || strchr(zc->zc_value, '%')) return (EINVAL); (void) strcpy(tofs, zc->zc_value); tosnap = strchr(tofs, '@'); *tosnap++ = '\0'; if (zc->zc_nvlist_src != 0 && (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &props)) != 0) return (error); fd = zc->zc_cookie; fp = getf(fd); if (fp == NULL) { nvlist_free(props); return (EBADF); } VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0); if (props && dmu_objset_hold(tofs, FTAG, &os) == 0) { if ((spa_version(os->os_spa) >= SPA_VERSION_RECVD_PROPS) && !dsl_prop_get_hasrecvd(os)) { first_recvd_props = B_TRUE; } /* * If new received properties are supplied, they are to * completely replace the existing received properties, so stash * away the existing ones. */ if (dsl_prop_get_received(os, &origprops) == 0) { nvlist_t *errlist = NULL; /* * Don't bother writing a property if its value won't * change (and avoid the unnecessary security checks). * * The first receive after SPA_VERSION_RECVD_PROPS is a * special case where we blow away all local properties * regardless. */ if (!first_recvd_props) props_reduce(props, origprops); if (zfs_check_clearable(tofs, origprops, &errlist) != 0) (void) nvlist_merge(errors, errlist, 0); nvlist_free(errlist); } dmu_objset_rele(os, FTAG); } if (zc->zc_string[0]) { error = dmu_objset_hold(zc->zc_string, FTAG, &origin); if (error) goto out; } error = dmu_recv_begin(tofs, tosnap, zc->zc_top_ds, &zc->zc_begin_record, force, origin, &drc); if (origin) dmu_objset_rele(origin, FTAG); if (error) goto out; /* * Set properties before we receive the stream so that they are applied * to the new data. 
Note that we must call dmu_recv_stream() if * dmu_recv_begin() succeeds. */ if (props) { nvlist_t *errlist; if (dmu_objset_from_ds(drc.drc_logical_ds, &os) == 0) { if (drc.drc_newfs) { if (spa_version(os->os_spa) >= SPA_VERSION_RECVD_PROPS) first_recvd_props = B_TRUE; } else if (origprops != NULL) { if (clear_received_props(os, tofs, origprops, first_recvd_props ? NULL : props) != 0) zc->zc_obj |= ZPROP_ERR_NOCLEAR; } else { zc->zc_obj |= ZPROP_ERR_NOCLEAR; } dsl_prop_set_hasrecvd(os); } else if (!drc.drc_newfs) { zc->zc_obj |= ZPROP_ERR_NOCLEAR; } (void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED, props, &errlist); (void) nvlist_merge(errors, errlist, 0); nvlist_free(errlist); } if (fit_error_list(zc, &errors) != 0 || put_nvlist(zc, errors) != 0) { /* * Caller made zc->zc_nvlist_dst less than the minimum expected * size or supplied an invalid address. */ props_error = EINVAL; } off = fp->f_offset; error = dmu_recv_stream(&drc, fp, &off, zc->zc_cleanup_fd, &zc->zc_action_handle); if (error == 0) { zfsvfs_t *zfsvfs = NULL; if (getzfsvfs(tofs, &zfsvfs) == 0) { /* online recv */ int end_err; error = zfs_suspend_fs(zfsvfs); /* * If the suspend fails, then the recv_end will * likely also fail, and clean up after itself. */ end_err = dmu_recv_end(&drc); if (error == 0) error = zfs_resume_fs(zfsvfs, tofs); error = error ? error : end_err; VFS_RELE(zfsvfs->z_vfs); } else { error = dmu_recv_end(&drc); } } zc->zc_cookie = off - fp->f_offset; if (off >= 0 && off <= MAXOFFSET_T) fp->f_offset = off; #ifdef DEBUG if (zfs_ioc_recv_inject_err) { zfs_ioc_recv_inject_err = B_FALSE; error = 1; } #endif /* * On error, restore the original props. */ if (error && props) { if (dmu_objset_hold(tofs, FTAG, &os) == 0) { if (clear_received_props(os, tofs, props, NULL) != 0) { /* * We failed to clear the received properties. * Since we may have left a $recvd value on the * system, we can't clear the $hasrecvd flag. */ zc->zc_obj |= ZPROP_ERR_NORESTORE; } else if (first_recvd_props) { dsl_prop_unset_hasrecvd(os); } dmu_objset_rele(os, FTAG); } else if (!drc.drc_newfs) { /* We failed to clear the received properties. */ zc->zc_obj |= ZPROP_ERR_NORESTORE; } if (origprops == NULL && !drc.drc_newfs) { /* We failed to stash the original properties. */ zc->zc_obj |= ZPROP_ERR_NORESTORE; } /* * dsl_props_set() will not convert RECEIVED to LOCAL on or * after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL * explicitly if we're restoring local properties cleared in the * first new-style receive. */ if (origprops != NULL && zfs_set_prop_nvlist(tofs, (first_recvd_props ? ZPROP_SRC_LOCAL : ZPROP_SRC_RECEIVED), origprops, NULL) != 0) { /* * We stashed the original properties but failed to * restore them. */ zc->zc_obj |= ZPROP_ERR_NORESTORE; } } out: nvlist_free(props); nvlist_free(origprops); nvlist_free(errors); releasef(fd); if (error == 0) error = props_error; return (error); } /* * inputs: * zc_name name of snapshot to send * zc_cookie file descriptor to send stream to * zc_obj fromorigin flag (mutually exclusive with zc_fromobj) * zc_sendobj objsetid of snapshot to send * zc_fromobj objsetid of incremental fromsnap (may be zero) * zc_guid if set, estimate size of stream only. zc_cookie is ignored. * output size in zc_objset_type. 
* * outputs: none */ static int zfs_ioc_send(zfs_cmd_t *zc) { objset_t *fromsnap = NULL; objset_t *tosnap; int error; offset_t off; dsl_dataset_t *ds; dsl_dataset_t *dsfrom = NULL; spa_t *spa; dsl_pool_t *dp; boolean_t estimate = (zc->zc_guid != 0); error = spa_open(zc->zc_name, &spa, FTAG); if (error) return (error); dp = spa_get_dsl(spa); rw_enter(&dp->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds); rw_exit(&dp->dp_config_rwlock); if (error) { spa_close(spa, FTAG); return (error); } error = dmu_objset_from_ds(ds, &tosnap); if (error) { dsl_dataset_rele(ds, FTAG); spa_close(spa, FTAG); return (error); } if (zc->zc_fromobj != 0) { rw_enter(&dp->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dp, zc->zc_fromobj, FTAG, &dsfrom); rw_exit(&dp->dp_config_rwlock); spa_close(spa, FTAG); if (error) { dsl_dataset_rele(ds, FTAG); return (error); } error = dmu_objset_from_ds(dsfrom, &fromsnap); if (error) { dsl_dataset_rele(dsfrom, FTAG); dsl_dataset_rele(ds, FTAG); return (error); } } else { spa_close(spa, FTAG); } if (estimate) { error = dmu_send_estimate(tosnap, fromsnap, zc->zc_obj, &zc->zc_objset_type); } else { file_t *fp = getf(zc->zc_cookie); if (fp == NULL) { dsl_dataset_rele(ds, FTAG); if (dsfrom) dsl_dataset_rele(dsfrom, FTAG); return (EBADF); } off = fp->f_offset; - error = dmu_sendbackup(tosnap, fromsnap, zc->zc_obj, fp, &off); + error = dmu_send(tosnap, fromsnap, zc->zc_obj, + zc->zc_cookie, fp, &off); if (off >= 0 && off <= MAXOFFSET_T) fp->f_offset = off; releasef(zc->zc_cookie); } if (dsfrom) dsl_dataset_rele(dsfrom, FTAG); dsl_dataset_rele(ds, FTAG); return (error); } +/* + * inputs: + * zc_name name of snapshot on which to report progress + * zc_cookie file descriptor of send stream + * + * outputs: + * zc_cookie number of bytes written in send stream thus far + */ static int +zfs_ioc_send_progress(zfs_cmd_t *zc) +{ + dsl_dataset_t *ds; + dmu_sendarg_t *dsp = NULL; + int error; + + if ((error = dsl_dataset_hold(zc->zc_name, FTAG, &ds)) != 0) + return (error); + + mutex_enter(&ds->ds_sendstream_lock); + + /* + * Iterate over all the send streams currently active on this dataset. + * If there's one which matches the specified file descriptor _and_ the + * stream was started by the current process, return the progress of + * that stream. 
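+ *
+ * A userland consumer can poll this from a separate thread while a
+ * send is in flight; a sketch, assuming the command is dispatched as
+ * ZFS_IOC_SEND_PROGRESS and zfs_devfd is an open /dev/zfs descriptor
+ * (both names and the snapshot are illustrative):
+ *
+ *	zfs_cmd_t zc = { 0 };
+ *	(void) strlcpy(zc.zc_name, "tank/fs@snap", sizeof (zc.zc_name));
+ *	zc.zc_cookie = send_fd;	// fd the send stream is written to
+ *	if (ioctl(zfs_devfd, ZFS_IOC_SEND_PROGRESS, &zc) == 0)
+ *		bytes_written = zc.zc_cookie;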
+ */ + for (dsp = list_head(&ds->ds_sendstreams); dsp != NULL; + dsp = list_next(&ds->ds_sendstreams, dsp)) { + if (dsp->dsa_outfd == zc->zc_cookie && + dsp->dsa_proc == curproc) + break; + } + + if (dsp != NULL) + zc->zc_cookie = *(dsp->dsa_off); + else + error = ENOENT; + + mutex_exit(&ds->ds_sendstream_lock); + dsl_dataset_rele(ds, FTAG); + return (error); +} + +static int zfs_ioc_inject_fault(zfs_cmd_t *zc) { int id, error; error = zio_inject_fault(zc->zc_name, (int)zc->zc_guid, &id, &zc->zc_inject_record); if (error == 0) zc->zc_guid = (uint64_t)id; return (error); } static int zfs_ioc_clear_fault(zfs_cmd_t *zc) { return (zio_clear_fault((int)zc->zc_guid)); } static int zfs_ioc_inject_list_next(zfs_cmd_t *zc) { int id = (int)zc->zc_guid; int error; error = zio_inject_list_next(&id, zc->zc_name, sizeof (zc->zc_name), &zc->zc_inject_record); zc->zc_guid = id; return (error); } static int zfs_ioc_error_log(zfs_cmd_t *zc) { spa_t *spa; int error; size_t count = (size_t)zc->zc_nvlist_dst_size; if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) return (error); error = spa_get_errlog(spa, (void *)(uintptr_t)zc->zc_nvlist_dst, &count); if (error == 0) zc->zc_nvlist_dst_size = count; else zc->zc_nvlist_dst_size = spa_get_errlog_size(spa); spa_close(spa, FTAG); return (error); } static int zfs_ioc_clear(zfs_cmd_t *zc) { spa_t *spa; vdev_t *vd; int error; /* * On zpool clear we also fix up missing slogs */ mutex_enter(&spa_namespace_lock); spa = spa_lookup(zc->zc_name); if (spa == NULL) { mutex_exit(&spa_namespace_lock); return (EIO); } if (spa_get_log_state(spa) == SPA_LOG_MISSING) { /* we need to let spa_open/spa_load clear the chains */ spa_set_log_state(spa, SPA_LOG_CLEAR); } spa->spa_last_open_failed = 0; mutex_exit(&spa_namespace_lock); if (zc->zc_cookie & ZPOOL_NO_REWIND) { error = spa_open(zc->zc_name, &spa, FTAG); } else { nvlist_t *policy; nvlist_t *config = NULL; if (zc->zc_nvlist_src == 0) return (EINVAL); if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &policy)) == 0) { error = spa_open_rewind(zc->zc_name, &spa, FTAG, policy, &config); if (config != NULL) { int err; if ((err = put_nvlist(zc, config)) != 0) error = err; nvlist_free(config); } nvlist_free(policy); } } if (error) return (error); spa_vdev_state_enter(spa, SCL_NONE); if (zc->zc_guid == 0) { vd = NULL; } else { vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE); if (vd == NULL) { (void) spa_vdev_state_exit(spa, NULL, ENODEV); spa_close(spa, FTAG); return (ENODEV); } } vdev_clear(spa, vd); (void) spa_vdev_state_exit(spa, NULL, 0); /* * Resume any suspended I/Os. */ if (zio_resume(spa) != 0) error = EIO; spa_close(spa, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_value name of origin snapshot * * outputs: * zc_string name of conflicting snapshot, if there is one */ static int zfs_ioc_promote(zfs_cmd_t *zc) { char *cp; /* * We don't need to unmount *all* the origin fs's snapshots, but * it's easier. */ cp = strchr(zc->zc_value, '@'); if (cp) *cp = '\0'; (void) dmu_objset_find(zc->zc_value, zfs_unmount_snap, NULL, DS_FIND_SNAPSHOTS); return (dsl_dataset_promote(zc->zc_name, zc->zc_string)); } /* * Retrieve a single {user|group}{used|quota}@... property. * * inputs: * zc_name name of filesystem * zc_objset_type zfs_userquota_prop_t * zc_value domain name (eg. 
"S-1-234-567-89") * zc_guid RID/UID/GID * * outputs: * zc_cookie property value */ static int zfs_ioc_userspace_one(zfs_cmd_t *zc) { zfsvfs_t *zfsvfs; int error; if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS) return (EINVAL); error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); if (error) return (error); error = zfs_userspace_one(zfsvfs, zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie); zfsvfs_rele(zfsvfs, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_cookie zap cursor * zc_objset_type zfs_userquota_prop_t * zc_nvlist_dst[_size] buffer to fill (not really an nvlist) * * outputs: * zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t) * zc_cookie zap cursor */ static int zfs_ioc_userspace_many(zfs_cmd_t *zc) { zfsvfs_t *zfsvfs; int bufsize = zc->zc_nvlist_dst_size; if (bufsize <= 0) return (ENOMEM); int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE); if (error) return (error); void *buf = kmem_alloc(bufsize, KM_SLEEP); error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie, buf, &zc->zc_nvlist_dst_size); if (error == 0) { error = ddi_copyout(buf, (void *)(uintptr_t)zc->zc_nvlist_dst, zc->zc_nvlist_dst_size, zc->zc_iflags); } kmem_free(buf, bufsize); zfsvfs_rele(zfsvfs, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * * outputs: * none */ static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc) { objset_t *os; int error = 0; zfsvfs_t *zfsvfs; if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) { if (!dmu_objset_userused_enabled(zfsvfs->z_os)) { /* * If userused is not enabled, it may be because the * objset needs to be closed & reopened (to grow the * objset_phys_t). Suspend/resume the fs will do that. */ error = zfs_suspend_fs(zfsvfs); if (error == 0) error = zfs_resume_fs(zfsvfs, zc->zc_name); } if (error == 0) error = dmu_objset_userspace_upgrade(zfsvfs->z_os); VFS_RELE(zfsvfs->z_vfs); } else { /* XXX kind of reading contents without owning */ error = dmu_objset_hold(zc->zc_name, FTAG, &os); if (error) return (error); error = dmu_objset_userspace_upgrade(os); dmu_objset_rele(os, FTAG); } return (error); } #ifdef sun /* * We don't want to have a hard dependency * against some special symbols in sharefs * nfs, and smbsrv. Determine them if needed when * the first file system is shared. * Neither sharefs, nfs or smbsrv are unloadable modules. */ int (*znfsexport_fs)(void *arg); int (*zshare_fs)(enum sharefs_sys_op, share_t *, uint32_t); int (*zsmbexport_fs)(void *arg, boolean_t add_share); int zfs_nfsshare_inited; int zfs_smbshare_inited; ddi_modhandle_t nfs_mod; ddi_modhandle_t sharefs_mod; ddi_modhandle_t smbsrv_mod; #endif /* sun */ kmutex_t zfs_share_lock; #ifdef sun static int zfs_init_sharefs() { int error; ASSERT(MUTEX_HELD(&zfs_share_lock)); /* Both NFS and SMB shares also require sharetab support. 
*/ if (sharefs_mod == NULL && ((sharefs_mod = ddi_modopen("fs/sharefs", KRTLD_MODE_FIRST, &error)) == NULL)) { return (ENOSYS); } if (zshare_fs == NULL && ((zshare_fs = (int (*)(enum sharefs_sys_op, share_t *, uint32_t)) ddi_modsym(sharefs_mod, "sharefs_impl", &error)) == NULL)) { return (ENOSYS); } return (0); } #endif /* sun */ static int zfs_ioc_share(zfs_cmd_t *zc) { #ifdef sun int error; int opcode; switch (zc->zc_share.z_sharetype) { case ZFS_SHARE_NFS: case ZFS_UNSHARE_NFS: if (zfs_nfsshare_inited == 0) { mutex_enter(&zfs_share_lock); if (nfs_mod == NULL && ((nfs_mod = ddi_modopen("fs/nfs", KRTLD_MODE_FIRST, &error)) == NULL)) { mutex_exit(&zfs_share_lock); return (ENOSYS); } if (znfsexport_fs == NULL && ((znfsexport_fs = (int (*)(void *)) ddi_modsym(nfs_mod, "nfs_export", &error)) == NULL)) { mutex_exit(&zfs_share_lock); return (ENOSYS); } error = zfs_init_sharefs(); if (error) { mutex_exit(&zfs_share_lock); return (ENOSYS); } zfs_nfsshare_inited = 1; mutex_exit(&zfs_share_lock); } break; case ZFS_SHARE_SMB: case ZFS_UNSHARE_SMB: if (zfs_smbshare_inited == 0) { mutex_enter(&zfs_share_lock); if (smbsrv_mod == NULL && ((smbsrv_mod = ddi_modopen("drv/smbsrv", KRTLD_MODE_FIRST, &error)) == NULL)) { mutex_exit(&zfs_share_lock); return (ENOSYS); } if (zsmbexport_fs == NULL && ((zsmbexport_fs = (int (*)(void *, boolean_t))ddi_modsym(smbsrv_mod, "smb_server_share", &error)) == NULL)) { mutex_exit(&zfs_share_lock); return (ENOSYS); } error = zfs_init_sharefs(); if (error) { mutex_exit(&zfs_share_lock); return (ENOSYS); } zfs_smbshare_inited = 1; mutex_exit(&zfs_share_lock); } break; default: return (EINVAL); } switch (zc->zc_share.z_sharetype) { case ZFS_SHARE_NFS: case ZFS_UNSHARE_NFS: if (error = znfsexport_fs((void *) (uintptr_t)zc->zc_share.z_exportdata)) return (error); break; case ZFS_SHARE_SMB: case ZFS_UNSHARE_SMB: if (error = zsmbexport_fs((void *) (uintptr_t)zc->zc_share.z_exportdata, zc->zc_share.z_sharetype == ZFS_SHARE_SMB ? B_TRUE: B_FALSE)) { return (error); } break; } opcode = (zc->zc_share.z_sharetype == ZFS_SHARE_NFS || zc->zc_share.z_sharetype == ZFS_SHARE_SMB) ? 
SHAREFS_ADD : SHAREFS_REMOVE; /* * Add or remove share from sharetab */ error = zshare_fs(opcode, (void *)(uintptr_t)zc->zc_share.z_sharedata, zc->zc_share.z_sharemax); return (error); #else /* !sun */ return (ENOSYS); #endif /* !sun */ } ace_t full_access[] = { {(uid_t)-1, ACE_ALL_PERMS, ACE_EVERYONE, 0} }; /* * inputs: * zc_name name of containing filesystem * zc_obj object # beyond which we want next in-use object # * * outputs: * zc_obj next in-use object # */ static int zfs_ioc_next_obj(zfs_cmd_t *zc) { objset_t *os = NULL; int error; error = dmu_objset_hold(zc->zc_name, FTAG, &os); if (error) return (error); error = dmu_object_next(os, &zc->zc_obj, B_FALSE, os->os_dsl_dataset->ds_phys->ds_prev_snap_txg); dmu_objset_rele(os, FTAG); return (error); } /* * inputs: * zc_name name of filesystem * zc_value prefix name for snapshot * zc_cleanup_fd cleanup-on-exit file descriptor for calling process * * outputs: */ static int zfs_ioc_tmp_snapshot(zfs_cmd_t *zc) { char *snap_name; int error; snap_name = kmem_asprintf("%s-%016llx", zc->zc_value, (u_longlong_t)ddi_get_lbolt64()); if (strlen(snap_name) >= MAXNAMELEN) { strfree(snap_name); return (E2BIG); } error = dmu_objset_snapshot(zc->zc_name, snap_name, snap_name, NULL, B_FALSE, B_TRUE, zc->zc_cleanup_fd); if (error != 0) { strfree(snap_name); return (error); } (void) strcpy(zc->zc_value, snap_name); strfree(snap_name); return (0); } /* * inputs: * zc_name name of "to" snapshot * zc_value name of "from" snapshot * zc_cookie file descriptor to write diff data on * * outputs: * dmu_diff_record_t's to the file descriptor */ static int zfs_ioc_diff(zfs_cmd_t *zc) { objset_t *fromsnap; objset_t *tosnap; file_t *fp; offset_t off; int error; error = dmu_objset_hold(zc->zc_name, FTAG, &tosnap); if (error) return (error); error = dmu_objset_hold(zc->zc_value, FTAG, &fromsnap); if (error) { dmu_objset_rele(tosnap, FTAG); return (error); } fp = getf(zc->zc_cookie); if (fp == NULL) { dmu_objset_rele(fromsnap, FTAG); dmu_objset_rele(tosnap, FTAG); return (EBADF); } off = fp->f_offset; error = dmu_diff(tosnap, fromsnap, fp, &off); if (off >= 0 && off <= MAXOFFSET_T) fp->f_offset = off; releasef(zc->zc_cookie); dmu_objset_rele(fromsnap, FTAG); dmu_objset_rele(tosnap, FTAG); return (error); } #ifdef sun /* * Remove all ACL files in shares dir */ static int zfs_smb_acl_purge(znode_t *dzp) { zap_cursor_t zc; zap_attribute_t zap; zfsvfs_t *zfsvfs = dzp->z_zfsvfs; int error; for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id); (error = zap_cursor_retrieve(&zc, &zap)) == 0; zap_cursor_advance(&zc)) { if ((error = VOP_REMOVE(ZTOV(dzp), zap.za_name, kcred, NULL, 0)) != 0) break; } zap_cursor_fini(&zc); return (error); } #endif /* sun */ static int zfs_ioc_smb_acl(zfs_cmd_t *zc) { #ifdef sun vnode_t *vp; znode_t *dzp; vnode_t *resourcevp = NULL; znode_t *sharedir; zfsvfs_t *zfsvfs; nvlist_t *nvlist; char *src, *target; vattr_t vattr; vsecattr_t vsec; int error = 0; if ((error = lookupname(zc->zc_value, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp)) != 0) return (error); /* Now make sure mntpnt and dataset are ZFS */ if (strcmp(vp->v_vfsp->mnt_stat.f_fstypename, "zfs") != 0 || (strcmp((char *)refstr_value(vp->v_vfsp->vfs_resource), zc->zc_name) != 0)) { VN_RELE(vp); return (EINVAL); } dzp = VTOZ(vp); zfsvfs = dzp->z_zfsvfs; ZFS_ENTER(zfsvfs); /* * Create share dir if it's missing. 
*/ mutex_enter(&zfsvfs->z_lock); if (zfsvfs->z_shares_dir == 0) { dmu_tx_t *tx; tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, TRUE, ZFS_SHARES_DIR); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); error = dmu_tx_assign(tx, TXG_WAIT); if (error) { dmu_tx_abort(tx); } else { error = zfs_create_share_dir(zfsvfs, tx); dmu_tx_commit(tx); } if (error) { mutex_exit(&zfsvfs->z_lock); VN_RELE(vp); ZFS_EXIT(zfsvfs); return (error); } } mutex_exit(&zfsvfs->z_lock); ASSERT(zfsvfs->z_shares_dir); if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &sharedir)) != 0) { VN_RELE(vp); ZFS_EXIT(zfsvfs); return (error); } switch (zc->zc_cookie) { case ZFS_SMB_ACL_ADD: vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE; vattr.va_type = VREG; vattr.va_mode = S_IFREG|0777; vattr.va_uid = 0; vattr.va_gid = 0; vsec.vsa_mask = VSA_ACE; vsec.vsa_aclentp = &full_access; vsec.vsa_aclentsz = sizeof (full_access); vsec.vsa_aclcnt = 1; error = VOP_CREATE(ZTOV(sharedir), zc->zc_string, &vattr, EXCL, 0, &resourcevp, kcred, 0, NULL, &vsec); if (resourcevp) VN_RELE(resourcevp); break; case ZFS_SMB_ACL_REMOVE: error = VOP_REMOVE(ZTOV(sharedir), zc->zc_string, kcred, NULL, 0); break; case ZFS_SMB_ACL_RENAME: if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size, zc->zc_iflags, &nvlist)) != 0) { VN_RELE(vp); ZFS_EXIT(zfsvfs); return (error); } if (nvlist_lookup_string(nvlist, ZFS_SMB_ACL_SRC, &src) || nvlist_lookup_string(nvlist, ZFS_SMB_ACL_TARGET, &target)) { VN_RELE(vp); VN_RELE(ZTOV(sharedir)); ZFS_EXIT(zfsvfs); nvlist_free(nvlist); return (error); } error = VOP_RENAME(ZTOV(sharedir), src, ZTOV(sharedir), target, kcred, NULL, 0); nvlist_free(nvlist); break; case ZFS_SMB_ACL_PURGE: error = zfs_smb_acl_purge(sharedir); break; default: error = EINVAL; break; } VN_RELE(vp); VN_RELE(ZTOV(sharedir)); ZFS_EXIT(zfsvfs); return (error); #else /* !sun */ return (EOPNOTSUPP); #endif /* !sun */ } /* * inputs: * zc_name name of filesystem * zc_value short name of snap * zc_string user-supplied tag for this hold * zc_cookie recursive flag * zc_temphold set if hold is temporary * zc_cleanup_fd cleanup-on-exit file descriptor for calling process * zc_sendobj if non-zero, the objid for zc_name@zc_value * zc_createtxg if zc_sendobj is non-zero, snap must have zc_createtxg * * outputs: none */ static int zfs_ioc_hold(zfs_cmd_t *zc) { boolean_t recursive = zc->zc_cookie; spa_t *spa; dsl_pool_t *dp; dsl_dataset_t *ds; int error; minor_t minor = 0; if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0) return (EINVAL); if (zc->zc_sendobj == 0) { return (dsl_dataset_user_hold(zc->zc_name, zc->zc_value, zc->zc_string, recursive, zc->zc_temphold, zc->zc_cleanup_fd)); } if (recursive) return (EINVAL); error = spa_open(zc->zc_name, &spa, FTAG); if (error) return (error); dp = spa_get_dsl(spa); rw_enter(&dp->dp_config_rwlock, RW_READER); error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds); rw_exit(&dp->dp_config_rwlock); spa_close(spa, FTAG); if (error) return (error); /* * Until we have a hold on this snapshot, it's possible that * zc_sendobj could've been destroyed and reused as part * of a later txg. Make sure we're looking at the right object. 
*/ if (zc->zc_createtxg != ds->ds_phys->ds_creation_txg) { dsl_dataset_rele(ds, FTAG); return (ENOENT); } if (zc->zc_cleanup_fd != -1 && zc->zc_temphold) { error = zfs_onexit_fd_hold(zc->zc_cleanup_fd, &minor); if (error) { dsl_dataset_rele(ds, FTAG); return (error); } } error = dsl_dataset_user_hold_for_send(ds, zc->zc_string, zc->zc_temphold); if (minor != 0) { if (error == 0) { dsl_register_onexit_hold_cleanup(ds, zc->zc_string, minor); } zfs_onexit_fd_rele(zc->zc_cleanup_fd); } dsl_dataset_rele(ds, FTAG); return (error); } /* * inputs: * zc_name name of dataset from which we're releasing a user hold * zc_value short name of snap * zc_string user-supplied tag for this hold * zc_cookie recursive flag * * outputs: none */ static int zfs_ioc_release(zfs_cmd_t *zc) { boolean_t recursive = zc->zc_cookie; if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0) return (EINVAL); return (dsl_dataset_user_release(zc->zc_name, zc->zc_value, zc->zc_string, recursive)); } /* * inputs: * zc_name name of filesystem * * outputs: * zc_nvlist_src{_size} nvlist of snapshot holds */ static int zfs_ioc_get_holds(zfs_cmd_t *zc) { nvlist_t *nvp; int error; if ((error = dsl_dataset_get_holds(zc->zc_name, &nvp)) == 0) { error = put_nvlist(zc, nvp); nvlist_free(nvp); } return (error); } /* * inputs: * zc_name name of new filesystem or snapshot * zc_value full name of old snapshot * * outputs: * zc_cookie space in bytes * zc_objset_type compressed space in bytes * zc_perm_action uncompressed space in bytes */ static int zfs_ioc_space_written(zfs_cmd_t *zc) { int error; dsl_dataset_t *new, *old; error = dsl_dataset_hold(zc->zc_name, FTAG, &new); if (error != 0) return (error); error = dsl_dataset_hold(zc->zc_value, FTAG, &old); if (error != 0) { dsl_dataset_rele(new, FTAG); return (error); } error = dsl_dataset_space_written(old, new, &zc->zc_cookie, &zc->zc_objset_type, &zc->zc_perm_action); dsl_dataset_rele(old, FTAG); dsl_dataset_rele(new, FTAG); return (error); } /* * inputs: * zc_name full name of last snapshot * zc_value full name of first snapshot * * outputs: * zc_cookie space in bytes * zc_objset_type compressed space in bytes * zc_perm_action uncompressed space in bytes */ static int zfs_ioc_space_snaps(zfs_cmd_t *zc) { int error; dsl_dataset_t *new, *old; error = dsl_dataset_hold(zc->zc_name, FTAG, &new); if (error != 0) return (error); error = dsl_dataset_hold(zc->zc_value, FTAG, &old); if (error != 0) { dsl_dataset_rele(new, FTAG); return (error); } error = dsl_dataset_space_wouldfree(old, new, &zc->zc_cookie, &zc->zc_objset_type, &zc->zc_perm_action); dsl_dataset_rele(old, FTAG); dsl_dataset_rele(new, FTAG); return (error); } /* * pool create, destroy, and export don't log the history as part of * zfsdev_ioctl, but rather zfs_ioc_pool_create, and zfs_ioc_pool_export * do the logging of those commands. 
*/ static int zfs_ioc_jail(zfs_cmd_t *zc) { return (zone_dataset_attach(curthread->td_ucred, zc->zc_name, (int)zc->zc_jailid)); } static int zfs_ioc_unjail(zfs_cmd_t *zc) { return (zone_dataset_detach(curthread->td_ucred, zc->zc_name, (int)zc->zc_jailid)); } static zfs_ioc_vec_t zfs_ioc_vec[] = { { zfs_ioc_pool_create, zfs_secpolicy_config, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_destroy, zfs_secpolicy_config, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_import, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_FALSE }, { zfs_ioc_pool_export, zfs_secpolicy_config, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_configs, zfs_secpolicy_none, NO_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_stats, zfs_secpolicy_read, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_tryimport, zfs_secpolicy_config, NO_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_scan, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_pool_freeze, zfs_secpolicy_config, NO_NAME, B_FALSE, B_FALSE }, { zfs_ioc_pool_upgrade, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_pool_get_history, zfs_secpolicy_config, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_vdev_add, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_vdev_remove, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_vdev_set_state, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_FALSE }, { zfs_ioc_vdev_attach, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_vdev_detach, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_vdev_setpath, zfs_secpolicy_config, POOL_NAME, B_FALSE, B_TRUE }, { zfs_ioc_vdev_setfru, zfs_secpolicy_config, POOL_NAME, B_FALSE, B_TRUE }, { zfs_ioc_objset_stats, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_objset_zplprops, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_dataset_list_next, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_snapshot_list_next, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_set_prop, zfs_secpolicy_none, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_create, zfs_secpolicy_create, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_destroy, zfs_secpolicy_destroy, DATASET_NAME, B_TRUE, B_TRUE}, { zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_rename, zfs_secpolicy_rename, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_recv, zfs_secpolicy_receive, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_send, zfs_secpolicy_send, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_inject_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE, B_FALSE }, { zfs_ioc_clear_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE, B_FALSE }, { zfs_ioc_inject_list_next, zfs_secpolicy_inject, NO_NAME, B_FALSE, B_FALSE }, { zfs_ioc_error_log, zfs_secpolicy_inject, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_clear, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_FALSE }, { zfs_ioc_promote, zfs_secpolicy_promote, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_destroy_snaps_nvl, zfs_secpolicy_destroy_recursive, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_snapshot, zfs_secpolicy_snapshot, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_dsobj_to_dsname, zfs_secpolicy_diff, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_obj_to_path, zfs_secpolicy_diff, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_pool_set_props, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_pool_get_props, zfs_secpolicy_read, POOL_NAME, B_FALSE, B_FALSE }, { zfs_ioc_set_fsacl, zfs_secpolicy_fsacl, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_get_fsacl, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_FALSE }, { 
zfs_ioc_share, zfs_secpolicy_share, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_inherit_prop, zfs_secpolicy_inherit, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_smb_acl, zfs_secpolicy_smb_acl, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_userspace_one, zfs_secpolicy_userspace_one, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_userspace_many, zfs_secpolicy_userspace_many, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_hold, zfs_secpolicy_hold, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_release, zfs_secpolicy_release, DATASET_NAME, B_TRUE, B_TRUE }, { zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_objset_recvd_props, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_vdev_split, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_next_obj, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_diff, zfs_secpolicy_diff, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot, DATASET_NAME, B_FALSE, B_FALSE }, { zfs_ioc_obj_to_stats, zfs_secpolicy_diff, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_jail, zfs_secpolicy_config, DATASET_NAME, B_TRUE, B_FALSE }, { zfs_ioc_unjail, zfs_secpolicy_config, DATASET_NAME, B_TRUE, B_FALSE }, { zfs_ioc_pool_reguid, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_TRUE }, { zfs_ioc_space_written, zfs_secpolicy_read, DATASET_NAME, B_FALSE, B_TRUE }, { zfs_ioc_space_snaps, zfs_secpolicy_read, DATASET_NAME, B_FALSE, - B_TRUE } + B_TRUE }, + { zfs_ioc_send_progress, zfs_secpolicy_read, DATASET_NAME, B_FALSE, + B_FALSE } }; int pool_status_check(const char *name, zfs_ioc_namecheck_t type) { spa_t *spa; int error; ASSERT(type == POOL_NAME || type == DATASET_NAME); error = spa_open(name, &spa, FTAG); if (error == 0) { if (spa_suspended(spa)) error = EAGAIN; spa_close(spa, FTAG); } return (error); } /* * Find a free minor number. */ minor_t zfsdev_minor_alloc(void) { static minor_t last_minor; minor_t m; ASSERT(MUTEX_HELD(&spa_namespace_lock)); for (m = last_minor + 1; m != last_minor; m++) { if (m > ZFSDEV_MAX_MINOR) m = 1; if (ddi_get_soft_state(zfsdev_state, m) == NULL) { last_minor = m; return (m); } } return (0); } static int zfs_ctldev_init(struct cdev *devp) { minor_t minor; zfs_soft_state_t *zs; ASSERT(MUTEX_HELD(&spa_namespace_lock)); minor = zfsdev_minor_alloc(); if (minor == 0) return (ENXIO); if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) return (EAGAIN); devfs_set_cdevpriv((void *)(uintptr_t)minor, zfsdev_close); zs = ddi_get_soft_state(zfsdev_state, minor); zs->zss_type = ZSST_CTLDEV; zfs_onexit_init((zfs_onexit_t **)&zs->zss_data); return (0); } static void zfs_ctldev_destroy(zfs_onexit_t *zo, minor_t minor) { ASSERT(MUTEX_HELD(&spa_namespace_lock)); zfs_onexit_destroy(zo); ddi_soft_state_free(zfsdev_state, minor); } void * zfsdev_get_soft_state(minor_t minor, enum zfs_soft_state_type which) { zfs_soft_state_t *zp; zp = ddi_get_soft_state(zfsdev_state, minor); if (zp == NULL || zp->zss_type != which) return (NULL); return (zp->zss_data); } static int zfsdev_open(struct cdev *devp, int flag, int mode, struct thread *td) { int error = 0; #ifdef sun if (getminor(*devp) != 0) return (zvol_open(devp, flag, otyp, cr)); #endif /* This is the control device. Allocate a new minor if requested. 
*/ if (flag & FEXCL) { mutex_enter(&spa_namespace_lock); error = zfs_ctldev_init(devp); mutex_exit(&spa_namespace_lock); } return (error); } static void zfsdev_close(void *data) { zfs_onexit_t *zo; minor_t minor = (minor_t)(uintptr_t)data; if (minor == 0) return; mutex_enter(&spa_namespace_lock); zo = zfsdev_get_soft_state(minor, ZSST_CTLDEV); if (zo == NULL) { mutex_exit(&spa_namespace_lock); return; } zfs_ctldev_destroy(zo, minor); mutex_exit(&spa_namespace_lock); } static int zfsdev_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { zfs_cmd_t *zc; uint_t vec; int cflag, error, len; cflag = ZFS_CMD_COMPAT_NONE; len = IOCPARM_LEN(cmd); /* * Check if we have sufficient kernel memory allocated * for the zfs_cmd_t request. Bail out if not so we * will not access undefined memory region. */ if (len < sizeof(zfs_cmd_t)) if (len == sizeof(zfs_cmd_v15_t)) { cflag = ZFS_CMD_COMPAT_V15; vec = zfs_ioctl_v15_to_v28[ZFS_IOC(cmd)]; } else return (EINVAL); else vec = ZFS_IOC(cmd); if (cflag != ZFS_CMD_COMPAT_NONE) { if (vec == ZFS_IOC_COMPAT_PASS) return (0); else if (vec == ZFS_IOC_COMPAT_FAIL) return (ENOTSUP); } if (vec >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0])) return (EINVAL); if (cflag != ZFS_CMD_COMPAT_NONE) { zc = kmem_zalloc(sizeof(zfs_cmd_t), KM_SLEEP); bzero(zc, sizeof(zfs_cmd_t)); zfs_cmd_compat_get(zc, addr, cflag); zfs_ioctl_compat_pre(zc, &vec, cflag); } else { zc = (void *)addr; } error = zfs_ioc_vec[vec].zvec_secpolicy(zc, td->td_ucred); /* * Ensure that all pool/dataset names are valid before we pass down to * the lower layers. */ if (error == 0) { zc->zc_name[sizeof (zc->zc_name) - 1] = '\0'; zc->zc_iflags = flag & FKIOCTL; switch (zfs_ioc_vec[vec].zvec_namecheck) { case POOL_NAME: if (pool_namecheck(zc->zc_name, NULL, NULL) != 0) error = EINVAL; if (zfs_ioc_vec[vec].zvec_pool_check) error = pool_status_check(zc->zc_name, zfs_ioc_vec[vec].zvec_namecheck); break; case DATASET_NAME: if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0) error = EINVAL; if (zfs_ioc_vec[vec].zvec_pool_check) error = pool_status_check(zc->zc_name, zfs_ioc_vec[vec].zvec_namecheck); break; case NO_NAME: break; } } if (error == 0) error = zfs_ioc_vec[vec].zvec_func(zc); if (error == 0) { if (zfs_ioc_vec[vec].zvec_his_log) zfs_log_history(zc); } if (cflag != ZFS_CMD_COMPAT_NONE) { zfs_ioctl_compat_post(zc, ZFS_IOC(cmd), cflag); zfs_cmd_compat_put(zc, addr, cflag); kmem_free(zc, sizeof(zfs_cmd_t)); } return (error); } #ifdef sun static int zfs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) { if (cmd != DDI_ATTACH) return (DDI_FAILURE); if (ddi_create_minor_node(dip, "zfs", S_IFCHR, 0, DDI_PSEUDO, 0) == DDI_FAILURE) return (DDI_FAILURE); zfs_dip = dip; ddi_report_dev(dip); return (DDI_SUCCESS); } static int zfs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) { if (spa_busy() || zfs_busy() || zvol_busy()) return (DDI_FAILURE); if (cmd != DDI_DETACH) return (DDI_FAILURE); zfs_dip = NULL; ddi_prop_remove_all(dip); ddi_remove_minor_node(dip, NULL); return (DDI_SUCCESS); } /*ARGSUSED*/ static int zfs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) { switch (infocmd) { case DDI_INFO_DEVT2DEVINFO: *result = zfs_dip; return (DDI_SUCCESS); case DDI_INFO_DEVT2INSTANCE: *result = (void *)0; return (DDI_SUCCESS); } return (DDI_FAILURE); } #endif /* sun */ /* * OK, so this is a little weird. * * /dev/zfs is the control node, i.e. minor 0. * /dev/zvol/[r]dsk/pool/dataset are the zvols, minor > 0. 
* * /dev/zfs has basically nothing to do except serve up ioctls, * so most of the standard driver entry points are in zvol.c. */ #ifdef sun static struct cb_ops zfs_cb_ops = { zfsdev_open, /* open */ zfsdev_close, /* close */ zvol_strategy, /* strategy */ nodev, /* print */ zvol_dump, /* dump */ zvol_read, /* read */ zvol_write, /* write */ zfsdev_ioctl, /* ioctl */ nodev, /* devmap */ nodev, /* mmap */ nodev, /* segmap */ nochpoll, /* poll */ ddi_prop_op, /* prop_op */ NULL, /* streamtab */ D_NEW | D_MP | D_64BIT, /* Driver compatibility flag */ CB_REV, /* version */ nodev, /* async read */ nodev, /* async write */ }; static struct dev_ops zfs_dev_ops = { DEVO_REV, /* version */ 0, /* refcnt */ zfs_info, /* info */ nulldev, /* identify */ nulldev, /* probe */ zfs_attach, /* attach */ zfs_detach, /* detach */ nodev, /* reset */ &zfs_cb_ops, /* driver operations */ NULL, /* no bus operations */ NULL, /* power */ ddi_quiesce_not_needed, /* quiesce */ }; static struct modldrv zfs_modldrv = { &mod_driverops, "ZFS storage pool", &zfs_dev_ops }; static struct modlinkage modlinkage = { MODREV_1, (void *)&zfs_modlfs, (void *)&zfs_modldrv, NULL }; #endif /* sun */ static struct cdevsw zfs_cdevsw = { .d_version = D_VERSION, .d_open = zfsdev_open, .d_ioctl = zfsdev_ioctl, .d_name = ZFS_DEV_NAME }; static void zfsdev_init(void) { zfsdev = make_dev(&zfs_cdevsw, 0x0, UID_ROOT, GID_OPERATOR, 0666, ZFS_DEV_NAME); } static void zfsdev_fini(void) { if (zfsdev != NULL) destroy_dev(zfsdev); } static struct root_hold_token *zfs_root_token; struct proc *zfsproc; uint_t zfs_fsyncer_key; extern uint_t rrw_tsd_key; #ifdef sun int _init(void) { int error; spa_init(FREAD | FWRITE); zfs_init(); zvol_init(); if ((error = mod_install(&modlinkage)) != 0) { zvol_fini(); zfs_fini(); spa_fini(); return (error); } tsd_create(&zfs_fsyncer_key, NULL); tsd_create(&rrw_tsd_key, NULL); error = ldi_ident_from_mod(&modlinkage, &zfs_li); ASSERT(error == 0); mutex_init(&zfs_share_lock, NULL, MUTEX_DEFAULT, NULL); return (0); } int _fini(void) { int error; if (spa_busy() || zfs_busy() || zvol_busy() || zio_injection_enabled) return (EBUSY); if ((error = mod_remove(&modlinkage)) != 0) return (error); zvol_fini(); zfs_fini(); spa_fini(); if (zfs_nfsshare_inited) (void) ddi_modclose(nfs_mod); if (zfs_smbshare_inited) (void) ddi_modclose(smbsrv_mod); if (zfs_nfsshare_inited || zfs_smbshare_inited) (void) ddi_modclose(sharefs_mod); tsd_destroy(&zfs_fsyncer_key); ldi_ident_release(zfs_li); zfs_li = NULL; mutex_destroy(&zfs_share_lock); return (error); } int _info(struct modinfo *modinfop) { return (mod_info(&modlinkage, modinfop)); } #endif /* sun */ static int zfs_modevent(module_t mod, int type, void *unused __unused) { int error = 0; switch (type) { case MOD_LOAD: zfs_root_token = root_mount_hold("ZFS"); mutex_init(&zfs_share_lock, NULL, MUTEX_DEFAULT, NULL); spa_init(FREAD | FWRITE); zfs_init(); zvol_init(); tsd_create(&zfs_fsyncer_key, NULL); tsd_create(&rrw_tsd_key, NULL); printf("ZFS storage pool version " SPA_VERSION_STRING "\n"); root_mount_rel(zfs_root_token); zfsdev_init(); break; case MOD_UNLOAD: if (spa_busy() || zfs_busy() || zvol_busy() || zio_injection_enabled) { error = EBUSY; break; } zfsdev_fini(); zvol_fini(); zfs_fini(); spa_fini(); tsd_destroy(&zfs_fsyncer_key); tsd_destroy(&rrw_tsd_key); mutex_destroy(&zfs_share_lock); break; default: error = EOPNOTSUPP; break; } return (error); } static moduledata_t zfs_mod = { "zfsctrl", zfs_modevent, 0 }; DECLARE_MODULE(zfsctrl, zfs_mod, SI_SUB_VFS, SI_ORDER_ANY); 
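/*
 * Editorial sketch, not part of the original change: zfsdev_ioctl() above
 * recovers the zfs_ioc_vec[] index from the low byte of the request
 * (vec = ZFS_IOC(cmd)), so every entry appended to the table must sit at
 * the ordinal matching its _IOWR('Z', n, ...) number.  The new
 * zfs_ioc_send_progress entry is the 64th slot (index 63), lining up with
 * ZFS_IOC_SEND_PROGRESS = _IOWR('Z', 63, ...) in the zfs.h hunk below.
 * A hypothetical compile-time guard for that invariant, assuming the
 * kernel's CTASSERT() from <sys/systm.h> is visible here:
 */
CTASSERT(ZFS_IOC(ZFS_IOC_SEND_PROGRESS) == 63);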
MODULE_DEPEND(zfsctrl, opensolaris, 1, 1, 1); MODULE_DEPEND(zfsctrl, krpc, 1, 1, 1); MODULE_DEPEND(zfsctrl, acl_nfs4, 1, 1, 1); Index: projects/nand/sys/cddl/contrib/opensolaris/uts/common/sys/fs/zfs.h =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris/uts/common/sys/fs/zfs.h (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris/uts/common/sys/fs/zfs.h (revision 235263) @@ -1,931 +1,934 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011 by Delphix. All rights reserved. * Copyright 2011 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2012, Joyent, Inc. All rights reserved. + * Copyright (c) 2012, Martin Matuska . All rights reserved. */ /* Portions Copyright 2010 Robert Milkowski */ #ifndef _SYS_FS_ZFS_H #define _SYS_FS_ZFS_H #include #include #include #ifdef __cplusplus extern "C" { #endif /* * Types and constants shared between userland and the kernel. */ /* * Each dataset can be one of the following types. These constants can be * combined into masks that can be passed to various functions. */ typedef enum { ZFS_TYPE_FILESYSTEM = 0x1, ZFS_TYPE_SNAPSHOT = 0x2, ZFS_TYPE_VOLUME = 0x4, ZFS_TYPE_POOL = 0x8 } zfs_type_t; #define ZFS_TYPE_DATASET \ (ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME | ZFS_TYPE_SNAPSHOT) #define ZAP_MAXNAMELEN 256 #define ZAP_MAXVALUELEN (1024 * 8) #define ZAP_OLDMAXVALUELEN 1024 /* * Dataset properties are identified by these constants and must be added to * the end of this list to ensure that external consumers are not affected * by the change. If you make any changes to this list, be sure to update * the property table in usr/src/common/zfs/zfs_prop.c. 
*/ typedef enum { ZFS_PROP_TYPE, ZFS_PROP_CREATION, ZFS_PROP_USED, ZFS_PROP_AVAILABLE, ZFS_PROP_REFERENCED, ZFS_PROP_COMPRESSRATIO, ZFS_PROP_MOUNTED, ZFS_PROP_ORIGIN, ZFS_PROP_QUOTA, ZFS_PROP_RESERVATION, ZFS_PROP_VOLSIZE, ZFS_PROP_VOLBLOCKSIZE, ZFS_PROP_RECORDSIZE, ZFS_PROP_MOUNTPOINT, ZFS_PROP_SHARENFS, ZFS_PROP_CHECKSUM, ZFS_PROP_COMPRESSION, ZFS_PROP_ATIME, ZFS_PROP_DEVICES, ZFS_PROP_EXEC, ZFS_PROP_SETUID, ZFS_PROP_READONLY, ZFS_PROP_ZONED, ZFS_PROP_SNAPDIR, ZFS_PROP_ACLMODE, ZFS_PROP_ACLINHERIT, ZFS_PROP_CREATETXG, /* not exposed to the user */ ZFS_PROP_NAME, /* not exposed to the user */ ZFS_PROP_CANMOUNT, ZFS_PROP_ISCSIOPTIONS, /* not exposed to the user */ ZFS_PROP_XATTR, ZFS_PROP_NUMCLONES, /* not exposed to the user */ ZFS_PROP_COPIES, ZFS_PROP_VERSION, ZFS_PROP_UTF8ONLY, ZFS_PROP_NORMALIZE, ZFS_PROP_CASE, ZFS_PROP_VSCAN, ZFS_PROP_NBMAND, ZFS_PROP_SHARESMB, ZFS_PROP_REFQUOTA, ZFS_PROP_REFRESERVATION, ZFS_PROP_GUID, ZFS_PROP_PRIMARYCACHE, ZFS_PROP_SECONDARYCACHE, ZFS_PROP_USEDSNAP, ZFS_PROP_USEDDS, ZFS_PROP_USEDCHILD, ZFS_PROP_USEDREFRESERV, ZFS_PROP_USERACCOUNTING, /* not exposed to the user */ ZFS_PROP_STMF_SHAREINFO, /* not exposed to the user */ ZFS_PROP_DEFER_DESTROY, ZFS_PROP_USERREFS, ZFS_PROP_LOGBIAS, ZFS_PROP_UNIQUE, /* not exposed to the user */ ZFS_PROP_OBJSETID, /* not exposed to the user */ ZFS_PROP_DEDUP, ZFS_PROP_MLSLABEL, ZFS_PROP_SYNC, ZFS_PROP_REFRATIO, ZFS_PROP_WRITTEN, ZFS_PROP_CLONES, ZFS_NUM_PROPS } zfs_prop_t; typedef enum { ZFS_PROP_USERUSED, ZFS_PROP_USERQUOTA, ZFS_PROP_GROUPUSED, ZFS_PROP_GROUPQUOTA, ZFS_NUM_USERQUOTA_PROPS } zfs_userquota_prop_t; extern const char *zfs_userquota_prop_prefixes[ZFS_NUM_USERQUOTA_PROPS]; /* * Pool properties are identified by these constants and must be added to the * end of this list to ensure that external consumers are not affected * by the change. If you make any changes to this list, be sure to update * the property table in usr/src/common/zfs/zpool_prop.c. */ typedef enum { ZPOOL_PROP_NAME, ZPOOL_PROP_SIZE, ZPOOL_PROP_CAPACITY, ZPOOL_PROP_ALTROOT, ZPOOL_PROP_HEALTH, ZPOOL_PROP_GUID, ZPOOL_PROP_VERSION, ZPOOL_PROP_BOOTFS, ZPOOL_PROP_DELEGATION, ZPOOL_PROP_AUTOREPLACE, ZPOOL_PROP_CACHEFILE, ZPOOL_PROP_FAILUREMODE, ZPOOL_PROP_LISTSNAPS, ZPOOL_PROP_AUTOEXPAND, ZPOOL_PROP_DEDUPDITTO, ZPOOL_PROP_DEDUPRATIO, ZPOOL_PROP_FREE, ZPOOL_PROP_ALLOCATED, ZPOOL_PROP_READONLY, ZPOOL_PROP_COMMENT, ZPOOL_NUM_PROPS } zpool_prop_t; /* Small enough to not hog a whole line of printout in zpool(1M). */ #define ZPROP_MAX_COMMENT 32 #define ZPROP_CONT -2 #define ZPROP_INVAL -1 #define ZPROP_VALUE "value" #define ZPROP_SOURCE "source" typedef enum { ZPROP_SRC_NONE = 0x1, ZPROP_SRC_DEFAULT = 0x2, ZPROP_SRC_TEMPORARY = 0x4, ZPROP_SRC_LOCAL = 0x8, ZPROP_SRC_INHERITED = 0x10, ZPROP_SRC_RECEIVED = 0x20 } zprop_source_t; #define ZPROP_SRC_ALL 0x3f #define ZPROP_SOURCE_VAL_RECVD "$recvd" #define ZPROP_N_MORE_ERRORS "N_MORE_ERRORS" /* * Dataset flag implemented as a special entry in the props zap object * indicating that the dataset has received properties on or after * SPA_VERSION_RECVD_PROPS. The first such receive blows away local properties * just as it did in earlier versions, and thereafter, local properties are * preserved. 
*/ #define ZPROP_HAS_RECVD "$hasrecvd" typedef enum { ZPROP_ERR_NOCLEAR = 0x1, /* failure to clear existing props */ ZPROP_ERR_NORESTORE = 0x2 /* failure to restore props on error */ } zprop_errflags_t; typedef int (*zprop_func)(int, void *); /* * Properties to be set on the root file system of a new pool * are stuffed into their own nvlist, which is then included in * the properties nvlist with the pool properties. */ #define ZPOOL_ROOTFS_PROPS "root-props-nvl" /* * Dataset property functions shared between libzfs and kernel. */ const char *zfs_prop_default_string(zfs_prop_t); uint64_t zfs_prop_default_numeric(zfs_prop_t); boolean_t zfs_prop_readonly(zfs_prop_t); boolean_t zfs_prop_inheritable(zfs_prop_t); boolean_t zfs_prop_setonce(zfs_prop_t); const char *zfs_prop_to_name(zfs_prop_t); zfs_prop_t zfs_name_to_prop(const char *); boolean_t zfs_prop_user(const char *); boolean_t zfs_prop_userquota(const char *); int zfs_prop_index_to_string(zfs_prop_t, uint64_t, const char **); int zfs_prop_string_to_index(zfs_prop_t, const char *, uint64_t *); uint64_t zfs_prop_random_value(zfs_prop_t, uint64_t seed); boolean_t zfs_prop_valid_for_type(int, zfs_type_t); /* * Pool property functions shared between libzfs and kernel. */ zpool_prop_t zpool_name_to_prop(const char *); const char *zpool_prop_to_name(zpool_prop_t); const char *zpool_prop_default_string(zpool_prop_t); uint64_t zpool_prop_default_numeric(zpool_prop_t); boolean_t zpool_prop_readonly(zpool_prop_t); int zpool_prop_index_to_string(zpool_prop_t, uint64_t, const char **); int zpool_prop_string_to_index(zpool_prop_t, const char *, uint64_t *); uint64_t zpool_prop_random_value(zpool_prop_t, uint64_t seed); /* * Definitions for the Delegation. */ typedef enum { ZFS_DELEG_WHO_UNKNOWN = 0, ZFS_DELEG_USER = 'u', ZFS_DELEG_USER_SETS = 'U', ZFS_DELEG_GROUP = 'g', ZFS_DELEG_GROUP_SETS = 'G', ZFS_DELEG_EVERYONE = 'e', ZFS_DELEG_EVERYONE_SETS = 'E', ZFS_DELEG_CREATE = 'c', ZFS_DELEG_CREATE_SETS = 'C', ZFS_DELEG_NAMED_SET = 's', ZFS_DELEG_NAMED_SET_SETS = 'S' } zfs_deleg_who_type_t; typedef enum { ZFS_DELEG_NONE = 0, ZFS_DELEG_PERM_LOCAL = 1, ZFS_DELEG_PERM_DESCENDENT = 2, ZFS_DELEG_PERM_LOCALDESCENDENT = 3, ZFS_DELEG_PERM_CREATE = 4 } zfs_deleg_inherit_t; #define ZFS_DELEG_PERM_UID "uid" #define ZFS_DELEG_PERM_GID "gid" #define ZFS_DELEG_PERM_GROUPS "groups" #define ZFS_MLSLABEL_DEFAULT "none" #define ZFS_SMB_ACL_SRC "src" #define ZFS_SMB_ACL_TARGET "target" typedef enum { ZFS_CANMOUNT_OFF = 0, ZFS_CANMOUNT_ON = 1, ZFS_CANMOUNT_NOAUTO = 2 } zfs_canmount_type_t; typedef enum { ZFS_LOGBIAS_LATENCY = 0, ZFS_LOGBIAS_THROUGHPUT = 1 } zfs_logbias_op_t; typedef enum zfs_share_op { ZFS_SHARE_NFS = 0, ZFS_UNSHARE_NFS = 1, ZFS_SHARE_SMB = 2, ZFS_UNSHARE_SMB = 3 } zfs_share_op_t; typedef enum zfs_smb_acl_op { ZFS_SMB_ACL_ADD, ZFS_SMB_ACL_REMOVE, ZFS_SMB_ACL_RENAME, ZFS_SMB_ACL_PURGE } zfs_smb_acl_op_t; typedef enum zfs_cache_type { ZFS_CACHE_NONE = 0, ZFS_CACHE_METADATA = 1, ZFS_CACHE_ALL = 2 } zfs_cache_type_t; typedef enum { ZFS_SYNC_STANDARD = 0, ZFS_SYNC_ALWAYS = 1, ZFS_SYNC_DISABLED = 2 } zfs_sync_type_t; /* * On-disk version number. 
*/ #define SPA_VERSION_1 1ULL #define SPA_VERSION_2 2ULL #define SPA_VERSION_3 3ULL #define SPA_VERSION_4 4ULL #define SPA_VERSION_5 5ULL #define SPA_VERSION_6 6ULL #define SPA_VERSION_7 7ULL #define SPA_VERSION_8 8ULL #define SPA_VERSION_9 9ULL #define SPA_VERSION_10 10ULL #define SPA_VERSION_11 11ULL #define SPA_VERSION_12 12ULL #define SPA_VERSION_13 13ULL #define SPA_VERSION_14 14ULL #define SPA_VERSION_15 15ULL #define SPA_VERSION_16 16ULL #define SPA_VERSION_17 17ULL #define SPA_VERSION_18 18ULL #define SPA_VERSION_19 19ULL #define SPA_VERSION_20 20ULL #define SPA_VERSION_21 21ULL #define SPA_VERSION_22 22ULL #define SPA_VERSION_23 23ULL #define SPA_VERSION_24 24ULL #define SPA_VERSION_25 25ULL #define SPA_VERSION_26 26ULL #define SPA_VERSION_27 27ULL #define SPA_VERSION_28 28ULL /* * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*}, * and do the appropriate changes. Also bump the version number in * usr/src/grub/capability. */ #define SPA_VERSION SPA_VERSION_28 #define SPA_VERSION_STRING "28" /* * Symbolic names for the changes that caused a SPA_VERSION switch. * Used in the code when checking for presence or absence of a feature. * Feel free to define multiple symbolic names for each version if there * were multiple changes to on-disk structures during that version. * * NOTE: When checking the current SPA_VERSION in your code, be sure * to use spa_version() since it reports the version of the * last synced uberblock. Checking the in-flight version can * be dangerous in some cases. */ #define SPA_VERSION_INITIAL SPA_VERSION_1 #define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2 #define SPA_VERSION_SPARES SPA_VERSION_3 #define SPA_VERSION_RAIDZ2 SPA_VERSION_3 #define SPA_VERSION_BPOBJ_ACCOUNT SPA_VERSION_3 #define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3 #define SPA_VERSION_DNODE_BYTES SPA_VERSION_3 #define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4 #define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5 #define SPA_VERSION_BOOTFS SPA_VERSION_6 #define SPA_VERSION_SLOGS SPA_VERSION_7 #define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8 #define SPA_VERSION_FUID SPA_VERSION_9 #define SPA_VERSION_REFRESERVATION SPA_VERSION_9 #define SPA_VERSION_REFQUOTA SPA_VERSION_9 #define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9 #define SPA_VERSION_L2CACHE SPA_VERSION_10 #define SPA_VERSION_NEXT_CLONES SPA_VERSION_11 #define SPA_VERSION_ORIGIN SPA_VERSION_11 #define SPA_VERSION_DSL_SCRUB SPA_VERSION_11 #define SPA_VERSION_SNAP_PROPS SPA_VERSION_12 #define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13 #define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14 #define SPA_VERSION_USERSPACE SPA_VERSION_15 #define SPA_VERSION_STMF_PROP SPA_VERSION_16 #define SPA_VERSION_RAIDZ3 SPA_VERSION_17 #define SPA_VERSION_USERREFS SPA_VERSION_18 #define SPA_VERSION_HOLES SPA_VERSION_19 #define SPA_VERSION_ZLE_COMPRESSION SPA_VERSION_20 #define SPA_VERSION_DEDUP SPA_VERSION_21 #define SPA_VERSION_RECVD_PROPS SPA_VERSION_22 #define SPA_VERSION_SLIM_ZIL SPA_VERSION_23 #define SPA_VERSION_SA SPA_VERSION_24 #define SPA_VERSION_SCAN SPA_VERSION_25 #define SPA_VERSION_DIR_CLONES SPA_VERSION_26 #define SPA_VERSION_DEADLISTS SPA_VERSION_26 #define SPA_VERSION_FAST_SNAP SPA_VERSION_27 #define SPA_VERSION_MULTI_REPLACE SPA_VERSION_28 /* * ZPL version - rev'd whenever an incompatible on-disk format change * occurs. This is independent of SPA/DMU/ZAP versioning. You must * also update the version_table[] and help message in zfs_prop.c. 
* * When changing, be sure to teach GRUB how to read the new format! * See usr/src/grub/grub-0.97/stage2/{zfs-include/,fsys_zfs*} */ #define ZPL_VERSION_1 1ULL #define ZPL_VERSION_2 2ULL #define ZPL_VERSION_3 3ULL #define ZPL_VERSION_4 4ULL #define ZPL_VERSION_5 5ULL #define ZPL_VERSION ZPL_VERSION_5 #define ZPL_VERSION_STRING "5" #define ZPL_VERSION_INITIAL ZPL_VERSION_1 #define ZPL_VERSION_DIRENT_TYPE ZPL_VERSION_2 #define ZPL_VERSION_FUID ZPL_VERSION_3 #define ZPL_VERSION_NORMALIZATION ZPL_VERSION_3 #define ZPL_VERSION_SYSATTR ZPL_VERSION_3 #define ZPL_VERSION_USERSPACE ZPL_VERSION_4 #define ZPL_VERSION_SA ZPL_VERSION_5 /* Rewind request information */ #define ZPOOL_NO_REWIND 1 /* No policy - default behavior */ #define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */ #define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */ #define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */ #define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */ #define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */ #define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */ typedef struct zpool_rewind_policy { uint32_t zrp_request; /* rewind behavior requested */ uint64_t zrp_maxmeta; /* max acceptable meta-data errors */ uint64_t zrp_maxdata; /* max acceptable data errors */ uint64_t zrp_txg; /* specific txg to load */ } zpool_rewind_policy_t; /* * The following are configuration names used in the nvlist describing a pool's * configuration. */ #define ZPOOL_CONFIG_VERSION "version" #define ZPOOL_CONFIG_POOL_NAME "name" #define ZPOOL_CONFIG_POOL_STATE "state" #define ZPOOL_CONFIG_POOL_TXG "txg" #define ZPOOL_CONFIG_POOL_GUID "pool_guid" #define ZPOOL_CONFIG_CREATE_TXG "create_txg" #define ZPOOL_CONFIG_TOP_GUID "top_guid" #define ZPOOL_CONFIG_VDEV_TREE "vdev_tree" #define ZPOOL_CONFIG_TYPE "type" #define ZPOOL_CONFIG_CHILDREN "children" #define ZPOOL_CONFIG_ID "id" #define ZPOOL_CONFIG_GUID "guid" #define ZPOOL_CONFIG_PATH "path" #define ZPOOL_CONFIG_DEVID "devid" #define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array" #define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift" #define ZPOOL_CONFIG_ASHIFT "ashift" #define ZPOOL_CONFIG_ASIZE "asize" #define ZPOOL_CONFIG_DTL "DTL" #define ZPOOL_CONFIG_SCAN_STATS "scan_stats" /* not stored on disk */ #define ZPOOL_CONFIG_VDEV_STATS "vdev_stats" /* not stored on disk */ #define ZPOOL_CONFIG_WHOLE_DISK "whole_disk" #define ZPOOL_CONFIG_ERRCOUNT "error_count" #define ZPOOL_CONFIG_NOT_PRESENT "not_present" #define ZPOOL_CONFIG_SPARES "spares" #define ZPOOL_CONFIG_IS_SPARE "is_spare" #define ZPOOL_CONFIG_NPARITY "nparity" #define ZPOOL_CONFIG_HOSTID "hostid" #define ZPOOL_CONFIG_HOSTNAME "hostname" #define ZPOOL_CONFIG_LOADED_TIME "initial_load_time" #define ZPOOL_CONFIG_UNSPARE "unspare" #define ZPOOL_CONFIG_PHYS_PATH "phys_path" #define ZPOOL_CONFIG_IS_LOG "is_log" #define ZPOOL_CONFIG_L2CACHE "l2cache" #define ZPOOL_CONFIG_HOLE_ARRAY "hole_array" #define ZPOOL_CONFIG_VDEV_CHILDREN "vdev_children" #define ZPOOL_CONFIG_IS_HOLE "is_hole" #define ZPOOL_CONFIG_DDT_HISTOGRAM "ddt_histogram" #define ZPOOL_CONFIG_DDT_OBJ_STATS "ddt_object_stats" #define ZPOOL_CONFIG_DDT_STATS "ddt_stats" #define ZPOOL_CONFIG_SPLIT "splitcfg" #define ZPOOL_CONFIG_ORIG_GUID "orig_guid" #define ZPOOL_CONFIG_SPLIT_GUID "split_guid" #define ZPOOL_CONFIG_SPLIT_LIST "guid_list" #define ZPOOL_CONFIG_REMOVING "removing" #define ZPOOL_CONFIG_RESILVERING "resilvering" #define ZPOOL_CONFIG_COMMENT "comment" #define 
ZPOOL_CONFIG_SUSPENDED "suspended" /* not stored on disk */ #define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */ #define ZPOOL_CONFIG_BOOTFS "bootfs" /* not stored on disk */ #define ZPOOL_CONFIG_MISSING_DEVICES "missing_vdevs" /* not stored on disk */ #define ZPOOL_CONFIG_LOAD_INFO "load_info" /* not stored on disk */ /* * The persistent vdev state is stored as separate values rather than a single * 'vdev_state' entry. This is because a device can be in multiple states, such * as offline and degraded. */ #define ZPOOL_CONFIG_OFFLINE "offline" #define ZPOOL_CONFIG_FAULTED "faulted" #define ZPOOL_CONFIG_DEGRADED "degraded" #define ZPOOL_CONFIG_REMOVED "removed" #define ZPOOL_CONFIG_FRU "fru" #define ZPOOL_CONFIG_AUX_STATE "aux_state" /* Rewind policy parameters */ #define ZPOOL_REWIND_POLICY "rewind-policy" #define ZPOOL_REWIND_REQUEST "rewind-request" #define ZPOOL_REWIND_REQUEST_TXG "rewind-request-txg" #define ZPOOL_REWIND_META_THRESH "rewind-meta-thresh" #define ZPOOL_REWIND_DATA_THRESH "rewind-data-thresh" /* Rewind data discovered */ #define ZPOOL_CONFIG_LOAD_TIME "rewind_txg_ts" #define ZPOOL_CONFIG_LOAD_DATA_ERRORS "verify_data_errors" #define ZPOOL_CONFIG_REWIND_TIME "seconds_of_rewind" #define VDEV_TYPE_ROOT "root" #define VDEV_TYPE_MIRROR "mirror" #define VDEV_TYPE_REPLACING "replacing" #define VDEV_TYPE_RAIDZ "raidz" #define VDEV_TYPE_DISK "disk" #define VDEV_TYPE_FILE "file" #define VDEV_TYPE_MISSING "missing" #define VDEV_TYPE_HOLE "hole" #define VDEV_TYPE_SPARE "spare" #define VDEV_TYPE_LOG "log" #define VDEV_TYPE_L2CACHE "l2cache" /* * This is needed in userland to report the minimum necessary device size. */ #define SPA_MINDEVSIZE (64ULL << 20) /* * The location of the pool configuration repository, shared between kernel and * userland. */ #define ZPOOL_CACHE "/boot/zfs/zpool.cache" /* * vdev states are ordered from least to most healthy. * A vdev that's CANT_OPEN or below is considered unusable. */ typedef enum vdev_state { VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */ VDEV_STATE_CLOSED, /* Not currently open */ VDEV_STATE_OFFLINE, /* Not allowed to open */ VDEV_STATE_REMOVED, /* Explicitly removed from system */ VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */ VDEV_STATE_FAULTED, /* External request to fault device */ VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */ VDEV_STATE_HEALTHY /* Presumed good */ } vdev_state_t; #define VDEV_STATE_ONLINE VDEV_STATE_HEALTHY /* * vdev aux states. When a vdev is in the CANT_OPEN state, the aux field * of the vdev stats structure uses these constants to distinguish why. */ typedef enum vdev_aux { VDEV_AUX_NONE, /* no error */ VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */ VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */ VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */ VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */ VDEV_AUX_TOO_SMALL, /* vdev size is too small */ VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */ VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */ VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */ VDEV_AUX_SPARED, /* hot spare used in another pool */ VDEV_AUX_ERR_EXCEEDED, /* too many errors */ VDEV_AUX_IO_FAILURE, /* experienced I/O failure */ VDEV_AUX_BAD_LOG, /* cannot read log chain(s) */ VDEV_AUX_EXTERNAL, /* external diagnosis */ VDEV_AUX_SPLIT_POOL /* vdev was split off into another pool */ } vdev_aux_t; /* * pool state. 
The following states are written to disk as part of the normal * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE, L2CACHE. The remaining * states are software abstractions used at various levels to communicate * pool state. */ typedef enum pool_state { POOL_STATE_ACTIVE = 0, /* In active use */ POOL_STATE_EXPORTED, /* Explicitly exported */ POOL_STATE_DESTROYED, /* Explicitly destroyed */ POOL_STATE_SPARE, /* Reserved for hot spare use */ POOL_STATE_L2CACHE, /* Level 2 ARC device */ POOL_STATE_UNINITIALIZED, /* Internal spa_t state */ POOL_STATE_UNAVAIL, /* Internal libzfs state */ POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */ } pool_state_t; /* * Scan Functions. */ typedef enum pool_scan_func { POOL_SCAN_NONE, POOL_SCAN_SCRUB, POOL_SCAN_RESILVER, POOL_SCAN_FUNCS } pool_scan_func_t; /* * ZIO types. Needed to interpret vdev statistics below. */ typedef enum zio_type { ZIO_TYPE_NULL = 0, ZIO_TYPE_READ, ZIO_TYPE_WRITE, ZIO_TYPE_FREE, ZIO_TYPE_CLAIM, ZIO_TYPE_IOCTL, ZIO_TYPES } zio_type_t; /* * Pool statistics. Note: all fields should be 64-bit because this * is passed between kernel and userland as an nvlist uint64 array. */ typedef struct pool_scan_stat { /* values stored on disk */ uint64_t pss_func; /* pool_scan_func_t */ uint64_t pss_state; /* dsl_scan_state_t */ uint64_t pss_start_time; /* scan start time */ uint64_t pss_end_time; /* scan end time */ uint64_t pss_to_examine; /* total bytes to scan */ uint64_t pss_examined; /* total examined bytes */ uint64_t pss_to_process; /* total bytes to process */ uint64_t pss_processed; /* total processed bytes */ uint64_t pss_errors; /* scan errors */ /* values not stored on disk */ uint64_t pss_pass_exam; /* examined bytes per scan pass */ uint64_t pss_pass_start; /* start time of a scan pass */ } pool_scan_stat_t; typedef enum dsl_scan_state { DSS_NONE, DSS_SCANNING, DSS_FINISHED, DSS_CANCELED, DSS_NUM_STATES } dsl_scan_state_t; /* * Vdev statistics. Note: all fields should be 64-bit because this * is passed between kernel and userland as an nvlist uint64 array. */ typedef struct vdev_stat { hrtime_t vs_timestamp; /* time since vdev load */ uint64_t vs_state; /* vdev state */ uint64_t vs_aux; /* see vdev_aux_t */ uint64_t vs_alloc; /* space allocated */ uint64_t vs_space; /* total capacity */ uint64_t vs_dspace; /* deflated capacity */ uint64_t vs_rsize; /* replaceable dev size */ uint64_t vs_ops[ZIO_TYPES]; /* operation count */ uint64_t vs_bytes[ZIO_TYPES]; /* bytes read/written */ uint64_t vs_read_errors; /* read errors */ uint64_t vs_write_errors; /* write errors */ uint64_t vs_checksum_errors; /* checksum errors */ uint64_t vs_self_healed; /* self-healed bytes */ uint64_t vs_scan_removing; /* removing? */ uint64_t vs_scan_processed; /* scan processed bytes */ } vdev_stat_t; /* * DDT statistics. Note: all fields should be 64-bit because this * is passed between kernel and userland as an nvlist uint64 array. 
*/ typedef struct ddt_object { uint64_t ddo_count; /* number of elements in ddt */ uint64_t ddo_dspace; /* size of ddt on disk */ uint64_t ddo_mspace; /* size of ddt in-core */ } ddt_object_t; typedef struct ddt_stat { uint64_t dds_blocks; /* blocks */ uint64_t dds_lsize; /* logical size */ uint64_t dds_psize; /* physical size */ uint64_t dds_dsize; /* deflated allocated size */ uint64_t dds_ref_blocks; /* referenced blocks */ uint64_t dds_ref_lsize; /* referenced lsize * refcnt */ uint64_t dds_ref_psize; /* referenced psize * refcnt */ uint64_t dds_ref_dsize; /* referenced dsize * refcnt */ } ddt_stat_t; typedef struct ddt_histogram { ddt_stat_t ddh_stat[64]; /* power-of-two histogram buckets */ } ddt_histogram_t; #define ZVOL_DRIVER "zvol" #define ZFS_DRIVER "zfs" #define ZFS_DEV_NAME "zfs" #define ZFS_DEV "/dev/" ZFS_DEV_NAME /* general zvol path */ #define ZVOL_DIR "/dev/zvol" /* expansion */ #define ZVOL_PSEUDO_DEV "/devices/pseudo/zfs@0:" /* for dump and swap */ #define ZVOL_FULL_DEV_DIR ZVOL_DIR "/dsk/" #define ZVOL_FULL_RDEV_DIR ZVOL_DIR "/rdsk/" #define ZVOL_PROP_NAME "name" #define ZVOL_DEFAULT_BLOCKSIZE 8192 /* * /dev/zfs ioctl numbers. */ typedef unsigned long zfs_ioc_t; #define ZFS_IOC(ioreq) ((ioreq) & 0xff) #define ZFS_IOC_POOL_CREATE _IOWR('Z', 0, struct zfs_cmd) #define ZFS_IOC_POOL_DESTROY _IOWR('Z', 1, struct zfs_cmd) #define ZFS_IOC_POOL_IMPORT _IOWR('Z', 2, struct zfs_cmd) #define ZFS_IOC_POOL_EXPORT _IOWR('Z', 3, struct zfs_cmd) #define ZFS_IOC_POOL_CONFIGS _IOWR('Z', 4, struct zfs_cmd) #define ZFS_IOC_POOL_STATS _IOWR('Z', 5, struct zfs_cmd) #define ZFS_IOC_POOL_TRYIMPORT _IOWR('Z', 6, struct zfs_cmd) #define ZFS_IOC_POOL_SCAN _IOWR('Z', 7, struct zfs_cmd) #define ZFS_IOC_POOL_FREEZE _IOWR('Z', 8, struct zfs_cmd) #define ZFS_IOC_POOL_UPGRADE _IOWR('Z', 9, struct zfs_cmd) #define ZFS_IOC_POOL_GET_HISTORY _IOWR('Z', 10, struct zfs_cmd) #define ZFS_IOC_VDEV_ADD _IOWR('Z', 11, struct zfs_cmd) #define ZFS_IOC_VDEV_REMOVE _IOWR('Z', 12, struct zfs_cmd) #define ZFS_IOC_VDEV_SET_STATE _IOWR('Z', 13, struct zfs_cmd) #define ZFS_IOC_VDEV_ATTACH _IOWR('Z', 14, struct zfs_cmd) #define ZFS_IOC_VDEV_DETACH _IOWR('Z', 15, struct zfs_cmd) #define ZFS_IOC_VDEV_SETPATH _IOWR('Z', 16, struct zfs_cmd) #define ZFS_IOC_VDEV_SETFRU _IOWR('Z', 17, struct zfs_cmd) #define ZFS_IOC_OBJSET_STATS _IOWR('Z', 18, struct zfs_cmd) #define ZFS_IOC_OBJSET_ZPLPROPS _IOWR('Z', 19, struct zfs_cmd) #define ZFS_IOC_DATASET_LIST_NEXT _IOWR('Z', 20, struct zfs_cmd) #define ZFS_IOC_SNAPSHOT_LIST_NEXT _IOWR('Z', 21, struct zfs_cmd) #define ZFS_IOC_SET_PROP _IOWR('Z', 22, struct zfs_cmd) #define ZFS_IOC_CREATE _IOWR('Z', 23, struct zfs_cmd) #define ZFS_IOC_DESTROY _IOWR('Z', 24, struct zfs_cmd) #define ZFS_IOC_ROLLBACK _IOWR('Z', 25, struct zfs_cmd) #define ZFS_IOC_RENAME _IOWR('Z', 26, struct zfs_cmd) #define ZFS_IOC_RECV _IOWR('Z', 27, struct zfs_cmd) #define ZFS_IOC_SEND _IOWR('Z', 28, struct zfs_cmd) #define ZFS_IOC_INJECT_FAULT _IOWR('Z', 29, struct zfs_cmd) #define ZFS_IOC_CLEAR_FAULT _IOWR('Z', 30, struct zfs_cmd) #define ZFS_IOC_INJECT_LIST_NEXT _IOWR('Z', 31, struct zfs_cmd) #define ZFS_IOC_ERROR_LOG _IOWR('Z', 32, struct zfs_cmd) #define ZFS_IOC_CLEAR _IOWR('Z', 33, struct zfs_cmd) #define ZFS_IOC_PROMOTE _IOWR('Z', 34, struct zfs_cmd) #define ZFS_IOC_DESTROY_SNAPS_NVL _IOWR('Z', 35, struct zfs_cmd) #define ZFS_IOC_SNAPSHOT _IOWR('Z', 36, struct zfs_cmd) #define ZFS_IOC_DSOBJ_TO_DSNAME _IOWR('Z', 37, struct zfs_cmd) #define ZFS_IOC_OBJ_TO_PATH _IOWR('Z', 38, struct zfs_cmd) #define 
ZFS_IOC_POOL_SET_PROPS _IOWR('Z', 39, struct zfs_cmd) #define ZFS_IOC_POOL_GET_PROPS _IOWR('Z', 40, struct zfs_cmd) #define ZFS_IOC_SET_FSACL _IOWR('Z', 41, struct zfs_cmd) #define ZFS_IOC_GET_FSACL _IOWR('Z', 42, struct zfs_cmd) #define ZFS_IOC_SHARE _IOWR('Z', 43, struct zfs_cmd) #define ZFS_IOC_INHERIT_PROP _IOWR('Z', 44, struct zfs_cmd) #define ZFS_IOC_SMB_ACL _IOWR('Z', 45, struct zfs_cmd) #define ZFS_IOC_USERSPACE_ONE _IOWR('Z', 46, struct zfs_cmd) #define ZFS_IOC_USERSPACE_MANY _IOWR('Z', 47, struct zfs_cmd) #define ZFS_IOC_USERSPACE_UPGRADE _IOWR('Z', 48, struct zfs_cmd) #define ZFS_IOC_HOLD _IOWR('Z', 49, struct zfs_cmd) #define ZFS_IOC_RELEASE _IOWR('Z', 50, struct zfs_cmd) #define ZFS_IOC_GET_HOLDS _IOWR('Z', 51, struct zfs_cmd) #define ZFS_IOC_OBJSET_RECVD_PROPS _IOWR('Z', 52, struct zfs_cmd) #define ZFS_IOC_VDEV_SPLIT _IOWR('Z', 53, struct zfs_cmd) #define ZFS_IOC_NEXT_OBJ _IOWR('Z', 54, struct zfs_cmd) #define ZFS_IOC_DIFF _IOWR('Z', 55, struct zfs_cmd) #define ZFS_IOC_TMP_SNAPSHOT _IOWR('Z', 56, struct zfs_cmd) #define ZFS_IOC_OBJ_TO_STATS _IOWR('Z', 57, struct zfs_cmd) #define ZFS_IOC_JAIL _IOWR('Z', 58, struct zfs_cmd) #define ZFS_IOC_UNJAIL _IOWR('Z', 59, struct zfs_cmd) #define ZFS_IOC_POOL_REGUID _IOWR('Z', 60, struct zfs_cmd) #define ZFS_IOC_SPACE_WRITTEN _IOWR('Z', 61, struct zfs_cmd) #define ZFS_IOC_SPACE_SNAPS _IOWR('Z', 62, struct zfs_cmd) +#define ZFS_IOC_SEND_PROGRESS _IOWR('Z', 63, struct zfs_cmd) /* * Internal SPA load state. Used by FMA diagnosis engine. */ typedef enum { SPA_LOAD_NONE, /* no load in progress */ SPA_LOAD_OPEN, /* normal open */ SPA_LOAD_IMPORT, /* import in progress */ SPA_LOAD_TRYIMPORT, /* tryimport in progress */ SPA_LOAD_RECOVER, /* recovery requested */ SPA_LOAD_ERROR /* load failed */ } spa_load_state_t; /* * Bookmark name values. */ #define ZPOOL_ERR_LIST "error list" #define ZPOOL_ERR_DATASET "dataset" #define ZPOOL_ERR_OBJECT "object" #define HIS_MAX_RECORD_LEN (MAXPATHLEN + MAXPATHLEN + 1) /* * The following are names used in the nvlist describing * the pool's history log. */ #define ZPOOL_HIST_RECORD "history record" #define ZPOOL_HIST_TIME "history time" #define ZPOOL_HIST_CMD "history command" #define ZPOOL_HIST_WHO "history who" #define ZPOOL_HIST_ZONE "history zone" #define ZPOOL_HIST_HOST "history hostname" #define ZPOOL_HIST_TXG "history txg" #define ZPOOL_HIST_INT_EVENT "history internal event" #define ZPOOL_HIST_INT_STR "history internal str" /* * Flags for ZFS_IOC_VDEV_SET_STATE */ #define ZFS_ONLINE_CHECKREMOVE 0x1 #define ZFS_ONLINE_UNSPARE 0x2 #define ZFS_ONLINE_FORCEFAULT 0x4 #define ZFS_ONLINE_EXPAND 0x8 #define ZFS_OFFLINE_TEMPORARY 0x1 /* * Flags for ZFS_IOC_POOL_IMPORT */ #define ZFS_IMPORT_NORMAL 0x0 #define ZFS_IMPORT_VERBATIM 0x1 #define ZFS_IMPORT_ANY_HOST 0x2 #define ZFS_IMPORT_MISSING_LOG 0x4 #define ZFS_IMPORT_ONLY 0x8 /* * Sysevent payload members. 
ZFS will generate the following sysevents with the * given payloads: * * ESC_ZFS_RESILVER_START * ESC_ZFS_RESILVER_END * ESC_ZFS_POOL_DESTROY * ESC_ZFS_POOL_REGUID * * ZFS_EV_POOL_NAME DATA_TYPE_STRING * ZFS_EV_POOL_GUID DATA_TYPE_UINT64 * * ESC_ZFS_VDEV_REMOVE * ESC_ZFS_VDEV_CLEAR * ESC_ZFS_VDEV_CHECK * * ZFS_EV_POOL_NAME DATA_TYPE_STRING * ZFS_EV_POOL_GUID DATA_TYPE_UINT64 * ZFS_EV_VDEV_PATH DATA_TYPE_STRING (optional) * ZFS_EV_VDEV_GUID DATA_TYPE_UINT64 */ #define ZFS_EV_POOL_NAME "pool_name" #define ZFS_EV_POOL_GUID "pool_guid" #define ZFS_EV_VDEV_PATH "vdev_path" #define ZFS_EV_VDEV_GUID "vdev_guid" /* * Note: This is encoded on-disk, so new events must be added to the * end, and unused events can not be removed. Be sure to edit * libzfs_pool.c: hist_event_table[]. */ typedef enum history_internal_events { LOG_NO_EVENT = 0, LOG_POOL_CREATE, LOG_POOL_VDEV_ADD, LOG_POOL_REMOVE, LOG_POOL_DESTROY, LOG_POOL_EXPORT, LOG_POOL_IMPORT, LOG_POOL_VDEV_ATTACH, LOG_POOL_VDEV_REPLACE, LOG_POOL_VDEV_DETACH, LOG_POOL_VDEV_ONLINE, LOG_POOL_VDEV_OFFLINE, LOG_POOL_UPGRADE, LOG_POOL_CLEAR, LOG_POOL_SCAN, LOG_POOL_PROPSET, LOG_DS_CREATE, LOG_DS_CLONE, LOG_DS_DESTROY, LOG_DS_DESTROY_BEGIN, LOG_DS_INHERIT, LOG_DS_PROPSET, LOG_DS_QUOTA, LOG_DS_PERM_UPDATE, LOG_DS_PERM_REMOVE, LOG_DS_PERM_WHO_REMOVE, LOG_DS_PROMOTE, LOG_DS_RECEIVE, LOG_DS_RENAME, LOG_DS_RESERVATION, LOG_DS_REPLAY_INC_SYNC, LOG_DS_REPLAY_FULL_SYNC, LOG_DS_ROLLBACK, LOG_DS_SNAPSHOT, LOG_DS_UPGRADE, LOG_DS_REFQUOTA, LOG_DS_REFRESERV, LOG_POOL_SCAN_DONE, LOG_DS_USER_HOLD, LOG_DS_USER_RELEASE, LOG_POOL_SPLIT, LOG_END } history_internal_events_t; #ifdef __cplusplus } #endif #endif /* _SYS_FS_ZFS_H */ Index: projects/nand/sys/cddl/contrib/opensolaris =================================================================== --- projects/nand/sys/cddl/contrib/opensolaris (revision 235262) +++ projects/nand/sys/cddl/contrib/opensolaris (revision 235263) Property changes on: projects/nand/sys/cddl/contrib/opensolaris ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/cddl/contrib/opensolaris:r235157-235262 Index: projects/nand/sys/conf/files =================================================================== --- projects/nand/sys/conf/files (revision 235262) +++ projects/nand/sys/conf/files (revision 235263) @@ -1,3718 +1,3719 @@ # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
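# A minimal sketch of the entry syntax described above, using a hypothetical
# driver "foo" (not an entry in this tree): a generated header names the
# files it depends on, the command that produces it, and what to clean,
# with each clause on its own backslash-continued line, and the quoted
# strings kept unbroken because of the config limitation noted above:
#
# foodevs.h optional foo \
#	dependency "$S/tools/foodevs2h.awk $S/dev/foo/foodevs" \
#	compile-with "${AWK} -f $S/tools/foodevs2h.awk $S/dev/foo/foodevs" \
#	no-obj no-implicit-rule before-depend \
#	clean "foodevs.h"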
# acpi_quirks.h optional acpi \ dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ no-obj no-implicit-rule before-depend \ clean "acpi_quirks.h" aicasm optional ahc | ahd \ dependency "$S/dev/aic7xxx/aicasm/*.[chyl]" \ compile-with "CC='${CC}' ${MAKE} -f $S/dev/aic7xxx/aicasm/Makefile MAKESRCPATH=$S/dev/aic7xxx/aicasm" \ no-obj no-implicit-rule \ clean "aicasm* y.tab.h" aic7xxx_seq.h optional ahc \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic7xxx_seq.h" \ dependency "$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic7xxx_reg.h optional ahc \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic7xxx_reg.h" \ dependency "$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic7xxx_reg_print.c optional ahc \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \ no-obj no-implicit-rule local \ clean "aic7xxx_reg_print.c" \ dependency "$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic7xxx_reg_print.o optional ahc ahc_reg_pretty_print \ compile-with "${NORMAL_C}" \ no-implicit-rule local aic79xx_seq.h optional ahd pci \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic79xx_seq.h" \ dependency "$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic79xx_reg.h optional ahd pci \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \ no-obj no-implicit-rule before-depend local \ clean "aic79xx_reg.h" \ dependency "$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic79xx_reg_print.c optional ahd pci \ compile-with "./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \ no-obj no-implicit-rule local \ clean "aic79xx_reg_print.c" \ dependency "$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm" aic79xx_reg_print.o optional ahd pci ahd_reg_pretty_print \ compile-with "${NORMAL_C}" \ no-implicit-rule local # # The 'fdt_dtb_file' target covers an actual DTB file name, which is derived # from the specified source (DTS) file: .dts -> .dtb # fdt_dtb_file optional fdt \ compile-with "if [ -f $S/boot/fdt/dts/${FDT_DTS_FILE} ]; then dtc -O dtb -o `echo ${FDT_DTS_FILE} | cut -d. -f1`.dtb -b 0 -p 1024 $S/boot/fdt/dts/${FDT_DTS_FILE}; fi" \ no-obj no-implicit-rule before-depend \ clean "`echo ${FDT_DTS_FILE} | cut -d. -f1`.dtb" fdt_static_dtb.h optional fdt fdt_dtb_static \ compile-with "sh $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ." 
\ no-obj no-implicit-rule before-depend \ clean "fdt_static_dtb.h" feeder_eq_gen.h optional sound \ dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_eq_gen.h" feeder_rate_gen.h optional sound \ dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_rate_gen.h" snd_fxdiv_gen.h optional sound \ dependency "$S/tools/sound/snd_fxdiv_gen.awk" \ compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \ no-obj no-implicit-rule before-depend \ clean "snd_fxdiv_gen.h" miidevs.h optional miibus | mii \ dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ no-obj no-implicit-rule before-depend \ clean "miidevs.h" pccarddevs.h standard \ dependency "$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ compile-with "${AWK} -f $S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ no-obj no-implicit-rule before-depend \ clean "pccarddevs.h" teken_state.h optional sc \ dependency "$S/teken/gensequences $S/teken/sequences" \ compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \ no-obj no-implicit-rule before-depend \ clean "teken_state.h" usbdevs.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \ no-obj no-implicit-rule before-depend \ clean "usbdevs.h" usbdevs_data.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \ no-obj no-implicit-rule before-depend \ clean "usbdevs_data.h" cam/cam.c optional scbus cam/cam_periph.c optional scbus cam/cam_queue.c optional scbus cam/cam_sim.c optional scbus cam/cam_xpt.c optional scbus cam/ata/ata_all.c optional scbus cam/ata/ata_xpt.c optional scbus cam/ata/ata_pmp.c optional scbus cam/scsi/scsi_xpt.c optional scbus cam/scsi/scsi_all.c optional scbus cam/scsi/scsi_cd.c optional cd cam/scsi/scsi_ch.c optional ch cam/ata/ata_da.c optional ada | da cam/ctl/ctl.c optional ctl cam/ctl/ctl_backend.c optional ctl cam/ctl/ctl_backend_block.c optional ctl cam/ctl/ctl_backend_ramdisk.c optional ctl cam/ctl/ctl_cmd_table.c optional ctl cam/ctl/ctl_frontend.c optional ctl cam/ctl/ctl_frontend_cam_sim.c optional ctl cam/ctl/ctl_frontend_internal.c optional ctl cam/ctl/ctl_mem_pool.c optional ctl cam/ctl/ctl_scsi_all.c optional ctl cam/ctl/ctl_error.c optional ctl cam/ctl/ctl_util.c optional ctl cam/ctl/scsi_ctl.c optional ctl cam/scsi/scsi_da.c optional da cam/scsi/scsi_low.c optional ct | ncv | nsp | stg cam/scsi/scsi_low_pisa.c optional ct | ncv | nsp | stg cam/scsi/scsi_pass.c optional pass cam/scsi/scsi_pt.c optional pt cam/scsi/scsi_sa.c optional sa cam/scsi/scsi_ses.c optional ses cam/scsi/scsi_sg.c optional sg cam/scsi/scsi_targ_bh.c optional targbh cam/scsi/scsi_target.c optional targ cam/scsi/smp_all.c optional scbus # shared between zfs and dtrace cddl/compat/opensolaris/kern/opensolaris.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_cmn_err.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kmem.c optional zfs compile-with "${ZFS_C}" 
cddl/compat/opensolaris/kern/opensolaris_misc.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_sunddi.c optional zfs compile-with "${ZFS_C}" # zfs specific cddl/compat/opensolaris/kern/opensolaris_acl.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kobj.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kstat.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_lookup.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_policy.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_string.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_sysevent.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_taskq.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_uio.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vfs.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_zone.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/acl/acl_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/avl/avl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/nvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_comutil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_fletcher.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_namecheck.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zpool_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zprop_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/gfs.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/vnode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bplist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c optional zfs compile-with "${ZFS_C}" \ warning "kernel contains CDDL licensed ZFS filesystem" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/gzip.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sha256.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/uberblock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/unique.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zle.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zrlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/callb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/list.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/nvpair_alloc_system.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/adler32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/deflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inffast.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inftrees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/opensolaris_crc32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/trees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zmod.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/zmod/zmod_subr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zutil.c optional zfs compile-with "${ZFS_C}" contrib/altq/altq/altq_cbq.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_cdnr.c optional altq contrib/altq/altq/altq_hfsc.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_priq.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_red.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_rio.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/altq/altq/altq_rmclass.c optional altq contrib/altq/altq/altq_subr.c optional altq \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbfileio.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmobject.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi contrib/dev/acpica/components/dispatcher/dswload.c optional acpi contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi contrib/dev/acpica/components/events/evevent.c optional acpi contrib/dev/acpica/components/events/evglock.c optional acpi contrib/dev/acpica/components/events/evgpe.c optional acpi contrib/dev/acpica/components/events/evgpeblk.c optional 
acpi contrib/dev/acpica/components/events/evgpeinit.c optional acpi contrib/dev/acpica/components/events/evgpeutil.c optional acpi contrib/dev/acpica/components/events/evmisc.c optional acpi contrib/dev/acpica/components/events/evregion.c optional acpi contrib/dev/acpica/components/events/evrgnini.c optional acpi contrib/dev/acpica/components/events/evsci.c optional acpi contrib/dev/acpica/components/events/evxface.c optional acpi contrib/dev/acpica/components/events/evxfevnt.c optional acpi contrib/dev/acpica/components/events/evxfgpe.c optional acpi contrib/dev/acpica/components/events/evxfregn.c optional acpi contrib/dev/acpica/components/executer/exconfig.c optional acpi contrib/dev/acpica/components/executer/exconvrt.c optional acpi contrib/dev/acpica/components/executer/excreate.c optional acpi contrib/dev/acpica/components/executer/exdebug.c optional acpi contrib/dev/acpica/components/executer/exdump.c optional acpi contrib/dev/acpica/components/executer/exfield.c optional acpi contrib/dev/acpica/components/executer/exfldio.c optional acpi contrib/dev/acpica/components/executer/exmisc.c optional acpi contrib/dev/acpica/components/executer/exmutex.c optional acpi contrib/dev/acpica/components/executer/exnames.c optional acpi contrib/dev/acpica/components/executer/exoparg1.c optional acpi contrib/dev/acpica/components/executer/exoparg2.c optional acpi contrib/dev/acpica/components/executer/exoparg3.c optional acpi contrib/dev/acpica/components/executer/exoparg6.c optional acpi contrib/dev/acpica/components/executer/exprep.c optional acpi contrib/dev/acpica/components/executer/exregion.c optional acpi contrib/dev/acpica/components/executer/exresnte.c optional acpi contrib/dev/acpica/components/executer/exresolv.c optional acpi contrib/dev/acpica/components/executer/exresop.c optional acpi contrib/dev/acpica/components/executer/exstore.c optional acpi contrib/dev/acpica/components/executer/exstoren.c optional acpi contrib/dev/acpica/components/executer/exstorob.c optional acpi contrib/dev/acpica/components/executer/exsystem.c optional acpi contrib/dev/acpica/components/executer/exutils.c optional acpi contrib/dev/acpica/components/hardware/hwacpi.c optional acpi contrib/dev/acpica/components/hardware/hwesleep.c optional acpi contrib/dev/acpica/components/hardware/hwgpe.c optional acpi contrib/dev/acpica/components/hardware/hwpci.c optional acpi contrib/dev/acpica/components/hardware/hwregs.c optional acpi contrib/dev/acpica/components/hardware/hwsleep.c optional acpi contrib/dev/acpica/components/hardware/hwtimer.c optional acpi contrib/dev/acpica/components/hardware/hwvalid.c optional acpi contrib/dev/acpica/components/hardware/hwxface.c optional acpi contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi contrib/dev/acpica/components/namespace/nsaccess.c optional acpi contrib/dev/acpica/components/namespace/nsalloc.c optional acpi contrib/dev/acpica/components/namespace/nsdump.c optional acpi contrib/dev/acpica/components/namespace/nseval.c optional acpi contrib/dev/acpica/components/namespace/nsinit.c optional acpi contrib/dev/acpica/components/namespace/nsload.c optional acpi contrib/dev/acpica/components/namespace/nsnames.c optional acpi contrib/dev/acpica/components/namespace/nsobject.c optional acpi contrib/dev/acpica/components/namespace/nsparse.c optional acpi contrib/dev/acpica/components/namespace/nspredef.c optional acpi contrib/dev/acpica/components/namespace/nsrepair.c optional acpi contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi 
contrib/dev/acpica/components/namespace/nssearch.c optional acpi contrib/dev/acpica/components/namespace/nsutils.c optional acpi contrib/dev/acpica/components/namespace/nswalk.c optional acpi contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi contrib/dev/acpica/components/namespace/nsxfname.c optional acpi contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi contrib/dev/acpica/components/parser/psargs.c optional acpi contrib/dev/acpica/components/parser/psloop.c optional acpi contrib/dev/acpica/components/parser/psopcode.c optional acpi contrib/dev/acpica/components/parser/psparse.c optional acpi contrib/dev/acpica/components/parser/psscope.c optional acpi contrib/dev/acpica/components/parser/pstree.c optional acpi contrib/dev/acpica/components/parser/psutils.c optional acpi contrib/dev/acpica/components/parser/pswalk.c optional acpi contrib/dev/acpica/components/parser/psxface.c optional acpi contrib/dev/acpica/components/resources/rsaddr.c optional acpi contrib/dev/acpica/components/resources/rscalc.c optional acpi contrib/dev/acpica/components/resources/rscreate.c optional acpi contrib/dev/acpica/components/resources/rsdump.c optional acpi contrib/dev/acpica/components/resources/rsinfo.c optional acpi contrib/dev/acpica/components/resources/rsio.c optional acpi contrib/dev/acpica/components/resources/rsirq.c optional acpi contrib/dev/acpica/components/resources/rslist.c optional acpi contrib/dev/acpica/components/resources/rsmemory.c optional acpi contrib/dev/acpica/components/resources/rsmisc.c optional acpi contrib/dev/acpica/components/resources/rsserial.c optional acpi contrib/dev/acpica/components/resources/rsutils.c optional acpi contrib/dev/acpica/components/resources/rsxface.c optional acpi contrib/dev/acpica/components/tables/tbfadt.c optional acpi contrib/dev/acpica/components/tables/tbfind.c optional acpi contrib/dev/acpica/components/tables/tbinstal.c optional acpi contrib/dev/acpica/components/tables/tbutils.c optional acpi contrib/dev/acpica/components/tables/tbxface.c optional acpi contrib/dev/acpica/components/tables/tbxfroot.c optional acpi contrib/dev/acpica/components/utilities/utaddress.c optional acpi contrib/dev/acpica/components/utilities/utalloc.c optional acpi contrib/dev/acpica/components/utilities/utcache.c optional acpi contrib/dev/acpica/components/utilities/utcopy.c optional acpi contrib/dev/acpica/components/utilities/utdebug.c optional acpi contrib/dev/acpica/components/utilities/utdecode.c optional acpi contrib/dev/acpica/components/utilities/utdelete.c optional acpi contrib/dev/acpica/components/utilities/uteval.c optional acpi contrib/dev/acpica/components/utilities/utglobal.c optional acpi contrib/dev/acpica/components/utilities/utids.c optional acpi contrib/dev/acpica/components/utilities/utinit.c optional acpi contrib/dev/acpica/components/utilities/utlock.c optional acpi contrib/dev/acpica/components/utilities/utmath.c optional acpi contrib/dev/acpica/components/utilities/utmisc.c optional acpi contrib/dev/acpica/components/utilities/utmutex.c optional acpi contrib/dev/acpica/components/utilities/utobject.c optional acpi contrib/dev/acpica/components/utilities/utosi.c optional acpi contrib/dev/acpica/components/utilities/utresrc.c optional acpi contrib/dev/acpica/components/utilities/utstate.c optional acpi contrib/dev/acpica/components/utilities/utxface.c optional acpi contrib/dev/acpica/components/utilities/utxferror.c optional acpi #contrib/dev/acpica/components/utilities/utxfmutex.c optional acpi 
contrib/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-error -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/libfdt/fdt.c optional fdt contrib/libfdt/fdt_ro.c optional fdt contrib/libfdt/fdt_rw.c optional fdt contrib/libfdt/fdt_strerror.c optional fdt contrib/libfdt/fdt_sw.c optional fdt contrib/libfdt/fdt_wip.c optional fdt contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \ compile-with "${NORMAL_C_NOWERROR} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/straddr.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" 
contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/pf/net/if_pflog.c optional pflog pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/if_pfsync.c optional pfsync pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_if.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_ioctl.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_lb.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_norm.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_osfp.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_ruleset.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_table.c optional pf inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/netinet/in4_cksum.c optional pf inet crypto/blowfish/bf_ecb.c optional ipsec crypto/blowfish/bf_skey.c optional crypto | ipsec crypto/camellia/camellia.c optional crypto | ipsec crypto/camellia/camellia-api.c optional crypto | ipsec crypto/des/des_ecb.c optional crypto | ipsec | netsmb crypto/des/des_setkey.c optional crypto | ipsec | netsmb crypto/rc4/rc4.c optional netgraph_mppc_encryption | kgssapi crypto/rijndael/rijndael-alg-fst.c optional crypto | geom_bde | \ ipsec | random | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional geom_bde | random crypto/rijndael/rijndael-api.c optional crypto | ipsec | wlan_ccmp crypto/sha1.c optional carp | crypto | ipsec | \ netgraph_mppc_encryption | sctp crypto/sha2/sha2.c optional crypto | geom_bde | ipsec | random | \ sctp | zfs ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_capture.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_script.c optional ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_textdump.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb #dev/dpt/dpt_control.c optional dpt dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_linux.c optional aac compat_linux dev/aac/aac_pci.c optional aac pci dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_hp.c optional acpi_hp acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpi_support/atk0110.c optional aibs acpi dev/acpica/Osd/OsdDebug.c optional acpi 
dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_timer.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink dev/advansys/adv_eisa.c optional adv eisa dev/advansys/adv_pci.c optional adv pci dev/advansys/advansys.c optional adv dev/advansys/advlib.c optional adv dev/advansys/advmcode.c optional adv dev/advansys/adw_pci.c optional adw pci dev/advansys/adwcam.c optional adw dev/advansys/adwlib.c optional adw dev/advansys/adwmcode.c optional adw dev/ae/if_ae.c optional ae pci dev/age/if_age.c optional age pci dev/agp/agp.c optional agp pci dev/agp/agp_if.m optional agp pci dev/aha/aha.c optional aha dev/aha/aha_isa.c optional aha isa dev/aha/aha_mca.c optional aha mca dev/ahb/ahb.c optional ahb eisa dev/ahci/ahci.c optional ahci pci dev/aic/aic.c optional aic dev/aic/aic_pccard.c optional aic pccard dev/aic7xxx/ahc_eisa.c optional ahc eisa dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/ahd_pci.c optional ahd pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/alc/if_alc.c optional alc pci dev/ale/if_ale.c optional ale pci dev/amr/amr.c optional amr dev/amr/amr_cam.c optional amrp amr dev/amr/amr_disk.c optional amr dev/amr/amr_linux.c optional amr compat_linux dev/amr/amr_pci.c optional amr pci dev/an/if_an.c optional an dev/an/if_an_isa.c optional an isa dev/an/if_an_pccard.c optional an pccard dev/an/if_an_pci.c optional an pci dev/asr/asr.c optional asr pci \ compile-with "${NORMAL_C} ${NO_WARRAY_BOUNDS}" # dev/ata/ata_if.m optional ata | atacore dev/ata/ata-all.c optional ata | atacore dev/ata/ata-dma.c optional ata | atacore dev/ata/ata-lowlevel.c optional ata | atacore dev/ata/ata-queue.c optional ata | atacore dev/ata/ata-sata.c optional ata | atacore dev/ata/ata-card.c optional ata pccard | atapccard dev/ata/ata-cbus.c optional ata pc98 | atapc98 dev/ata/ata-isa.c optional ata isa | ataisa dev/ata/ata-pci.c optional ata pci | 
atapci dev/ata/chipsets/ata-ahci.c optional ata pci | ataahci | ataacerlabs | \ ataati | ataintel | atajmicron | \ atavia | atanvidia dev/ata/chipsets/ata-acard.c optional ata pci | ataacard dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs dev/ata/chipsets/ata-adaptec.c optional ata pci | ataadaptec dev/ata/chipsets/ata-amd.c optional ata pci | ataamd dev/ata/chipsets/ata-ati.c optional ata pci | ataati dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix dev/ata/chipsets/ata-highpoint.c optional ata pci | atahighpoint dev/ata/chipsets/ata-intel.c optional ata pci | ataintel dev/ata/chipsets/ata-ite.c optional ata pci | ataite dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell | ataadaptec dev/ata/chipsets/ata-micron.c optional ata pci | atamicron dev/ata/chipsets/ata-national.c optional ata pci | atanational dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia dev/ata/chipsets/ata-promise.c optional ata pci | atapromise dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati dev/ata/chipsets/ata-sis.c optional ata pci | atasis dev/ata/chipsets/ata-via.c optional ata pci | atavia dev/ata/ata-disk.c optional atadisk dev/ata/ata-raid.c optional ataraid dev/ata/atapi-cd.c optional atapicd dev/ata/atapi-fd.c optional atapifd dev/ata/atapi-tape.c optional atapist dev/ata/atapi-cam.c optional atapicam # dev/ath/if_ath_pci.c optional ath_pci pci \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath_ahb.c optional ath_ahb \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_debug.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_keycache.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_led.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_ht.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_sysctl.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ah_osdep.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/ath_hal/ah.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v14.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v4k.c \ optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_9287.c \ optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_regdomain.c optional ath \ compile-with "${NORMAL_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW} -I$S/dev/ath" # ar5210 dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" 
dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5211 dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_reset.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5212 dev/ath/ath_hal/ar5212/ar5212_ani.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_attach.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_beacon.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_eeprom.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_gpio.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_interrupts.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath 
-I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_keycache.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_misc.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_phy.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_power.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_recv.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_reset.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_rfgain.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_xmit.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5416 (depends on ar5212) dev/ath/ath_hal/ar5416/ar5416_ani.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_attach.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_beacon.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_eeprom.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_gpio.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_interrupts.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" 
dev/ath/ath_hal/ar5416/ar5416_keycache.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_misc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_phy.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_power.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_radar.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_recv.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_reset.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_xmit.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9130 (depends upon ar5416) - also requires AH_SUPPORT_AR9130 # # Since this is an embedded MAC SoC, there's no need to compile it into the # default HAL. dev/ath/ath_hal/ar9001/ar9130_attach.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_phy.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_eeprom.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9160 (depends on ar5416) dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9280 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9285 (depends on ar5416 and ar9280) dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9287 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \ 
compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # rf backends dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \ ath_ar9130 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ath rate control algorithms dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${NORMAL_C} -I$S/dev/ath" # ath DFS modules dev/ath/ath_dfs/null/dfs_null.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bktr/bktr_audio.c optional bktr pci dev/bktr/bktr_card.c optional bktr pci dev/bktr/bktr_core.c optional bktr pci dev/bktr/bktr_i2c.c optional bktr pci smbus dev/bktr/bktr_os.c optional bktr pci dev/bktr/bktr_tuner.c optional bktr pci dev/bktr/msp34xx.c optional bktr pci dev/buslogic/bt.c optional bt dev/buslogic/bt_eisa.c optional bt eisa dev/buslogic/bt_isa.c optional bt isa dev/buslogic/bt_mca.c optional bt mca dev/buslogic/bt_pci.c optional bt pci dev/bwi/bwimac.c optional bwi dev/bwi/bwiphy.c optional bwi dev/bwi/bwirf.c optional bwi dev/bwi/if_bwi.c optional bwi dev/bwi/if_bwi_pci.c optional bwi pci dev/bwn/if_bwn.c optional bwn siba_bwn dev/bxe/if_bxe.c optional bxe dev/bxe/bxe_link.c optional bxe dev/cardbus/cardbus.c optional cardbus dev/cardbus/cardbus_cis.c optional cardbus dev/cardbus/cardbus_device.c optional cardbus dev/cas/if_cas.c optional cas dev/cfi/cfi_core.c optional cfi dev/cfi/cfi_dev.c optional cfi dev/cfi/cfi_disk.c optional cfid dev/ciss/ciss.c optional ciss dev/cm/smc90cx6.c optional cm dev/cmx/cmx.c optional cmx dev/cmx/cmx_pccard.c optional cmx pccard dev/cpufreq/ichss.c optional cpufreq dev/cs/if_cs.c optional cs dev/cs/if_cs_isa.c optional cs isa dev/cs/if_cs_pccard.c optional cs pccard dev/cxgb/cxgb_main.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_offload.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" 
dev/cxgb/cxgb_sge.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mc5.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_ael1002.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_aq100x.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_xgmac.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_tn1010.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/sys/uipc_mvec.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgbe/t4_main.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_sge.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_l2t.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4_hw.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cy/cy.c optional cy dev/cy/cy_isa.c optional cy isa dev/cy/cy_pci.c optional cy pci dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/de/if_de.c optional de pci dev/digi/CX.c optional digi_CX dev/digi/CX_PCI.c optional digi_CX_PCI dev/digi/EPCX.c optional digi_EPCX dev/digi/EPCX_PCI.c optional digi_EPCX_PCI dev/digi/Xe.c optional digi_Xe dev/digi/Xem.c optional digi_Xem dev/digi/Xr.c optional digi_Xr dev/digi/digi.c optional digi dev/digi/digi_isa.c optional digi isa dev/digi/digi_pci.c optional digi pci dev/dpt/dpt_eisa.c optional dpt eisa dev/dpt/dpt_pci.c optional dpt pci dev/dpt/dpt_scsi.c optional dpt dev/drm/ati_pcigart.c optional drm dev/drm/drm_agpsupport.c optional drm dev/drm/drm_auth.c optional drm dev/drm/drm_bufs.c optional drm dev/drm/drm_context.c optional drm dev/drm/drm_dma.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm dev/drm/drm_fops.c optional drm dev/drm/drm_hashtab.c optional drm dev/drm/drm_ioctl.c optional drm dev/drm/drm_irq.c optional drm dev/drm/drm_lock.c optional drm dev/drm/drm_memory.c optional drm dev/drm/drm_mm.c optional drm dev/drm/drm_pci.c optional drm dev/drm/drm_scatter.c optional drm dev/drm/drm_sman.c optional drm dev/drm/drm_sysctl.c optional drm dev/drm/drm_vm.c optional drm dev/drm/i915_dma.c optional i915drm dev/drm/i915_drv.c optional i915drm dev/drm/i915_irq.c optional i915drm dev/drm/i915_mem.c optional i915drm dev/drm/i915_suspend.c optional i915drm dev/drm/mach64_dma.c optional mach64drm dev/drm/mach64_drv.c optional mach64drm dev/drm/mach64_irq.c optional mach64drm dev/drm/mach64_state.c optional mach64drm dev/drm/mga_dma.c optional mgadrm dev/drm/mga_drv.c optional mgadrm dev/drm/mga_irq.c optional mgadrm dev/drm/mga_state.c optional mgadrm \ compile-with "${NORMAL_C} -finline-limit=13500" dev/drm/mga_warp.c optional mgadrm dev/drm/r128_cce.c optional r128drm \ compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE} ${NO_WCONSTANT_CONVERSION}" dev/drm/r128_drv.c optional 
r128drm dev/drm/r128_irq.c optional r128drm dev/drm/r128_state.c optional r128drm \ compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE} -finline-limit=13500" dev/drm/r300_cmdbuf.c optional radeondrm dev/drm/r600_blit.c optional radeondrm dev/drm/r600_cp.c optional radeondrm \ compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE} ${NO_WCONSTANT_CONVERSION}" dev/drm/radeon_cp.c optional radeondrm \ compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE} ${NO_WCONSTANT_CONVERSION}" dev/drm/radeon_cs.c optional radeondrm dev/drm/radeon_drv.c optional radeondrm dev/drm/radeon_irq.c optional radeondrm dev/drm/radeon_mem.c optional radeondrm dev/drm/radeon_state.c optional radeondrm dev/drm/savage_bci.c optional savagedrm dev/drm/savage_drv.c optional savagedrm dev/drm/savage_state.c optional savagedrm dev/drm/sis_drv.c optional sisdrm dev/drm/sis_ds.c optional sisdrm dev/drm/sis_mm.c optional sisdrm dev/drm/tdfx_drv.c optional tdfxdrm dev/drm/via_dma.c optional viadrm dev/drm/via_dmablit.c optional viadrm dev/drm/via_drv.c optional viadrm dev/drm/via_irq.c optional viadrm dev/drm/via_map.c optional viadrm dev/drm/via_mm.c optional viadrm dev/drm/via_verifier.c optional viadrm dev/drm/via_video.c optional viadrm dev/ed/if_ed.c optional ed dev/ed/if_ed_novell.c optional ed dev/ed/if_ed_rtl80x9.c optional ed dev/ed/if_ed_pccard.c optional ed pccard dev/ed/if_ed_pci.c optional ed pci dev/eisa/eisa_if.m standard dev/eisa/eisaconf.c optional eisa dev/e1000/if_em.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/if_lem.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/if_igb.c optional igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_80003es2lan.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82540.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82541.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82542.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82543.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82571.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82575.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_ich8lan.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_api.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mac.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_manage.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_nvm.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_phy.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_vf.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mbx.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_osdep.c optional em | igb \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/et/if_et.c optional et dev/en/if_en_pci.c optional en pci dev/en/midway.c optional en dev/ep/if_ep.c optional ep dev/ep/if_ep_eisa.c optional ep eisa dev/ep/if_ep_isa.c optional ep isa dev/ep/if_ep_mca.c optional ep mca dev/ep/if_ep_pccard.c optional ep pccard dev/esp/esp_pci.c optional esp pci dev/esp/ncr53c9x.c optional esp dev/ex/if_ex.c optional ex dev/ex/if_ex_isa.c optional ex isa dev/ex/if_ex_pccard.c optional ex pccard dev/exca/exca.c optional cbb dev/fatm/if_fatm.c optional fatm pci dev/fb/splash.c 
optional splash dev/fdt/fdt_common.c optional fdt dev/fdt/fdt_pci.c optional fdt pci dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static dev/fdt/fdtbus.c optional fdt dev/fdt/simplebus.c optional fdt dev/fe/if_fe.c optional fe dev/fe/if_fe_pccard.c optional fe pccard dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp dev/firewire/sbp_targ.c optional sbp_targ dev/flash/at45d.c optional at45d dev/flash/mx25l.c optional mx25l dev/fxp/if_fxp.c optional fxp dev/fxp/inphy.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/gem/if_gem_sbus.c optional gem sbus dev/gpio/gpiobus.c optional gpio \ dependency "gpiobus_if.h" dev/gpio/gpioc.c optional gpio \ dependency "gpio_if.h" dev/gpio/gpioiic.c optional gpioiic dev/gpio/gpioled.c optional gpioled dev/gpio/gpio_if.m optional gpio dev/gpio/gpiobus_if.m optional gpio dev/hatm/if_hatm.c optional hatm pci dev/hatm/if_hatm_intr.c optional hatm pci dev/hatm/if_hatm_ioctl.c optional hatm pci dev/hatm/if_hatm_rx.c optional hatm pci dev/hatm/if_hatm_tx.c optional hatm pci dev/hifn/hifn7751.c optional hifn dev/hme/if_hme.c optional hme dev/hme/if_hme_pci.c optional hme pci dev/hme/if_hme_sbus.c optional hme sbus dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/hwpmc/hwpmc_soft.c optional hwpmc dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_eisa.c optional ida eisa dev/ida/ida_pci.c optional ida pci dev/ie/if_ie.c optional ie isa nowerror dev/ie/if_ie_isa.c optional ie isa dev/ieee488/ibfoo.c optional pcii | tnt4882 dev/ieee488/pcii.c optional pcii dev/ieee488/tnt4882.c optional tnt4882 dev/ieee488/upd7210.c optional pcii | tnt4882 dev/iicbus/ad7418.c optional ad7418 dev/iicbus/ds133x.c optional ds133x dev/iicbus/ds1374.c optional ds1374 dev/iicbus/ds1672.c optional ds1672 dev/iicbus/icee.c optional icee dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iicbb.c optional iicbb dev/iicbus/iicbb_if.m optional iicbb dev/iicbus/iicbus.c optional iicbus dev/iicbus/iicbus_if.m optional iicbus dev/iicbus/iiconf.c optional iicbus dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iicbus/iicoc.c optional iicoc dev/iicbus/pcf8563.c optional pcf8563 dev/iir/iir.c optional iir dev/iir/iir_ctrl.c optional iir dev/iir/iir_pci.c optional iir pci dev/ips/ips.c optional ips dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw ipwbssfw.c optional ipwbssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwbssfw.c" ipw_bss.fwo optional ipwbssfw | ipwfw \ dependency "ipw_bss.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} ipw_bss.fw" \ no-implicit-rule \ clean "ipw_bss.fwo" ipw_bss.fw optional ipwbssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \ no-obj 
no-implicit-rule \ clean "ipw_bss.fw" ipwibssfw.c optional ipwibssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwibssfw.c" ipw_ibss.fwo optional ipwibssfw | ipwfw \ dependency "ipw_ibss.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} ipw_ibss.fw" \ no-implicit-rule \ clean "ipw_ibss.fwo" ipw_ibss.fw optional ipwibssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \ no-obj no-implicit-rule \ clean "ipw_ibss.fw" ipwmonitorfw.c optional ipwmonitorfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwmonitorfw.c" ipw_monitor.fwo optional ipwmonitorfw | ipwfw \ dependency "ipw_monitor.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} ipw_monitor.fw" \ no-implicit-rule \ clean "ipw_monitor.fwo" ipw_monitor.fw optional ipwmonitorfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \ no-obj no-implicit-rule \ clean "ipw_monitor.fw" dev/iscsi/initiator/iscsi.c optional iscsi_initiator scbus dev/iscsi/initiator/iscsi_subr.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_cam.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_soc.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_sm.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_subr.c optional iscsi_initiator scbus dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_sbus.c optional isp sbus dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi iwibssfw.c optional iwibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwibssfw.c" iwi_bss.fwo optional iwibssfw | iwifw \ dependency "iwi_bss.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwi_bss.fw" \ no-implicit-rule \ clean "iwi_bss.fwo" iwi_bss.fw optional iwibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwi/ipw2200-bss.fw.uu" \ no-obj no-implicit-rule \ clean "iwi_bss.fw" iwiibssfw.c optional iwiibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi -miwi_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwiibssfw.c" iwi_ibss.fwo optional iwiibssfw | iwifw \ dependency "iwi_ibss.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwi_ibss.fw" \ no-implicit-rule \ clean "iwi_ibss.fwo" iwi_ibss.fw optional iwiibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \ no-obj no-implicit-rule \ clean "iwi_ibss.fw" iwimonitorfw.c optional iwimonitorfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwimonitorfw.c" iwi_monitor.fwo optional iwimonitorfw | iwifw \ dependency "iwi_monitor.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} 
iwi_monitor.fw" \ no-implicit-rule \ clean "iwi_monitor.fwo" iwi_monitor.fw optional iwimonitorfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \ no-obj no-implicit-rule \ clean "iwi_monitor.fw" dev/iwn/if_iwn.c optional iwn iwn1000fw.c optional iwn1000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn1000fw.c" iwn1000fw.fwo optional iwn1000fw | iwnfw \ dependency "iwn1000.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn1000.fw" \ no-implicit-rule \ clean "iwn1000fw.fwo" iwn1000.fw optional iwn1000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \ no-obj no-implicit-rule \ clean "iwn1000.fw" iwn4965fw.c optional iwn4965fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn4965fw.c" iwn4965fw.fwo optional iwn4965fw | iwnfw \ dependency "iwn4965.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn4965.fw" \ no-implicit-rule \ clean "iwn4965fw.fwo" iwn4965.fw optional iwn4965fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \ no-obj no-implicit-rule \ clean "iwn4965.fw" iwn5000fw.c optional iwn5000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5000fw.c" iwn5000fw.fwo optional iwn5000fw | iwnfw \ dependency "iwn5000.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn5000.fw" \ no-implicit-rule \ clean "iwn5000fw.fwo" iwn5000.fw optional iwn5000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \ no-obj no-implicit-rule \ clean "iwn5000.fw" iwn5150fw.c optional iwn5150fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5150fw.c" iwn5150fw.fwo optional iwn5150fw | iwnfw \ dependency "iwn5150.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn5150.fw" \ no-implicit-rule \ clean "iwn5150fw.fwo" iwn5150.fw optional iwn5150fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu"\ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu" \ no-obj no-implicit-rule \ clean "iwn5150.fw" iwn6000fw.c optional iwn6000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000fw.c" iwn6000fw.fwo optional iwn6000fw | iwnfw \ dependency "iwn6000.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn6000.fw" \ no-implicit-rule \ clean "iwn6000fw.fwo" iwn6000.fw optional iwn6000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \ no-obj no-implicit-rule \ clean "iwn6000.fw" iwn6000g2afw.c optional iwn6000g2afw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw 
-miwn6000g2afw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2afw.c" iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \ dependency "iwn6000g2a.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn6000g2a.fw" \ no-implicit-rule \ clean "iwn6000g2afw.fwo" iwn6000g2a.fw optional iwn6000g2afw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-17.168.5.2.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-6000g2a-17.168.5.2.fw.uu" \ no-obj no-implicit-rule \ clean "iwn6000g2a.fw" iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2bfw.c" iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \ dependency "iwn6000g2b.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn6000g2b.fw" \ no-implicit-rule \ clean "iwn6000g2bfw.fwo" iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-17.168.5.2.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-6000g2b-17.168.5.2.fw.uu" \ no-obj no-implicit-rule \ clean "iwn6000g2b.fw" iwn6050fw.c optional iwn6050fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6050fw.c" iwn6050fw.fwo optional iwn6050fw | iwnfw \ dependency "iwn6050.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} iwn6050.fw" \ no-implicit-rule \ clean "iwn6050fw.fwo" iwn6050.fw optional iwn6050fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \ no-obj no-implicit-rule \ clean "iwn6050.fw" dev/ixgb/if_ixgb.c optional ixgb dev/ixgb/ixgb_ee.c optional ixgb dev/ixgb/ixgb_hw.c optional ixgb dev/ixgbe/ixgbe.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixv.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_phy.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_api.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_common.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_mbx.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_vf.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82598.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82599.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x540.c optional ixgbe inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/jme/if_jme.c optional jme pci dev/joy/joy.c optional joy dev/joy/joy_isa.c optional joy isa dev/joy/joy_pccard.c optional joy pccard dev/kbdmux/kbdmux.c optional kbdmux dev/ksyms/ksyms.c optional ksyms dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard dev/lge/if_lge.c optional lge dev/lmc/if_lmc.c optional lmc dev/malo/if_malo.c optional malo dev/malo/if_malohal.c optional malo dev/malo/if_malo_pci.c optional malo pci dev/mc146818/mc146818.c optional mc146818 dev/mca/mca_bus.c optional mca dev/mcd/mcd.c optional mcd isa nowerror dev/mcd/mcd_isa.c optional mcd isa nowerror dev/md/md.c optional md dev/mem/memdev.c optional mem dev/mem/memutil.c optional 
mem dev/mfi/mfi.c optional mfi dev/mfi/mfi_debug.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_syspd.c optional mfi dev/mfi/mfi_tbolt.c optional mfi dev/mfi/mfi_linux.c optional mfi compat_linux dev/mfi/mfi_cam.c optional mfip scbus dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/atphy.c optional miibus | atphy dev/mii/axphy.c optional miibus | axphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/e1000phy.c optional miibus | e1000phy dev/mii/gentbi.c optional miibus | gentbi dev/mii/icsphy.c optional miibus | icsphy dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/jmphy.c optional miibus | jmphy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/mii.c optional miibus | mii dev/mii/mii_bitbang.c optional miibus | mii_bitbang dev/mii/mii_physubr.c optional miibus | mii dev/mii/miibus_if.m optional miibus | mii dev/mii/mlphy.c optional miibus | mlphy dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/nsphyter.c optional miibus | nsphyter dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rdcphy.c optional miibus | rdcphy dev/mii/rgephy.c optional miibus | rgephy dev/mii/rlphy.c optional miibus | rlphy dev/mii/rlswitch.c optional rlswitch dev/mii/smcphy.c optional miibus | smcphy dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/tlphy.c optional miibus | tlphy dev/mii/truephy.c optional miibus | truephy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/xmphy.c optional miibus | xmphy dev/mk48txx/mk48txx.c optional mk48txx dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mly/mly.c optional mly dev/mmc/mmc.c optional mmc dev/mmc/mmcbr_if.m standard dev/mmc/mmcbus_if.m standard dev/mmc/mmcsd.c optional mmcsd dev/mn/if_mn.c optional mn pci dev/mps/mps.c optional mps dev/mps/mps_config.c optional mps dev/mps/mps_mapping.c optional mps dev/mps/mps_pci.c optional mps pci dev/mps/mps_sas.c optional mps \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mps/mps_sas_lsi.c optional mps dev/mps/mps_table.c optional mps dev/mps/mps_user.c optional mps dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/mpt/mpt_user.c optional mpt dev/msk/if_msk.c optional msk dev/mvs/mvs.c optional mvs dev/mvs/mvs_if.m optional mvs dev/mvs/mvs_pci.c optional mvs pci dev/mwl/if_mwl.c optional mwl dev/mwl/if_mwl_pci.c optional mwl pci dev/mwl/mwlhal.c optional mwl mwlfw.c optional mwlfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "mwlfw.c" mw88W8363.fwo optional mwlfw \ dependency "mw88W8363.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} mw88W8363.fw" \ no-implicit-rule \ clean "mw88W8363.fwo" mw88W8363.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/mwl/mw88W8363.fw.uu" \ no-obj no-implicit-rule \ clean "mw88W8363.fw" mwlboot.fwo optional mwlfw \ dependency "mwlboot.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} mwlboot.fw" \ no-implicit-rule \ clean "mwlboot.fwo" mwlboot.fw optional mwlfw \ dependency 
"$S/contrib/dev/mwl/mwlboot.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/mwl/mwlboot.fw.uu" \ no-obj no-implicit-rule \ clean "mwlboot.fw" dev/mxge/if_mxge.c optional mxge pci dev/mxge/mxge_lro.c optional mxge pci dev/mxge/mxge_eth_z8e.c optional mxge pci dev/mxge/mxge_ethp_z8e.c optional mxge pci dev/mxge/mxge_rss_eth_z8e.c optional mxge pci dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci dev/my/if_my.c optional my dev/nand/nand.c optional nand dev/nand/nand_bbt.c optional nand dev/nand/nand_cdev.c optional nand dev/nand/nand_generic.c optional nand dev/nand/nand_geom.c optional nand dev/nand/nand_id.c optional nand dev/nand/nandbus.c optional nand dev/nand/nandbus_if.m optional nand dev/nand/nand_if.m optional nand dev/nand/nandsim.c optional nandsim nand dev/nand/nandsim_chip.c optional nandsim nand dev/nand/nandsim_ctrl.c optional nandsim nand dev/nand/nandsim_log.c optional nandsim nand dev/nand/nandsim_swap.c optional nandsim nand dev/nand/nfc_if.m optional nand dev/ncv/ncr53c500.c optional ncv dev/ncv/ncr53c500_pccard.c optional ncv pccard dev/netmap/netmap.c optional netmap dev/nge/if_nge.c optional nge dev/nxge/if_nxge.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-device.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-mm.c optional nxge dev/nxge/xgehal/xge-queue.c optional nxge dev/nxge/xgehal/xgehal-driver.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-ring.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-channel.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-fifo.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-stats.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-config.c optional nxge dev/nxge/xgehal/xgehal-mgmt.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nmdm/nmdm.c optional nmdm dev/nsp/nsp.c optional nsp dev/nsp/nsp_pccard.c optional nsp pccard dev/null/null.c standard dev/oce/oce_hw.c optional oce pci dev/oce/oce_if.c optional oce pci dev/oce/oce_mbox.c optional oce pci dev/oce/oce_queue.c optional oce pci dev/oce/oce_sysctl.c optional oce pci dev/oce/oce_util.c optional oce pci dev/patm/if_patm.c optional patm pci dev/patm/if_patm_attach.c optional patm pci dev/patm/if_patm_intr.c optional patm pci dev/patm/if_patm_ioctl.c optional patm pci dev/patm/if_patm_rtables.c optional patm pci dev/patm/if_patm_rx.c optional patm pci dev/patm/if_patm_tx.c optional patm pci dev/pbio/pbio.c optional pbio isa dev/pccard/card_if.m standard dev/pccard/pccard.c optional pccard dev/pccard/pccard_cis.c optional pccard dev/pccard/pccard_cis_quirks.c optional pccard dev/pccard/pccard_device.c optional pccard dev/pccard/power_if.m standard dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_isa.c optional cbb isa dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/eisa_pci.c optional pci eisa dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_pci.c optional pci dev/pci/pci_subr.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/vga_pci.c optional pci dev/pcn/if_pcn.c optional pcn pci dev/pdq/if_fea.c optional fea eisa dev/pdq/if_fpa.c optional fpa pci dev/pdq/pdq.c optional nowerror fea eisa 
| fpa pci dev/pdq/pdq_ifsubr.c optional nowerror fea eisa | fpa pci dev/ppbus/if_plip.c optional plip dev/ppbus/immio.c optional vpo dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppbus/vpo.c optional vpo dev/ppbus/vpoio.c optional vpo dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/pst/pst-iop.c optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst dev/pty/pty.c optional pty dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pccard.c optional puc pccard dev/puc/puc_pci.c optional puc pci dev/puc/pucdata.c optional puc pci dev/quicc/quicc_core.c optional quicc dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral +dev/ral/rt2860.c optional ral dev/ral/if_ral_pci.c optional ral pci rt2561fw.c optional rt2561fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561fw.c" rt2561fw.fwo optional rt2561fw | ralfw \ dependency "rt2561.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} rt2561.fw" \ no-implicit-rule \ clean "rt2561fw.fwo" rt2561.fw optional rt2561fw | ralfw \ dependency "$S/contrib/dev/ral/rt2561.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ral/rt2561.fw.uu" \ no-obj no-implicit-rule \ clean "rt2561.fw" rt2561sfw.c optional rt2561sfw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561sfw.c" rt2561sfw.fwo optional rt2561sfw | ralfw \ dependency "rt2561s.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} rt2561s.fw" \ no-implicit-rule \ clean "rt2561sfw.fwo" rt2561s.fw optional rt2561sfw | ralfw \ dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ral/rt2561s.fw.uu" \ no-obj no-implicit-rule \ clean "rt2561s.fw" rt2661fw.c optional rt2661fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2661fw.c" rt2661fw.fwo optional rt2661fw | ralfw \ dependency "rt2661.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} rt2661.fw" \ no-implicit-rule \ clean "rt2661fw.fwo" rt2661.fw optional rt2661fw | ralfw \ dependency "$S/contrib/dev/ral/rt2661.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ral/rt2661.fw.uu" \ no-obj no-implicit-rule \ clean "rt2661.fw" rt2860fw.c optional rt2860fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2860fw.c" rt2860fw.fwo optional rt2860fw | ralfw \ dependency "rt2860.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} rt2860.fw" \ no-implicit-rule \ clean "rt2860fw.fwo" rt2860.fw optional rt2860fw | ralfw \ dependency "$S/contrib/dev/ral/rt2860.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/ral/rt2860.fw.uu" \ no-obj no-implicit-rule \ clean "rt2860.fw" dev/random/harvest.c standard dev/random/hash.c optional random dev/random/probe.c optional 
random dev/random/randomdev.c optional random dev/random/randomdev_soft.c optional random dev/random/yarrow.c optional random dev/rc/rc.c optional rc dev/re/if_re.c optional re dev/rndtest/rndtest.c optional rndtest dev/rp/rp.c optional rp dev/rp/rp_isa.c optional rp isa dev/rp/rp_pci.c optional rp pci dev/safe/safe.c optional safe dev/scc/scc_if.m optional scc dev/scc/scc_bfe_ebus.c optional scc ebus dev/scc/scc_bfe_quicc.c optional scc quicc dev/scc/scc_bfe_sbus.c optional scc fhc | scc sbus dev/scc/scc_core.c optional scc dev/scc/scc_dev_quicc.c optional scc quicc dev/scc/scc_dev_sab82532.c optional scc dev/scc/scc_dev_z8530.c optional scc dev/scd/scd.c optional scd isa dev/scd/scd_isa.c optional scd isa dev/sdhci/sdhci.c optional sdhci pci dev/sf/if_sf.c optional sf pci dev/sge/if_sge.c optional sge pci dev/si/si.c optional si dev/si/si2_z280.c optional si dev/si/si3_t225.c optional si dev/si/si_eisa.c optional si eisa dev/si/si_isa.c optional si isa dev/si/si_pci.c optional si pci dev/siba/siba_bwn.c optional siba_bwn pci dev/siba/siba_core.c optional siba_bwn pci dev/siis/siis.c optional siis pci dev/sis/if_sis.c optional sis pci dev/sk/if_sk.c optional sk pci dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/smc/if_smc.c optional smc dev/sn/if_sn.c optional sn dev/sn/if_sn_isa.c optional sn isa dev/sn/if_sn_pccard.c optional sn pccard dev/snp/snp.c optional snp dev/sound/clone.c optional sound dev/sound/unit.c optional sound dev/sound/isa/ad1816.c optional snd_ad1816 isa dev/sound/isa/ess.c optional snd_ess isa dev/sound/isa/gusc.c optional snd_gusc isa dev/sound/isa/mss.c optional snd_mss isa dev/sound/isa/sb16.c optional snd_sb16 isa dev/sound/isa/sb8.c optional snd_sb8 isa dev/sound/isa/sbc.c optional snd_sbc isa dev/sound/isa/sndbuf_dma.c optional sound isa dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/ds1.c optional snd_ds1 pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci dev/sound/pci/emu10kx.c optional snd_emu10kx pci dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci dev/sound/pci/envy24.c optional snd_envy24 pci dev/sound/pci/envy24ht.c optional snd_envy24ht pci dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro.c optional snd_maestro pci dev/sound/pci/maestro3.c optional snd_maestro3 pci dev/sound/pci/neomagic.c optional snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/spicds.c optional snd_spicds pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pci/hda/hdaa.c optional snd_hda pci dev/sound/pci/hda/hdaa_patches.c optional snd_hda pci dev/sound/pci/hda/hdac.c optional snd_hda pci dev/sound/pci/hda/hdac_if.m optional snd_hda pci dev/sound/pci/hda/hdacc.c optional snd_hda pci dev/sound/pci/hdspe.c optional snd_hdspe pci dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional 
sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_chain.c optional sound dev/sound/pcm/feeder_eq.c optional sound \ dependency "feeder_eq_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_format.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_matrix.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_mixer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_rate.c optional sound \ dependency "feeder_rate_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_volume.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/spibus/spibus.c optional spibus \ dependency "spibus_if.h" dev/spibus/spibus_if.m optional spibus dev/ste/if_ste.c optional ste pci dev/stg/tmc18c30.c optional stg dev/stg/tmc18c30_isa.c optional stg isa dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg dev/stge/if_stge.c optional stge dev/streams/streams.c optional streams dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/schistory.c optional sc dev/syscons/scmouse.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scvidctl.c optional sc dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc dev/syscons/warp/warp_saver.c optional warp_saver dev/tdfx/tdfx_linux.c optional tdfx_linux tdfx compat_linux dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/tl/if_tl.c optional tl pci dev/trm/trm.c optional trm dev/twa/tw_cl_init.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_intr.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_io.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_misc.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_cam.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_freebsd.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twe/twe.c optional twe dev/twe/twe_freebsd.c optional twe dev/tws/tws.c optional tws dev/tws/tws_cam.c optional tws dev/tws/tws_hdm.c optional tws dev/tws/tws_services.c optional tws dev/tws/tws_user.c optional tws dev/tx/if_tx.c optional tx dev/txp/if_txp.c optional txp 
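# The firmware stanzas in this file (ipwfw, iwifw, iwnfw, mwlfw and
# ralfw above; runfw and wpifw below) all repeat one three-rule
# pattern: a generated C stub from $S/tools/fw_stub.awk that registers
# the image with firmware(9), a .fwo object produced by relinking the
# raw image with "${LD} -b binary", and the image itself recovered with
# uudecode from a .uu file under $S/contrib/dev, each rule cleaning up
# its own output.  A minimal sketch for a made-up module "xyzfw" (all
# names hypothetical, illustration only):
#
#xyzfw.c	optional xyzfw \
#	compile-with "${AWK} -f $S/tools/fw_stub.awk xyz.fw:xyzfw -mxyzfw -c${.TARGET}" \
#	no-implicit-rule before-depend local \
#	clean "xyzfw.c"
#xyz.fwo	optional xyzfw \
#	dependency "xyz.fw" \
#	compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} xyz.fw" \
#	no-implicit-rule \
#	clean "xyz.fwo"
#xyz.fw	optional xyzfw \
#	dependency "$S/contrib/dev/xyz/xyz.fw.uu" \
#	compile-with "uudecode -o ${.TARGET} $S/contrib/dev/xyz/xyz.fw.uu" \
#	no-obj no-implicit-rule \
#	clean "xyz.fw"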
dev/uart/uart_bus_acpi.c optional uart acpi #dev/uart/uart_bus_cbus.c optional uart cbus dev/uart/uart_bus_ebus.c optional uart ebus dev/uart/uart_bus_fdt.c optional uart fdt dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pccard.c optional uart pccard dev/uart/uart_bus_pci.c optional uart pci dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 dev/uart/uart_dev_quicc.c optional uart quicc dev/uart/uart_dev_sab82532.c optional uart uart_sab82532 dev/uart/uart_dev_sab82532.c optional uart scc dev/uart/uart_dev_z8530.c optional uart uart_z8530 dev/uart/uart_dev_z8530.c optional uart scc dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart dev/ubsec/ubsec.c optional ubsec # # USB controller drivers # dev/usb/controller/at91dci.c optional at91dci dev/usb/controller/at91dci_atmelarm.c optional at91dci at91rm9200 dev/usb/controller/musb_otg.c optional musb dev/usb/controller/musb_otg_atmelarm.c optional musb at91rm9200 dev/usb/controller/ehci.c optional ehci dev/usb/controller/ehci_pci.c optional ehci pci dev/usb/controller/ohci.c optional ohci dev/usb/controller/ohci_atmelarm.c optional ohci at91rm9200 dev/usb/controller/ohci_pci.c optional ohci pci dev/usb/controller/uhci.c optional uhci dev/usb/controller/uhci_pci.c optional uhci pci dev/usb/controller/xhci.c optional xhci dev/usb/controller/xhci_pci.c optional xhci pci dev/usb/controller/uss820dci.c optional uss820dci dev/usb/controller/uss820dci_atmelarm.c optional uss820dci at91rm9200 dev/usb/controller/usb_controller.c optional usb # # USB storage drivers # dev/usb/storage/umass.c optional umass dev/usb/storage/urio.c optional urio dev/usb/storage/ustorage_fs.c optional usfs # # USB core # dev/usb/usb_busdma.c optional usb dev/usb/usb_compat_linux.c optional usb dev/usb/usb_core.c optional usb dev/usb/usb_debug.c optional usb dev/usb/usb_dev.c optional usb dev/usb/usb_device.c optional usb dev/usb/usb_dynamic.c optional usb dev/usb/usb_error.c optional usb dev/usb/usb_generic.c optional usb dev/usb/usb_handle_request.c optional usb dev/usb/usb_hid.c optional usb dev/usb/usb_hub.c optional usb dev/usb/usb_if.m optional usb dev/usb/usb_lookup.c optional usb dev/usb/usb_mbuf.c optional usb dev/usb/usb_msctest.c optional usb dev/usb/usb_parse.c optional usb dev/usb/usb_pf.c optional usb dev/usb/usb_process.c optional usb dev/usb/usb_request.c optional usb dev/usb/usb_transfer.c optional usb dev/usb/usb_util.c optional usb # # USB network drivers # dev/usb/net/if_aue.c optional aue dev/usb/net/if_axe.c optional axe dev/usb/net/if_cdce.c optional cdce dev/usb/net/if_cue.c optional cue dev/usb/net/if_ipheth.c optional ipheth dev/usb/net/if_kue.c optional kue dev/usb/net/if_mos.c optional mos dev/usb/net/if_rue.c optional rue dev/usb/net/if_udav.c optional udav dev/usb/net/if_usie.c optional usie dev/usb/net/ruephy.c optional rue dev/usb/net/usb_ethernet.c optional aue | axe | cdce | cue | kue | mos | \ rue | udav dev/usb/net/uhso.c optional uhso # # USB WLAN drivers # dev/usb/wlan/if_rum.c optional rum dev/usb/wlan/if_run.c optional run runfw.c optional runfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk runfw:runfw -mrunfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "runfw.c" runfw.fwo optional runfw \ dependency "runfw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} runfw" \ 
no-implicit-rule \ clean "runfw.fwo" runfw optional runfw \ dependency "$S/contrib/dev/run/rt2870.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/run/rt2870.fw.uu" \ no-obj no-implicit-rule \ clean "runfw" dev/usb/wlan/if_uath.c optional uath dev/usb/wlan/if_upgt.c optional upgt dev/usb/wlan/if_ural.c optional ural dev/usb/wlan/if_urtw.c optional urtw dev/usb/wlan/if_zyd.c optional zyd # # USB serial and parallel port drivers # dev/usb/serial/u3g.c optional u3g dev/usb/serial/uark.c optional uark dev/usb/serial/ubsa.c optional ubsa dev/usb/serial/ubser.c optional ubser dev/usb/serial/uchcom.c optional uchcom dev/usb/serial/ucycom.c optional ucycom dev/usb/serial/ufoma.c optional ufoma dev/usb/serial/uftdi.c optional uftdi dev/usb/serial/ugensa.c optional ugensa dev/usb/serial/uipaq.c optional uipaq dev/usb/serial/ulpt.c optional ulpt dev/usb/serial/umcs.c optional umcs dev/usb/serial/umct.c optional umct dev/usb/serial/umodem.c optional umodem dev/usb/serial/umoscom.c optional umoscom dev/usb/serial/uplcom.c optional uplcom dev/usb/serial/uslcom.c optional uslcom dev/usb/serial/uvisor.c optional uvisor dev/usb/serial/uvscom.c optional uvscom dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \ uchcom | ucycom | ufoma | uftdi | \ ugensa | uipaq | umcs | umct | \ umodem | umoscom | uplcom | usie | \ uslcom | uvisor | uvscom # # USB misc drivers # dev/usb/misc/ufm.c optional ufm dev/usb/misc/udbp.c optional udbp # # USB input drivers # dev/usb/input/atp.c optional atp dev/usb/input/uep.c optional uep dev/usb/input/uhid.c optional uhid dev/usb/input/ukbd.c optional ukbd dev/usb/input/ums.c optional ums # # USB quirks # dev/usb/quirk/usb_quirk.c optional usb # # USB templates # dev/usb/template/usb_template.c optional usb_template dev/usb/template/usb_template_audio.c optional usb_template dev/usb/template/usb_template_cdce.c optional usb_template dev/usb/template/usb_template_kbd.c optional usb_template dev/usb/template/usb_template_modem.c optional usb_template dev/usb/template/usb_template_mouse.c optional usb_template dev/usb/template/usb_template_msc.c optional usb_template dev/usb/template/usb_template_mtp.c optional usb_template # # USB END # dev/utopia/idtphy.c optional utopia dev/utopia/suni.c optional utopia dev/utopia/utopia.c optional utopia dev/vge/if_vge.c optional vge dev/vkbd/vkbd.c optional vkbd dev/vr/if_vr.c optional vr pci dev/vte/if_vte.c optional vte pci dev/vx/if_vx.c optional vx dev/vx/if_vx_eisa.c optional vx eisa dev/vx/if_vx_pci.c optional vx pci dev/vxge/vxge.c optional vxge dev/vxge/vxgehal/vxgehal-ifmsg.c optional vxge dev/vxge/vxgehal/vxgehal-mrpcim.c optional vxge dev/vxge/vxgehal/vxge-queue.c optional vxge dev/vxge/vxgehal/vxgehal-ring.c optional vxge dev/vxge/vxgehal/vxgehal-swapper.c optional vxge dev/vxge/vxgehal/vxgehal-mgmt.c optional vxge dev/vxge/vxgehal/vxgehal-srpcim.c optional vxge dev/vxge/vxgehal/vxgehal-config.c optional vxge dev/vxge/vxgehal/vxgehal-blockpool.c optional vxge dev/vxge/vxgehal/vxgehal-doorbells.c optional vxge dev/vxge/vxgehal/vxgehal-mgmtaux.c optional vxge dev/vxge/vxgehal/vxgehal-device.c optional vxge dev/vxge/vxgehal/vxgehal-mm.c optional vxge dev/vxge/vxgehal/vxgehal-driver.c optional vxge dev/vxge/vxgehal/vxgehal-virtualpath.c optional vxge dev/vxge/vxgehal/vxgehal-channel.c optional vxge dev/vxge/vxgehal/vxgehal-fifo.c optional vxge dev/watchdog/watchdog.c standard dev/wb/if_wb.c optional wb pci dev/wds/wd7000.c optional wds isa dev/wi/if_wi.c optional wi dev/wi/if_wi_pccard.c optional wi 
pccard dev/wi/if_wi_pci.c optional wi pci dev/wl/if_wl.c optional wl isa dev/wpi/if_wpi.c optional wpi pci wpifw.c optional wpifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "wpifw.c" wpifw.fwo optional wpifw \ dependency "wpi.fw" \ compile-with "${LD} -b binary -d -warn-common -r -d -o ${.TARGET} wpi.fw" \ no-implicit-rule \ clean "wpifw.fwo" wpi.fw optional wpifw \ dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \ compile-with "uudecode -o ${.TARGET} $S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \ no-obj no-implicit-rule \ clean "wpi.fw" dev/xe/if_xe.c optional xe dev/xe/if_xe_pccard.c optional xe pccard dev/xl/if_xl.c optional xl pci dev/xl/xlphy.c optional xl pci fs/coda/coda_fbsd.c optional vcoda fs/coda/coda_psdev.c optional vcoda fs/coda/coda_subr.c optional vcoda fs/coda/coda_venus.c optional vcoda fs/coda/coda_vfsops.c optional vcoda fs/coda/coda_vnops.c optional vcoda fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_dir.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/hpfs/hpfs_alsubr.c optional hpfs fs/hpfs/hpfs_lookup.c optional hpfs fs/hpfs/hpfs_subr.c optional hpfs fs/hpfs/hpfs_vfsops.c optional hpfs fs/hpfs/hpfs_vnops.c optional hpfs fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_fileno.c optional msdosfs fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/nandfs/bmap.c optional nandfs fs/nandfs/nandfs_alloc.c optional nandfs fs/nandfs/nandfs_bmap.c optional nandfs fs/nandfs/nandfs_buffer.c optional nandfs fs/nandfs/nandfs_cleaner.c optional nandfs fs/nandfs/nandfs_cpfile.c optional nandfs fs/nandfs/nandfs_dat.c optional nandfs fs/nandfs/nandfs_dir.c optional nandfs fs/nandfs/nandfs_ifile.c optional nandfs fs/nandfs/nandfs_segment.c optional nandfs fs/nandfs/nandfs_subr.c optional nandfs fs/nandfs/nandfs_sufile.c optional nandfs fs/nandfs/nandfs_vfsops.c optional nandfs fs/nandfs/nandfs_vnops.c optional nandfs fs/nfs/nfs_commonkrpc.c optional nfscl | nfsd fs/nfs/nfs_commonsubs.c optional nfscl | nfsd fs/nfs/nfs_commonport.c optional nfscl | nfsd fs/nfs/nfs_commonacl.c optional nfscl | nfsd fs/nfsclient/nfs_clcomsubs.c optional nfscl fs/nfsclient/nfs_clsubs.c optional nfscl fs/nfsclient/nfs_clstate.c optional nfscl fs/nfsclient/nfs_clkrpc.c optional nfscl fs/nfsclient/nfs_clrpcops.c optional nfscl fs/nfsclient/nfs_clvnops.c optional nfscl fs/nfsclient/nfs_clnode.c optional nfscl fs/nfsclient/nfs_clvfsops.c optional nfscl fs/nfsclient/nfs_clport.c optional nfscl fs/nfsclient/nfs_clbio.c optional nfscl fs/nfsclient/nfs_clnfsiod.c optional nfscl fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet fs/nfsserver/nfs_nfsdsubs.c optional nfsd inet fs/nfsserver/nfs_nfsdstate.c optional nfsd inet fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet fs/nfsserver/nfs_nfsdserv.c optional nfsd inet fs/nfsserver/nfs_nfsdport.c optional nfsd inet fs/nfsserver/nfs_nfsdcache.c optional nfsd inet fs/ntfs/ntfs_compr.c optional ntfs fs/ntfs/ntfs_iconv.c optional ntfs_iconv fs/ntfs/ntfs_ihash.c optional ntfs fs/ntfs/ntfs_subr.c optional ntfs fs/ntfs/ntfs_vfsops.c 
optional ntfs fs/ntfs/ntfs_vnops.c optional ntfs fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/nwfs/nwfs_io.c optional nwfs fs/nwfs/nwfs_ioctl.c optional nwfs fs/nwfs/nwfs_node.c optional nwfs fs/nwfs/nwfs_subr.c optional nwfs fs/nwfs/nwfs_vfsops.c optional nwfs fs/nwfs/nwfs_vnops.c optional nwfs fs/portalfs/portal_vfsops.c optional portalfs fs/portalfs/portal_vnops.c optional portalfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_ctl.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_ioctl.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_osrel.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/unionfs/union_subr.c optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs fs/tmpfs/tmpfs_vnops.c optional tmpfs fs/tmpfs/tmpfs_fifoops.c optional tmpfs fs/tmpfs/tmpfs_vfsops.c optional tmpfs fs/tmpfs/tmpfs_subr.c optional tmpfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb geom/bde/g_bde.c optional geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/cache/g_cache.c optional geom_cache geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_key_cache.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_aes.c optional geom_aes geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_fox.c optional geom_fox geom/geom_io.c standard geom/geom_kern.c standard geom/geom_map.c optional geom_map geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr geom/geom_pc98.c optional geom_pc98 geom/geom_pc98_enc.c optional geom_pc98 geom/geom_redboot.c optional geom_redboot geom/geom_slice.c standard geom/geom_subr.c standard geom/geom_sunlabel.c optional geom_sunlabel geom/geom_sunlabel_enc.c optional geom_sunlabel geom/geom_vfs.c standard geom/geom_vol_ffs.c optional geom_vol geom/journal/g_journal.c optional geom_journal geom/journal/g_journal_ufs.c optional geom_journal geom/label/g_label.c optional geom_label geom/label/g_label_ext2fs.c optional geom_label geom/label/g_label_iso9660.c optional geom_label geom/label/g_label_msdosfs.c 
optional geom_label geom/label/g_label_ntfs.c optional geom_label geom/label/g_label_reiserfs.c optional geom_label geom/label/g_label_ufs.c optional geom_label geom/label/g_label_gpt.c optional geom_label geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm geom/mirror/g_mirror.c optional geom_mirror geom/mirror/g_mirror_ctl.c optional geom_mirror geom/mountver/g_mountver.c optional geom_mountver geom/multipath/g_multipath.c optional geom_multipath geom/nop/g_nop.c optional geom_nop geom/part/g_part.c standard geom/part/g_part_if.m standard geom/part/g_part_apm.c optional geom_part_apm geom/part/g_part_bsd.c optional geom_part_bsd geom/part/g_part_ebr.c optional geom_part_ebr geom/part/g_part_gpt.c optional geom_part_gpt geom/part/g_part_ldm.c optional geom_part_ldm geom/part/g_part_mbr.c optional geom_part_mbr geom/part/g_part_pc98.c optional geom_part_pc98 geom/part/g_part_vtoc8.c optional geom_part_vtoc8 geom/raid/g_raid.c optional geom_raid geom/raid/g_raid_ctl.c optional geom_raid geom/raid/g_raid_md_if.m optional geom_raid geom/raid/g_raid_tr_if.m optional geom_raid geom/raid/md_ddf.c optional geom_raid geom/raid/md_intel.c optional geom_raid geom/raid/md_jmicron.c optional geom_raid geom/raid/md_nvidia.c optional geom_raid geom/raid/md_promise.c optional geom_raid geom/raid/md_sii.c optional geom_raid geom/raid/tr_concat.c optional geom_raid geom/raid/tr_raid0.c optional geom_raid geom/raid/tr_raid1.c optional geom_raid geom/raid/tr_raid1e.c optional geom_raid geom/raid/tr_raid5.c optional geom_raid geom/raid3/g_raid3.c optional geom_raid3 geom/raid3/g_raid3_ctl.c optional geom_raid3 geom/shsec/g_shsec.c optional geom_shsec geom/stripe/g_stripe.c optional geom_stripe geom/uncompress/g_uncompress.c optional geom_uncompress contrib/xz-embedded/freebsd/xz_malloc.c \ optional xz_embedded | geom_uncompress \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_crc32.c \ optional xz_embedded | geom_uncompress \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c \ optional xz_embedded | geom_uncompress \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c \ optional xz_embedded | geom_uncompress \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c \ optional xz_embedded | geom_uncompress \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" geom/uzip/g_uzip.c optional geom_uzip geom/virstor/binstream.c optional geom_virstor geom/virstor/g_virstor.c optional geom_virstor geom/virstor/g_virstor_md.c optional geom_virstor geom/zero/g_zero.c optional geom_zero fs/ext2fs/ext2_alloc.c optional ext2fs fs/ext2fs/ext2_balloc.c optional ext2fs fs/ext2fs/ext2_bmap.c optional ext2fs fs/ext2fs/ext2_inode.c optional ext2fs fs/ext2fs/ext2_inode_cnv.c optional ext2fs fs/ext2fs/ext2_lookup.c optional ext2fs fs/ext2fs/ext2_subr.c optional ext2fs fs/ext2fs/ext2_vfsops.c optional ext2fs fs/ext2fs/ext2_vnops.c optional ext2fs gnu/fs/reiserfs/reiserfs_hashes.c optional 
reiserfs \ warning "kernel contains GPL contaminated ReiserFS filesystem" gnu/fs/reiserfs/reiserfs_inode.c optional reiserfs gnu/fs/reiserfs/reiserfs_item_ops.c optional reiserfs gnu/fs/reiserfs/reiserfs_namei.c optional reiserfs gnu/fs/reiserfs/reiserfs_prints.c optional reiserfs gnu/fs/reiserfs/reiserfs_stree.c optional reiserfs gnu/fs/reiserfs/reiserfs_vfsops.c optional reiserfs gnu/fs/reiserfs/reiserfs_vnops.c optional reiserfs # isa/isa_if.m standard isa/isa_common.c optional isa isa/isahint.c optional isa isa/pnp.c optional isa isapnp isa/pnpparse.c optional isa isapnp fs/cd9660/cd9660_bmap.c optional cd9660 fs/cd9660/cd9660_lookup.c optional cd9660 fs/cd9660/cd9660_node.c optional cd9660 fs/cd9660/cd9660_rrip.c optional cd9660 fs/cd9660/cd9660_util.c optional cd9660 fs/cd9660/cd9660_vfsops.c optional cd9660 fs/cd9660/cd9660_vnops.c optional cd9660 fs/cd9660/cd9660_iconv.c optional cd9660_iconv kern/bus_if.m standard kern/clock_if.m standard kern/cpufreq_if.m standard kern/device_if.m standard kern/imgact_elf.c standard kern/imgact_shell.c standard kern/inflate.c optional gzip kern/init_main.c standard kern/init_sysent.c standard kern/ksched.c optional _kposix_priority_scheduling kern/kern_acct.c standard kern/kern_alq.c optional alq kern/kern_clock.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cons.c standard kern/kern_cpu.c standard kern/kern_cpuset.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_dtrace.c optional kdtrace_hooks kern/kern_environment.c standard kern/kern_et.c standard kern/kern_event.c standard kern/kern_exec.c standard kern/kern_exit.c standard kern/kern_fail.c standard kern/kern_ffclock.c standard kern/kern_fork.c standard kern/kern_gzio.c optional gzio kern/kern_hhook.c standard kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard kern/kern_khelp.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard kern/kern_linker.c standard kern/kern_lock.c standard kern/kern_lockf.c standard kern/kern_lockstat.c optional kdtrace_hooks kern/kern_loginclass.c standard kern/kern_malloc.c standard kern/kern_mbuf.c standard kern/kern_mib.c standard kern/kern_module.c standard kern/kern_mtxpool.c standard kern/kern_mutex.c standard kern/kern_ntptime.c standard kern/kern_osd.c standard kern/kern_physio.c standard kern/kern_pmc.c standard kern/kern_poll.c optional device_polling kern/kern_priv.c standard kern/kern_proc.c standard kern/kern_prot.c standard kern/kern_racct.c standard kern/kern_rctl.c standard kern/kern_resource.c standard kern/kern_rmlock.c standard kern/kern_rwlock.c standard kern/kern_sdt.c optional kdtrace_hooks kern/kern_sema.c standard kern/kern_shutdown.c standard kern/kern_sig.c standard kern/kern_switch.c standard kern/kern_sx.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard kern/kern_tc.c standard kern/kern_thr.c standard kern/kern_thread.c standard kern/kern_time.c standard kern/kern_timeout.c standard kern/kern_umtx.c standard kern/kern_uuid.c standard kern/kern_xxx.c standard kern/link_elf.c standard kern/linker_if.m standard kern/md4c.c optional netsmb kern/md5c.c standard kern/p1003_1b.c standard kern/posix4_mib.c standard kern/sched_4bsd.c optional sched_4bsd kern/sched_ule.c optional sched_ule kern/serdev_if.m standard kern/stack_protector.c standard \ compile-with "${NORMAL_C:N-fstack-protector*}" kern/subr_acl_nfs4.c optional ufs_acl | zfs kern/subr_acl_posix1e.c optional 
ufs_acl kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard kern/subr_bufring.c standard kern/subr_clock.c standard kern/subr_devstat.c standard kern/subr_disk.c standard kern/subr_eventhandler.c standard kern/subr_fattime.c standard kern/subr_firmware.c optional firmware kern/subr_hash.c standard kern/subr_hints.c standard kern/subr_kdb.c standard kern/subr_kobj.c standard kern/subr_lock.c standard kern/subr_log.c standard kern/subr_mbpool.c optional libmbpool kern/subr_mchain.c optional libmchain kern/subr_module.c standard kern/subr_msgbuf.c standard kern/subr_param.c standard kern/subr_pcpu.c standard kern/subr_power.c standard kern/subr_prf.c standard kern/subr_prof.c standard kern/subr_rman.c standard kern/subr_rtc.c standard kern/subr_sbuf.c standard kern/subr_scanf.c standard kern/subr_sglist.c standard kern/subr_sleepqueue.c standard kern/subr_smp.c standard kern/subr_stack.c optional ddb | stack | ktr kern/subr_taskqueue.c standard kern/subr_trap.c standard kern/subr_turnstile.c standard kern/subr_uio.c standard kern/subr_unit.c standard kern/subr_witness.c optional witness kern/sys_capability.c standard kern/sys_generic.c standard kern/sys_pipe.c standard kern/sys_procdesc.c standard kern/sys_process.c standard kern/sys_socket.c standard kern/syscalls.c standard kern/sysv_ipc.c standard kern/sysv_msg.c optional sysvmsg kern/sysv_sem.c optional sysvsem kern/sysv_shm.c optional sysvshm kern/tty.c standard kern/tty_compat.c optional compat_43tty kern/tty_info.c standard kern/tty_inq.c standard kern/tty_outq.c standard kern/tty_pts.c standard kern/tty_tty.c standard kern/tty_ttydisc.c standard kern/uipc_accf.c optional inet kern/uipc_cow.c optional zero_copy_sockets kern/uipc_debug.c optional ddb kern/uipc_domain.c standard kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c optional p1003_1b_semaphores kern/uipc_shm.c standard kern/uipc_sockbuf.c standard kern/uipc_socket.c standard kern/uipc_syscalls.c standard kern/uipc_usrreq.c standard kern/vfs_acl.c standard kern/vfs_aio.c optional vfs_aio kern/vfs_bio.c standard kern/vfs_cache.c standard kern/vfs_cluster.c standard kern/vfs_default.c standard kern/vfs_export.c standard kern/vfs_extattr.c standard kern/vfs_hash.c standard kern/vfs_init.c standard kern/vfs_lookup.c standard kern/vfs_mount.c standard kern/vfs_mountroot.c standard kern/vfs_subr.c standard kern/vfs_syscalls.c standard kern/vfs_vnops.c standard # # Kernel GSS-API # gssd.h optional kgssapi \ dependency "$S/kgssapi/gssd.x" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \ no-obj no-implicit-rule before-depend local \ clean "gssd.h" gssd_xdr.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \ no-implicit-rule before-depend local \ clean "gssd_xdr.c" gssd_clnt.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \ no-implicit-rule before-depend local \ clean "gssd_clnt.c" kgssapi/gss_accept_sec_context.c optional kgssapi kgssapi/gss_add_oid_set_member.c optional kgssapi kgssapi/gss_acquire_cred.c optional kgssapi kgssapi/gss_canonicalize_name.c optional kgssapi kgssapi/gss_create_empty_oid_set.c optional kgssapi kgssapi/gss_delete_sec_context.c optional kgssapi kgssapi/gss_display_status.c optional kgssapi kgssapi/gss_export_name.c optional 
kgssapi kgssapi/gss_get_mic.c optional kgssapi kgssapi/gss_init_sec_context.c optional kgssapi kgssapi/gss_impl.c optional kgssapi kgssapi/gss_import_name.c optional kgssapi kgssapi/gss_names.c optional kgssapi kgssapi/gss_pname_to_uid.c optional kgssapi kgssapi/gss_release_buffer.c optional kgssapi kgssapi/gss_release_cred.c optional kgssapi kgssapi/gss_release_name.c optional kgssapi kgssapi/gss_release_oid_set.c optional kgssapi kgssapi/gss_set_cred_option.c optional kgssapi kgssapi/gss_test_oid_set_member.c optional kgssapi kgssapi/gss_unwrap.c optional kgssapi kgssapi/gss_verify_mic.c optional kgssapi kgssapi/gss_wrap.c optional kgssapi kgssapi/gss_wrap_size_limit.c optional kgssapi kgssapi/gssd_prot.c optional kgssapi kgssapi/krb5/krb5_mech.c optional kgssapi kgssapi/krb5/kcrypto.c optional kgssapi kgssapi/krb5/kcrypto_aes.c optional kgssapi kgssapi/krb5/kcrypto_arcfour.c optional kgssapi kgssapi/krb5/kcrypto_des.c optional kgssapi kgssapi/krb5/kcrypto_des3.c optional kgssapi kgssapi/kgss_if.m optional kgssapi kgssapi/gsstest.c optional kgssapi_debug # These files in libkern/ are those needed by all architectures. Some # of the files in libkern/ are only needed on some architectures, e.g., # libkern/divdi3.c is needed by i386 but not alpha. Also, some of these # routines may be optimized for a particular platform. In either case, # the file should be moved to conf/files.<arch> from here. # libkern/arc4random.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc32.c standard libkern/fnmatch.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_ucs.c optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/inet_aton.c standard libkern/inet_ntoa.c standard libkern/inet_ntop.c standard libkern/inet_pton.c standard libkern/mcount.c optional profiling-routine libkern/memcchr.c standard libkern/memcmp.c standard libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/scanc.c standard libkern/strcasecmp.c standard libkern/strcat.c standard libkern/strchr.c standard libkern/strcmp.c standard libkern/strcpy.c standard libkern/strcspn.c standard libkern/strdup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strlen.c standard libkern/strncmp.c standard libkern/strncpy.c standard libkern/strnlen.c standard libkern/strrchr.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strstr.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard net/bpf.c standard net/bpf_buffer.c optional bpf net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bpf_zerocopy.c optional bpf net/bridgestp.c optional bridge | if_bridge net/flowtable.c optional flowtable inet | flowtable inet6 net/ieee8023ad_lacp.c optional lagg net/if.c standard net/if_arcsubr.c optional arcnet net/if_atmsubr.c optional atm net/if_bridge.c optional bridge inet | if_bridge inet net/if_clone.c standard net/if_dead.c standard net/if_debug.c optional ddb net/if_disc.c optional disc net/if_edsc.c optional edsc net/if_ef.c optional ef net/if_enc.c optional enc ipsec inet | enc ipsec inet6 net/if_epair.c optional epair net/if_ethersubr.c optional ether \ compile-with "${NORMAL_C} -I$S/contrib/pf" net/if_faith.c optional faith net/if_fddisubr.c optional fddi net/if_fwsubr.c optional fwip net/if_gif.c optional gif | netgraph_gif net/if_gre.c
optional gre inet net/if_iso88025subr.c optional token net/if_lagg.c optional lagg net/if_loop.c optional loop net/if_llatbl.c standard net/if_media.c standard net/if_mib.c standard net/if_spppfr.c optional sppp | netgraph_sppp net/if_spppsubr.c optional sppp | netgraph_sppp net/if_stf.c optional stf inet inet6 net/if_tun.c optional tun net/if_tap.c optional tap net/if_vlan.c optional vlan net/mppcc.c optional netgraph_mppc_compression net/mppcd.c optional netgraph_mppc_compression net/netisr.c standard net/pfil.c optional ether | inet net/radix.c standard net/radix_mpath.c standard net/raw_cb.c standard net/raw_usrreq.c standard net/route.c standard net/rtsock.c standard net/slcompress.c optional netgraph_vjc | sppp | \ netgraph_sppp net/vnet.c optional vimage net/zlib.c optional crypto | geom_uzip | ipsec | \ mxge | netgraph_deflate | \ ddb_ctf | gzio | geom_uncompress net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan wlan_acl net80211/ieee80211_action.c optional wlan net80211/ieee80211_ageq.c optional wlan net80211/ieee80211_adhoc.c optional wlan net80211/ieee80211_ageq.c optional wlan net80211/ieee80211_amrr.c optional wlan | wlan_amrr net80211/ieee80211_crypto.c optional wlan net80211/ieee80211_crypto_ccmp.c optional wlan wlan_ccmp net80211/ieee80211_crypto_none.c optional wlan net80211/ieee80211_crypto_tkip.c optional wlan wlan_tkip net80211/ieee80211_crypto_wep.c optional wlan wlan_wep net80211/ieee80211_ddb.c optional wlan ddb net80211/ieee80211_dfs.c optional wlan net80211/ieee80211_freebsd.c optional wlan net80211/ieee80211_hostap.c optional wlan net80211/ieee80211_ht.c optional wlan net80211/ieee80211_hwmp.c optional wlan ieee80211_support_mesh net80211/ieee80211_input.c optional wlan net80211/ieee80211_ioctl.c optional wlan net80211/ieee80211_mesh.c optional wlan ieee80211_support_mesh net80211/ieee80211_monitor.c optional wlan net80211/ieee80211_node.c optional wlan net80211/ieee80211_output.c optional wlan net80211/ieee80211_phy.c optional wlan net80211/ieee80211_power.c optional wlan net80211/ieee80211_proto.c optional wlan net80211/ieee80211_radiotap.c optional wlan net80211/ieee80211_ratectl.c optional wlan net80211/ieee80211_ratectl_none.c optional wlan net80211/ieee80211_regdomain.c optional wlan net80211/ieee80211_rssadapt.c optional wlan wlan_rssadapt net80211/ieee80211_scan.c optional wlan net80211/ieee80211_scan_sta.c optional wlan net80211/ieee80211_sta.c optional wlan net80211/ieee80211_superg.c optional wlan ieee80211_support_superg net80211/ieee80211_tdma.c optional wlan ieee80211_support_tdma net80211/ieee80211_wds.c optional wlan net80211/ieee80211_xauth.c optional wlan wlan_xauth net80211/ieee80211_alq.c optional wlan ieee80211_alq netatalk/aarp.c optional netatalk netatalk/at_control.c optional netatalk netatalk/at_proto.c optional netatalk netatalk/at_rmx.c optional netatalk netatalk/ddp_input.c optional netatalk netatalk/ddp_output.c optional netatalk netatalk/ddp_pcb.c optional netatalk netatalk/ddp_usrreq.c optional netatalk netgraph/atm/ccatm/ng_ccatm.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/ng_atm.c optional ngatm_atm netgraph/atm/ngatmbase.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscfu/ng_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscop/ng_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/uni/ng_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} 
-I$S/contrib/ngatm" netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c optional netgraph_bluetooth_bt3c netgraph/bluetooth/drivers/h4/ng_h4.c optional netgraph_bluetooth_h4 netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt usb netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw usb netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/netflow_v9.c optional netgraph_netflow netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_atmllc.c optional netgraph_atmllc netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_car.c optional netgraph_car netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_deflate.c optional netgraph_deflate netgraph/ng_device.c optional netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_ether_echo.c optional netgraph_ether_echo netgraph/ng_fec.c optional netgraph_fec netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat inet libalias netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_patch.c optional netgraph_patch netgraph/ng_pipe.c optional netgraph_pipe netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_pred1.c optional netgraph_pred1 netgraph/ng_rfc1490.c optional netgraph_rfc1490 netgraph/ng_socket.c optional 
netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_sppp.c optional netgraph_sppp netgraph/ng_tag.c optional netgraph_tag netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netgraph/ng_vlan.c optional netgraph_vlan netinet/accf_data.c optional accept_filter_data inet netinet/accf_dns.c optional accept_filter_dns inet netinet/accf_http.c optional accept_filter_http inet netinet/if_atm.c optional atm netinet/if_ether.c optional inet ether netinet/igmp.c optional inet netinet/in.c optional inet netinet/in_debug.c optional inet ddb netinet/ip_carp.c optional inet carp | inet6 carp netinet/in_gif.c optional gif inet | netgraph_gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_mcast.c optional inet netinet/in_pcb.c optional inet | inet6 netinet/in_pcbgroup.c optional inet pcbgroup | inet6 pcbgroup netinet/in_proto.c optional inet | inet6 \ compile-with "${NORMAL_C} -I$S/contrib/pf" netinet/in_rmx.c optional inet netinet/ip_divert.c optional inet ipdivert ipfirewall netinet/ipfw/dn_heap.c optional inet dummynet netinet/ipfw/dn_sched_fifo.c optional inet dummynet netinet/ipfw/dn_sched_prio.c optional inet dummynet netinet/ipfw/dn_sched_qfq.c optional inet dummynet netinet/ipfw/dn_sched_rr.c optional inet dummynet netinet/ipfw/dn_sched_wf2q.c optional inet dummynet netinet/ipfw/ip_dummynet.c optional inet dummynet netinet/ipfw/ip_dn_io.c optional inet dummynet netinet/ipfw/ip_dn_glue.c optional inet dummynet netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ipfw/ip_fw2.c optional inet ipfirewall \ compile-with "${NORMAL_C} -I$S/contrib/pf" netinet/ipfw/ip_fw_dynamic.c optional inet ipfirewall netinet/ipfw/ip_fw_log.c optional inet ipfirewall netinet/ipfw/ip_fw_pfil.c optional inet ipfirewall netinet/ipfw/ip_fw_sockopt.c optional inet ipfirewall netinet/ipfw/ip_fw_table.c optional inet ipfirewall netinet/ipfw/ip_fw_nat.c optional inet ipfirewall_nat netinet/ip_icmp.c optional inet | inet6 netinet/ip_input.c optional inet netinet/ip_ipsec.c optional inet ipsec netinet/ip_mroute.c optional mrouting inet netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/raw_ip.c optional inet | inet6 netinet/cc/cc.c optional inet | inet6 netinet/cc/cc_newreno.c optional inet | inet6 netinet/sctp_asconf.c optional inet sctp | inet6 sctp netinet/sctp_auth.c optional inet sctp | inet6 sctp netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp netinet/sctp_crc32.c optional inet sctp | inet6 sctp netinet/sctp_indata.c optional inet sctp | inet6 sctp netinet/sctp_input.c optional inet sctp | inet6 sctp netinet/sctp_output.c optional inet sctp | inet6 sctp netinet/sctp_pcb.c optional inet sctp | inet6 sctp netinet/sctp_peeloff.c optional inet sctp | inet6 sctp netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp netinet/sctp_sysctl.c optional inet sctp | inet6 sctp netinet/sctp_timer.c optional inet sctp | inet6 sctp netinet/sctp_usrreq.c optional inet sctp | inet6 sctp netinet/sctputil.c optional inet sctp | inet6 sctp netinet/tcp_debug.c optional tcpdebug netinet/tcp_hostcache.c optional inet | inet6 netinet/tcp_input.c optional inet | inet6 netinet/tcp_lro.c optional inet | inet6 netinet/tcp_output.c optional inet | inet6 netinet/tcp_offload.c optional inet | inet6 netinet/tcp_reass.c 
optional inet | inet6 netinet/tcp_sack.c optional inet | inet6 netinet/tcp_subr.c optional inet | inet6 netinet/tcp_syncache.c optional inet | inet6 netinet/tcp_timer.c optional inet | inet6 netinet/tcp_timewait.c optional inet | inet6 netinet/tcp_usrreq.c optional inet | inet6 netinet/udp_usrreq.c optional inet | inet6 netinet/libalias/alias.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_mod.c optional libalias | netgraph_nat netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet netinet6/dest6.c optional inet6 netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6 netinet6/in6_ifattach.c optional inet6 netinet6/in6_mcast.c optional inet6 netinet6/in6_pcb.c optional inet6 netinet6/in6_pcbgroup.c optional inet6 pcbgroup netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_src.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional mrouting inet6 netinet6/ip6_output.c optional inet6 netinet6/ip6_ipsec.c optional inet6 ipsec netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 netinet6/sctp6_usrreq.c optional inet6 sctp netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional ipsec inet | ipsec inet6 netipsec/ipsec_input.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6 netipsec/ipsec_output.c optional ipsec inet | ipsec inet6 netipsec/key.c optional ipsec inet | ipsec inet6 netipsec/key_debug.c optional ipsec inet | ipsec inet6 netipsec/keysock.c optional ipsec inet | ipsec inet6 netipsec/xform_ah.c optional ipsec inet | ipsec inet6 netipsec/xform_esp.c optional ipsec inet | ipsec inet6 netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6 netipsec/xform_ipip.c optional ipsec inet | ipsec inet6 netipsec/xform_tcp.c optional ipsec inet tcp_signature | \ ipsec inet6 tcp_signature netipx/ipx.c optional ipx netipx/ipx_cksum.c optional ipx netipx/ipx_input.c optional ipx netipx/ipx_outputfl.c optional ipx netipx/ipx_pcb.c optional ipx netipx/ipx_proto.c optional ipx netipx/ipx_usrreq.c optional ipx netipx/spx_debug.c optional ipx netipx/spx_reass.c optional ipx netipx/spx_usrreq.c optional ipx netnatm/natm.c optional natm netnatm/natm_pcb.c optional natm netnatm/natm_proto.c optional natm netncp/ncp_conn.c optional ncp netncp/ncp_crypt.c optional ncp netncp/ncp_login.c optional ncp netncp/ncp_mod.c optional ncp netncp/ncp_ncp.c optional ncp netncp/ncp_nls.c optional ncp netncp/ncp_rq.c optional ncp netncp/ncp_sock.c optional ncp netncp/ncp_subr.c optional ncp netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/bootp_subr.c optional bootp nfsclient | bootp nfscl nfs/krpc_subr.c optional bootp nfsclient | bootp 
nfscl nfs/nfs_common.c optional nfsclient | nfsserver nfs/nfs_diskless.c optional nfsclient nfs_root | nfscl nfs_root nfs/nfs_lock.c optional nfsclient | nfscl | nfslockd | nfsd nfsclient/nfs_bio.c optional nfsclient nfsclient/nfs_node.c optional nfsclient nfsclient/nfs_krpc.c optional nfsclient nfsclient/nfs_subs.c optional nfsclient nfsclient/nfs_nfsiod.c optional nfsclient nfsclient/nfs_vfsops.c optional nfsclient nfsclient/nfs_vnops.c optional nfsclient nfsserver/nfs_fha.c optional nfsserver nfsserver/nfs_serv.c optional nfsserver nfsserver/nfs_srvkrpc.c optional nfsserver nfsserver/nfs_srvsubs.c optional nfsserver nfs/nfs_nfssvc.c optional nfsserver | nfscl | nfsd nlm/nlm_advlock.c optional nfslockd | nfsd nlm/nlm_prot_clnt.c optional nfslockd | nfsd nlm/nlm_prot_impl.c optional nfslockd | nfsd nlm/nlm_prot_server.c optional nfslockd | nfsd nlm/nlm_prot_svc.c optional nfslockd | nfsd nlm/nlm_prot_xdr.c optional nfslockd | nfsd nlm/sm_inter_xdr.c optional nfslockd | nfsd # OpenFabrics Enterprise Distribution (Infiniband) ofed/include/linux/linux_compat.c optional ofed \ no-depend compile-with "${OFED_C}" ofed/include/linux/linux_idr.c optional ofed \ no-depend compile-with "${OFED_C}" ofed/include/linux/linux_radix.c optional ofed \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/core/addr.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/agent.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/cache.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" # XXX Mad.c must be ordered before cm.c for sysinit sets to occur in # the correct order. ofed/drivers/infiniband/core/mad.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/cm.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/cma.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/device.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/fmr_pool.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/iwcm.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/local_sa.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/mad_rmpp.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/multicast.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/notice.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/packer.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/sa_query.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/smi.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/sysfs.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/ucm.c 
optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/ucma.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/ud_header.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/umem.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/user_mad.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/uverbs_cmd.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/uverbs_main.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/uverbs_marshall.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/verbs.c optional ofed \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \ # no-depend \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \ # no-depend \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \ no-depend \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/hw/mlx4/ah.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/cq.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/doorbell.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/mad.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/main.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} 
-I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/mr.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/qp.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/srq.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/infiniband/hw/mlx4/wc.c optional mlx4ib \ no-depend obj-prefix "mlx4ib_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/infiniband/hw/mlx4/" ofed/drivers/net/mlx4/alloc.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/catas.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/cmd.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/cq.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/eq.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/fw.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/icm.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/intf.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/main.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/mcg.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/mr.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/pd.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/port.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/profile.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/qp.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/reset.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/sense.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/srq.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/xrcd.c optional mlx4ib | mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_cq.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" 
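#
# A note on the attribute pattern used by the OFED entries above and below:
# the two mlx4 directories each ship files named main.c, cq.c, mr.c, qp.c
# and srq.c, so the entries use obj-prefix to keep the object names apart
# ("mlx4ib_" for the InfiniBand driver, "mlx4_" for the core and ethernet
# code).  A minimal sketch with a hypothetical driver "foo" -- the path,
# option name and prefix are illustrative, not taken from the tree:
#
#dev/foo/core/init.c	optional foo \
#	no-depend obj-prefix "foo_" \
#	compile-with "${NORMAL_C} -I$S/dev/foo/core/"
#
# read: build init.c only when option/device foo is configured, skip the
# dependency pass for it, emit foo_init.o rather than init.o, and extend
# the stock C rule with the driver's private include path.
#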
ofed/drivers/net/mlx4/en_frag.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_main.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_netdev.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_port.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_resources.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_rx.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_tx.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/infiniband/hw/mthca/mthca_allocator.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_av.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_catas.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_cmd.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_cq.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_eq.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_mad.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_main.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_mcg.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_memfree.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_mr.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_pd.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_profile.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_provider.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_qp.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_reset.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_srq.c optional mthca \ no-depend compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_uar.c optional mthca \ no-depend compile-with "${OFED_C}" # crypto support opencrypto/cast.c optional crypto | ipsec opencrypto/criov.c optional crypto opencrypto/crypto.c optional crypto opencrypto/cryptodev.c optional cryptodev opencrypto/cryptodev_if.m optional crypto opencrypto/cryptosoft.c optional crypto opencrypto/deflate.c optional crypto opencrypto/rmd160.c optional crypto | ipsec opencrypto/skipjack.c optional crypto opencrypto/xform.c optional crypto pci/alpm.c optional alpm pci pci/amdpm.c optional amdpm pci | nfpm pci pci/amdsmb.c optional amdsmb pci pci/if_rl.c optional rl pci pci/intpm.c optional intpm pci pci/ncr.c optional ncr pci pci/nfsmb.c optional nfsmb pci pci/viapm.c optional viapm pci rpc/auth_none.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/auth_unix.c optional krpc | nfslockd | nfsclient | nfscl | nfsd 
rpc/authunix_prot.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/clnt_dg.c optional krpc | nfslockd | nfsclient | nfscl | nfsd rpc/clnt_rc.c optional krpc | nfslockd | nfsclient | nfscl | nfsd rpc/clnt_vc.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/getnetconfig.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/replay.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/rpc_callmsg.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/rpc_generic.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/rpc_prot.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/rpcb_clnt.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/rpcb_prot.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd rpc/svc.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/svc_auth.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/svc_auth_unix.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/svc_dg.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/svc_generic.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/svc_vc.c optional krpc | nfslockd | nfsserver | nfscl | nfsd rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_domain.c optional audit security/audit/audit_bsm_errno.c optional audit security/audit/audit_bsm_fcntl.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_bsm_socket_type.c optional audit security/audit/audit_bsm_token.c optional audit security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/mac/mac_atalk.c optional mac netatalk security/mac/mac_audit.c optional mac audit security/mac/mac_cred.c optional mac security/mac/mac_framework.c optional mac security/mac/mac_inet.c optional mac inet | mac inet6 security/mac/mac_inet6.c optional mac inet6 security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c optional mac security/mac/mac_posix_shm.c optional mac security/mac/mac_priv.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_syscalls.c standard security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional 
mac_mls security/mac_none/mac_none.c optional mac_none security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test teken/teken.c optional sc ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs ufs/ffs/ffs_tables.c optional ffs ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional directio ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional ffs ufs/ufs/ufs_vnops.c optional ffs vm/default_pager.c standard vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/sg_pager.c standard vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/vm_contig.c standard vm/memguard.c optional DEBUG_MEMGUARD vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c standard vm/vm_pageout.c standard vm/vm_pager.c standard vm/vm_phys.c standard vm/vm_reserv.c standard vm/vm_unix.c standard vm/vm_zeroidle.c standard vm/vnode_pager.c standard xdr/xdr.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd xdr/xdr_array.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd xdr/xdr_mbuf.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd xdr/xdr_mem.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd xdr/xdr_reference.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd xdr/xdr_sizeof.c optional krpc | nfslockd | nfsclient | nfsserver | nfscl | nfsd # gnu/fs/xfs/xfs_alloc.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" \ warning "kernel contains GPL contaminated xfs filesystem" gnu/fs/xfs/xfs_alloc_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bit.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bmap.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bmap_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_buf_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_da_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2.c optional xfs \ compile-with "${NORMAL_C} 
-I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_block.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_data.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_node.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_sf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_trace.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_error.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_extfree_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_fsops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_ialloc.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_ialloc_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_inode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_inode_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iocore.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_itable.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dfrag.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_log.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_log_recover.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_mount.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_rename.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_ail.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_buf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_extfree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support 
-I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_inode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_utils.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_vfsops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_vnodeops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_rw.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_attr_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_attr.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dmops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_qmops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iget.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_mountops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vnops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_frw.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_buf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_globals.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_dmistubs.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_super.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_stats.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vfs.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vnode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_sysctl.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_fs_subr.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_ioctl.c optional xfs \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -I$S/gnu/fs/xfs/FreeBSD 
-I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/debug.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/ktrace.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/mrlock.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/uuid.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/kmem.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iomap.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_behavior.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" xen/gnttab.c optional xen | xenhvm xen/features.c optional xen | xenhvm xen/evtchn/evtchn.c optional xen xen/evtchn/evtchn_dev.c optional xen | xenhvm xen/xenbus/xenbus_if.m optional xen | xenhvm xen/xenbus/xenbus.c optional xen | xenhvm xen/xenbus/xenbusb_if.m optional xen | xenhvm xen/xenbus/xenbusb.c optional xen | xenhvm xen/xenbus/xenbusb_front.c optional xen | xenhvm xen/xenbus/xenbusb_back.c optional xen | xenhvm xen/xenstore/xenstore.c optional xen | xenhvm xen/xenstore/xenstore_dev.c optional xen | xenhvm dev/xen/balloon/balloon.c optional xen | xenhvm dev/xen/blkfront/blkfront.c optional xen | xenhvm dev/xen/blkback/blkback.c optional xen | xenhvm dev/xen/console/console.c optional xen dev/xen/console/xencons_ring.c optional xen dev/xen/control/control.c optional xen | xenhvm dev/xen/netback/netback.c optional xen | xenhvm dev/xen/netfront/netfront.c optional xen | xenhvm dev/xen/xenpci/xenpci.c optional xenpci dev/xen/xenpci/evtchn.c optional xenpci dev/etherswitch/mdio_if.m optional miiproxy dev/etherswitch/mdio.c optional miiproxy dev/etherswitch/miiproxy.c optional miiproxy Index: projects/nand/sys/conf/files.arm =================================================================== --- projects/nand/sys/conf/files.arm (revision 235262) +++ projects/nand/sys/conf/files.arm (revision 235263) @@ -1,80 +1,86 @@ # $FreeBSD$ crypto/blowfish/bf_enc.c optional crypto | ipsec crypto/des/des_enc.c optional crypto | ipsec | netsmb arm/arm/autoconf.c standard arm/arm/bcopy_page.S standard arm/arm/bcopyinout.S standard arm/arm/blockio.S standard arm/arm/bootconfig.c standard arm/arm/bus_space_asm_generic.S standard arm/arm/busdma_machdep.c standard arm/arm/copystr.S standard arm/arm/cpufunc.c standard arm/arm/cpufunc_asm.S standard arm/arm/cpufunc_asm_armv4.S standard arm/arm/db_disasm.c optional ddb arm/arm/db_interface.c optional ddb arm/arm/db_trace.c optional ddb arm/arm/disassem.c optional ddb arm/arm/dump_machdep.c standard arm/arm/elf_machdep.c standard arm/arm/exception.S standard arm/arm/fiq.c standard arm/arm/fiq_subr.S standard arm/arm/fusu.S standard arm/arm/gdb_machdep.c optional gdb arm/arm/identcpu.c standard arm/arm/in_cksum.c optional inet | inet6 arm/arm/in_cksum_arm.S optional inet | inet6 arm/arm/intr.c standard arm/arm/locore.S standard no-obj arm/arm/machdep.c standard arm/arm/mem.c optional mem arm/arm/minidump_machdep.c optional mem arm/arm/nexus.c standard arm/arm/pmap.c standard arm/arm/setcpsr.S standard 
arm/arm/setstack.s standard arm/arm/stack_machdep.c optional ddb | stack arm/arm/support.S standard arm/arm/swtch.S standard arm/arm/sys_machdep.c standard arm/arm/trap.c standard arm/arm/uio_machdep.c standard arm/arm/undefined.c standard arm/arm/vectors.S standard arm/arm/vm_machdep.c standard arm/fpe-arm/armfpe_glue.S optional armfpe arm/fpe-arm/armfpe_init.c optional armfpe arm/fpe-arm/armfpe.S optional armfpe cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs compile-with "${ZFS_C}" dev/hwpmc/hwpmc_arm.c optional hwpmc dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt dev/ofw/ofw_bus_if.m optional fdt dev/ofw/ofw_if.m optional fdt dev/ofw/ofw_bus_subr.c optional fdt dev/ofw/ofw_fdt.c optional fdt geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr libkern/arm/divsi3.S standard libkern/arm/ffs.S standard libkern/arm/muldi3.c standard libkern/ashldi3.c standard libkern/ashrdi3.c standard libkern/divdi3.c standard libkern/ffsl.c standard libkern/fls.c standard libkern/flsl.c standard libkern/lshrdi3.c standard libkern/memchr.c optional fdt libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard #XXX: We can't use these versions, as strcmp.c is included conf/files #libkern/arm/strcmp.S standard #libkern/arm/strncmp.S standard +# +board_id.h standard \ + dependency "$S/arm/conf/genboardid.awk $S/arm/conf/mach-types" \ + compile-with "${AWK} -f $S/arm/conf/genboardid.awk $S/arm/conf/mach-types > board_id.h" \ + no-obj no-implicit-rule before-depend \ + clean "board_id.h" Index: projects/nand/sys/conf =================================================================== --- projects/nand/sys/conf (revision 235262) +++ projects/nand/sys/conf (revision 235263) Property changes on: projects/nand/sys/conf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/conf:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/changes.txt =================================================================== --- projects/nand/sys/contrib/dev/acpica/changes.txt (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/changes.txt (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/changes.txt ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/changes.txt:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/common =================================================================== --- projects/nand/sys/contrib/dev/acpica/common (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/common (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/common ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/common:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/compiler =================================================================== --- projects/nand/sys/contrib/dev/acpica/compiler (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/compiler (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/compiler ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/compiler:r235157-235262 
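A note on the board_id.h rule merged into files.arm above: it declares a build-time generated header rather than a source file. config(8) arranges for the "compile-with" command to run before the dependency pass (before-depend), produces no object file for it (no-obj no-implicit-rule), and removes it again on clean. The effective build step is simply:

    awk -f $S/arm/conf/genboardid.awk $S/arm/conf/mach-types > board_id.h

The output's exact shape depends on genboardid.awk and mach-types, neither of which is shown in this diff; plausibly it is one #define per board entry, mapping a machine name to its numeric ID.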
Index: projects/nand/sys/contrib/dev/acpica/components/debugger =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/debugger (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/debugger (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/debugger ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/debugger:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/disassembler =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/disassembler (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/disassembler (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/disassembler ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/disassembler:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/dispatcher =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/dispatcher (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/dispatcher (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/dispatcher ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/dispatcher:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/events =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/events (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/events (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/events ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/events:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/executer =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/executer (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/executer (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/executer ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/executer:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/hardware =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/hardware (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/hardware (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/hardware ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/hardware:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/namespace =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/namespace (revision 235262) +++ 
projects/nand/sys/contrib/dev/acpica/components/namespace (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/namespace ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/namespace:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/parser =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/parser (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/parser (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/parser ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/parser:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/resources =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/resources (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/resources (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/resources ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/resources:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/tables =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/tables (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/tables (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/tables ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/tables:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/components/utilities =================================================================== --- projects/nand/sys/contrib/dev/acpica/components/utilities (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/components/utilities (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/components/utilities ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/components/utilities:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/include =================================================================== --- projects/nand/sys/contrib/dev/acpica/include (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/include (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/include ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/include:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica/os_specific =================================================================== --- projects/nand/sys/contrib/dev/acpica/os_specific (revision 235262) +++ projects/nand/sys/contrib/dev/acpica/os_specific (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica/os_specific ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica/os_specific:r235157-235262 Index: projects/nand/sys/contrib/dev/acpica 
=================================================================== --- projects/nand/sys/contrib/dev/acpica (revision 235262) +++ projects/nand/sys/contrib/dev/acpica (revision 235263) Property changes on: projects/nand/sys/contrib/dev/acpica ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/dev/acpica:r235157-235262 Index: projects/nand/sys/contrib/octeon-sdk =================================================================== --- projects/nand/sys/contrib/octeon-sdk (revision 235262) +++ projects/nand/sys/contrib/octeon-sdk (revision 235263) Property changes on: projects/nand/sys/contrib/octeon-sdk ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/octeon-sdk:r235157-235262 Index: projects/nand/sys/contrib/pf =================================================================== --- projects/nand/sys/contrib/pf (revision 235262) +++ projects/nand/sys/contrib/pf (revision 235263) Property changes on: projects/nand/sys/contrib/pf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/pf:r235157-235262 Index: projects/nand/sys/contrib/x86emu =================================================================== --- projects/nand/sys/contrib/x86emu (revision 235262) +++ projects/nand/sys/contrib/x86emu (revision 235263) Property changes on: projects/nand/sys/contrib/x86emu ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/x86emu:r235157-235262 Index: projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210.h =================================================================== --- projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210.h (revision 235262) +++ projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210.h (revision 235263) @@ -1,288 +1,290 @@ /* * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2002-2004 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * $FreeBSD$ */ #ifndef _ATH_AR5210_H_ #define _ATH_AR5210_H_ #define AR5210_MAGIC 0x19980124 #if 0 /* * RTS_ENABLE includes LONG_PKT because they essentially * imply the same thing, and are set or not set together * for this chip */ #define AR5210_TXD_CTRL_A_HDR_LEN(_val) (((_val) ) & 0x0003f) #define AR5210_TXD_CTRL_A_TX_RATE(_val) (((_val) << 6) & 0x003c0) #define AR5210_TXD_CTRL_A_RTS_ENABLE ( 0x00c00) #define AR5210_TXD_CTRL_A_CLEAR_DEST_MASK(_val) (((_val) << 12) & 0x01000) #define AR5210_TXD_CTRL_A_ANT_MODE(_val) (((_val) << 13) & 0x02000) #define AR5210_TXD_CTRL_A_PKT_TYPE(_val) (((_val) << 14) & 0x1c000) #define AR5210_TXD_CTRL_A_INT_REQ ( 0x20000) #define AR5210_TXD_CTRL_A_KEY_VALID ( 0x40000) #define AR5210_TXD_CTRL_B_KEY_ID(_val) (((_val) ) & 0x0003f) #define AR5210_TXD_CTRL_B_RTS_DURATION(_val) (((_val) << 6) & 0x7ffc0) #endif #define INIT_CONFIG_STATUS 0x00000000 #define INIT_ACKTOPS 0x00000008 #define INIT_BCON_CNTRL_REG 0x00000000 #define INIT_SLOT_TIME 0x00000168 #define INIT_SLOT_TIME_TURBO 0x000001e0 /* More aggressive turbo slot timing = 6 us */ #define INIT_ACK_CTS_TIMEOUT 0x04000400 #define INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800 #define INIT_USEC 0x27 #define INIT_USEC_TURBO 0x4f #define INIT_USEC_32 0x1f #define INIT_TX_LATENCY 0x36 #define INIT_RX_LATENCY 0x1D #define INIT_TRANSMIT_LATENCY \ ((INIT_RX_LATENCY << AR_USEC_RX_LATENCY_S) | \ (INIT_TX_LATENCY << AR_USEC_TX_LATENCY_S) | \ (INIT_USEC_32 << 7) | INIT_USEC ) #define INIT_TRANSMIT_LATENCY_TURBO \ ((INIT_RX_LATENCY << AR_USEC_RX_LATENCY_S) | \ (INIT_TX_LATENCY << AR_USEC_TX_LATENCY_S) | \ (INIT_USEC_32 << 7) | INIT_USEC_TURBO) #define INIT_SIFS 0x230 /* = 16 us - 2 us */ #define INIT_SIFS_TURBO 0x1E0 /* More aggressive turbo SIFS timing - 8 us - 2 us */ /* * Various fifo fill before Tx start, in 64-byte units * i.e. put the frame in the air while still DMAing */ #define MIN_TX_FIFO_THRESHOLD 0x1 #define MAX_TX_FIFO_THRESHOLD ((IEEE80211_MAX_LEN / 64) + 1) #define INIT_NEXT_CFP_START 0xffffffff #define INIT_BEACON_PERIOD 0xffff #define INIT_BEACON_EN 0 /* this should be set by AP only when it's ready */ #define INIT_BEACON_CONTROL \ ((INIT_RESET_TSF << 24) | (INIT_BEACON_EN << 23) | \ (INIT_TIM_OFFSET<<16) | INIT_BEACON_PERIOD) #define INIT_RSSI_THR 0x00000700 /* Missed beacon counter initialized to max value of 7 */ #define INIT_ProgIFS 0x398 /* PIFS - 2us */ #define INIT_ProgIFS_TURBO 0x3C0 #define INIT_EIFS 0xd70 #define INIT_EIFS_TURBO 0x1ae0 #define INIT_CARR_SENSE_EN 1 #define INIT_PROTO_TIME_CNTRL ( (INIT_CARR_SENSE_EN << 26) | (INIT_EIFS << 12) | \ (INIT_ProgIFS) ) #define INIT_PROTO_TIME_CNTRL_TURBO ( (INIT_CARR_SENSE_EN << 26) | (INIT_EIFS_TURBO << 12) | \ (INIT_ProgIFS_TURBO) ) #define AR5210_MAX_RATE_POWER 60 #undef HAL_NUM_TX_QUEUES /* from ah.h */ #define HAL_NUM_TX_QUEUES 3 struct ath_hal_5210 { struct ath_hal_private ah_priv; /* base definitions */ uint8_t ah_macaddr[IEEE80211_ADDR_LEN]; /* * Runtime state. */ uint32_t ah_maskReg; /* shadow of IMR+IER regs */ uint32_t ah_txOkInterruptMask; uint32_t ah_txErrInterruptMask; uint32_t ah_txDescInterruptMask; uint32_t ah_txEolInterruptMask; uint32_t ah_txUrnInterruptMask; HAL_POWER_MODE ah_powerMode; uint8_t ah_bssid[IEEE80211_ADDR_LEN]; HAL_TX_QUEUE_INFO ah_txq[HAL_NUM_TX_QUEUES]; /* beacon+cab+data */ /* * Station mode support. 
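 * The timing fields below shadow user overrides; a value of (u_int) -1
 * means no override is in effect and the hardware default is used
 * (ar5210Attach initializes them that way, and the set routines restore
 * that value when handed an out-of-range setting).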
*/ uint32_t ah_staId1Defaults; /* STA_ID1 default settings */ uint32_t ah_rssiThr; /* RSSI_THR settings */ u_int ah_sifstime; /* user-specified sifs time */ u_int ah_slottime; /* user-specified slot time */ u_int ah_acktimeout; /* user-specified ack timeout */ u_int ah_ctstimeout; /* user-specified cts timeout */ }; #define AH5210(ah) ((struct ath_hal_5210 *)(ah)) struct ath_hal; extern void ar5210Detach(struct ath_hal *ah); extern HAL_BOOL ar5210Reset(struct ath_hal *, HAL_OPMODE, struct ieee80211_channel *, HAL_BOOL bChannelChange, HAL_STATUS *); extern void ar5210SetPCUConfig(struct ath_hal *); extern HAL_BOOL ar5210PhyDisable(struct ath_hal *); extern HAL_BOOL ar5210Disable(struct ath_hal *); extern HAL_BOOL ar5210ChipReset(struct ath_hal *, struct ieee80211_channel *); extern HAL_BOOL ar5210PerCalibration(struct ath_hal *, struct ieee80211_channel *, HAL_BOOL *); extern HAL_BOOL ar5210PerCalibrationN(struct ath_hal *ah, struct ieee80211_channel *chan, u_int chainMask, HAL_BOOL longCal, HAL_BOOL *isCalDone); extern HAL_BOOL ar5210ResetCalValid(struct ath_hal *ah, const struct ieee80211_channel *); extern int16_t ar5210GetNoiseFloor(struct ath_hal *); extern int16_t ar5210GetNfAdjust(struct ath_hal *, const HAL_CHANNEL_INTERNAL *); extern HAL_BOOL ar5210SetTxPowerLimit(struct ath_hal *, uint32_t limit); extern HAL_BOOL ar5210SetTransmitPower(struct ath_hal *, const struct ieee80211_channel *); extern HAL_BOOL ar5210CalNoiseFloor(struct ath_hal *, HAL_CHANNEL_INTERNAL *); extern HAL_BOOL ar5210ResetDma(struct ath_hal *, HAL_OPMODE); extern HAL_BOOL ar5210SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo); extern HAL_BOOL ar5210GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo); extern int ar5210SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type, const HAL_TXQ_INFO *qInfo); extern HAL_BOOL ar5210ReleaseTxQueue(struct ath_hal *ah, u_int q); extern HAL_BOOL ar5210ResetTxQueue(struct ath_hal *ah, u_int q); extern uint32_t ar5210GetTxDP(struct ath_hal *, u_int); extern HAL_BOOL ar5210SetTxDP(struct ath_hal *, u_int, uint32_t txdp); extern HAL_BOOL ar5210UpdateTxTrigLevel(struct ath_hal *, HAL_BOOL); extern uint32_t ar5210NumTxPending(struct ath_hal *, u_int); extern HAL_BOOL ar5210StartTxDma(struct ath_hal *, u_int); extern HAL_BOOL ar5210StopTxDma(struct ath_hal *, u_int); extern HAL_BOOL ar5210SetupTxDesc(struct ath_hal *, struct ath_desc *, u_int pktLen, u_int hdrLen, HAL_PKT_TYPE type, u_int txPower, u_int txRate0, u_int txRetries0, u_int keyIx, u_int antMode, u_int flags, u_int rtsctsRate, u_int rtsctsDuration, u_int compicvLen, u_int compivLen, u_int comp); extern HAL_BOOL ar5210SetupXTxDesc(struct ath_hal *, struct ath_desc *, u_int txRate1, u_int txRetries1, u_int txRate2, u_int txRetries2, u_int txRate3, u_int txRetries3); extern HAL_BOOL ar5210FillTxDesc(struct ath_hal *, struct ath_desc *, u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg, const struct ath_desc *ds0); extern HAL_STATUS ar5210ProcTxDesc(struct ath_hal *, struct ath_desc *, struct ath_tx_status *); extern void ar5210GetTxIntrQueue(struct ath_hal *ah, uint32_t *); extern void ar5210IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *); extern HAL_BOOL ar5210GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *, int *rates, int *tries); extern uint32_t ar5210GetRxDP(struct ath_hal *); extern void ar5210SetRxDP(struct ath_hal *, uint32_t rxdp); extern void ar5210EnableReceive(struct ath_hal *); extern HAL_BOOL ar5210StopDmaReceive(struct ath_hal *); extern void 
ar5210StartPcuReceive(struct ath_hal *); extern void ar5210StopPcuReceive(struct ath_hal *); extern void ar5210SetMulticastFilter(struct ath_hal *, uint32_t filter0, uint32_t filter1); extern HAL_BOOL ar5210ClrMulticastFilterIndex(struct ath_hal *, uint32_t); extern HAL_BOOL ar5210SetMulticastFilterIndex(struct ath_hal *, uint32_t); extern uint32_t ar5210GetRxFilter(struct ath_hal *); extern void ar5210SetRxFilter(struct ath_hal *, uint32_t); extern HAL_BOOL ar5210SetupRxDesc(struct ath_hal *, struct ath_desc *, uint32_t, u_int flags); extern HAL_STATUS ar5210ProcRxDesc(struct ath_hal *, struct ath_desc *, uint32_t, struct ath_desc *, uint64_t, struct ath_rx_status *); extern void ar5210GetMacAddress(struct ath_hal *, uint8_t *); extern HAL_BOOL ar5210SetMacAddress(struct ath_hal *ah, const uint8_t *); extern void ar5210GetBssIdMask(struct ath_hal *, uint8_t *); extern HAL_BOOL ar5210SetBssIdMask(struct ath_hal *, const uint8_t *); extern HAL_BOOL ar5210EepromRead(struct ath_hal *, u_int off, uint16_t *data); extern HAL_BOOL ar5210EepromWrite(struct ath_hal *, u_int off, uint16_t data); extern HAL_BOOL ar5210SetRegulatoryDomain(struct ath_hal *, uint16_t, HAL_STATUS *); extern u_int ar5210GetWirelessModes(struct ath_hal *ah); extern void ar5210EnableRfKill(struct ath_hal *); extern HAL_BOOL ar5210GpioCfgInput(struct ath_hal *, uint32_t gpio); extern HAL_BOOL ar5210GpioCfgOutput(struct ath_hal *, uint32_t gpio, HAL_GPIO_MUX_TYPE); extern uint32_t ar5210GpioGet(struct ath_hal *, uint32_t gpio); extern HAL_BOOL ar5210GpioSet(struct ath_hal *, uint32_t gpio, uint32_t); extern void ar5210Gpio0SetIntr(struct ath_hal *, u_int, uint32_t ilevel); extern void ar5210SetLedState(struct ath_hal *, HAL_LED_STATE); extern u_int ar5210GetDefAntenna(struct ath_hal *); extern void ar5210SetDefAntenna(struct ath_hal *, u_int); extern HAL_ANT_SETTING ar5210GetAntennaSwitch(struct ath_hal *); extern HAL_BOOL ar5210SetAntennaSwitch(struct ath_hal *, HAL_ANT_SETTING); extern void ar5210WriteAssocid(struct ath_hal *, const uint8_t *bssid, uint16_t assocId); extern uint32_t ar5210GetTsf32(struct ath_hal *); extern uint64_t ar5210GetTsf64(struct ath_hal *); extern void ar5210ResetTsf(struct ath_hal *); extern uint32_t ar5210GetRandomSeed(struct ath_hal *); extern HAL_BOOL ar5210DetectCardPresent(struct ath_hal *); extern void ar5210UpdateMibCounters(struct ath_hal *, HAL_MIB_STATS *); extern void ar5210EnableHwEncryption(struct ath_hal *); extern void ar5210DisableHwEncryption(struct ath_hal *); extern HAL_RFGAIN ar5210GetRfgain(struct ath_hal *); extern HAL_BOOL ar5210SetSifsTime(struct ath_hal *, u_int); extern u_int ar5210GetSifsTime(struct ath_hal *); extern HAL_BOOL ar5210SetSlotTime(struct ath_hal *, u_int); extern u_int ar5210GetSlotTime(struct ath_hal *); extern HAL_BOOL ar5210SetAckTimeout(struct ath_hal *, u_int); extern u_int ar5210GetAckTimeout(struct ath_hal *); extern HAL_BOOL ar5210SetAckCTSRate(struct ath_hal *, u_int); extern u_int ar5210GetAckCTSRate(struct ath_hal *); extern HAL_BOOL ar5210SetCTSTimeout(struct ath_hal *, u_int); extern u_int ar5210GetCTSTimeout(struct ath_hal *); extern HAL_BOOL ar5210SetDecompMask(struct ath_hal *, uint16_t, int); void ar5210SetCoverageClass(struct ath_hal *, uint8_t, int); extern HAL_STATUS ar5210GetCapability(struct ath_hal *, HAL_CAPABILITY_TYPE, uint32_t, uint32_t *); extern HAL_BOOL ar5210SetCapability(struct ath_hal *, HAL_CAPABILITY_TYPE, uint32_t, uint32_t, HAL_STATUS *); extern HAL_BOOL ar5210GetDiagState(struct ath_hal *ah, int request, const void 
*args, uint32_t argsize, void **result, uint32_t *resultsize); extern uint32_t ar5210Get11nExtBusy(struct ath_hal *); extern HAL_BOOL ar5210GetMibCycleCounts(struct ath_hal *, HAL_SURVEY_SAMPLE *); +extern void ar5210EnableDfs(struct ath_hal *, HAL_PHYERR_PARAM *); +extern void ar5210GetDfsThresh(struct ath_hal *, HAL_PHYERR_PARAM *); extern u_int ar5210GetKeyCacheSize(struct ath_hal *); extern HAL_BOOL ar5210IsKeyCacheEntryValid(struct ath_hal *, uint16_t); extern HAL_BOOL ar5210ResetKeyCacheEntry(struct ath_hal *, uint16_t entry); extern HAL_BOOL ar5210SetKeyCacheEntry(struct ath_hal *, uint16_t entry, const HAL_KEYVAL *, const uint8_t *mac, int xorKey); extern HAL_BOOL ar5210SetKeyCacheEntryMac(struct ath_hal *, uint16_t, const uint8_t *); extern HAL_BOOL ar5210SetPowerMode(struct ath_hal *, uint32_t powerRequest, int setChip); extern HAL_POWER_MODE ar5210GetPowerMode(struct ath_hal *); extern void ar5210SetBeaconTimers(struct ath_hal *, const HAL_BEACON_TIMERS *); extern void ar5210BeaconInit(struct ath_hal *, uint32_t, uint32_t); extern void ar5210SetStaBeaconTimers(struct ath_hal *, const HAL_BEACON_STATE *); extern void ar5210ResetStaBeaconTimers(struct ath_hal *); extern uint64_t ar5210GetNextTBTT(struct ath_hal *); extern HAL_BOOL ar5210IsInterruptPending(struct ath_hal *); extern HAL_BOOL ar5210GetPendingInterrupts(struct ath_hal *, HAL_INT *); extern HAL_INT ar5210GetInterrupts(struct ath_hal *); extern HAL_INT ar5210SetInterrupts(struct ath_hal *, HAL_INT ints); extern const HAL_RATE_TABLE *ar5210GetRateTable(struct ath_hal *, u_int mode); extern HAL_BOOL ar5210AniControl(struct ath_hal *, HAL_ANI_CMD, int ); extern void ar5210AniPoll(struct ath_hal *, const struct ieee80211_channel *); extern void ar5210RxMonitor(struct ath_hal *, const HAL_NODE_STATS *, const struct ieee80211_channel *); extern void ar5210MibEvent(struct ath_hal *, const HAL_NODE_STATS *); #endif /* _ATH_AR5210_H_ */ Index: projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c =================================================================== --- projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c (revision 235262) +++ projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210_attach.c (revision 235263) @@ -1,404 +1,408 @@ /* * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2002-2004 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * $FreeBSD$ */ #include "opt_ah.h" #include "ah.h" #include "ah_internal.h" #include "ah_devid.h" #include "ar5210/ar5210.h" #include "ar5210/ar5210reg.h" #include "ar5210/ar5210phy.h" #include "ah_eeprom_v1.h" static HAL_BOOL ar5210GetChannelEdges(struct ath_hal *, uint16_t flags, uint16_t *low, uint16_t *high); static HAL_BOOL ar5210GetChipPowerLimits(struct ath_hal *ah, struct ieee80211_channel *chan); static void ar5210ConfigPCIE(struct ath_hal *ah, HAL_BOOL restore); static void ar5210DisablePCIE(struct ath_hal *ah); static const struct ath_hal_private ar5210hal = {{ .ah_magic = AR5210_MAGIC, .ah_getRateTable = ar5210GetRateTable, .ah_detach = ar5210Detach, /* Reset Functions */ .ah_reset = ar5210Reset, .ah_phyDisable = ar5210PhyDisable, .ah_disable = ar5210Disable, .ah_configPCIE = ar5210ConfigPCIE, .ah_disablePCIE = ar5210DisablePCIE, .ah_setPCUConfig = ar5210SetPCUConfig, .ah_perCalibration = ar5210PerCalibration, .ah_perCalibrationN = ar5210PerCalibrationN, .ah_resetCalValid = ar5210ResetCalValid, .ah_setTxPowerLimit = ar5210SetTxPowerLimit, .ah_getChanNoise = ath_hal_getChanNoise, /* Transmit functions */ .ah_updateTxTrigLevel = ar5210UpdateTxTrigLevel, .ah_setupTxQueue = ar5210SetupTxQueue, .ah_setTxQueueProps = ar5210SetTxQueueProps, .ah_getTxQueueProps = ar5210GetTxQueueProps, .ah_releaseTxQueue = ar5210ReleaseTxQueue, .ah_resetTxQueue = ar5210ResetTxQueue, .ah_getTxDP = ar5210GetTxDP, .ah_setTxDP = ar5210SetTxDP, .ah_numTxPending = ar5210NumTxPending, .ah_startTxDma = ar5210StartTxDma, .ah_stopTxDma = ar5210StopTxDma, .ah_setupTxDesc = ar5210SetupTxDesc, .ah_setupXTxDesc = ar5210SetupXTxDesc, .ah_fillTxDesc = ar5210FillTxDesc, .ah_procTxDesc = ar5210ProcTxDesc, .ah_getTxIntrQueue = ar5210GetTxIntrQueue, .ah_reqTxIntrDesc = ar5210IntrReqTxDesc, .ah_getTxCompletionRates = ar5210GetTxCompletionRates, /* RX Functions */ .ah_getRxDP = ar5210GetRxDP, .ah_setRxDP = ar5210SetRxDP, .ah_enableReceive = ar5210EnableReceive, .ah_stopDmaReceive = ar5210StopDmaReceive, .ah_startPcuReceive = ar5210StartPcuReceive, .ah_stopPcuReceive = ar5210StopPcuReceive, .ah_setMulticastFilter = ar5210SetMulticastFilter, .ah_setMulticastFilterIndex = ar5210SetMulticastFilterIndex, .ah_clrMulticastFilterIndex = ar5210ClrMulticastFilterIndex, .ah_getRxFilter = ar5210GetRxFilter, .ah_setRxFilter = ar5210SetRxFilter, .ah_setupRxDesc = ar5210SetupRxDesc, .ah_procRxDesc = ar5210ProcRxDesc, .ah_rxMonitor = ar5210RxMonitor, .ah_aniPoll = ar5210AniPoll, .ah_procMibEvent = ar5210MibEvent, /* Misc Functions */ .ah_getCapability = ar5210GetCapability, .ah_setCapability = ar5210SetCapability, .ah_getDiagState = ar5210GetDiagState, .ah_getMacAddress = ar5210GetMacAddress, .ah_setMacAddress = ar5210SetMacAddress, .ah_getBssIdMask = ar5210GetBssIdMask, .ah_setBssIdMask = ar5210SetBssIdMask, .ah_setRegulatoryDomain = ar5210SetRegulatoryDomain, .ah_setLedState = ar5210SetLedState, .ah_writeAssocid = ar5210WriteAssocid, .ah_gpioCfgInput = ar5210GpioCfgInput, .ah_gpioCfgOutput = ar5210GpioCfgOutput, .ah_gpioGet = ar5210GpioGet, .ah_gpioSet = ar5210GpioSet, .ah_gpioSetIntr = ar5210Gpio0SetIntr, .ah_getTsf32 = ar5210GetTsf32, .ah_getTsf64 = ar5210GetTsf64, .ah_resetTsf = ar5210ResetTsf, .ah_detectCardPresent = ar5210DetectCardPresent, .ah_updateMibCounters = ar5210UpdateMibCounters, .ah_getRfGain = ar5210GetRfgain, .ah_getDefAntenna = ar5210GetDefAntenna, .ah_setDefAntenna = ar5210SetDefAntenna, .ah_getAntennaSwitch = ar5210GetAntennaSwitch, .ah_setAntennaSwitch = ar5210SetAntennaSwitch, .ah_setSifsTime = ar5210SetSifsTime, 
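	/*
	 * The SIFS/slot/ACK/CTS accessors convert between microseconds
	 * and MAC clocks via ath_hal_mac_clks()/ath_hal_mac_usec(); the
	 * underlying registers hold clock counts.
	 */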
.ah_getSifsTime = ar5210GetSifsTime, .ah_setSlotTime = ar5210SetSlotTime, .ah_getSlotTime = ar5210GetSlotTime, .ah_setAckTimeout = ar5210SetAckTimeout, .ah_getAckTimeout = ar5210GetAckTimeout, .ah_setAckCTSRate = ar5210SetAckCTSRate, .ah_getAckCTSRate = ar5210GetAckCTSRate, .ah_setCTSTimeout = ar5210SetCTSTimeout, .ah_getCTSTimeout = ar5210GetCTSTimeout, .ah_setDecompMask = ar5210SetDecompMask, .ah_setCoverageClass = ar5210SetCoverageClass, .ah_get11nExtBusy = ar5210Get11nExtBusy, .ah_getMibCycleCounts = ar5210GetMibCycleCounts, + .ah_enableDfs = ar5210EnableDfs, + .ah_getDfsThresh = ar5210GetDfsThresh, + /* XXX procRadarEvent */ + /* XXX isFastClockEnabled */ /* Key Cache Functions */ .ah_getKeyCacheSize = ar5210GetKeyCacheSize, .ah_resetKeyCacheEntry = ar5210ResetKeyCacheEntry, .ah_isKeyCacheEntryValid = ar5210IsKeyCacheEntryValid, .ah_setKeyCacheEntry = ar5210SetKeyCacheEntry, .ah_setKeyCacheEntryMac = ar5210SetKeyCacheEntryMac, /* Power Management Functions */ .ah_setPowerMode = ar5210SetPowerMode, .ah_getPowerMode = ar5210GetPowerMode, /* Beacon Functions */ .ah_setBeaconTimers = ar5210SetBeaconTimers, .ah_beaconInit = ar5210BeaconInit, .ah_setStationBeaconTimers = ar5210SetStaBeaconTimers, .ah_resetStationBeaconTimers = ar5210ResetStaBeaconTimers, .ah_getNextTBTT = ar5210GetNextTBTT, /* Interrupt Functions */ .ah_isInterruptPending = ar5210IsInterruptPending, .ah_getPendingInterrupts = ar5210GetPendingInterrupts, .ah_getInterrupts = ar5210GetInterrupts, .ah_setInterrupts = ar5210SetInterrupts }, .ah_getChannelEdges = ar5210GetChannelEdges, .ah_getWirelessModes = ar5210GetWirelessModes, .ah_eepromRead = ar5210EepromRead, #ifdef AH_SUPPORT_WRITE_EEPROM .ah_eepromWrite = ar5210EepromWrite, #endif .ah_getChipPowerLimits = ar5210GetChipPowerLimits, }; static HAL_BOOL ar5210FillCapabilityInfo(struct ath_hal *ah); /* * Attach for an AR5210 part. 
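 * Allocates the zeroed per-chip state block, resets the chip, reads the
 * MAC, PHY and radio revision registers, then pulls the regulatory
 * domain and MAC address from the v1 EEPROM before filling in the
 * capability table.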
*/ static struct ath_hal * ar5210Attach(uint16_t devid, HAL_SOFTC sc, HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata, HAL_STATUS *status) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ath_hal_5210 *ahp; struct ath_hal *ah; uint32_t revid, pcicfg; uint16_t eeval; HAL_STATUS ecode; int i; HALDEBUG(AH_NULL, HAL_DEBUG_ATTACH, "%s: devid 0x%x sc %p st %p sh %p\n", __func__, devid, sc, (void*) st, (void*) sh); /* NB: memory is returned zero'd */ ahp = ath_hal_malloc(sizeof (struct ath_hal_5210)); if (ahp == AH_NULL) { HALDEBUG(AH_NULL, HAL_DEBUG_ANY, "%s: no memory for state block\n", __func__); ecode = HAL_ENOMEM; goto bad; } ah = &ahp->ah_priv.h; /* set initial values */ OS_MEMCPY(&ahp->ah_priv, &ar5210hal, sizeof(struct ath_hal_private)); ah->ah_sc = sc; ah->ah_st = st; ah->ah_sh = sh; ah->ah_devid = devid; /* NB: for AH_DEBUG_ALQ */ AH_PRIVATE(ah)->ah_devid = devid; AH_PRIVATE(ah)->ah_subvendorid = 0; /* XXX */ AH_PRIVATE(ah)->ah_powerLimit = AR5210_MAX_RATE_POWER; AH_PRIVATE(ah)->ah_tpScale = HAL_TP_SCALE_MAX; /* no scaling */ ahp->ah_powerMode = HAL_PM_UNDEFINED; ahp->ah_staId1Defaults = 0; ahp->ah_rssiThr = INIT_RSSI_THR; ahp->ah_sifstime = (u_int) -1; ahp->ah_slottime = (u_int) -1; ahp->ah_acktimeout = (u_int) -1; ahp->ah_ctstimeout = (u_int) -1; if (!ar5210ChipReset(ah, AH_NULL)) { /* reset chip */ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: chip reset failed\n", __func__); ecode = HAL_EIO; goto bad; } /* Read Revisions from Chips */ AH_PRIVATE(ah)->ah_macVersion = 1; AH_PRIVATE(ah)->ah_macRev = OS_REG_READ(ah, AR_SREV) & 0xff; AH_PRIVATE(ah)->ah_phyRev = OS_REG_READ(ah, AR_PHY_CHIPID); AH_PRIVATE(ah)->ah_analog2GhzRev = 0; /* Read Radio Chip Rev Extract */ OS_REG_WRITE(ah, (AR_PHY_BASE + (0x34 << 2)), 0x00001c16); for (i = 0; i < 4; i++) OS_REG_WRITE(ah, (AR_PHY_BASE + (0x20 << 2)), 0x00010000); revid = (OS_REG_READ(ah, AR_PHY_BASE + (256 << 2)) >> 28) & 0xf; /* Chip labelling is 1 greater than revision register for AR5110 */ AH_PRIVATE(ah)->ah_analog5GhzRev = ath_hal_reverseBits(revid, 4) + 1; /* * Read all the settings from the EEPROM and stash * ones we'll use later. */ pcicfg = OS_REG_READ(ah, AR_PCICFG); OS_REG_WRITE(ah, AR_PCICFG, pcicfg | AR_PCICFG_EEPROMSEL); ecode = ath_hal_v1EepromAttach(ah); if (ecode != HAL_OK) { goto eebad; } ecode = ath_hal_eepromGet(ah, AR_EEP_REGDMN_0, &eeval); if (ecode != HAL_OK) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: cannot read regulatory domain from EEPROM\n", __func__); goto eebad; } AH_PRIVATE(ah)->ah_currentRD = eeval; ecode = ath_hal_eepromGet(ah, AR_EEP_MACADDR, ahp->ah_macaddr); if (ecode != HAL_OK) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: error getting mac address from EEPROM\n", __func__); goto eebad; } OS_REG_WRITE(ah, AR_PCICFG, pcicfg); /* disable EEPROM access */ AH_PRIVATE(ah)->ah_getNfAdjust = ar5210GetNfAdjust; /* * Got everything we need now to setup the capabilities. 
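 * ar5210FillCapabilityInfo() below caches the static hardware features
 * (supported modes, channel range, queue count, key cache size) in
 * ah_caps for the driver to query.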
*/ (void) ar5210FillCapabilityInfo(ah); HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s: return\n", __func__); return ah; eebad: OS_REG_WRITE(ah, AR_PCICFG, pcicfg); /* disable EEPROM access */ bad: if (ahp) ath_hal_free(ahp); if (status) *status = ecode; return AH_NULL; #undef N } void ar5210Detach(struct ath_hal *ah) { HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s:\n", __func__); HALASSERT(ah != AH_NULL); HALASSERT(ah->ah_magic == AR5210_MAGIC); ath_hal_eepromDetach(ah); ath_hal_free(ah); } /* * Store the channel edges for the requested operational mode */ static HAL_BOOL ar5210GetChannelEdges(struct ath_hal *ah, uint16_t flags, uint16_t *low, uint16_t *high) { if (flags & IEEE80211_CHAN_5GHZ) { *low = 5120; *high = 5430; return AH_TRUE; } else { return AH_FALSE; } } static HAL_BOOL ar5210GetChipPowerLimits(struct ath_hal *ah, struct ieee80211_channel *chan) { /* XXX fill in, this is just a placeholder */ HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s: no min/max power for %u/0x%x\n", __func__, chan->ic_freq, chan->ic_flags); chan->ic_maxpower = AR5210_MAX_RATE_POWER; chan->ic_minpower = 0; return AH_TRUE; } static void ar5210ConfigPCIE(struct ath_hal *ah, HAL_BOOL restore) { } static void ar5210DisablePCIE(struct ath_hal *ah) { } /* * Fill all software cached or static hardware state information. */ static HAL_BOOL ar5210FillCapabilityInfo(struct ath_hal *ah) { struct ath_hal_private *ahpriv = AH_PRIVATE(ah); HAL_CAPABILITIES *pCap = &ahpriv->ah_caps; pCap->halWirelessModes |= HAL_MODE_11A; pCap->halLow5GhzChan = 5120; pCap->halHigh5GhzChan = 5430; pCap->halSleepAfterBeaconBroken = AH_TRUE; pCap->halPSPollBroken = AH_FALSE; pCap->halTotalQueues = HAL_NUM_TX_QUEUES; pCap->halKeyCacheSize = 64; /* XXX not needed */ pCap->halChanHalfRate = AH_FALSE; pCap->halChanQuarterRate = AH_FALSE; /* * RSSI uses the combined field; some 11n NICs may use * the control chain RSSI. */ pCap->halUseCombinedRadarRssi = AH_TRUE; if (ath_hal_eepromGetFlag(ah, AR_EEP_RFKILL)) { /* * Setup initial rfsilent settings based on the EEPROM * contents. Pin 0, polarity 0 is fixed; record this * using the EEPROM format found in later parts. */ ahpriv->ah_rfsilent = SM(0, AR_EEPROM_RFSILENT_GPIO_SEL) | SM(0, AR_EEPROM_RFSILENT_POLARITY); ahpriv->ah_rfkillEnabled = AH_TRUE; pCap->halRfSilentSupport = AH_TRUE; } pCap->halTstampPrecision = 15; /* NB: s/w extended from 13 */ pCap->halIntrMask = (HAL_INT_COMMON - HAL_INT_BNR) | HAL_INT_RX | HAL_INT_TX | HAL_INT_FATAL ; pCap->hal4kbSplitTransSupport = AH_TRUE; pCap->halHasRxSelfLinkedTail = AH_TRUE; ahpriv->ah_rxornIsFatal = AH_TRUE; return AH_TRUE; } static const char* ar5210Probe(uint16_t vendorid, uint16_t devid) { if (vendorid == ATHEROS_VENDOR_ID && (devid == AR5210_PROD || devid == AR5210_DEFAULT)) return "Atheros 5210"; return AH_NULL; } AH_CHIP(AR5210, ar5210Probe, ar5210Attach); Index: projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210_misc.c =================================================================== --- projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210_misc.c (revision 235262) +++ projects/nand/sys/dev/ath/ath_hal/ar5210/ar5210_misc.c (revision 235263) @@ -1,669 +1,679 @@ /* * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2002-2004 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ #include "opt_ah.h" #include "ah.h" #include "ah_internal.h" #include "ar5210/ar5210.h" #include "ar5210/ar5210reg.h" #include "ar5210/ar5210phy.h" #include "ah_eeprom_v1.h" #define AR_NUM_GPIO 6 /* 6 GPIO bits */ #define AR_GPIOD_MASK 0x2f /* 6-bit mask */ void ar5210GetMacAddress(struct ath_hal *ah, uint8_t *mac) { struct ath_hal_5210 *ahp = AH5210(ah); OS_MEMCPY(mac, ahp->ah_macaddr, IEEE80211_ADDR_LEN); } HAL_BOOL ar5210SetMacAddress(struct ath_hal *ah, const uint8_t *mac) { struct ath_hal_5210 *ahp = AH5210(ah); OS_MEMCPY(ahp->ah_macaddr, mac, IEEE80211_ADDR_LEN); return AH_TRUE; } void ar5210GetBssIdMask(struct ath_hal *ah, uint8_t *mask) { static const uint8_t ones[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; OS_MEMCPY(mask, ones, IEEE80211_ADDR_LEN); } HAL_BOOL ar5210SetBssIdMask(struct ath_hal *ah, const uint8_t *mask) { return AH_FALSE; } /* * Read 16 bits of data from the specified EEPROM offset. */ HAL_BOOL ar5210EepromRead(struct ath_hal *ah, u_int off, uint16_t *data) { (void) OS_REG_READ(ah, AR_EP_AIR(off)); /* activate read op */ if (!ath_hal_wait(ah, AR_EP_STA, AR_EP_STA_RDCMPLT | AR_EP_STA_RDERR, AR_EP_STA_RDCMPLT)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: read failed for entry 0x%x\n", __func__, AR_EP_AIR(off)); return AH_FALSE; } *data = OS_REG_READ(ah, AR_EP_RDATA) & 0xffff; return AH_TRUE; } #ifdef AH_SUPPORT_WRITE_EEPROM /* * Write 16 bits of data to the specified EEPROM offset. */ HAL_BOOL ar5210EepromWrite(struct ath_hal *ah, u_int off, uint16_t data) { return AH_FALSE; } #endif /* AH_SUPPORT_WRITE_EEPROM */ /* * Attempt to change the card's operating regulatory domain to the given value */ HAL_BOOL ar5210SetRegulatoryDomain(struct ath_hal *ah, uint16_t regDomain, HAL_STATUS *status) { HAL_STATUS ecode; if (AH_PRIVATE(ah)->ah_currentRD == regDomain) { ecode = HAL_EINVAL; goto bad; } /* * Check if EEPROM is configured to allow this; must * be a proper version and the protection bits must * permit re-writing that segment of the EEPROM. */ if (ath_hal_eepromGetFlag(ah, AR_EEP_WRITEPROTECT)) { ecode = HAL_EEWRITE; goto bad; } ecode = HAL_EIO; /* disallow all writes */ bad: if (status) *status = ecode; return AH_FALSE; } /* * Return the wireless modes (a,b,g,t) supported by hardware. * * This value is what is actually supported by the hardware * and is unaffected by regulatory/country code settings. * */ u_int ar5210GetWirelessModes(struct ath_hal *ah) { /* XXX could enable turbo mode but can't do all rates */ return HAL_MODE_11A; } /* * Called if RfKill is supported (according to EEPROM). Set the interrupt and * GPIO values so the ISR can disable RF on a switch signal */ void ar5210EnableRfKill(struct ath_hal *ah) { uint16_t rfsilent = AH_PRIVATE(ah)->ah_rfsilent; int select = MS(rfsilent, AR_EEPROM_RFSILENT_GPIO_SEL); int polarity = MS(rfsilent, AR_EEPROM_RFSILENT_POLARITY); /* * If radio disable switch connection to GPIO bit 0 is enabled * program GPIO interrupt.
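 * (ar5210Gpio0SetIntr() below also folds HAL_INT_GPIO into the
 * interrupt mask, so the switch event reaches the ISR through the
 * normal interrupt path.)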
* If the rfkill bit in the EEPROM is 1, the setupeeprommap routine has * already verified that it is a later EEPROM version, that it has a place * for the rfkill bit, and that the bit is set to 1, indicating that the * GPIO bit 0 hardware connection is present. */ ar5210Gpio0SetIntr(ah, select, (ar5210GpioGet(ah, select) == polarity)); } /* * Configure GPIO Output lines */ HAL_BOOL ar5210GpioCfgOutput(struct ath_hal *ah, uint32_t gpio, HAL_GPIO_MUX_TYPE type) { HALASSERT(gpio < AR_NUM_GPIO); OS_REG_WRITE(ah, AR_GPIOCR, (OS_REG_READ(ah, AR_GPIOCR) &~ AR_GPIOCR_ALL(gpio)) | AR_GPIOCR_OUT1(gpio)); return AH_TRUE; } /* * Configure GPIO Input lines */ HAL_BOOL ar5210GpioCfgInput(struct ath_hal *ah, uint32_t gpio) { HALASSERT(gpio < AR_NUM_GPIO); OS_REG_WRITE(ah, AR_GPIOCR, (OS_REG_READ(ah, AR_GPIOCR) &~ AR_GPIOCR_ALL(gpio)) | AR_GPIOCR_IN(gpio)); return AH_TRUE; } /* * Once configured for I/O - set output lines */ HAL_BOOL ar5210GpioSet(struct ath_hal *ah, uint32_t gpio, uint32_t val) { uint32_t reg; HALASSERT(gpio < AR_NUM_GPIO); reg = OS_REG_READ(ah, AR_GPIODO); reg &= ~(1 << gpio); reg |= (val&1) << gpio; OS_REG_WRITE(ah, AR_GPIODO, reg); return AH_TRUE; } /* * Once configured for I/O - get input lines */ uint32_t ar5210GpioGet(struct ath_hal *ah, uint32_t gpio) { if (gpio < AR_NUM_GPIO) { uint32_t val = OS_REG_READ(ah, AR_GPIODI); val = ((val & AR_GPIOD_MASK) >> gpio) & 0x1; return val; } else { return 0xffffffff; } } /* * Set the GPIO 0 Interrupt */ void ar5210Gpio0SetIntr(struct ath_hal *ah, u_int gpio, uint32_t ilevel) { uint32_t val = OS_REG_READ(ah, AR_GPIOCR); /* Clear the bits that we will modify. */ val &= ~(AR_GPIOCR_INT_SEL(gpio) | AR_GPIOCR_INT_SELH | AR_GPIOCR_INT_ENA | AR_GPIOCR_ALL(gpio)); val |= AR_GPIOCR_INT_SEL(gpio) | AR_GPIOCR_INT_ENA; if (ilevel) val |= AR_GPIOCR_INT_SELH; /* Don't need to change anything for low level interrupt. */ OS_REG_WRITE(ah, AR_GPIOCR, val); /* Change the interrupt mask. */ ar5210SetInterrupts(ah, AH5210(ah)->ah_maskReg | HAL_INT_GPIO); } /* * Change the LED blinking pattern to correspond to the connectivity */ void ar5210SetLedState(struct ath_hal *ah, HAL_LED_STATE state) { uint32_t val; val = OS_REG_READ(ah, AR_PCICFG); switch (state) { case HAL_LED_INIT: val &= ~(AR_PCICFG_LED_PEND | AR_PCICFG_LED_ACT); break; case HAL_LED_RUN: /* normal blink when connected */ val &= ~AR_PCICFG_LED_PEND; val |= AR_PCICFG_LED_ACT; break; default: val |= AR_PCICFG_LED_PEND; val &= ~AR_PCICFG_LED_ACT; break; } OS_REG_WRITE(ah, AR_PCICFG, val); } /* * Return 1 or 2 for the corresponding antenna that is in use */ u_int ar5210GetDefAntenna(struct ath_hal *ah) { uint32_t val = OS_REG_READ(ah, AR_STA_ID1); return (val & AR_STA_ID1_DEFAULT_ANTENNA ? 2 : 1); } void ar5210SetDefAntenna(struct ath_hal *ah, u_int antenna) { uint32_t val = OS_REG_READ(ah, AR_STA_ID1); if (antenna != (val & AR_STA_ID1_DEFAULT_ANTENNA ? 2 : 1)) { /* * Antenna change requested, force a toggle of the default. */ OS_REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_DEFAULT_ANTENNA); } } HAL_ANT_SETTING ar5210GetAntennaSwitch(struct ath_hal *ah) { return HAL_ANT_VARIABLE; } HAL_BOOL ar5210SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings) { /* XXX not sure how to fix antenna */ return (settings == HAL_ANT_VARIABLE); } /* * Change association related fields programmed into the hardware. * Writing a valid BSSID to the hardware effectively enables the hardware * to synchronize its TSF to the correct beacons and receive frames coming * from that BSSID. It is called by the SME JOIN operation.
*/ void ar5210WriteAssocid(struct ath_hal *ah, const uint8_t *bssid, uint16_t assocId) { struct ath_hal_5210 *ahp = AH5210(ah); /* XXX save bssid for possible re-use on reset */ OS_MEMCPY(ahp->ah_bssid, bssid, IEEE80211_ADDR_LEN); OS_REG_WRITE(ah, AR_BSS_ID0, LE_READ_4(ahp->ah_bssid)); OS_REG_WRITE(ah, AR_BSS_ID1, LE_READ_2(ahp->ah_bssid+4) | ((assocId & 0x3fff)<<AR_BSS_ID1_AID_S)); } uint32_t ar5210GetTsf32(struct ath_hal *ah) { return OS_REG_READ(ah, AR_TSF_L32); } uint64_t ar5210GetTsf64(struct ath_hal *ah) { uint64_t tsf = OS_REG_READ(ah, AR_TSF_U32); return (tsf << 32) | OS_REG_READ(ah, AR_TSF_L32); } void ar5210ResetTsf(struct ath_hal *ah) { uint32_t val = OS_REG_READ(ah, AR_BCON); OS_REG_WRITE(ah, AR_BCON, val | AR_BCON_RESET_TSF); } /* * Grab a semi-random value from hardware registers - may not * change often */ uint32_t ar5210GetRandomSeed(struct ath_hal *ah) { uint32_t nf; nf = (OS_REG_READ(ah, AR_PHY(25)) >> 19) & 0x1ff; if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); return (OS_REG_READ(ah, AR_TSF_U32) ^ OS_REG_READ(ah, AR_TSF_L32) ^ nf); } /* * Detect if our card is present */ HAL_BOOL ar5210DetectCardPresent(struct ath_hal *ah) { /* * Read the Silicon Revision register and compare that * to what we read at attach time. If the same, we say * a card/device is present. */ return (AH_PRIVATE(ah)->ah_macRev == (OS_REG_READ(ah, AR_SREV) & 0xff)); } /* * Update MIB Counters */ void ar5210UpdateMibCounters(struct ath_hal *ah, HAL_MIB_STATS *stats) { stats->ackrcv_bad += OS_REG_READ(ah, AR_ACK_FAIL); stats->rts_bad += OS_REG_READ(ah, AR_RTS_FAIL); stats->fcs_bad += OS_REG_READ(ah, AR_FCS_FAIL); stats->rts_good += OS_REG_READ(ah, AR_RTS_OK); stats->beacons += OS_REG_READ(ah, AR_BEACON_CNT); } HAL_BOOL ar5210SetSifsTime(struct ath_hal *ah, u_int us) { struct ath_hal_5210 *ahp = AH5210(ah); if (us > ath_hal_mac_usec(ah, 0x7ff)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad SIFS time %u\n", __func__, us); ahp->ah_sifstime = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_RMW_FIELD(ah, AR_IFS0, AR_IFS0_SIFS, ath_hal_mac_clks(ah, us)); ahp->ah_sifstime = us; return AH_TRUE; } } u_int ar5210GetSifsTime(struct ath_hal *ah) { u_int clks = OS_REG_READ(ah, AR_IFS0) & 0x7ff; return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } HAL_BOOL ar5210SetSlotTime(struct ath_hal *ah, u_int us) { struct ath_hal_5210 *ahp = AH5210(ah); if (us < HAL_SLOT_TIME_9 || us > ath_hal_mac_usec(ah, 0xffff)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad slot time %u\n", __func__, us); ahp->ah_slottime = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_WRITE(ah, AR_SLOT_TIME, ath_hal_mac_clks(ah, us)); ahp->ah_slottime = us; return AH_TRUE; } } u_int ar5210GetSlotTime(struct ath_hal *ah) { u_int clks = OS_REG_READ(ah, AR_SLOT_TIME) & 0xffff; return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } HAL_BOOL ar5210SetAckTimeout(struct ath_hal *ah, u_int us) { struct ath_hal_5210 *ahp = AH5210(ah); if (us > ath_hal_mac_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad ack timeout %u\n", __func__, us); ahp->ah_acktimeout = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, ath_hal_mac_clks(ah, us)); ahp->ah_acktimeout = us; return AH_TRUE; } } u_int ar5210GetAckTimeout(struct ath_hal *ah) { u_int clks = MS(OS_REG_READ(ah, AR_TIME_OUT), AR_TIME_OUT_ACK); return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } u_int ar5210GetAckCTSRate(struct ath_hal *ah) { return ((AH5210(ah)->ah_staId1Defaults & AR_STA_ID1_ACKCTS_6MB) == 0); } HAL_BOOL ar5210SetAckCTSRate(struct ath_hal *ah, u_int high) { struct ath_hal_5210 *ahp = AH5210(ah); if (high) { OS_REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_ACKCTS_6MB); ahp->ah_staId1Defaults &= ~AR_STA_ID1_ACKCTS_6MB; } else { OS_REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_ACKCTS_6MB); ahp->ah_staId1Defaults |= AR_STA_ID1_ACKCTS_6MB; } return AH_TRUE; } HAL_BOOL ar5210SetCTSTimeout(struct
ath_hal *ah, u_int us) { struct ath_hal_5210 *ahp = AH5210(ah); if (us > ath_hal_mac_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad cts timeout %u\n", __func__, us); ahp->ah_ctstimeout = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, ath_hal_mac_clks(ah, us)); ahp->ah_ctstimeout = us; return AH_TRUE; } } u_int ar5210GetCTSTimeout(struct ath_hal *ah) { u_int clks = MS(OS_REG_READ(ah, AR_TIME_OUT), AR_TIME_OUT_CTS); return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } HAL_BOOL ar5210SetDecompMask(struct ath_hal *ah, uint16_t keyidx, int en) { /* nothing to do */ return AH_TRUE; } void ar5210SetCoverageClass(struct ath_hal *ah, uint8_t coverageclass, int now) { } /* * Control Adaptive Noise Immunity Parameters */ HAL_BOOL ar5210AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param) { return AH_FALSE; } void ar5210RxMonitor(struct ath_hal *ah, const HAL_NODE_STATS *stats, const struct ieee80211_channel *chan) { } void ar5210AniPoll(struct ath_hal *ah, const struct ieee80211_channel *chan) { } void ar5210MibEvent(struct ath_hal *ah, const HAL_NODE_STATS *stats) { } #define AR_DIAG_SW_DIS_CRYPTO (AR_DIAG_SW_DIS_ENC | AR_DIAG_SW_DIS_DEC) HAL_STATUS ar5210GetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type, uint32_t capability, uint32_t *result) { switch (type) { case HAL_CAP_CIPHER: /* cipher handled in hardware */ return (capability == HAL_CIPHER_WEP ? HAL_OK : HAL_ENOTSUPP); default: return ath_hal_getcapability(ah, type, capability, result); } } HAL_BOOL ar5210SetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type, uint32_t capability, uint32_t setting, HAL_STATUS *status) { switch (type) { case HAL_CAP_DIAG: /* hardware diagnostic support */ /* * NB: could split this up into virtual capabilities, * (e.g. 1 => ACK, 2 => CTS, etc.) but it hardly * seems worth the additional complexity. */ #ifdef AH_DEBUG AH_PRIVATE(ah)->ah_diagreg = setting; #else AH_PRIVATE(ah)->ah_diagreg = setting & 0x6; /* ACK+CTS */ #endif OS_REG_WRITE(ah, AR_DIAG_SW, AH_PRIVATE(ah)->ah_diagreg); return AH_TRUE; case HAL_CAP_RXORN_FATAL: /* HAL_INT_RXORN treated as fatal */ return AH_FALSE; /* NB: disallow */ default: return ath_hal_setcapability(ah, type, capability, setting, status); } } HAL_BOOL ar5210GetDiagState(struct ath_hal *ah, int request, const void *args, uint32_t argsize, void **result, uint32_t *resultsize) { #ifdef AH_PRIVATE_DIAG uint32_t pcicfg; HAL_BOOL ok; switch (request) { case HAL_DIAG_EEPROM: /* XXX */ break; case HAL_DIAG_EEREAD: if (argsize != sizeof(uint16_t)) return AH_FALSE; pcicfg = OS_REG_READ(ah, AR_PCICFG); OS_REG_WRITE(ah, AR_PCICFG, pcicfg | AR_PCICFG_EEPROMSEL); ok = ath_hal_eepromRead(ah, *(const uint16_t *)args, *result); OS_REG_WRITE(ah, AR_PCICFG, pcicfg); if (ok) *resultsize = sizeof(uint16_t); return ok; } #endif return ath_hal_getdiagstate(ah, request, args, argsize, result, resultsize); } /* * Return what percentage of the extension channel is busy. * This is always disabled for AR5210 series NICs. */ uint32_t ar5210Get11nExtBusy(struct ath_hal *ah) { return (0); } /* * There's no channel survey support for the AR5210. 
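 * ar5210GetMibCycleCounts() therefore just reports failure; callers
 * must treat the survey sample as unavailable.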
*/ HAL_BOOL ar5210GetMibCycleCounts(struct ath_hal *ah, HAL_SURVEY_SAMPLE *hsample) { return (AH_FALSE); } + +void +ar5210EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe) +{ +} + +void +ar5210GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe) +{ +} Index: projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211.h =================================================================== --- projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211.h (revision 235262) +++ projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211.h (revision 235263) @@ -1,316 +1,318 @@ /* * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2002-2006 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ #ifndef _ATH_AR5211_H_ #define _ATH_AR5211_H_ #include "ah_eeprom.h" #define AR5211_MAGIC 0x19570405 /* Classes for WME streams */ #define AC_BK 0 #define AC_BE 1 #define AC_VI 2 #define AC_VO 3 /* DCU Transmit Filter macros */ #define CALC_MMR(dcu, idx) \ ( (4 * dcu) + (idx < 32 ? 0 : (idx < 64 ? 1 : (idx < 96 ? 2 : 3))) ) #define TXBLK_FROM_MMR(mmr) \ (AR_D_TXBLK_BASE + ((mmr & 0x1f) << 6) + ((mmr & 0x20) >> 3)) #define CALC_TXBLK_ADDR(dcu, idx) (TXBLK_FROM_MMR(CALC_MMR(dcu, idx))) #define CALC_TXBLK_VALUE(idx) (1 << (idx & 0x1f)) /* MAC register values */ #define INIT_INTERRUPT_MASK \ ( AR_IMR_TXERR | AR_IMR_TXOK | AR_IMR_RXORN | \ AR_IMR_RXERR | AR_IMR_RXOK | AR_IMR_TXURN | \ AR_IMR_HIUERR ) #define INIT_BEACON_CONTROL \ ( (INIT_RESET_TSF << 24) | (INIT_BEACON_EN << 23) | \ (INIT_TIM_OFFSET << 16) | INIT_BEACON_PERIOD ) #define INIT_CONFIG_STATUS 0x00000000 #define INIT_RSSI_THR 0x00000700 /* Missed beacon counter initialized to 0x7 (max is 0xff) */ #define INIT_IQCAL_LOG_COUNT_MAX 0xF #define INIT_BCON_CNTRL_REG 0x00000000 #define INIT_BEACON_PERIOD 0xffff #define INIT_TIM_OFFSET 0 #define INIT_BEACON_EN 0 /* this should be set by AP only when it's ready */ #define INIT_RESET_TSF 0 /* * Various fifo fill before Tx start, in 64-byte units * i.e. put the frame in the air while still DMAing */ #define MIN_TX_FIFO_THRESHOLD 0x1 #define MAX_TX_FIFO_THRESHOLD ((IEEE80211_MAX_LEN / 64) + 1) #define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD /* * Gain support. 
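 * The RF gain is tuned at runtime by walking a ladder of discrete
 * optimization steps; GAIN_VALUES tracks the current step, the target
 * gain and the low/high trigger thresholds.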
*/ typedef struct _gainOptStep { int16_t paramVal[4]; int32_t stepGain; int8_t stepName[16]; } GAIN_OPTIMIZATION_STEP; typedef struct { uint32_t numStepsInLadder; uint32_t defaultStepNum; GAIN_OPTIMIZATION_STEP optStep[10]; } GAIN_OPTIMIZATION_LADDER; typedef struct { uint32_t currStepNum; uint32_t currGain; uint32_t targetGain; uint32_t loTrig; uint32_t hiTrig; uint32_t active; const GAIN_OPTIMIZATION_STEP *currStep; } GAIN_VALUES; enum { RFGAIN_INACTIVE, RFGAIN_READ_REQUESTED, RFGAIN_NEED_CHANGE }; /* * Header Info - general parameters and * values set for each chipset board solution * that are programmed every reset */ struct ath_hal_5211 { struct ath_hal_private ah_priv; /* base class */ GAIN_VALUES ah_gainValues; uint8_t ah_macaddr[IEEE80211_ADDR_LEN]; uint8_t ah_bssid[IEEE80211_ADDR_LEN]; /* * Runtime state. */ uint32_t ah_maskReg; /* copy of AR_IMR */ uint32_t ah_txOkInterruptMask; uint32_t ah_txErrInterruptMask; uint32_t ah_txDescInterruptMask; uint32_t ah_txEolInterruptMask; uint32_t ah_txUrnInterruptMask; HAL_TX_QUEUE_INFO ah_txq[HAL_NUM_TX_QUEUES]; HAL_POWER_MODE ah_powerMode; HAL_ANT_SETTING ah_diversityControl; /* antenna setting */ uint32_t ah_calibrationTime; HAL_BOOL ah_bIQCalibration; int ah_rfgainState; uint32_t ah_tx6PowerInHalfDbm; /* power output for 6Mb tx */ uint32_t ah_staId1Defaults; /* STA_ID1 default settings */ uint32_t ah_beaconInterval; uint32_t ah_rssiThr; /* RSSI_THR settings */ u_int ah_sifstime; /* user-specified sifs time */ u_int ah_slottime; /* user-specified slot time */ u_int ah_acktimeout; /* user-specified ack timeout */ u_int ah_ctstimeout; /* user-specified cts timeout */ /* * RF Silent handling. */ uint32_t ah_gpioSelect; /* GPIO pin to use */ uint32_t ah_polarity; /* polarity to disable RF */ uint32_t ah_gpioBit; /* after init, prev value */ }; #define AH5211(ah) ((struct ath_hal_5211 *)(ah)) struct ath_hal; extern void ar5211Detach(struct ath_hal *); extern HAL_BOOL ar5211Reset(struct ath_hal *, HAL_OPMODE, struct ieee80211_channel *, HAL_BOOL bChannelChange, HAL_STATUS *); extern HAL_BOOL ar5211PhyDisable(struct ath_hal *); extern HAL_BOOL ar5211Disable(struct ath_hal *); extern HAL_BOOL ar5211ChipReset(struct ath_hal *, const struct ieee80211_channel *); extern HAL_BOOL ar5211PerCalibration(struct ath_hal *, struct ieee80211_channel *, HAL_BOOL *); extern HAL_BOOL ar5211PerCalibrationN(struct ath_hal *ah, struct ieee80211_channel *chan, u_int chainMask, HAL_BOOL longCal, HAL_BOOL *isCalDone); extern HAL_BOOL ar5211ResetCalValid(struct ath_hal *ah, const struct ieee80211_channel *); extern HAL_BOOL ar5211SetTxPowerLimit(struct ath_hal *, uint32_t limit); extern HAL_BOOL ar5211CalNoiseFloor(struct ath_hal *, const struct ieee80211_channel *); extern HAL_BOOL ar5211SetAntennaSwitchInternal(struct ath_hal *, HAL_ANT_SETTING, const struct ieee80211_channel *); extern int16_t ar5211GetNfAdjust(struct ath_hal *, const HAL_CHANNEL_INTERNAL *); extern HAL_BOOL ar5211ResetDma(struct ath_hal *, HAL_OPMODE); extern void ar5211InitializeGainValues(struct ath_hal *); extern HAL_RFGAIN ar5211GetRfgain(struct ath_hal *); extern void ar5211SetPCUConfig(struct ath_hal *); extern HAL_BOOL ar5211SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo); extern HAL_BOOL ar5211GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo); extern int ar5211SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type, const HAL_TXQ_INFO *qInfo); extern HAL_BOOL ar5211ReleaseTxQueue(struct ath_hal *ah, u_int q); extern HAL_BOOL ar5211ResetTxQueue(struct ath_hal 
*ah, u_int q); extern uint32_t ar5211GetTxDP(struct ath_hal *, u_int); extern HAL_BOOL ar5211SetTxDP(struct ath_hal *, u_int, uint32_t txdp); extern HAL_BOOL ar5211UpdateTxTrigLevel(struct ath_hal *, HAL_BOOL); extern HAL_BOOL ar5211StartTxDma(struct ath_hal *, u_int); extern HAL_BOOL ar5211StopTxDma(struct ath_hal *, u_int); extern uint32_t ar5211NumTxPending(struct ath_hal *, u_int qnum); extern HAL_BOOL ar5211IsTxQueueStopped(struct ath_hal *, u_int); extern HAL_BOOL ar5211GetTransmitFilterIndex(struct ath_hal *, uint32_t); extern HAL_BOOL ar5211SetupTxDesc(struct ath_hal *, struct ath_desc *, u_int pktLen, u_int hdrLen, HAL_PKT_TYPE type, u_int txPower, u_int txRate0, u_int txTries0, u_int keyIx, u_int antMode, u_int flags, u_int rtsctsRate, u_int rtsctsDuration, u_int compicvLen, u_int compivLen, u_int comp); extern HAL_BOOL ar5211SetupXTxDesc(struct ath_hal *, struct ath_desc *, u_int txRate1, u_int txRetries1, u_int txRate2, u_int txRetries2, u_int txRate3, u_int txRetries3); extern HAL_BOOL ar5211FillTxDesc(struct ath_hal *, struct ath_desc *, u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg, const struct ath_desc *ds0); extern HAL_STATUS ar5211ProcTxDesc(struct ath_hal *, struct ath_desc *, struct ath_tx_status *); extern void ar5211GetTxIntrQueue(struct ath_hal *ah, uint32_t *); extern void ar5211IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *); extern HAL_BOOL ar5211GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries); extern uint32_t ar5211GetRxDP(struct ath_hal *); extern void ar5211SetRxDP(struct ath_hal *, uint32_t rxdp); extern void ar5211EnableReceive(struct ath_hal *); extern HAL_BOOL ar5211StopDmaReceive(struct ath_hal *); extern void ar5211StartPcuReceive(struct ath_hal *); extern void ar5211StopPcuReceive(struct ath_hal *); extern void ar5211SetMulticastFilter(struct ath_hal *, uint32_t filter0, uint32_t filter1); extern HAL_BOOL ar5211ClrMulticastFilterIndex(struct ath_hal *, uint32_t); extern HAL_BOOL ar5211SetMulticastFilterIndex(struct ath_hal *, uint32_t); extern uint32_t ar5211GetRxFilter(struct ath_hal *); extern void ar5211SetRxFilter(struct ath_hal *, uint32_t); extern HAL_BOOL ar5211SetupRxDesc(struct ath_hal *, struct ath_desc *, uint32_t, u_int flags); extern HAL_STATUS ar5211ProcRxDesc(struct ath_hal *, struct ath_desc *, uint32_t, struct ath_desc *, uint64_t, struct ath_rx_status *); extern void ar5211GetMacAddress(struct ath_hal *, uint8_t *); extern HAL_BOOL ar5211SetMacAddress(struct ath_hal *ah, const uint8_t *); extern void ar5211GetBssIdMask(struct ath_hal *, uint8_t *); extern HAL_BOOL ar5211SetBssIdMask(struct ath_hal *, const uint8_t *); extern HAL_BOOL ar5211EepromRead(struct ath_hal *, u_int off, uint16_t *data); extern HAL_BOOL ar5211EepromWrite(struct ath_hal *, u_int off, uint16_t data); extern HAL_BOOL ar5211SetRegulatoryDomain(struct ath_hal *, uint16_t, HAL_STATUS *); extern u_int ar5211GetWirelessModes(struct ath_hal *); extern void ar5211EnableRfKill(struct ath_hal *); extern uint32_t ar5211GpioGet(struct ath_hal *, uint32_t gpio); extern void ar5211GpioSetIntr(struct ath_hal *, u_int, uint32_t ilevel); extern HAL_BOOL ar5211GpioCfgOutput(struct ath_hal *, uint32_t gpio, HAL_GPIO_MUX_TYPE); extern HAL_BOOL ar5211GpioCfgInput(struct ath_hal *, uint32_t gpio); extern HAL_BOOL ar5211GpioSet(struct ath_hal *, uint32_t gpio, uint32_t val); extern void ar5211SetLedState(struct ath_hal *, HAL_LED_STATE); extern u_int ar5211AntennaGet(struct ath_hal *); extern void ar5211WriteAssocid(struct ath_hal 
*, const uint8_t *bssid, uint16_t assocId); extern uint64_t ar5211GetTsf64(struct ath_hal *); extern uint32_t ar5211GetTsf32(struct ath_hal *); extern void ar5211ResetTsf(struct ath_hal *); extern uint32_t ar5211GetMaxTurboRate(struct ath_hal *); extern uint32_t ar5211GetRandomSeed(struct ath_hal *); extern HAL_BOOL ar5211DetectCardPresent(struct ath_hal *); extern void ar5211UpdateMibCounters(struct ath_hal *, HAL_MIB_STATS *); extern void ar5211EnableHwEncryption(struct ath_hal *); extern void ar5211DisableHwEncryption(struct ath_hal *); extern HAL_BOOL ar5211SetSlotTime(struct ath_hal *, u_int); extern u_int ar5211GetSlotTime(struct ath_hal *); extern HAL_BOOL ar5211SetAckTimeout(struct ath_hal *, u_int); extern u_int ar5211GetAckTimeout(struct ath_hal *); extern HAL_BOOL ar5211SetAckCTSRate(struct ath_hal *, u_int); extern u_int ar5211GetAckCTSRate(struct ath_hal *); extern HAL_BOOL ar5211SetCTSTimeout(struct ath_hal *, u_int); extern u_int ar5211GetCTSTimeout(struct ath_hal *); extern HAL_BOOL ar5211SetSifsTime(struct ath_hal *, u_int); extern u_int ar5211GetSifsTime(struct ath_hal *); extern HAL_BOOL ar5211SetDecompMask(struct ath_hal *, uint16_t, int); extern void ar5211SetCoverageClass(struct ath_hal *, uint8_t, int); extern uint32_t ar5211GetCurRssi(struct ath_hal *); extern u_int ar5211GetDefAntenna(struct ath_hal *); extern void ar5211SetDefAntenna(struct ath_hal *ah, u_int antenna); extern HAL_ANT_SETTING ar5211GetAntennaSwitch(struct ath_hal *); extern HAL_BOOL ar5211SetAntennaSwitch(struct ath_hal *, HAL_ANT_SETTING); extern HAL_STATUS ar5211GetCapability(struct ath_hal *, HAL_CAPABILITY_TYPE, uint32_t, uint32_t *); extern HAL_BOOL ar5211SetCapability(struct ath_hal *, HAL_CAPABILITY_TYPE, uint32_t, uint32_t, HAL_STATUS *); extern HAL_BOOL ar5211GetDiagState(struct ath_hal *ah, int request, const void *args, uint32_t argsize, void **result, uint32_t *resultsize); extern uint32_t ar5211Get11nExtBusy(struct ath_hal *); extern HAL_BOOL ar5211GetMibCycleCounts(struct ath_hal *, HAL_SURVEY_SAMPLE *); +extern void ar5211EnableDfs(struct ath_hal *, HAL_PHYERR_PARAM *); +extern void ar5211GetDfsThresh(struct ath_hal *, HAL_PHYERR_PARAM *); extern u_int ar5211GetKeyCacheSize(struct ath_hal *); extern HAL_BOOL ar5211IsKeyCacheEntryValid(struct ath_hal *, uint16_t); extern HAL_BOOL ar5211ResetKeyCacheEntry(struct ath_hal *, uint16_t entry); extern HAL_BOOL ar5211SetKeyCacheEntry(struct ath_hal *, uint16_t entry, const HAL_KEYVAL *, const uint8_t *mac, int xorKey); extern HAL_BOOL ar5211SetKeyCacheEntryMac(struct ath_hal *, uint16_t, const uint8_t *); extern HAL_BOOL ar5211SetPowerMode(struct ath_hal *, uint32_t powerRequest, int setChip); extern HAL_POWER_MODE ar5211GetPowerMode(struct ath_hal *); extern void ar5211SetBeaconTimers(struct ath_hal *, const HAL_BEACON_TIMERS *); extern void ar5211BeaconInit(struct ath_hal *, uint32_t, uint32_t); extern void ar5211SetStaBeaconTimers(struct ath_hal *, const HAL_BEACON_STATE *); extern void ar5211ResetStaBeaconTimers(struct ath_hal *); extern uint64_t ar5211GetNextTBTT(struct ath_hal *); extern HAL_BOOL ar5211IsInterruptPending(struct ath_hal *); extern HAL_BOOL ar5211GetPendingInterrupts(struct ath_hal *, HAL_INT *); extern HAL_INT ar5211GetInterrupts(struct ath_hal *); extern HAL_INT ar5211SetInterrupts(struct ath_hal *, HAL_INT ints); extern const HAL_RATE_TABLE *ar5211GetRateTable(struct ath_hal *, u_int mode); extern HAL_BOOL ar5211AniControl(struct ath_hal *, HAL_ANI_CMD, int ); extern void ar5211RxMonitor(struct ath_hal *, const 
HAL_NODE_STATS *, const struct ieee80211_channel *); extern void ar5211AniPoll(struct ath_hal *, const struct ieee80211_channel *); extern void ar5211MibEvent(struct ath_hal *, const HAL_NODE_STATS *); #endif /* _ATH_AR5211_H_ */ Index: projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c =================================================================== --- projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c (revision 235262) +++ projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211_attach.c (revision 235263) @@ -1,541 +1,545 @@ /* * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2002-2006 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $FreeBSD$ */ #include "opt_ah.h" #include "ah.h" #include "ah_internal.h" #include "ah_devid.h" #include "ar5211/ar5211.h" #include "ar5211/ar5211reg.h" #include "ar5211/ar5211phy.h" #include "ah_eeprom_v3.h" static HAL_BOOL ar5211GetChannelEdges(struct ath_hal *ah, uint16_t flags, uint16_t *low, uint16_t *high); static HAL_BOOL ar5211GetChipPowerLimits(struct ath_hal *ah, struct ieee80211_channel *chan); static void ar5211ConfigPCIE(struct ath_hal *ah, HAL_BOOL restore); static void ar5211DisablePCIE(struct ath_hal *ah); static const struct ath_hal_private ar5211hal = {{ .ah_magic = AR5211_MAGIC, .ah_getRateTable = ar5211GetRateTable, .ah_detach = ar5211Detach, /* Reset Functions */ .ah_reset = ar5211Reset, .ah_phyDisable = ar5211PhyDisable, .ah_disable = ar5211Disable, .ah_configPCIE = ar5211ConfigPCIE, .ah_disablePCIE = ar5211DisablePCIE, .ah_setPCUConfig = ar5211SetPCUConfig, .ah_perCalibration = ar5211PerCalibration, .ah_perCalibrationN = ar5211PerCalibrationN, .ah_resetCalValid = ar5211ResetCalValid, .ah_setTxPowerLimit = ar5211SetTxPowerLimit, .ah_getChanNoise = ath_hal_getChanNoise, /* Transmit functions */ .ah_updateTxTrigLevel = ar5211UpdateTxTrigLevel, .ah_setupTxQueue = ar5211SetupTxQueue, .ah_setTxQueueProps = ar5211SetTxQueueProps, .ah_getTxQueueProps = ar5211GetTxQueueProps, .ah_releaseTxQueue = ar5211ReleaseTxQueue, .ah_resetTxQueue = ar5211ResetTxQueue, .ah_getTxDP = ar5211GetTxDP, .ah_setTxDP = ar5211SetTxDP, .ah_numTxPending = ar5211NumTxPending, .ah_startTxDma = ar5211StartTxDma, .ah_stopTxDma = ar5211StopTxDma, .ah_setupTxDesc = ar5211SetupTxDesc, .ah_setupXTxDesc = ar5211SetupXTxDesc, .ah_fillTxDesc = ar5211FillTxDesc, .ah_procTxDesc = ar5211ProcTxDesc, .ah_getTxIntrQueue = ar5211GetTxIntrQueue, .ah_reqTxIntrDesc = ar5211IntrReqTxDesc, .ah_getTxCompletionRates = ar5211GetTxCompletionRates, /* RX Functions */ .ah_getRxDP = ar5211GetRxDP, .ah_setRxDP = ar5211SetRxDP, .ah_enableReceive = ar5211EnableReceive, .ah_stopDmaReceive = ar5211StopDmaReceive, .ah_startPcuReceive = ar5211StartPcuReceive, .ah_stopPcuReceive = ar5211StopPcuReceive, .ah_setMulticastFilter = ar5211SetMulticastFilter, .ah_setMulticastFilterIndex = 
ar5211SetMulticastFilterIndex, .ah_clrMulticastFilterIndex = ar5211ClrMulticastFilterIndex, .ah_getRxFilter = ar5211GetRxFilter, .ah_setRxFilter = ar5211SetRxFilter, .ah_setupRxDesc = ar5211SetupRxDesc, .ah_procRxDesc = ar5211ProcRxDesc, .ah_rxMonitor = ar5211RxMonitor, .ah_aniPoll = ar5211AniPoll, .ah_procMibEvent = ar5211MibEvent, /* Misc Functions */ .ah_getCapability = ar5211GetCapability, .ah_setCapability = ar5211SetCapability, .ah_getDiagState = ar5211GetDiagState, .ah_getMacAddress = ar5211GetMacAddress, .ah_setMacAddress = ar5211SetMacAddress, .ah_getBssIdMask = ar5211GetBssIdMask, .ah_setBssIdMask = ar5211SetBssIdMask, .ah_setRegulatoryDomain = ar5211SetRegulatoryDomain, .ah_setLedState = ar5211SetLedState, .ah_writeAssocid = ar5211WriteAssocid, .ah_gpioCfgInput = ar5211GpioCfgInput, .ah_gpioCfgOutput = ar5211GpioCfgOutput, .ah_gpioGet = ar5211GpioGet, .ah_gpioSet = ar5211GpioSet, .ah_gpioSetIntr = ar5211GpioSetIntr, .ah_getTsf32 = ar5211GetTsf32, .ah_getTsf64 = ar5211GetTsf64, .ah_resetTsf = ar5211ResetTsf, .ah_detectCardPresent = ar5211DetectCardPresent, .ah_updateMibCounters = ar5211UpdateMibCounters, .ah_getRfGain = ar5211GetRfgain, .ah_getDefAntenna = ar5211GetDefAntenna, .ah_setDefAntenna = ar5211SetDefAntenna, .ah_getAntennaSwitch = ar5211GetAntennaSwitch, .ah_setAntennaSwitch = ar5211SetAntennaSwitch, .ah_setSifsTime = ar5211SetSifsTime, .ah_getSifsTime = ar5211GetSifsTime, .ah_setSlotTime = ar5211SetSlotTime, .ah_getSlotTime = ar5211GetSlotTime, .ah_setAckTimeout = ar5211SetAckTimeout, .ah_getAckTimeout = ar5211GetAckTimeout, .ah_setAckCTSRate = ar5211SetAckCTSRate, .ah_getAckCTSRate = ar5211GetAckCTSRate, .ah_setCTSTimeout = ar5211SetCTSTimeout, .ah_getCTSTimeout = ar5211GetCTSTimeout, .ah_setDecompMask = ar5211SetDecompMask, .ah_setCoverageClass = ar5211SetCoverageClass, .ah_get11nExtBusy = ar5211Get11nExtBusy, .ah_getMibCycleCounts = ar5211GetMibCycleCounts, + .ah_enableDfs = ar5211EnableDfs, + .ah_getDfsThresh = ar5211GetDfsThresh, + /* XXX procRadarEvent */ + /* XXX isFastClockEnabled */ /* Key Cache Functions */ .ah_getKeyCacheSize = ar5211GetKeyCacheSize, .ah_resetKeyCacheEntry = ar5211ResetKeyCacheEntry, .ah_isKeyCacheEntryValid = ar5211IsKeyCacheEntryValid, .ah_setKeyCacheEntry = ar5211SetKeyCacheEntry, .ah_setKeyCacheEntryMac = ar5211SetKeyCacheEntryMac, /* Power Management Functions */ .ah_setPowerMode = ar5211SetPowerMode, .ah_getPowerMode = ar5211GetPowerMode, /* Beacon Functions */ .ah_setBeaconTimers = ar5211SetBeaconTimers, .ah_beaconInit = ar5211BeaconInit, .ah_setStationBeaconTimers = ar5211SetStaBeaconTimers, .ah_resetStationBeaconTimers = ar5211ResetStaBeaconTimers, .ah_getNextTBTT = ar5211GetNextTBTT, /* Interrupt Functions */ .ah_isInterruptPending = ar5211IsInterruptPending, .ah_getPendingInterrupts = ar5211GetPendingInterrupts, .ah_getInterrupts = ar5211GetInterrupts, .ah_setInterrupts = ar5211SetInterrupts }, .ah_getChannelEdges = ar5211GetChannelEdges, .ah_getWirelessModes = ar5211GetWirelessModes, .ah_eepromRead = ar5211EepromRead, #ifdef AH_SUPPORT_WRITE_EEPROM .ah_eepromWrite = ar5211EepromWrite, #endif .ah_getChipPowerLimits = ar5211GetChipPowerLimits, }; static HAL_BOOL ar5211ChipTest(struct ath_hal *); static HAL_BOOL ar5211FillCapabilityInfo(struct ath_hal *ah); /* * Return the revision id for the radio chip. This is * fetched via the PHY.
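 * The readback below writes PHY register 0x34, strobes PHY register
 * 0x20 eight times, then pulls the revision out of PHY register 256,
 * nibble-swapping and bit-reversing the result into canonical form.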
*/ static uint32_t ar5211GetRadioRev(struct ath_hal *ah) { uint32_t val; int i; OS_REG_WRITE(ah, (AR_PHY_BASE + (0x34 << 2)), 0x00001c16); for (i = 0; i < 8; i++) OS_REG_WRITE(ah, (AR_PHY_BASE + (0x20 << 2)), 0x00010000); val = (OS_REG_READ(ah, AR_PHY_BASE + (256 << 2)) >> 24) & 0xff; val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4); return ath_hal_reverseBits(val, 8); } /* * Attach for an AR5211 part. */ static struct ath_hal * ar5211Attach(uint16_t devid, HAL_SOFTC sc, HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata, HAL_STATUS *status) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ath_hal_5211 *ahp; struct ath_hal *ah; uint32_t val; uint16_t eeval; HAL_STATUS ecode; HALDEBUG(AH_NULL, HAL_DEBUG_ATTACH, "%s: sc %p st %p sh %p\n", __func__, sc, (void*) st, (void*) sh); /* NB: memory is returned zero'd */ ahp = ath_hal_malloc(sizeof (struct ath_hal_5211)); if (ahp == AH_NULL) { HALDEBUG(AH_NULL, HAL_DEBUG_ANY, "%s: cannot allocate memory for state block\n", __func__); ecode = HAL_ENOMEM; goto bad; } ah = &ahp->ah_priv.h; /* set initial values */ OS_MEMCPY(&ahp->ah_priv, &ar5211hal, sizeof(struct ath_hal_private)); ah->ah_sc = sc; ah->ah_st = st; ah->ah_sh = sh; ah->ah_devid = devid; /* NB: for AH_DEBUG_ALQ */ AH_PRIVATE(ah)->ah_devid = devid; AH_PRIVATE(ah)->ah_subvendorid = 0; /* XXX */ AH_PRIVATE(ah)->ah_powerLimit = MAX_RATE_POWER; AH_PRIVATE(ah)->ah_tpScale = HAL_TP_SCALE_MAX; /* no scaling */ ahp->ah_diversityControl = HAL_ANT_VARIABLE; ahp->ah_staId1Defaults = 0; ahp->ah_rssiThr = INIT_RSSI_THR; ahp->ah_sifstime = (u_int) -1; ahp->ah_slottime = (u_int) -1; ahp->ah_acktimeout = (u_int) -1; ahp->ah_ctstimeout = (u_int) -1; if (!ar5211ChipReset(ah, AH_NULL)) { /* reset chip */ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: chip reset failed\n", __func__); ecode = HAL_EIO; goto bad; } if (AH_PRIVATE(ah)->ah_devid == AR5211_FPGA11B) { /* set it back to OFDM mode to be able to read analog rev id */ OS_REG_WRITE(ah, AR5211_PHY_MODE, AR5211_PHY_MODE_OFDM); OS_REG_WRITE(ah, AR_PHY_PLL_CTL, AR_PHY_PLL_CTL_44); OS_DELAY(1000); } /* Read Revisions from Chips */ val = OS_REG_READ(ah, AR_SREV) & AR_SREV_ID_M; AH_PRIVATE(ah)->ah_macVersion = val >> AR_SREV_ID_S; AH_PRIVATE(ah)->ah_macRev = val & AR_SREV_REVISION_M; if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_MAUI_2 || AH_PRIVATE(ah)->ah_macVersion > AR_SREV_VERSION_OAHU) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: Mac Chip Rev 0x%x is not supported by this driver\n", __func__, AH_PRIVATE(ah)->ah_macVersion); ecode = HAL_ENOTSUPP; goto bad; } AH_PRIVATE(ah)->ah_phyRev = OS_REG_READ(ah, AR_PHY_CHIP_ID); if (!ar5211ChipTest(ah)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: hardware self-test failed\n", __func__); ecode = HAL_ESELFTEST; goto bad; } /* Set correct Baseband to analog shift setting to access analog chips. 
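 * (Oahu and later MACs take 0x00000007 here; earlier parts take
 * 0x00000047, per the version check below.)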
*/ if (AH_PRIVATE(ah)->ah_macVersion >= AR_SREV_VERSION_OAHU) { OS_REG_WRITE(ah, AR_PHY_BASE, 0x00000007); } else { OS_REG_WRITE(ah, AR_PHY_BASE, 0x00000047); } OS_DELAY(2000); /* Read Radio Chip Rev Extract */ AH_PRIVATE(ah)->ah_analog5GhzRev = ar5211GetRadioRev(ah); if ((AH_PRIVATE(ah)->ah_analog5GhzRev & 0xf0) != RAD5_SREV_MAJOR) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: 5G Radio Chip Rev 0x%02X is not supported by this " "driver\n", __func__, AH_PRIVATE(ah)->ah_analog5GhzRev); ecode = HAL_ENOTSUPP; goto bad; } val = (OS_REG_READ(ah, AR_PCICFG) & AR_PCICFG_EEPROM_SIZE_M) >> AR_PCICFG_EEPROM_SIZE_S; if (val != AR_PCICFG_EEPROM_SIZE_16K) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: unsupported EEPROM size " "%u (0x%x) found\n", __func__, val, val); ecode = HAL_EESIZE; goto bad; } ecode = ath_hal_legacyEepromAttach(ah); if (ecode != HAL_OK) { goto bad; } /* If Bmode and AR5211, verify 2.4 analog exists */ if (AH_PRIVATE(ah)->ah_macVersion >= AR_SREV_VERSION_OAHU && ath_hal_eepromGetFlag(ah, AR_EEP_BMODE)) { /* Set correct Baseband to analog shift setting to access analog chips. */ OS_REG_WRITE(ah, AR_PHY_BASE, 0x00004007); OS_DELAY(2000); AH_PRIVATE(ah)->ah_analog2GhzRev = ar5211GetRadioRev(ah); /* Set baseband for 5GHz chip */ OS_REG_WRITE(ah, AR_PHY_BASE, 0x00000007); OS_DELAY(2000); if ((AH_PRIVATE(ah)->ah_analog2GhzRev & 0xF0) != RAD2_SREV_MAJOR) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: 2G Radio Chip Rev 0x%x is not supported by " "this driver\n", __func__, AH_PRIVATE(ah)->ah_analog2GhzRev); ecode = HAL_ENOTSUPP; goto bad; } } else { ath_hal_eepromSet(ah, AR_EEP_BMODE, AH_FALSE); } ecode = ath_hal_eepromGet(ah, AR_EEP_REGDMN_0, &eeval); if (ecode != HAL_OK) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: cannot read regulatory domain from EEPROM\n", __func__); goto bad; } AH_PRIVATE(ah)->ah_currentRD = eeval; AH_PRIVATE(ah)->ah_getNfAdjust = ar5211GetNfAdjust; /* * Got everything we need now to setup the capabilities. 
*/ (void) ar5211FillCapabilityInfo(ah); /* Initialize gain ladder thermal calibration structure */ ar5211InitializeGainValues(ah); ecode = ath_hal_eepromGet(ah, AR_EEP_MACADDR, ahp->ah_macaddr); if (ecode != HAL_OK) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: error getting mac address from EEPROM\n", __func__); goto bad; } HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s: return\n", __func__); return ah; bad: if (ahp) ar5211Detach((struct ath_hal *) ahp); if (status) *status = ecode; return AH_NULL; #undef N } void ar5211Detach(struct ath_hal *ah) { HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s:\n", __func__); HALASSERT(ah != AH_NULL); HALASSERT(ah->ah_magic == AR5211_MAGIC); ath_hal_eepromDetach(ah); ath_hal_free(ah); } static HAL_BOOL ar5211ChipTest(struct ath_hal *ah) { uint32_t regAddr[2] = { AR_STA_ID0, AR_PHY_BASE+(8 << 2) }; uint32_t regHold[2]; uint32_t patternData[4] = { 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999 }; int i, j; /* Test PHY & MAC registers */ for (i = 0; i < 2; i++) { uint32_t addr = regAddr[i]; uint32_t wrData, rdData; regHold[i] = OS_REG_READ(ah, addr); for (j = 0; j < 0x100; j++) { wrData = (j << 16) | j; OS_REG_WRITE(ah, addr, wrData); rdData = OS_REG_READ(ah, addr); if (rdData != wrData) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", __func__, addr, wrData, rdData); return AH_FALSE; } } for (j = 0; j < 4; j++) { wrData = patternData[j]; OS_REG_WRITE(ah, addr, wrData); rdData = OS_REG_READ(ah, addr); if (wrData != rdData) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", __func__, addr, wrData, rdData); return AH_FALSE; } } OS_REG_WRITE(ah, regAddr[i], regHold[i]); } OS_DELAY(100); return AH_TRUE; } /* * Store the channel edges for the requested operational mode */ static HAL_BOOL ar5211GetChannelEdges(struct ath_hal *ah, uint16_t flags, uint16_t *low, uint16_t *high) { if (flags & IEEE80211_CHAN_5GHZ) { *low = 4920; *high = 6100; return AH_TRUE; } if (flags & IEEE80211_CHAN_2GHZ && ath_hal_eepromGetFlag(ah, AR_EEP_BMODE)) { *low = 2312; *high = 2732; return AH_TRUE; } return AH_FALSE; } static HAL_BOOL ar5211GetChipPowerLimits(struct ath_hal *ah, struct ieee80211_channel *chan) { /* XXX fill in, this is just a placeholder */ HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s: no min/max power for %u/0x%x\n", __func__, chan->ic_freq, chan->ic_flags); chan->ic_maxpower = MAX_RATE_POWER; chan->ic_minpower = 0; return AH_TRUE; } static void ar5211ConfigPCIE(struct ath_hal *ah, HAL_BOOL restore) { } static void ar5211DisablePCIE(struct ath_hal *ah) { } /* * Fill all software cached or static hardware state information. 
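 * For example, the wireless-mode mask below is derived purely from
 * EEPROM flags: AR_EEP_AMODE yields HAL_MODE_11A (plus HAL_MODE_TURBO
 * unless AR_EEP_TURBO5DISABLE is set) and AR_EEP_BMODE yields
 * HAL_MODE_11B.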
*/ static HAL_BOOL ar5211FillCapabilityInfo(struct ath_hal *ah) { struct ath_hal_private *ahpriv = AH_PRIVATE(ah); HAL_CAPABILITIES *pCap = &ahpriv->ah_caps; /* Construct wireless mode from EEPROM */ pCap->halWirelessModes = 0; if (ath_hal_eepromGetFlag(ah, AR_EEP_AMODE)) { pCap->halWirelessModes |= HAL_MODE_11A; if (!ath_hal_eepromGetFlag(ah, AR_EEP_TURBO5DISABLE)) pCap->halWirelessModes |= HAL_MODE_TURBO; } if (ath_hal_eepromGetFlag(ah, AR_EEP_BMODE)) pCap->halWirelessModes |= HAL_MODE_11B; pCap->halLow2GhzChan = 2312; pCap->halHigh2GhzChan = 2732; pCap->halLow5GhzChan = 4920; pCap->halHigh5GhzChan = 6100; pCap->halChanSpreadSupport = AH_TRUE; pCap->halSleepAfterBeaconBroken = AH_TRUE; pCap->halPSPollBroken = AH_TRUE; pCap->halVEOLSupport = AH_TRUE; pCap->halTotalQueues = HAL_NUM_TX_QUEUES; pCap->halKeyCacheSize = 128; /* XXX not needed */ pCap->halChanHalfRate = AH_FALSE; pCap->halChanQuarterRate = AH_FALSE; /* * RSSI uses the combined field; some 11n NICs may use * the control chain RSSI. */ pCap->halUseCombinedRadarRssi = AH_TRUE; if (ath_hal_eepromGetFlag(ah, AR_EEP_RFKILL) && ath_hal_eepromGet(ah, AR_EEP_RFSILENT, &ahpriv->ah_rfsilent) == HAL_OK) { /* NB: enabled by default */ ahpriv->ah_rfkillEnabled = AH_TRUE; pCap->halRfSilentSupport = AH_TRUE; } pCap->halTstampPrecision = 13; pCap->halIntrMask = HAL_INT_COMMON | HAL_INT_RX | HAL_INT_TX | HAL_INT_FATAL | HAL_INT_BNR | HAL_INT_TIM ; pCap->hal4kbSplitTransSupport = AH_TRUE; pCap->halHasRxSelfLinkedTail = AH_TRUE; /* XXX might be ok w/ some chip revs */ ahpriv->ah_rxornIsFatal = AH_TRUE; return AH_TRUE; } static const char* ar5211Probe(uint16_t vendorid, uint16_t devid) { if (vendorid == ATHEROS_VENDOR_ID) { if (devid == AR5211_DEVID || devid == AR5311_DEVID || devid == AR5211_DEFAULT) return "Atheros 5211"; if (devid == AR5211_FPGA11B) return "Atheros 5211 (FPGA)"; } return AH_NULL; } AH_CHIP(AR5211, ar5211Probe, ar5211Attach); Index: projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211_misc.c =================================================================== --- projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211_misc.c (revision 235262) +++ projects/nand/sys/dev/ath/ath_hal/ar5211/ar5211_misc.c (revision 235263) @@ -1,711 +1,721 @@ /* * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2002-2006 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * $FreeBSD$ */ #include "opt_ah.h" #include "ah.h" #include "ah_internal.h" #include "ar5211/ar5211.h" #include "ar5211/ar5211reg.h" #include "ar5211/ar5211phy.h" #include "ah_eeprom_v3.h" #define AR_NUM_GPIO 6 /* 6 GPIO bits */ #define AR_GPIOD_MASK 0x2f /* 6-bit mask */ void ar5211GetMacAddress(struct ath_hal *ah, uint8_t *mac) { struct ath_hal_5211 *ahp = AH5211(ah); OS_MEMCPY(mac, ahp->ah_macaddr, IEEE80211_ADDR_LEN); } HAL_BOOL ar5211SetMacAddress(struct ath_hal *ah, const uint8_t *mac) { struct ath_hal_5211 *ahp = AH5211(ah); OS_MEMCPY(ahp->ah_macaddr, mac, IEEE80211_ADDR_LEN); return AH_TRUE; } void ar5211GetBssIdMask(struct ath_hal *ah, uint8_t *mask) { static const uint8_t ones[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; OS_MEMCPY(mask, ones, IEEE80211_ADDR_LEN); } HAL_BOOL ar5211SetBssIdMask(struct ath_hal *ah, const uint8_t *mask) { return AH_FALSE; } /* * Read 16 bits of data from the specified EEPROM offset. */ HAL_BOOL ar5211EepromRead(struct ath_hal *ah, u_int off, uint16_t *data) { OS_REG_WRITE(ah, AR_EEPROM_ADDR, off); OS_REG_WRITE(ah, AR_EEPROM_CMD, AR_EEPROM_CMD_READ); if (!ath_hal_wait(ah, AR_EEPROM_STS, AR_EEPROM_STS_READ_COMPLETE | AR_EEPROM_STS_READ_ERROR, AR_EEPROM_STS_READ_COMPLETE)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: read failed for entry 0x%x\n", __func__, off); return AH_FALSE; } *data = OS_REG_READ(ah, AR_EEPROM_DATA) & 0xffff; return AH_TRUE; } #ifdef AH_SUPPORT_WRITE_EEPROM /* * Write 16 bits of data to the specified EEPROM offset. */ HAL_BOOL ar5211EepromWrite(struct ath_hal *ah, u_int off, uint16_t data) { return AH_FALSE; } #endif /* AH_SUPPORT_WRITE_EEPROM */ /* * Attempt to change the card's operating regulatory domain to the given value */ HAL_BOOL ar5211SetRegulatoryDomain(struct ath_hal *ah, uint16_t regDomain, HAL_STATUS *status) { HAL_STATUS ecode; if (AH_PRIVATE(ah)->ah_currentRD == regDomain) { ecode = HAL_EINVAL; goto bad; } /* * Check if EEPROM is configured to allow this; must * be a proper version and the protection bits must * permit re-writing that segment of the EEPROM. */ if (ath_hal_eepromGetFlag(ah, AR_EEP_WRITEPROTECT)) { ecode = HAL_EEWRITE; goto bad; } #ifdef AH_SUPPORT_WRITE_REGDOMAIN if (ar5211EepromWrite(ah, AR_EEPROM_REG_DOMAIN, regDomain)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: set regulatory domain to %u (0x%x)\n", __func__, regDomain, regDomain); AH_PRIVATE(ah)->ah_currentRD = regDomain; return AH_TRUE; } #endif ecode = HAL_EIO; bad: if (status) *status = ecode; return AH_FALSE; } /* * Return the wireless modes (a,b,g,t) supported by hardware. * * This value is what is actually supported by the hardware * and is unaffected by regulatory/country code settings. * */ u_int ar5211GetWirelessModes(struct ath_hal *ah) { u_int mode = 0; if (ath_hal_eepromGetFlag(ah, AR_EEP_AMODE)) { mode = HAL_MODE_11A; if (!ath_hal_eepromGetFlag(ah, AR_EEP_TURBO5DISABLE)) mode |= HAL_MODE_TURBO | HAL_MODE_108A; } if (ath_hal_eepromGetFlag(ah, AR_EEP_BMODE)) mode |= HAL_MODE_11B; return mode; } #if 0 HAL_BOOL ar5211GetTurboDisable(struct ath_hal *ah) { return (AH5211(ah)->ah_turboDisable != 0); } #endif /* * Called if RfKill is supported (according to EEPROM). Set the interrupt and * GPIO values so the ISR can disable RF on a switch signal */ void ar5211EnableRfKill(struct ath_hal *ah) { uint16_t rfsilent = AH_PRIVATE(ah)->ah_rfsilent; int select = MS(rfsilent, AR_EEPROM_RFSILENT_GPIO_SEL); int polarity = MS(rfsilent, AR_EEPROM_RFSILENT_POLARITY); /* * Configure the desired GPIO port for input * and enable baseband rf silence.
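 * (Both the GPIO select line and the assertion polarity come from the
 * AR_EEP_RFSILENT EEPROM word, unpacked with MS() above.)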
*/ ar5211GpioCfgInput(ah, select); OS_REG_SET_BIT(ah, AR_PHY_BASE, 0x00002000); /* * If radio disable switch connection to GPIO bit x is enabled * program GPIO interrupt. * If rfkill bit on eeprom is 1, setupeeprommap routine has already * verified that it is a later version of eeprom, it has a place for * rfkill bit and it is set to 1, indicating that GPIO bit x hardware * connection is present. */ ar5211GpioSetIntr(ah, select, (ar5211GpioGet(ah, select) != polarity)); } /* * Configure GPIO Output lines */ HAL_BOOL ar5211GpioCfgOutput(struct ath_hal *ah, uint32_t gpio, HAL_GPIO_MUX_TYPE type) { uint32_t reg; HALASSERT(gpio < AR_NUM_GPIO); reg = OS_REG_READ(ah, AR_GPIOCR); reg &= ~(AR_GPIOCR_0_CR_A << (gpio * AR_GPIOCR_CR_SHIFT)); reg |= AR_GPIOCR_0_CR_A << (gpio * AR_GPIOCR_CR_SHIFT); OS_REG_WRITE(ah, AR_GPIOCR, reg); return AH_TRUE; } /* * Configure GPIO Input lines */ HAL_BOOL ar5211GpioCfgInput(struct ath_hal *ah, uint32_t gpio) { uint32_t reg; HALASSERT(gpio < AR_NUM_GPIO); reg = OS_REG_READ(ah, AR_GPIOCR); reg &= ~(AR_GPIOCR_0_CR_A << (gpio * AR_GPIOCR_CR_SHIFT)); reg |= AR_GPIOCR_0_CR_N << (gpio * AR_GPIOCR_CR_SHIFT); OS_REG_WRITE(ah, AR_GPIOCR, reg); return AH_TRUE; } /* * Once configured for I/O - set output lines */ HAL_BOOL ar5211GpioSet(struct ath_hal *ah, uint32_t gpio, uint32_t val) { uint32_t reg; HALASSERT(gpio < AR_NUM_GPIO); reg = OS_REG_READ(ah, AR_GPIODO); reg &= ~(1 << gpio); reg |= (val&1) << gpio; OS_REG_WRITE(ah, AR_GPIODO, reg); return AH_TRUE; } /* * Once configured for I/O - get input lines */ uint32_t ar5211GpioGet(struct ath_hal *ah, uint32_t gpio) { if (gpio < AR_NUM_GPIO) { uint32_t val = OS_REG_READ(ah, AR_GPIODI); val = ((val & AR_GPIOD_MASK) >> gpio) & 0x1; return val; } else { return 0xffffffff; } } /* * Set the GPIO 0 Interrupt (gpio is ignored) */ void ar5211GpioSetIntr(struct ath_hal *ah, u_int gpio, uint32_t ilevel) { uint32_t val = OS_REG_READ(ah, AR_GPIOCR); /* Clear the bits that we will modify. */ val &= ~(AR_GPIOCR_INT_SEL0 | AR_GPIOCR_INT_SELH | AR_GPIOCR_INT_ENA | AR_GPIOCR_0_CR_A); val |= AR_GPIOCR_INT_SEL0 | AR_GPIOCR_INT_ENA; if (ilevel) val |= AR_GPIOCR_INT_SELH; /* Don't need to change anything for low level interrupt. */ OS_REG_WRITE(ah, AR_GPIOCR, val); /* Change the interrupt mask. */ ar5211SetInterrupts(ah, AH5211(ah)->ah_maskReg | HAL_INT_GPIO); } /* * Change the LED blinking pattern to correspond to the connectivity */ void ar5211SetLedState(struct ath_hal *ah, HAL_LED_STATE state) { static const uint32_t ledbits[8] = { AR_PCICFG_LEDCTL_NONE|AR_PCICFG_LEDMODE_PROP, /* HAL_LED_INIT */ AR_PCICFG_LEDCTL_PEND|AR_PCICFG_LEDMODE_PROP, /* HAL_LED_SCAN */ AR_PCICFG_LEDCTL_PEND|AR_PCICFG_LEDMODE_PROP, /* HAL_LED_AUTH */ AR_PCICFG_LEDCTL_ASSOC|AR_PCICFG_LEDMODE_PROP,/* HAL_LED_ASSOC*/ AR_PCICFG_LEDCTL_ASSOC|AR_PCICFG_LEDMODE_PROP,/* HAL_LED_RUN */ AR_PCICFG_LEDCTL_NONE|AR_PCICFG_LEDMODE_RAND, AR_PCICFG_LEDCTL_NONE|AR_PCICFG_LEDMODE_RAND, AR_PCICFG_LEDCTL_NONE|AR_PCICFG_LEDMODE_RAND, }; OS_REG_WRITE(ah, AR_PCICFG, (OS_REG_READ(ah, AR_PCICFG) &~ (AR_PCICFG_LEDCTL | AR_PCICFG_LEDMODE)) | ledbits[state & 0x7] ); } /* * Change association related fields programmed into the hardware. * Writing a valid BSSID to the hardware effectively enables the hardware * to synchronize its TSF to the correct beacons and receive frames coming * from that BSSID. It is called by the SME JOIN operation. 
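 * The BSSID occupies AR_BSS_ID0 and the low 16 bits of AR_BSS_ID1;
 * the 14-bit association ID is packed into the upper bits of
 * AR_BSS_ID1 by the second register write below.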
*/ void ar5211WriteAssocid(struct ath_hal *ah, const uint8_t *bssid, uint16_t assocId) { struct ath_hal_5211 *ahp = AH5211(ah); /* XXX save bssid for possible re-use on reset */ OS_MEMCPY(ahp->ah_bssid, bssid, IEEE80211_ADDR_LEN); OS_REG_WRITE(ah, AR_BSS_ID0, LE_READ_4(ahp->ah_bssid)); OS_REG_WRITE(ah, AR_BSS_ID1, LE_READ_2(ahp->ah_bssid+4) | ((assocId & 0x3fff)<<AR_BSS_ID1_AID_S)); } uint64_t ar5211GetTsf64(struct ath_hal *ah) { uint32_t low1, low2, u32; /* sync multi-word read */ low1 = OS_REG_READ(ah, AR_TSF_L32); u32 = OS_REG_READ(ah, AR_TSF_U32); low2 = OS_REG_READ(ah, AR_TSF_L32); if (low2 < low1) { /* rolled over */ u32 = OS_REG_READ(ah, AR_TSF_U32); } return (((uint64_t) u32) << 32) | ((uint64_t) low2); } uint32_t ar5211GetTsf32(struct ath_hal *ah) { return OS_REG_READ(ah, AR_TSF_L32); } void ar5211ResetTsf(struct ath_hal *ah) { OS_REG_WRITE(ah, AR_BEACON, OS_REG_READ(ah, AR_BEACON) | AR_BEACON_RESET_TSF); } /* * Grab a semi-random value from hardware registers - may not * change often. */ uint32_t ar5211GetRandomSeed(struct ath_hal *ah) { uint32_t nf; nf = (OS_REG_READ(ah, AR_PHY_BASE + (25 << 2)) >> 19) & 0x1ff; if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1); return (OS_REG_READ(ah, AR_TSF_U32) ^ OS_REG_READ(ah, AR_TSF_L32) ^ nf); } /* * Detect if our card is present */ HAL_BOOL ar5211DetectCardPresent(struct ath_hal *ah) { uint16_t macVersion, macRev; uint32_t v; /* * Read the Silicon Revision register and compare that * to what we read at attach time. If the same, we say * a card/device is present. */ v = OS_REG_READ(ah, AR_SREV) & AR_SREV_ID_M; macVersion = v >> AR_SREV_ID_S; macRev = v & AR_SREV_REVISION_M; return (AH_PRIVATE(ah)->ah_macVersion == macVersion && AH_PRIVATE(ah)->ah_macRev == macRev); } /* * Update MIB Counters */ void ar5211UpdateMibCounters(struct ath_hal *ah, HAL_MIB_STATS *stats) { stats->ackrcv_bad += OS_REG_READ(ah, AR_ACK_FAIL); stats->rts_bad += OS_REG_READ(ah, AR_RTS_FAIL); stats->fcs_bad += OS_REG_READ(ah, AR_FCS_FAIL); stats->rts_good += OS_REG_READ(ah, AR_RTS_OK); stats->beacons += OS_REG_READ(ah, AR_BEACON_CNT); } HAL_BOOL ar5211SetSifsTime(struct ath_hal *ah, u_int us) { struct ath_hal_5211 *ahp = AH5211(ah); if (us > ath_hal_mac_usec(ah, 0xffff)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad SIFS time %u\n", __func__, us); ahp->ah_sifstime = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_WRITE(ah, AR_D_GBL_IFS_SIFS, ath_hal_mac_clks(ah, us)); ahp->ah_sifstime = us; return AH_TRUE; } } u_int ar5211GetSifsTime(struct ath_hal *ah) { u_int clks = OS_REG_READ(ah, AR_D_GBL_IFS_SIFS) & 0xffff; return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } HAL_BOOL ar5211SetSlotTime(struct ath_hal *ah, u_int us) { struct ath_hal_5211 *ahp = AH5211(ah); if (us < HAL_SLOT_TIME_9 || us > ath_hal_mac_usec(ah, 0xffff)) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad slot time %u\n", __func__, us); ahp->ah_slottime = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath_hal_mac_clks(ah, us)); ahp->ah_slottime = us; return AH_TRUE; } } u_int ar5211GetSlotTime(struct ath_hal *ah) { u_int clks = OS_REG_READ(ah, AR_D_GBL_IFS_SLOT) & 0xffff; return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } HAL_BOOL ar5211SetAckTimeout(struct ath_hal *ah, u_int us) { struct ath_hal_5211 *ahp = AH5211(ah); if (us > ath_hal_mac_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad ack timeout %u\n", __func__, us); ahp->ah_acktimeout = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, ath_hal_mac_clks(ah, us)); ahp->ah_acktimeout = us; return AH_TRUE; } } u_int ar5211GetAckTimeout(struct ath_hal *ah) { u_int clks = MS(OS_REG_READ(ah, AR_TIME_OUT), AR_TIME_OUT_ACK); return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } u_int ar5211GetAckCTSRate(struct ath_hal *ah) { return ((AH5211(ah)->ah_staId1Defaults & AR_STA_ID1_ACKCTS_6MB) == 0); } HAL_BOOL ar5211SetAckCTSRate(struct ath_hal *ah, u_int high) { struct ath_hal_5211 *ahp = AH5211(ah); if (high) { OS_REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_ACKCTS_6MB); ahp->ah_staId1Defaults &=
~AR_STA_ID1_ACKCTS_6MB; } else { OS_REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_ACKCTS_6MB); ahp->ah_staId1Defaults |= AR_STA_ID1_ACKCTS_6MB; } return AH_TRUE; } HAL_BOOL ar5211SetCTSTimeout(struct ath_hal *ah, u_int us) { struct ath_hal_5211 *ahp = AH5211(ah); if (us > ath_hal_mac_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad cts timeout %u\n", __func__, us); ahp->ah_ctstimeout = (u_int) -1; /* restore default handling */ return AH_FALSE; } else { /* convert to system clocks */ OS_REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, ath_hal_mac_clks(ah, us)); ahp->ah_ctstimeout = us; return AH_TRUE; } } u_int ar5211GetCTSTimeout(struct ath_hal *ah) { u_int clks = MS(OS_REG_READ(ah, AR_TIME_OUT), AR_TIME_OUT_CTS); return ath_hal_mac_usec(ah, clks); /* convert from system clocks */ } HAL_BOOL ar5211SetDecompMask(struct ath_hal *ah, uint16_t keyidx, int en) { /* nothing to do */ return AH_TRUE; } void ar5211SetCoverageClass(struct ath_hal *ah, uint8_t coverageclass, int now) { } /* * Control Adaptive Noise Immunity Parameters */ HAL_BOOL ar5211AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param) { return AH_FALSE; } void ar5211AniPoll(struct ath_hal *ah, const struct ieee80211_channel *chan) { } void ar5211RxMonitor(struct ath_hal *ah, const HAL_NODE_STATS *stats, const struct ieee80211_channel *chan) { } void ar5211MibEvent(struct ath_hal *ah, const HAL_NODE_STATS *stats) { } /* * Get the rssi of the frame currently being received. */ uint32_t ar5211GetCurRssi(struct ath_hal *ah) { return (OS_REG_READ(ah, AR_PHY_CURRENT_RSSI) & 0xff); } u_int ar5211GetDefAntenna(struct ath_hal *ah) { return (OS_REG_READ(ah, AR_DEF_ANTENNA) & 0x7); } void ar5211SetDefAntenna(struct ath_hal *ah, u_int antenna) { OS_REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); } HAL_ANT_SETTING ar5211GetAntennaSwitch(struct ath_hal *ah) { return AH5211(ah)->ah_diversityControl; } HAL_BOOL ar5211SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings) { const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan; if (chan == AH_NULL) { AH5211(ah)->ah_diversityControl = settings; return AH_TRUE; } return ar5211SetAntennaSwitchInternal(ah, settings, chan); } HAL_STATUS ar5211GetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type, uint32_t capability, uint32_t *result) { switch (type) { case HAL_CAP_CIPHER: /* cipher handled in hardware */ switch (capability) { case HAL_CIPHER_AES_OCB: case HAL_CIPHER_WEP: case HAL_CIPHER_CLR: return HAL_OK; default: return HAL_ENOTSUPP; } default: return ath_hal_getcapability(ah, type, capability, result); } } HAL_BOOL ar5211SetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type, uint32_t capability, uint32_t setting, HAL_STATUS *status) { switch (type) { case HAL_CAP_DIAG: /* hardware diagnostic support */ /* * NB: could split this up into virtual capabilities, * (e.g. 1 => ACK, 2 => CTS, etc.) but it hardly * seems worth the additional complexity.
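 * A caller hands the raw diagnostic register value in 'setting'; e.g.
 * the sketch below sets exactly the two ACK+CTS bits that the 0x6
 * mask preserves in non-AH_DEBUG builds:
 *
 *	ar5211SetCapability(ah, HAL_CAP_DIAG, 0, 0x6, AH_NULL);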
*/ #ifdef AH_DEBUG AH_PRIVATE(ah)->ah_diagreg = setting; #else AH_PRIVATE(ah)->ah_diagreg = setting & 0x6; /* ACK+CTS */ #endif OS_REG_WRITE(ah, AR_DIAG_SW, AH_PRIVATE(ah)->ah_diagreg); return AH_TRUE; default: return ath_hal_setcapability(ah, type, capability, setting, status); } } HAL_BOOL ar5211GetDiagState(struct ath_hal *ah, int request, const void *args, uint32_t argsize, void **result, uint32_t *resultsize) { struct ath_hal_5211 *ahp = AH5211(ah); (void) ahp; if (ath_hal_getdiagstate(ah, request, args, argsize, result, resultsize)) return AH_TRUE; switch (request) { case HAL_DIAG_EEPROM: return ath_hal_eepromDiag(ah, request, args, argsize, result, resultsize); case HAL_DIAG_RFGAIN: *result = &ahp->ah_gainValues; *resultsize = sizeof(GAIN_VALUES); return AH_TRUE; case HAL_DIAG_RFGAIN_CURSTEP: *result = __DECONST(void *, ahp->ah_gainValues.currStep); *resultsize = (*result == AH_NULL) ? 0 : sizeof(GAIN_OPTIMIZATION_STEP); return AH_TRUE; } return AH_FALSE; } /* * Return what percentage of the extension channel is busy. * This is always disabled for AR5211 series NICs. */ uint32_t ar5211Get11nExtBusy(struct ath_hal *ah) { return (0); } /* * There's no channel survey support for the AR5211. */ HAL_BOOL ar5211GetMibCycleCounts(struct ath_hal *ah, HAL_SURVEY_SAMPLE *hsample) { return (AH_FALSE); } + +void +ar5211EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe) +{ +} + +void +ar5211GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe) +{ +} Index: projects/nand/sys/dev/dc/if_dc.c =================================================================== --- projects/nand/sys/dev/dc/if_dc.c (revision 235262) +++ projects/nand/sys/dev/dc/if_dc.c (revision 235263) @@ -1,4140 +1,4141 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * DEC "tulip" clone ethernet driver. 
Supports the DEC/Intel 21143 * series chips and several workalikes including the following: * * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) * Lite-On 82c168/82c169 PNIC (www.litecom.com) * ASIX Electronics AX88140A (www.asix.com.tw) * ASIX Electronics AX88141 (www.asix.com.tw) * ADMtek AL981 (www.admtek.com.tw) * ADMtek AN983 (www.admtek.com.tw) * ADMtek CardBus AN985 (www.admtek.com.tw) * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek CardBus AN985 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) * Accton EN1217 (www.accton.com) * Xircom X3201 (www.xircom.com) * Abocom FE2500 * Conexant LANfinity (www.conexant.com) * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) * * Datasheets for the 21143 are available at developer.intel.com. * Datasheets for the clone parts can be found at their respective sites. * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) * The PNIC II is essentially a Macronix 98715A chip; the only difference * worth noting is that its multicast hash table is only 128 bits wide * instead of 512. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Intel 21143 is the successor to the DEC 21140. It is basically * the same as the 21140 but with a few new features. The 21143 supports * four kinds of media attachments: * * o MII port, for 10Mbps and 100Mbps support and NWAY * autonegotiation provided by an external PHY. * o SYM port, for symbol mode 100Mbps support. * o 10baseT port. * o AUI/BNC port. * * The 100Mbps SYM port and 10baseT port can be used together in * combination with the internal NWAY support to create a 10/100 * autosensing configuration. * * Note that not all tulip workalikes are handled in this driver: we only * deal with those which are relatively well behaved. The Winbond is * handled separately due to its different register offsets and the * special handling needed for its various bugs. The PNIC is handled * here, but I'm not thrilled about it. * * All of the workalike chips use some form of MII transceiver support * with the exception of the Macronix chips, which also have a SYM port. * The ASIX AX88140A is also documented to have a SYM port, but all * the cards I've seen use an MII transceiver, probably because the * AX88140A doesn't support internal NWAY. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DC_USEIOSPACE #include #ifdef __sparc64__ #include #include #endif MODULE_DEPEND(dc, pci, 1, 1, 1); MODULE_DEPEND(dc, ether, 1, 1, 1); MODULE_DEPEND(dc, miibus, 1, 1, 1); /* * "device miibus" is required in kernel config. See GENERIC if you get * errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names.
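 * (Entries that name a specific minimum revision must precede the same
 * device's generic revision-0 entry: the probe evidently walks the
 * table in order and takes the first entry whose device ID and
 * revision both match.)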
*/ static const struct dc_type dc_devs[] = { { DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0, "Intel 21143 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0, "Davicom DM9009 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0, "Davicom DM9100 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A, "Davicom DM9102A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0, "Davicom DM9102 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0, "ADMtek AL981 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983), 0, "ADMtek AN983 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0, "ADMtek AN985 CardBus 10/100BaseTX or clone" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0, "ADMtek ADM9511 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0, "ADMtek ADM9513 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141, "ASIX AX88141 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0, "ASIX AX88140A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A, "Macronix 98713A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0, "Macronix 98713 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A, "Compex RL100-TX 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0, "Compex RL100-TX 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725, "Macronix 98725 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C, "Macronix 98715AEC-C 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0, "Macronix 98715/98715A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0, "Macronix 98727/98732 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0, "LC82C115 PNIC II 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169, "82c169 PNIC 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0, "82c168 PNIC 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0, "Accton EN1217 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0, "Accton EN2242 MiniPCI 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0, "Xircom X3201 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0, "Neteasy DRP-32TXD Cardbus 10/100" }, { DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0, "Abocom FE2500 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0, "Abocom FE2500MX 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0, "Conexant LANfinity MiniPCI 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0, "Hawking CB102 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0, "PlaneX FNW-3602-T CardBus 10/100" }, { DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0, "3Com OfficeConnect 10/100B" }, { DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0, "Microsoft MN-120 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0, "Microsoft MN-130 10/100" }, { DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0, "Linksys PCMPC200 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0, "Linksys PCMPC200 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261), 0, "ULi M5261 FastEthernet"
}, { DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5263), 0, "ULi M5263 FastEthernet" }, { 0, 0, NULL } }; static int dc_probe(device_t); static int dc_attach(device_t); static int dc_detach(device_t); static int dc_suspend(device_t); static int dc_resume(device_t); static const struct dc_type *dc_devtype(device_t); static void dc_discard_rxbuf(struct dc_softc *, int); static int dc_newbuf(struct dc_softc *, int); static int dc_encap(struct dc_softc *, struct mbuf **); static void dc_pnic_rx_bug_war(struct dc_softc *, int); static int dc_rx_resync(struct dc_softc *); static int dc_rxeof(struct dc_softc *); static void dc_txeof(struct dc_softc *); static void dc_tick(void *); static void dc_tx_underrun(struct dc_softc *); static void dc_intr(void *); static void dc_start(struct ifnet *); static void dc_start_locked(struct ifnet *); static int dc_ioctl(struct ifnet *, u_long, caddr_t); static void dc_init(void *); static void dc_init_locked(struct dc_softc *); static void dc_stop(struct dc_softc *); static void dc_watchdog(void *); static int dc_shutdown(device_t); static int dc_ifmedia_upd(struct ifnet *); static int dc_ifmedia_upd_locked(struct dc_softc *); static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int dc_dma_alloc(struct dc_softc *); static void dc_dma_free(struct dc_softc *); static void dc_dma_map_addr(void *, bus_dma_segment_t *, int, int); static void dc_delay(struct dc_softc *); static void dc_eeprom_idle(struct dc_softc *); static void dc_eeprom_putbyte(struct dc_softc *, int); static void dc_eeprom_getword(struct dc_softc *, int, uint16_t *); static void dc_eeprom_getword_pnic(struct dc_softc *, int, uint16_t *); static void dc_eeprom_getword_xircom(struct dc_softc *, int, uint16_t *); static void dc_eeprom_width(struct dc_softc *); static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int); static int dc_miibus_readreg(device_t, int, int); static int dc_miibus_writereg(device_t, int, int, int); static void dc_miibus_statchg(device_t); static void dc_miibus_mediainit(device_t); static void dc_setcfg(struct dc_softc *, int); static void dc_netcfg_wait(struct dc_softc *); static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *); static uint32_t dc_mchash_be(const uint8_t *); static void dc_setfilt_21143(struct dc_softc *); static void dc_setfilt_asix(struct dc_softc *); static void dc_setfilt_admtek(struct dc_softc *); static void dc_setfilt_uli(struct dc_softc *); static void dc_setfilt_xircom(struct dc_softc *); static void dc_setfilt(struct dc_softc *); static void dc_reset(struct dc_softc *); static int dc_list_rx_init(struct dc_softc *); static int dc_list_tx_init(struct dc_softc *); static int dc_read_srom(struct dc_softc *, int); static int dc_parse_21143_srom(struct dc_softc *); static int dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *); static int dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *); static int dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *); static void dc_apply_fixup(struct dc_softc *, int); static int dc_check_multiport(struct dc_softc *); /* * MII bit-bang glue */ static uint32_t dc_mii_bitbang_read(device_t); static void dc_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops dc_mii_bitbang_ops = { dc_mii_bitbang_read, dc_mii_bitbang_write, { DC_SIO_MII_DATAOUT, /* MII_BIT_MDO */ DC_SIO_MII_DATAIN, /* MII_BIT_MDI */ DC_SIO_MII_CLK, /* MII_BIT_MDC */ 0, /* MII_BIT_DIR_HOST_PHY */ DC_SIO_MII_DIR, /* MII_BIT_DIR_PHY_HOST */ } }; #ifdef DC_USEIOSPACE #define 
DC_RES SYS_RES_IOPORT #define DC_RID DC_PCI_CFBIO #else #define DC_RES SYS_RES_MEMORY #define DC_RID DC_PCI_CFBMA #endif static device_method_t dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), DEVMETHOD(device_suspend, dc_suspend), DEVMETHOD(device_resume, dc_resume), DEVMETHOD(device_shutdown, dc_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, dc_miibus_readreg), DEVMETHOD(miibus_writereg, dc_miibus_writereg), DEVMETHOD(miibus_statchg, dc_miibus_statchg), DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), DEVMETHOD_END }; static driver_t dc_driver = { "dc", dc_methods, sizeof(struct dc_softc) }; static devclass_t dc_devclass; -DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); -DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); +DRIVER_MODULE_ORDERED(dc, pci, dc_driver, dc_devclass, NULL, NULL, + SI_ORDER_ANY); +DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, NULL, NULL); #define DC_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define DC_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) static void dc_delay(struct dc_softc *sc) { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, DC_BUSCTL); } static void dc_eeprom_width(struct dc_softc *sc) { int i; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 3; i--;) { if (6 & (1 << i)) DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); else DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } for (i = 1; i <= 12; i++) { DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); break; } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); if (i < 4 || i > 12) sc->dc_romwidth = 6; else sc->dc_romwidth = i; /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); } static void dc_eeprom_idle(struct dc_softc *sc) { int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 0; i < 25; i++) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); CSR_WRITE_4(sc, DC_SIO, 0x00000000); } /* * Send a read command and address to the EEPROM, check for ACK. 
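 * The three command bits shifted out are DC_EECMD_READ >> 6 (the 93Cxx
 * start bit followed by the two-bit read opcode), the same 110 pattern
 * that dc_eeprom_width() above hard-codes as the literal 6.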
*/ static void dc_eeprom_putbyte(struct dc_softc *sc, int addr) { int d, i; d = DC_EECMD_READ >> 6; for (i = 3; i--; ) { if (d & (1 << i)) DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); else DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } /* * Feed in each bit and strobe the clock. */ for (i = sc->dc_romwidth; i--;) { if (addr & (1 << i)) { SIO_SET(DC_SIO_EE_DATAIN); } else { SIO_CLR(DC_SIO_EE_DATAIN); } dc_delay(sc); SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } } /* * Read a word of data stored in the EEPROM at address 'addr.' * The PNIC 82c168/82c169 has its own non-standard way to read * the EEPROM. */ static void dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, uint16_t *dest) { int i; uint32_t r; CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); r = CSR_READ_4(sc, DC_SIO); if (!(r & DC_PN_SIOCTL_BUSY)) { *dest = (uint16_t)(r & 0xFFFF); return; } } } /* * Read a word of data stored in the EEPROM at address 'addr.' * The Xircom X3201 has its own non-standard way to read * the EEPROM, too. */ static void dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, uint16_t *dest) { SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); addr *= 2; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest = (uint16_t)CSR_READ_4(sc, DC_SIO) & 0xff; addr += 1; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest |= ((uint16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void dc_eeprom_getword(struct dc_softc *sc, int addr, uint16_t *dest) { int i; uint16_t word = 0; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* * Send address of word we want to read. */ dc_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) word |= i; dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be) { int i; uint16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { if (DC_IS_PNIC(sc)) dc_eeprom_getword_pnic(sc, off + i, &word); else if (DC_IS_XIRCOM(sc)) dc_eeprom_getword_xircom(sc, off + i, &word); else dc_eeprom_getword(sc, off + i, &word); ptr = (uint16_t *)(dest + (i * 2)); if (be) *ptr = be16toh(word); else *ptr = le16toh(word); } } /* * Write the MII serial port for the MII bit-bang module. */ static void dc_mii_bitbang_write(device_t dev, uint32_t val) { struct dc_softc *sc; sc = device_get_softc(dev); CSR_WRITE_4(sc, DC_SIO, val); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* * Read the MII serial port for the MII bit-bang module. 
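 * (The CSR_BARRIER_4() calls in both bit-bang accessors keep the DC_SIO
 * accesses strictly ordered so the bus does not reorder the generated
 * MII waveform.)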
*/ static uint32_t dc_mii_bitbang_read(device_t dev) { struct dc_softc *sc; uint32_t val; sc = device_get_softc(dev); val = CSR_READ_4(sc, DC_SIO); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } static int dc_miibus_readreg(device_t dev, int phy, int reg) { struct dc_softc *sc; int i, rval, phy_reg = 0; sc = device_get_softc(dev); if (sc->dc_pmode != DC_PMODE_MII) { if (phy == (MII_NPHY - 1)) { switch (reg) { case MII_BMSR: /* * Fake something to make the probe * code think there's a PHY here. */ return (BMSR_MEDIAMASK); break; case MII_PHYIDR1: if (DC_IS_PNIC(sc)) return (DC_VENDORID_LO); return (DC_VENDORID_DEC); break; case MII_PHYIDR2: if (DC_IS_PNIC(sc)) return (DC_DEVICEID_82C168); return (DC_DEVICEID_21143); break; default: return (0); break; } } else return (0); } if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | (phy << 23) | (reg << 18)); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); rval = CSR_READ_4(sc, DC_PN_MII); if (!(rval & DC_PN_MII_BUSY)) { rval &= 0xFFFF; return (rval == 0xFFFF ? 0 : rval); } } return (0); } if (sc->dc_type == DC_TYPE_ULI_M5263) { CSR_WRITE_4(sc, DC_ROM, ((phy << DC_ULI_PHY_ADDR_SHIFT) & DC_ULI_PHY_ADDR_MASK) | ((reg << DC_ULI_PHY_REG_SHIFT) & DC_ULI_PHY_REG_MASK) | DC_ULI_PHY_OP_READ); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); rval = CSR_READ_4(sc, DC_ROM); if ((rval & DC_ULI_PHY_OP_DONE) != 0) { return (rval & DC_ULI_PHY_DATA_MASK); } } if (i == DC_TIMEOUT) device_printf(dev, "phy read timed out\n"); return (0); } if (DC_IS_COMET(sc)) { switch (reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: device_printf(dev, "phy_read: bad phy register %x\n", reg); return (0); break; } rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; if (rval == 0xFFFF) return (0); return (rval); } if (sc->dc_type == DC_TYPE_98713) { phy_reg = CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } rval = mii_bitbang_readreg(dev, &dc_mii_bitbang_ops, phy, reg); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return (rval); } static int dc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct dc_softc *sc; int i, phy_reg = 0; sc = device_get_softc(dev); if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | (phy << 23) | (reg << 10) | data); for (i = 0; i < DC_TIMEOUT; i++) { if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) break; } return (0); } if (sc->dc_type == DC_TYPE_ULI_M5263) { CSR_WRITE_4(sc, DC_ROM, ((phy << DC_ULI_PHY_ADDR_SHIFT) & DC_ULI_PHY_ADDR_MASK) | ((reg << DC_ULI_PHY_REG_SHIFT) & DC_ULI_PHY_REG_MASK) | ((data << DC_ULI_PHY_DATA_SHIFT) & DC_ULI_PHY_DATA_MASK) | DC_ULI_PHY_OP_WRITE); DELAY(1); return (0); } if (DC_IS_COMET(sc)) { switch (reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: device_printf(dev, "phy_write: bad phy register %x\n", reg); return (0); break; } CSR_WRITE_4(sc, phy_reg, data); return (0); } if (sc->dc_type == DC_TYPE_98713) { phy_reg = 
CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } mii_bitbang_writereg(dev, &dc_mii_bitbang_ops, phy, reg, data); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return (0); } static void dc_miibus_statchg(device_t dev) { struct dc_softc *sc; struct ifnet *ifp; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); mii = device_get_softc(sc->dc_miibus); ifp = sc->dc_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { dc_setcfg(sc, ifm->ifm_media); return; } else if (!DC_IS_ADMTEK(sc)) dc_setcfg(sc, mii->mii_media_active); sc->dc_link = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->dc_link = 1; break; } } } /* * Special support for DM9102A cards with HomePNA PHYs. Note: * with the Davicom DM9102A/DM9801 eval board that I have, it seems * to be impossible to talk to the management interface of the DM9801 * PHY (its MDIO pin is not connected to anything). Consequently, * the driver has to just 'know' about the additional mode and deal * with it itself. *sigh* */ static void dc_miibus_mediainit(device_t dev) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; int rev; rev = pci_get_revid(dev); sc = device_get_softc(dev); mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); } #define DC_BITS_512 9 #define DC_BITS_128 7 #define DC_BITS_64 6 static uint32_t dc_mchash_le(struct dc_softc *sc, const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_le(addr, ETHER_ADDR_LEN); /* * The hash table on the PNIC II and the MX98715AEC-C/D/E * chips is only 128 bits wide. */ if (sc->dc_flags & DC_128BIT_HASH) return (crc & ((1 << DC_BITS_128) - 1)); /* The hash table on the MX98715BEC is only 64 bits wide. */ if (sc->dc_flags & DC_64BIT_HASH) return (crc & ((1 << DC_BITS_64) - 1)); /* Xircom's hash filtering table is different (read: weird) */ /* Xircom uses the LEAST significant bits */ if (DC_IS_XIRCOM(sc)) { if ((crc & 0x180) == 0x180) return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); else return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + (12 << 4)); } return (crc & ((1 << DC_BITS_512) - 1)); } /* * Calculate CRC of a multicast group address, return the lower 6 bits. */ static uint32_t dc_mchash_be(const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_be(addr, ETHER_ADDR_LEN); /* Return the filter bit position. */ return ((crc >> 26) & 0x0000003F); } /* * 21143-style RX filter setup routine. Filter programming is done by * downloading a special setup frame into the TX engine. 21143, Macronix, * PNIC, PNIC II and Davicom chips are programmed this way. * * We always program the chip using 'hash perfect' mode, i.e. one perfect * address (our node address) and a 512-bit hash filter for multicast * frames. We also sneak the broadcast address into the hash filter since * we need that too. 
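 * Each hash bit lands in the setup frame as bit (h & 0xF) of 32-bit
 * word (h >> 4), i.e. only the low 16 bits of every setup frame word
 * are significant; words 39-41 carry the perfect-match station
 * address.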
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	struct dc_desc *sframe;
	uint32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = sc->dc_ifp;

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	if_maddr_runlock(ifp);

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	sp[39] = DC_SP_MAC(eaddr[0]);
	sp[40] = DC_SP_MAC(eaddr[1]);
	sp[41] = DC_SP_MAC(eaddr[2]);

	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	sc->dc_wdog_timer = 5;
}

static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[3] << 24 | eaddr[2] << 16 |
	    eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[5] << 8 | eaddr[4]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
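	/*
	 * The ADMtek multicast filter is a 64-bit hash split across two
	 * 32-bit registers: bits 0-31 go into MAR0 and bits 32-63 into
	 * MAR1, so e.g. a hash value of 37 sets bit 5 of MAR1.
	 */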
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (DC_IS_CENTAUR(sc))
			h = dc_mchash_le(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		else
			h = dc_mchash_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}

static void
dc_setfilt_asix(struct dc_softc *sc)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[1]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}

static void
dc_setfilt_uli(struct dc_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;
	uint32_t filter, *sp;
	uint8_t *ma;
	int i, mcnt;

	ifp = sc->dc_ifp;

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_PERFECT | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* Set station address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	*sp++ = DC_SP_MAC(eaddr[1] << 8 | eaddr[0]);
	*sp++ = DC_SP_MAC(eaddr[3] << 8 | eaddr[2]);
	*sp++ = DC_SP_MAC(eaddr[5] << 8 | eaddr[4]);

	/* Set broadcast address. */
	*sp++ = DC_SP_MAC(0xFFFF);
	*sp++ = DC_SP_MAC(0xFFFF);
	*sp++ = DC_SP_MAC(0xFFFF);

	/* Extract current filter configuration. */
	filter = CSR_READ_4(sc, DC_NETCFG);
	filter &= ~(DC_NETCFG_RX_PROMISC | DC_NETCFG_RX_ALLMULTI);

	/* Now build perfect filters. */
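	/*
	 * Unlike the hash-based filters above, the ULi parts use a table
	 * of perfect filter entries: each entry takes three setup-frame
	 * words (16 address bits per word via DC_SP_MAC). If there are
	 * more groups than DC_ULI_FILTER_NPERF slots we fall back to
	 * allmulti, and any unused slots are padded with the broadcast
	 * address below.
	 */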
	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (mcnt >= DC_ULI_FILTER_NPERF) {
			filter |= DC_NETCFG_RX_ALLMULTI;
			break;
		}
		ma = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		*sp++ = DC_SP_MAC(ma[1] << 8 | ma[0]);
		*sp++ = DC_SP_MAC(ma[3] << 8 | ma[2]);
		*sp++ = DC_SP_MAC(ma[5] << 8 | ma[4]);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	for (; mcnt < DC_ULI_FILTER_NPERF; mcnt++) {
		*sp++ = DC_SP_MAC(0xFFFF);
		*sp++ = DC_SP_MAC(0xFFFF);
		*sp++ = DC_SP_MAC(0xFFFF);
	}
	if (filter & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON))
		CSR_WRITE_4(sc, DC_NETCFG,
		    filter & ~(DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
	if (ifp->if_flags & IFF_PROMISC)
		filter |= DC_NETCFG_RX_PROMISC | DC_NETCFG_RX_ALLMULTI;
	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= DC_NETCFG_RX_ALLMULTI;
	CSR_WRITE_4(sc, DC_NETCFG,
	    filter & ~(DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
	if (filter & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON))
		CSR_WRITE_4(sc, DC_NETCFG, filter);

	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}

static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;
	uint32_t h, *sp;
	int i;

	ifp = sc->dc_ifp;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata.dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(DC_ADDR_LO(sc->dc_saddr));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	if_maddr_runlock(ifp);

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	sp[0] = DC_SP_MAC(eaddr[0]);
	sp[1] = DC_SP_MAC(eaddr[1]);
	sp[2] = DC_SP_MAC(eaddr[2]);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}

static void
dc_setfilt(struct dc_softc *sc)
{

	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_ULI(sc))
		dc_setfilt_uli(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}

static void
dc_netcfg_wait(struct dc_softc *sc)
{
	uint32_t isr;
	int i;

	for (i = 0; i < DC_TIMEOUT; i++) {
		isr = CSR_READ_4(sc, DC_ISR);
		if (isr & DC_ISR_TX_IDLE &&
		    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
		    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
			break;
		DELAY(10);
	}
	if (i == DC_TIMEOUT && bus_child_present(sc->dc_dev)) {
		if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
			device_printf(sc->dc_dev,
			    "%s: failed to force tx to idle state\n",
			    __func__);
		if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
		    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
		    !DC_HAS_BROKEN_RXSTATE(sc))
			device_printf(sc->dc_dev,
			    "%s: failed to force rx to idle state\n",
			    __func__);
	}
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int restart = 0, watchdogreg;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));
		dc_netcfg_wait(sc);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
			/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1.
			 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}

static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc) || DC_IS_ULI(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		device_printf(sc->dc_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0xFFFFFFFF);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}

static const struct dc_type *
dc_devtype(device_t dev)
{
	const struct dc_type *t;
	uint32_t devid;
	uint8_t rev;

	t = dc_devs;
	devid = pci_get_devid(dev);
	rev = pci_get_revid(dev);

	while (t->dc_name != NULL) {
		if (devid == t->dc_devid && rev >= t->dc_minrev)
			return (t);
		t++;
	}

	return (NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs.
 * The same is true for 98715/98715A chips and the 98725, as well as
 * the ASIX and ADMtek chips. In some cases, the exact chip revision
 * affects driver behavior.
 */
static int
dc_probe(device_t dev)
{
	const struct dc_type *t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	uint8_t *p;
	int i;
	uint32_t reg;

	m = sc->dc_mi;
	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}
}

static int
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL) {
		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
		return (ENOMEM);
	}
	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
	case DC_SIA_CODE_10BT:
		m->dc_media = IFM_10_T;
		break;
	case DC_SIA_CODE_10BT_FDX:
		m->dc_media = IFM_10_T | IFM_FDX;
		break;
	case DC_SIA_CODE_10B2:
		m->dc_media = IFM_10_2;
		break;
	case DC_SIA_CODE_10B5:
		m->dc_media = IFM_10_5;
		break;
	default:
		break;
	}

	/*
	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
	 * Things apparently already work for cards that do
	 * supply Media Specific Data.
	 */
	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (uint8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
	} else {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (uint8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
	}

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;
	return (0);
}

static int
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL) {
		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
		return (ENOMEM);
	}
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX | IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (uint8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;
	return (0);
}

static int
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	struct dc_mediainfo *m;
	uint8_t *p;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL) {
		device_printf(sc->dc_dev, "Could not allocate mediainfo\n");
		return (ENOMEM);
	}
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (uint8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return (0);
}

static int
dc_read_srom(struct dc_softc *sc, int bits)
{
	int size;

	size = DC_ROM_SIZE(bits);
	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->dc_srom == NULL) {
		device_printf(sc->dc_dev, "Could not allocate SROM buffer\n");
		return (ENOMEM);
	}
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
	return (0);
}

static int
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int error, have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Check whether we have an MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;

		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	error = 0;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			error = dc_decode_leaf_mii(sc,
			    (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				error = dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				error = dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return (error);
}

static void
dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1,
	    ("%s: wrong number of segments (%d)", __func__, nseg));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
dc_dma_alloc(struct dc_softc *sc)
{
	int error, i;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->dc_ptag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate parent DMA tag\n");
		goto fail;
	}

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DC_RX_LIST_SZ,
	    1, DC_RX_LIST_SZ, 0, NULL, NULL, &sc->dc_rx_ltag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create RX list DMA tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, DC_TX_LIST_SZ,
	    1, DC_TX_LIST_SZ, 0, NULL, NULL, &sc->dc_tx_ltag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create TX list DMA tag\n");
		goto fail;
	}

	/* RX descriptor list. */
	error = bus_dmamem_alloc(sc->dc_rx_ltag,
	    (void **)&sc->dc_ldata.dc_rx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->dc_rx_lmap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for RX list\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    sc->dc_ldata.dc_rx_list, DC_RX_LIST_SZ, dc_dma_map_addr,
	    &sc->dc_ldata.dc_rx_list_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to load DMA'able memory for RX list\n");
		goto fail;
	}
	/* TX descriptor list. */
	error = bus_dmamem_alloc(sc->dc_tx_ltag,
	    (void **)&sc->dc_ldata.dc_tx_list, BUS_DMA_NOWAIT |
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->dc_tx_lmap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for TX list\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    sc->dc_ldata.dc_tx_list, DC_TX_LIST_SZ, dc_dma_map_addr,
	    &sc->dc_ldata.dc_tx_list_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "cannot load DMA'able memory for TX list\n");
		goto fail;
	}

	/*
	 * Allocate a busdma tag and DMA safe memory for the multicast
	 * setup frame.
	 */
	error = bus_dma_tag_create(sc->dc_ptag, DC_LIST_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1,
	    DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0,
	    NULL, NULL, &sc->dc_stag);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create DMA tag for setup frame\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
	    BUS_DMA_NOWAIT, &sc->dc_smap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to allocate DMA'able memory for setup frame\n");
		goto fail;
	}
	error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
	    DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dc_dev,
		    "cannot load DMA'able memory for setup frame\n");
		goto fail;
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(sc->dc_ptag, DC_RXBUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->dc_rx_mtag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create RX mbuf tag\n");
		goto fail;
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(sc->dc_ptag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * DC_MAXFRAGS, DC_MAXFRAGS, MCLBYTES, 0,
	    NULL, NULL, &sc->dc_tx_mtag);
	if (error) {
		device_printf(sc->dc_dev, "failed to create TX mbuf tag\n");
		goto fail;
	}

	/* Create the TX/RX busdma maps. */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_tx_mtag, 0,
		    &sc->dc_cdata.dc_tx_map[i]);
		if (error) {
			device_printf(sc->dc_dev,
			    "failed to create TX mbuf dmamap\n");
			goto fail;
		}
	}
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->dc_rx_mtag, 0,
		    &sc->dc_cdata.dc_rx_map[i]);
		if (error) {
			device_printf(sc->dc_dev,
			    "failed to create RX mbuf dmamap\n");
			goto fail;
		}
	}
	error = bus_dmamap_create(sc->dc_rx_mtag, 0, &sc->dc_sparemap);
	if (error) {
		device_printf(sc->dc_dev,
		    "failed to create spare RX mbuf dmamap\n");
		goto fail;
	}

fail:
	return (error);
}

static void
dc_dma_free(struct dc_softc *sc)
{
	int i;

	/* RX buffers. */
	if (sc->dc_rx_mtag != NULL) {
		for (i = 0; i < DC_RX_LIST_CNT; i++) {
			if (sc->dc_cdata.dc_rx_map[i] != NULL)
				bus_dmamap_destroy(sc->dc_rx_mtag,
				    sc->dc_cdata.dc_rx_map[i]);
		}
		if (sc->dc_sparemap != NULL)
			bus_dmamap_destroy(sc->dc_rx_mtag, sc->dc_sparemap);
		bus_dma_tag_destroy(sc->dc_rx_mtag);
	}

	/* TX buffers. */
	if (sc->dc_tx_mtag != NULL) {
		for (i = 0; i < DC_TX_LIST_CNT; i++) {
			if (sc->dc_cdata.dc_tx_map[i] != NULL)
				bus_dmamap_destroy(sc->dc_tx_mtag,
				    sc->dc_cdata.dc_tx_map[i]);
		}
		bus_dma_tag_destroy(sc->dc_tx_mtag);
	}

	/* RX descriptor list. */
	if (sc->dc_rx_ltag) {
		if (sc->dc_rx_lmap != NULL)
			bus_dmamap_unload(sc->dc_rx_ltag, sc->dc_rx_lmap);
		if (sc->dc_rx_lmap != NULL && sc->dc_ldata.dc_rx_list != NULL)
			bus_dmamem_free(sc->dc_rx_ltag,
			    sc->dc_ldata.dc_rx_list, sc->dc_rx_lmap);
		bus_dma_tag_destroy(sc->dc_rx_ltag);
	}

	/* TX descriptor list. */
	if (sc->dc_tx_ltag) {
		if (sc->dc_tx_lmap != NULL)
			bus_dmamap_unload(sc->dc_tx_ltag, sc->dc_tx_lmap);
		if (sc->dc_tx_lmap != NULL && sc->dc_ldata.dc_tx_list != NULL)
			bus_dmamem_free(sc->dc_tx_ltag,
			    sc->dc_ldata.dc_tx_list, sc->dc_tx_lmap);
		bus_dma_tag_destroy(sc->dc_tx_ltag);
	}

	/* Multicast setup frame. */
	if (sc->dc_stag) {
		if (sc->dc_smap != NULL)
			bus_dmamap_unload(sc->dc_stag, sc->dc_smap);
		if (sc->dc_smap != NULL && sc->dc_cdata.dc_sbuf != NULL)
			bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf,
			    sc->dc_smap);
		bus_dma_tag_destroy(sc->dc_stag);
	}
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
dc_attach(device_t dev)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	uint32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	struct dc_mediainfo *m;
	uint32_t reg, revision;
	uint16_t *srom;
	int error, mac_offset, n, phy, rid, tmp;
	uint8_t *mac;

	sc = device_get_softc(dev);
	sc->dc_dev = dev;

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_get_revid(dev);

	error = 0;
	/*
	 * Get the EEPROM width; the PNIC and Xircom have different
	 * EEPROM arrangements and are handled separately.
	 */
	if (sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) &&
	    sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201))
		dc_eeprom_width(sc);

	switch (sc->dc_info->dc_devid) {
	case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143):
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102):
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;

		/* Increase the latency timer value. */
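		/*
		 * (The PCI latency timer bounds how long the chip may
		 * hold the bus per bus-master burst; the store-and-forward
		 * DM9102 presumably benefits from the larger value
		 * written here.)
		 */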
		pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981):
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513):
	case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242):
	case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX):
	case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T):
	case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09):
		sc->dc_type = DC_TYPE_AN983;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		/* No need to read the SROM here; it is auto-loaded on reset. */
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713):
	case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP):
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217):
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
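		/*
		 * (With DC_128BIT_HASH set, dc_mchash_le() masks the CRC
		 * with (1 << DC_BITS_128) - 1, yielding indices 0-127.)
		 */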
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727):
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115):
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR |
		    DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
		break;
	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168):
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (sc->dc_pnic_rx_buf == NULL) {
			device_printf(sc->dc_dev,
			    "Could not allocate PNIC RX buffer\n");
			error = ENOMEM;
			goto fail;
		}
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A):
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201):
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 * The DC_TX_COALESCE flag is required.
		 */
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112):
		sc->dc_type = DC_TYPE_CONEXANT;
		sc->dc_flags |= DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	case DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261):
	case DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5263):
		if (sc->dc_info->dc_devid ==
		    DC_DEVID(DC_VENDORID_ULI, DC_DEVICEID_M5261))
			sc->dc_type = DC_TYPE_ULI_M5261;
		else
			sc->dc_type = DC_TYPE_ULI_M5263;
		/* TX buffers should be aligned on a 4 byte boundary. */
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;
		error = dc_read_srom(sc, sc->dc_romwidth);
		if (error != 0)
			goto fail;
		break;
	default:
		device_printf(dev, "unknown device: %x\n",
		    sc->dc_info->dc_devid);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_get_cachelnsz(dev);

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode. */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc)) {
		error = dc_parse_21143_srom(sc);
		if (error != 0)
			goto fail;
	} else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
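	/*
	 * Most cases below fetch the address as three 16-bit words from a
	 * chip-specific EEPROM offset; the ADMtek parts and SROM-less ULi
	 * parts instead recover it from chip registers.
	 */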
	switch (sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
#ifdef __sparc64__
		/*
		 * If this is an onboard dc(4) the station address read
		 * from the EEPROM is all zero and we have to get it from
		 * the FCode.
		 */
		if (eaddr[0] == 0 && (eaddr[1] & ~0xffff) == 0)
			OF_getetheraddr(dev, (caddr_t)&eaddr);
#endif
		break;
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN983:
		reg = CSR_READ_4(sc, DC_AL_PAR0);
		mac = (uint8_t *)&eaddr[0];
		mac[0] = (reg >> 0) & 0xff;
		mac[1] = (reg >> 8) & 0xff;
		mac[2] = (reg >> 16) & 0xff;
		mac[3] = (reg >> 24) & 0xff;
		reg = CSR_READ_4(sc, DC_AL_PAR1);
		mac[4] = (reg >> 0) & 0xff;
		mac[5] = (reg >> 8) & 0xff;
		break;
	case DC_TYPE_CONEXANT:
		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
		    ETHER_ADDR_LEN);
		break;
	case DC_TYPE_XIRCOM:
		/* The MAC comes from the CIS. */
		mac = pci_get_ether(dev);
		if (!mac) {
			device_printf(dev, "No station address in CIS!\n");
			error = ENXIO;
			goto fail;
		}
		bcopy(mac, eaddr, ETHER_ADDR_LEN);
		break;
	case DC_TYPE_ULI_M5261:
	case DC_TYPE_ULI_M5263:
		srom = (uint16_t *)sc->dc_srom;
		if (srom == NULL || *srom == 0xFFFF || *srom == 0) {
			/*
			 * No valid SROM present, read station address
			 * from ID Table.
			 */
			device_printf(dev,
			    "Reading station address from ID Table.\n");
			CSR_WRITE_4(sc, DC_BUSCTL, 0x10000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x01C0);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0000);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0010);
			CSR_WRITE_4(sc, DC_10BTCTRL, 0x0000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x0000);
			CSR_WRITE_4(sc, DC_SIARESET, 0x01B0);
			mac = (uint8_t *)eaddr;
			for (n = 0; n < ETHER_ADDR_LEN; n++)
				mac[n] = (uint8_t)CSR_READ_4(sc, DC_10BTCTRL);
			CSR_WRITE_4(sc, DC_SIARESET, 0x0000);
			CSR_WRITE_4(sc, DC_BUSCTL, 0x0000);
			DELAY(10);
		} else
			dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR,
			    3, 0);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	bcopy(eaddr, sc->dc_eaddr, sizeof(eaddr));
	/*
	 * If we still have an invalid station address, see whether we can
	 * find the station address for chip 0. Some multi-port controllers
	 * just store the station address for chip 0 if they have a shared
	 * SROM.
	 */
	if ((sc->dc_eaddr[0] == 0 && (sc->dc_eaddr[1] & ~0xffff) == 0) ||
	    (sc->dc_eaddr[0] == 0xffffffff &&
	    (sc->dc_eaddr[1] & 0xffff) == 0xffff)) {
		error = dc_check_multiport(sc);
		if (error == 0) {
			bcopy(sc->dc_eaddr, eaddr, sizeof(eaddr));
			/* Extract media information. */
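			/*
			 * dc_check_multiport() presumably pulled in the
			 * SROM of the first port, so a media list parsed
			 * from the old contents would be stale; free it
			 * and parse the replacement SROM.
			 */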
			if (DC_IS_INTEL(sc) && sc->dc_srom != NULL) {
				while (sc->dc_mi != NULL) {
					m = sc->dc_mi->dc_next;
					free(sc->dc_mi, M_DEVBUF);
					sc->dc_mi = m;
				}
				error = dc_parse_21143_srom(sc);
				if (error != 0)
					goto fail;
			}
		} else if (error == ENOMEM)
			goto fail;
		else
			error = 0;
	}

	if ((error = dc_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->dc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_start = dc_start;
	ifp->if_init = dc_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	tmp = 0;
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	/*
	 * Setup General Purpose port mode and data so the tulip can talk
	 * to the MII. This needs to be done before mii_attach so that
	 * we can actually see them.
	 */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN |
		    DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT |
		    DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	phy = MII_PHY_ANY;
	/*
	 * Note: both the AL981 and AN983 have internal PHYs, however the
	 * AL981 provides direct access to the PHY registers while the AN983
	 * uses a serial MII interface. The AN983's MII interface is also
	 * buggy in that you can read from any MII address (0 to 31), but
	 * only address 1 behaves normally. To deal with both cases, we
	 * pretend that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc))
		phy = DC_ADMTEK_PHYADDR;

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at MII address
	 * 0 (possibly HomePNA?) and 1 (ethernet) so we only respond to the
	 * correct one.
	 */
	if (DC_IS_CONEXANT(sc))
		phy = DC_CONEXANT_PHYADDR;

	error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
	    dc_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error && DC_IS_INTEL(sc)) {
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (!(pci_get_subvendor(dev) == 0x1033 &&
		    pci_get_subdevice(dev) == 0x8028))
			sc->dc_flags |= DC_TULIP_LEDS;
		error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd,
		    dc_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	}

	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	if (DC_IS_ADMTEK(sc)) {
		/*
		 * Set automatic TX underrun recovery for the ADMtek chips.
		 */
		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
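	/*
	 * (Recording the VLAN header size in if_hdrlen and advertising
	 * IFCAP_VLAN_MTU lets the stack hand us frames that are four
	 * bytes longer than plain Ethernet frames.)
	 */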
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
	callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (caddr_t)eaddr);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dc_intr, sc, &sc->dc_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		dc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
dc_detach(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	struct dc_mediainfo *m;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));

	ifp = sc->dc_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		DC_LOCK(sc);
		dc_stop(sc);
		DC_UNLOCK(sc);
		callout_drain(&sc->dc_stat_ch);
		callout_drain(&sc->dc_wdog_ch);
		ether_ifdetach(ifp);
	}
	if (sc->dc_miibus)
		device_delete_child(dev, sc->dc_miibus);
	bus_generic_detach(dev);

	if (sc->dc_intrhand)
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
	if (sc->dc_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	if (sc->dc_res)
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	if (ifp != NULL)
		if_free(ifp);

	dc_dma_free(sc);

	free(sc->dc_pnic_rx_buf, M_DEVBUF);

	while (sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}
	free(sc->dc_srom, M_DEVBUF);

	mtx_destroy(&sc->dc_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
dc_list_tx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i, nexti;

	cd = &sc->dc_cdata;
	ld = &sc->dc_ldata;
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (i == DC_TX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->dc_tx_list[i].dc_status = 0;
		ld->dc_tx_list[i].dc_ctl = 0;
		ld->dc_tx_list[i].dc_data = 0;
		ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
		cd->dc_tx_chain[i] = NULL;
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
	cd->dc_tx_pkts = 0;
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
dc_list_rx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i, nexti;

	cd = &sc->dc_cdata;
	ld = &sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (dc_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == DC_RX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
	}

	cd->dc_rx_prod = 0;
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
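/*
 * Note that the replacement mbuf is loaded through dc_sparemap first, so
 * a DMA load failure leaves the ring slot's old buffer untouched; only on
 * success are the spare and per-slot maps swapped.
 */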
static int
dc_newbuf(struct dc_softc *sc, int i)
{
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t segs[1];
	int error, nseg;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m, char *), m->m_len);

	error = bus_dmamap_load_mbuf_sg(sc->dc_rx_mtag, sc->dc_sparemap,
	    m, segs, &nseg, 0);
	if (error) {
		m_freem(m);
		return (error);
	}
	KASSERT(nseg == 1, ("%s: wrong number of segments (%d)", __func__,
	    nseg));
	if (sc->dc_cdata.dc_rx_chain[i] != NULL)
		bus_dmamap_unload(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i]);

	map = sc->dc_cdata.dc_rx_map[i];
	sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
	sc->dc_sparemap = map;
	sc->dc_cdata.dc_rx_chain[i] = m;
	bus_dmamap_sync(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i],
	    BUS_DMASYNC_PREREAD);

	sc->dc_ldata.dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	sc->dc_ldata.dc_rx_list[i].dc_data =
	    htole32(DC_ADDR_LO(segs[0].ds_addr));
	sc->dc_ldata.dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
 *
 * - We know the size of the desired received frame because it will be
 *   provided in the length field of the status word in the last
 *   descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 *   This means that we know that the buffer contents should be all
 *   zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 *   ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 *   backwards until we encounter the first non-zero byte of data.
 *   This is the end of the received frame. We know we will encounter
 *   some data at the end of the frame because the CRC will always be
 *   there, so even if the sender transmits a packet of all zeros,
 *   we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 *   that value from the current pointer location. This brings us
 *   to the start of the actual received packet.
 *
 * - We copy this into an mbuf and pass it on, along with the actual
 *   frame length.
 *
 * The performance hit is tremendous, but it beats dropping frames all
 * the time.
 */
#define	DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	uint32_t rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata.dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata.dc_rx_list[i];
		rxstat = le32toh(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		dc_discard_rxbuf(sc, i);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((uintptr_t)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
static int
dc_rx_resync(struct dc_softc *sc)
{
	struct dc_desc *cur_rx;
	int i, pos;

	pos = sc->dc_cdata.dc_rx_prod;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		cur_rx = &sc->dc_ldata.dc_rx_list[pos];
		if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
			break;
		DC_INC(pos, DC_RX_LIST_CNT);
	}

	/* If the ring really is empty, then just return. */
	if (i == DC_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch up. */
	sc->dc_cdata.dc_rx_prod = pos;

	return (EAGAIN);
}

static void
dc_discard_rxbuf(struct dc_softc *sc, int i)
{
	struct mbuf *m;

	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
		m = sc->dc_cdata.dc_rx_chain[i];
		bzero(mtod(m, char *), m->m_len);
	}

	sc->dc_ldata.dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	sc->dc_ldata.dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
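/*
 * The count of received packets is returned so that the DEVICE_POLLING
 * handler further below can account for the work done in this call.
 */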
static int
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, total_len, rx_npkts;
	uint32_t rxstat;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	rx_npkts = 0;

	bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	for (i = sc->dc_cdata.dc_rx_prod;
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    DC_INC(i, DC_RX_LIST_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->dc_ldata.dc_rx_list[i];
		rxstat = le32toh(cur_rx->dc_status);
		if ((rxstat & DC_RXSTAT_OWN) != 0)
			break;
		m = sc->dc_cdata.dc_rx_chain[i];
		bus_dmamap_sync(sc->dc_rx_mtag, sc->dc_cdata.dc_rx_map[i],
		    BUS_DMASYNC_POSTREAD);
		total_len = DC_RXBYTES(rxstat);
		rx_npkts++;

		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0)
					continue;
				dc_pnic_rx_bug_war(sc, i);
				rxstat = le32toh(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be vlans.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_discard_rxbuf(sc, i);
				if (rxstat & DC_RXSTAT_CRCERR)
					continue;
				else {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					dc_init_locked(sc);
					return (rx_npkts);
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (dc_newbuf(sc, i) != 0) {
			dc_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#else
		{
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			dc_discard_rxbuf(sc, i);
			if (m0 == NULL) {
				ifp->if_iqdrops++;
				continue;
			}
			m = m0;
		}
#endif

		ifp->if_ipackets++;
		DC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		DC_LOCK(sc);
	}

	sc->dc_cdata.dc_rx_prod = i;
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx;
	struct ifnet *ifp;
	int idx, setup;
	uint32_t ctl, txstat;

	if (sc->dc_cdata.dc_tx_cnt == 0)
		return;

	ifp = sc->dc_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	setup = 0;
	for (idx = sc->dc_cdata.dc_tx_cons; idx != sc->dc_cdata.dc_tx_prod;
	    DC_INC(idx, DC_TX_LIST_CNT), sc->dc_cdata.dc_tx_cnt--) {
		cur_tx = &sc->dc_ldata.dc_tx_list[idx];
		txstat = le32toh(cur_tx->dc_status);
		ctl = le32toh(cur_tx->dc_ctl);

		if (txstat & DC_TXSTAT_OWN)
			break;

		if (sc->dc_cdata.dc_tx_chain[idx] == NULL)
			continue;

		if (ctl & DC_TXCTL_SETUP) {
			cur_tx->dc_ctl = htole32(ctl & ~DC_TXCTL_SETUP);
			setup++;
			bus_dmamap_sync(sc->dc_stag, sc->dc_smap,
			    BUS_DMASYNC_POSTWRITE);
			/*
			 * Yes, the PNIC is so brain damaged
			 * that it will sometimes generate a TX
			 * underrun error while DMAing the RX
			 * filter setup frame. If we detect this,
			 * we have to send the setup frame again,
			 * or else the filter won't be programmed
			 * correctly.
			 */
			if (DC_IS_PNIC(sc)) {
				if (txstat & DC_TXSTAT_ERRSUM)
					dc_setfilt(sc);
			}
			sc->dc_cdata.dc_tx_chain[idx] = NULL;
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				dc_init_locked(sc);
				return;
			}
		} else
			ifp->if_opackets++;
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		bus_dmamap_sync(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx]);
		m_freem(sc->dc_cdata.dc_tx_chain[idx]);
		sc->dc_cdata.dc_tx_chain[idx] = NULL;
	}
	sc->dc_cdata.dc_tx_cons = idx;

	if (sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_CNT - DC_TX_LIST_RSVD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->dc_cdata.dc_tx_cnt == 0)
			sc->dc_wdog_timer = 0;
	}
	if (setup > 0)
		bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
dc_tick(void *xsc)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t r;

	sc = xsc;
	DC_LOCK_ASSERT(sc);
	ifp = sc->dc_ifp;
	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Reclaim transmitted frames for controllers that do
	 * not generate TX completion interrupt for every frame.
	 */
	if (sc->dc_flags & DC_TX_USE_TX_INTR)
		dc_txeof(sc);

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) ||
			    (CSR_READ_4(sc, DC_ISR) & DC_ISR_RX_STATE) ==
			    DC_RXSTATE_WAIT) && sc->dc_cdata.dc_tx_cnt == 0)
				mii_tick(mii);
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been
	 * initialized, there may be a delay of a few seconds before the
	 * PHY completes autonegotiation and the link is brought up. Any
	 * transmissions made during that delay will be lost. Dealing with
	 * this is tricky: we can't just pause in the init routine while
	 * waiting for the PHY to come ready since that would bring the
	 * whole system to a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (sc->dc_link != 0 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		dc_start_locked(ifp);

	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
	else
		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
}

/*
 * A transmit underrun has occurred. Back off the transmit threshold,
 * or switch to store and forward mode if we have to.
 */
static void
dc_tx_underrun(struct dc_softc *sc)
{
	uint32_t netcfg, isr;
	int i, reinit;

	reinit = 0;
	netcfg = CSR_READ_4(sc, DC_NETCFG);
	device_printf(sc->dc_dev, "TX underrun -- ");
	if ((sc->dc_flags & DC_TX_STORENFWD) == 0) {
		if (sc->dc_txthresh + DC_TXTHRESH_INC > DC_TXTHRESH_MAX) {
			printf("using store and forward mode\n");
			netcfg |= DC_NETCFG_STORENFWD;
		} else {
			printf("increasing TX threshold\n");
			sc->dc_txthresh += DC_TXTHRESH_INC;
			netcfg &= ~DC_NETCFG_TX_THRESH;
			netcfg |= sc->dc_txthresh;
		}

		if (DC_IS_INTEL(sc)) {
			/*
			 * The real 21143 requires that the transmitter be
			 * idle in order to change the transmit threshold or
			 * store and forward state.
			 */
*/ CSR_WRITE_4(sc, DC_NETCFG, netcfg & ~DC_NETCFG_TX_ON); for (i = 0; i < DC_TIMEOUT; i++) { isr = CSR_READ_4(sc, DC_ISR); if (isr & DC_ISR_TX_IDLE) break; DELAY(10); } if (i == DC_TIMEOUT) { device_printf(sc->dc_dev, "%s: failed to force tx to idle state\n", __func__); reinit++; } } } else { printf("resetting\n"); reinit++; } if (reinit == 0) { CSR_WRITE_4(sc, DC_NETCFG, netcfg); if (DC_IS_INTEL(sc)) CSR_WRITE_4(sc, DC_NETCFG, netcfg | DC_NETCFG_TX_ON); } else { sc->dc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; dc_init_locked(sc); } } #ifdef DEVICE_POLLING static poll_handler_t dc_poll; static int dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct dc_softc *sc = ifp->if_softc; int rx_npkts = 0; DC_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { DC_UNLOCK(sc); return (rx_npkts); } sc->rxcycles = count; rx_npkts = dc_rxeof(sc); dc_txeof(sc); if (!IFQ_IS_EMPTY(&ifp->if_snd) && !(ifp->if_drv_flags & IFF_DRV_OACTIVE)) dc_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ uint32_t status; status = CSR_READ_4(sc, DC_ISR); status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF | DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN | DC_ISR_BUS_ERR); if (!status) { DC_UNLOCK(sc); return (rx_npkts); } /* ack what we have */ CSR_WRITE_4(sc, DC_ISR, status); if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) { uint32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); if (dc_rx_resync(sc)) dc_rxeof(sc); } /* restart transmit unit if necessary */ if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); if (status & DC_ISR_TX_UNDERRUN) dc_tx_underrun(sc); if (status & DC_ISR_BUS_ERR) { if_printf(ifp, "%s: bus error\n", __func__); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; dc_init_locked(sc); } } DC_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static void dc_intr(void *arg) { struct dc_softc *sc; struct ifnet *ifp; uint32_t r, status; int n; sc = arg; if (sc->suspended) return; DC_LOCK(sc); status = CSR_READ_4(sc, DC_ISR); if (status == 0xFFFFFFFF || (status & DC_INTRS) == 0) { DC_UNLOCK(sc); return; } ifp = sc->dc_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { DC_UNLOCK(sc); return; } #endif /* Disable interrupts. */ CSR_WRITE_4(sc, DC_IMR, 0x00000000); for (n = 16; n > 0; n--) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; /* Ack interrupts. */ CSR_WRITE_4(sc, DC_ISR, status); if (status & DC_ISR_RX_OK) { if (dc_rxeof(sc) == 0) { while (dc_rx_resync(sc)) dc_rxeof(sc); } } if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF)) dc_txeof(sc); if (status & DC_ISR_TX_IDLE) { dc_txeof(sc); if (sc->dc_cdata.dc_tx_cnt) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); } } if (status & DC_ISR_TX_UNDERRUN) dc_tx_underrun(sc); if ((status & DC_ISR_RX_WATDOGTIMEO) || (status & DC_ISR_RX_NOBUF)) { r = CSR_READ_4(sc, DC_FRAMESDISCARDED); ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); if (dc_rxeof(sc) == 0) { while (dc_rx_resync(sc)) dc_rxeof(sc); } } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) dc_start_locked(ifp); if (status & DC_ISR_BUS_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; dc_init_locked(sc); DC_UNLOCK(sc); return; } status = CSR_READ_4(sc, DC_ISR); if (status == 0xFFFFFFFF || (status & DC_INTRS) == 0) break; } /* Re-enable interrupts. 
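 *
 * The loop above deliberately bounds itself to 16 passes and
 * re-reads DC_ISR at the bottom of each pass: a pathological
 * interrupt storm, or a dead chip reading back 0xFFFFFFFF,
 * costs at most 16 iterations before we bail out and take a
 * fresh interrupt for any remaining work.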
*/ if (ifp->if_drv_flags & IFF_DRV_RUNNING) CSR_WRITE_4(sc, DC_IMR, DC_INTRS); DC_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int dc_encap(struct dc_softc *sc, struct mbuf **m_head) { bus_dma_segment_t segs[DC_MAXFRAGS]; bus_dmamap_t map; struct dc_desc *f; struct mbuf *m; int cur, defragged, error, first, frag, i, idx, nseg; m = NULL; defragged = 0; if (sc->dc_flags & DC_TX_COALESCE && ((*m_head)->m_next != NULL || sc->dc_flags & DC_TX_ALIGN)) { m = m_defrag(*m_head, M_DONTWAIT); defragged = 1; } else { /* * Count the number of frags in this chain to see if we * need to m_collapse. Since the descriptor list is shared * by all packets, we'll m_collapse long chains so that they * do not use up the entire list, even if they would fit. */ i = 0; for (m = *m_head; m != NULL; m = m->m_next) i++; if (i > DC_TX_LIST_CNT / 4 || DC_TX_LIST_CNT - i + sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_RSVD) { m = m_collapse(*m_head, M_DONTWAIT, DC_MAXFRAGS); defragged = 1; } } if (defragged != 0) { if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } idx = sc->dc_cdata.dc_tx_prod; error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0); if (error == EFBIG) { if (defragged != 0 || (m = m_collapse(*m_head, M_DONTWAIT, DC_MAXFRAGS)) == NULL) { m_freem(*m_head); *m_head = NULL; return (defragged != 0 ? error : ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); KASSERT(nseg <= DC_MAXFRAGS, ("%s: wrong number of segments (%d)", __func__, nseg)); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check descriptor overruns. 
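 *
 * This is simple conservation of descriptors: a frame mapped
 * into nseg segments fits only if
 *
 *	dc_tx_cnt + nseg <= DC_TX_LIST_CNT - DC_TX_LIST_RSVD
 *
 * with DC_TX_LIST_RSVD entries held in reserve (setup frames,
 * for instance, need a descriptor of their own).  On overflow
 * the freshly loaded DMA map is unloaded and ENOBUFS returned,
 * letting dc_start_locked() requeue the mbuf and set
 * IFF_DRV_OACTIVE.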
*/ if (sc->dc_cdata.dc_tx_cnt + nseg > DC_TX_LIST_CNT - DC_TX_LIST_RSVD) { bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx]); return (ENOBUFS); } bus_dmamap_sync(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[idx], BUS_DMASYNC_PREWRITE); first = cur = frag = sc->dc_cdata.dc_tx_prod; for (i = 0; i < nseg; i++) { if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && (frag == (DC_TX_LIST_CNT - 1)) && (first != sc->dc_cdata.dc_tx_first)) { bus_dmamap_unload(sc->dc_tx_mtag, sc->dc_cdata.dc_tx_map[first]); m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } f = &sc->dc_ldata.dc_tx_list[frag]; f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len); if (i == 0) { f->dc_status = 0; f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); } else f->dc_status = htole32(DC_TXSTAT_OWN); f->dc_data = htole32(DC_ADDR_LO(segs[i].ds_addr)); cur = frag; DC_INC(frag, DC_TX_LIST_CNT); } sc->dc_cdata.dc_tx_prod = frag; sc->dc_cdata.dc_tx_cnt += nseg; sc->dc_cdata.dc_tx_chain[cur] = *m_head; sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) sc->dc_ldata.dc_tx_list[first].dc_ctl |= htole32(DC_TXCTL_FINT); if (sc->dc_flags & DC_TX_INTR_ALWAYS) sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); if (sc->dc_flags & DC_TX_USE_TX_INTR && ++sc->dc_cdata.dc_tx_pkts >= 8) { sc->dc_cdata.dc_tx_pkts = 0; sc->dc_ldata.dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); } sc->dc_ldata.dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN); bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Swap the last and the first dmamaps to ensure the map for * this transmission is placed at the last descriptor. */ map = sc->dc_cdata.dc_tx_map[cur]; sc->dc_cdata.dc_tx_map[cur] = sc->dc_cdata.dc_tx_map[first]; sc->dc_cdata.dc_tx_map[first] = map; return (0); } static void dc_start(struct ifnet *ifp) { struct dc_softc *sc; sc = ifp->if_softc; DC_LOCK(sc); dc_start_locked(ifp); DC_UNLOCK(sc); } /* * Main transmit routine * To avoid having to do mbuf copies, we put pointers to the mbuf data * regions directly in the transmit lists. We also save a copy of the * pointers since the transmit list fragment pointers are physical * addresses. */ static void dc_start_locked(struct ifnet *ifp) { struct dc_softc *sc; struct mbuf *m_head; int queued; sc = ifp->if_softc; DC_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->dc_link == 0) return; sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod; for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { /* * If there's no way we can send any packets, return now. */ if (sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_CNT - DC_TX_LIST_RSVD) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (dc_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } queued++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } if (queued > 0) { /* Transmit */ if (!(sc->dc_flags & DC_TX_POLL)) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * Set a timeout in case the chip goes out to lunch. 
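 *
 * dc_wdog_timer is decremented once a second by dc_watchdog()
 * (rearmed with callout_reset(..., hz, ...)), so the value 5
 * gives the chip roughly five seconds to report a completion
 * before the watchdog fires and reinitializes the interface.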
*/ sc->dc_wdog_timer = 5; } } static void dc_init(void *xsc) { struct dc_softc *sc = xsc; DC_LOCK(sc); dc_init_locked(sc); DC_UNLOCK(sc); } static void dc_init_locked(struct dc_softc *sc) { struct ifnet *ifp = sc->dc_ifp; struct mii_data *mii; struct ifmedia *ifm; DC_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; mii = device_get_softc(sc->dc_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ dc_stop(sc); dc_reset(sc); if (DC_IS_INTEL(sc)) { ifm = &mii->mii_media; dc_apply_fixup(sc, ifm->ifm_media); } /* * Set cache alignment and burst length. */ if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc) || DC_IS_ULI(sc)) CSR_WRITE_4(sc, DC_BUSCTL, 0); else CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE); /* * Evenly share the bus between receive and transmit process. */ if (DC_IS_INTEL(sc)) DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); } else { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); } if (sc->dc_flags & DC_TX_POLL) DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); switch(sc->dc_cachesize) { case 32: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); break; case 16: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); break; case 8: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); break; case 0: default: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); break; } if (sc->dc_flags & DC_TX_STORENFWD) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); else { if (sc->dc_txthresh > DC_TXTHRESH_MAX) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } else { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); } } DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { /* * The app notes for the 98713 and 98715A say that * in order to have the chips operate properly, a magic * number must be written to CSR16. Macronix does not * document the meaning of these bits so there's no way * to know exactly what they do. The 98713 has a magic * number all its own; the rest all use a different one. */ DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); if (sc->dc_type == DC_TYPE_98713) DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); else DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); } if (DC_IS_XIRCOM(sc)) { /* * setup General Purpose Port mode and data so the tulip * can talk to the MII. */ CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); } DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); /* Init circular RX list. */ if (dc_list_rx_init(sc) == ENOBUFS) { device_printf(sc->dc_dev, "initialization failed: no memory for rx buffers\n"); dc_stop(sc); return; } /* * Init TX descriptors. */ dc_list_tx_init(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0)); CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0)); /* * Enable interrupts. */ #ifdef DEVICE_POLLING /* * ... but only if we are not polling, and make sure they are off in * the case of polling. Some cards (e.g. fxp) turn interrupts on * after a reset. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_4(sc, DC_IMR, 0x00000000); else #endif CSR_WRITE_4(sc, DC_IMR, DC_INTRS); CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); /* Initialize TX jabber and RX watchdog timer. 
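 *
 * This write is specific to ULi parts (note the DC_IS_ULI()
 * test); going by the bit names, DC_WDOG_JABBERCLK selects the
 * jabber timer clock and DC_WDOG_HOSTUNJAB lets the host clear
 * a jabber condition, though the exact semantics are
 * chip-specific.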
*/ if (DC_IS_ULI(sc)) CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_JABBERCLK | DC_WDOG_HOSTUNJAB); /* Enable transmitter. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); /* * If this is an Intel 21143 and we're not using the * MII port, program the LED control pins so we get * link and activity indications. */ if (sc->dc_flags & DC_TULIP_LEDS) { CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY); CSR_WRITE_4(sc, DC_WATCHDOG, 0); } /* * Load the RX/multicast filter. We do this sort of late * because the filter programming scheme on the 21143 and * some clones requires DMAing a setup frame via the TX * engine, and we need the transmitter enabled for that. */ dc_setfilt(sc); /* Enable receiver. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; dc_ifmedia_upd_locked(sc); /* Clear missed frames and overflow counter. */ CSR_READ_4(sc, DC_FRAMESDISCARDED); /* Don't start the ticker if this is a homePNA link. */ if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) sc->dc_link = 1; else { if (sc->dc_flags & DC_21143_NWAY) callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); else callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); } sc->dc_wdog_timer = 0; callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc); } /* * Set media options. */ static int dc_ifmedia_upd(struct ifnet *ifp) { struct dc_softc *sc; int error; sc = ifp->if_softc; DC_LOCK(sc); error = dc_ifmedia_upd_locked(sc); DC_UNLOCK(sc); return (error); } static int dc_ifmedia_upd_locked(struct dc_softc *sc) { struct mii_data *mii; struct ifmedia *ifm; int error; DC_LOCK_ASSERT(sc); sc->dc_link = 0; mii = device_get_softc(sc->dc_miibus); error = mii_mediachg(mii); if (error == 0) { ifm = &mii->mii_media; if (DC_IS_INTEL(sc)) dc_setcfg(sc, ifm->ifm_media); else if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) dc_setcfg(sc, ifm->ifm_media); } return (error); } /* * Report current media status. 
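 *
 * This is the handler that ultimately services the SIOCGIFMEDIA
 * ioctl.  For reference, a minimal userland consumer might look
 * like the sketch below; it is illustrative only, not part of
 * the driver, and the interface name "dc0" is an assumption.
 */

#if 0	/* illustrative userland sketch, not compiled */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_media.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifmediareq ifmr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		return (1);
	memset(&ifmr, 0, sizeof(ifmr));
	strlcpy(ifmr.ifm_name, "dc0", sizeof(ifmr.ifm_name));
	if (ioctl(s, SIOCGIFMEDIA, &ifmr) == 0)
		printf("active=%#x status=%#x\n",
		    ifmr.ifm_active, ifmr.ifm_status);
	close(s);
	return (0);
}
#endif

/*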
*/ static void dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); DC_LOCK(sc); mii_pollstat(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc)) { if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { ifmr->ifm_active = ifm->ifm_media; ifmr->ifm_status = 0; DC_UNLOCK(sc); return; } } ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; DC_UNLOCK(sc); } static int dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct dc_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFFLAGS: DC_LOCK(sc); if (ifp->if_flags & IFF_UP) { int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (need_setfilt) dc_setfilt(sc); } else { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; dc_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) dc_stop(sc); } sc->dc_if_flags = ifp->if_flags; DC_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: DC_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) dc_setfilt(sc); DC_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->dc_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(dc_poll, ifp); if (error) return(error); DC_LOCK(sc); /* Disable interrupts */ CSR_WRITE_4(sc, DC_IMR, 0x00000000); ifp->if_capenable |= IFCAP_POLLING; DC_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. */ DC_LOCK(sc); CSR_WRITE_4(sc, DC_IMR, DC_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; DC_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void dc_watchdog(void *xsc) { struct dc_softc *sc = xsc; struct ifnet *ifp; DC_LOCK_ASSERT(sc); if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) { callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc); return; } ifp = sc->dc_ifp; ifp->if_oerrors++; device_printf(sc->dc_dev, "watchdog timeout\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; dc_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) dc_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void dc_stop(struct dc_softc *sc) { struct ifnet *ifp; struct dc_list_data *ld; struct dc_chain_data *cd; int i; uint32_t ctl, netcfg; DC_LOCK_ASSERT(sc); ifp = sc->dc_ifp; ld = &sc->dc_ldata; cd = &sc->dc_cdata; callout_stop(&sc->dc_stat_ch); callout_stop(&sc->dc_wdog_ch); sc->dc_wdog_timer = 0; sc->dc_link = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); netcfg = CSR_READ_4(sc, DC_NETCFG); if (netcfg & (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)) CSR_WRITE_4(sc, DC_NETCFG, netcfg & ~(DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)); CSR_WRITE_4(sc, DC_IMR, 0x00000000); /* Wait the completion of TX/RX SM. */ if (netcfg & (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)) dc_netcfg_wait(sc); CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); /* * Free data in the RX lists. 
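 *
 * Ordering matters in the loop below: each busy map is synced
 * POSTREAD so the CPU observes the device's final writes, then
 * unloaded, and only then is the mbuf freed; this is the
 * standard busdma discipline, observed here even though the RX
 * engine was already stopped above.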
*/ for (i = 0; i < DC_RX_LIST_CNT; i++) { if (cd->dc_rx_chain[i] != NULL) { bus_dmamap_sync(sc->dc_rx_mtag, cd->dc_rx_map[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->dc_rx_mtag, cd->dc_rx_map[i]); m_freem(cd->dc_rx_chain[i]); cd->dc_rx_chain[i] = NULL; } } bzero(ld->dc_rx_list, DC_RX_LIST_SZ); bus_dmamap_sync(sc->dc_rx_ltag, sc->dc_rx_lmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Free the TX list buffers. */ for (i = 0; i < DC_TX_LIST_CNT; i++) { if (cd->dc_tx_chain[i] != NULL) { ctl = le32toh(ld->dc_tx_list[i].dc_ctl); if (ctl & DC_TXCTL_SETUP) { bus_dmamap_sync(sc->dc_stag, sc->dc_smap, BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->dc_tx_mtag, cd->dc_tx_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dc_tx_mtag, cd->dc_tx_map[i]); m_freem(cd->dc_tx_chain[i]); } cd->dc_tx_chain[i] = NULL; } } bzero(ld->dc_tx_list, DC_TX_LIST_SZ); bus_dmamap_sync(sc->dc_tx_ltag, sc->dc_tx_lmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int dc_suspend(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); DC_LOCK(sc); dc_stop(sc); sc->suspended = 1; DC_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int dc_resume(device_t dev) { struct dc_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->dc_ifp; /* reinitialize interface if necessary */ DC_LOCK(sc); if (ifp->if_flags & IFF_UP) dc_init_locked(sc); sc->suspended = 0; DC_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int dc_shutdown(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); DC_LOCK(sc); dc_stop(sc); DC_UNLOCK(sc); return (0); } static int dc_check_multiport(struct dc_softc *sc) { struct dc_softc *dsc; devclass_t dc; device_t child; uint8_t *eaddr; int unit; dc = devclass_find("dc"); for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child == NULL) continue; if (child == sc->dc_dev) continue; if (device_get_parent(child) != device_get_parent(sc->dc_dev)) continue; if (unit > device_get_unit(sc->dc_dev)) continue; if (device_is_attached(child) == 0) continue; dsc = device_get_softc(child); device_printf(sc->dc_dev, "Using station address of %s as base\n", device_get_nameunit(child)); bcopy(dsc->dc_eaddr, sc->dc_eaddr, ETHER_ADDR_LEN); eaddr = (uint8_t *)sc->dc_eaddr; eaddr[5]++; /* Prepare SROM to parse again. */ if (DC_IS_INTEL(sc) && dsc->dc_srom != NULL && sc->dc_romwidth != 0) { free(sc->dc_srom, M_DEVBUF); sc->dc_romwidth = dsc->dc_romwidth; sc->dc_srom = malloc(DC_ROM_SIZE(sc->dc_romwidth), M_DEVBUF, M_NOWAIT); if (sc->dc_srom == NULL) { device_printf(sc->dc_dev, "Could not allocate SROM buffer\n"); return (ENOMEM); } bcopy(dsc->dc_srom, sc->dc_srom, DC_ROM_SIZE(sc->dc_romwidth)); } return (0); } return (ENOENT); } Index: projects/nand/sys/dev/e1000/if_em.c =================================================================== --- projects/nand/sys/dev/e1000/if_em.c (revision 235262) +++ projects/nand/sys/dev/e1000/if_em.c (revision 235263) @@ -1,5756 +1,5756 @@ /****************************************************************************** Copyright (c) 2001-2011, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_inet.h" #include "opt_inet6.h" #endif #include #include #if __FreeBSD_version >= 800000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82571.h" #include "if_em.h" /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int em_display_debug_stats = 0; /********************************************************************* * Driver version: *********************************************************************/ char em_driver_version[] = "7.3.2"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static em_vendor_info_t em_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection */ { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, 
PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH2_LV_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH2_LV_V, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ { 0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. 
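 *
 * The final field of each em_vendor_info_array entry above is
 * an index into this table.  Every supported part currently
 * maps to index 0, the generic PRO/1000 string, which
 * em_probe() combines with em_driver_version to form the
 * device description.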
*********************************************************************/ static char *em_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int em_probe(device_t); static int em_attach(device_t); static int em_detach(device_t); static int em_shutdown(device_t); static int em_suspend(device_t); static int em_resume(device_t); #ifdef EM_MULTIQUEUE static int em_mq_start(struct ifnet *, struct mbuf *); static int em_mq_start_locked(struct ifnet *, struct tx_ring *, struct mbuf *); static void em_qflush(struct ifnet *); #else static void em_start(struct ifnet *); static void em_start_locked(struct ifnet *, struct tx_ring *); #endif static int em_ioctl(struct ifnet *, u_long, caddr_t); static void em_init(void *); static void em_init_locked(struct adapter *); static void em_stop(void *); static void em_media_status(struct ifnet *, struct ifmediareq *); static int em_media_change(struct ifnet *); static void em_identify_hardware(struct adapter *); static int em_allocate_pci_resources(struct adapter *); static int em_allocate_legacy(struct adapter *); static int em_allocate_msix(struct adapter *); static int em_allocate_queues(struct adapter *); static int em_setup_msix(struct adapter *); static void em_free_pci_resources(struct adapter *); static void em_local_timer(void *); static void em_reset(struct adapter *); static int em_setup_interface(device_t, struct adapter *); static void em_setup_transmit_structures(struct adapter *); static void em_initialize_transmit_unit(struct adapter *); static int em_allocate_transmit_buffers(struct tx_ring *); static void em_free_transmit_structures(struct adapter *); static void em_free_transmit_buffers(struct tx_ring *); static int em_setup_receive_structures(struct adapter *); static int em_allocate_receive_buffers(struct rx_ring *); static void em_initialize_receive_unit(struct adapter *); static void em_free_receive_structures(struct adapter *); static void em_free_receive_buffers(struct rx_ring *); static void em_enable_intr(struct adapter *); static void em_disable_intr(struct adapter *); static void em_update_stats_counters(struct adapter *); static void em_add_hw_stats(struct adapter *adapter); static void em_txeof(struct tx_ring *); static bool em_rxeof(struct rx_ring *, int, int *); #ifndef __NO_STRICT_ALIGNMENT static int em_fixup_rx(struct rx_ring *); #endif static void em_receive_checksum(struct e1000_rx_desc *, struct mbuf *); static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int, struct ip *, u32 *, u32 *); static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *, struct tcphdr *, u32 *, u32 *); static void em_set_promisc(struct adapter *); static void em_disable_promisc(struct adapter *); static void em_set_multi(struct adapter *); static void em_update_link_status(struct adapter *); static void em_refresh_mbufs(struct rx_ring *, int); static void em_register_vlan(void *, struct ifnet *, u16); static void em_unregister_vlan(void *, struct ifnet *, u16); static void em_setup_vlan_hw_support(struct adapter *); static int em_xmit(struct tx_ring *, struct mbuf **); static int em_dma_malloc(struct adapter *, bus_size_t, struct em_dma_alloc *, int); static void em_dma_free(struct adapter *, struct em_dma_alloc *); static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void em_print_nvm_info(struct adapter *); static int 
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static void em_print_debug_info(struct adapter *); static int em_is_valid_ether_addr(u8 *); static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void em_add_int_delay_sysctl(struct adapter *, const char *, const char *, struct em_int_delay_info *, int, int); /* Management and WOL Support */ static void em_init_manageability(struct adapter *); static void em_release_manageability(struct adapter *); static void em_get_hw_control(struct adapter *); static void em_release_hw_control(struct adapter *); static void em_get_wakeup(device_t); static void em_enable_wakeup(device_t); static int em_enable_phy_wakeup(struct adapter *); static void em_led_func(void *, int); static void em_disable_aspm(struct adapter *); static int em_irq_fast(void *); /* MSIX handlers */ static void em_msix_tx(void *); static void em_msix_rx(void *); static void em_msix_link(void *); static void em_handle_tx(void *context, int pending); static void em_handle_rx(void *context, int pending); static void em_handle_link(void *context, int pending); static void em_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int em_set_flowcntl(SYSCTL_HANDLER_ARGS); static __inline void em_rx_discard(struct rx_ring *, int); #ifdef DEVICE_POLLING static poll_handler_t em_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t em_methods[] = { /* Device interface */ DEVMETHOD(device_probe, em_probe), DEVMETHOD(device_attach, em_attach), DEVMETHOD(device_detach, em_detach), DEVMETHOD(device_shutdown, em_shutdown), DEVMETHOD(device_suspend, em_suspend), DEVMETHOD(device_resume, em_resume), {0, 0} }; static driver_t em_driver = { "em", em_methods, sizeof(struct adapter), }; devclass_t em_devclass; DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0); MODULE_DEPEND(em, pci, 1, 1, 1); MODULE_DEPEND(em, ether, 1, 1, 1); /********************************************************************* * Tunable default values. 
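 *
 * The two macros just below convert between microseconds and
 * the hardware's 1.024 us timer ticks, rounding to nearest.  A
 * worked round trip, using C integer division:
 *
 *	EM_USECS_TO_TICKS(100) = (1000*100 + 512) / 1024 = 98
 *	EM_TICKS_TO_USECS(98)  = (1024*98  + 500) / 1000 = 100
 *
 * so a 100 us delay request survives the conversion to register
 * ticks and back.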
*********************************************************************/ #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) #define M_TSO_LEN 66 /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters"); static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt); TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt); SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt, 0, "Default transmit interrupt delay in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt, 0, "Default receive interrupt delay in usecs"); static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt); TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt); SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN, &em_tx_abs_int_delay_dflt, 0, "Default transmit interrupt delay limit in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN, &em_rx_abs_int_delay_dflt, 0, "Default receive interrupt delay limit in usecs"); static int em_rxd = EM_DEFAULT_RXD; static int em_txd = EM_DEFAULT_TXD; TUNABLE_INT("hw.em.rxd", &em_rxd); TUNABLE_INT("hw.em.txd", &em_txd); SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0, "Number of receive descriptors per queue"); SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0, "Number of transmit descriptors per queue"); static int em_smart_pwr_down = FALSE; TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down); SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down, 0, "Set to true to leave smart power down enabled on newer adapters"); /* Controls whether promiscuous also shows bad packets */ static int em_debug_sbp = FALSE; TUNABLE_INT("hw.em.sbp", &em_debug_sbp); SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, "Show bad packets in promiscuous mode"); static int em_enable_msix = TRUE; TUNABLE_INT("hw.em.enable_msix", &em_enable_msix); SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0, "Enable MSI-X interrupts"); /* How many packets rxeof tries to clean at a time */ static int em_rx_process_limit = 100; TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit); SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &em_rx_process_limit, 0, "Maximum number of received packets to process " "at a time, -1 means unlimited"); /* Energy efficient ethernet - default to OFF */ static int eee_setting = 0; TUNABLE_INT("hw.em.eee_setting", &eee_setting); SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0, "Enable Energy Efficient Ethernet"); /* Global used in WOL setup with multiport cards */ static int global_quad_port_a = 0; #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * em_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. 
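 *
 * Matching is a linear walk of em_vendor_info_array, terminated
 * by the all-zero sentinel entry; the subvendor and subdevice
 * fields match either exactly or against the PCI_ANY_ID
 * wildcard, which is what every current entry uses.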
* * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int em_probe(device_t dev) { char adapter_name[60]; u16 pci_vendor_id = 0; u16 pci_device_id = 0; u16 pci_subvendor_id = 0; u16 pci_subdevice_id = 0; em_vendor_info_t *ent; INIT_DEBUGOUT("em_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != EM_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = em_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s %s", em_strings[ent->index], em_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int em_attach(device_t dev) { struct adapter *adapter; struct e1000_hw *hw; int error = 0; INIT_DEBUGOUT("em_attach: begin"); if (resource_disabled("em", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; hw = &adapter->hw; EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_nvm_info, "I", "NVM Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_set_flowcntl, "I", "Flow Control"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); /* Determine hardware and mac info */ em_identify_hardware(adapter); /* Setup PCI resources */ if (em_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* ** For ICH8 and family we need to ** map the flash memory, and this ** must happen after the MAC is ** identified */ if ((hw->mac.type == e1000_ich8lan) || (hw->mac.type == e1000_ich9lan) || (hw->mac.type == e1000_ich10lan) || (hw->mac.type == e1000_pchlan) || (hw->mac.type == e1000_pch2lan)) { int rid = EM_BAR_TYPE_FLASH; adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->flash == NULL) { device_printf(dev, "Mapping of Flash failed\n"); error = ENXIO; goto err_pci; } /* This is used in the shared code */ hw->flash_address = (u8 *)adapter->flash; adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash); adapter->osdep.flash_bus_space_handle = rman_get_bushandle(adapter->flash); } /* Do Shared Code initialization */ if (e1000_setup_init_funcs(hw, TRUE)) { device_printf(dev, "Setup of Shared code 
failed\n"); error = ENXIO; goto err_pci; } e1000_get_bus_info(hw); /* Set up some sysctls for the tunable interrupt delays */ em_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, E1000_REGISTER(hw, E1000_RADV), em_rx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt delay limit in usecs", &adapter->tx_abs_int_delay, E1000_REGISTER(hw, E1000_TADV), em_tx_abs_int_delay_dflt); /* Sysctl for limiting the amount of work done in the taskqueue */ em_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, em_rx_process_limit); /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be multiple * of E1000_DBA_ALIGN. */ if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 || (em_txd > EM_MAX_TXD) || (em_txd < EM_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", EM_DEFAULT_TXD, em_txd); adapter->num_tx_desc = EM_DEFAULT_TXD; } else adapter->num_tx_desc = em_txd; if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 || (em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", EM_DEFAULT_RXD, em_rxd); adapter->num_rx_desc = EM_DEFAULT_RXD; } else adapter->num_rx_desc = em_rxd; hw->mac.autoneg = DO_AUTO_NEG; hw->phy.autoneg_wait_to_complete = FALSE; hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (hw->phy.media_type == e1000_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; hw->phy.disable_polarity_correction = FALSE; hw->phy.ms_type = EM_MASTER_SLAVE; } /* * Set the frame limits assuming * standard ethernet sized frames. */ adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; /* * This controls when hardware reports transmit completion * status. */ hw->mac.report_tx_early = 1; /* ** Get queue/ring memory */ if (em_allocate_queues(adapter)) { error = ENOMEM; goto err_pci; } /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Check SOL/IDER usage */ if (e1000_check_reset_block(hw)) device_printf(dev, "PHY reset is blocked" " due to SOL/IDER session.\n"); /* Sysctl for setting Energy Efficient Ethernet */ em_set_sysctl_value(adapter, "eee_control", "enable Energy Efficient Ethernet", &hw->dev_spec.ich8lan.eee_disable, eee_setting); /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. */ e1000_reset_hw(hw); /* Make sure we have a good EEPROM before we read from it */ if (e1000_validate_nvm_checksum(hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. 
*/ if (e1000_validate_nvm_checksum(hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } if (!em_is_valid_ether_addr(hw->mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* ** Do interrupt configuration */ if (adapter->msix > 1) /* Do MSIX */ error = em_allocate_msix(adapter); else /* MSI or Legacy */ error = em_allocate_legacy(adapter); if (error) goto err_late; /* * Get Wake-on-Lan and Management info for later use */ em_get_wakeup(dev); /* Setup OS specific network interface */ if (em_setup_interface(dev, adapter) != 0) goto err_late; em_reset(adapter); /* Initialize statistics */ em_update_stats_counters(adapter); hw->mac.get_link_status = 1; em_update_link_status(adapter); /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); em_add_hw_stats(adapter); /* Non-AMT based hardware can now take control from firmware */ if (adapter->has_manage && !adapter->has_amt) em_get_hw_control(adapter); /* Tell the stack that the interface is not active */ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE; adapter->led_dev = led_create(em_led_func, adapter, device_get_nameunit(dev)); #ifdef DEV_NETMAP em_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("em_attach: end"); return (0); err_late: em_free_transmit_structures(adapter); em_free_receive_structures(adapter); em_release_hw_control(adapter); if (adapter->ifp != NULL) if_free(adapter->ifp); err_pci: em_free_pci_resources(adapter); free(adapter->mta, M_DEVBUF); EM_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
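 *
 * Teardown order is significant here: the interface is stopped
 * (and in_detach set) under the core lock, the PHY is reset,
 * manageability and hardware control are released, the VLAN
 * event handlers are deregistered, ether_ifdetach() unhooks the
 * interface from the stack, the timer callout is drained, and
 * only then are PCI resources and the TX/RX ring structures
 * freed.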
* * return 0 on success, positive on failure *********************************************************************/ static int em_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("em_detach: begin"); /* Make sure VLANS are not using driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (adapter->led_dev != NULL) led_destroy(adapter->led_dev); EM_CORE_LOCK(adapter); adapter->in_detach = 1; em_stop(adapter); EM_CORE_UNLOCK(adapter); EM_CORE_LOCK_DESTROY(adapter); e1000_phy_hw_reset(&adapter->hw); em_release_manageability(adapter); em_release_hw_control(adapter); /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); ether_ifdetach(adapter->ifp); callout_drain(&adapter->timer); #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ em_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); em_free_transmit_structures(adapter); em_free_receive_structures(adapter); em_release_hw_control(adapter); free(adapter->mta, M_DEVBUF); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int em_shutdown(device_t dev) { return em_suspend(dev); } /* * Suspend/resume device methods. */ static int em_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); EM_CORE_LOCK(adapter); em_release_manageability(adapter); em_release_hw_control(adapter); em_enable_wakeup(dev); EM_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int em_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; EM_CORE_LOCK(adapter); if (adapter->hw.mac.type == e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); em_init_locked(adapter); em_init_manageability(adapter); if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); } } EM_CORE_UNLOCK(adapter); return bus_generic_resume(dev); } #ifdef EM_MULTIQUEUE /********************************************************************* * Multiqueue Transmit routines * * em_mq_start is called by the stack to initiate a transmit. * however, if busy the driver can queue the request rather * than do an immediate send. It is this that is an advantage * in this driver, rather than also having multiple tx queues. 
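 *
 * The pattern is "trylock or defer": a sender that cannot take
 * the TX lock parks its frame in the ring's buf_ring via
 * drbr_enqueue() and lets the current lock holder drain it, so
 * no caller ever blocks.  Stripped to its skeleton (sketch
 * only, error handling omitted):
 *
 *	if (EM_TX_TRYLOCK(txr)) {
 *		error = em_mq_start_locked(ifp, txr, m);
 *		EM_TX_UNLOCK(txr);
 *	} else
 *		error = drbr_enqueue(ifp, txr->br, m);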
**********************************************************************/ static int em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m) { struct adapter *adapter = txr->adapter; struct mbuf *next; int err = 0, enq = 0; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || adapter->link_active == 0) { if (m != NULL) err = drbr_enqueue(ifp, txr->br, m); return (err); } enq = 0; if (m == NULL) { next = drbr_dequeue(ifp, txr->br); } else if (drbr_needs_enqueue(ifp, txr->br)) { if ((err = drbr_enqueue(ifp, txr->br, m)) != 0) return (err); next = drbr_dequeue(ifp, txr->br); } else next = m; /* Process the queue */ while (next != NULL) { if ((err = em_xmit(txr, &next)) != 0) { if (next != NULL) err = drbr_enqueue(ifp, txr->br, next); break; } enq++; drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags); ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; next = drbr_dequeue(ifp, txr->br); } if (enq > 0) { /* Set the watchdog */ txr->queue_status = EM_QUEUE_WORKING; txr->watchdog_time = ticks; } if (txr->tx_avail < EM_MAX_SCATTER) em_txeof(txr); if (txr->tx_avail < EM_MAX_SCATTER) ifp->if_drv_flags |= IFF_DRV_OACTIVE; return (err); } /* ** Multiqueue capable stack interface */ static int em_mq_start(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; int error; if (EM_TX_TRYLOCK(txr)) { error = em_mq_start_locked(ifp, txr, m); EM_TX_UNLOCK(txr); } else error = drbr_enqueue(ifp, txr->br, m); return (error); } /* ** Flush all ring buffers */ static void em_qflush(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct mbuf *m; for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); EM_TX_UNLOCK(txr); } if_qflush(ifp); } #else /* !EM_MULTIQUEUE */ static void em_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct adapter *adapter = ifp->if_softc; struct mbuf *m_head; EM_TX_LOCK_ASSERT(txr); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { /* Call cleanup if number of TX descriptors low */ if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD) em_txeof(txr); if (txr->tx_avail < EM_MAX_SCATTER) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Encapsulation can modify our pointer, and or make it * NULL on failure. In that event, we can't requeue. */ if (em_xmit(txr, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); /* Set timeout in case hardware has problems transmitting. */ txr->watchdog_time = ticks; txr->queue_status = EM_QUEUE_WORKING; } return; } static void em_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_TX_LOCK(txr); em_start_locked(ifp, txr); EM_TX_UNLOCK(txr); } return; } #endif /* EM_MULTIQUEUE */ /********************************************************************* * Ioctl entry point * * em_ioctl is called when the user wants to configure the * interface. 
* * return 0 on success, positive on failure **********************************************************************/ static int em_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) em_init(adapter); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); EM_CORE_LOCK(adapter); switch (adapter->hw.mac.type) { case e1000_82571: case e1000_82572: case e1000_ich9lan: case e1000_ich10lan: case e1000_pch2lan: case e1000_82574: case e1000_82583: case e1000_80003es2lan: /* 9K Jumbo Frame size */ max_frame_size = 9234; break; case e1000_pchlan: max_frame_size = 4096; break; /* Adapters that do not support jumbo frames */ case e1000_ich8lan: max_frame_size = ETHER_MAX_LEN; break; default: max_frame_size = MAX_JUMBO_FRAME_SIZE; } if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { EM_CORE_UNLOCK(adapter); error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; em_init_locked(adapter); EM_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); EM_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { em_disable_promisc(adapter); em_set_promisc(adapter); } } else em_init_locked(adapter); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) em_stop(adapter); adapter->if_flags = ifp->if_flags; EM_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_CORE_LOCK(adapter); em_disable_intr(adapter); em_set_multi(adapter); #ifdef DEVICE_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) #endif em_enable_intr(adapter); EM_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ EM_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { EM_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } EM_CORE_UNLOCK(adapter); /* falls thru */ case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(em_poll, ifp); if (error) return (error); EM_CORE_LOCK(adapter); em_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; EM_CORE_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable 
interrupt even in error case */ EM_CORE_LOCK(adapter); em_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; EM_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if ((mask & IFCAP_WOL) && (ifp->if_capabilities & IFCAP_WOL) != 0) { if (mask & IFCAP_WOL_MCAST) ifp->if_capenable ^= IFCAP_WOL_MCAST; if (mask & IFCAP_WOL_MAGIC) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) em_init(adapter); VLAN_CAPABILITIES(ifp); break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void em_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; INIT_DEBUGOUT("em_init: begin"); EM_CORE_LOCK_ASSERT(adapter); em_disable_intr(adapter); callout_stop(&adapter->timer); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* * With the 82571 adapter, RAR[0] may be overwritten * when the other port is reset, we make a duplicate * in RAR[14] for that eventuality, this assures * the interface continues to function. */ if (adapter->hw.mac.type == e1000_82571) { e1000_set_laa_state_82571(&adapter->hw, TRUE); e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, E1000_RAR_ENTRIES - 1); } /* Initialize the hardware */ em_reset(adapter); em_update_link_status(adapter); /* Setup VLAN support, basic and offload if available */ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Set hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_TSO; /* Configure for OS presence */ em_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ em_setup_transmit_structures(adapter); em_initialize_transmit_unit(adapter); /* Setup Multicast table */ em_set_multi(adapter); /* ** Figure out the desired mbuf ** pool for doing jumbos */ if (adapter->max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; /* Prepare receive descriptors and buffers */ if (em_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); em_stop(adapter); return; } em_initialize_receive_unit(adapter); /* Use real VLAN Filter support? 
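 *
 * Two tiers of VLAN support are configured here: with
 * IFCAP_VLAN_HWFILTER enabled the hardware filter table is
 * programmed via em_setup_vlan_hw_support(); otherwise only
 * E1000_CTRL_VME is set, which enables tag processing without
 * filtering on VLAN membership.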
*/ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) /* Use real VLAN Filter support */ em_setup_vlan_hw_support(adapter); else { u32 ctrl; ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); } } /* Don't lose promiscuous settings */ em_set_promisc(adapter); /* Set the interface as ACTIVE */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&adapter->timer, hz, em_local_timer, adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); /* MSI/X configuration for 82574 */ if (adapter->hw.mac.type == e1000_82574) { int tmp; tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp); /* Set the IVAR - interrupt vector routing. */ E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars); } #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_capenable & IFCAP_POLLING) em_disable_intr(adapter); else #endif /* DEVICE_POLLING */ em_enable_intr(adapter); /* AMT based hardware can now take control from firmware */ if (adapter->has_manage && adapter->has_amt) em_get_hw_control(adapter); } static void em_init(void *arg) { struct adapter *adapter = arg; EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } #ifdef DEVICE_POLLING /********************************************************************* * * Legacy polling routine: note this only works with single queue * *********************************************************************/ static int em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; u32 reg_icr; int rx_done; EM_CORE_LOCK(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { EM_CORE_UNLOCK(adapter); return (0); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.mac.get_link_status = 1; em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } } EM_CORE_UNLOCK(adapter); em_rxeof(rxr, count, &rx_done); EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); return (rx_done); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ static int em_irq_fast(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp; u32 reg_icr; ifp = adapter->ifp; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; /* * Starting with the 82571 chip, bit 31 should be used to * determine whether the interrupt belongs to us. 
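* (E1000_ICR_INT_ASSERTED is bit 31; the hardware sets it only when
* it actually asserted the interrupt, so a clear bit on 82571 and
* later parts means the interrupt came from another device sharing
* the line.)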
*/ if (adapter->hw.mac.type >= e1000_82571 && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; em_disable_intr(adapter); taskqueue_enqueue(adapter->tq, &adapter->que_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; taskqueue_enqueue(taskqueue_fast, &adapter->link_task); } if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } /* Combined RX/TX handler, used by Legacy and MSI */ static void em_handle_que(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL); EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(adapter->tq, &adapter->que_task); return; } } em_enable_intr(adapter); return; } /********************************************************************* * * MSIX Interrupt Service Routines * **********************************************************************/ static void em_msix_tx(void *arg) { struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; ++txr->tx_irq; EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); EM_TX_UNLOCK(txr); return; } /********************************************************************* * * MSIX RX Interrupt Service routine * **********************************************************************/ static void em_msix_rx(void *arg) { struct rx_ring *rxr = arg; struct adapter *adapter = rxr->adapter; bool more; ++rxr->rx_irq; more = em_rxeof(rxr, adapter->rx_process_limit, NULL); if (more) taskqueue_enqueue(rxr->tq, &rxr->rx_task); else /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); return; } /********************************************************************* * * MSIX Link Fast Interrupt Service routine * **********************************************************************/ static void em_msix_link(void *arg) { struct adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; em_handle_link(adapter, 0); } else E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); return; } static void em_handle_rx(void *context, int pending) { struct rx_ring *rxr = context; struct adapter *adapter = rxr->adapter; bool more; more = em_rxeof(rxr, adapter->rx_process_limit, NULL); if (more) taskqueue_enqueue(rxr->tq, &rxr->rx_task); else /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); } static void em_handle_tx(void *context, int pending) { struct tx_ring *txr = context; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif E1000_WRITE_REG(&adapter->hw, 
E1000_IMS, txr->ims); EM_TX_UNLOCK(txr); } static void em_handle_link(void *context, int pending) { struct adapter *adapter = context; struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; EM_CORE_LOCK(adapter); callout_stop(&adapter->timer); em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); if (adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); } } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter *adapter = ifp->if_softc; u_char fiber_type = IFM_1000_SX; INIT_DEBUGOUT("em_media_status: begin"); EM_CORE_LOCK(adapter); em_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { EM_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { ifmr->ifm_active |= fiber_type | IFM_FDX; } else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * the media/mediaopt options with ifconfig.
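* For example, "ifconfig em0 media 100baseTX mediaopt full-duplex"
* forces 100Mb/s full duplex, while "ifconfig em0 media autoselect"
* restores autonegotiation.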
* **********************************************************************/ static int em_media_change(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("em_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); EM_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } em_init_locked(adapter); EM_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int em_xmit(struct tx_ring *txr, struct mbuf **m_headp) { struct adapter *adapter = txr->adapter; bus_dma_segment_t segs[EM_MAX_SCATTER]; bus_dmamap_t map; struct em_buffer *tx_buffer, *tx_buffer_mapped; struct e1000_tx_desc *ctxd = NULL; struct mbuf *m_head; struct ether_header *eh; struct ip *ip = NULL; struct tcphdr *tp = NULL; u32 txd_upper, txd_lower, txd_used, txd_saved; int ip_off, poff; int nsegs, i, j, first, last = 0; int error, do_tso, tso_desc = 0, remap = 1; retry: m_head = *m_headp; txd_upper = txd_lower = txd_used = txd_saved = 0; do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0); ip_off = poff = 0; /* * Intel recommends the entire IP/TCP header length reside in a single * buffer. If multiple descriptors are used to describe the IP and * TCP header, each descriptor should describe one or more * complete headers; descriptors referencing only parts of headers * are not supported. If all layer headers are not coalesced into * a single buffer, each buffer should not cross a 4KB boundary, * or be larger than the maximum read request size. * The controller also requires modifying the IP/TCP header to make * TSO work, so we first get a writable mbuf chain, then coalesce the * ethernet/IP/TCP header into a single buffer to meet the * controller's requirement. This also simplifies IP/TCP/UDP checksum * offloading, which has similar restrictions. */ if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { if (do_tso || (m_head->m_next != NULL && m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) { if (M_WRITABLE(*m_headp) == 0) { m_head = m_dup(*m_headp, M_DONTWAIT); m_freem(*m_headp); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } *m_headp = m_head; } } /* * XXX * Assume IPv4, we don't have TSO/checksum offload support * for IPv6 yet.
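* The m_pullup() calls that follow make the ethernet, IP and TCP/UDP
* headers contiguous in the first mbuf so the fields can be patched
* in place before the chain is DMA mapped.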
ip_off = sizeof(struct ether_header); m_head = m_pullup(m_head, ip_off); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } eh = mtod(m_head, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m_head = m_pullup(m_head, ip_off); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } m_head = m_pullup(m_head, ip_off + sizeof(struct ip)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); if (do_tso) { m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } tp = (struct tcphdr *)(mtod(m_head, char *) + poff); /* * TSO workaround: * pull 4 bytes of data beyond the TCP header into the * contiguous header mbuf. */ m_head = m_pullup(m_head, poff + (tp->th_off << 2) + 4); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ip_off); ip->ip_len = 0; ip->ip_sum = 0; /* * The TCP pseudo checksum must not include the TCP payload * length, so the driver recomputes it here to match what the * hardware expects to see, in adherence to Microsoft's Large * Send specification. */ tp = (struct tcphdr *)(mtod(m_head, char *) + poff); tp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) { m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } tp = (struct tcphdr *)(mtod(m_head, char *) + poff); m_head = m_pullup(m_head, poff + (tp->th_off << 2)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ip_off); tp = (struct tcphdr *)(mtod(m_head, char *) + poff); } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { m_head = m_pullup(m_head, poff + sizeof(struct udphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ip_off); } *m_headp = m_head; } /* * Map the packet for DMA * * Capture the first descriptor index, * this descriptor will have the index * of the EOP which is the only one that * now gets a DONE bit writeback. */ first = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[first]; tx_buffer_mapped = tx_buffer; map = tx_buffer->map; error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); /* * There are two types of errors we can (try) to handle: * - EFBIG means the mbuf chain was too long and bus_dma ran * out of segments. Defragment the mbuf chain and try again. * - ENOMEM means bus_dma could not obtain enough bounce buffers * at this point in time. Defer sending and try again later. * All other errors, in particular EINVAL, are fatal and prevent the * mbuf chain from ever going through. Drop it and report error. */ if (error == EFBIG && remap) { struct mbuf *m; m = m_defrag(*m_headp, M_DONTWAIT); if (m == NULL) { adapter->mbuf_alloc_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again, but only once */ remap = 0; goto retry; } else if (error == ENOMEM) { adapter->no_tx_dma_setup++; return (error); } else if (error != 0) { adapter->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } /* * TSO Hardware workaround, if this packet is not * TSO, and is only a single descriptor long, and * it follows a TSO burst, then we need to add a * sentinel descriptor to prevent premature writeback.
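* (The sentinel is created in the descriptor loop below by splitting
* 4 bytes off the final segment into a tiny trailing descriptor.)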
*/ if ((do_tso == 0) && (txr->tx_tso == TRUE)) { if (nsegs == 1) tso_desc = TRUE; txr->tx_tso = FALSE; } if (nsegs > (txr->tx_avail - 2)) { txr->no_desc_avail++; bus_dmamap_unload(txr->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* Do hardware assists */ if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { em_tso_setup(txr, m_head, ip_off, ip, tp, &txd_upper, &txd_lower); /* we need to make a final sentinel transmit desc */ tso_desc = TRUE; } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) em_transmit_checksum_setup(txr, m_head, ip_off, ip, &txd_upper, &txd_lower); if (m_head->m_flags & M_VLANTAG) { /* Set the vlan id. */ txd_upper |= (htole16(m_head->m_pkthdr.ether_vtag) << 16); /* Tell hardware to add tag */ txd_lower |= htole32(E1000_TXD_CMD_VLE); } i = txr->next_avail_desc; /* Set up our transmit descriptors */ for (j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; tx_buffer = &txr->tx_buffers[i]; ctxd = &txr->tx_base[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; /* ** TSO Workaround: ** If this is the last descriptor, we want to ** split it so we have a small final sentinel */ if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) { seg_len -= 4; ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); if (++i == adapter->num_tx_desc) i = 0; /* Now make the sentinel */ ++txd_used; /* using an extra txd */ ctxd = &txr->tx_base[i]; tx_buffer = &txr->tx_buffers[i]; ctxd->buffer_addr = htole64(seg_addr + seg_len); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | 4); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } else { ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; } txr->next_avail_desc = i; txr->tx_avail -= nsegs; if (tso_desc) /* TSO used an extra for sentinel */ txr->tx_avail -= txd_used; tx_buffer->m_head = m_head; /* ** Here we swap the map so the last descriptor, ** which gets the completion interrupt has the ** real map, and the first descriptor gets the ** unused map from this descriptor. */ tx_buffer_mapped->map = tx_buffer->map; tx_buffer->map = map; bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); /* * Keep track in the first buffer which * descriptor will be written back */ tx_buffer = &txr->tx_buffers[first]; tx_buffer->next_eop = last; /* Update the watchdog time early and often */ txr->watchdog_time = ticks; /* * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 * that this frame is available to transmit. 
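* The hardware fetches descriptors up to, but not including, the
* index written to TDT, so writing the next free slot hands it
* everything queued so far.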
*/ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); return (0); } static void em_set_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; u32 reg_rctl; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (em_debug_sbp) reg_rctl |= E1000_RCTL_SBP; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= E1000_RCTL_MPE; reg_rctl &= ~E1000_RCTL_UPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } } static void em_disable_promisc(struct adapter *adapter) { u32 reg_rctl; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= (~E1000_RCTL_UPE); reg_rctl &= (~E1000_RCTL_MPE); reg_rctl &= (~E1000_RCTL_SBP); E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void em_set_multi(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct ifmultiaddr *ifma; u32 reg_rctl = 0; u8 *mta; /* Multicast array memory */ int mcnt = 0; IOCTL_DEBUGOUT("em_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_clear_mwi(&adapter->hw); reg_rctl |= E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); } #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= ~E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(&adapter->hw); } } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. 
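* It reschedules itself once a second via callout_reset(..., hz, ...)
* and also drives the TX watchdog check on each ring.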
* **********************************************************************/ static void em_local_timer(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; u32 trigger; EM_CORE_LOCK_ASSERT(adapter); em_update_link_status(adapter); em_update_stats_counters(adapter); /* Reset LAA into RAR[0] on 82571 */ if ((adapter->hw.mac.type == e1000_82571) && e1000_get_laa_state_82571(&adapter->hw)) e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* Mask to use in the irq trigger */ if (adapter->msix_mem) trigger = rxr->ims; /* RX for 82574 */ else trigger = E1000_ICS_RXDMT0; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { if ((txr->queue_status == EM_QUEUE_HUNG) && (adapter->pause_frames == 0)) goto hung; /* Schedule a TX tasklet if needed */ if (txr->tx_avail <= EM_MAX_SCATTER) taskqueue_enqueue(txr->tq, &txr->tx_task); } adapter->pause_frames = 0; callout_reset(&adapter->timer, hz, em_local_timer, adapter); #ifndef DEVICE_POLLING /* Trigger an RX interrupt to guarantee mbuf refresh */ E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger); #endif return; hung: /* Looks like we're hung */ device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); device_printf(adapter->dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); device_printf(adapter->dev,"TX(%d) desc avail = %d," "Next TX to Clean = %d\n", txr->me, txr->tx_avail, txr->next_to_clean); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; adapter->pause_frames = 0; em_init_locked(adapter); } static void em_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; u32 link_check = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; if (link_check) /* ESB2 fix */ e1000_cfg_on_link_up(hw); } else link_check = TRUE; break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; default: case e1000_media_type_unknown: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); /* Check if we must disable SPEED_MODE bit on PCI-E */ if ((adapter->link_speed != SPEED_1000) && ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572))) { int tarc0; tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); tarc0 &= ~SPEED_MODE_BIT; E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); } if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? 
"Full Duplex" : "Half Duplex")); adapter->link_active = 1; adapter->smartspeed = 0; ifp->if_baudrate = adapter->link_speed * 1000000; if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { ifp->if_baudrate = adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); adapter->link_active = 0; /* Link down, disable watchdog */ for (int i = 0; i < adapter->num_queues; i++, txr++) txr->queue_status = EM_QUEUE_IDLE; if_link_state_change(ifp, LINK_STATE_DOWN); } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * * This routine should always be called with BOTH the CORE * and TX locks. **********************************************************************/ static void em_stop(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; EM_CORE_LOCK_ASSERT(adapter); INIT_DEBUGOUT("em_stop: begin"); em_disable_intr(adapter); callout_stop(&adapter->timer); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Unarm watchdog timer. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); txr->queue_status = EM_QUEUE_IDLE; EM_TX_UNLOCK(txr); } e1000_reset_hw(&adapter->hw); E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void em_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) { device_printf(dev, "Memory Access and/or Bus Master bits " "were not set!\n"); adapter->hw.bus.pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); pci_write_config(dev, PCIR_COMMAND, adapter->hw.bus.pci_cmd_word, 2); } /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (e1000_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int em_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int rid; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; /* Default to a single queue */ adapter->num_queues = 1; /* * Setup MSI/X or MSI if PCI Express */ adapter->msix = em_setup_msix(adapter); adapter->hw.back = &adapter->osdep; return (0); } 
/********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ int em_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; int error, rid = 0; /* Manually turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); if (adapter->msix == 1) /* using MSI */ rid = 1; /* We allocate a single interrupt resource */ adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } /* * Allocate a fast interrupt and the associated * deferred processing contexts. */ TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter); adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT, taskqueue_thread_enqueue, &adapter->tq); taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s que", device_get_nameunit(adapter->dev)); /* Use a TX-only tasklet for the local timer */ TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, taskqueue_thread_enqueue, &txr->tq); taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq", device_get_nameunit(adapter->dev)); TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter); if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET, em_irq_fast, NULL, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(adapter->tq); adapter->tq = NULL; return (error); } return (0); } /********************************************************************* * * Setup the MSIX Interrupt handlers * This is not really Multiqueue, rather * it's just separate interrupt vectors * for TX, RX, and Link.
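* With the single queue used on the 82574 this consumes three
* vectors, one each for RX, TX and link, matching the count
* requested in em_setup_msix().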
* **********************************************************************/ int em_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; int error, rid, vector = 0; /* Make sure all interrupts are disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* First set up ring resources */ for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { /* RX ring */ rid = vector + 1; rxr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (rxr->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "RX MSIX Interrupt %d\n", i); return (ENXIO); } if ((error = bus_setup_intr(dev, rxr->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, rxr, &rxr->tag)) != 0) { device_printf(dev, "Failed to register RX handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, rxr->res, rxr->tag, "rx %d", i); #endif rxr->msix = vector++; /* NOTE increment vector for TX */ TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr); rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT, taskqueue_thread_enqueue, &rxr->tq); taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq", device_get_nameunit(adapter->dev)); /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 20 and 21 ** are for RX0 and RX1, note this has ** NOTHING to do with the MSIX vector */ rxr->ims = 1 << (20 + i); adapter->ivars |= (8 | rxr->msix) << (i * 4); /* TX ring */ rid = vector + 1; txr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (txr->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "TX MSIX Interrupt %d\n", i); return (ENXIO); } if ((error = bus_setup_intr(dev, txr->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, txr, &txr->tag)) != 0) { device_printf(dev, "Failed to register TX handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, txr->res, txr->tag, "tx %d", i); #endif txr->msix = vector++; /* Increment vector for next pass */ TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, taskqueue_thread_enqueue, &txr->tq); taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq", device_get_nameunit(adapter->dev)); /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 22 and 23 ** are for TX0 and TX1, note this has ** NOTHING to do with the MSIX vector */ txr->ims = 1 << (22 + i); adapter->ivars |= (8 | txr->msix) << (8 + (i * 4)); } /* Link interrupt */ ++rid; adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!adapter->res) { device_printf(dev,"Unable to allocate " "bus resource: Link interrupt [%d]\n", rid); return (ENXIO); } /* Set the link handler function */ error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter, &adapter->tag); if (error) { adapter->res = NULL; device_printf(dev, "Failed to register LINK handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif adapter->linkvec = vector; adapter->ivars |= (8 | vector) << 16; adapter->ivars |= 0x80000000; return (0); } static void em_free_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr; struct rx_ring *rxr; int rid; /* ** Release all the queue interrupt resources: */ for (int i = 0; i < adapter->num_queues; i++) { txr = &adapter->tx_rings[i]; rxr = &adapter->rx_rings[i]; /* an early abort? 
*/ if ((txr == NULL) || (rxr == NULL)) break; rid = txr->msix +1; if (txr->tag != NULL) { bus_teardown_intr(dev, txr->res, txr->tag); txr->tag = NULL; } if (txr->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res); rid = rxr->msix +1; if (rxr->tag != NULL) { bus_teardown_intr(dev, rxr->res, rxr->tag); rxr->tag = NULL; } if (rxr->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res); } if (adapter->linkvec) /* we are doing MSIX */ rid = adapter->linkvec + 1; else (adapter->msix != 0) ? (rid = 1):(rid = 0); if (adapter->tag != NULL) { bus_teardown_intr(dev, adapter->res, adapter->tag); adapter->tag = NULL; } if (adapter->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); if (adapter->msix) pci_release_msi(dev); if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); if (adapter->memory != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->memory); if (adapter->flash != NULL) bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH, adapter->flash); } /* * Setup MSI or MSI/X */ static int em_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int val = 0; /* ** Setup MSI/X for Hartwell: tests have shown ** use of two queues to be unstable, and to ** provide no great gain anyway, so we simply ** separate the interrupts and use a single queue. */ if ((adapter->hw.mac.type == e1000_82574) && (em_enable_msix == TRUE)) { /* Map the MSIX BAR */ int rid = PCIR_BAR(EM_MSIX_BAR); adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!adapter->msix_mem) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table\n"); goto msi; } val = pci_msix_count(dev); /* We only need 3 vectors */ if (val > 3) val = 3; if ((val != 3) && (val != 5)) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); adapter->msix_mem = NULL; device_printf(adapter->dev, "MSIX: incorrect vectors, using MSI\n"); goto msi; } if (pci_alloc_msix(dev, &val) == 0) { device_printf(adapter->dev, "Using MSIX interrupts " "with %d vectors\n", val); } return (val); } msi: val = pci_msi_count(dev); if (val == 1 && pci_alloc_msi(dev, &val) == 0) { adapter->msix = 1; device_printf(adapter->dev,"Using an MSI interrupt\n"); return (val); } /* Should only happen due to manual configuration */ device_printf(adapter->dev,"No MSI/MSIX, using a Legacy IRQ\n"); return (0); } /********************************************************************* * * Initialize the hardware to a configuration * as specified by the adapter structure. * **********************************************************************/ static void em_reset(struct adapter *adapter) { device_t dev = adapter->dev; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u16 rx_buffer_size; u32 pba; INIT_DEBUGOUT("em_reset: begin"); /* Set up smart power down as default off on newer adapters. */ if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572)) { u16 phy_tmp = 0; /* Speed up time to link by disabling smart power down. */ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp); phy_tmp &= ~IGP02E1000_PM_SPD; e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp); } /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer, * the remainder is used for the transmit buffer.
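* For example, on the 82571 (48K total) the E1000_PBA_32K setting
* below leaves 32K for receive and 16K for transmit.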
*/ switch (hw->mac.type) { /* Total Packet Buffer on these is 48K */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ break; case e1000_82573: /* 82573: Total Packet Buffer is 32K */ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ break; case e1000_82574: case e1000_82583: pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ break; case e1000_ich8lan: pba = E1000_PBA_8K; break; case e1000_ich9lan: case e1000_ich10lan: /* Boost Receive side for jumbo frames */ if (adapter->max_frame_size > 4096) pba = E1000_PBA_14K; else pba = E1000_PBA_10K; break; case e1000_pchlan: case e1000_pch2lan: pba = E1000_PBA_26K; break; default: if (adapter->max_frame_size > 8192) pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ else pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ } E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitrary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total size reduces the buffer * by 1500. * - The pause time is fairly large at 1000 x 512ns = 512 usec. */ rx_buffer_size = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10 ); hw->fc.high_water = rx_buffer_size - roundup2(adapter->max_frame_size, 1024); hw->fc.low_water = hw->fc.high_water - 1500; if (adapter->fc) /* locally set flow control value? */ hw->fc.requested_mode = adapter->fc; else hw->fc.requested_mode = e1000_fc_full; if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; else hw->fc.pause_time = EM_FC_PAUSE_TIME; hw->fc.send_xon = TRUE; /* Device specific overrides/settings */ switch (hw->mac.type) { case e1000_pchlan: /* Workaround: no TX flow ctrl for PCH */ hw->fc.requested_mode = e1000_fc_rx_pause; hw->fc.pause_time = 0xFFFF; /* override */ if (ifp->if_mtu > ETHERMTU) { hw->fc.high_water = 0x3500; hw->fc.low_water = 0x1500; } else { hw->fc.high_water = 0x5000; hw->fc.low_water = 0x3000; } hw->fc.refresh_time = 0x1000; break; case e1000_pch2lan: hw->fc.high_water = 0x5C20; hw->fc.low_water = 0x5048; hw->fc.pause_time = 0x0650; hw->fc.refresh_time = 0x0400; /* Jumbos need adjusted PBA */ if (ifp->if_mtu > ETHERMTU) E1000_WRITE_REG(hw, E1000_PBA, 12); else E1000_WRITE_REG(hw, E1000_PBA, 26); break; case e1000_ich9lan: case e1000_ich10lan: if (ifp->if_mtu > ETHERMTU) { hw->fc.high_water = 0x2800; hw->fc.low_water = hw->fc.high_water - 8; break; } /* else fall thru */ default: if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; break; } /* Issue a global reset */ e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); em_disable_aspm(adapter); /* and a re-init */ if (e1000_init_hw(hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return; } E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); return; } /********************************************************************* * * Setup networking device structure and register an interface.
* **********************************************************************/ static int em_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("em_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = em_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = em_ioctl; #ifdef EM_MULTIQUEUE /* Multiqueue stack interface */ ifp->if_transmit = em_mq_start; ifp->if_qflush = em_qflush; #else ifp->if_start = em_start; IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; IFQ_SET_READY(&ifp->if_snd); #endif ether_ifattach(ifp, adapter->hw.mac.addr); ifp->if_capabilities = ifp->if_capenable = 0; ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_TSO4; /* * Tell the upper layer(s) we * support full VLAN capability */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the em driver you can ** enable this and get full hardware tag filtering. */ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* Enable only WOL MAGIC by default */ if (adapter->wol) { ifp->if_capabilities |= IFCAP_WOL; ifp->if_capenable |= IFCAP_WOL_MAGIC; } /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, em_media_change, em_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { u_char fiber_type = IFM_1000_SX; /* default type */ ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* * Manage DMA'able memory. 
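* em_dmamap_cb() is the callback handed to bus_dmamap_load(); it
* simply captures the physical address of the lone segment so that
* em_dma_malloc() can record it in dma->dma_paddr.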
*/ static void em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma, int mapflags) { int error; error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ EM_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); goto fail_2; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (error); } static void em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_map != NULL) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_map = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate memory for the transmit and receive rings, and then * the descriptors associated with each, called only once at attach. 
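* The txconf/rxconf counters below track how many rings have been
* set up, so a mid-course failure can unwind exactly the ones that
* were allocated.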
* **********************************************************************/ static int em_allocate_queues(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = NULL; struct rx_ring *rxr = NULL; int rsize, tsize, error = E1000_SUCCESS; int txconf = 0, rxconf = 0; /* Allocate the TX ring struct memory */ if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN); /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_queues; i++, txconf++) { /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); if (em_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; } txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; bzero((void *)txr->tx_base, tsize); if (em_allocate_transmit_buffers(txr)) { device_printf(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; } #if __FreeBSD_version >= 800000 /* Allocate a buf ring */ txr->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &txr->tx_mtx); #endif } /* * Next the RX queues... */ rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN); for (int i = 0; i < adapter->num_queues; i++, rxconf++) { rxr = &adapter->rx_rings[i]; rxr->adapter = adapter; rxr->me = i; /* Initialize the RX lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); if (em_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate RxDescriptor memory\n"); error = ENOMEM; goto err_rx_desc; } rxr->rx_base = (struct e1000_rx_desc *)rxr->rxdma.dma_vaddr; bzero((void *)rxr->rx_base, rsize); /* Allocate receive buffers for the ring*/ if (em_allocate_receive_buffers(rxr)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); error = ENOMEM; goto err_rx_desc; } } return (0); err_rx_desc: for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) em_dma_free(adapter, &rxr->rxdma); err_tx_desc: for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) em_dma_free(adapter, &txr->txdma); free(adapter->rx_rings, M_DEVBUF); rx_fail: #if __FreeBSD_version >= 800000 buf_ring_free(txr->br, M_DEVBUF); #endif free(adapter->tx_rings, M_DEVBUF); fail: return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. 
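* The DMA tag created here allows up to EM_MAX_SCATTER segments of
* at most PAGE_SIZE each, with EM_TSO_SIZE as the total mapping
* limit to accommodate a full TSO payload.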
* **********************************************************************/ static int em_allocate_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; device_t dev = adapter->dev; struct em_buffer *txbuf; int error, i; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ EM_TSO_SIZE, /* maxsize */ EM_MAX_SCATTER, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->txtag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); goto fail; } if (!(txr->tx_buffers = (struct em_buffer *) malloc(sizeof(struct em_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } return 0; fail: /* We free all, it handles case where we are in the middle */ em_free_transmit_structures(adapter); return (error); } /********************************************************************* * * Initialize a transmit ring. * **********************************************************************/ static void em_setup_transmit_ring(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct em_buffer *txbuf; int i; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ /* Clear the old descriptor contents */ EM_TX_LOCK(txr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_TX, txr->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ txr->next_avail_desc = 0; txr->next_to_clean = 0; /* Free any existing tx buffers. */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); uint64_t paddr; void *addr; addr = PNMB(slot + si, &paddr); txr->tx_base[i].buffer_addr = htole64(paddr); /* reload the map for netmap mode */ netmap_load_map(txr->txtag, txbuf->map, addr); } #endif /* DEV_NETMAP */ /* clear the watch index */ txbuf->next_eop = -1; } /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; txr->queue_status = EM_QUEUE_IDLE; /* Clear checksum offload context. */ txr->last_hw_offload = 0; txr->last_hw_ipcss = 0; txr->last_hw_ipcso = 0; txr->last_hw_tucss = 0; txr->last_hw_tucso = 0; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); EM_TX_UNLOCK(txr); } /********************************************************************* * * Initialize all transmit rings. 
* **********************************************************************/ static void em_setup_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) em_setup_transmit_ring(txr); return; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void em_initialize_transmit_unit(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct e1000_hw *hw = &adapter->hw; u32 tctl, tarc, tipg = 0; INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 bus_addr = txr->txdma.dma_paddr; /* Base and Len of TX Ring */ E1000_WRITE_REG(hw, E1000_TDLEN(i), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (u32)bus_addr); /* Init the HEAD/TAIL indices */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)), E1000_READ_REG(&adapter->hw, E1000_TDLEN(i))); txr->queue_status = EM_QUEUE_IDLE; } /* Set the default values for the Tx Inter Packet Gap timer */ switch (adapter->hw.mac.type) { case e1000_80003es2lan: tipg = DEFAULT_82543_TIPG_IPGR1; tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else tipg = DEFAULT_82543_TIPG_IPGT_COPPER; tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value); if(adapter->hw.mac.type >= e1000_82540) E1000_WRITE_REG(&adapter->hw, E1000_TADV, adapter->tx_abs_int_delay.value); if ((adapter->hw.mac.type == e1000_82571) || (adapter->hw.mac.type == e1000_82572)) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= SPEED_MODE_BIT; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } else if (adapter->hw.mac.type == e1000_80003es2lan) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } adapter->txd_cmd = E1000_TXD_CMD_IFCS; if (adapter->tx_int_delay.value > 0) adapter->txd_cmd |= E1000_TXD_CMD_IDE; /* Program the Transmit Control Register */ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); if (adapter->hw.mac.type >= e1000_82571) tctl |= E1000_TCTL_MULR; /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); } /********************************************************************* * * Free all transmit rings. 
* **********************************************************************/ static void em_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); em_free_transmit_buffers(txr); em_dma_free(adapter, &txr->txdma); EM_TX_UNLOCK(txr); EM_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ static void em_free_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct em_buffer *txbuf; INIT_DEBUGOUT("free_transmit_ring: begin"); if (txr->tx_buffers == NULL) return; for (int i = 0; i < adapter->num_tx_desc; i++) { txbuf = &txr->tx_buffers[i]; if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; if (txbuf->map != NULL) { bus_dmamap_destroy(txr->txtag, txbuf->map); txbuf->map = NULL; } } else if (txbuf->map != NULL) { bus_dmamap_unload(txr->txtag, txbuf->map); bus_dmamap_destroy(txr->txtag, txbuf->map); txbuf->map = NULL; } } #if __FreeBSD_version >= 800000 if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif if (txr->tx_buffers != NULL) { free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } if (txr->txtag != NULL) { bus_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; } /********************************************************************* * The offload context is protocol specific (TCP/UDP) and thus * only needs to be set when the protocol changes. The occasion * of a context change can be a performance detriment, and * might be better just disabled. The reason arises in the way * in which the controller supports pipelined requests from the * Tx data DMA. Up to four requests can be pipelined, and they may * belong to the same packet or to multiple packets. However, all * requests for one packet are issued before a request is issued * for a subsequent packet, and if a request for the next packet * requires a context change, that request will be stalled * until the previous request completes. This means setting up * a new context effectively disables pipelined Tx data DMA, which * in turn greatly slows down performance when sending small * frames. **********************************************************************/ static void em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off, struct ip *ip, u32 *txd_upper, u32 *txd_lower) { struct adapter *adapter = txr->adapter; struct e1000_context_desc *TXD = NULL; struct em_buffer *tx_buffer; int cur, hdr_len; u32 cmd = 0; u16 offload = 0; u8 ipcso, ipcss, tucso, tucss; ipcss = ipcso = tucss = tucso = 0; hdr_len = ip_off + (ip->ip_hl << 2); cur = txr->next_avail_desc; /* Setup of IP header checksum. */ if (mp->m_pkthdr.csum_flags & CSUM_IP) { *txd_upper |= E1000_TXD_POPTS_IXSM << 8; offload |= CSUM_IP; ipcss = ip_off; ipcso = ip_off + offsetof(struct ip, ip_sum); /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum.
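* (For an untagged IPv4 frame ip_off is 14, so ipcss = 14, ipcso =
* 24 because offsetof(struct ip, ip_sum) is 10, and ipcse = hdr_len
* = 34 for a 20-byte IP header.)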
*/ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->lower_setup.ip_fields.ipcss = ipcss; TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len); TXD->lower_setup.ip_fields.ipcso = ipcso; cmd |= E1000_TXD_CMD_IP; } if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; offload |= CSUM_TCP; tucss = hdr_len; tucso = hdr_len + offsetof(struct tcphdr, th_sum); /* * Setting up a new checksum offload context for every frame * takes a lot of processing time for hardware. This also * reduces performance a lot for small-sized frames, so avoid * it if the driver can reuse a previously configured checksum * offload context. */ if (txr->last_hw_offload == offload) { if (offload & CSUM_IP) { if (txr->last_hw_ipcss == ipcss && txr->last_hw_ipcso == ipcso && txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } else { if (txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } } txr->last_hw_offload = offload; txr->last_hw_tucss = tucss; txr->last_hw_tucso = tucso; /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->upper_setup.tcp_fields.tucss = hdr_len; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; cmd |= E1000_TXD_CMD_TCP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; tucss = hdr_len; tucso = hdr_len + offsetof(struct udphdr, uh_sum); /* * Setting up a new checksum offload context for every frame * takes a lot of processing time for hardware. This also * reduces performance a lot for small-sized frames, so avoid * it if the driver can reuse a previously configured checksum * offload context. */ if (txr->last_hw_offload == offload) { if (offload & CSUM_IP) { if (txr->last_hw_ipcss == ipcss && txr->last_hw_ipcso == ipcso && txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } else { if (txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } } txr->last_hw_offload = offload; txr->last_hw_tucss = tucss; txr->last_hw_tucso = tucso; /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->upper_setup.tcp_fields.tucss = tucss; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; } if (offload & CSUM_IP) { txr->last_hw_ipcss = ipcss; txr->last_hw_ipcso = ipcso; } TXD->tcp_seg_setup.data = htole32(0); TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd); tx_buffer = &txr->tx_buffers[cur]; tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++cur == adapter->num_tx_desc) cur = 0; txr->tx_avail--; txr->next_avail_desc = cur; } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) * **********************************************************************/ static void em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off, struct ip *ip, struct tcphdr *tp, u32 *txd_upper, u32 *txd_lower) { struct adapter *adapter = txr->adapter; struct e1000_context_desc *TXD; struct em_buffer *tx_buffer; int cur, hdr_len; /* * In theory we can use the same TSO context if and only if * the frame is the same type (IP/TCP) and has the same MSS.
However * checking whether a frame has the same IP/TCP structure is * a hard thing, so just ignore that and always re-establish a * new TSO context. */ hdr_len = ip_off + (ip->ip_hl << 2) + (tp->th_off << 2); *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */ E1000_TXD_DTYP_D | /* Data descr type */ E1000_TXD_CMD_TSE); /* Do TSE on this packet */ /* IP and/or TCP header checksum calculation and insertion. */ *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; cur = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[cur]; TXD = (struct e1000_context_desc *) &txr->tx_base[cur]; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD->lower_setup.ip_fields.ipcss = ip_off; TXD->lower_setup.ip_fields.ipcse = htole16(ip_off + (ip->ip_hl << 2) - 1); TXD->lower_setup.ip_fields.ipcso = ip_off + offsetof(struct ip, ip_sum); /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD->upper_setup.tcp_fields.tucss = ip_off + (ip->ip_hl << 2); TXD->upper_setup.tcp_fields.tucse = 0; TXD->upper_setup.tcp_fields.tucso = ip_off + (ip->ip_hl << 2) + offsetof(struct tcphdr, th_sum); /* * Payload size per packet w/o any headers. * Length of all headers up to payload. */ TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz); TXD->tcp_seg_setup.fields.hdr_len = hdr_len; TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | /* Extended descr */ E1000_TXD_CMD_TSE | /* TSE context */ E1000_TXD_CMD_IP | /* Do IP csum */ E1000_TXD_CMD_TCP | /* Do TCP checksum */ (mp->m_pkthdr.len - (hdr_len))); /* Total len */ tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++cur == adapter->num_tx_desc) cur = 0; txr->tx_avail--; txr->next_avail_desc = cur; txr->tx_tso = TRUE; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void em_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; int first, last, done, processed; struct em_buffer *tx_buffer; struct e1000_tx_desc *tx_desc, *eop_desc; struct ifnet *ifp = adapter->ifp; EM_TX_LOCK_ASSERT(txr); #ifdef DEV_NETMAP if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(ifp); selwakeuppri(&na->tx_rings[txr->me].si, PI_NET); EM_TX_UNLOCK(txr); EM_CORE_LOCK(adapter); selwakeuppri(&na->tx_si, PI_NET); EM_CORE_UNLOCK(adapter); EM_TX_LOCK(txr); return; } #endif /* DEV_NETMAP */ /* No work, make sure watchdog is off */ if (txr->tx_avail == adapter->num_tx_desc) { txr->queue_status = EM_QUEUE_IDLE; return; } processed = 0; first = txr->next_to_clean; tx_desc = &txr->tx_base[first]; tx_buffer = &txr->tx_buffers[first]; last = tx_buffer->next_eop; eop_desc = &txr->tx_base[last]; /* * What this does is get the index of the * first descriptor AFTER the EOP of the * first packet, that way we can do the * simple comparison on the inner while loop.
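 * A worked example with hypothetical numbers: if num_tx_desc is 256, next_to_clean is 250 and the first packet's next_eop is 255, then 'done' becomes 0 and the inner loop cleans descriptors 250 through 255 before the wrap-around comparison stops it.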
*/ if (++last == adapter->num_tx_desc) last = 0; done = last; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { /* We clean the range of the packet */ while (first != done) { tx_desc->upper.data = 0; tx_desc->lower.data = 0; tx_desc->buffer_addr = 0; ++txr->tx_avail; ++processed; if (tx_buffer->m_head) { bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } tx_buffer->next_eop = -1; txr->watchdog_time = ticks; if (++first == adapter->num_tx_desc) first = 0; tx_buffer = &txr->tx_buffers[first]; tx_desc = &txr->tx_base[first]; } ++ifp->if_opackets; /* See if we can continue to the next packet */ last = tx_buffer->next_eop; if (last != -1) { eop_desc = &txr->tx_base[last]; /* Get new done point */ if (++last == adapter->num_tx_desc) last = 0; done = last; } else break; } bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* ** Watchdog calculation, we know there's ** work outstanding or the first return ** would have been taken, so none processed ** for too long indicates a hang. local timer ** will examine this and do a reset if needed. */ if ((!processed) && ((ticks - txr->watchdog_time) > EM_WATCHDOG)) txr->queue_status = EM_QUEUE_HUNG; /* * If we have a minimum free, clear IFF_DRV_OACTIVE * to tell the stack that it is OK to send packets. * Notice that all writes of OACTIVE happen under the * TX lock which, with a single queue, guarantees * sanity. */ if (txr->tx_avail >= EM_MAX_SCATTER) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Disable watchdog if all clean */ if (txr->tx_avail == adapter->num_tx_desc) { txr->queue_status = EM_QUEUE_IDLE; } } /********************************************************************* * * Refresh RX descriptor mbufs from system mbuf buffer pool. * **********************************************************************/ static void em_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; struct mbuf *m; bus_dma_segment_t segs[1]; struct em_buffer *rxbuf; int i, j, error, nsegs; bool cleaned = FALSE; i = j = rxr->next_to_refresh; /* ** Get one descriptor beyond ** our work mark to control ** the loop. */ if (++j == adapter->num_rx_desc) j = 0; while (j != limit) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head == NULL) { m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); /* ** If we have a temporary resource shortage ** that causes a failure, just abort refresh ** for now, we will return to this point when ** reinvoked from em_rxeof. 
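** (Index bookkeeping, with hypothetical numbers: starting from ** next_to_refresh == 10 with limit == 13, buffers 10 and 11 are ** reloaded, next_to_refresh ends at 12, and RDT is written with ** 12 below -- always one short of the limit handed in.)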
*/ if (m == NULL) goto update; } else m = rxbuf->m_head; m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz; m->m_flags |= M_PKTHDR; m->m_data = m->m_ext.ext_buf; /* Use bus_dma machinery to setup the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(m); rxbuf->m_head = NULL; goto update; } rxbuf->m_head = m; bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); rxr->rx_base[i].buffer_addr = htole64(segs[0].ds_addr); cleaned = TRUE; i = j; /* Next is precalculated for us */ rxr->next_to_refresh = i; /* Calculate next controlling index */ if (++j == adapter->num_rx_desc) j = 0; } update: /* ** Update the tail pointer only if, ** and as far as, we have refreshed. */ if (cleaned) E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->next_to_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. * **********************************************************************/ static int em_allocate_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; device_t dev = adapter->dev; struct em_buffer *rxbuf; int error; rxr->rx_buffers = malloc(sizeof(struct em_buffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); if (rxr->rx_buffers == NULL) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); return (ENOMEM); } error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &rxr->rxtag); if (error) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, error); goto fail; } rxbuf = rxr->rx_buffers; for (int i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { rxbuf = &rxr->rx_buffers[i]; error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT, &rxbuf->map); if (error) { device_printf(dev, "%s: bus_dmamap_create failed: %d\n", __func__, error); goto fail; } } return (0); fail: em_free_receive_structures(adapter); return (error); } /********************************************************************* * * Initialize a receive ring and its buffers.
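 * (Sizing note, assuming the usual defaults: 1024 descriptors of 16 bytes each make rsize below 16384 bytes, already a multiple of EM_DBA_ALIGN, so roundup2() leaves it unchanged.)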
* **********************************************************************/ static int em_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct em_buffer *rxbuf; bus_dma_segment_t seg[1]; - int rsize, nsegs, error; + int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* Clear the ring contents */ EM_RX_LOCK(rxr); rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_RX, 0, 0); #endif /* ** Free current RX buffer structs and their mbufs */ for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); m_freem(rxbuf->m_head); rxbuf->m_head = NULL; /* mark as freed */ } } /* Now replenish the mbufs */ for (int j = 0; j != adapter->num_rx_desc; ++j) { rxbuf = &rxr->rx_buffers[j]; #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->rx_rings[rxr->me], j); uint64_t paddr; void *addr; addr = PNMB(slot + si, &paddr); netmap_load_map(rxr->rxtag, rxbuf->map, addr); /* Update descriptor */ rxr->rx_base[j].buffer_addr = htole64(paddr); continue; } #endif /* DEV_NETMAP */ rxbuf->m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (rxbuf->m_head == NULL) { error = ENOBUFS; goto fail; } rxbuf->m_head->m_len = adapter->rx_mbuf_sz; rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */ rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map, rxbuf->m_head, seg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(rxbuf->m_head); rxbuf->m_head = NULL; goto fail; } bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->rx_base[j].buffer_addr = htole64(seg[0].ds_addr); } rxr->next_to_check = 0; rxr->next_to_refresh = 0; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fail: EM_RX_UNLOCK(rxr); return (error); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int em_setup_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; int q; for (q = 0; q < adapter->num_queues; q++, rxr++) if (em_setup_receive_ring(rxr)) goto fail; return (0); fail: /* * Free RX buffers allocated so far; we will only handle * the rings that completed, since the failing case will have * cleaned up for itself. 'q' failed, so it's the terminus. */ for (int i = 0; i < q; ++i) { rxr = &adapter->rx_rings[i]; for (int n = 0; n < adapter->num_rx_desc; n++) { struct em_buffer *rxbuf; rxbuf = &rxr->rx_buffers[n]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); m_freem(rxbuf->m_head); rxbuf->m_head = NULL; } } rxr->next_to_check = 0; rxr->next_to_refresh = 0; } return (ENOBUFS); } /********************************************************************* * * Free all receive rings.
* **********************************************************************/ static void em_free_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { em_free_receive_buffers(rxr); /* Free the ring memory as well */ em_dma_free(adapter, &rxr->rxdma); EM_RX_LOCK_DESTROY(rxr); } free(adapter->rx_rings, M_DEVBUF); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void em_free_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct em_buffer *rxbuf = NULL; INIT_DEBUGOUT("free_receive_buffers: begin"); if (rxr->rx_buffers != NULL) { for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->map != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); bus_dmamap_destroy(rxr->rxtag, rxbuf->map); } if (rxbuf->m_head != NULL) { m_freem(rxbuf->m_head); rxbuf->m_head = NULL; } } free(rxr->rx_buffers, M_DEVBUF); rxr->rx_buffers = NULL; rxr->next_to_check = 0; rxr->next_to_refresh = 0; } if (rxr->rxtag != NULL) { bus_dma_tag_destroy(rxr->rxtag); rxr->rxtag = NULL; } return; } /********************************************************************* * * Enable receive unit. * **********************************************************************/ #define MAX_INTS_PER_SEC 8000 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256) static void em_initialize_receive_unit(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u64 bus_addr; u32 rctl, rxcsum; INIT_DEBUGOUT("em_initialize_receive_units: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); /* Do not disable if ever enabled on this hardware */ if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); E1000_WRITE_REG(&adapter->hw, E1000_RADV, adapter->rx_abs_int_delay.value); /* * Set the interrupt throttling rate. Value is calculated * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR); /* ** When using MSIX interrupts we need to throttle ** using the EITR register (82574 only) */ if (hw->mac.type == e1000_82574) { for (int i = 0; i < 4; i++) E1000_WRITE_REG(hw, E1000_EITR_82574(i), DEFAULT_ITR); /* Disable accelerated acknowledge */ E1000_WRITE_REG(hw, E1000_RFCTL, E1000_RFCTL_ACK_DIS); } if (ifp->if_capenable & IFCAP_RXCSUM) { rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); } /* ** XXX TEMPORARY WORKAROUND: on some systems with 82573 ** long latencies are observed, like Lenovo X60. This ** change eliminates the problem, but since having positive ** values in RDTR is a known source of problems on other ** platforms another solution is being sought. 
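** (For reference, both delay registers count in hardware ticks: ** assuming the usual 1.024 usec granularity of the receive delay ** timer, the 0x20 written below is roughly 33 usec. Likewise the ** ITR value programmed above works out to 1000000000 / (8000 * 256) ** ~= 488 ticks of 256 nsec, i.e. about 8000 interrupts per second.)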
*/ if (hw->mac.type == e1000_82573) E1000_WRITE_REG(hw, E1000_RDTR, 0x20); for (int i = 0; i < adapter->num_queues; i++, rxr++) { /* Setup the Base and Length of the Rx Descriptor Ring */ bus_addr = rxr->rxdma.dma_paddr; E1000_WRITE_REG(hw, E1000_RDLEN(i), adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr); /* Setup the Head and Tail Descriptor Pointers */ E1000_WRITE_REG(hw, E1000_RDH(i), 0); #ifdef DEV_NETMAP /* * an init() while a netmap client is active must * preserve the rx buffers passed to userspace. * In this driver it means we adjust RDT to * something different from na->num_rx_desc - 1. */ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->rx_rings[i]; int t = na->num_rx_desc - 1 - kring->nr_hwavail; E1000_WRITE_REG(hw, E1000_RDT(i), t); } else #endif /* DEV_NETMAP */ E1000_WRITE_REG(hw, E1000_RDT(i), adapter->num_rx_desc - 1); } /* Set PTHRESH for improved jumbo performance */ if (((adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_pch2lan) || (adapter->hw.mac.type == e1000_ich10lan)) && (ifp->if_mtu > ETHERMTU)) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); } if (adapter->hw.mac.type == e1000_pch2lan) { if (ifp->if_mtu > ETHERMTU) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Strip the CRC */ rctl |= E1000_RCTL_SECRC; /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; rctl &= ~E1000_RCTL_SBP; if (adapter->rx_mbuf_sz == MCLBYTES) rctl |= E1000_RCTL_SZ_2048; else if (adapter->rx_mbuf_sz == MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; if (ifp->if_mtu > ETHERMTU) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; /* Write out the settings */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. 
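 * A negative count never reaches the loop's 'count != 0' test as * zero (it is only decremented per completed packet), so the ring * is drained until a descriptor without the DD status bit is seen.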
* * For polling we also now return the number of cleaned packets *********************************************************************/ static bool em_rxeof(struct rx_ring *rxr, int count, int *done) { struct adapter *adapter = rxr->adapter; struct ifnet *ifp = adapter->ifp; struct mbuf *mp, *sendmp; u8 status = 0; u16 len; int i, processed, rxdone = 0; bool eop; struct e1000_rx_desc *cur; EM_RX_LOCK(rxr); #ifdef DEV_NETMAP if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(ifp); na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR; selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET); EM_RX_UNLOCK(rxr); EM_CORE_LOCK(adapter); selwakeuppri(&na->rx_si, PI_NET); EM_CORE_UNLOCK(adapter); return (0); } #endif /* DEV_NETMAP */ for (i = rxr->next_to_check, processed = 0; count != 0;) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cur = &rxr->rx_base[i]; status = cur->status; mp = sendmp = NULL; if ((status & E1000_RXD_STAT_DD) == 0) break; len = le16toh(cur->length); eop = (status & E1000_RXD_STAT_EOP) != 0; if ((cur->errors & E1000_RXD_ERR_FRAME_ERR_MASK) || (rxr->discard == TRUE)) { ifp->if_ierrors++; ++rxr->rx_discarded; if (!eop) /* Catch subsequent segs */ rxr->discard = TRUE; else rxr->discard = FALSE; em_rx_discard(rxr, i); goto next_desc; } /* Assign correct length to the current fragment */ mp = rxr->rx_buffers[i].m_head; mp->m_len = len; /* Trigger for refresh */ rxr->rx_buffers[i].m_head = NULL; /* First segment? */ if (rxr->fmp == NULL) { mp->m_pkthdr.len = len; rxr->fmp = rxr->lmp = mp; } else { /* Chain mbufs together */ mp->m_flags &= ~M_PKTHDR; rxr->lmp->m_next = mp; rxr->lmp = mp; rxr->fmp->m_pkthdr.len += len; } if (eop) { --count; sendmp = rxr->fmp; sendmp->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; em_receive_checksum(cur, sendmp); #ifndef __NO_STRICT_ALIGNMENT if (adapter->max_frame_size > (MCLBYTES - ETHER_ALIGN) && em_fixup_rx(rxr) != 0) goto skip; #endif if (status & E1000_RXD_STAT_VP) { sendmp->m_pkthdr.ether_vtag = le16toh(cur->special); sendmp->m_flags |= M_VLANTAG; } #ifndef __NO_STRICT_ALIGNMENT skip: #endif rxr->fmp = rxr->lmp = NULL; } next_desc: /* Zero out the receive descriptor's status. */ cur->status = 0; ++rxdone; /* cumulative for POLL */ ++processed; /* Advance our pointers to the next descriptor. */ if (++i == adapter->num_rx_desc) i = 0; /* Send to the stack */ if (sendmp != NULL) { rxr->next_to_check = i; EM_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, sendmp); EM_RX_LOCK(rxr); i = rxr->next_to_check; } /* Only refresh mbufs every 8 descriptors */ if (processed == 8) { em_refresh_mbufs(rxr, i); processed = 0; } } /* Catch any remaining refresh work */ if (e1000_rx_unrefreshed(rxr)) em_refresh_mbufs(rxr, i); rxr->next_to_check = i; if (done != NULL) *done = rxdone; EM_RX_UNLOCK(rxr); return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE); } static __inline void em_rx_discard(struct rx_ring *rxr, int i) { struct em_buffer *rbuf; rbuf = &rxr->rx_buffers[i]; /* Free any previous pieces */ if (rxr->fmp != NULL) { rxr->fmp->m_flags |= M_PKTHDR; m_freem(rxr->fmp); rxr->fmp = NULL; rxr->lmp = NULL; } /* ** Free buffer and allow em_refresh_mbufs() ** to clean up and recharge buffer. */ if (rbuf->m_head) { m_free(rbuf->m_head); rbuf->m_head = NULL; } return; } #ifndef __NO_STRICT_ALIGNMENT /* * When jumbo frames are enabled we should realign the entire payload on * architectures with strict alignment.
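 * The root cause: the 14-byte Ethernet header leaves the IP header * at a 2-byte offset, so without shifting the buffer start by * ETHER_ALIGN the stack would take unaligned 32-bit loads.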
This is a serious design mistake of the 8254x, * as it nullifies DMA operations. The 8254x just allows RX buffer size to be * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its * payload. On architectures without strict alignment restrictions the 8254x still * performs unaligned memory accesses, which reduce performance too. * To avoid copying over an entire frame to align, we allocate a new mbuf and * copy the Ethernet header to the new mbuf. The new mbuf is prepended to the * existing mbuf chain. * * Be aware, best performance of the 8254x is achieved only when jumbo frames are * not used at all on architectures with strict alignment. */ static int em_fixup_rx(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct mbuf *m, *n; int error; error = 0; m = rxr->fmp; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; } else { MGETHDR(n, M_DONTWAIT, MT_DATA); if (n != NULL) { bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; rxr->fmp = n; } else { adapter->dropped_pkts++; m_freem(rxr->fmp); rxr->fmp = NULL; error = ENOMEM; } } return (error); } #endif /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of the checksum so that the stack * doesn't spend time verifying the checksum. * *********************************************************************/ static void em_receive_checksum(struct e1000_rx_desc *rx_desc, struct mbuf *mp) { /* Ignore Checksum bit is set */ if (rx_desc->status & E1000_RXD_STAT_IXSM) { mp->m_pkthdr.csum_flags = 0; return; } if (rx_desc->status & E1000_RXD_STAT_IPCS) { /* Did it pass? */ if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else { mp->m_pkthdr.csum_flags = 0; } } if (rx_desc->status & E1000_RXD_STAT_TCPCS) { /* Did it pass? */ if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } } } /* * This routine is run via a vlan * config EVENT */ static void em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct adapter *adapter = ifp->if_softc; u32 index, bit; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */ return; EM_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; /* Re-init to load the changes */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } /* * This routine is run via a vlan * unconfig EVENT */ static void em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct adapter *adapter = ifp->if_softc; u32 index, bit; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; EM_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; /* Re-init to load the changes */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } static void em_setup_vlan_hw_support(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 reg; /* ** We get here thru init_locked, meaning ** a soft reset, this has already cleared ** the VFTA and other state, so if there ** have been no vlans registered, do nothing. */ if (adapter->num_vlans == 0) return; /* ** A soft reset zeroes out the VFTA, so ** we need to repopulate it now. */ for (int i = 0; i < EM_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, adapter->shadow_vfta[i]); reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_VME; E1000_WRITE_REG(hw, E1000_CTRL, reg); /* Enable the Filter Table */ reg = E1000_READ_REG(hw, E1000_RCTL); reg &= ~E1000_RCTL_CFIEN; reg |= E1000_RCTL_VFE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } static void em_enable_intr(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ims_mask = IMS_ENABLE_MASK; if (hw->mac.type == e1000_82574) { E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK); ims_mask |= EM_MSIX_MASK; } E1000_WRITE_REG(hw, E1000_IMS, ims_mask); } static void em_disable_intr(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; if (hw->mac.type == e1000_82574) E1000_WRITE_REG(hw, EM_EIAC, 0); E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); } /* * Bit of a misnomer, what this really means is * to enable OS management of the system... aka * to disable special hardware management features */ static void em_init_manageability(struct adapter *adapter) { /* A shared code workaround */ #define E1000_82542_MANC2H E1000_MANC2H if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; #define E1000_MNG2HOST_PORT_623 (1 << 5) #define E1000_MNG2HOST_PORT_664 (1 << 6) manc2h |= E1000_MNG2HOST_PORT_623; manc2h |= E1000_MNG2HOST_PORT_664; E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one.
*/ static void em_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means * that the driver is loaded. For AMT version type f/w * this means that the network i/f is open. */ static void em_get_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); return; } /* * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is no longer loaded. For AMT versions of the * f/w this means that the network i/f is closed. */ static void em_release_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (!adapter->has_manage) return; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); return; } static int em_is_valid_ether_addr(u8 *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return (TRUE); } /* ** Parse the interface capabilities with regard ** to both system management and wake-on-lan for ** later use. 
*/ static void em_get_wakeup(device_t dev) { struct adapter *adapter = device_get_softc(dev); u16 eeprom_data = 0, device_id, apme_mask; adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); apme_mask = EM_EEPROM_APME; switch (adapter->hw.mac.type) { case e1000_82573: case e1000_82583: adapter->has_amt = TRUE; /* Falls thru */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: if (adapter->hw.bus.func == 1) { e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; } else e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: apme_mask = E1000_WUC_APME; adapter->has_amt = TRUE; eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC); break; default: e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; } if (eeprom_data & apme_mask) adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC); /* * We have the eeprom settings, now apply the special cases * where the eeprom may be wrong or the board won't support * wake on lan on a particular port */ device_id = pci_get_device(dev); switch (device_id) { case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->wol = 0; break; case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->wol = 0; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; } return; } /* * Enable PCI Wake On Lan capability */ static void em_enable_wakeup(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; u32 pmc, ctrl, ctrl_ext, rctl; u16 status; if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0)) return; /* Advertise the wakeup capability */ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3); E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); if ((adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_pchlan) || (adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_ich10lan)) e1000_suspend_workarounds_ich8lan(&adapter->hw); /* Keep the laser running on Fiber adapters */ if (adapter->hw.phy.media_type == e1000_media_type_fiber || adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext); } /* ** Determine type of Wakeup: note that wol ** is set with all bits on by default. 
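** For example, when the EEPROM advertised APME above, wol was ** primed with E1000_WUFC_MAG | E1000_WUFC_MC; the tests below then ** strip whichever bits the interface capabilities do not request.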
*/ if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0) adapter->wol &= ~E1000_WUFC_MAG; if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0) adapter->wol &= ~E1000_WUFC_MC; else { rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); } if ((adapter->hw.mac.type == e1000_pchlan) || (adapter->hw.mac.type == e1000_pch2lan)) { if (em_enable_phy_wakeup(adapter)) return; } else { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); } if (adapter->hw.phy.type == e1000_phy_igp_3) e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); /* Request PME */ status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if (ifp->if_capenable & IFCAP_WOL) status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); return; } /* ** WOL in the newer chipset interfaces (pchlan) ** requires things to be copied into the PHY */ static int em_enable_phy_wakeup(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mreg, ret = 0; u16 preg; /* copy MAC RARs to PHY RARs */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* copy MAC MTA to PHY MTA */ for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) { mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF)); e1000_write_phy_reg(hw, BM_MTA(i) + 1, (u16)((mreg >> 16) & 0xFFFF)); } /* configure PHY Rx Control register */ e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg); mreg = E1000_READ_REG(hw, E1000_RCTL); if (mreg & E1000_RCTL_UPE) preg |= BM_RCTL_UPE; if (mreg & E1000_RCTL_MPE) preg |= BM_RCTL_MPE; preg &= ~(BM_RCTL_MO_MASK); if (mreg & E1000_RCTL_MO_3) preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) << BM_RCTL_MO_SHIFT); if (mreg & E1000_RCTL_BAM) preg |= BM_RCTL_BAM; if (mreg & E1000_RCTL_PMCF) preg |= BM_RCTL_PMCF; mreg = E1000_READ_REG(hw, E1000_CTRL); if (mreg & E1000_CTRL_RFCE) preg |= BM_RCTL_RFCE; e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg); /* enable PHY wakeup in MAC register */ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol); /* configure and enable PHY wakeup in PHY registers */ e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol); e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); /* activate PHY wakeup */ ret = hw->phy.ops.acquire(hw); if (ret) { printf("Could not acquire PHY\n"); return ret; } e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg); if (ret) { printf("Could not read PHY page 769\n"); goto out; } preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg); if (ret) printf("Could not set PHY Host Wakeup bit\n"); out: hw->phy.ops.release(hw); return ret; } static void em_led_func(void *arg, int onoff) { struct adapter *adapter = arg; EM_CORE_LOCK(adapter); if (onoff) { e1000_setup_led(&adapter->hw); e1000_led_on(&adapter->hw); } else { e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } EM_CORE_UNLOCK(adapter); } /* ** Disable the L0S and L1 LINK states */ static void em_disable_aspm(struct adapter *adapter) { int base, reg; u16 link_cap, link_ctrl; device_t dev = adapter->dev; switch (adapter->hw.mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: break; default: return; } if (pci_find_cap(dev,
PCIY_EXPRESS, &base) != 0) return; reg = base + PCIR_EXPRESS_LINK_CAP; link_cap = pci_read_config(dev, reg, 2); if ((link_cap & PCIM_LINK_CAP_ASPM) == 0) return; reg = base + PCIR_EXPRESS_LINK_CTL; link_ctrl = pci_read_config(dev, reg, 2); link_ctrl &= 0xFFFC; /* turn off bit 1 and 2 */ pci_write_config(dev, reg, link_ctrl, 2); return; } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void em_update_stats_counters(struct adapter *adapter) { struct ifnet *ifp; if(adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS); adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); } adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); /* ** For watchdog management we need to know if we have been ** paused during the last interval, so capture that here. */ adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); adapter->stats.xoffrxc += adapter->pause_frames; adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. 
*/ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32); adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32); adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); /* Interrupt Counts */ adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC); adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC); adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC); adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC); adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC); adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC); adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC); adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC); adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC); if (adapter->hw.mac.type >= e1000_82543) { adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, E1000_RXERRC); adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, E1000_TNCRS); adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, E1000_CEXTERR); adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, E1000_TSCTC); adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, E1000_TSCTFC); } ifp = adapter->ifp; ifp->if_collisions = adapter->stats.colc; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.cexterr; /* Tx Errors */ ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events; } /* Export a single 32-bit register via a read-only sysctl. */ static int em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* * Add sysctl variables, one per statistic, to the system. 
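 * The nodes attach under the device's sysctl tree, so on a * hypothetical first adapter they read back as, e.g., * sysctl dev.em.0.mac_stats.missed_packets.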
*/ static void em_add_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = &adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail", CTLFLAG_RD, &adapter->mbuf_alloc_failed, "Std mbuf failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail", CTLFLAG_RD, &adapter->mbuf_cluster_failed, "Std mbuf cluster failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL, em_sysctl_reg_handler, "IU", "Device Control Register"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL, em_sysctl_reg_handler, "IU", "Receiver Control Register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", CTLFLAG_RD, &txr->tx_irq, "Queue MSI-X Transmit Interrupts"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue No Descriptor Available"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", CTLFLAG_RD, &rxr->rx_irq, "Queue MSI-X Receive Interrupts"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); 
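/*
 * A minimal user-space sketch (not part of the driver, compiled out
 * here) of how one of the counters registered above can be read back;
 * the node name assumes unit 0 of the em(4) driver.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t val;		/* SYSCTL_ADD_UQUAD exports a 64-bit quad */
	size_t len = sizeof(val);

	if (sysctlbyname("dev.em.0.mac_stats.excess_coll",
	    &val, &len, NULL, 0) == -1)
		return (1);
	printf("excessive collisions: %ju\n", (uintmax_t)val);
	return (0);
}
#endif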
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &adapter->stats.symerrs, "Symbol Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &adapter->stats.sec, "Sequence Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &adapter->stats.dc, "Defer Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &adapter->stats.mpc, "Missed Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &adapter->stats.rnbc, "Receive No Buffers"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &adapter->stats.ruc, "Receive Undersize"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &adapter->stats.rfc, "Fragmented Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &adapter->stats.roc, "Oversized Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &adapter->stats.rjc, "Received Jabber"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &adapter->stats.rxerrc, "Receive Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &adapter->stats.crcerrs, "CRC errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &adapter->stats.algnerrc, "Alignment Errors"); /* On 82575 these are collision counts */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", CTLFLAG_RD, &adapter->stats.cexterr, "Collision/Carrier extension errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &adapter->stats.xonrxc, "XON Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &adapter->stats.xontxc, "XON Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &adapter->stats.xoffrxc, "XOFF Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &adapter->stats.xofftxc, "XOFF Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &adapter->stats.tpr, "Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &adapter->stats.gprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.bprc, "Broadcast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.mprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &adapter->stats.prc64, "64 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &adapter->stats.prc127, "65-127 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &adapter->stats.prc255, "128-255 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &adapter->stats.prc511, "256-511 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &adapter->stats.prc1023, "512-1023 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.prc1522, "1024-1522 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &adapter->stats.gorc, "Good Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &adapter->stats.gotc, "Good Octets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &adapter->stats.tpt, "Total Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &adapter->stats.gptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &adapter->stats.bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &adapter->stats.mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &adapter->stats.ptc64, "64 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &adapter->stats.ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &adapter->stats.ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &adapter->stats.ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &adapter->stats.ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &adapter->stats.tsctc, "TSO Contexts Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", CTLFLAG_RD, &adapter->stats.tsctfc, "TSO Contexts Failed"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &adapter->stats.iac, "Interrupt Assertion Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", CTLFLAG_RD, &adapter->stats.icrxptc, "Interrupt Cause Rx Pkt Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", CTLFLAG_RD, &adapter->stats.icrxatc, "Interrupt Cause Rx Abs Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", CTLFLAG_RD, &adapter->stats.ictxptc, "Interrupt Cause Tx Pkt Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", CTLFLAG_RD, &adapter->stats.ictxatc, "Interrupt Cause Tx Abs Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", CTLFLAG_RD, &adapter->stats.ictxqec, "Interrupt Cause Tx Queue Empty Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", CTLFLAG_RD, &adapter->stats.ictxqmtc, "Interrupt Cause Tx Queue Min Thresh Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &adapter->stats.icrxdmtc, "Interrupt Cause Rx Desc Min Thresh Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun", CTLFLAG_RD, &adapter->stats.icrxoc, "Interrupt Cause Receiver Overrun Count"); } /********************************************************************** * * This routine
provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words; the stuff that matters is within that extent. * **********************************************************************/ static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) em_print_nvm_info(adapter); return (error); } static void em_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* It's a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ", row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { struct em_int_delay_info *info; struct adapter *adapter; u32 regval; int error, usecs, ticks; info = (struct em_int_delay_info *)arg1; usecs = info->value; error = sysctl_handle_int(oidp, &usecs, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535)) return (EINVAL); info->value = usecs; ticks = EM_USECS_TO_TICKS(usecs); adapter = info->adapter; EM_CORE_LOCK(adapter); regval = E1000_READ_OFFSET(&adapter->hw, info->offset); regval = (regval & ~0xffff) | (ticks & 0xffff); /* Handle a few special cases. */ switch (info->offset) { case E1000_RDTR: break; case E1000_TIDV: if (ticks == 0) { adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; /* Don't write 0 into the TIDV register. */ regval++; } else adapter->txd_cmd |= E1000_TXD_CMD_IDE; break; } E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); EM_CORE_UNLOCK(adapter); return (0); } static void em_add_int_delay_sysctl(struct adapter *adapter, const char *name, const char *description, struct em_int_delay_info *info, int offset, int value) { info->adapter = adapter; info->offset = offset; info->value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, em_sysctl_int_delay, "I", description); } static void em_set_sysctl_value(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); } /* ** Set flow control using sysctl: ** Flow control values: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int em_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct adapter *adapter = (struct adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (input == adapter->fc) /* no change?
*/ return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); return (error); } static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_debug_info(adapter); } return (error); } /* ** This routine is meant to be fluid, add whatever is ** needed for debugging a problem. -jfv */ static void em_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; if (adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) printf("Interface is RUNNING "); else printf("Interface is NOT RUNNING\n"); if (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE) printf("and INACTIVE\n"); else printf("and ACTIVE\n"); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_TDH(0)), E1000_READ_REG(&adapter->hw, E1000_TDT(0))); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_RDH(0)), E1000_READ_REG(&adapter->hw, E1000_RDT(0))); device_printf(dev, "Tx Queue Status = %d\n", txr->queue_status); device_printf(dev, "TX descriptors avail = %d\n", txr->tx_avail); device_printf(dev, "Tx Descriptors avail failure = %ld\n", txr->no_desc_avail); device_printf(dev, "RX discarded packets = %ld\n", rxr->rx_discarded); device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check); device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh); } Index: projects/nand/sys/dev/e1000/if_igb.c =================================================================== --- projects/nand/sys/dev/e1000/if_igb.c (revision 235262) +++ projects/nand/sys/dev/e1000/if_igb.c (revision 235263) @@ -1,5926 +1,5941 @@ /****************************************************************************** Copyright (c) 2001-2011, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_altq.h" #endif #include #include #if __FreeBSD_version >= 800000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82575.h" #include "if_igb.h" /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int igb_display_debug_stats = 0; /********************************************************************* * Driver version: *********************************************************************/ char igb_driver_version[] = "version - 2.3.1"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static igb_vendor_info_t igb_vendor_info_array[] = { { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_DH89XXCC_SFP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, 
E1000_DEV_ID_I350_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_I350_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_I350_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_I350_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_I350_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ { 0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *igb_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int igb_probe(device_t); static int igb_attach(device_t); static int igb_detach(device_t); static int igb_shutdown(device_t); static int igb_suspend(device_t); static int igb_resume(device_t); #if __FreeBSD_version >= 800000 static int igb_mq_start(struct ifnet *, struct mbuf *); static int igb_mq_start_locked(struct ifnet *, struct tx_ring *, struct mbuf *); static void igb_qflush(struct ifnet *); static void igb_deferred_mq_start(void *, int); #else static void igb_start(struct ifnet *); static void igb_start_locked(struct tx_ring *, struct ifnet *ifp); #endif static int igb_ioctl(struct ifnet *, u_long, caddr_t); static void igb_init(void *); static void igb_init_locked(struct adapter *); static void igb_stop(void *); static void igb_media_status(struct ifnet *, struct ifmediareq *); static int igb_media_change(struct ifnet *); static void igb_identify_hardware(struct adapter *); static int igb_allocate_pci_resources(struct adapter *); static int igb_allocate_msix(struct adapter *); static int igb_allocate_legacy(struct adapter *); static int igb_setup_msix(struct adapter *); static void igb_free_pci_resources(struct adapter *); static void igb_local_timer(void *); static void igb_reset(struct adapter *); static int igb_setup_interface(device_t, struct adapter *); static int igb_allocate_queues(struct adapter *); static void igb_configure_queues(struct adapter *); static int igb_allocate_transmit_buffers(struct tx_ring *); static void igb_setup_transmit_structures(struct adapter *); static void igb_setup_transmit_ring(struct tx_ring *); static void igb_initialize_transmit_units(struct adapter *); static void igb_free_transmit_structures(struct adapter *); static void igb_free_transmit_buffers(struct tx_ring *); static int igb_allocate_receive_buffers(struct rx_ring *); static int igb_setup_receive_structures(struct adapter *); static int igb_setup_receive_ring(struct rx_ring *); static void igb_initialize_receive_units(struct adapter *); static void igb_free_receive_structures(struct adapter *); static void igb_free_receive_buffers(struct rx_ring *); static void igb_free_receive_ring(struct rx_ring *); static void igb_enable_intr(struct adapter *); static void igb_disable_intr(struct adapter *); static void igb_update_stats_counters(struct adapter *); static bool igb_txeof(struct tx_ring *); static __inline void igb_rx_discard(struct rx_ring *, int); static __inline void igb_rx_input(struct rx_ring *, struct ifnet *, struct mbuf *, u32); static bool igb_rxeof(struct igb_queue *, int, int *); static void igb_rx_checksum(u32, struct mbuf *, u32); static bool igb_tx_ctx_setup(struct tx_ring *, struct mbuf *); static bool igb_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *, struct tcphdr *); static void 
igb_set_promisc(struct adapter *); static void igb_disable_promisc(struct adapter *); static void igb_set_multi(struct adapter *); static void igb_update_link_status(struct adapter *); static void igb_refresh_mbufs(struct rx_ring *, int); static void igb_register_vlan(void *, struct ifnet *, u16); static void igb_unregister_vlan(void *, struct ifnet *, u16); static void igb_setup_vlan_hw_support(struct adapter *); static int igb_xmit(struct tx_ring *, struct mbuf **); static int igb_dma_malloc(struct adapter *, bus_size_t, struct igb_dma_alloc *, int); static void igb_dma_free(struct adapter *, struct igb_dma_alloc *); static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void igb_print_nvm_info(struct adapter *); static int igb_is_valid_ether_addr(u8 *); static void igb_add_hw_stats(struct adapter *); static void igb_vf_init_stats(struct adapter *); static void igb_update_vf_stats_counters(struct adapter *); /* Management and WOL Support */ static void igb_init_manageability(struct adapter *); static void igb_release_manageability(struct adapter *); static void igb_get_hw_control(struct adapter *); static void igb_release_hw_control(struct adapter *); static void igb_enable_wakeup(device_t); static void igb_led_func(void *, int); static int igb_irq_fast(void *); static void igb_msix_que(void *); static void igb_msix_link(void *); static void igb_handle_que(void *context, int pending); static void igb_handle_link(void *context, int pending); static void igb_handle_link_locked(struct adapter *); static void igb_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS); static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS); #ifdef DEVICE_POLLING static poll_handler_t igb_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t igb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, igb_probe), DEVMETHOD(device_attach, igb_attach), DEVMETHOD(device_detach, igb_detach), DEVMETHOD(device_shutdown, igb_shutdown), DEVMETHOD(device_suspend, igb_suspend), DEVMETHOD(device_resume, igb_resume), {0, 0} }; static driver_t igb_driver = { "igb", igb_methods, sizeof(struct adapter), }; static devclass_t igb_devclass; DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); MODULE_DEPEND(igb, pci, 1, 1, 1); MODULE_DEPEND(igb, ether, 1, 1, 1); /********************************************************************* * Tunable default values. 
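 *
 * A sketch of how these loader tunables might be set from
 * /boot/loader.conf (the values below are illustrative
 * assumptions, not recommendations):
 *
 *	hw.igb.rxd="2048"
 *	hw.igb.txd="2048"
 *	hw.igb.max_interrupt_rate="8000"
 *	hw.igb.num_queues="0"	# 0 = autoconfigure from CPU count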
*********************************************************************/ static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters"); /* Descriptor defaults */ static int igb_rxd = IGB_DEFAULT_RXD; static int igb_txd = IGB_DEFAULT_TXD; TUNABLE_INT("hw.igb.rxd", &igb_rxd); TUNABLE_INT("hw.igb.txd", &igb_txd); SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0, "Number of receive descriptors per queue"); SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0, "Number of transmit descriptors per queue"); /* ** AIM: Adaptive Interrupt Moderation, ** which means that the interrupt rate ** is varied over time based on the ** traffic for that interrupt vector. */ static int igb_enable_aim = TRUE; TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim); SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RW, &igb_enable_aim, 0, "Enable adaptive interrupt moderation"); /* * MSIX should be the default for best performance, * but this allows it to be forced off for testing. */ static int igb_enable_msix = 1; TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix); SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0, "Enable MSI-X interrupts"); /* ** Tunable interrupt rate. */ static int igb_max_interrupt_rate = 8000; TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate); SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &igb_max_interrupt_rate, 0, "Maximum interrupts per second"); /* ** Header split causes the packet header to ** be DMA'd into a separate mbuf from the payload. ** This can have memory alignment benefits, and ** small packets often fit entirely in the header ** mbuf, avoiding a cluster allocation. It is a ** very workload-dependent feature. */ static int igb_header_split = FALSE; TUNABLE_INT("hw.igb.hdr_split", &igb_header_split); SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0, "Enable receive mbuf header split"); /* ** This will autoconfigure based on ** the number of CPUs if left at 0. */ static int igb_num_queues = 0; TUNABLE_INT("hw.igb.num_queues", &igb_num_queues); SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0, "Number of queues to configure, 0 indicates autoconfigure"); +/* +** Global variable to store the last used CPU when binding queues +** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a +** queue is bound to a CPU. +*/ +static int igb_last_bind_cpu = -1; + /* How many packets rxeof tries to clean at a time */ static int igb_rx_process_limit = 100; TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit); SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &igb_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include <dev/netmap/if_igb_netmap.h> #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * igb_probe determines if the driver should be loaded on the * adapter, based on the PCI vendor/device id of the adapter.
* * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int igb_probe(device_t dev) { char adapter_name[60]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; uint16_t pci_subvendor_id = 0; uint16_t pci_subdevice_id = 0; igb_vendor_info_t *ent; INIT_DEBUGOUT("igb_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != IGB_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = igb_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s %s", igb_strings[ent->index], igb_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int igb_attach(device_t dev) { struct adapter *adapter; int error = 0; u16 eeprom_data; INIT_DEBUGOUT("igb_attach: begin"); if (resource_disabled("igb", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_nvm_info, "I", "NVM Information"); igb_set_sysctl_value(adapter, "enable_aim", "Interrupt Moderation", &adapter->enable_aim, igb_enable_aim); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_set_flowcntl, "I", "Flow Control"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); /* Determine hardware and mac info */ igb_identify_hardware(adapter); /* Setup PCI resources */ if (igb_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* Do Shared Code initialization */ if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { device_printf(dev, "Setup of Shared code failed\n"); error = ENXIO; goto err_pci; } e1000_get_bus_info(&adapter->hw); /* Sysctl for limiting the amount of work done in the taskqueue */ igb_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, igb_rx_process_limit); /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be multiple * of E1000_DBA_ALIGN. 
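 *
 * (The checks below compare against IGB_DBA_ALIGN.)  Worked
 * example, assuming the usual 16-byte descriptor size and a
 * 128-byte alignment constant: igb_txd = 2048 gives
 * 2048 * 16 = 32768 bytes, a multiple of 128, so the requested
 * value is accepted.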
*/ if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", IGB_DEFAULT_TXD, igb_txd); adapter->num_tx_desc = IGB_DEFAULT_TXD; } else adapter->num_tx_desc = igb_txd; if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", IGB_DEFAULT_RXD, igb_rxd); adapter->num_rx_desc = IGB_DEFAULT_RXD; } else adapter->num_rx_desc = igb_rxd; adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_wait_to_complete = FALSE; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { adapter->hw.phy.mdix = AUTO_ALL_MODES; adapter->hw.phy.disable_polarity_correction = FALSE; adapter->hw.phy.ms_type = IGB_MASTER_SLAVE; } /* * Set the frame limits assuming * standard Ethernet sized frames. */ adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; /* ** Allocate and Setup Queues */ if (igb_allocate_queues(adapter)) { error = ENOMEM; goto err_pci; } /* Allocate the appropriate stats memory */ if (adapter->vf_ifp) { adapter->stats = (struct e1000_vf_stats *)malloc(sizeof \ (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO); igb_vf_init_stats(adapter); } else adapter->stats = (struct e1000_hw_stats *)malloc(sizeof \ (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO); if (adapter->stats == NULL) { device_printf(dev, "Cannot allocate stats memory\n"); error = ENOMEM; goto err_late; } /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Cannot allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Some adapter-specific advanced features */ if (adapter->hw.mac.type >= e1000_i350) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dmac", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_dmac, "I", "DMA Coalesce"); igb_set_sysctl_value(adapter, "eee_disabled", "Disable Energy Efficient Ethernet", &adapter->hw.dev_spec._82575.eee_disable, TRUE); e1000_set_eee_i350(&adapter->hw); } /* ** Start from a known state; this is ** important for reading the NVM and ** MAC address from it. */ e1000_reset_hw(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, so call it again; ** if it fails a second time it's a real issue.
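**
** In isolation, the retry-once idiom used below looks like this
** (a sketch with a hypothetical check() helper, not driver code):
**
**	if (check() < 0 && check() < 0)
**		return (EIO);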
*/ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* ** Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(&adapter->hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } /* Check its sanity */ if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* Setup OS specific network interface */ if (igb_setup_interface(dev, adapter) != 0) goto err_late; /* Now get a good starting state */ igb_reset(adapter); /* Initialize statistics */ igb_update_stats_counters(adapter); adapter->hw.mac.get_link_status = 1; igb_update_link_status(adapter); /* Indicate SOL/IDER usage */ if (e1000_check_reset_block(&adapter->hw)) device_printf(dev, "PHY reset is blocked due to SOL/IDER session.\n"); /* Determine if we have to control management hardware */ adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); /* * Setup Wake-on-Lan */ /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; if (eeprom_data) adapter->wol = E1000_WUFC_MAG; /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); igb_add_hw_stats(adapter); /* Tell the stack that the interface is not active */ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE; adapter->led_dev = led_create(igb_led_func, adapter, device_get_nameunit(dev)); /* ** Configure Interrupts */ if ((adapter->msix > 1) && (igb_enable_msix)) error = igb_allocate_msix(adapter); else /* MSI or Legacy */ error = igb_allocate_legacy(adapter); if (error) goto err_late; #ifdef DEV_NETMAP igb_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("igb_attach: end"); return (0); err_late: igb_detach(dev); igb_free_transmit_structures(adapter); igb_free_receive_structures(adapter); igb_release_hw_control(adapter); err_pci: igb_free_pci_resources(adapter); if (adapter->ifp != NULL) if_free(adapter->ifp); free(adapter->mta, M_DEVBUF); IGB_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
* * return 0 on success, positive on failure *********************************************************************/ static int igb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("igb_detach: begin"); /* Make sure VLANS are not using driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } ether_ifdetach(adapter->ifp); if (adapter->led_dev != NULL) led_destroy(adapter->led_dev); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif IGB_CORE_LOCK(adapter); adapter->in_detach = 1; igb_stop(adapter); IGB_CORE_UNLOCK(adapter); e1000_phy_hw_reset(&adapter->hw); /* Give control back to firmware */ igb_release_manageability(adapter); igb_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); igb_enable_wakeup(dev); } /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); callout_drain(&adapter->timer); #ifdef DEV_NETMAP netmap_detach(adapter->ifp); #endif /* DEV_NETMAP */ igb_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); igb_free_transmit_structures(adapter); igb_free_receive_structures(adapter); if (adapter->mta != NULL) free(adapter->mta, M_DEVBUF); IGB_CORE_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int igb_shutdown(device_t dev) { return igb_suspend(dev); } /* * Suspend/resume device methods. */ static int igb_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); IGB_CORE_LOCK(adapter); igb_stop(adapter); igb_release_manageability(adapter); igb_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); igb_enable_wakeup(dev); } IGB_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int igb_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; IGB_CORE_LOCK(adapter); igb_init_locked(adapter); igb_init_manageability(adapter); if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); #if __FreeBSD_version >= 800000 /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } } IGB_CORE_UNLOCK(adapter); return bus_generic_resume(dev); } /********************************************************************* * Transmit entry point * * igb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. 
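 *
 * The shape of the loop that follows, reduced to a sketch (the
 * descriptor-depletion handling is paraphrased, not exact code):
 *
 *	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 *		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 *		if (igb_xmit(txr, &m_head) != 0) {
 *			if (m_head != NULL)
 *				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 *			break;
 *		}
 *		ETHER_BPF_MTAP(ifp, m_head);
 *	}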
**********************************************************************/ static void igb_start_locked(struct tx_ring *txr, struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct mbuf *m_head; IGB_TX_LOCK_ASSERT(txr); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; /* Call cleanup if number of TX descriptors low */ if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) igb_txeof(txr); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (txr->tx_avail <= IGB_MAX_SCATTER) { txr->queue_status |= IGB_QUEUE_DEPLETED; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Encapsulation can modify our pointer, and or make it * NULL on failure. In that event, we can't requeue. */ if (igb_xmit(txr, &m_head)) { if (m_head != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m_head); if (txr->tx_avail <= IGB_MAX_SCATTER) txr->queue_status |= IGB_QUEUE_DEPLETED; break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); /* Set watchdog on */ txr->watchdog_time = ticks; txr->queue_status |= IGB_QUEUE_WORKING; } } /* * Legacy TX driver routine, called from the * stack, always uses tx[0], and spins for it. * Should not be used with multiqueue tx */ static void igb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IGB_TX_LOCK(txr); igb_start_locked(txr, ifp); IGB_TX_UNLOCK(txr); } return; } #if __FreeBSD_version >= 800000 /* ** Multiqueue Transmit driver ** */ static int igb_mq_start(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; struct igb_queue *que; struct tx_ring *txr; int i, err = 0; bool moveable = TRUE; /* Which queue to use */ if ((m->m_flags & M_FLOWID) != 0) { i = m->m_pkthdr.flowid % adapter->num_queues; moveable = FALSE; } else i = curcpu % adapter->num_queues; txr = &adapter->tx_rings[i]; que = &adapter->queues[i]; if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && IGB_TX_TRYLOCK(txr)) { err = igb_mq_start_locked(ifp, txr, m); IGB_TX_UNLOCK(txr); } else { err = drbr_enqueue(ifp, txr->br, m); taskqueue_enqueue(que->tq, &txr->txq_task); } return (err); } static int igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m) { struct adapter *adapter = txr->adapter; struct mbuf *next; int err = 0, enq; IGB_TX_LOCK_ASSERT(txr); if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || (txr->queue_status == IGB_QUEUE_DEPLETED) || adapter->link_active == 0) { if (m != NULL) err = drbr_enqueue(ifp, txr->br, m); return (err); } enq = 0; if (m == NULL) { next = drbr_dequeue(ifp, txr->br); } else if (drbr_needs_enqueue(ifp, txr->br)) { if ((err = drbr_enqueue(ifp, txr->br, m)) != 0) return (err); next = drbr_dequeue(ifp, txr->br); } else next = m; /* Process the queue */ while (next != NULL) { if ((err = igb_xmit(txr, &next)) != 0) { if (next != NULL) err = drbr_enqueue(ifp, txr->br, next); break; } enq++; drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags); ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; next = drbr_dequeue(ifp, txr->br); } if (enq > 0) { /* Set the watchdog */ txr->queue_status |= IGB_QUEUE_WORKING; txr->watchdog_time = ticks; } if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) igb_txeof(txr); if (txr->tx_avail <= IGB_MAX_SCATTER) txr->queue_status |= IGB_QUEUE_DEPLETED; return (err); } /* * Called from a taskqueue to drain queued transmit packets. 
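 *
 * Producer side, for reference (taken from igb_mq_start() above):
 * when the TX lock cannot be taken, the frame is buffered and this
 * task is scheduled:
 *
 *	err = drbr_enqueue(ifp, txr->br, m);
 *	taskqueue_enqueue(que->tq, &txr->txq_task);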
*/ static void igb_deferred_mq_start(void *arg, int pending) { struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; IGB_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr, NULL); IGB_TX_UNLOCK(txr); } /* ** Flush all ring buffers */ static void igb_qflush(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct mbuf *m; for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); IGB_TX_UNLOCK(txr); } if_qflush(ifp); } #endif /* __FreeBSD_version >= 800000 */ /********************************************************************* * Ioctl entry point * * igb_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) igb_init(adapter); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); IGB_CORE_LOCK(adapter); max_frame_size = 9234; if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { IGB_CORE_UNLOCK(adapter); error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); IGB_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { igb_disable_promisc(adapter); igb_set_promisc(adapter); } } else igb_init_locked(adapter); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) igb_stop(adapter); adapter->if_flags = ifp->if_flags; IGB_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IGB_CORE_LOCK(adapter); igb_disable_intr(adapter); igb_set_multi(adapter); #ifdef DEVICE_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) #endif igb_enable_intr(adapter); IGB_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ IGB_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { IGB_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } IGB_CORE_UNLOCK(adapter); case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set 
Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(igb_poll, ifp); if (error) return (error); IGB_CORE_LOCK(adapter); igb_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; IGB_CORE_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ IGB_CORE_LOCK(adapter); igb_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; IGB_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; reinit = 1; } if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) igb_init(adapter); VLAN_CAPABILITIES(ifp); break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void igb_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; INIT_DEBUGOUT("igb_init: begin"); IGB_CORE_LOCK_ASSERT(adapter); igb_disable_intr(adapter); callout_stop(&adapter->timer); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); igb_reset(adapter); igb_update_link_status(adapter); E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Set hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) { ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); #if __FreeBSD_version >= 800000 if (adapter->hw.mac.type == e1000_82576) ifp->if_hwassist |= CSUM_SCTP; #endif } if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_TSO; /* Configure for OS presence */ igb_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ igb_setup_transmit_structures(adapter); igb_initialize_transmit_units(adapter); /* Setup Multicast table */ igb_set_multi(adapter); /* ** Figure out the desired mbuf pool ** for doing jumbo/packetsplit */ if (adapter->max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; /* Prepare receive descriptors and buffers */ if (igb_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); return; } igb_initialize_receive_units(adapter); /* Enable VLAN support */ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) igb_setup_vlan_hw_support(adapter); /* Don't lose promiscuous settings */ igb_set_promisc(adapter); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&adapter->timer, hz, igb_local_timer, 
adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); if (adapter->msix > 1) /* Set up queue routing */ igb_configure_queues(adapter); /* this clears any pending interrupts */ E1000_READ_REG(&adapter->hw, E1000_ICR); #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_capenable & IFCAP_POLLING) igb_disable_intr(adapter); else #endif /* DEVICE_POLLING */ { igb_enable_intr(adapter); E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC); } /* Set Energy Efficient Ethernet */ e1000_set_eee_i350(&adapter->hw); } static void igb_init(void *arg) { struct adapter *adapter = arg; IGB_CORE_LOCK(adapter); igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_handle_que(void *context, int pending) { struct igb_queue *que = context; struct adapter *adapter = que->adapter; struct tx_ring *txr = que->txr; struct ifnet *ifp = adapter->ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bool more; more = igb_rxeof(que, adapter->rx_process_limit, NULL); IGB_TX_LOCK(txr); igb_txeof(txr); #if __FreeBSD_version >= 800000 /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); /* Do we need another? */ if (more) { taskqueue_enqueue(que->tq, &que->que_task); return; } } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) return; #endif /* Reenable this interrupt */ if (que->eims) E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); else igb_enable_intr(adapter); } /* Deal with link in a sleepable context */ static void igb_handle_link(void *context, int pending) { struct adapter *adapter = context; IGB_CORE_LOCK(adapter); igb_handle_link_locked(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_handle_link_locked(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; IGB_CORE_LOCK_ASSERT(adapter); adapter->hw.mac.get_link_status = 1; igb_update_link_status(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); #if __FreeBSD_version >= 800000 /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } } } /********************************************************************* * * MSI/Legacy Deferred * Interrupt Service routine * *********************************************************************/ static int igb_irq_fast(void *arg) { struct adapter *adapter = arg; struct igb_queue *que = adapter->queues; u32 reg_icr; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; /* * Mask interrupts until the taskqueue is finished running. This is * cheap, just assume that it is needed. This also works around the * MSI message reordering errata on certain systems. 
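 *
 * The resulting filter/taskqueue split, schematically (the unmask
 * happens later, in igb_handle_que(), once the work is done):
 *
 *	igb_disable_intr(adapter);	(filter context)
 *	taskqueue_enqueue(que->tq, &que->que_task);
 *	    ...
 *	igb_enable_intr(adapter);	(task context)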
*/ igb_disable_intr(adapter); taskqueue_enqueue(que->tq, &que->que_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) taskqueue_enqueue(que->tq, &adapter->link_task); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } #ifdef DEVICE_POLLING /********************************************************************* * * Legacy polling routine : if using this code you MUST be sure that * multiqueue is not defined, ie, set igb_num_queues to 1. * *********************************************************************/ #if __FreeBSD_version >= 800000 #define POLL_RETURN_COUNT(a) (a) static int #else #define POLL_RETURN_COUNT(a) static void #endif igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; struct igb_queue *que = adapter->queues; struct tx_ring *txr = adapter->tx_rings; u32 reg_icr, rx_done = 0; u32 loop = IGB_MAX_LOOP; bool more; IGB_CORE_LOCK(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { IGB_CORE_UNLOCK(adapter); return POLL_RETURN_COUNT(rx_done); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) igb_handle_link_locked(adapter); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; } IGB_CORE_UNLOCK(adapter); igb_rxeof(que, count, &rx_done); IGB_TX_LOCK(txr); do { more = igb_txeof(txr); } while (loop-- && more); #if __FreeBSD_version >= 800000 if (!drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); return POLL_RETURN_COUNT(rx_done); } #endif /* DEVICE_POLLING */ /********************************************************************* * * MSIX Que Interrupt Service routine * **********************************************************************/ static void igb_msix_que(void *arg) { struct igb_queue *que = arg; struct adapter *adapter = que->adapter; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = que->txr; struct rx_ring *rxr = que->rxr; u32 newitr = 0; bool more_rx; E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims); ++que->irqs; IGB_TX_LOCK(txr); igb_txeof(txr); #if __FreeBSD_version >= 800000 /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr, NULL); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL); if (adapter->enable_aim == FALSE) goto no_calc; /* ** Do Adaptive Interrupt Moderation: ** - Write out last calculated setting ** - Calculate based on average size over ** the last interval. 
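**
** Worked example with illustrative numbers: 300000 rx bytes in
** 200 packets over the interval gives an average of 1500; +24
** for frame/CRC overhead is 1524; that falls outside the 300-1200
** mid range, so it is halved to 762, and the 0x7FFC mask then
** yields an EITR setting of 760.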
*/ if (que->eitr_setting) E1000_WRITE_REG(&adapter->hw, E1000_EITR(que->msix), que->eitr_setting); que->eitr_setting = 0; /* Idle, do nothing */ if ((txr->bytes == 0) && (rxr->bytes == 0)) goto no_calc; /* Use half the default if sub-gig */ if (adapter->link_speed != 1000) newitr = IGB_DEFAULT_ITR / 2; else { if ((txr->bytes) && (txr->packets)) newitr = txr->bytes/txr->packets; if ((rxr->bytes) && (rxr->packets)) newitr = max(newitr, (rxr->bytes / rxr->packets)); newitr += 24; /* account for hardware frame, crc */ /* set an upper boundary */ newitr = min(newitr, 3000); /* Be nice to the mid range */ if ((newitr > 300) && (newitr < 1200)) newitr = (newitr / 3); else newitr = (newitr / 2); } newitr &= 0x7FFC; /* Mask invalid bits */ if (adapter->hw.mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; /* save for next interrupt */ que->eitr_setting = newitr; /* Reset state */ txr->bytes = 0; txr->packets = 0; rxr->bytes = 0; rxr->packets = 0; no_calc: /* Schedule a clean task if needed */ if (more_rx) taskqueue_enqueue(que->tq, &que->que_task); else /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); return; } /********************************************************************* * * MSIX Link Interrupt Service routine * **********************************************************************/ static void igb_msix_link(void *arg) { struct adapter *adapter = arg; u32 icr; ++adapter->link_irq; icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (!(icr & E1000_ICR_LSC)) goto spurious; igb_handle_link(adapter, 0); spurious: /* Rearm */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter *adapter = ifp->if_softc; u_char fiber_type = IFM_1000_SX; INIT_DEBUGOUT("igb_media_status: begin"); IGB_CORE_LOCK(adapter); igb_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { IGB_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) ifmr->ifm_active |= fiber_type | IFM_FDX; else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } IGB_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * the media/mediaopt options of ifconfig.
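 *
 * Typical invocations from userland (the interface unit is
 * illustrative):
 *
 *	# ifconfig igb0 media 100baseTX mediaopt full-duplex
 *	# ifconfig igb0 media autoselect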
* **********************************************************************/ static int igb_media_change(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("igb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); IGB_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to Advanced TX descriptors. * **********************************************************************/ static int igb_xmit(struct tx_ring *txr, struct mbuf **m_headp) { struct adapter *adapter = txr->adapter; bus_dma_segment_t segs[IGB_MAX_SCATTER]; bus_dmamap_t map; struct igb_tx_buffer *tx_buffer, *tx_buffer_mapped; union e1000_adv_tx_desc *txd = NULL; struct mbuf *m_head = *m_headp; struct ether_vlan_header *eh = NULL; struct ip *ip = NULL; struct tcphdr *th = NULL; u32 hdrlen, cmd_type_len, olinfo_status = 0; int ehdrlen, poff; int nsegs, i, first, last = 0; int error, do_tso, remap = 1; /* Set basic descriptor constants */ cmd_type_len = E1000_ADVTXD_DTYP_DATA; cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; if (m_head->m_flags & M_VLANTAG) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; retry: m_head = *m_headp; do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0); hdrlen = ehdrlen = poff = 0; /* * Intel recommends that the entire IP/TCP header reside in a single * buffer. If multiple descriptors are used to describe the IP and * TCP header, each descriptor should describe one or more * complete headers; descriptors referencing only parts of headers * are not supported. If all layer headers are not coalesced into * a single buffer, each buffer should not cross a 4KB boundary, * or be larger than the maximum read request size. * The controller also requires the IP/TCP header to be modified for * TSO to work, so we first obtain a writable mbuf chain and then * coalesce the Ethernet/IP/TCP headers into a single buffer to meet * the controller's requirement. This also simplifies IP/TCP/UDP * checksum offloading, which has similar restrictions. */ if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { if (do_tso || (m_head->m_next != NULL && m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) { if (M_WRITABLE(*m_headp) == 0) { m_head = m_dup(*m_headp, M_DONTWAIT); m_freem(*m_headp); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } *m_headp = m_head; } } /* * Assume IPv4; we don't have TSO/checksum offload support * for IPv6 yet.
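 *
 * The pullup ladder that follows makes each header contiguous
 * before it is dereferenced, in order:
 *
 *	ether header -> optional VLAN header -> IP header
 *	    -> TCP/UDP header -> full TCP options (TSO case)
 *
 * with poff tracking the byte offset of the L4 header.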
*/ ehdrlen = sizeof(struct ether_header); m_head = m_pullup(m_head, ehdrlen); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } eh = mtod(m_head, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = sizeof(struct ether_vlan_header); m_head = m_pullup(m_head, ehdrlen); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } m_head = m_pullup(m_head, ehdrlen + sizeof(struct ip)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ehdrlen); poff = ehdrlen + (ip->ip_hl << 2); if (do_tso) { m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } /* * The TCP pseudo checksum does not include the TCP payload * length, so the driver recomputes the checksum here into * what the hardware expects to see, in adherence to * Microsoft's Large Send specification. */ th = (struct tcphdr *)(mtod(m_head, char *) + poff); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); /* Keep track of the full header length */ hdrlen = poff + (th->th_off << 2); } else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) { m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } th = (struct tcphdr *)(mtod(m_head, char *) + poff); m_head = m_pullup(m_head, poff + (th->th_off << 2)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ehdrlen); th = (struct tcphdr *)(mtod(m_head, char *) + poff); } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { m_head = m_pullup(m_head, poff + sizeof(struct udphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m_head, char *) + ehdrlen); } *m_headp = m_head; } /* * Map the packet for DMA * * Capture the first descriptor index, * this descriptor will have the index * of the EOP which is the only one that * now gets a DONE bit writeback. */ first = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[first]; tx_buffer_mapped = tx_buffer; map = tx_buffer->map; error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); /* * There are two types of errors we can (try) to handle: * - EFBIG means the mbuf chain was too long and bus_dma ran * out of segments. Defragment the mbuf chain and try again. * - ENOMEM means bus_dma could not obtain enough bounce buffers * at this point in time. Defer sending and try again later. * All other errors, in particular EINVAL, are fatal and prevent the * mbuf chain from ever going through. Drop it and report the error. */ if (error == EFBIG && remap) { struct mbuf *m; m = m_defrag(*m_headp, M_DONTWAIT); if (m == NULL) { adapter->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again, but only once */ remap = 0; goto retry; } else if (error == ENOMEM) { adapter->no_tx_dma_setup++; return (error); } else if (error != 0) { adapter->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } /* ** Make sure we don't overrun the ring, ** we need nsegs descriptors and one for ** the context descriptor used for the ** offloads. */ if ((nsegs + 1) > (txr->tx_avail - 2)) { txr->no_desc_avail++; bus_dmamap_unload(txr->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* Do hardware assists: * Set up the context descriptor, used * when any hardware offload is done. * This includes CSUM, VLAN, and TSO. * It will use the first descriptor. */
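/*
 * Note that for TSO the payload length programmed into
 * olinfo_status excludes the headers; e.g. (illustrative numbers)
 * pkthdr.len = 65582 with hdrlen = 66 yields 65516, shifted by
 * E1000_ADVTXD_PAYLEN_SHIFT.
 */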
*/ if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { if (igb_tso_setup(txr, m_head, ehdrlen, ip, th)) { cmd_type_len |= E1000_ADVTXD_DCMD_TSE; olinfo_status |= E1000_TXD_POPTS_IXSM << 8; olinfo_status |= E1000_TXD_POPTS_TXSM << 8; } else return (ENXIO); } else if (igb_tx_ctx_setup(txr, m_head)) olinfo_status |= E1000_TXD_POPTS_TXSM << 8; /* Calculate payload length */ olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) << E1000_ADVTXD_PAYLEN_SHIFT); /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) olinfo_status |= txr->me << 4; /* Set up our transmit descriptors */ i = txr->next_avail_desc; for (int j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; tx_buffer = &txr->tx_buffers[i]; txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; txd->read.buffer_addr = htole64(seg_addr); txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); txd->read.olinfo_status = htole32(olinfo_status); last = i; if (++i == adapter->num_tx_desc) i = 0; tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; } txr->next_avail_desc = i; txr->tx_avail -= nsegs; tx_buffer->m_head = m_head; /* ** Here we swap the map so the last descriptor, ** which gets the completion interrupt has the ** real map, and the first descriptor gets the ** unused map from this descriptor. */ tx_buffer_mapped->map = tx_buffer->map; tx_buffer->map = map; bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS); /* * Keep track in the first buffer which * descriptor will be written back */ tx_buffer = &txr->tx_buffers[first]; tx_buffer->next_eop = last; /* Update the watchdog time early and often */ txr->watchdog_time = ticks; /* * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 * that this frame is available to transmit. */ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); ++txr->tx_packets; return (0); } static void igb_set_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 reg; if (adapter->vf_ifp) { e1000_promisc_set_vf(hw, e1000_promisc_enabled); return; } reg = E1000_READ_REG(hw, E1000_RCTL); if (ifp->if_flags & IFF_PROMISC) { reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, reg); } else if (ifp->if_flags & IFF_ALLMULTI) { reg |= E1000_RCTL_MPE; reg &= ~E1000_RCTL_UPE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } } static void igb_disable_promisc(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 reg; if (adapter->vf_ifp) { e1000_promisc_set_vf(hw, e1000_promisc_disabled); return; } reg = E1000_READ_REG(hw, E1000_RCTL); reg &= (~E1000_RCTL_UPE); reg &= (~E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, reg); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. 
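 *
 * If the list exceeds MAX_NUM_MULTICAST_ADDRESSES, the exact
 * filter is abandoned and multicast-promiscuous mode (RCTL.MPE)
 * is enabled instead; otherwise the collected addresses are
 * programmed with e1000_update_mc_addr_list().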
* **********************************************************************/ static void igb_set_multi(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct ifmultiaddr *ifma; u32 reg_rctl = 0; u8 *mta; int mcnt = 0; IOCTL_DEBUGOUT("igb_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); } /********************************************************************* * Timer routine: * This routine checks for link status, * updates statistics, and does the watchdog. * **********************************************************************/ static void igb_local_timer(void *arg) { struct adapter *adapter = arg; device_t dev = adapter->dev; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct igb_queue *que = adapter->queues; int hung = 0, busy = 0; IGB_CORE_LOCK_ASSERT(adapter); igb_update_link_status(adapter); igb_update_stats_counters(adapter); /* ** Check the TX queues status ** - central locked handling of OACTIVE ** - watchdog only if all queues show hung */ for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { if ((txr->queue_status & IGB_QUEUE_HUNG) && (adapter->pause_frames == 0)) ++hung; if (txr->queue_status & IGB_QUEUE_DEPLETED) ++busy; if ((txr->queue_status & IGB_QUEUE_IDLE) == 0) taskqueue_enqueue(que->tq, &que->que_task); } if (hung == adapter->num_queues) goto timeout; if (busy == adapter->num_queues) ifp->if_drv_flags |= IFF_DRV_OACTIVE; else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && (busy < adapter->num_queues)) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; adapter->pause_frames = 0; callout_reset(&adapter->timer, hz, igb_local_timer, adapter); #ifndef DEVICE_POLLING /* Schedule all queue interrupts - deadlock protection */ E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask); #endif return; timeout: device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); device_printf(dev,"TX(%d) desc avail = %d," "Next TX to Clean = %d\n", txr->me, txr->tx_avail, txr->next_to_clean); adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; igb_init_locked(adapter); } static void igb_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; u32 link_check, thstat, ctrl; link_check = thstat = ctrl = 0; /* Get the cached link value or read for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; } else link_check = TRUE; break; case e1000_media_type_fiber: 
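		/*
		 * No PHY registers to poll here; for fiber the LU
		 * (link up) bit of the STATUS register is what counts.
		 */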
e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; /* VF device is type_unknown */ case e1000_media_type_unknown: e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; /* Fall thru */ default: break; } /* Check for thermal downshift or shutdown */ if (hw->mac.type == e1000_i350) { thstat = E1000_READ_REG(hw, E1000_THSTAT); ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); } /* Now we check if a transition has happened */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? "Full Duplex" : "Half Duplex")); adapter->link_active = 1; ifp->if_baudrate = adapter->link_speed * 1000000; if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && (thstat & E1000_THSTAT_LINK_THROTTLE)) device_printf(dev, "Link: thermal downshift\n"); /* This can sleep */ if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { ifp->if_baudrate = adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && (thstat & E1000_THSTAT_PWR_DOWN)) device_printf(dev, "Link: thermal shutdown\n"); adapter->link_active = 0; /* This can sleep */ if_link_state_change(ifp, LINK_STATE_DOWN); /* Reset queue state */ for (int i = 0; i < adapter->num_queues; i++, txr++) txr->queue_status = IGB_QUEUE_IDLE; } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void igb_stop(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; IGB_CORE_LOCK_ASSERT(adapter); INIT_DEBUGOUT("igb_stop: begin"); igb_disable_intr(adapter); callout_stop(&adapter->timer); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Disarm watchdog timer. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); txr->queue_status = IGB_QUEUE_IDLE; IGB_TX_UNLOCK(txr); } e1000_reset_hw(&adapter->hw); E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. 
* **********************************************************************/ static void igb_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) { INIT_DEBUGOUT("Memory Access and/or Bus Master " "bits were not set!\n"); adapter->hw.bus.pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); pci_write_config(dev, PCIR_COMMAND, adapter->hw.bus.pci_cmd_word, 2); } /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Set MAC type early for PCI setup */ e1000_set_mac_type(&adapter->hw); /* Are we a VF device? */ if ((adapter->hw.mac.type == e1000_vfadapt) || (adapter->hw.mac.type == e1000_vfadapt_i350)) adapter->vf_ifp = 1; else adapter->vf_ifp = 0; } static int igb_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int rid; rid = PCIR_BAR(0); adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->pci_mem == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->pci_mem); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->num_queues = 1; /* Defaults for Legacy or MSI */ /* This will setup either MSI/X or MSI */ adapter->msix = igb_setup_msix(adapter); adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ static int igb_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = adapter->queues; struct tx_ring *txr = adapter->tx_rings; int error, rid = 0; /* Turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* MSI RID is 1 */ if (adapter->msix == 1) rid = 1; /* We allocate a single interrupt resource */ adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } #if __FreeBSD_version >= 800000 TASK_INIT(&txr->txq_task, 0, igb_deferred_mq_start, txr); #endif /* * Try allocating a fast interrupt and the associated deferred * processing contexts. 
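	 * igb_irq_fast is installed as a filter, so it runs in primary
	 * interrupt context; it is expected only to acknowledge the
	 * hardware and punt the real work to the que/link taskqueues
	 * set up below.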
*/ TASK_INIT(&que->que_task, 0, igb_handle_que, que); /* Make tasklet for deferred link handling */ TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter); que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq", device_get_nameunit(adapter->dev)); if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(que->tq); que->tq = NULL; return (error); } return (0); } /********************************************************************* * * Setup the MSIX Queue Interrupt handlers: * **********************************************************************/ static int igb_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = adapter->queues; int error, rid, vector = 0; /* Be sure to start with all interrupts disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(&adapter->hw); for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { rid = vector +1; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (que->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Queue Interrupt\n"); return (ENXIO); } error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, igb_msix_que, que, &que->tag); if (error) { que->res = NULL; device_printf(dev, "Failed to register Queue handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, que->res, que->tag, "que %d", i); #endif que->msix = vector; if (adapter->hw.mac.type == e1000_82575) que->eims = E1000_EICR_TX_QUEUE0 << i; else que->eims = 1 << vector; /* ** Bind the msix vector, and thus the ** rings to the corresponding cpu. 
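	** When multiple queues exist the binding is now done
	** round-robin: the first pass starts at CPU_FIRST() (the
	** static igb_last_bind_cpu begins below zero), and each
	** queue then advances it.  In outline:
	**
	**	cpu = (cpu < 0) ? CPU_FIRST() : cpu;
	**	bus_bind_intr(dev, que->res, cpu);
	**	cpu = CPU_NEXT(cpu) % mp_ncpus;
	**
	** so vectors stay spread evenly even when the queue count
	** does not match the CPU count.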
*/ - if (adapter->num_queues > 1) - bus_bind_intr(dev, que->res, i); + if (adapter->num_queues > 1) { + if (igb_last_bind_cpu < 0) + igb_last_bind_cpu = CPU_FIRST(); + bus_bind_intr(dev, que->res, igb_last_bind_cpu); + device_printf(dev, + "Bound queue %d to cpu %d\n", + i,igb_last_bind_cpu); + igb_last_bind_cpu = CPU_NEXT(igb_last_bind_cpu); + igb_last_bind_cpu = igb_last_bind_cpu % mp_ncpus; + } #if __FreeBSD_version >= 800000 TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start, que->txr); #endif /* Make tasklet for deferred handling */ TASK_INIT(&que->que_task, 0, igb_handle_que, que); que->tq = taskqueue_create("igb_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(adapter->dev)); } /* And Link */ rid = vector + 1; adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Link Interrupt\n"); return (ENXIO); } if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, igb_msix_link, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register Link handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif adapter->linkvec = vector; return (0); } static void igb_configure_queues(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct igb_queue *que; u32 tmp, ivar = 0, newitr = 0; /* First turn on RSS capability */ if (adapter->hw.mac.type != e1000_82575) E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME | E1000_GPIE_PBA | E1000_GPIE_NSICR); /* Turn on MSIX */ switch (adapter->hw.mac.type) { case e1000_82580: case e1000_i350: case e1000_vfadapt: case e1000_vfadapt_i350: /* RX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i & 1) { ivar &= 0xFF00FFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 16; } else { ivar &= 0xFFFFFF00; ivar |= que->msix | E1000_IVAR_VALID; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); } /* TX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i & 1) { ivar &= 0x00FFFFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 24; } else { ivar &= 0xFFFF00FF; ivar |= (que->msix | E1000_IVAR_VALID) << 8; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82576: /* RX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i < 8) { ivar &= 0xFFFFFF00; ivar |= que->msix | E1000_IVAR_VALID; } else { ivar &= 0xFF00FFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 16; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* TX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i < 8) { ivar &= 0xFFFF00FF; ivar |= (que->msix | E1000_IVAR_VALID) << 8; } else { ivar &= 0x00FFFFFF; ivar |= 
(que->msix | E1000_IVAR_VALID) << 24; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82575: /* enable MSI-X support*/ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask interrupts upon ICR read. */ tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); /* Queues */ for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; tmp = E1000_EICR_RX_QUEUE0 << i; tmp |= E1000_EICR_TX_QUEUE0 << i; que->eims = tmp; E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), i, que->eims); adapter->que_mask |= que->eims; } /* Link */ E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec), E1000_EIMS_OTHER); adapter->link_mask |= E1000_EIMS_OTHER; default: break; } /* Set the starting interrupt rate */ if (igb_max_interrupt_rate > 0) newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC; if (hw->mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr); } return; } static void igb_free_pci_resources(struct adapter *adapter) { struct igb_queue *que = adapter->queues; device_t dev = adapter->dev; int rid; /* ** There is a slight possibility of a failure mode ** in attach that will result in entering this function ** before interrupt resources have been initialized, and ** in that case we do not want to execute the loops below ** We can detect this reliably by the state of the adapter ** res pointer. */ if (adapter->res == NULL) goto mem; /* * First release all the interrupt resources: */ for (int i = 0; i < adapter->num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); que->tag = NULL; } if (que->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); } /* Clean the Legacy or Link interrupt last */ if (adapter->linkvec) /* we are doing MSIX */ rid = adapter->linkvec + 1; else (adapter->msix != 0) ? 
(rid = 1):(rid = 0); que = adapter->queues; if (adapter->tag != NULL) { taskqueue_drain(que->tq, &adapter->link_task); bus_teardown_intr(dev, adapter->res, adapter->tag); adapter->tag = NULL; } if (adapter->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); for (int i = 0; i < adapter->num_queues; i++, que++) { if (que->tq != NULL) { #if __FreeBSD_version >= 800000 taskqueue_drain(que->tq, &que->txr->txq_task); #endif taskqueue_drain(que->tq, &que->que_task); taskqueue_free(que->tq); } } mem: if (adapter->msix) pci_release_msi(dev); if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); if (adapter->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->pci_mem); } /* * Setup Either MSI/X or MSI */ static int igb_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int rid, want, queues, msgs; /* tuneable override */ if (igb_enable_msix == 0) goto msi; /* First try MSI/X */ rid = PCIR_BAR(IGB_MSIX_BAR); adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!adapter->msix_mem) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table \n"); goto msi; } msgs = pci_msix_count(dev); if (msgs == 0) { /* system has msix disabled */ bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); adapter->msix_mem = NULL; goto msi; } /* Figure out a reasonable auto config value */ queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus; /* Manual override */ if (igb_num_queues != 0) queues = igb_num_queues; if (queues > 8) /* max queues */ queues = 8; /* Can have max of 4 queues on 82575 */ if ((adapter->hw.mac.type == e1000_82575) && (queues > 4)) queues = 4; /* Limit the VF devices to one queue */ if (adapter->vf_ifp) queues = 1; /* ** One vector (RX/TX pair) per queue ** plus an additional for Link interrupt */ want = queues + 1; if (msgs >= want) msgs = want; else { device_printf(adapter->dev, "MSIX Configuration Problem, " "%d vectors configured, but %d queues wanted!\n", msgs, want); return (0); } if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) { device_printf(adapter->dev, "Using MSIX interrupts with %d vectors\n", msgs); adapter->num_queues = queues; return (msgs); } msi: msgs = pci_msi_count(dev); if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0) { device_printf(adapter->dev," Using MSI interrupt\n"); return (msgs); } return (0); } /********************************************************************* * * Set up an fresh starting state * **********************************************************************/ static void igb_reset(struct adapter *adapter) { device_t dev = adapter->dev; struct e1000_hw *hw = &adapter->hw; struct e1000_fc_info *fc = &hw->fc; struct ifnet *ifp = adapter->ifp; u32 pba = 0; u16 hwm; INIT_DEBUGOUT("igb_reset: begin"); /* Let the firmware know the OS is in control */ igb_get_hw_control(adapter); /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. 
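	 * The value programmed is in kilobytes: the 82575 gets a
	 * fixed E1000_PBA_32K (32KB of RX buffer), while later MACs
	 * read the current split back from the RXPBS register
	 * instead of hard-coding it.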
*/ switch (hw->mac.type) { case e1000_82575: pba = E1000_PBA_32K; break; case e1000_82576: case e1000_vfadapt: pba = E1000_READ_REG(hw, E1000_RXPBS); pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82580: case e1000_i350: case e1000_vfadapt_i350: pba = E1000_READ_REG(hw, E1000_RXPBS); pba = e1000_rxpbs_adjust_82580(pba); break; default: break; } /* Special needs in case of Jumbo frames */ if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) { u32 tx_space, min_tx, min_rx; pba = E1000_READ_REG(hw, E1000_PBA); tx_space = pba >> 16; pba &= 0xffff; min_tx = (adapter->max_frame_size + sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2; min_tx = roundup2(min_tx, 1024); min_tx >>= 10; min_rx = adapter->max_frame_size; min_rx = roundup2(min_rx, 1024); min_rx >>= 10; if (tx_space < min_tx && ((min_tx - tx_space) < pba)) { pba = pba - (min_tx - tx_space); /* * if short on rx space, rx wins * and must trump tx adjustment */ if (pba < min_rx) pba = min_rx; } E1000_WRITE_REG(hw, E1000_PBA, pba); } INIT_DEBUGOUT1("igb_init: pba=%dK",pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. */ hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * adapter->max_frame_size)); if (hw->mac.type < e1000_82576) { fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; } else { fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; } fc->pause_time = IGB_FC_PAUSE_TIME; fc->send_xon = TRUE; if (adapter->fc) fc->requested_mode = adapter->fc; else fc->requested_mode = e1000_fc_default; /* Issue a global reset */ e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); if (e1000_init_hw(hw) < 0) device_printf(dev, "Hardware Initialization Failed\n"); /* Setup DMA Coalescing */ if (hw->mac.type == e1000_i350) { u32 reg = ~E1000_DMACR_DMAC_EN; if (adapter->dmac == 0) { /* Disabling it */ E1000_WRITE_REG(hw, E1000_DMACR, reg); goto reset_out; } hwm = (pba - 4) << 10; reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); /* timer = value in adapter->dmac in 32usec intervals */ reg |= (adapter->dmac >> 5); E1000_WRITE_REG(hw, E1000_DMACR, reg); /* No lower threshold */ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); /* set hwm to PBA - 2 * max frame size */ E1000_WRITE_REG(hw, E1000_FCRTC, hwm); /* Set the interval before transition */ reg = E1000_READ_REG(hw, E1000_DMCTLX); reg |= 0x800000FF; /* 255 usec */ E1000_WRITE_REG(hw, E1000_DMCTLX, reg); /* free space in tx packet buffer to wake from DMA coal */ E1000_WRITE_REG(hw, E1000_DMCTXTH, (20480 - (2 * adapter->max_frame_size)) >> 6); /* make low power state decision controlled by DMA coal */ reg = E1000_READ_REG(hw, E1000_PCIEMISC); E1000_WRITE_REG(hw, E1000_PCIEMISC, reg | E1000_PCIEMISC_LX_DECISION); device_printf(dev, "DMA Coalescing enabled\n"); } reset_out: E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); return; } /********************************************************************* * * Setup networking device structure and register an interface. 
* **********************************************************************/ static int igb_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("igb_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = igb_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = igb_ioctl; ifp->if_start = igb_start; #if __FreeBSD_version >= 800000 ifp->if_transmit = igb_mq_start; ifp->if_qflush = igb_qflush; #endif IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; IFQ_SET_READY(&ifp->if_snd); ether_ifattach(ifp, adapter->hw.mac.addr); ifp->if_capabilities = ifp->if_capenable = 0; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_TSO4; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; /* Don't enable LRO by default */ ifp->if_capabilities |= IFCAP_LRO; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we * support full VLAN capability. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the igb driver you can ** enable this and get full hardware tag filtering. */ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, igb_media_change, igb_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* * Manage DMA'able memory. 
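 * The helpers below follow the usual three-step bus_dma pattern:
 *
 *	bus_dma_tag_create()	-> dma->dma_tag
 *	bus_dmamem_alloc()	-> dma->dma_vaddr, dma->dma_map
 *	bus_dmamap_load()	-> dma->dma_paddr (via callback)
 *
 * with igb_dmamap_cb() capturing the bus address of the single
 * segment for the caller.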
*/ static void igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int igb_dma_malloc(struct adapter *adapter, bus_size_t size, struct igb_dma_alloc *dma, int mapflags) { int error; error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ IGB_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); goto fail_2; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (error); } static void igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_map != NULL) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_map = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate memory for the transmit and receive rings, and then * the descriptors associated with each, called only once at attach. 
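 *  Sizing sketch: an advanced descriptor is 16 bytes, so with the
 *  usual default of 1024 descriptors a ring needs
 *  roundup2(1024 * 16, IGB_DBA_ALIGN), i.e. 16KB of DMA memory.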
* **********************************************************************/ static int igb_allocate_queues(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = NULL; struct tx_ring *txr = NULL; struct rx_ring *rxr = NULL; int rsize, tsize, error = E1000_SUCCESS; int txconf = 0, rxconf = 0; /* First allocate the top level queue structs */ if (!(adapter->queues = (struct igb_queue *) malloc(sizeof(struct igb_queue) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } /* Next allocate the TX ring struct memory */ if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto tx_fail; } /* Now allocate the RX */ if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } tsize = roundup2(adapter->num_tx_desc * sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN); /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_queues; i++, txconf++) { /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); if (igb_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; } txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; bzero((void *)txr->tx_base, tsize); /* Now allocate transmit buffers for the ring */ if (igb_allocate_transmit_buffers(txr)) { device_printf(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; } #if __FreeBSD_version >= 800000 /* Allocate a buf ring */ txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF, M_WAITOK, &txr->tx_mtx); #endif } /* * Next the RX queues... 
 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (igb_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (igb_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		igb_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		igb_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
#if __FreeBSD_version >= 800000
	buf_ring_free(txr->br, M_DEVBUF);
#endif
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
igb_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct igb_tx_buffer *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       IGB_TSO_SIZE,		/* maxsize */
			       IGB_MAX_SCATTER,		/* nsegments */
			       PAGE_SIZE,		/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	igb_free_transmit_structures(adapter);
	return (error);
}

/*********************************************************************
 *
 *  Initialize a transmit ring.
* **********************************************************************/ static void igb_setup_transmit_ring(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct igb_tx_buffer *txbuf; int i; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ /* Clear the old descriptor contents */ IGB_TX_LOCK(txr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_TX, txr->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ txr->next_avail_desc = 0; txr->next_to_clean = 0; /* Free any existing tx buffers. */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); /* no need to set the address */ netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si)); } #endif /* DEV_NETMAP */ /* clear the watch index */ txbuf->next_eop = -1; } /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IGB_TX_UNLOCK(txr); } /********************************************************************* * * Initialize all transmit rings. * **********************************************************************/ static void igb_setup_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) igb_setup_transmit_ring(txr); return; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void igb_initialize_transmit_units(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl; INIT_DEBUGOUT("igb_initialize_transmit_units: begin"); tctl = txdctl = 0; /* Setup the Tx Descriptor Rings */ for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 bus_addr = txr->txdma.dma_paddr; E1000_WRITE_REG(hw, E1000_TDLEN(i), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); /* Setup the HW Tx Head and Tail descriptor pointers */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(hw, E1000_TDBAL(i)), E1000_READ_REG(hw, E1000_TDLEN(i))); txr->queue_status = IGB_QUEUE_IDLE; txdctl |= IGB_TX_PTHRESH; txdctl |= IGB_TX_HTHRESH << 8; txdctl |= IGB_TX_WTHRESH << 16; txdctl |= E1000_TXDCTL_QUEUE_ENABLE; E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); } if (adapter->vf_ifp) return; e1000_config_collision_dist(hw); /* Program the Transmit Control Register */ tctl = E1000_READ_REG(hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(hw, E1000_TCTL, tctl); } /********************************************************************* * * Free all transmit rings. 
* **********************************************************************/ static void igb_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); igb_free_transmit_buffers(txr); igb_dma_free(adapter, &txr->txdma); IGB_TX_UNLOCK(txr); IGB_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ static void igb_free_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct igb_tx_buffer *tx_buffer; int i; INIT_DEBUGOUT("free_transmit_ring: begin"); if (txr->tx_buffers == NULL) return; tx_buffer = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; if (tx_buffer->map != NULL) { bus_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } else if (tx_buffer->map != NULL) { bus_dmamap_unload(txr->txtag, tx_buffer->map); bus_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } #if __FreeBSD_version >= 800000 if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif if (txr->tx_buffers != NULL) { free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } if (txr->txtag != NULL) { bus_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) * **********************************************************************/ static bool igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ehdrlen, struct ip *ip, struct tcphdr *th) { struct adapter *adapter = txr->adapter; struct e1000_adv_tx_context_desc *TXD; struct igb_tx_buffer *tx_buffer; u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; u32 mss_l4len_idx = 0; u16 vtag = 0; int ctxd, ip_hlen, tcp_hlen; ctxd = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[ctxd]; TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; ip->ip_sum = 0; ip_hlen = ip->ip_hl << 2; tcp_hlen = th->th_off << 2; /* VLAN MACLEN IPLEN */ if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); } vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT); vlan_macip_lens |= ip_hlen; TXD->vlan_macip_lens |= htole32(vlan_macip_lens); /* ADV DTYPE TUCMD */ type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); /* MSS L4LEN IDX */ mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT); /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx |= txr->me << 4; TXD->mss_l4len_idx = htole32(mss_l4len_idx); TXD->seqnum_seed = htole32(0); tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++ctxd == adapter->num_tx_desc) ctxd = 0; txr->tx_avail--; txr->next_avail_desc = ctxd; return TRUE; } /********************************************************************* * * Context Descriptor setup for VLAN or CSUM * 
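 *  The vlan_macip_lens word hands the hardware the header layout
 *  in one shot:
 *
 *	(vtag << E1000_ADVTXD_VLAN_SHIFT) |
 *	(ehdrlen << E1000_ADVTXD_MACLEN_SHIFT) |
 *	ip_hlen
 *
 *  so it can find the L3/L4 headers when inserting checksums.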
**********************************************************************/ static bool igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) { struct adapter *adapter = txr->adapter; struct e1000_adv_tx_context_desc *TXD; struct igb_tx_buffer *tx_buffer; u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; struct ether_vlan_header *eh; struct ip *ip = NULL; struct ip6_hdr *ip6; int ehdrlen, ctxd, ip_hlen = 0; u16 etype, vtag = 0; u8 ipproto = 0; bool offload = TRUE; if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) offload = FALSE; vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; ctxd = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[ctxd]; TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; /* ** In advanced descriptors the vlan tag must ** be placed into the context descriptor, thus ** we need to be here just for that setup. */ if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); } else if (offload == FALSE) return FALSE; /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. */ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } /* Set the ether header length */ vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = ip->ip_hl << 2; if (mp->m_len < ehdrlen + ip_hlen) { offload = FALSE; break; } ipproto = ip->ip_p; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); ipproto = ip6->ip6_nxt; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; default: offload = FALSE; break; } vlan_macip_lens |= ip_hlen; type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; switch (ipproto) { case IPPROTO_TCP: if (mp->m_pkthdr.csum_flags & CSUM_TCP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; break; case IPPROTO_UDP: if (mp->m_pkthdr.csum_flags & CSUM_UDP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; break; #if __FreeBSD_version >= 800000 case IPPROTO_SCTP: if (mp->m_pkthdr.csum_flags & CSUM_SCTP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP; break; #endif default: offload = FALSE; break; } /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx = txr->me << 4; /* Now copy bits into descriptor */ TXD->vlan_macip_lens |= htole32(vlan_macip_lens); TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); TXD->seqnum_seed = htole32(0); TXD->mss_l4len_idx = htole32(mss_l4len_idx); tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; /* We've consumed the first desc, adjust counters */ if (++ctxd == adapter->num_tx_desc) ctxd = 0; txr->next_avail_desc = ctxd; --txr->tx_avail; return (offload); } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * * TRUE return means there's work in the ring to clean, FALSE its empty. 
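 *  Note only EOP descriptors request status writeback (RS is set on
 *  them alone at transmit time), so the walk below hops from EOP to
 *  EOP and tests that descriptor's DD bit before freeing the chain.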
**********************************************************************/ static bool igb_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; int first, last, done, processed; struct igb_tx_buffer *tx_buffer; struct e1000_tx_desc *tx_desc, *eop_desc; struct ifnet *ifp = adapter->ifp; IGB_TX_LOCK_ASSERT(txr); #ifdef DEV_NETMAP if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(ifp); selwakeuppri(&na->tx_rings[txr->me].si, PI_NET); IGB_TX_UNLOCK(txr); IGB_CORE_LOCK(adapter); selwakeuppri(&na->tx_si, PI_NET); IGB_CORE_UNLOCK(adapter); IGB_TX_LOCK(txr); return FALSE; } #endif /* DEV_NETMAP */ if (txr->tx_avail == adapter->num_tx_desc) { txr->queue_status = IGB_QUEUE_IDLE; return FALSE; } processed = 0; first = txr->next_to_clean; tx_desc = &txr->tx_base[first]; tx_buffer = &txr->tx_buffers[first]; last = tx_buffer->next_eop; eop_desc = &txr->tx_base[last]; /* * What this does is get the index of the * first descriptor AFTER the EOP of the * first packet, that way we can do the * simple comparison on the inner while loop. */ if (++last == adapter->num_tx_desc) last = 0; done = last; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { /* We clean the range of the packet */ while (first != done) { tx_desc->upper.data = 0; tx_desc->lower.data = 0; tx_desc->buffer_addr = 0; ++txr->tx_avail; ++processed; if (tx_buffer->m_head) { txr->bytes += tx_buffer->m_head->m_pkthdr.len; bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } tx_buffer->next_eop = -1; txr->watchdog_time = ticks; if (++first == adapter->num_tx_desc) first = 0; tx_buffer = &txr->tx_buffers[first]; tx_desc = &txr->tx_base[first]; } ++txr->packets; ++ifp->if_opackets; /* See if we can continue to the next packet */ last = tx_buffer->next_eop; if (last != -1) { eop_desc = &txr->tx_base[last]; /* Get new done point */ if (++last == adapter->num_tx_desc) last = 0; done = last; } else break; } bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* ** Watchdog calculation, we know there's ** work outstanding or the first return ** would have been taken, so none processed ** for too long indicates a hang. */ if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG)) txr->queue_status |= IGB_QUEUE_HUNG; /* * If we have a minimum free, * clear depleted state bit */ if (txr->tx_avail >= IGB_QUEUE_THRESHOLD) txr->queue_status &= ~IGB_QUEUE_DEPLETED; /* All clean, turn off the watchdog */ if (txr->tx_avail == adapter->num_tx_desc) { txr->queue_status = IGB_QUEUE_IDLE; return (FALSE); } return (TRUE); } /********************************************************************* * * Refresh mbuf buffers for RX descriptor rings * - now keeps its own state so discards due to resource * exhaustion are unnecessary, if an mbuf cannot be obtained * it just returns, keeping its placeholder, thus it can simply * be recalled to try again. 
* **********************************************************************/ static void igb_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; struct igb_rx_buf *rxbuf; struct mbuf *mh, *mp; int i, j, nsegs, error; bool refreshed = FALSE; i = j = rxr->next_to_refresh; /* ** Get one descriptor beyond ** our work mark to control ** the loop. */ if (++j == adapter->num_rx_desc) j = 0; while (j != limit) { rxbuf = &rxr->rx_buffers[i]; /* No hdr mbuf used with header split off */ if (rxr->hdr_split == FALSE) goto no_split; if (rxbuf->m_head == NULL) { mh = m_gethdr(M_DONTWAIT, MT_DATA); if (mh == NULL) goto update; } else mh = rxbuf->m_head; mh->m_pkthdr.len = mh->m_len = MHLEN; mh->m_len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(mh); rxbuf->m_head = NULL; goto update; } rxbuf->m_head = mh; bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr); no_split: if (rxbuf->m_pack == NULL) { mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (mp == NULL) goto update; } else mp = rxbuf->m_pack; mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: payload dmamap load" " failure - %d\n", error); m_free(mp); rxbuf->m_pack = NULL; goto update; } rxbuf->m_pack = mp; bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.pkt_addr = htole64(pseg[0].ds_addr); refreshed = TRUE; /* I feel wefreshed :) */ i = j; /* our next is precalculated */ rxr->next_to_refresh = i; if (++j == adapter->num_rx_desc) j = 0; } update: if (refreshed) /* update tail */ E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->next_to_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/ static int igb_allocate_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; device_t dev = adapter->dev; struct igb_rx_buf *rxbuf; int i, bsize, error; bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc; if (!(rxr->rx_buffers = (struct igb_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; goto fail; } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSIZE, /* maxsize */ 1, /* nsegments */ MSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->htag))) { device_printf(dev, "Unable to create RX DMA tag\n"); goto fail; } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->ptag))) { device_printf(dev, "Unable to create RX payload DMA tag\n"); goto fail; } for (i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; error = bus_dmamap_create(rxr->htag, BUS_DMA_NOWAIT, &rxbuf->hmap); if (error) { device_printf(dev, "Unable to create RX head DMA maps\n"); goto fail; } error = bus_dmamap_create(rxr->ptag, BUS_DMA_NOWAIT, &rxbuf->pmap); if (error) { device_printf(dev, "Unable to create RX packet DMA maps\n"); goto fail; } } return (0); fail: /* Frees all, but can handle partial completion */ igb_free_receive_structures(adapter); return (error); } static void igb_free_receive_ring(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct igb_rx_buf *rxbuf; for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } rxbuf->m_head = NULL; rxbuf->m_pack = NULL; } } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int igb_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter; struct ifnet *ifp; device_t dev; struct igb_rx_buf *rxbuf; bus_dma_segment_t pseg[1], hseg[1]; struct lro_ctrl *lro = &rxr->lro; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(rxr->adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ adapter = rxr->adapter; dev = adapter->dev; ifp = adapter->ifp; /* Clear the ring contents */ IGB_RX_LOCK(rxr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_RX, rxr->me, 0); #endif /* DEV_NETMAP */ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); /* ** Free current RX buffer structures and their mbufs */ igb_free_receive_ring(rxr); /* Configure for header split? 
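	 * With split enabled the NIC DMAs protocol headers into a
	 * small mbuf loaded through htag (at most MSIZE bytes) and
	 * the payload into a separate cluster through ptag (up to
	 * MJUM9BYTES), which is why igb_allocate_receive_buffers()
	 * created two DMA maps per rx_buffer.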
 */
	if (igb_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the ring mbufs */
	for (int j = 0; j < adapter->num_rx_desc; ++j) {
		struct mbuf *mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
#ifdef DEV_NETMAP
		if (slot) {
			/* slot sj is mapped to the j-th NIC-ring entry */
			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
			uint64_t paddr;
			void *addr;

			addr = PNMB(slot + sj, &paddr);
			netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
			/* Update descriptor */
			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
			continue;
		}
#endif /* DEV_NETMAP */
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = adapter->num_rx_desc - 1;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface, we
	** also only do head split when LRO
	** is enabled, since they are so often
	** undesirable in similar setups.
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		error = tcp_lro_init(lro);
		if (error) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IGB_RX_UNLOCK(rxr);
	return (0);

fail:
	igb_free_receive_ring(rxr);
	IGB_RX_UNLOCK(rxr);
	return (error);
}

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
igb_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, rxr++)
		if (igb_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'i' is the endpoint.
	 */
	for (int j = 0; j < i; ++j) {
		rxr = &adapter->rx_rings[j];
		IGB_RX_LOCK(rxr);
		igb_free_receive_ring(rxr);
		IGB_RX_UNLOCK(rxr);
	}

	return (ENOBUFS);
}

/*********************************************************************
 *
 *  Enable receive unit.
* **********************************************************************/ static void igb_initialize_receive_units(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 rctl, rxcsum, psize, srrctl = 0; INIT_DEBUGOUT("igb_initialize_receive_unit: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* ** Set up for header split */ if (igb_header_split) { /* Use a standard mbuf for the header */ srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } else srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; /* ** Set up for jumbo frames */ if (ifp->if_mtu > ETHERMTU) { rctl |= E1000_RCTL_LPE; if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; } /* Set maximum packet len */ psize = adapter->max_frame_size; /* are we on a vlan? */ if (adapter->ifp->if_vlantrunk != NULL) psize += VLAN_TAG_SIZE; E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); } else { rctl &= ~E1000_RCTL_LPE; srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_2048; } /* Setup the Base and Length of the Rx Descriptor Rings */ for (int i = 0; i < adapter->num_queues; i++, rxr++) { u64 bus_addr = rxr->rxdma.dma_paddr; u32 rxdctl; E1000_WRITE_REG(hw, E1000_RDLEN(i), adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); /* Enable this Queue */ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGB_RX_PTHRESH; rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } /* ** Setup for RX MultiQueue */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); if (adapter->num_queues >1) { u32 random[10], mrqc, shift = 0; union igb_reta { u32 dword; u8 bytes[4]; } reta; arc4rand(&random, sizeof(random), 0); if (adapter->hw.mac.type == e1000_82575) shift = 6; /* Warning FM follows */ for (int i = 0; i < 128; i++) { reta.bytes[i & 3] = (i % adapter->num_queues) << shift; if ((i & 3) == 3) E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); } /* Now fill in hash table */ mrqc = E1000_MRQC_ENABLE_RSS_4Q; for (int i = 0; i < 10; i++) E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, random[i]); mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP); mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP); mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP | E1000_MRQC_RSS_FIELD_IPV6_UDP); mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); E1000_WRITE_REG(hw, E1000_MRQC, mrqc); /* ** NOTE: Receive Full-Packet Checksum Offload ** is mutually exclusive with Multiqueue. However ** this is not the same as TCP/IP checksums which ** still work. 
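	** The RETA loop above packs the 128-entry redirection table
	** four bytes at a time: entry i gets (i % num_queues),
	** shifted left 6 on the 82575, which keeps its queue index
	** in the upper bits of each byte.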
*/ rxcsum |= E1000_RXCSUM_PCSD; #if __FreeBSD_version >= 800000 /* For SCTP Offload */ if ((hw->mac.type == e1000_82576) && (ifp->if_capenable & IFCAP_RXCSUM)) rxcsum |= E1000_RXCSUM_CRCOFL; #endif } else { /* Non RSS setup */ if (ifp->if_capenable & IFCAP_RXCSUM) { rxcsum |= E1000_RXCSUM_IPPCSE; #if __FreeBSD_version >= 800000 if (adapter->hw.mac.type == e1000_82576) rxcsum |= E1000_RXCSUM_CRCOFL; #endif } else rxcsum &= ~E1000_RXCSUM_TUOFL; } E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Strip CRC bytes. */ rctl |= E1000_RCTL_SECRC; /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; /* Don't store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Receives */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); /* * Setup the HW Rx Head and Tail Descriptor Pointers * - needs to be after enable */ for (int i = 0; i < adapter->num_queues; i++) { rxr = &adapter->rx_rings[i]; E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); #ifdef DEV_NETMAP /* * An init() while a netmap client is active must * preserve the rx buffers passed to userspace. * In this driver it means we adjust RDT to * something different from next_to_refresh * (which is not used in netmap mode). */ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->rx_rings[i]; int t = rxr->next_to_refresh - kring->nr_hwavail; if (t >= adapter->num_rx_desc) t -= adapter->num_rx_desc; else if (t < 0) t += adapter->num_rx_desc; E1000_WRITE_REG(hw, E1000_RDT(i), t); } else #endif /* DEV_NETMAP */ E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh); } return; } /********************************************************************* * * Free receive rings. * **********************************************************************/ static void igb_free_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { struct lro_ctrl *lro = &rxr->lro; igb_free_receive_buffers(rxr); tcp_lro_free(lro); igb_dma_free(adapter, &rxr->rxdma); } free(adapter->rx_rings, M_DEVBUF); } /********************************************************************* * * Free receive ring data structures.
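* * Teardown runs in the reverse order of setup: sync POSTREAD, unload the * per-buffer DMA map, free the mbuf, destroy the map, and only then * destroy the parent tag, since bus_dma_tag_destroy() fails while maps * created from that tag still exist.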
* **********************************************************************/ static void igb_free_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct igb_rx_buf *rxbuf; int i; INIT_DEBUGOUT("igb_free_receive_buffers: begin"); /* Cleanup any existing buffers */ if (rxr->rx_buffers != NULL) { for (i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } rxbuf->m_head = NULL; rxbuf->m_pack = NULL; if (rxbuf->hmap != NULL) { bus_dmamap_destroy(rxr->htag, rxbuf->hmap); rxbuf->hmap = NULL; } if (rxbuf->pmap != NULL) { bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); rxbuf->pmap = NULL; } } if (rxr->rx_buffers != NULL) { free(rxr->rx_buffers, M_DEVBUF); rxr->rx_buffers = NULL; } } if (rxr->htag != NULL) { bus_dma_tag_destroy(rxr->htag); rxr->htag = NULL; } if (rxr->ptag != NULL) { bus_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; } } static __inline void igb_rx_discard(struct rx_ring *rxr, int i) { struct igb_rx_buf *rbuf; rbuf = &rxr->rx_buffers[i]; /* Partially received? Free the chain */ if (rxr->fmp != NULL) { rxr->fmp->m_flags |= M_PKTHDR; m_freem(rxr->fmp); rxr->fmp = NULL; rxr->lmp = NULL; } /* ** With advanced descriptors the writeback ** clobbers the buffer addrs, so it's easier ** to just free the existing mbufs and take ** the normal refresh path to get new buffers ** and mapping. */ if (rbuf->m_head) { m_free(rbuf->m_head); rbuf->m_head = NULL; } if (rbuf->m_pack) { m_free(rbuf->m_pack); rbuf->m_pack = NULL; } return; } static __inline void igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) { /* * At the moment LRO is only for IPv4/TCP packets, and the TCP checksum * of the packet should have been computed by hardware. Also it should * not have a VLAN tag in the ethernet header. */ if (rxr->lro_enabled && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) == (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * Send to the stack if: * - LRO not enabled, or * - no LRO resources, or * - LRO enqueue fails */ if (rxr->lro.lro_cnt != 0) if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } IGB_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, m); IGB_RX_LOCK(rxr); } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * * Return TRUE if more to clean, FALSE otherwise *********************************************************************/ static bool igb_rxeof(struct igb_queue *que, int count, int *done) { struct adapter *adapter = que->adapter; struct rx_ring *rxr = que->rxr; struct ifnet *ifp = adapter->ifp; struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; int i, processed = 0, rxdone = 0; u32 ptype, staterr = 0; union e1000_adv_rx_desc *cur; IGB_RX_LOCK(rxr); /* Sync the ring.
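* The POSTREAD|POSTWRITE sync below makes the hardware's descriptor * writeback visible to the CPU before any status bits are inspected; the * matching PREREAD|PREWRITE sync at next_desc hands the ring back to the * device.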
*/ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef DEV_NETMAP if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(ifp); na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR; selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET); IGB_RX_UNLOCK(rxr); IGB_CORE_LOCK(adapter); selwakeuppri(&na->rx_si, PI_NET); IGB_CORE_UNLOCK(adapter); return (0); } #endif /* DEV_NETMAP */ /* Main clean loop */ for (i = rxr->next_to_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; struct igb_rx_buf *rxbuf; u16 hlen, plen, hdr, vtag; bool eop = FALSE; cur = &rxr->rx_base[i]; staterr = le32toh(cur->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; count--; sendmp = mh = mp = NULL; cur->wb.upper.status_error = 0; rxbuf = &rxr->rx_buffers[i]; plen = le16toh(cur->wb.upper.length); ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK; if ((adapter->hw.mac.type == e1000_i350) && (staterr & E1000_RXDEXT_STATERR_LB)) vtag = be16toh(cur->wb.upper.vlan); else vtag = le16toh(cur->wb.upper.vlan); hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP); /* Make sure all segments of a bad packet are discarded */ if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) || (rxr->discard)) { ifp->if_ierrors++; ++rxr->rx_discarded; if (!eop) /* Catch subsequent segs */ rxr->discard = TRUE; else rxr->discard = FALSE; igb_rx_discard(rxr, i); goto next_desc; } /* ** The way the hardware is configured to ** split, it will ONLY use the header buffer ** when header split is enabled, otherwise we ** get normal behavior, i.e., both header and ** payload are DMA'd into the payload buffer. ** ** The fmp test is to catch the case where a ** packet spans multiple descriptors, in that ** case only the first header is valid. */ if (rxr->hdr_split && rxr->fmp == NULL) { hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > IGB_HDR_BUF) hlen = IGB_HDR_BUF; mh = rxr->rx_buffers[i].m_head; mh->m_len = hlen; /* clear buf pointer for refresh */ rxbuf->m_head = NULL; /* ** Get the payload length, this ** could be zero if it's a small ** packet. */ if (plen > 0) { mp = rxr->rx_buffers[i].m_pack; mp->m_len = plen; mh->m_next = mp; /* clear buf pointer */ rxbuf->m_pack = NULL; rxr->rx_split_packets++; } } else { /* ** Either no header split, or a ** secondary piece of a fragmented ** split packet.
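** ** (Note the clamp of hlen to IGB_HDR_BUF in the split path above: it ** keeps a corrupt hdr_info value reported by the hardware from ** overrunning the small header mbuf. A typical 54-byte Ethernet/IP/TCP ** header lands in m_head while the payload cluster carries the rest.)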
*/ mh = rxr->rx_buffers[i].m_pack; mh->m_len = plen; /* clear buf info for refresh */ rxbuf->m_pack = NULL; } ++processed; /* So we know when to refresh */ /* Initial frame - setup */ if (rxr->fmp == NULL) { mh->m_pkthdr.len = mh->m_len; /* Save the head of the chain */ rxr->fmp = mh; rxr->lmp = mh; if (mp != NULL) { /* Add payload if split */ mh->m_pkthdr.len += mp->m_len; rxr->lmp = mh->m_next; } } else { /* Chain mbufs together */ rxr->lmp->m_next = mh; rxr->lmp = rxr->lmp->m_next; rxr->fmp->m_pkthdr.len += mh->m_len; } if (eop) { rxr->fmp->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; rxr->rx_packets++; /* capture data for AIM */ rxr->packets++; rxr->bytes += rxr->fmp->m_pkthdr.len; rxr->rx_bytes += rxr->fmp->m_pkthdr.len; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) igb_rx_checksum(staterr, rxr->fmp, ptype); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (staterr & E1000_RXD_STAT_VP) != 0) { rxr->fmp->m_pkthdr.ether_vtag = vtag; rxr->fmp->m_flags |= M_VLANTAG; } #if __FreeBSD_version >= 800000 rxr->fmp->m_pkthdr.flowid = que->msix; rxr->fmp->m_flags |= M_FLOWID; #endif sendmp = rxr->fmp; /* Make sure to set M_PKTHDR. */ sendmp->m_flags |= M_PKTHDR; rxr->fmp = NULL; rxr->lmp = NULL; } next_desc: bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Advance our pointers to the next descriptor. */ if (++i == adapter->num_rx_desc) i = 0; /* ** Send to the stack or LRO */ if (sendmp != NULL) { rxr->next_to_check = i; igb_rx_input(rxr, ifp, sendmp, ptype); i = rxr->next_to_check; rxdone++; } /* Refresh mbufs every 8 descriptors */ if (processed == 8) { igb_refresh_mbufs(rxr, i); processed = 0; } } /* Catch any remainders */ if (igb_rx_unrefreshed(rxr)) igb_refresh_mbufs(rxr, i); rxr->next_to_check = i; /* * Flush any outstanding LRO work */ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } if (done != NULL) *done = rxdone; IGB_RX_UNLOCK(rxr); return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of the checksum so that the * stack doesn't spend time verifying it. * *********************************************************************/ static void igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype) { u16 status = (u16)staterr; u8 errors = (u8) (staterr >> 24); int sctp; /* The Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) { mp->m_pkthdr.csum_flags = 0; return; } if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0) sctp = 1; else sctp = 0; if (status & E1000_RXD_STAT_IPCS) { /* Did it pass? */ if (!(errors & E1000_RXD_ERR_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else mp->m_pkthdr.csum_flags = 0; } if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); #if __FreeBSD_version >= 800000 if (sctp) /* reassign */ type = CSUM_SCTP_VALID; #endif /* Did it pass?
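* A csum_data of 0xffff together with CSUM_DATA_VALID|CSUM_PSEUDO_HDR * tells the stack that the full pseudo-header checksum already checked * out, so software verification is skipped entirely; for SCTP (CRC32c) * only CSUM_SCTP_VALID is set and csum_data is left untouched.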
*/ if (!(errors & E1000_RXD_ERR_TCPE)) { mp->m_pkthdr.csum_flags |= type; if (sctp == 0) mp->m_pkthdr.csum_data = htons(0xffff); } } return; } /* * This routine is run via a vlan * config EVENT */ static void igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct adapter *adapter = ifp->if_softc; u32 index, bit; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IGB_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; /* Change hw filter setting */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) igb_setup_vlan_hw_support(adapter); IGB_CORE_UNLOCK(adapter); } /* * This routine is run via a vlan * unconfig EVENT */ static void igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct adapter *adapter = ifp->if_softc; u32 index, bit; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IGB_CORE_LOCK(adapter); index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; /* Change hw filter setting */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) igb_setup_vlan_hw_support(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_setup_vlan_hw_support(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; u32 reg; if (adapter->vf_ifp) { e1000_rlpml_set_vf(hw, adapter->max_frame_size + VLAN_TAG_SIZE); return; } reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_VME; E1000_WRITE_REG(hw, E1000_CTRL, reg); /* Enable the Filter Table */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { reg = E1000_READ_REG(hw, E1000_RCTL); reg &= ~E1000_RCTL_CFIEN; reg |= E1000_RCTL_VFE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } /* Update the frame size */ E1000_WRITE_REG(&adapter->hw, E1000_RLPML, adapter->max_frame_size + VLAN_TAG_SIZE); /* Don't bother with table if no vlans */ if ((adapter->num_vlans == 0) || ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) return; /* ** A soft reset zeroes out the VFTA, so ** we need to repopulate it now. */ for (int i = 0; i < IGB_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) { if (adapter->vf_ifp) e1000_vfta_set_vf(hw, adapter->shadow_vfta[i], TRUE); else e1000_write_vfta(hw, i, adapter->shadow_vfta[i]); } } static void igb_enable_intr(struct adapter *adapter) { /* With RSS set up what to auto clear */ if (adapter->msix_mem) { u32 mask = (adapter->que_mask | adapter->link_mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask); E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); } else { E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK); } E1000_WRITE_FLUSH(&adapter->hw); return; } static void igb_disable_intr(struct adapter *adapter) { if (adapter->msix_mem) { E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0); } E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(&adapter->hw); return; } /* * Bit of a misnomer, what this really means is * to enable OS management of the system...
aka * to disable special hardware management features */ static void igb_init_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; manc2h |= 1 << 5; /* Mng Port 623 */ manc2h |= 1 << 6; /* Mng Port 664 */ E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one. */ static void igb_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. * */ static void igb_get_hw_control(struct adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; /* Let firmware know the driver has taken over */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } /* * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that the * driver is no longer loaded. * */ static void igb_release_hw_control(struct adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; /* Let firmware take over control of h/w */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); } static int igb_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return (TRUE); } /* * Enable PCI Wake On Lan capability */ static void igb_enable_wakeup(device_t dev) { u16 cap, status; u8 id; /* First find the capabilities pointer */ cap = pci_read_config(dev, PCIR_CAP_PTR, 2); /* Read the PM Capabilities */ id = pci_read_config(dev, cap, 1); if (id != PCIY_PMG) /* Something wrong */ return; /* OK, we have the power capabilities, so now get the status register */ cap += PCIR_POWER_STATUS; status = pci_read_config(dev, cap, 2); status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, cap, status, 2); return; } static void igb_led_func(void *arg, int onoff) { struct adapter *adapter = arg; IGB_CORE_LOCK(adapter); if (onoff) { e1000_setup_led(&adapter->hw); e1000_led_on(&adapter->hw); } else { e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } IGB_CORE_UNLOCK(adapter); } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void igb_update_stats_counters(struct adapter *adapter) { struct ifnet *ifp; struct e1000_hw *hw = &adapter->hw; struct e1000_hw_stats *stats; /* ** The virtual function adapter has only a ** small controlled set of stats, do only ** those and return.
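** ** Those VF counters are 32 bits wide and wrap, so UPDATE_VF_REG() (used ** in igb_update_vf_stats_counters() below) folds each raw reading into ** a wider running count, roughly along these lines (a sketch of the ** usual wrap-safe pattern, not the literal macro): ** ** new = E1000_READ_REG(hw, reg); ** if (new < last) /* 32-bit rollover */ ** cur += 0x100000000LL; ** cur = (cur & 0xFFFFFFFF00000000LL) | new; ** last = new;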
*/ if (adapter->vf_ifp) { igb_update_vf_stats_counters(adapter); return; } stats = (struct e1000_hw_stats *)adapter->stats; if (adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS); stats->sec += E1000_READ_REG(hw, E1000_SEC); } stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); stats->mpc += E1000_READ_REG(hw, E1000_MPC); stats->scc += E1000_READ_REG(hw, E1000_SCC); stats->ecol += E1000_READ_REG(hw, E1000_ECOL); stats->mcc += E1000_READ_REG(hw, E1000_MCC); stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); stats->colc += E1000_READ_REG(hw, E1000_COLC); stats->dc += E1000_READ_REG(hw, E1000_DC); stats->rlec += E1000_READ_REG(hw, E1000_RLEC); stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); /* ** For watchdog management we need to know if we have been ** paused during the last interval, so capture that here. */ adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); stats->xoffrxc += adapter->pause_frames; stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); stats->gprc += E1000_READ_REG(hw, E1000_GPRC); stats->bprc += E1000_READ_REG(hw, E1000_BPRC); stats->mprc += E1000_READ_REG(hw, E1000_MPRC); stats->gptc += E1000_READ_REG(hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. */ /* Both registers clear on the read of the high dword */ stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32); stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32); stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); stats->ruc += E1000_READ_REG(hw, E1000_RUC); stats->rfc += E1000_READ_REG(hw, E1000_RFC); stats->roc += E1000_READ_REG(hw, E1000_ROC); stats->rjc += E1000_READ_REG(hw, E1000_RJC); stats->tor += E1000_READ_REG(hw, E1000_TORH); stats->tot += E1000_READ_REG(hw, E1000_TOTH); stats->tpr += E1000_READ_REG(hw, E1000_TPR); stats->tpt += E1000_READ_REG(hw, E1000_TPT); stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); stats->mptc += E1000_READ_REG(hw, E1000_MPTC); stats->bptc += E1000_READ_REG(hw, E1000_BPTC); /* Interrupt Counts */ stats->iac += E1000_READ_REG(hw, E1000_IAC); stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); /* Host to Card Statistics */ stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32)); stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); ifp = adapter->ifp; ifp->if_collisions = stats->colc; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc + stats->crcerrs + stats->algnerrc + stats->ruc + stats->roc + stats->mpc + stats->cexterr; /* Tx Errors */ ifp->if_oerrors = stats->ecol + stats->latecol + adapter->watchdog_events; /* Driver specific counters */ adapter->device_control = E1000_READ_REG(hw, E1000_CTRL); adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL); adapter->int_mask = E1000_READ_REG(hw, E1000_IMS); adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS); adapter->packet_buf_alloc_tx = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); adapter->packet_buf_alloc_rx = (E1000_READ_REG(hw, E1000_PBA) & 0xffff); } /********************************************************************** * * Initialize the VF board statistics counters. * **********************************************************************/ static void igb_vf_init_stats(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_vf_stats *stats; stats = (struct e1000_vf_stats *)adapter->stats; if (stats == NULL) return; stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); } /********************************************************************** * * Update the VF board statistics counters. * **********************************************************************/ static void igb_update_vf_stats_counters(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_vf_stats *stats; if (adapter->link_speed == 0) return; stats = (struct e1000_vf_stats *)adapter->stats; UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); } /* Export a single 32-bit register via a read-only sysctl. 
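* The adapter rides in oid_arg1 and the register offset in oid_arg2, so * one handler serves every head/tail register; see the SYSCTL_ADD_PROC() * calls in igb_add_hw_stats() below, e.g. the (adapter, E1000_RDH(rxr->me)) * pairing behind "rxd_head".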
*/ static int igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* ** Tunable interrupt rate handler */ static int igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) { struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1); int error; u32 reg, usec, rate; reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix)); usec = ((reg & 0x7FFC) >> 2); if (usec > 0) rate = 1000000 / usec; else rate = 0; error = sysctl_handle_int(oidp, &rate, 0, req); if (error || !req->newptr) return (error); return (0); } /* * Add sysctl variables, one per statistic, to the system. */ static void igb_add_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, 0, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", CTLFLAG_RD, &adapter->device_control, "Device Control Register"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", CTLFLAG_RD, &adapter->rx_control, "Receiver Control Register"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", CTLFLAG_RD, &adapter->int_mask, "Interrupt Mask"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", CTLFLAG_RD, &adapter->eint_mask, "Extended Interrupt Mask"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", CTLFLAG_RD, &adapter->packet_buf_alloc_tx, "Transmit Buffer Packet Allocation"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", CTLFLAG_RD, &adapter->packet_buf_alloc_rx, "Receive Buffer Packet Allocation"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { struct lro_ctrl *lro = &rxr->lro; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", CTLFLAG_RD, &adapter->queues[i], sizeof(&adapter->queues[i]), igb_sysctl_interrupt_rate_handler, "IU", "Interrupt Rate"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLFLAG_RD, adapter, E1000_TDH(txr->me), igb_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx,
queue_list, OID_AUTO, "txd_tail", CTLFLAG_RD, adapter, E1000_TDT(txr->me), igb_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue No Descriptor Available"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &txr->tx_packets, "Queue Packets Transmitted"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLFLAG_RD, adapter, E1000_RDH(rxr->me), igb_sysctl_reg_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLFLAG_RD, adapter, E1000_RDT(rxr->me), igb_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued", CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed", CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "MAC Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); /* ** VF adapter has a very limited set of stats ** since it's not managing the metal, so to speak. */ if (adapter->vf_ifp) { SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &stats->gprc, "Good Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &stats->gorc, "Good Octets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); return; } SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &stats->symerrs, "Symbol Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &stats->sec, "Sequence Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &stats->dc, "Defer Count"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &stats->mpc, "Missed Packets"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &stats->rnbc, "Receive No Buffers"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &stats->ruc, "Receive Undersize"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &stats->rjc, "Received Jabber"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO,
"recv_errs", CTLFLAG_RD, &stats->rxerrc, "Receive Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &stats->crcerrs, "CRC errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &stats->algnerrc, "Alignment Errors"); /* On 82575 these are collision counts */ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", CTLFLAG_RD, &stats->cexterr, "Collision/Carrier extension errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &stats->xonrxc, "XON Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &stats->xontxc, "XON Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &stats->xoffrxc, "XOFF Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &stats->xofftxc, "XOFF Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &stats->tpr, "Total Packets Received "); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &stats->gprc, "Good Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &stats->prc64, "64 byte frames received "); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &stats->gorc, "Good Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames 
transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &stats->tsctc, "TSO Contexts Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", CTLFLAG_RD, &stats->tsctfc, "TSO Contexts Failed"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &stats->iac, "Interrupt Assertion Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", CTLFLAG_RD, &stats->icrxptc, "Interrupt Cause Rx Pkt Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", CTLFLAG_RD, &stats->icrxatc, "Interrupt Cause Rx Abs Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", CTLFLAG_RD, &stats->ictxptc, "Interrupt Cause Tx Pkt Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", CTLFLAG_RD, &stats->ictxatc, "Interrupt Cause Tx Abs Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", CTLFLAG_RD, &stats->ictxqec, "Interrupt Cause Tx Queue Empty Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", CTLFLAG_RD, &stats->ictxqmtc, "Interrupt Cause Tx Queue Min Thresh Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &stats->icrxdmtc, "Interrupt Cause Rx Desc Min Thresh Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun", CTLFLAG_RD, &stats->icrxoc, "Interrupt Cause Receiver Overrun Count"); /* Host to Card Stats */ host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", CTLFLAG_RD, NULL, "Host to Card Statistics"); host_list = SYSCTL_CHILDREN(host_node); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt", CTLFLAG_RD, &stats->cbtmpc, "Circuit Breaker Tx Packet Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard", CTLFLAG_RD, &stats->htdpmc, "Host Transmit Discarded Packets"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt", CTLFLAG_RD, &stats->rpthc, "Rx Packets To Host"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts", CTLFLAG_RD, &stats->cbrmpc, "Circuit Breaker Rx Packet Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop", CTLFLAG_RD, &stats->cbrdpc, "Circuit Breaker Rx Dropped Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt", CTLFLAG_RD, &stats->hgptc, "Host Good Packets Tx Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop", CTLFLAG_RD, &stats->htcbdpc, "Host Tx Circuit Breaker Dropped Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes", CTLFLAG_RD, &stats->hgorc, "Host Good Octets Received Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes", CTLFLAG_RD, &stats->hgotc, "Host Good Octets Transmit Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors", CTLFLAG_RD, &stats->lenerrs, "Length Errors"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt", CTLFLAG_RD, &stats->scvpc, "SerDes/SGMII Code Violation Pkt Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed", CTLFLAG_RD, &stats->hrmpc, "Header Redirection Missed Packet Count"); } /********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words, stuff that matters is in that extent. 
* **********************************************************************/ static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) { adapter = (struct adapter *)arg1; igb_print_nvm_info(adapter); } return (error); } static void igb_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* It's a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ", row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static void igb_set_sysctl_value(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); } /* ** Set flow control using sysctl: ** Flow control values: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct adapter *adapter = (struct adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); return (error); } /* ** Manage DMA Coalesce: ** Control values: ** 0/1 - off/on ** Legal timer values are: ** 250, 500, 1000-10000 in thousands */ static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error; error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (adapter->dmac) { case 0: /* Disabling */ break; case 1: /* Just enable and use default */ adapter->dmac = 1000; break; case 250: case 500: case 1000: case 2000: case 3000: case 4000: case 5000: case 6000: case 7000: case 8000: case 9000: case 10000: /* Legal values - allow */ break; default: /* Do nothing, illegal value */ adapter->dmac = 0; return (error); } /* Reinit the interface */ igb_init(adapter); return (error); } Index: projects/nand/sys/dev/fxp/if_fxp.c =================================================================== --- projects/nand/sys/dev/fxp/if_fxp.c (revision 235262) +++ projects/nand/sys/dev/fxp/if_fxp.c (revision 235263) @@ -1,3250 +1,3251 @@ /*- * Copyright (c) 1995, David Greenman * Copyright (c) 2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/100B PCI Fast Ethernet driver */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for PCIM_CMD_xxx */ #include #include #include #include #include MODULE_DEPEND(fxp, pci, 1, 1, 1); MODULE_DEPEND(fxp, ether, 1, 1, 1); MODULE_DEPEND(fxp, miibus, 1, 1, 1); #include "miibus_if.h" /* * NOTE! On !x86 we typically have an alignment constraint. The * card DMAs the packet immediately following the RFA. However, * the first thing in the packet is a 14-byte Ethernet header. * This means that the packet is misaligned. To compensate, * we actually offset the RFA 2 bytes into the cluster. This * aligns the packet after the Ethernet header at a 32-bit * boundary. HOWEVER! This means that the RFA is misaligned! */ #define RFA_ALIGNMENT_FUDGE 2 /* * Set initial transmit threshold at 64 (512 bytes). This is * increased by 64 (512 bytes) at a time, to maximum of 192 * (1536 bytes), if an underrun occurs. */ static int tx_threshold = 64; /* * The configuration byte map has several undefined fields which * must be one or must be zero. Set up a template for these bits. * The actual configuration is performed in fxp_init_body. * * See struct fxp_cb_config for the bit definitions. */ static const u_char fxp_cb_config_template[] = { 0x0, 0x0, /* cb_status */ 0x0, 0x0, /* cb_command */ 0x0, 0x0, 0x0, 0x0, /* link_addr */ 0x0, /* 0 */ 0x0, /* 1 */ 0x0, /* 2 */ 0x0, /* 3 */ 0x0, /* 4 */ 0x0, /* 5 */ 0x32, /* 6 */ 0x0, /* 7 */ 0x0, /* 8 */ 0x0, /* 9 */ 0x6, /* 10 */ 0x0, /* 11 */ 0x0, /* 12 */ 0x0, /* 13 */ 0xf2, /* 14 */ 0x48, /* 15 */ 0x0, /* 16 */ 0x40, /* 17 */ 0xf0, /* 18 */ 0x0, /* 19 */ 0x3f, /* 20 */ 0x5, /* 21 */ 0x0, /* 22 */ 0x0, /* 23 */ 0x0, /* 24 */ 0x0, /* 25 */ 0x0, /* 26 */ 0x0, /* 27 */ 0x0, /* 28 */ 0x0, /* 29 */ 0x0, /* 30 */ 0x0 /* 31 */ }; /* * Claim various Intel PCI device identifiers for this driver. The * sub-vendor and sub-device fields are extensively used to identify * particular variants, but we don't currently differentiate between * them.
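* * A revid of -1 acts as a wildcard: fxp_find_ident() returns the first * entry whose devid matches and whose revid is either exact or -1, so * the specific 0x1229 steppings must stay ahead of the catch-all * { 0x1229, -1, ... } entry.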
*/ static const struct fxp_ident fxp_ident_table[] = { { 0x1029, -1, 0, "Intel 82559 PCI/CardBus Pro/100" }, { 0x1030, -1, 0, "Intel 82559 Pro/100 Ethernet" }, { 0x1031, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, { 0x1032, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, { 0x1033, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1034, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1035, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1036, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1037, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1038, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1039, -1, 4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, { 0x103A, -1, 4, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, { 0x103B, -1, 4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, { 0x103C, -1, 4, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, { 0x103D, -1, 4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, { 0x103E, -1, 4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, { 0x1050, -1, 5, "Intel 82801BA (D865) Pro/100 VE Ethernet" }, { 0x1051, -1, 5, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" }, { 0x1059, -1, 0, "Intel 82551QM Pro/100 M Mobile Connection" }, { 0x1064, -1, 6, "Intel 82562EZ (ICH6)" }, { 0x1065, -1, 6, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" }, { 0x1068, -1, 6, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" }, { 0x1069, -1, 6, "Intel 82562EM/EX/GX Pro/100 Ethernet" }, { 0x1091, -1, 7, "Intel 82562GX Pro/100 Ethernet" }, { 0x1092, -1, 7, "Intel Pro/100 VE Network Connection" }, { 0x1093, -1, 7, "Intel Pro/100 VM Network Connection" }, { 0x1094, -1, 7, "Intel Pro/100 946GZ (ICH7) Network Connection" }, { 0x1209, -1, 0, "Intel 82559ER Embedded 10/100 Ethernet" }, { 0x1229, 0x01, 0, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x02, 0, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x03, 0, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x04, 0, "Intel 82558 Pro/100 Ethernet" }, { 0x1229, 0x05, 0, "Intel 82558 Pro/100 Ethernet" }, { 0x1229, 0x06, 0, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x07, 0, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x08, 0, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x09, 0, "Intel 82559ER Pro/100 Ethernet" }, { 0x1229, 0x0c, 0, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0d, 0, "Intel 82550C Pro/100 Ethernet" }, { 0x1229, 0x0e, 0, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0f, 0, "Intel 82551 Pro/100 Ethernet" }, { 0x1229, 0x10, 0, "Intel 82551 Pro/100 Ethernet" }, { 0x1229, -1, 0, "Intel 82557/8/9 Pro/100 Ethernet" }, { 0x2449, -1, 2, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" }, { 0x27dc, -1, 7, "Intel 82801GB (ICH7) 10/100 Ethernet" }, { 0, -1, 0, NULL }, }; #ifdef FXP_IP_CSUM_WAR #define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #else #define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #endif static int fxp_probe(device_t dev); static int fxp_attach(device_t dev); static int fxp_detach(device_t dev); static int fxp_shutdown(device_t dev); static int fxp_suspend(device_t dev); static int fxp_resume(device_t dev); static const struct fxp_ident *fxp_find_ident(device_t dev); static void fxp_intr(void *xsc); static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m, uint16_t status, int pos); static int fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack, int count); static void fxp_init(void *xsc); static void fxp_init_body(struct fxp_softc *sc, int); static void fxp_tick(void *xsc); static void fxp_start(struct ifnet *ifp); static void
fxp_start_body(struct ifnet *ifp); static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head); static void fxp_txeof(struct fxp_softc *sc); static void fxp_stop(struct fxp_softc *sc); static void fxp_release(struct fxp_softc *sc); static int fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void fxp_watchdog(struct fxp_softc *sc); static void fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp); static void fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp); static int fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp); static int fxp_mc_addrs(struct fxp_softc *sc); static void fxp_mc_setup(struct fxp_softc *sc); static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize); static void fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data); static void fxp_autosize_eeprom(struct fxp_softc *sc); static void fxp_load_eeprom(struct fxp_softc *sc); static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words); static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words); static int fxp_ifmedia_upd(struct ifnet *ifp); static void fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int fxp_serial_ifmedia_upd(struct ifnet *ifp); static void fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int fxp_miibus_readreg(device_t dev, int phy, int reg); static int fxp_miibus_writereg(device_t dev, int phy, int reg, int value); static void fxp_miibus_statchg(device_t dev); static void fxp_load_ucode(struct fxp_softc *sc); static void fxp_update_stats(struct fxp_softc *sc); static void fxp_sysctl_node(struct fxp_softc *sc); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS); static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS); static void fxp_scb_wait(struct fxp_softc *sc); static void fxp_scb_cmd(struct fxp_softc *sc, int cmd); static void fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, bus_dma_tag_t dmat, bus_dmamap_t map); static device_method_t fxp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fxp_probe), DEVMETHOD(device_attach, fxp_attach), DEVMETHOD(device_detach, fxp_detach), DEVMETHOD(device_shutdown, fxp_shutdown), DEVMETHOD(device_suspend, fxp_suspend), DEVMETHOD(device_resume, fxp_resume), /* MII interface */ DEVMETHOD(miibus_readreg, fxp_miibus_readreg), DEVMETHOD(miibus_writereg, fxp_miibus_writereg), DEVMETHOD(miibus_statchg, fxp_miibus_statchg), - { 0, 0 } + DEVMETHOD_END }; static driver_t fxp_driver = { "fxp", fxp_methods, sizeof(struct fxp_softc), }; static devclass_t fxp_devclass; -DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0); -DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0); +DRIVER_MODULE_ORDERED(fxp, pci, fxp_driver, fxp_devclass, NULL, NULL, + SI_ORDER_ANY); +DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, NULL, NULL); static struct resource_spec fxp_res_spec_mem[] = { { SYS_RES_MEMORY, FXP_PCI_MMBA, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec fxp_res_spec_io[] = { { SYS_RES_IOPORT, FXP_PCI_IOBA, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; /* * Wait for the previous command to be accepted (but not necessarily * completed). 
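* * The wait is bounded: 10000 spins of DELAY(2) gives the chip roughly * 20 ms to accept the command before the timeout diagnostic (including * the flow-control threshold/status bytes) is printed.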
*/ static void fxp_scb_wait(struct fxp_softc *sc) { union { uint16_t w; uint8_t b[2]; } flowctl; int i = 10000; while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) DELAY(2); if (i == 0) { flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FC_THRESH); flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FC_STATUS); device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n", CSR_READ_1(sc, FXP_CSR_SCB_COMMAND), CSR_READ_1(sc, FXP_CSR_SCB_STATACK), CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w); } } static void fxp_scb_cmd(struct fxp_softc *sc, int cmd) { if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) { CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP); fxp_scb_wait(sc); } CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); } static void fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, bus_dma_tag_t dmat, bus_dmamap_t map) { int i; for (i = 10000; i > 0; i--) { DELAY(2); bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((le16toh(*status) & FXP_CB_STATUS_C) != 0) break; } if (i == 0) device_printf(sc->dev, "DMA timeout\n"); } static const struct fxp_ident * fxp_find_ident(device_t dev) { uint16_t devid; uint8_t revid; const struct fxp_ident *ident; if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { devid = pci_get_device(dev); revid = pci_get_revid(dev); for (ident = fxp_ident_table; ident->name != NULL; ident++) { if (ident->devid == devid && (ident->revid == revid || ident->revid == -1)) { return (ident); } } } return (NULL); } /* * Return identification string if this device is ours. */ static int fxp_probe(device_t dev) { const struct fxp_ident *ident; ident = fxp_find_ident(dev); if (ident != NULL) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static void fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int fxp_attach(device_t dev) { struct fxp_softc *sc; struct fxp_cb_tx *tcbp; struct fxp_tx *txp; struct fxp_rx *rxp; struct ifnet *ifp; uint32_t val; uint16_t data; u_char eaddr[ETHER_ADDR_LEN]; int error, flags, i, pmc, prefer_iomap; error = 0; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0); ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd, fxp_serial_ifmedia_sts); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* * Enable bus mastering. */ pci_enable_busmaster(dev); val = pci_read_config(dev, PCIR_COMMAND, 2); /* * Figure out which we should try first - memory mapping or i/o mapping? * We default to memory mapping. Then we accept an override from the * command line. Then we check to see which one is enabled. */ prefer_iomap = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &prefer_iomap); if (prefer_iomap) sc->fxp_spec = fxp_res_spec_io; else sc->fxp_spec = fxp_res_spec_mem; error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res); if (error) { if (sc->fxp_spec == fxp_res_spec_mem) sc->fxp_spec = fxp_res_spec_io; else sc->fxp_spec = fxp_res_spec_mem; error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res); } if (error) { device_printf(dev, "could not allocate resources\n"); error = ENXIO; goto fail; } if (bootverbose) { device_printf(dev, "using %s space register mapping\n", sc->fxp_spec == fxp_res_spec_mem ? 
"memory" : "I/O"); } /* * Put CU/RU idle state and prepare full reset. */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* Full reset and disable interrupts. */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET); DELAY(10); CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); /* * Find out how large of an SEEPROM we have. */ fxp_autosize_eeprom(sc); fxp_load_eeprom(sc); /* * Find out the chip revision; lump all 82557 revs together. */ sc->ident = fxp_find_ident(dev); if (sc->ident->ich > 0) { /* Assume ICH controllers are 82559. */ sc->revision = FXP_REV_82559_A0; } else { data = sc->eeprom[FXP_EEPROM_MAP_CNTR]; if ((data >> 8) == 1) sc->revision = FXP_REV_82557; else sc->revision = pci_get_revid(dev); } /* * Check availability of WOL. 82559ER does not support WOL. */ if (sc->revision >= FXP_REV_82558_A4 && sc->revision != FXP_REV_82559S_A) { data = sc->eeprom[FXP_EEPROM_MAP_ID]; if ((data & 0x20) != 0 && pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) sc->flags |= FXP_FLAG_WOLCAP; } if (sc->revision == FXP_REV_82550_C) { /* * 82550C with server extension requires microcode to * receive fragmented UDP datagrams. However if the * microcode is used for client-only featured 82550C * it locks up controller. */ data = sc->eeprom[FXP_EEPROM_MAP_COMPAT]; if ((data & 0x0400) == 0) sc->flags |= FXP_FLAG_NO_UCODE; } /* Receiver lock-up workaround detection. */ if (sc->revision < FXP_REV_82558_A4) { data = sc->eeprom[FXP_EEPROM_MAP_COMPAT]; if ((data & 0x03) != 0x03) { sc->flags |= FXP_FLAG_RXBUG; device_printf(dev, "Enabling Rx lock-up workaround\n"); } } /* * Determine whether we must use the 503 serial interface. */ data = sc->eeprom[FXP_EEPROM_MAP_PRI_PHY]; if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0 && (data & FXP_PHY_SERIAL_ONLY)) sc->flags |= FXP_FLAG_SERIAL_MEDIA; fxp_sysctl_node(sc); /* * Enable workarounds for certain chip revision deficiencies. * * Systems based on the ICH2/ICH2-M chip from Intel, and possibly * some systems based a normal 82559 design, have a defect where * the chip can cause a PCI protocol violation if it receives * a CU_RESUME command when it is entering the IDLE state. The * workaround is to disable Dynamic Standby Mode, so the chip never * deasserts CLKRUN#, and always remains in an active state. * * See Intel 82801BA/82801BAM Specification Update, Errata #30. */ if ((sc->ident->ich >= 2 && sc->ident->ich <= 3) || (sc->ident->ich == 0 && sc->revision >= FXP_REV_82559_A0)) { data = sc->eeprom[FXP_EEPROM_MAP_ID]; if (data & 0x02) { /* STB enable */ uint16_t cksum; int i; device_printf(dev, "Disabling dynamic standby mode in EEPROM\n"); data &= ~0x02; sc->eeprom[FXP_EEPROM_MAP_ID] = data; fxp_write_eeprom(sc, &data, FXP_EEPROM_MAP_ID, 1); device_printf(dev, "New EEPROM ID: 0x%x\n", data); cksum = 0; for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) cksum += sc->eeprom[i]; i = (1 << sc->eeprom_size) - 1; cksum = 0xBABA - cksum; fxp_write_eeprom(sc, &cksum, i, 1); device_printf(dev, "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n", i, sc->eeprom[i], cksum); sc->eeprom[i] = cksum; /* * If the user elects to continue, try the software * workaround, as it is better than nothing. */ sc->flags |= FXP_FLAG_CU_RESUME_BUG; } } /* * If we are not a 82557 chip, we can enable extended features. */ if (sc->revision != FXP_REV_82557) { /* * If MWI is enabled in the PCI configuration, and there * is a valid cacheline size (8 or 16 dwords), then tell * the board to turn on MWI. 
*/ if (val & PCIM_CMD_MWRICEN && pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0) sc->flags |= FXP_FLAG_MWI_ENABLE; /* turn on the extended TxCB feature */ sc->flags |= FXP_FLAG_EXT_TXCB; /* enable reception of long frames for VLAN */ sc->flags |= FXP_FLAG_LONG_PKT_EN; } else { /* a hack to get long VLAN frames on a 82557 */ sc->flags |= FXP_FLAG_SAVE_BAD; } /* For 82559 or later chips, Rx checksum offload is supported. */ if (sc->revision >= FXP_REV_82559_A0) { /* 82559ER does not support Rx checksum offloading. */ if (sc->ident->devid != 0x1209) sc->flags |= FXP_FLAG_82559_RXCSUM; } /* * Enable use of extended RFDs and TCBs for 82550 * and later chips. Note: we need extended TXCB support * too, but that's already enabled by the code above. * Be careful to do this only on the right devices. */ if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C || sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F || sc->revision == FXP_REV_82551_10) { sc->rfa_size = sizeof (struct fxp_rfa); sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT; sc->flags |= FXP_FLAG_EXT_RFA; /* Use extended RFA instead of 82559 checksum mode. */ sc->flags &= ~FXP_FLAG_82559_RXCSUM; } else { sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN; sc->tx_cmd = FXP_CB_COMMAND_XMIT; } /* * Allocate DMA tags and DMA safe memory. */ sc->maxtxseg = FXP_NTXSEG; sc->maxsegsize = MCLBYTES; if (sc->flags & FXP_FLAG_EXT_RFA) { sc->maxtxseg--; sc->maxsegsize = FXP_TSO_SEGSIZE; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header), sc->maxtxseg, sc->maxsegsize, 0, busdma_lock_mutex, &Giant, &sc->fxp_txmtag); if (error) { device_printf(dev, "could not create TX DMA tag\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->fxp_rxmtag); if (error) { device_printf(dev, "could not create RX DMA tag\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant, &sc->fxp_stag); if (error) { device_printf(dev, "could not create stats DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->fxp_smap); if (error) { device_printf(dev, "could not allocate stats DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats, sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "could not load the stats DMA buffer\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag); if (error) { device_printf(dev, "could not create TxCB DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->cbl_map); if (error) { device_printf(dev, "could not allocate TxCB DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map, sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr, &sc->fxp_desc.cbl_addr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "could not load TxCB DMA buffer\n"); goto fail; } error = 
bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant, &sc->mcs_tag); if (error) { device_printf(dev, "could not create multicast setup DMA tag\n"); goto fail; } error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->mcs_map); if (error) { device_printf(dev, "could not allocate multicast setup DMA memory\n"); goto fail; } error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp, sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "can't load the multicast setup DMA buffer\n"); goto fail; } /* * Pre-allocate the TX DMA maps and setup the pointers to * the TX command blocks. */ txp = sc->fxp_desc.tx_list; tcbp = sc->fxp_desc.cbl_list; for (i = 0; i < FXP_NTXCB; i++) { txp[i].tx_cb = tcbp + i; error = bus_dmamap_create(sc->fxp_txmtag, 0, &txp[i].tx_map); if (error) { device_printf(dev, "can't create DMA map for TX\n"); goto fail; } } error = bus_dmamap_create(sc->fxp_rxmtag, 0, &sc->spare_map); if (error) { device_printf(dev, "can't create spare DMA map\n"); goto fail; } /* * Pre-allocate our receive buffers. */ sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL; for (i = 0; i < FXP_NRFABUFS; i++) { rxp = &sc->fxp_desc.rx_list[i]; error = bus_dmamap_create(sc->fxp_rxmtag, 0, &rxp->rx_map); if (error) { device_printf(dev, "can't create DMA map for RX\n"); goto fail; } if (fxp_new_rfabuf(sc, rxp) != 0) { error = ENOMEM; goto fail; } fxp_add_rfabuf(sc, rxp); } /* * Read MAC address. */ eaddr[0] = sc->eeprom[FXP_EEPROM_MAP_IA0] & 0xff; eaddr[1] = sc->eeprom[FXP_EEPROM_MAP_IA0] >> 8; eaddr[2] = sc->eeprom[FXP_EEPROM_MAP_IA1] & 0xff; eaddr[3] = sc->eeprom[FXP_EEPROM_MAP_IA1] >> 8; eaddr[4] = sc->eeprom[FXP_EEPROM_MAP_IA2] & 0xff; eaddr[5] = sc->eeprom[FXP_EEPROM_MAP_IA2] >> 8; if (bootverbose) { device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n", pci_get_vendor(dev), pci_get_device(dev), pci_get_subvendor(dev), pci_get_subdevice(dev), pci_get_revid(dev)); device_printf(dev, "Dynamic Standby mode is %s\n", sc->eeprom[FXP_EEPROM_MAP_ID] & 0x02 ? "enabled" : "disabled"); } /* * If this is only a 10Mbps device, then there is no MII, and * the PHY will use a serial interface instead. * * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter * doesn't have a programming interface of any sort. The * media is sensed automatically based on how the link partner * is configured. This is, in essence, manual configuration. */ if (sc->flags & FXP_FLAG_SERIAL_MEDIA) { ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } else { /* * i82557 wedge when isolating all of their PHYs. 
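*/

/*
 * MIIF_NOISOLATE tells the MII layer never to electrically isolate
 * the PHY, avoiding the wedge described above; MIIF_DOPAUSE
 * additionally lets the PHY advertise flow-control (pause) support,
 * which only 82558 rev A4 and later chips can honor.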
*/ flags = MIIF_NOISOLATE; if (sc->revision >= FXP_REV_82558_A4) flags |= MIIF_DOPAUSE; error = mii_attach(dev, &sc->miibus, ifp, fxp_ifmedia_upd, fxp_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, flags); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = fxp_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = fxp_ioctl; ifp->if_start = fxp_start; ifp->if_capabilities = ifp->if_capenable = 0; /* Enable checksum offload/TSO for 82550 or better chips */ if (sc->flags & FXP_FLAG_EXT_RFA) { ifp->if_hwassist = FXP_CSUM_FEATURES | CSUM_TSO; ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_TSO4; } if (sc->flags & FXP_FLAG_82559_RXCSUM) { ifp->if_capabilities |= IFCAP_RXCSUM; ifp->if_capenable |= IFCAP_RXCSUM; } if (sc->flags & FXP_FLAG_WOLCAP) { ifp->if_capabilities |= IFCAP_WOL_MAGIC; ifp->if_capenable |= IFCAP_WOL_MAGIC; } #ifdef DEVICE_POLLING /* Inform the world we support polling. */ ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Attach the interface. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */ if ((sc->flags & FXP_FLAG_EXT_RFA) != 0) { ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; } /* * Let the system queue as many packets as we have available * TX descriptors. */ IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1); ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1; IFQ_SET_READY(&ifp->if_snd); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, fxp_intr, sc, &sc->ih); if (error) { device_printf(dev, "could not setup irq\n"); ether_ifdetach(sc->ifp); goto fail; } /* * Configure hardware to reject magic frames, otherwise the * system will hang on receipt of magic frames. */ if ((sc->flags & FXP_FLAG_WOLCAP) != 0) { FXP_LOCK(sc); /* Clear wakeup events. */ CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR)); fxp_init_body(sc, 0); fxp_stop(sc); FXP_UNLOCK(sc); } fail: if (error) fxp_release(sc); return (error); } /* * Release all resources. The softc lock should not be held and the * interrupt should already be torn down.
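*/

/*
 * Teardown below runs roughly in the reverse order of fxp_attach():
 * child devices (miibus) first, then descriptor DMA maps and memory,
 * the bus resources, the per-buffer DMA maps and tags, the ifnet,
 * and finally the mutex.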
*/ static void fxp_release(struct fxp_softc *sc) { struct fxp_rx *rxp; struct fxp_tx *txp; int i; FXP_LOCK_ASSERT(sc, MA_NOTOWNED); KASSERT(sc->ih == NULL, ("fxp_release() called with intr handle still active")); if (sc->miibus) device_delete_child(sc->dev, sc->miibus); bus_generic_detach(sc->dev); ifmedia_removeall(&sc->sc_media); if (sc->fxp_desc.cbl_list) { bus_dmamap_unload(sc->cbl_tag, sc->cbl_map); bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list, sc->cbl_map); } if (sc->fxp_stats) { bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap); bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap); } if (sc->mcsp) { bus_dmamap_unload(sc->mcs_tag, sc->mcs_map); bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map); } bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res); if (sc->fxp_rxmtag) { for (i = 0; i < FXP_NRFABUFS; i++) { rxp = &sc->fxp_desc.rx_list[i]; if (rxp->rx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map); m_freem(rxp->rx_mbuf); } bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map); } bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map); bus_dma_tag_destroy(sc->fxp_rxmtag); } if (sc->fxp_txmtag) { for (i = 0; i < FXP_NTXCB; i++) { txp = &sc->fxp_desc.tx_list[i]; if (txp->tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map); m_freem(txp->tx_mbuf); } bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map); } bus_dma_tag_destroy(sc->fxp_txmtag); } if (sc->fxp_stag) bus_dma_tag_destroy(sc->fxp_stag); if (sc->cbl_tag) bus_dma_tag_destroy(sc->cbl_tag); if (sc->mcs_tag) bus_dma_tag_destroy(sc->mcs_tag); if (sc->ifp) if_free(sc->ifp); mtx_destroy(&sc->sc_mtx); } /* * Detach interface. */ static int fxp_detach(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->ifp); #endif FXP_LOCK(sc); /* * Stop DMA and drop transmit queue, but disable interrupts first. */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); fxp_stop(sc); FXP_UNLOCK(sc); callout_drain(&sc->stat_ch); /* * Close down routes etc. */ ether_ifdetach(sc->ifp); /* * Unhook interrupt before dropping lock. This is to prevent * races with fxp_intr(). */ bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih); sc->ih = NULL; /* Release our allocated resources. */ fxp_release(sc); return (0); } /* * Device shutdown routine. Called at system shutdown after sync. The * main purpose of this routine is to shut off receiver DMA so that * kernel memory doesn't get clobbered during warmboot. */ static int fxp_shutdown(device_t dev) { /* * Make sure that DMA is disabled prior to reboot. Not doing * so could allow DMA to corrupt kernel memory during the * reboot before the driver initializes. */ return (fxp_suspend(dev)); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int fxp_suspend(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp; int pmc; uint16_t pmstat; FXP_LOCK(sc); ifp = sc->ifp; if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) { pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { /* Request PME. */ pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; sc->flags |= FXP_FLAG_WOL; /* Reconfigure hardware to accept magic frames.
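*/

/*
 * Setting PCIM_PSTAT_PME and PCIM_PSTAT_PMEENABLE in the power
 * management capability arms PME# assertion, so a received Magic
 * Packet can wake the system while the chip sits in a low-power
 * state.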
*/ fxp_init_body(sc, 1); } pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } fxp_stop(sc); sc->suspended = 1; FXP_UNLOCK(sc); return (0); } /* * Device resume routine. re-enable busmastering, and restart the interface if * appropriate. */ static int fxp_resume(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; int pmc; uint16_t pmstat; FXP_LOCK(sc); if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) { sc->flags &= ~FXP_FLAG_WOL; pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2); /* Disable PME and clear PME status. */ pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2); if ((sc->flags & FXP_FLAG_WOLCAP) != 0) CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR)); } CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) fxp_init_body(sc, 1); sc->suspended = 0; FXP_UNLOCK(sc); return (0); } static void fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length) { uint16_t reg; int x; /* * Shift in data. */ for (x = 1 << (length - 1); x; x >>= 1) { if (data & x) reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; else reg = FXP_EEPROM_EECS; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } } /* * Read from the serial EEPROM. Basically, you manually shift in * the read opcode (one bit at a time) and then shift in the address, * and then you shift out the data (all of this one bit at a time). * The word size is 16 bits, so you have to provide the address for * every 16 bits of data. */ static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize) { uint16_t reg, data; int x; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); /* * Shift in read opcode. */ fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); /* * Shift in address. */ data = 0; for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) { if (offset & x) reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; else reg = FXP_EEPROM_EECS; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO; data++; if (autosize && reg == 0) { sc->eeprom_size = data; break; } } /* * Shift out data. */ data = 0; reg = FXP_EEPROM_EECS; for (x = 1 << 15; x; x >>= 1) { CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) data |= x; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); return (data); } static void fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data) { int i; /* * Erase/write enable. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); fxp_eeprom_shiftin(sc, 0x4, 3); fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); /* * Shift in write opcode, address, data. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); fxp_eeprom_shiftin(sc, offset, sc->eeprom_size); fxp_eeprom_shiftin(sc, data, 16); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); /* * Wait for EEPROM to finish up. 
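*/

/*
 * The EEPROM signals completion by driving EEDO high once chip
 * select is re-asserted; the loop below polls for that in 50us steps
 * and gives up after 1000 iterations (roughly 50ms), comfortably
 * longer than a typical EEPROM write cycle.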
*/ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); DELAY(1); for (i = 0; i < 1000; i++) { if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) break; DELAY(50); } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); /* * Erase/write disable. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); fxp_eeprom_shiftin(sc, 0x4, 3); fxp_eeprom_shiftin(sc, 0, sc->eeprom_size); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); } /* * From NetBSD: * * Figure out EEPROM size. * * 559s can have either 64-word or 256-word EEPROMs, the 558 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet * talks about the existence of 16 to 256 word EEPROMs. * * The only known sizes are 64 and 256, where the 256 version is used * by CardBus cards to store CIS information. * * The address is shifted in msb-to-lsb, and after the last * address-bit the EEPROM is supposed to output a `dummy zero' bit, * after which follows the actual data. We try to detect this zero, by * probing the data-out bit in the EEPROM control register just after * having shifted in a bit. If the bit is zero, we assume we've * shifted enough address bits. The data-out should be tri-state, * before this, which should translate to a logical one. */ static void fxp_autosize_eeprom(struct fxp_softc *sc) { /* guess maximum size of 256 words */ sc->eeprom_size = 8; /* autosize */ (void) fxp_eeprom_getword(sc, 0, 1); } static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) { int i; for (i = 0; i < words; i++) data[i] = fxp_eeprom_getword(sc, offset + i, 0); } static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) { int i; for (i = 0; i < words; i++) fxp_eeprom_putword(sc, offset + i, data[i]); } static void fxp_load_eeprom(struct fxp_softc *sc) { int i; uint16_t cksum; fxp_read_eeprom(sc, sc->eeprom, 0, 1 << sc->eeprom_size); cksum = 0; for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) cksum += sc->eeprom[i]; cksum = 0xBABA - cksum; if (cksum != sc->eeprom[(1 << sc->eeprom_size) - 1]) device_printf(sc->dev, "EEPROM checksum mismatch! (0x%04x -> 0x%04x)\n", cksum, sc->eeprom[(1 << sc->eeprom_size) - 1]); } /* * Grab the softc lock and call the real fxp_start_body() routine. */ static void fxp_start(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; FXP_LOCK(sc); fxp_start_body(ifp); FXP_UNLOCK(sc); } /* * Start packet transmission on the interface. * This routine must be called with the softc lock held, and is an * internal entry point only. */ static void fxp_start_body(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; struct mbuf *mb_head; int txqueued; FXP_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (sc->tx_queued > FXP_NTXCB_HIWAT) fxp_txeof(sc); /* * We're finished if there is nothing more to add to the list or if * we're all filled up with buffers to transmit. * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add * a NOP command when needed. */ txqueued = 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->tx_queued < FXP_NTXCB - 1) { /* * Grab a packet to transmit. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head); if (mb_head == NULL) break; if (fxp_encap(sc, &mb_head)) { if (mb_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, mb_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; } txqueued++; /* * Pass packet to bpf if there is a listener. */ BPF_MTAP(ifp, mb_head); } /* * We're finished.
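*/

/*
 * The PREREAD|PREWRITE sync below publishes the CPU's updates to the
 * TxCB ring before the CU_RESUME command is issued; on platforms
 * where busdma has to bounce or flush, this is what actually makes
 * the descriptors visible to the device.
 */

/*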
If we added to the list, issue a RESUME to get DMA * going again if suspended. */ if (txqueued > 0) { bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); /* * Set a 5 second timer just in case we don't hear * from the card again. */ sc->watchdog_timer = 5; } } static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head) { struct ifnet *ifp; struct mbuf *m; struct fxp_tx *txp; struct fxp_cb_tx *cbp; struct tcphdr *tcp; bus_dma_segment_t segs[FXP_NTXSEG]; int error, i, nseg, tcp_payload; FXP_LOCK_ASSERT(sc, MA_OWNED); ifp = sc->ifp; tcp_payload = 0; tcp = NULL; /* * Get pointer to next available tx desc. */ txp = sc->fxp_desc.tx_last->tx_next; /* * A note in Appendix B of the Intel 8255x 10/100 Mbps * Ethernet Controller Family Open Source Software * Developer Manual says: * Using software parsing is only allowed with legal * TCP/IP or UDP/IP packets. * ... * For all other datagrams, hardware parsing must * be used. * Software parsing appears to truncate ICMP and * fragmented UDP packets that contain one to three * bytes in the second (and final) mbuf of the packet. */ if (sc->flags & FXP_FLAG_EXT_RFA) txp->tx_cb->ipcb_ip_activation_high = FXP_IPCB_HARDWAREPARSING_ENABLE; m = *m_head; if (m->m_pkthdr.csum_flags & CSUM_TSO) { /* * The 82550/82551 requires that the ethernet/IP/TCP headers * be contained in the first active transmit buffer. */ struct ether_header *eh; struct ip *ip; uint32_t ip_off, poff; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_DONTWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } ip_off = sizeof(struct ether_header); m = m_pullup(*m_head, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* Check the existence of VLAN tag. */ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, ip_off + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } /* * Since the 82550/82551 doesn't modify the IP length and * pseudo checksum in the first frame, the driver should * compute them. */ ip = (struct ip *)(mtod(m, char *) + ip_off); tcp = (struct tcphdr *)(mtod(m, char *) + poff); ip->ip_sum = 0; ip->ip_len = htons(m->m_pkthdr.tso_segsz + (ip->ip_hl << 2) + (tcp->th_off << 2)); tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP + (tcp->th_off << 2) + m->m_pkthdr.tso_segsz)); /* Compute total TCP payload. */ tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2); tcp_payload -= tcp->th_off << 2; *m_head = m; } else if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) { /* * Deal with TCP/IP checksum offload. Note that * in order for TCP checksum offload to work, * the pseudo header checksum must have already * been computed and stored in the checksum field * in the TCP header. The stack should have * already done this for us.
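*/

/*
 * Contrast this with the TSO path above, where th_sum is seeded with
 * IPPROTO_TCP plus the per-segment length (TCP header plus
 * tso_segsz): the controller treats the first frame's headers as a
 * template for every generated segment, so the driver fills in
 * values that are valid for a full-sized segment.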
*/ txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; if (m->m_pkthdr.csum_flags & CSUM_TCP) txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET; #ifdef FXP_IP_CSUM_WAR /* * XXX The 82550 chip appears to have trouble * dealing with IP header checksums in very small * datagrams, namely fragments from 1 to 3 bytes * in size. For example, say you want to transmit * a UDP packet of 1473 bytes. The packet will be * fragmented over two IP datagrams, the latter * containing only one byte of data. The 82550 will * botch the header checksum on the 1-byte fragment. * As long as the datagram contains 4 or more bytes * of data, you're ok. * * The following code attempts to work around this * problem: if the datagram is less than 38 bytes * in size (14 bytes ether header, 20 bytes IP header, * plus 4 bytes of data), we punt and compute the IP * header checksum by hand. This workaround doesn't * work very well, however, since it can be fooled * by things like VLAN tags and IP options that make * the header sizes/offsets vary. */ if (m->m_pkthdr.csum_flags & CSUM_IP) { if (m->m_pkthdr.len < 38) { struct ip *ip; m->m_data += ETHER_HDR_LEN; ip = mtod(m, struct ip *); ip->ip_sum = in_cksum(m, ip->ip_hl << 2); m->m_data -= ETHER_HDR_LEN; m->m_pkthdr.csum_flags &= ~CSUM_IP; } else { txp->tx_cb->ipcb_ip_activation_high = FXP_IPCB_HARDWAREPARSING_ENABLE; txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_IP_CHECKSUM_ENABLE; } } #endif } error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head, segs, &nseg, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head, segs, &nseg, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } } else if (error != 0) return (error); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments")); bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_PREWRITE); cbp = txp->tx_cb; for (i = 0; i < nseg; i++) { /* * If this is an 82550/82551, then we're using extended * TxCBs _and_ we're using checksum offload. This means * that the TxCB is really an IPCB. One major difference * between the two is that with plain extended TxCBs, * the bottom half of the TxCB contains two entries from * the TBD array, whereas IPCBs contain just one entry: * one entry (8 bytes) has been sacrificed for the TCP/IP * checksum offload control bits. So to make things work * right, we have to start filling in the TBD array * starting from a different place depending on whether * the chip is an 82550/82551 or not. */ if (sc->flags & FXP_FLAG_EXT_RFA) { cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr); cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len); } else { cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr); cbp->tbd[i].tb_size = htole32(segs[i].ds_len); } } if (sc->flags & FXP_FLAG_EXT_RFA) { /* Configure dynamic TBD for 82550/82551. */ cbp->tbd_number = 0xFF; cbp->tbd[nseg].tb_size |= htole32(0x8000); } else cbp->tbd_number = nseg; /* Configure TSO. */ if (m->m_pkthdr.csum_flags & CSUM_TSO) { cbp->tbd[-1].tb_size = htole32(m->m_pkthdr.tso_segsz << 16); cbp->tbd[1].tb_size |= htole32(tcp_payload << 16); cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE | FXP_IPCB_IP_CHECKSUM_ENABLE | FXP_IPCB_TCP_PACKET | FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; } /* Configure VLAN hardware tag insertion. 
*/ if ((m->m_flags & M_VLANTAG) != 0) { cbp->ipcb_vlan_id = htons(m->m_pkthdr.ether_vtag); txp->tx_cb->ipcb_ip_activation_high |= FXP_IPCB_INSERTVLAN_ENABLE; } txp->tx_mbuf = m; txp->tx_cb->cb_status = 0; txp->tx_cb->byte_count = 0; if (sc->tx_queued != FXP_CXINT_THRESH - 1) txp->tx_cb->cb_command = htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S); else txp->tx_cb->cb_command = htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) txp->tx_cb->tx_threshold = tx_threshold; /* * Advance the end of list forward. */ sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S); sc->fxp_desc.tx_last = txp; /* * Advance the beginning of the list forward if there are * no other packets queued (when nothing is queued, tx_first * sits on the last TxCB that was sent out). */ if (sc->tx_queued == 0) sc->fxp_desc.tx_first = txp; sc->tx_queued++; return (0); } #ifdef DEVICE_POLLING static poll_handler_t fxp_poll; static int fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct fxp_softc *sc = ifp->if_softc; uint8_t statack; int rx_npkts = 0; FXP_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { FXP_UNLOCK(sc); return (rx_npkts); } statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA | FXP_SCB_STATACK_FR; if (cmd == POLL_AND_CHECK_STATUS) { uint8_t tmp; tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK); if (tmp == 0xff || tmp == 0) { FXP_UNLOCK(sc); return (rx_npkts); /* nothing to do */ } tmp &= ~statack; /* ack what we can */ if (tmp != 0) CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp); statack |= tmp; } rx_npkts = fxp_intr_body(sc, ifp, statack, count); FXP_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ /* * Process interface interrupts. */ static void fxp_intr(void *xsc) { struct fxp_softc *sc = xsc; struct ifnet *ifp = sc->ifp; uint8_t statack; FXP_LOCK(sc); if (sc->suspended) { FXP_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { FXP_UNLOCK(sc); return; } #endif while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { /* * It should not be possible to have all bits set; the * FXP_SCB_INTR_SWI bit always returns 0 on a read. If * all bits are set, this may indicate that the card has * been physically ejected, so ignore it. */ if (statack == 0xff) { FXP_UNLOCK(sc); return; } /* * First ACK all the interrupts in this pass. 
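*/

/*
 * Acknowledging the STAT/ACK bits before processing them means that
 * any event arriving while this pass runs re-asserts the interrupt
 * and is picked up by the enclosing while loop instead of being
 * lost.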
*/ CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) fxp_intr_body(sc, ifp, statack, -1); } FXP_UNLOCK(sc); } static void fxp_txeof(struct fxp_softc *sc) { struct ifnet *ifp; struct fxp_tx *txp; ifp = sc->ifp; bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (txp = sc->fxp_desc.tx_first; sc->tx_queued && (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0; txp = txp->tx_next) { if (txp->tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map); m_freem(txp->tx_mbuf); txp->tx_mbuf = NULL; /* clear this to reset csum offload bits */ txp->tx_cb->tbd[0].tb_addr = 0; } sc->tx_queued--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->fxp_desc.tx_first = txp; bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->tx_queued == 0) sc->watchdog_timer = 0; } static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m, uint16_t status, int pos) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; int32_t hlen, len, pktlen, temp32; uint16_t csum, *opts; if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) { if ((status & FXP_RFA_STATUS_PARSE) != 0) { if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (status & FXP_RFDX_CS_IP_CSUM_VALID) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) && (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } return; } pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* can't handle fragmented packet */ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((caddr_t)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } /* Extract computed checksum. */ csum = be16dec(mtod(m, char *) + pos); /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (uint16_t *)(ip + 1); for (; len > 0; len -= sizeof(uint16_t), opts++) { temp32 = csum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); csum = temp32 & 65535; } } m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = csum; } static int fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack, int count) { struct mbuf *m; struct fxp_rx *rxp; struct fxp_rfa *rfa; int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0; int rx_npkts; uint16_t status; rx_npkts = 0; FXP_LOCK_ASSERT(sc, MA_OWNED); if (rnr) sc->rnr++; #ifdef DEVICE_POLLING /* Pick up a deferred RNR condition if `count' ran out last time. */ if (sc->flags & FXP_FLAG_DEFERRED_RNR) { sc->flags &= ~FXP_FLAG_DEFERRED_RNR; rnr = 1; } #endif /* * Free any finished transmit mbuf chains. * * Handle the CNA event like a CXTNO event.
It used to * be that this event (control unit not ready) was not * encountered, but it is now with the SMPng modifications. * The exact sequence of events that occur when the interface * is brought up are different now, and if this event * goes unhandled, the configuration/rxfilter setup sequence * can stall for several seconds. The result is that no * packets go out onto the wire for about 5 to 10 seconds * after the interface is ifconfig'ed for the first time. */ if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) fxp_txeof(sc); /* * Try to start more packets transmitting. */ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) fxp_start_body(ifp); /* * Just return if nothing happened on the receive side. */ if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0) return (rx_npkts); /* * Process receiver interrupts. If a no-resource (RNR) * condition exists, get whatever packets we can and * re-start the receiver. * * When using polling, we do not process the list to completion, * so when we get an RNR interrupt we must defer the restart * until we hit the last buffer with the C bit set. * If we run out of cycles and rfa_headm has the C bit set, * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so * that the info will be used in the subsequent polling cycle. */ for (;;) { rxp = sc->fxp_desc.rx_head; m = rxp->rx_mbuf; rfa = (struct fxp_rfa *)(m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef DEVICE_POLLING /* loop at most count times if count >=0 */ if (count >= 0 && count-- == 0) { if (rnr) { /* Defer RNR processing until the next time. */ sc->flags |= FXP_FLAG_DEFERRED_RNR; rnr = 0; } break; } #endif /* DEVICE_POLLING */ status = le16toh(rfa->rfa_status); if ((status & FXP_RFA_STATUS_C) == 0) break; if ((status & FXP_RFA_STATUS_RNR) != 0) rnr++; /* * Advance head forward. */ sc->fxp_desc.rx_head = rxp->rx_next; /* * Add a new buffer to the receive chain. * If this fails, the old buffer is recycled * instead. */ if (fxp_new_rfabuf(sc, rxp) == 0) { int total_len; /* * Fetch packet length (the top 2 bits of * actual_size are flags set by the controller * upon completion), and drop the packet in case * of bogus length or CRC errors. */ total_len = le16toh(rfa->actual_size) & 0x3fff; if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 && (ifp->if_capenable & IFCAP_RXCSUM) != 0) { /* Adjust for appended checksum bytes. */ total_len -= 2; } if (total_len < (int)sizeof(struct ether_header) || total_len > (MCLBYTES - RFA_ALIGNMENT_FUDGE - sc->rfa_size) || status & (FXP_RFA_STATUS_CRC | FXP_RFA_STATUS_ALIGN | FXP_RFA_STATUS_OVERRUN)) { m_freem(m); fxp_add_rfabuf(sc, rxp); continue; } m->m_pkthdr.len = m->m_len = total_len; m->m_pkthdr.rcvif = ifp; /* Do IP checksum checking. */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) fxp_rxcsum(sc, ifp, m, status, total_len); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (status & FXP_RFA_STATUS_VLAN) != 0) { m->m_pkthdr.ether_vtag = ntohs(rfa->rfax_vlan_id); m->m_flags |= M_VLANTAG; } /* * Drop locks before calling if_input() since it * may re-enter fxp_start() in the netisr case. * This would result in a lock reversal. Better * performance might be obtained by chaining all * packets received, dropping the lock, and then * calling if_input() on each one. */ FXP_UNLOCK(sc); (*ifp->if_input)(ifp, m); FXP_LOCK(sc); rx_npkts++; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return (rx_npkts); } else { /* Reuse RFA and loaded DMA map. 
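*/

/*
 * mbuf allocation failed above, so the old buffer is reprogrammed in
 * place (fxp_discard_rfabuf() resets the embedded RFA) and the frame
 * is counted as an input-queue drop; the receive ring never shrinks
 * under memory pressure.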
*/ ifp->if_iqdrops++; fxp_discard_rfabuf(sc, rxp); } fxp_add_rfabuf(sc, rxp); } if (rnr) { fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); } return (rx_npkts); } static void fxp_update_stats(struct fxp_softc *sc) { struct ifnet *ifp = sc->ifp; struct fxp_stats *sp = sc->fxp_stats; struct fxp_hwstats *hsp; uint32_t *status; FXP_LOCK_ASSERT(sc, MA_OWNED); bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Update statistical counters. */ if (sc->revision >= FXP_REV_82559_A0) status = &sp->completion_status; else if (sc->revision >= FXP_REV_82558_A4) status = (uint32_t *)&sp->tx_tco; else status = &sp->tx_pause; if (*status == htole32(FXP_STATS_DR_COMPLETE)) { hsp = &sc->fxp_hwstats; hsp->tx_good += le32toh(sp->tx_good); hsp->tx_maxcols += le32toh(sp->tx_maxcols); hsp->tx_latecols += le32toh(sp->tx_latecols); hsp->tx_underruns += le32toh(sp->tx_underruns); hsp->tx_lostcrs += le32toh(sp->tx_lostcrs); hsp->tx_deffered += le32toh(sp->tx_deffered); hsp->tx_single_collisions += le32toh(sp->tx_single_collisions); hsp->tx_multiple_collisions += le32toh(sp->tx_multiple_collisions); hsp->tx_total_collisions += le32toh(sp->tx_total_collisions); hsp->rx_good += le32toh(sp->rx_good); hsp->rx_crc_errors += le32toh(sp->rx_crc_errors); hsp->rx_alignment_errors += le32toh(sp->rx_alignment_errors); hsp->rx_rnr_errors += le32toh(sp->rx_rnr_errors); hsp->rx_overrun_errors += le32toh(sp->rx_overrun_errors); hsp->rx_cdt_errors += le32toh(sp->rx_cdt_errors); hsp->rx_shortframes += le32toh(sp->rx_shortframes); hsp->tx_pause += le32toh(sp->tx_pause); hsp->rx_pause += le32toh(sp->rx_pause); hsp->rx_controls += le32toh(sp->rx_controls); hsp->tx_tco += le16toh(sp->tx_tco); hsp->rx_tco += le16toh(sp->rx_tco); ifp->if_opackets += le32toh(sp->tx_good); ifp->if_collisions += le32toh(sp->tx_total_collisions); if (sp->rx_good) { ifp->if_ipackets += le32toh(sp->rx_good); sc->rx_idle_secs = 0; } else if (sc->flags & FXP_FLAG_RXBUG) { /* * Receiver's been idle for another second. */ sc->rx_idle_secs++; } ifp->if_ierrors += le32toh(sp->rx_crc_errors) + le32toh(sp->rx_alignment_errors) + le32toh(sp->rx_rnr_errors) + le32toh(sp->rx_overrun_errors); /* * If any transmit underruns occurred, bump up the transmit * threshold by another 512 bytes (64 * 8). */ if (sp->tx_underruns) { ifp->if_oerrors += le32toh(sp->tx_underruns); if (tx_threshold < 192) tx_threshold += 64; } *status = 0; bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } /* * Update packet in/out/collision statistics. The i82557 doesn't * allow you to access these counters without doing a fairly * expensive DMA to get _all_ of the statistics it maintains, so * we do this operation here only once per second. The statistics * counters in the kernel are updated from the previous dump-stats * DMA and then a new dump-stats DMA is started. The on-chip * counters are zeroed when the DMA completes. If we can't start * the DMA immediately, we don't wait - we just prepare to read * them again next time. */ static void fxp_tick(void *xsc) { struct fxp_softc *sc = xsc; struct ifnet *ifp = sc->ifp; FXP_LOCK_ASSERT(sc, MA_OWNED); /* Update statistical counters. */ fxp_update_stats(sc); /* * Release any xmit buffers that have completed DMA.
This isn't * strictly necessary to do here, but it's advantageous for mbufs * with external storage to be released in a timely manner rather * than being deferred for a potentially long time. This limits * the delay to a maximum of one second. */ fxp_txeof(sc); /* * If we haven't received any packets in FXP_MAC_RX_IDLE seconds, * then assume the receiver has locked up and attempt to clear * the condition by reprogramming the multicast filter. This is * a work-around for a bug in the 82557 where the receiver locks * up if it gets certain types of garbage in the synchronization * bits prior to the packet header. This bug is supposed to only * occur in 10Mbps mode, but has been seen to occur in 100Mbps * mode as well (perhaps due to a 10/100 speed transition). */ if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { sc->rx_idle_secs = 0; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) fxp_init_body(sc, 1); return; } /* * If there is no pending command, start another stats * dump. Otherwise punt for now. */ if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { /* * Start another stats dump. */ fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET); } if (sc->miibus != NULL) mii_tick(device_get_softc(sc->miibus)); /* * Check that chip hasn't hung. */ fxp_watchdog(sc); /* * Schedule another timeout one second from now. */ callout_reset(&sc->stat_ch, hz, fxp_tick, sc); } /* * Stop the interface. Cancels the statistics updater and resets * the interface. */ static void fxp_stop(struct fxp_softc *sc) { struct ifnet *ifp = sc->ifp; struct fxp_tx *txp; int i; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->watchdog_timer = 0; /* * Cancel stats updater. */ callout_stop(&sc->stat_ch); /* * Preserve PCI configuration, configure, IA/multicast * setup and put RU and CU into idle state. */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(50); /* Disable interrupts. */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); fxp_update_stats(sc); /* * Release any xmit buffers. */ txp = sc->fxp_desc.tx_list; if (txp != NULL) { for (i = 0; i < FXP_NTXCB; i++) { if (txp[i].tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_txmtag, txp[i].tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_txmtag, txp[i].tx_map); m_freem(txp[i].tx_mbuf); txp[i].tx_mbuf = NULL; /* clear this to reset csum offload bits */ txp[i].tx_cb->tbd[0].tb_addr = 0; } } } bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->tx_queued = 0; } /* * Watchdog/transmission timeout handler. Called when a * transmission is started on the interface, but no interrupt is * received before the timeout. This usually indicates that the * card has wedged for some reason. */ static void fxp_watchdog(struct fxp_softc *sc) { FXP_LOCK_ASSERT(sc, MA_OWNED); if (sc->watchdog_timer == 0 || --sc->watchdog_timer) return; device_printf(sc->dev, "device timeout\n"); sc->ifp->if_oerrors++; fxp_init_body(sc, 1); } /* * Acquire locks and then call the real initialization function. This * is necessary because ether_ioctl() calls if_init() and this would * result in mutex recursion if the mutex was held. */ static void fxp_init(void *xsc) { struct fxp_softc *sc = xsc; FXP_LOCK(sc); fxp_init_body(sc, 1); FXP_UNLOCK(sc); } /* * Perform device initialization. This routine must be called with the * softc lock held.
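*/

/*
 * The body below follows the order the i8255x expects: stop and
 * reset the chip, program the CBL/RFA base registers and the stats
 * dump address, optionally load microcode, then run the configure,
 * IAS (station address) and multicast setup commands before
 * rebuilding the TxCB ring and starting the receive unit.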
*/ static void fxp_init_body(struct fxp_softc *sc, int setmedia) { struct ifnet *ifp = sc->ifp; struct mii_data *mii; struct fxp_cb_config *cbp; struct fxp_cb_ias *cb_ias; struct fxp_cb_tx *tcbp; struct fxp_tx *txp; int i, prm; FXP_LOCK_ASSERT(sc, MA_OWNED); /* * Cancel any pending I/O */ fxp_stop(sc); /* * Issue software reset, which also unloads the microcode. */ sc->flags &= ~FXP_FLAG_UCODE; CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET); DELAY(50); prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; /* * Initialize base of CBL and RFA memory. Loading with zero * sets it up for regular linear addressing. */ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); /* * Initialize base of dump-stats buffer. */ fxp_scb_wait(sc); bzero(sc->fxp_stats, sizeof(struct fxp_stats)); bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); /* * Attempt to load microcode if requested. * For ICH based controllers do not load microcode. */ if (sc->ident->ich == 0) { if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0) fxp_load_ucode(sc); } /* * Set IFF_ALLMULTI status. It's needed in configure action * command. */ fxp_mc_addrs(sc); /* * We temporarily use memory that contains the TxCB list to * construct the config CB. The TxCB list memory is rebuilt * later. */ cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list; /* * This bcopy is kind of disgusting, but there are a bunch of must be * zero and must be one bits in this structure and this is the easiest * way to initialize them all to proper values. */ bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template)); cbp->cb_status = 0; cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL); cbp->link_addr = 0xffffffff; /* (no) next command */ cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22; cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0; cbp->type_enable = 0; /* actually reserved */ cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0; cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0; cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ cbp->dma_mbce = 0; /* (disable) dma max counters */ cbp->late_scb = 0; /* (don't) defer SCB update */ cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */ cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */ cbp->ci_int = 1; /* interrupt on CU idle */ cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1; cbp->ext_stats_dis = 1; /* disable extended counters */ cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm; cbp->disc_short_rx = !prm; /* discard short packets */ cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */ cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ cbp->dyn_tbd = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1; cbp->csma_dis = 0; /* (don't) disable link */ cbp->tcp_udp_cksum = ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 && (ifp->if_capenable & IFCAP_RXCSUM) != 0) ? 
1 : 0; cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */ cbp->nsai = 1; /* (don't) disable source addr insert */ cbp->preamble_length = 2; /* (7 byte) preamble */ cbp->loopback = 0; /* (don't) loopback */ cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ cbp->linear_pri_mode = 0; /* (wait after xmit only) */ cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ cbp->promiscuous = prm; /* promiscuous mode */ cbp->bcast_disable = 0; /* (don't) disable broadcasts */ cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0; cbp->stripping = !prm; /* truncate rx packet to byte count */ cbp->padding = 1; /* (do) pad short tx packets */ cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0; cbp->ia_wake_en = 0; /* (don't) wake up on address match */ cbp->magic_pkt_dis = sc->flags & FXP_FLAG_WOL ? 0 : 1; cbp->force_fdx = 0; /* (don't) force full duplex */ cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ cbp->multi_ia = 0; /* (don't) accept multiple IAs */ cbp->mc_all = ifp->if_flags & IFF_ALLMULTI ? 1 : prm; cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->vlan_strip_en = ((sc->flags & FXP_FLAG_EXT_RFA) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) ? 1 : 0; if (sc->revision == FXP_REV_82557) { /* * The 82557 has no hardware flow control, the values * below are the defaults for the chip. */ cbp->fc_delay_lsb = 0; cbp->fc_delay_msb = 0x40; cbp->pri_fc_thresh = 3; cbp->tx_fc_dis = 0; cbp->rx_fc_restop = 0; cbp->rx_fc_restart = 0; cbp->fc_filter = 0; cbp->pri_fc_loc = 1; } else { /* Set pause RX FIFO threshold to 1KB. */ CSR_WRITE_1(sc, FXP_CSR_FC_THRESH, 1); /* Set pause time. */ cbp->fc_delay_lsb = 0xff; cbp->fc_delay_msb = 0xff; cbp->pri_fc_thresh = 3; mii = device_get_softc(sc->miibus); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) /* enable transmit FC */ cbp->tx_fc_dis = 0; else /* disable transmit FC */ cbp->tx_fc_dis = 1; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) { /* enable FC restart/restop frames */ cbp->rx_fc_restart = 1; cbp->rx_fc_restop = 1; } else { /* disable FC restart/restop frames */ cbp->rx_fc_restart = 0; cbp->rx_fc_restop = 0; } cbp->fc_filter = !prm; /* drop FC frames to host */ cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ } /* Enable 82558 and 82559 extended statistics functionality. */ if (sc->revision >= FXP_REV_82558_A4) { if (sc->revision >= FXP_REV_82559_A0) { /* * Extend configuration table size to 32 * to include TCO configuration. */ cbp->byte_count = 32; cbp->ext_stats_dis = 1; /* Enable TCO stats. */ cbp->tno_int_or_tco_en = 1; cbp->gamla_rx = 1; } else cbp->ext_stats_dis = 0; } /* * Start the config command/DMA. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); /* * Now initialize the station address. Temporarily use the TxCB * memory area like we did above for the config CB. 
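*/

/*
 * Reusing the TxCB memory is safe here because the transmit ring is
 * rebuilt from scratch once the IAS command has completed.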
*/ cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list; cb_ias->cb_status = 0; cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); cb_ias->link_addr = 0xffffffff; bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN); /* * Start the IAS (Individual Address Setup) command/DMA. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map); /* * Initialize the multicast address list. */ fxp_mc_setup(sc); /* * Initialize transmit control block (TxCB) list. */ txp = sc->fxp_desc.tx_list; tcbp = sc->fxp_desc.cbl_list; bzero(tcbp, FXP_TXCB_SZ); for (i = 0; i < FXP_NTXCB; i++) { txp[i].tx_mbuf = NULL; tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK); tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP); tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr + (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx))); if (sc->flags & FXP_FLAG_EXT_TXCB) tcbp[i].tbd_array_addr = htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2])); else tcbp[i].tbd_array_addr = htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0])); txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK]; } /* * Set the suspend flag on the first TxCB and start the control * unit. It will execute the NOP and then suspend. */ tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; sc->tx_queued = 1; fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* * Initialize receiver buffer area - RFA. */ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); if (sc->miibus != NULL && setmedia != 0) mii_mediachg(device_get_softc(sc->miibus)); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * Enable interrupts. */ #ifdef DEVICE_POLLING /* * ... but only do that if we are not polling. And because (presumably) * the default is interrupts on, we need to disable them explicitly! */ if (ifp->if_capenable & IFCAP_POLLING ) CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); else #endif /* DEVICE_POLLING */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); /* * Start stats updater. */ callout_reset(&sc->stat_ch, hz, fxp_tick, sc); } static int fxp_serial_ifmedia_upd(struct ifnet *ifp) { return (0); } static void fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* * Change media according to request. */ static int fxp_ifmedia_upd(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; struct mii_data *mii; struct mii_softc *miisc; mii = device_get_softc(sc->miibus); FXP_LOCK(sc); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); mii_mediachg(mii); FXP_UNLOCK(sc); return (0); } /* * Notify the world which media we're using. */ static void fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct fxp_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->miibus); FXP_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; FXP_UNLOCK(sc); } /* * Add a buffer to the end of the RFA buffer list. 
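*/

/*
 * Resulting layout of a receive cluster (RFA_ALIGNMENT_FUDGE is
 * 2 bytes, chosen so that the packet payload following the 14-byte
 * Ethernet header ends up 32-bit aligned):
 *
 *	+-------+----------------+-----------------------------+
 *	| fudge | struct fxp_rfa | packet data (m_data) ...    |
 *	+-------+----------------+-----------------------------+
 *	   2      sc->rfa_size     rest of the MCLBYTES cluster
 */

/*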
* Return 0 if successful, 1 for failure. A failure results in * reusing the RFA buffer. * The RFA struct is stuck at the beginning of mbuf cluster and the * data pointer is fixed up to point just past it. */ static int fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct mbuf *m; struct fxp_rfa *rfa; bus_dmamap_t tmp_map; int error; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); /* * Move the data pointer up so that the incoming data packet * will be 32-bit aligned. */ m->m_data += RFA_ALIGNMENT_FUDGE; /* * Get a pointer to the base of the mbuf cluster and move * data start past it. */ rfa = mtod(m, struct fxp_rfa *); m->m_data += sc->rfa_size; rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE); rfa->rfa_status = 0; rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL); rfa->actual_size = 0; m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE - sc->rfa_size; /* * Initialize the rest of the RFA. Note that since the RFA * is misaligned, we cannot store values directly. We're thus * using the le32enc() function which handles endianness and * is also alignment-safe. */ le32enc(&rfa->link_addr, 0xffffffff); le32enc(&rfa->rbd_addr, 0xffffffff); /* Map the RFA into DMA memory. */ error = bus_dmamap_load(sc->fxp_rxmtag, sc->spare_map, rfa, MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr, &rxp->rx_addr, BUS_DMA_NOWAIT); if (error) { m_freem(m); return (error); } if (rxp->rx_mbuf != NULL) bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map); tmp_map = sc->spare_map; sc->spare_map = rxp->rx_map; rxp->rx_map = tmp_map; rxp->rx_mbuf = m; bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct fxp_rfa *p_rfa; struct fxp_rx *p_rx; /* * If there are other buffers already on the list, attach this * one to the end by fixing up the tail to point to this one. */ if (sc->fxp_desc.rx_head != NULL) { p_rx = sc->fxp_desc.rx_tail; p_rfa = (struct fxp_rfa *) (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); p_rx->rx_next = rxp; le32enc(&p_rfa->link_addr, rxp->rx_addr); p_rfa->rfa_control = 0; bus_dmamap_sync(sc->fxp_rxmtag, p_rx->rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } else { rxp->rx_next = NULL; sc->fxp_desc.rx_head = rxp; } sc->fxp_desc.rx_tail = rxp; } static void fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct mbuf *m; struct fxp_rfa *rfa; m = rxp->rx_mbuf; m->m_data = m->m_ext.ext_buf; /* * Move the data pointer up so that the incoming data packet * will be 32-bit aligned. */ m->m_data += RFA_ALIGNMENT_FUDGE; /* * Get a pointer to the base of the mbuf cluster and move * data start past it. */ rfa = mtod(m, struct fxp_rfa *); m->m_data += sc->rfa_size; rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE); rfa->rfa_status = 0; rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL); rfa->actual_size = 0; /* * Initialize the rest of the RFA. Note that since the RFA * is misaligned, we cannot store values directly. We're thus * using the le32enc() function which handles endianness and * is also alignment-safe. 
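*/

/*
 * Illustration: le32enc() amounts to four single-byte stores in
 * little-endian order,
 *
 *	p[0] =  u        & 0xff;
 *	p[1] = (u >> 8)  & 0xff;
 *	p[2] = (u >> 16) & 0xff;
 *	p[3] = (u >> 24) & 0xff;
 *
 * so it never issues a misaligned 32-bit access, which matters here
 * because the RFA sits RFA_ALIGNMENT_FUDGE bytes into the cluster.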
*/ le32enc(&rfa->link_addr, 0xffffffff); le32enc(&rfa->rbd_addr, 0xffffffff); bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int fxp_miibus_readreg(device_t dev, int phy, int reg) { struct fxp_softc *sc = device_get_softc(dev); int count = 10000; int value; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) device_printf(dev, "fxp_miibus_readreg: timed out\n"); return (value & 0xffff); } static int fxp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct fxp_softc *sc = device_get_softc(dev); int count = 10000; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | (value & 0xffff)); while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) device_printf(dev, "fxp_miibus_writereg: timed out\n"); return (0); } static void fxp_miibus_statchg(device_t dev) { struct fxp_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = device_get_softc(dev); mii = device_get_softc(sc->miibus); ifp = sc->ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) != (IFM_AVALID | IFM_ACTIVE)) return; if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG) sc->cu_resume_bug = 1; else sc->cu_resume_bug = 0; /* * Call fxp_init_body in order to adjust the flow control settings. * Note that the 82557 doesn't support hardware flow control. */ if (sc->revision == FXP_REV_82557) return; fxp_init_body(sc, 0); } static int fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct fxp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int flag, mask, error = 0, reinit; switch (command) { case SIOCSIFFLAGS: FXP_LOCK(sc); /* * If interface is marked up and not running, then start it. * If it is marked down and running, stop it. * XXX If it's up then re-initialize it. This is so flags * such as IFF_PROMISC are handled. 
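*/

/*
 * The XOR of the previous and current flags isolates the bits that
 * changed; a running interface is re-initialized only when a bit the
 * hardware configuration depends on (IFF_PROMISC, IFF_ALLMULTI or
 * IFF_LINK0) has flipped.
 */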
*/ if (ifp->if_flags & IFF_UP) { if (((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) && ((ifp->if_flags ^ sc->if_flags) & (IFF_PROMISC | IFF_ALLMULTI | IFF_LINK0)) != 0) fxp_init_body(sc, 0); else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) fxp_init_body(sc, 1); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) fxp_stop(sc); } sc->if_flags = ifp->if_flags; FXP_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: FXP_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) fxp_init_body(sc, 0); FXP_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->miibus != NULL) { mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else { error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); } break; case SIOCSIFCAP: reinit = 0; mask = ifp->if_capenable ^ ifr->ifr_reqcap; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(fxp_poll, ifp); if (error) return(error); FXP_LOCK(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); ifp->if_capenable |= IFCAP_POLLING; FXP_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts in any case */ FXP_LOCK(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); ifp->if_capenable &= ~IFCAP_POLLING; FXP_UNLOCK(sc); } } #endif FXP_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= FXP_CSUM_FEATURES; else ifp->if_hwassist &= ~FXP_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0) reinit++; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((ifp->if_capenable & IFCAP_TSO4) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; if ((mask & IFCAP_VLAN_MTU) != 0 && (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0) { ifp->if_capenable ^= IFCAP_VLAN_MTU; if (sc->revision != FXP_REV_82557) flag = FXP_FLAG_LONG_PKT_EN; else /* a hack to get long frames on the old chip */ flag = FXP_FLAG_SAVE_BAD; sc->flags ^= flag; if (ifp->if_flags & IFF_UP) reinit++; } if ((mask & IFCAP_VLAN_HWCSUM) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); reinit++; } if (reinit > 0 && ifp->if_flags & IFF_UP) fxp_init_body(sc, 0); FXP_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * Fill in the multicast address list and return number of entries. 
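 * If more than MAXMCADDR addresses are present, the routine instead
 * sets IFF_ALLMULTI and programs an empty list (see the overflow
 * check below).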
*/ static int fxp_mc_addrs(struct fxp_softc *sc) { struct fxp_cb_mcs *mcsp = sc->mcsp; struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; int nmcasts; nmcasts = 0; if ((ifp->if_flags & IFF_ALLMULTI) == 0) { if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (nmcasts >= MAXMCADDR) { ifp->if_flags |= IFF_ALLMULTI; nmcasts = 0; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN); nmcasts++; } if_maddr_runlock(ifp); } mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN); return (nmcasts); } /* * Program the multicast filter. * * We have an artificial restriction that the multicast setup command * must be the first command in the chain, so we take steps to ensure * this. By requiring this, it allows us to keep up the performance of * the pre-initialized command ring (esp. link pointers) by not actually * inserting the mcsetup command in the ring - i.e. its link pointer * points to the TxCB ring, but the mcsetup descriptor itself is not part * of it. We then can do 'CU_START' on the mcsetup descriptor and have it * lead into the regular TxCB ring when it completes. */ static void fxp_mc_setup(struct fxp_softc *sc) { struct fxp_cb_mcs *mcsp; int count; FXP_LOCK_ASSERT(sc, MA_OWNED); mcsp = sc->mcsp; mcsp->cb_status = 0; mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL); mcsp->link_addr = 0xffffffff; fxp_mc_addrs(sc); /* * Wait until command unit is idle. This should never be the * case when nothing is queued, but make sure anyway. */ count = 100; while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) != FXP_SCB_CUS_IDLE && --count) DELAY(10); if (count == 0) { device_printf(sc->dev, "command queue timeout\n"); return; } /* * Start the multicast setup command. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. 
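 * Note the mcsetup CB is kicked with CU_START even though it is not
 * part of the TxCB ring; its link pointer leads back into the ring,
 * as described in the comment above fxp_mc_setup().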
*/ fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map); } static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE; #define UCODE(x) x, sizeof(x)/sizeof(uint32_t) static const struct ucode { uint32_t revision; uint32_t *ucode; int length; u_short int_delay_offset; u_short bundle_max_offset; } const ucode_table[] = { { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 }, { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 }, { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma), D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s), D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82550, UCODE(fxp_ucode_d102), D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82550_C, UCODE(fxp_ucode_d102c), D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82551_F, UCODE(fxp_ucode_d102e), D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82551_10, UCODE(fxp_ucode_d102e), D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD }, { 0, NULL, 0, 0, 0 } }; static void fxp_load_ucode(struct fxp_softc *sc) { const struct ucode *uc; struct fxp_cb_ucode *cbp; int i; if (sc->flags & FXP_FLAG_NO_UCODE) return; for (uc = ucode_table; uc->ucode != NULL; uc++) if (sc->revision == uc->revision) break; if (uc->ucode == NULL) return; cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list; cbp->cb_status = 0; cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL); cbp->link_addr = 0xffffffff; /* (no) next command */ for (i = 0; i < uc->length; i++) cbp->ucode[i] = htole32(uc->ucode[i]); if (uc->int_delay_offset) *(uint16_t *)&cbp->ucode[uc->int_delay_offset] = htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2); if (uc->bundle_max_offset) *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] = htole16(sc->tunable_bundle_max); /* * Download the ucode to the chip. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); device_printf(sc->dev, "Microcode loaded, int_delay: %d usec bundle_max: %d\n", sc->tunable_int_delay, uc->bundle_max_offset == 0 ? 
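/* ucode variants without a bundle_max parameter report 0 here; the
 * int_delay word stored above is int_delay * 3 / 2, the multiplier
 * that converts microseconds into microcode timer ticks (see the
 * sysctl handler comment near the end of the file) */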
0 : sc->tunable_bundle_max); sc->flags |= FXP_FLAG_UCODE; bzero(cbp, FXP_TXCB_SZ); } #define FXP_SYSCTL_STAT_ADD(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) static void fxp_sysctl_node(struct fxp_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct fxp_hwstats *hsp; ctx = device_get_sysctl_ctx(sc->dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I", "FXP driver receive interrupt microcode bundling delay"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I", "FXP driver receive interrupt microcode bundle size limit"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0, "FXP RNR events"); /* * Pull in device tunables. */ sc->tunable_int_delay = TUNABLE_INT_DELAY; sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX; (void) resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "int_delay", &sc->tunable_int_delay); (void) resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "bundle_max", &sc->tunable_bundle_max); sc->rnr = 0; hsp = &sc->fxp_hwstats; tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "FXP statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx MAC statistics. */ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames", &hsp->rx_good, "Good frames"); FXP_SYSCTL_STAT_ADD(ctx, child, "crc_errors", &hsp->rx_crc_errors, "CRC errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "alignment_errors", &hsp->rx_alignment_errors, "Alignment errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "rnr_errors", &hsp->rx_rnr_errors, "RNR errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "overrun_errors", &hsp->rx_overrun_errors, "Overrun errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "cdt_errors", &hsp->rx_cdt_errors, "Collision detect errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "shortframes", &hsp->rx_shortframes, "Short frame errors"); if (sc->revision >= FXP_REV_82558_A4) { FXP_SYSCTL_STAT_ADD(ctx, child, "pause", &hsp->rx_pause, "Pause frames"); FXP_SYSCTL_STAT_ADD(ctx, child, "controls", &hsp->rx_controls, "Unsupported control frames"); } if (sc->revision >= FXP_REV_82559_A0) FXP_SYSCTL_STAT_ADD(ctx, child, "tco", &hsp->rx_tco, "TCO frames"); /* Tx MAC statistics.
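 *
 * Each FXP_SYSCTL_STAT_ADD(ctx, child, name, ptr, desc) used here
 * expands, per the macro definition above, into a read-only unsigned
 * integer leaf, e.g.:
 */
#if 0	/* expansion shown for illustration only */
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "good_frames", CTLFLAG_RD,
	    &hsp->tx_good, 0, "Good frames");
#endif
/*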
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames", &hsp->tx_good, "Good frames"); FXP_SYSCTL_STAT_ADD(ctx, child, "maxcols", &hsp->tx_maxcols, "Maximum collisions errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "latecols", &hsp->tx_latecols, "Late collisions errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "underruns", &hsp->tx_underruns, "Underrun errors"); FXP_SYSCTL_STAT_ADD(ctx, child, "lostcrs", &hsp->tx_lostcrs, "Lost carrier sense"); FXP_SYSCTL_STAT_ADD(ctx, child, "deffered", &hsp->tx_deffered, "Deferred"); FXP_SYSCTL_STAT_ADD(ctx, child, "single_collisions", &hsp->tx_single_collisions, "Single collisions"); FXP_SYSCTL_STAT_ADD(ctx, child, "multiple_collisions", &hsp->tx_multiple_collisions, "Multiple collisions"); FXP_SYSCTL_STAT_ADD(ctx, child, "total_collisions", &hsp->tx_total_collisions, "Total collisions"); if (sc->revision >= FXP_REV_82558_A4) FXP_SYSCTL_STAT_ADD(ctx, child, "pause", &hsp->tx_pause, "Pause frames"); if (sc->revision >= FXP_REV_82559_A0) FXP_SYSCTL_STAT_ADD(ctx, child, "tco", &hsp->tx_tco, "TCO frames"); } #undef FXP_SYSCTL_STAT_ADD static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } /* * Interrupt delay is expressed in microseconds, a multiplier is used * to convert this to the appropriate clock ticks before using. */ static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000)); } static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff)); } Index: projects/nand/sys/dev/hwpmc/hwpmc_intel.c =================================================================== --- projects/nand/sys/dev/hwpmc/hwpmc_intel.c (revision 235262) +++ projects/nand/sys/dev/hwpmc/hwpmc_intel.c (revision 235263) @@ -1,321 +1,309 @@ /*- * Copyright (c) 2008 Joseph Koshy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Common code for handling Intel CPUs. 
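 *
 * pmc_intel_initialize() below decodes the CPUID signature by hand:
 * the switch keys on the family nibble (cpu_id & 0xF00), and the model
 * is rebuilt from the extended-model bits 19-16 and base-model bits
 * 7-4.  For example, a signature of 0x106E5 (a Core i7) gives family
 * 0x600 and model (0x10000 >> 12) | (0xE0 >> 4) = 0x10 | 0xE = 0x1E,
 * which the 0x600 case maps to PMC_CPU_INTEL_COREI7.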
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include static int intel_switch_in(struct pmc_cpu *pc, struct pmc_process *pp) { (void) pc; PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS); /* allow the RDPMC instruction if needed */ if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) load_cr4(rcr4() | CR4_PCE); PMCDBG(MDP,SWI,1, "cr4=0x%jx", (uintmax_t) rcr4()); return 0; } static int intel_switch_out(struct pmc_cpu *pc, struct pmc_process *pp) { (void) pc; (void) pp; /* can be NULL */ PMCDBG(MDP,SWO,1, "pc=%p pp=%p cr4=0x%jx", pc, pp, (uintmax_t) rcr4()); /* always turn off the RDPMC instruction */ load_cr4(rcr4() & ~CR4_PCE); return 0; } struct pmc_mdep * pmc_intel_initialize(void) { struct pmc_mdep *pmc_mdep; enum pmc_cputype cputype; int error, model, nclasses, ncpus; KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL, ("[intel,%d] Initializing non-intel processor", __LINE__)); PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id); cputype = -1; nclasses = 2; model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4); switch (cpu_id & 0xF00) { #if defined(__i386__) case 0x500: /* Pentium family processors */ cputype = PMC_CPU_INTEL_P5; break; #endif case 0x600: /* Pentium Pro, Celeron, Pentium II & III */ switch (model) { #if defined(__i386__) case 0x1: cputype = PMC_CPU_INTEL_P6; break; case 0x3: case 0x5: cputype = PMC_CPU_INTEL_PII; break; case 0x6: case 0x16: cputype = PMC_CPU_INTEL_CL; break; case 0x7: case 0x8: case 0xA: case 0xB: cputype = PMC_CPU_INTEL_PIII; break; case 0x9: case 0xD: cputype = PMC_CPU_INTEL_PM; break; #endif case 0xE: cputype = PMC_CPU_INTEL_CORE; break; case 0xF: cputype = PMC_CPU_INTEL_CORE2; nclasses = 3; break; case 0x17: cputype = PMC_CPU_INTEL_CORE2EXTREME; nclasses = 3; break; case 0x1C: /* Per Intel document 320047-002. */ cputype = PMC_CPU_INTEL_ATOM; nclasses = 3; break; case 0x1A: case 0x1E: /* Per Intel document 253669-032 9/2009, pages A-2 and A-57 */ case 0x1F: /* Per Intel document 253669-032 9/2009, pages A-2 and A-57 */ case 0x2E: cputype = PMC_CPU_INTEL_COREI7; nclasses = 5; break; case 0x25: /* Per Intel document 253669-033US 12/2009. */ case 0x2C: /* Per Intel document 253669-033US 12/2009. */ cputype = PMC_CPU_INTEL_WESTMERE; nclasses = 5; break; case 0x2A: /* Per Intel document 253669-039US 05/2011. */ case 0x2D: /* Per Intel document 253669-041US 12/2011. */ cputype = PMC_CPU_INTEL_SANDYBRIDGE; nclasses = 5; break; } break; #if defined(__i386__) || defined(__amd64__) case 0xF00: /* P4 */ if (model >= 0 && model <= 6) /* known models */ cputype = PMC_CPU_INTEL_PIV; break; } #endif if ((int) cputype == -1) { printf("pmc: Unknown Intel CPU.\n"); return (NULL); } /* Allocate base class and initialize machine dependent struct */ pmc_mdep = pmc_mdep_alloc(nclasses); pmc_mdep->pmd_cputype = cputype; pmc_mdep->pmd_switch_in = intel_switch_in; pmc_mdep->pmd_switch_out = intel_switch_out; ncpus = pmc_cpu_max(); error = pmc_tsc_initialize(pmc_mdep, ncpus); if (error) goto error; switch (cputype) { #if defined(__i386__) || defined(__amd64__) /* * Intel Core, Core 2 and Atom processors. */ case PMC_CPU_INTEL_ATOM: case PMC_CPU_INTEL_CORE: case PMC_CPU_INTEL_CORE2: case PMC_CPU_INTEL_CORE2EXTREME: case PMC_CPU_INTEL_COREI7: case PMC_CPU_INTEL_SANDYBRIDGE: case PMC_CPU_INTEL_WESTMERE: error = pmc_core_initialize(pmc_mdep, ncpus); break; /* * Intel Pentium 4 Processors, and P4/EMT64 processors. 
*/ case PMC_CPU_INTEL_PIV: error = pmc_p4_initialize(pmc_mdep, ncpus); - - KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + P4_NPMCS, - ("[intel,%d] incorrect npmc count %d", __LINE__, - pmc_mdep->pmd_npmc)); break; #endif #if defined(__i386__) /* * P6 Family Processors */ case PMC_CPU_INTEL_P6: case PMC_CPU_INTEL_CL: case PMC_CPU_INTEL_PII: case PMC_CPU_INTEL_PIII: case PMC_CPU_INTEL_PM: error = pmc_p6_initialize(pmc_mdep, ncpus); - - KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + P6_NPMCS, - ("[intel,%d] incorrect npmc count %d", __LINE__, - pmc_mdep->pmd_npmc)); break; /* * Intel Pentium PMCs. */ case PMC_CPU_INTEL_P5: error = pmc_p5_initialize(pmc_mdep, ncpus); - - KASSERT(pmc_mdep->pmd_npmc == TSC_NPMCS + PENTIUM_NPMCS, - ("[intel,%d] incorrect npmc count %d", __LINE__, - pmc_mdep->pmd_npmc)); break; #endif default: KASSERT(0, ("[intel,%d] Unknown CPU type", __LINE__)); } if (error) goto error; /* * Init the uncore class. */ #if defined(__i386__) || defined(__amd64__) switch (cputype) { /* * Intel Corei7 and Westmere processors. */ case PMC_CPU_INTEL_COREI7: case PMC_CPU_INTEL_SANDYBRIDGE: case PMC_CPU_INTEL_WESTMERE: error = pmc_uncore_initialize(pmc_mdep, ncpus); break; default: break; } #endif error: if (error) { free(pmc_mdep, M_PMC); pmc_mdep = NULL; } return (pmc_mdep); } void pmc_intel_finalize(struct pmc_mdep *md) { pmc_tsc_finalize(md); switch (md->pmd_cputype) { #if defined(__i386__) || defined(__amd64__) case PMC_CPU_INTEL_ATOM: case PMC_CPU_INTEL_CORE: case PMC_CPU_INTEL_CORE2: case PMC_CPU_INTEL_CORE2EXTREME: case PMC_CPU_INTEL_COREI7: case PMC_CPU_INTEL_SANDYBRIDGE: case PMC_CPU_INTEL_WESTMERE: pmc_core_finalize(md); break; case PMC_CPU_INTEL_PIV: pmc_p4_finalize(md); break; #endif #if defined(__i386__) case PMC_CPU_INTEL_P6: case PMC_CPU_INTEL_CL: case PMC_CPU_INTEL_PII: case PMC_CPU_INTEL_PIII: case PMC_CPU_INTEL_PM: pmc_p6_finalize(md); break; case PMC_CPU_INTEL_P5: pmc_p5_finalize(md); break; #endif default: KASSERT(0, ("[intel,%d] unknown CPU type", __LINE__)); } /* * Uncore. */ #if defined(__i386__) || defined(__amd64__) switch (md->pmd_cputype) { case PMC_CPU_INTEL_COREI7: case PMC_CPU_INTEL_SANDYBRIDGE: case PMC_CPU_INTEL_WESTMERE: pmc_uncore_finalize(md); break; default: break; } #endif } Index: projects/nand/sys/dev/ral/if_ral_pci.c =================================================================== --- projects/nand/sys/dev/ral/if_ral_pci.c (revision 235262) +++ projects/nand/sys/dev/ral/if_ral_pci.c (revision 235263) @@ -1,272 +1,318 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * PCI/Cardbus front-end for the Ralink RT2560/RT2561/RT2561S/RT2661 driver. 
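 * With this change the front-end also attaches the RT2860/RT3090
 * family through the new ral_rt2860_opns entry points below.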
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include MODULE_DEPEND(ral, pci, 1, 1, 1); MODULE_DEPEND(ral, firmware, 1, 1, 1); MODULE_DEPEND(ral, wlan, 1, 1, 1); MODULE_DEPEND(ral, wlan_amrr, 1, 1, 1); struct ral_pci_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct ral_pci_ident ral_pci_ids[] = { + { 0x1432, 0x7708, "Edimax RT2860" }, + { 0x1432, 0x7711, "Edimax RT3591" }, + { 0x1432, 0x7722, "Edimax RT3591" }, + { 0x1432, 0x7727, "Edimax RT2860" }, + { 0x1432, 0x7728, "Edimax RT2860" }, + { 0x1432, 0x7738, "Edimax RT2860" }, + { 0x1432, 0x7748, "Edimax RT2860" }, + { 0x1432, 0x7758, "Edimax RT2860" }, + { 0x1432, 0x7768, "Edimax RT2860" }, + { 0x1462, 0x891a, "MSI RT3090" }, { 0x1814, 0x0201, "Ralink Technology RT2560" }, { 0x1814, 0x0301, "Ralink Technology RT2561S" }, { 0x1814, 0x0302, "Ralink Technology RT2561" }, { 0x1814, 0x0401, "Ralink Technology RT2661" }, - + { 0x1814, 0x0601, "Ralink Technology RT2860" }, + { 0x1814, 0x0681, "Ralink Technology RT2890" }, + { 0x1814, 0x0701, "Ralink Technology RT2760" }, + { 0x1814, 0x0781, "Ralink Technology RT2790" }, + { 0x1814, 0x3060, "Ralink Technology RT3060" }, + { 0x1814, 0x3062, "Ralink Technology RT3062" }, + { 0x1814, 0x3090, "Ralink Technology RT3090" }, + { 0x1814, 0x3091, "Ralink Technology RT3091" }, + { 0x1814, 0x3092, "Ralink Technology RT3092" }, + { 0x1814, 0x3390, "Ralink Technology RT3390" }, + { 0x1814, 0x3562, "Ralink Technology RT3562" }, + { 0x1814, 0x3592, "Ralink Technology RT3592" }, + { 0x1814, 0x3593, "Ralink Technology RT3593" }, + { 0x1814, 0x5390, "Ralink Technology RT5390" }, + { 0x1814, 0x539a, "Ralink Technology RT5390" }, + { 0x1814, 0x539f, "Ralink Technology RT5390" }, + { 0x1a3b, 0x1059, "AWT RT2890" }, { 0, 0, NULL } }; static struct ral_opns { int (*attach)(device_t, int); int (*detach)(void *); void (*shutdown)(void *); void (*suspend)(void *); void (*resume)(void *); void (*intr)(void *); } ral_rt2560_opns = { rt2560_attach, rt2560_detach, rt2560_stop, rt2560_stop, rt2560_resume, rt2560_intr }, ral_rt2661_opns = { rt2661_attach, rt2661_detach, rt2661_shutdown, rt2661_suspend, rt2661_resume, rt2661_intr +}, ral_rt2860_opns = { + rt2860_attach, + rt2860_detach, + rt2860_shutdown, + rt2860_suspend, + rt2860_resume, + rt2860_intr }; struct ral_pci_softc { union { struct rt2560_softc sc_rt2560; struct rt2661_softc sc_rt2661; + struct rt2860_softc sc_rt2860; } u; struct ral_opns *sc_opns; int irq_rid; int mem_rid; struct resource *irq; struct resource *mem; void *sc_ih; }; static int ral_pci_probe(device_t); static int ral_pci_attach(device_t); static int ral_pci_detach(device_t); static int ral_pci_shutdown(device_t); static int ral_pci_suspend(device_t); static int ral_pci_resume(device_t); static device_method_t ral_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ral_pci_probe), DEVMETHOD(device_attach, ral_pci_attach), DEVMETHOD(device_detach, ral_pci_detach), DEVMETHOD(device_shutdown, ral_pci_shutdown), DEVMETHOD(device_suspend, ral_pci_suspend), DEVMETHOD(device_resume, ral_pci_resume), { 0, 0 } }; static driver_t ral_pci_driver = { "ral", ral_pci_methods, sizeof (struct ral_pci_softc) }; static devclass_t ral_devclass; DRIVER_MODULE(ral, pci, ral_pci_driver, ral_devclass, 0, 0); static int ral_pci_probe(device_t dev) { const struct 
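/* probe only matches the PCI ID table; all chip-specific work is
 * reached through the ral_opns vtable selected in ral_pci_attach() */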
ral_pci_ident *ident; for (ident = ral_pci_ids; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return 0; } } return ENXIO; } /* Base Address Register */ #define RAL_PCI_BAR0 0x10 static int ral_pci_attach(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); struct rt2560_softc *sc = &psc->u.sc_rt2560; int error; if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } /* enable bus-mastering */ pci_enable_busmaster(dev); - psc->sc_opns = (pci_get_device(dev) == 0x0201) ? &ral_rt2560_opns : - &ral_rt2661_opns; + switch (pci_get_device(dev)) { + case 0x0201: + psc->sc_opns = &ral_rt2560_opns; + break; + case 0x0301: + case 0x0302: + case 0x0401: + psc->sc_opns = &ral_rt2661_opns; + break; + default: + psc->sc_opns = &ral_rt2860_opns; + break; + } psc->mem_rid = RAL_PCI_BAR0; psc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &psc->mem_rid, RF_ACTIVE); if (psc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); return ENXIO; } sc->sc_st = rman_get_bustag(psc->mem); sc->sc_sh = rman_get_bushandle(psc->mem); sc->sc_invalid = 1; psc->irq_rid = 0; psc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &psc->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (psc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); return ENXIO; } error = (*psc->sc_opns->attach)(dev, pci_get_device(dev)); if (error != 0) return error; /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, psc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, psc->sc_opns->intr, psc, &psc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); return error; } sc->sc_invalid = 0; return 0; } static int ral_pci_detach(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); struct rt2560_softc *sc = &psc->u.sc_rt2560; /* check if device was removed */ sc->sc_invalid = !bus_child_present(dev); (*psc->sc_opns->detach)(psc); bus_generic_detach(dev); bus_teardown_intr(dev, psc->irq, psc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, psc->irq_rid, psc->irq); bus_release_resource(dev, SYS_RES_MEMORY, psc->mem_rid, psc->mem); return 0; } static int ral_pci_shutdown(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); (*psc->sc_opns->shutdown)(psc); return 0; } static int ral_pci_suspend(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); (*psc->sc_opns->suspend)(psc); return 0; } static int ral_pci_resume(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); (*psc->sc_opns->resume)(psc); return 0; } Index: projects/nand/sys/dev/ral/rt2860.c =================================================================== --- projects/nand/sys/dev/ral/rt2860.c (nonexistent) +++ projects/nand/sys/dev/ral/rt2860.c (revision 235263) @@ -0,0 +1,4103 @@ +/*- + * Copyright (c) 2007-2010 Damien Bergamini + * Copyright (c) 2012 Bernhard Schmidt + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $OpenBSD: rt2860.c,v 1.65 2010/10/23 14:24:54 damien Exp $ + */ + +#include +__FBSDID("$FreeBSD$"); + +/*- + * Ralink Technology RT2860/RT3090/RT3390/RT3562 chipset driver + * http://www.ralinktech.com/ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#define RAL_DEBUG +#ifdef RAL_DEBUG +#define DPRINTF(x) do { if (sc->sc_debug > 0) printf x; } while (0) +#define DPRINTFN(n, x) do { if (sc->sc_debug >= (n)) printf x; } while (0) +#else +#define DPRINTF(x) +#define DPRINTFN(n, x) +#endif + +static struct ieee80211vap *rt2860_vap_create(struct ieee80211com *, + const char [IFNAMSIZ], int, enum ieee80211_opmode, + int, const uint8_t [IEEE80211_ADDR_LEN], + const uint8_t [IEEE80211_ADDR_LEN]); +static void rt2860_vap_delete(struct ieee80211vap *); +static void rt2860_dma_map_addr(void *, bus_dma_segment_t *, int, int); +static int rt2860_alloc_tx_ring(struct rt2860_softc *, + struct rt2860_tx_ring *); +static void rt2860_reset_tx_ring(struct rt2860_softc *, + struct rt2860_tx_ring *); +static void rt2860_free_tx_ring(struct rt2860_softc *, + struct rt2860_tx_ring *); +static int rt2860_alloc_tx_pool(struct rt2860_softc *); +static void rt2860_free_tx_pool(struct rt2860_softc *); +static int rt2860_alloc_rx_ring(struct rt2860_softc *, + struct rt2860_rx_ring *); +static void rt2860_reset_rx_ring(struct rt2860_softc *, + struct rt2860_rx_ring *); +static void rt2860_free_rx_ring(struct rt2860_softc *, + struct rt2860_rx_ring *); +static void rt2860_updatestats(struct rt2860_softc *); +static void rt2860_newassoc(struct ieee80211_node *, int); +static void rt2860_node_free(struct ieee80211_node *); +#ifdef IEEE80211_HT +static int rt2860_ampdu_rx_start(struct ieee80211com *, + struct ieee80211_node *, uint8_t); +static void rt2860_ampdu_rx_stop(struct ieee80211com *, + struct ieee80211_node *, uint8_t); +#endif +static int rt2860_newstate(struct ieee80211vap *, enum ieee80211_state, + int); +static uint16_t rt3090_efuse_read_2(struct rt2860_softc *, uint16_t); +static uint16_t rt2860_eeprom_read_2(struct rt2860_softc *, uint16_t); +static void rt2860_intr_coherent(struct rt2860_softc *); +static void rt2860_drain_stats_fifo(struct rt2860_softc *); +static void rt2860_tx_intr(struct rt2860_softc *, int); +static void rt2860_rx_intr(struct rt2860_softc *); +static void rt2860_tbtt_intr(struct rt2860_softc *); +static void rt2860_gp_intr(struct rt2860_softc *); +static int rt2860_tx(struct rt2860_softc *, struct mbuf *, + struct ieee80211_node *); +static int rt2860_raw_xmit(struct ieee80211_node *, struct mbuf *, + const struct ieee80211_bpf_params *); +static int rt2860_tx_raw(struct rt2860_softc *, struct mbuf *, + struct ieee80211_node *, + const struct ieee80211_bpf_params *params); +static void rt2860_start(struct ifnet *); +static void rt2860_start_locked(struct ifnet *); +static void rt2860_watchdog(void *); +static int rt2860_ioctl(struct 
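+/* Note the double parentheses at DPRINTF() call sites: the macro
+ * takes a single parenthesized argument list and hands it to printf,
+ * a common pre-C99 substitute for variadic macros; the do { } while (0)
+ * wrapper makes each invocation behave as one statement. */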
ifnet *, u_long, caddr_t); +static void rt2860_mcu_bbp_write(struct rt2860_softc *, uint8_t, uint8_t); +static uint8_t rt2860_mcu_bbp_read(struct rt2860_softc *, uint8_t); +static void rt2860_rf_write(struct rt2860_softc *, uint8_t, uint32_t); +static uint8_t rt3090_rf_read(struct rt2860_softc *, uint8_t); +static void rt3090_rf_write(struct rt2860_softc *, uint8_t, uint8_t); +static int rt2860_mcu_cmd(struct rt2860_softc *, uint8_t, uint16_t, int); +static void rt2860_enable_mrr(struct rt2860_softc *); +static void rt2860_set_txpreamble(struct rt2860_softc *); +static void rt2860_set_basicrates(struct rt2860_softc *, + const struct ieee80211_rateset *); +static void rt2860_scan_start(struct ieee80211com *); +static void rt2860_scan_end(struct ieee80211com *); +static void rt2860_set_channel(struct ieee80211com *); +static void rt2860_select_chan_group(struct rt2860_softc *, int); +static void rt2860_set_chan(struct rt2860_softc *, u_int); +static void rt3090_set_chan(struct rt2860_softc *, u_int); +static int rt3090_rf_init(struct rt2860_softc *); +static void rt3090_rf_wakeup(struct rt2860_softc *); +static int rt3090_filter_calib(struct rt2860_softc *, uint8_t, uint8_t, + uint8_t *); +static void rt3090_rf_setup(struct rt2860_softc *); +static void rt2860_set_leds(struct rt2860_softc *, uint16_t); +static void rt2860_set_gp_timer(struct rt2860_softc *, int); +static void rt2860_set_bssid(struct rt2860_softc *, const uint8_t *); +static void rt2860_set_macaddr(struct rt2860_softc *, const uint8_t *); +static void rt2860_update_promisc(struct ifnet *); +static void rt2860_updateslot(struct ifnet *); +static void rt2860_updateprot(struct ifnet *); +static int rt2860_updateedca(struct ieee80211com *); +#ifdef HW_CRYPTO +static int rt2860_set_key(struct ieee80211com *, struct ieee80211_node *, + struct ieee80211_key *); +static void rt2860_delete_key(struct ieee80211com *, + struct ieee80211_node *, struct ieee80211_key *); +#endif +static int8_t rt2860_rssi2dbm(struct rt2860_softc *, uint8_t, uint8_t); +static const char *rt2860_get_rf(uint8_t); +static int rt2860_read_eeprom(struct rt2860_softc *, + uint8_t macaddr[IEEE80211_ADDR_LEN]); +static int rt2860_bbp_init(struct rt2860_softc *); +static int rt2860_txrx_enable(struct rt2860_softc *); +static void rt2860_init(void *); +static void rt2860_init_locked(struct rt2860_softc *); +static void rt2860_stop(void *); +static void rt2860_stop_locked(struct rt2860_softc *); +static int rt2860_load_microcode(struct rt2860_softc *); +#ifdef NOT_YET +static void rt2860_calib(struct rt2860_softc *); +#endif +static void rt3090_set_rx_antenna(struct rt2860_softc *, int); +static void rt2860_switch_chan(struct rt2860_softc *, + struct ieee80211_channel *); +static int rt2860_setup_beacon(struct rt2860_softc *, + struct ieee80211vap *); +static void rt2860_enable_tsf_sync(struct rt2860_softc *); + +static const struct { + uint32_t reg; + uint32_t val; +} rt2860_def_mac[] = { + RT2860_DEF_MAC +}; + +static const struct { + uint8_t reg; + uint8_t val; +} rt2860_def_bbp[] = { + RT2860_DEF_BBP +}; + +static const struct rfprog { + uint8_t chan; + uint32_t r1, r2, r3, r4; +} rt2860_rf2850[] = { + RT2860_RF2850 +}; + +struct { + uint8_t n, r, k; +} rt3090_freqs[] = { + RT3070_RF3052 +}; + +static const struct { + uint8_t reg; + uint8_t val; +} rt3090_def_rf[] = { + RT3070_DEF_RF +}; + +int +rt2860_attach(device_t dev, int id) +{ + struct rt2860_softc *sc = device_get_softc(dev); + struct ieee80211com *ic; + struct ifnet *ifp; + uint32_t tmp; + int error, 
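+ /* attach first polls RT2860_ASIC_VER_ID until it reads neither 0
+ * nor 0xffffffff, then splits the value into mac_ver (high 16 bits)
+ * and mac_rev (low 16 bits) */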
ntries, qid; + uint8_t bands; + uint8_t macaddr[IEEE80211_ADDR_LEN]; + + sc->sc_dev = dev; + sc->sc_debug = 0; + + ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); + if (ifp == NULL) { + device_printf(sc->sc_dev, "can not if_alloc()\n"); + return ENOMEM; + } + ic = ifp->if_l2com; + + mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, + MTX_DEF | MTX_RECURSE); + + callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); + + /* wait for NIC to initialize */ + for (ntries = 0; ntries < 100; ntries++) { + tmp = RAL_READ(sc, RT2860_ASIC_VER_ID); + if (tmp != 0 && tmp != 0xffffffff) + break; + DELAY(10); + } + if (ntries == 100) { + device_printf(sc->sc_dev, + "timeout waiting for NIC to initialize\n"); + error = EIO; + goto fail1; + } + sc->mac_ver = tmp >> 16; + sc->mac_rev = tmp & 0xffff; + + if (sc->mac_ver != 0x2860 && + (id == 0x0681 || id == 0x0781 || id == 0x1059)) + sc->sc_flags |= RT2860_ADVANCED_PS; + + /* retrieve RF rev. no and various other things from EEPROM */ + rt2860_read_eeprom(sc, macaddr); + if (bootverbose) { + device_printf(sc->sc_dev, "MAC/BBP RT%X (rev 0x%04X), " + "RF %s (MIMO %dT%dR), address %6D\n", + sc->mac_ver, sc->mac_rev, rt2860_get_rf(sc->rf_rev), + sc->ntxchains, sc->nrxchains, macaddr, ":"); + } + + /* + * Allocate Tx (4 EDCAs + HCCA + Mgt) and Rx rings. + */ + for (qid = 0; qid < 6; qid++) { + if ((error = rt2860_alloc_tx_ring(sc, &sc->txq[qid])) != 0) { + device_printf(sc->sc_dev, + "could not allocate Tx ring %d\n", qid); + goto fail2; + } + } + + if ((error = rt2860_alloc_rx_ring(sc, &sc->rxq)) != 0) { + device_printf(sc->sc_dev, "could not allocate Rx ring\n"); + goto fail2; + } + + if ((error = rt2860_alloc_tx_pool(sc)) != 0) { + device_printf(sc->sc_dev, "could not allocate Tx pool\n"); + goto fail3; + } + + /* mgmt ring is broken on RT2860C, use EDCA AC VO ring instead */ + sc->mgtqid = (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) ? 
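+ /* rings 0-3 are the four EDCA ACs, ring 4 is HCCA and ring 5 the
+ * dedicated management ring (see the allocation loop above), so
+ * chips with the broken management ring borrow the AC_VO queue */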
+ WME_AC_VO : 5; + + ifp->if_softc = sc; + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_init = rt2860_init; + ifp->if_ioctl = rt2860_ioctl; + ifp->if_start = rt2860_start; + IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); + ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; + IFQ_SET_READY(&ifp->if_snd); + + ic->ic_ifp = ifp; + ic->ic_opmode = IEEE80211_M_STA; + ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ + + /* set device capabilities */ + ic->ic_caps = + IEEE80211_C_STA /* station mode */ + | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ + | IEEE80211_C_HOSTAP /* hostap mode */ + | IEEE80211_C_MONITOR /* monitor mode */ + | IEEE80211_C_AHDEMO /* adhoc demo mode */ + | IEEE80211_C_WDS /* 4-address traffic works */ + | IEEE80211_C_MBSS /* mesh point link mode */ + | IEEE80211_C_SHPREAMBLE /* short preamble supported */ + | IEEE80211_C_SHSLOT /* short slot time supported */ + | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ +#if 0 + | IEEE80211_C_BGSCAN /* capable of bg scanning */ +#endif + | IEEE80211_C_WME /* 802.11e */ + ; + + bands = 0; + setbit(&bands, IEEE80211_MODE_11B); + setbit(&bands, IEEE80211_MODE_11G); + if (sc->rf_rev == RT2860_RF_2750 || sc->rf_rev == RT2860_RF_2850) + setbit(&bands, IEEE80211_MODE_11A); + ieee80211_init_channels(ic, NULL, &bands); + + ieee80211_ifattach(ic, macaddr); + + ic->ic_wme.wme_update = rt2860_updateedca; + ic->ic_scan_start = rt2860_scan_start; + ic->ic_scan_end = rt2860_scan_end; + ic->ic_set_channel = rt2860_set_channel; + ic->ic_updateslot = rt2860_updateslot; + ic->ic_update_promisc = rt2860_update_promisc; + ic->ic_raw_xmit = rt2860_raw_xmit; + sc->sc_node_free = ic->ic_node_free; + ic->ic_node_free = rt2860_node_free; + ic->ic_newassoc = rt2860_newassoc; + + ic->ic_vap_create = rt2860_vap_create; + ic->ic_vap_delete = rt2860_vap_delete; + + ieee80211_radiotap_attach(ic, + &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), + RT2860_TX_RADIOTAP_PRESENT, + &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), + RT2860_RX_RADIOTAP_PRESENT); + +#ifdef RAL_DEBUG + SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, + "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); +#endif + if (bootverbose) + ieee80211_announce(ic); + + return 0; + +fail3: rt2860_free_rx_ring(sc, &sc->rxq); +fail2: while (--qid >= 0) + rt2860_free_tx_ring(sc, &sc->txq[qid]); +fail1: mtx_destroy(&sc->sc_mtx); + if_free(ifp); + return error; +} + +int +rt2860_detach(void *xsc) +{ + struct rt2860_softc *sc = xsc; + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + int qid; + + RAL_LOCK(sc); + rt2860_stop_locked(sc); + RAL_UNLOCK(sc); + + ieee80211_ifdetach(ic); + + for (qid = 0; qid < 6; qid++) + rt2860_free_tx_ring(sc, &sc->txq[qid]); + rt2860_free_rx_ring(sc, &sc->rxq); + rt2860_free_tx_pool(sc); + + if_free(ifp); + + mtx_destroy(&sc->sc_mtx); + + return 0; +} + +void +rt2860_shutdown(void *xsc) +{ + struct rt2860_softc *sc = xsc; + + rt2860_stop(sc); +} + +void +rt2860_suspend(void *xsc) +{ + struct rt2860_softc *sc = xsc; + + rt2860_stop(sc); +} + +void +rt2860_resume(void *xsc) +{ + struct rt2860_softc *sc = xsc; + struct ifnet *ifp = sc->sc_ifp; + + if (ifp->if_flags & IFF_UP) + rt2860_init(sc); +} + +static struct ieee80211vap * +rt2860_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, + enum ieee80211_opmode opmode, int flags, + const uint8_t bssid[IEEE80211_ADDR_LEN], + const uint8_t 
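+ /* (only one regular vap is supported; WDS vaps require a HostAP
+ * vap and always reuse its MAC address -- see the checks below) */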
mac[IEEE80211_ADDR_LEN]) +{ + struct ifnet *ifp = ic->ic_ifp; + struct rt2860_vap *rvp; + struct ieee80211vap *vap; + + switch (opmode) { + case IEEE80211_M_STA: + case IEEE80211_M_IBSS: + case IEEE80211_M_AHDEMO: + case IEEE80211_M_MONITOR: + case IEEE80211_M_HOSTAP: + case IEEE80211_M_MBSS: + /* XXXRP: TBD */ + if (!TAILQ_EMPTY(&ic->ic_vaps)) { + if_printf(ifp, "only 1 vap supported\n"); + return NULL; + } + if (opmode == IEEE80211_M_STA) + flags |= IEEE80211_CLONE_NOBEACONS; + break; + case IEEE80211_M_WDS: + if (TAILQ_EMPTY(&ic->ic_vaps) || + ic->ic_opmode != IEEE80211_M_HOSTAP) { + if_printf(ifp, "wds only supported in ap mode\n"); + return NULL; + } + /* + * Silently remove any request for a unique + * bssid; WDS vap's always share the local + * mac address. + */ + flags &= ~IEEE80211_CLONE_BSSID; + break; + default: + if_printf(ifp, "unknown opmode %d\n", opmode); + return NULL; + } + rvp = malloc(sizeof(struct rt2860_vap), M_80211_VAP, M_NOWAIT | M_ZERO); + if (rvp == NULL) + return NULL; + vap = &rvp->ral_vap; + ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); + + /* override state transition machine */ + rvp->ral_newstate = vap->iv_newstate; + vap->iv_newstate = rt2860_newstate; +#if 0 + vap->iv_update_beacon = rt2860_beacon_update; +#endif + + /* HW supports up to 255 STAs (0-254) in HostAP and IBSS modes */ + vap->iv_max_aid = min(IEEE80211_AID_MAX, RT2860_WCID_MAX); + + ieee80211_ratectl_init(vap); + /* complete setup */ + ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); + if (TAILQ_FIRST(&ic->ic_vaps) == vap) + ic->ic_opmode = opmode; + return vap; +} + +static void +rt2860_vap_delete(struct ieee80211vap *vap) +{ + struct rt2860_vap *rvp = RT2860_VAP(vap); + + ieee80211_ratectl_deinit(vap); + ieee80211_vap_detach(vap); + free(rvp, M_80211_VAP); +} + +static void +rt2860_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + if (error != 0) + return; + + KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); + + *(bus_addr_t *)arg = segs[0].ds_addr; +} + + +static int +rt2860_alloc_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring) +{ + int size, error; + + size = RT2860_TX_RING_COUNT * sizeof (struct rt2860_txd); + + error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 16, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + size, 1, size, 0, NULL, NULL, &ring->desc_dmat); + if (error != 0) { + device_printf(sc->sc_dev, "could not create desc DMA map\n"); + goto fail; + } + + error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->txd, + BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); + if (error != 0) { + device_printf(sc->sc_dev, "could not allocate DMA memory\n"); + goto fail; + } + + error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->txd, + size, rt2860_dma_map_addr, &ring->paddr, 0); + if (error != 0) { + device_printf(sc->sc_dev, "could not load desc DMA map\n"); + goto fail; + } + + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); + + return 0; + +fail: rt2860_free_tx_ring(sc, ring); + return error; +} + +void +rt2860_reset_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring) +{ + struct rt2860_tx_data *data; + int i; + + for (i = 0; i < RT2860_TX_RING_COUNT; i++) { + if ((data = ring->data[i]) == NULL) + continue; /* nothing mapped in this slot */ + + if (data->m != NULL) { + bus_dmamap_sync(sc->txwi_dmat, data->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->txwi_dmat, data->map); + m_freem(data->m); + data->m = 
NULL; + } + if (data->ni != NULL) { + ieee80211_free_node(data->ni); + data->ni = NULL; + } + + SLIST_INSERT_HEAD(&sc->data_pool, data, next); + ring->data[i] = NULL; + } + + ring->queued = 0; + ring->cur = ring->next = 0; +} + +void +rt2860_free_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring) +{ + struct rt2860_tx_data *data; + int i; + + if (ring->txd != NULL) { + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(ring->desc_dmat, ring->desc_map); + bus_dmamem_free(ring->desc_dmat, ring->txd, ring->desc_map); + } + if (ring->desc_dmat != NULL) + bus_dma_tag_destroy(ring->desc_dmat); + + for (i = 0; i < RT2860_TX_RING_COUNT; i++) { + if ((data = ring->data[i]) == NULL) + continue; /* nothing mapped in this slot */ + + if (data->m != NULL) { + bus_dmamap_sync(sc->txwi_dmat, data->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->txwi_dmat, data->map); + m_freem(data->m); + } + if (data->ni != NULL) + ieee80211_free_node(data->ni); + + SLIST_INSERT_HEAD(&sc->data_pool, data, next); + } +} + +/* + * Allocate a pool of TX Wireless Information blocks. + */ +int +rt2860_alloc_tx_pool(struct rt2860_softc *sc) +{ + caddr_t vaddr; + bus_addr_t paddr; + int i, size, error; + + size = RT2860_TX_POOL_COUNT * RT2860_TXWI_DMASZ; + + /* init data_pool early in case of failure.. */ + SLIST_INIT(&sc->data_pool); + + error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + size, 1, size, 0, NULL, NULL, &sc->txwi_dmat); + if (error != 0) { + device_printf(sc->sc_dev, "could not create txwi DMA tag\n"); + goto fail; + } + + error = bus_dmamem_alloc(sc->txwi_dmat, (void **)&sc->txwi_vaddr, + BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->txwi_map); + if (error != 0) { + device_printf(sc->sc_dev, "could not allocate DMA memory\n"); + goto fail; + } + + error = bus_dmamap_load(sc->txwi_dmat, sc->txwi_map, + sc->txwi_vaddr, size, rt2860_dma_map_addr, &paddr, 0); + if (error != 0) { + device_printf(sc->sc_dev, "could not load txwi DMA map\n"); + goto fail; + } + + bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE); + + vaddr = sc->txwi_vaddr; + for (i = 0; i < RT2860_TX_POOL_COUNT; i++) { + struct rt2860_tx_data *data = &sc->data[i]; + + error = bus_dmamap_create(sc->txwi_dmat, 0, &data->map); + if (error != 0) { + device_printf(sc->sc_dev, "could not create DMA map\n"); + goto fail; + } + data->txwi = (struct rt2860_txwi *)vaddr; + data->paddr = paddr; + vaddr += RT2860_TXWI_DMASZ; + paddr += RT2860_TXWI_DMASZ; + + SLIST_INSERT_HEAD(&sc->data_pool, data, next); + } + + return 0; + +fail: rt2860_free_tx_pool(sc); + return error; +} + +void +rt2860_free_tx_pool(struct rt2860_softc *sc) +{ + if (sc->txwi_vaddr != NULL) { + bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->txwi_dmat, sc->txwi_map); + bus_dmamem_free(sc->txwi_dmat, sc->txwi_vaddr, sc->txwi_map); + } + if (sc->txwi_dmat != NULL) + bus_dma_tag_destroy(sc->txwi_dmat); + + while (!SLIST_EMPTY(&sc->data_pool)) { + struct rt2860_tx_data *data; + data = SLIST_FIRST(&sc->data_pool); + bus_dmamap_destroy(sc->txwi_dmat, data->map); + SLIST_REMOVE_HEAD(&sc->data_pool, next); + } +} + +int +rt2860_alloc_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring) +{ + bus_addr_t physaddr; + int i, size, error; + + size = RT2860_RX_RING_COUNT * sizeof (struct rt2860_rxd); + + error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 16, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 
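+ /* lowaddr BUS_SPACE_MAXADDR_32BIT keeps the ring below 4GB for the
+ * 32-bit DMA engine; the alignment argument of 16 above matches the
+ * descriptor alignment the hardware expects */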
NULL, NULL, + size, 1, size, 0, NULL, NULL, &ring->desc_dmat); + if (error != 0) { + device_printf(sc->sc_dev, "could not create desc DMA tag\n"); + goto fail; + } + + error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->rxd, + BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); + if (error != 0) { + device_printf(sc->sc_dev, "could not allocate DMA memory\n"); + goto fail; + } + + error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->rxd, + size, rt2860_dma_map_addr, &ring->paddr, 0); + if (error != 0) { + device_printf(sc->sc_dev, "could not load desc DMA map\n"); + goto fail; + } + + error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, + 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); + if (error != 0) { + device_printf(sc->sc_dev, "could not create data DMA tag\n"); + goto fail; + } + + for (i = 0; i < RT2860_RX_RING_COUNT; i++) { + struct rt2860_rx_data *data = &ring->data[i]; + struct rt2860_rxd *rxd = &ring->rxd[i]; + + error = bus_dmamap_create(ring->data_dmat, 0, &data->map); + if (error != 0) { + device_printf(sc->sc_dev, "could not create DMA map\n"); + goto fail; + } + + data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (data->m == NULL) { + device_printf(sc->sc_dev, + "could not allocate rx mbuf\n"); + error = ENOMEM; + goto fail; + } + + error = bus_dmamap_load(ring->data_dmat, data->map, + mtod(data->m, void *), MCLBYTES, rt2860_dma_map_addr, + &physaddr, 0); + if (error != 0) { + device_printf(sc->sc_dev, + "could not load rx buf DMA map"); + goto fail; + } + + rxd->sdp0 = htole32(physaddr); + rxd->sdl0 = htole16(MCLBYTES); + } + + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); + + return 0; + +fail: rt2860_free_rx_ring(sc, ring); + return error; +} + +void +rt2860_reset_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring) +{ + int i; + + for (i = 0; i < RT2860_RX_RING_COUNT; i++) + ring->rxd[i].sdl0 &= ~htole16(RT2860_RX_DDONE); + + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); + + ring->cur = 0; +} + +void +rt2860_free_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring) +{ + int i; + + if (ring->rxd != NULL) { + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(ring->desc_dmat, ring->desc_map); + bus_dmamem_free(ring->desc_dmat, ring->rxd, ring->desc_map); + } + if (ring->desc_dmat != NULL) + bus_dma_tag_destroy(ring->desc_dmat); + + for (i = 0; i < RT2860_RX_RING_COUNT; i++) { + struct rt2860_rx_data *data = &ring->data[i]; + + if (data->m != NULL) { + bus_dmamap_sync(ring->data_dmat, data->map, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(ring->data_dmat, data->map); + m_freem(data->m); + } + if (data->map != NULL) + bus_dmamap_destroy(ring->data_dmat, data->map); + } + if (ring->data_dmat != NULL) + bus_dma_tag_destroy(ring->data_dmat); +} + +static void +rt2860_updatestats(struct rt2860_softc *sc) +{ + struct ieee80211com *ic = sc->sc_ifp->if_l2com; + + /* + * In IBSS or HostAP modes (when the hardware sends beacons), the + * MAC can run into a livelock and start sending CTS-to-self frames + * like crazy if protection is enabled. Fortunately, we can detect + * when such a situation occurs and reset the MAC. + */ + if (ic->ic_curmode != IEEE80211_M_STA) { + /* check if we're in a livelock situation.. */ + uint32_t tmp = RAL_READ(sc, RT2860_DEBUG); + if ((tmp & (1 << 29)) && (tmp & (1 << 7 | 1 << 5))) { + /* ..and reset MAC/BBP for a while.. 
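+ * (the recovery below asserts RT2860_MAC_SRST, pushes the write out
+ * with a barrier, waits briefly and then re-enables the Rx/Tx engines)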
*/ + DPRINTF(("CTS-to-self livelock detected\n")); + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_SRST); + RAL_BARRIER_WRITE(sc); + DELAY(1); + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, + RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); + } + } +} + +static void +rt2860_newassoc(struct ieee80211_node *ni, int isnew) +{ + struct ieee80211com *ic = ni->ni_ic; + struct rt2860_softc *sc = ic->ic_ifp->if_softc; + uint8_t wcid; + + wcid = IEEE80211_AID(ni->ni_associd); + if (isnew && ni->ni_associd != 0) { + sc->wcid2ni[wcid] = ni; + + /* init WCID table entry */ + RAL_WRITE_REGION_1(sc, RT2860_WCID_ENTRY(wcid), + ni->ni_macaddr, IEEE80211_ADDR_LEN); + } + DPRINTF(("new assoc isnew=%d addr=%s WCID=%d\n", + isnew, ether_sprintf(ni->ni_macaddr), wcid)); +} + +static void +rt2860_node_free(struct ieee80211_node *ni) +{ + struct ieee80211com *ic = ni->ni_ic; + struct rt2860_softc *sc = ic->ic_ifp->if_softc; + uint8_t wcid; + + if (ni->ni_associd != 0) { + wcid = IEEE80211_AID(ni->ni_associd); + + /* clear Rx WCID search table entry */ + RAL_SET_REGION_4(sc, RT2860_WCID_ENTRY(wcid), 0, 2); + } + sc->sc_node_free(ni); +} + +#ifdef IEEE80211_HT +static int +rt2860_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni, + uint8_t tid) +{ + struct rt2860_softc *sc = ic->ic_softc; + uint8_t wcid = ((struct rt2860_node *)ni)->wcid; + uint32_t tmp; + + /* update BA session mask */ + tmp = RAL_READ(sc, RT2860_WCID_ENTRY(wcid) + 4); + tmp |= (1 << tid) << 16; + RAL_WRITE(sc, RT2860_WCID_ENTRY(wcid) + 4, tmp); + return 0; +} + +static void +rt2860_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni, + uint8_t tid) +{ + struct rt2860_softc *sc = ic->ic_softc; + uint8_t wcid = ((struct rt2860_node *)ni)->wcid; + uint32_t tmp; + + /* update BA session mask */ + tmp = RAL_READ(sc, RT2860_WCID_ENTRY(wcid) + 4); + tmp &= ~((1 << tid) << 16); + RAL_WRITE(sc, RT2860_WCID_ENTRY(wcid) + 4, tmp); +} +#endif + +int +rt2860_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) +{ + struct rt2860_vap *rvp = RT2860_VAP(vap); + struct ieee80211com *ic = vap->iv_ic; + struct rt2860_softc *sc = ic->ic_ifp->if_softc; + uint32_t tmp; + int error; + + if (vap->iv_state == IEEE80211_S_RUN) { + /* turn link LED off */ + rt2860_set_leds(sc, RT2860_LED_RADIO); + } + + if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { + /* abort TSF synchronization */ + tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG); + RAL_WRITE(sc, RT2860_BCN_TIME_CFG, + tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | + RT2860_TBTT_TIMER_EN)); + } + + rt2860_set_gp_timer(sc, 0); + + error = rvp->ral_newstate(vap, nstate, arg); + if (error != 0) + return (error); + + if (nstate == IEEE80211_S_RUN) { + struct ieee80211_node *ni = vap->iv_bss; + + if (ic->ic_opmode != IEEE80211_M_MONITOR) { + rt2860_enable_mrr(sc); + rt2860_set_txpreamble(sc); + rt2860_set_basicrates(sc, &ni->ni_rates); + rt2860_set_bssid(sc, ni->ni_bssid); + } + + if (vap->iv_opmode == IEEE80211_M_HOSTAP || + vap->iv_opmode == IEEE80211_M_IBSS || + vap->iv_opmode == IEEE80211_M_MBSS) { + error = rt2860_setup_beacon(sc, vap); + if (error != 0) + return error; + } + + if (ic->ic_opmode != IEEE80211_M_MONITOR) { + rt2860_enable_tsf_sync(sc); + rt2860_set_gp_timer(sc, 500); + } + + /* turn link LED on */ + rt2860_set_leds(sc, RT2860_LED_RADIO | + (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) ? + RT2860_LED_LINK_2GHZ : RT2860_LED_LINK_5GHZ)); + } + return error; +} + +/* Read 16-bit from eFUSE ROM (>=RT3071 only.) 
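+ *
+ * Worked example of the lookup done below: word address 5 becomes
+ * byte address 0xA after 'addr *= 2'; the 16-byte block starting at
+ * 0 is latched into EFUSE_DATA[0-3], the register picked is
+ * RT3070_EFUSE_DATA3 - (0xA & 0xC), i.e. EFUSE_DATA1 (bytes B A 9 8),
+ * and since (0xA & 2) is set the upper halfword is returned.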
*/ +static uint16_t +rt3090_efuse_read_2(struct rt2860_softc *sc, uint16_t addr) +{ + uint32_t tmp; + uint16_t reg; + int ntries; + + addr *= 2; + /*- + * Read one 16-byte block into registers EFUSE_DATA[0-3]: + * DATA0: F E D C + * DATA1: B A 9 8 + * DATA2: 7 6 5 4 + * DATA3: 3 2 1 0 + */ + tmp = RAL_READ(sc, RT3070_EFUSE_CTRL); + tmp &= ~(RT3070_EFSROM_MODE_MASK | RT3070_EFSROM_AIN_MASK); + tmp |= (addr & ~0xf) << RT3070_EFSROM_AIN_SHIFT | RT3070_EFSROM_KICK; + RAL_WRITE(sc, RT3070_EFUSE_CTRL, tmp); + for (ntries = 0; ntries < 500; ntries++) { + tmp = RAL_READ(sc, RT3070_EFUSE_CTRL); + if (!(tmp & RT3070_EFSROM_KICK)) + break; + DELAY(2); + } + if (ntries == 500) + return 0xffff; + + if ((tmp & RT3070_EFUSE_AOUT_MASK) == RT3070_EFUSE_AOUT_MASK) + return 0xffff; /* address not found */ + + /* determine to which 32-bit register our 16-bit word belongs */ + reg = RT3070_EFUSE_DATA3 - (addr & 0xc); + tmp = RAL_READ(sc, reg); + + return (addr & 2) ? tmp >> 16 : tmp & 0xffff; +} + +/* + * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46, + * 93C66 or 93C86). + */ +static uint16_t +rt2860_eeprom_read_2(struct rt2860_softc *sc, uint16_t addr) +{ + uint32_t tmp; + uint16_t val; + int n; + + /* clock C once before the first command */ + RT2860_EEPROM_CTL(sc, 0); + + RT2860_EEPROM_CTL(sc, RT2860_S); + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C); + RT2860_EEPROM_CTL(sc, RT2860_S); + + /* write start bit (1) */ + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D); + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D | RT2860_C); + + /* write READ opcode (10) */ + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D); + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D | RT2860_C); + RT2860_EEPROM_CTL(sc, RT2860_S); + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C); + + /* write address (A5-A0 or A7-A0) */ + n = ((RAL_READ(sc, RT2860_PCI_EECTRL) & 0x30) == 0) ? 
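+ /* EECTRL bits 4-5 apparently encode the EEPROM size: 0 means a
+ * 93C46 with 6 address bits (A5-A0, n = 5), anything else uses 8
+ * address bits (A7-A0, n = 7) */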
5 : 7; + for (; n >= 0; n--) { + RT2860_EEPROM_CTL(sc, RT2860_S | + (((addr >> n) & 1) << RT2860_SHIFT_D)); + RT2860_EEPROM_CTL(sc, RT2860_S | + (((addr >> n) & 1) << RT2860_SHIFT_D) | RT2860_C); + } + + RT2860_EEPROM_CTL(sc, RT2860_S); + + /* read data Q15-Q0 */ + val = 0; + for (n = 15; n >= 0; n--) { + RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C); + tmp = RAL_READ(sc, RT2860_PCI_EECTRL); + val |= ((tmp & RT2860_Q) >> RT2860_SHIFT_Q) << n; + RT2860_EEPROM_CTL(sc, RT2860_S); + } + + RT2860_EEPROM_CTL(sc, 0); + + /* clear Chip Select and clock C */ + RT2860_EEPROM_CTL(sc, RT2860_S); + RT2860_EEPROM_CTL(sc, 0); + RT2860_EEPROM_CTL(sc, RT2860_C); + + return val; +} + +static __inline uint16_t +rt2860_srom_read(struct rt2860_softc *sc, uint8_t addr) +{ + /* either eFUSE ROM or EEPROM */ + return sc->sc_srom_read(sc, addr); +} + +static void +rt2860_intr_coherent(struct rt2860_softc *sc) +{ + uint32_t tmp; + + /* DMA finds data coherent event when checking the DDONE bit */ + + DPRINTF(("Tx/Rx Coherent interrupt\n")); + + /* restart DMA engine */ + tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); + tmp &= ~(RT2860_TX_WB_DDONE | RT2860_RX_DMA_EN | RT2860_TX_DMA_EN); + RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); + + (void)rt2860_txrx_enable(sc); +} + +static void +rt2860_drain_stats_fifo(struct rt2860_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211_node *ni; + uint32_t stat; + int retrycnt; + uint8_t wcid, mcs, pid; + + /* drain Tx status FIFO (maxsize = 16) */ + while ((stat = RAL_READ(sc, RT2860_TX_STAT_FIFO)) & RT2860_TXQ_VLD) { + DPRINTFN(4, ("tx stat 0x%08x\n", stat)); + + wcid = (stat >> RT2860_TXQ_WCID_SHIFT) & 0xff; + ni = sc->wcid2ni[wcid]; + + /* if no ACK was requested, no feedback is available */ + if (!(stat & RT2860_TXQ_ACKREQ) || wcid == 0xff || ni == NULL) + continue; + + /* update per-STA AMRR stats */ + if (stat & RT2860_TXQ_OK) { + /* + * Check if there were retries, ie if the Tx success + * rate is different from the requested rate. Note + * that it works only because we do not allow rate + * fallback from OFDM to CCK. 
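+ * The PID field is written at Tx time as the requested rate index
+ * plus one, so if the MCS the frame finally went out at no longer
+ * matches (mcs + 1 != pid below), at least one fallback retry
+ * happened.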
+			 */
+			mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f;
+			pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf;
+			if (mcs + 1 != pid)
+				retrycnt = 1;
+			else
+				retrycnt = 0;
+			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
+			    IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL);
+		} else {
+			/* no retry count is reported; assume one retry */
+			retrycnt = 1;
+			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
+			    IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL);
+			ifp->if_oerrors++;
+		}
+	}
+}
+
+static void
+rt2860_tx_intr(struct rt2860_softc *sc, int qid)
+{
+	struct ifnet *ifp = sc->sc_ifp;
+	struct rt2860_tx_ring *ring = &sc->txq[qid];
+	uint32_t hw;
+
+	rt2860_drain_stats_fifo(sc);
+
+	hw = RAL_READ(sc, RT2860_TX_DTX_IDX(qid));
+	while (ring->next != hw) {
+		struct rt2860_tx_data *data = ring->data[ring->next];
+
+		if (data != NULL) {
+			bus_dmamap_sync(sc->txwi_dmat, data->map,
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->txwi_dmat, data->map);
+			if (data->m->m_flags & M_TXCB) {
+				ieee80211_process_callback(data->ni, data->m,
+				    0);
+			}
+			m_freem(data->m);
+			ieee80211_free_node(data->ni);
+			data->m = NULL;
+			data->ni = NULL;
+
+			SLIST_INSERT_HEAD(&sc->data_pool, data, next);
+			ring->data[ring->next] = NULL;
+
+			ifp->if_opackets++;
+		}
+		ring->queued--;
+		ring->next = (ring->next + 1) % RT2860_TX_RING_COUNT;
+	}
+
+	sc->sc_tx_timer = 0;
+	if (ring->queued < RT2860_TX_RING_COUNT)
+		sc->qfullmsk &= ~(1 << qid);
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	rt2860_start_locked(ifp);
+}
+
+/*
+ * Return the Rx chain with the highest RSSI for a given frame.
+ */
+static __inline uint8_t
+rt2860_maxrssi_chain(struct rt2860_softc *sc, const struct rt2860_rxwi *rxwi)
+{
+	uint8_t rxchain = 0;
+
+	if (sc->nrxchains > 1) {
+		if (rxwi->rssi[1] > rxwi->rssi[rxchain])
+			rxchain = 1;
+		if (sc->nrxchains > 2)
+			if (rxwi->rssi[2] > rxwi->rssi[rxchain])
+				rxchain = 2;
+	}
+	return rxchain;
+}
+
+static void
+rt2860_rx_intr(struct rt2860_softc *sc)
+{
+	struct rt2860_rx_radiotap_header *tap;
+	struct ifnet *ifp = sc->sc_ifp;
+	struct ieee80211com *ic = ifp->if_l2com;
+	struct ieee80211_frame *wh;
+	struct ieee80211_node *ni;
+	struct mbuf *m, *m1;
+	bus_addr_t physaddr;
+	uint32_t hw;
+	uint16_t phy;
+	uint8_t ant;
+	int8_t rssi, nf;
+	int error;
+
+	hw = RAL_READ(sc, RT2860_FS_DRX_IDX) & 0xfff;
+	while (sc->rxq.cur != hw) {
+		struct rt2860_rx_data *data = &sc->rxq.data[sc->rxq.cur];
+		struct rt2860_rxd *rxd = &sc->rxq.rxd[sc->rxq.cur];
+		struct rt2860_rxwi *rxwi;
+
+		bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
+		    BUS_DMASYNC_POSTREAD);
+
+		if (__predict_false(!(rxd->sdl0 & htole16(RT2860_RX_DDONE)))) {
+			DPRINTF(("RXD DDONE bit not set!\n"));
+			break;	/* should not happen */
+		}
+
+		if (__predict_false(rxd->flags &
+		    htole32(RT2860_RX_CRCERR | RT2860_RX_ICVERR))) {
+			ifp->if_ierrors++;
+			goto skip;
+		}
+
+#ifdef HW_CRYPTO
+		if (__predict_false(rxd->flags & htole32(RT2860_RX_MICERR))) {
+			/* report MIC failures to net80211 for TKIP */
+			ic->ic_stats.is_rx_locmicfail++;
+			ieee80211_michael_mic_failure(ic, 0/* XXX */);
+			ifp->if_ierrors++;
+			goto skip;
+		}
+#endif
+
+		m1 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+		if (__predict_false(m1 == NULL)) {
+			ifp->if_ierrors++;
+			goto skip;
+		}
+
+		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->rxq.data_dmat, data->map);
+
+		error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
+		    mtod(m1, void *), MCLBYTES, rt2860_dma_map_addr,
+		    &physaddr, 0);
+		if (__predict_false(error != 0)) {
+			m_freem(m1);
+
+			/* try to reload the old mbuf */
+			error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
+			    mtod(data->m, void
*), MCLBYTES, + rt2860_dma_map_addr, &physaddr, 0); + if (__predict_false(error != 0)) { + panic("%s: could not load old rx mbuf", + device_get_name(sc->sc_dev)); + } + /* physical address may have changed */ + rxd->sdp0 = htole32(physaddr); + ifp->if_ierrors++; + goto skip; + } + + /* + * New mbuf successfully loaded, update Rx ring and continue + * processing. + */ + m = data->m; + data->m = m1; + rxd->sdp0 = htole32(physaddr); + + rxwi = mtod(m, struct rt2860_rxwi *); + + /* finalize mbuf */ + m->m_pkthdr.rcvif = ifp; + m->m_data = (caddr_t)(rxwi + 1); + m->m_pkthdr.len = m->m_len = le16toh(rxwi->len) & 0xfff; + + wh = mtod(m, struct ieee80211_frame *); +#ifdef HW_CRYPTO + if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + /* frame is decrypted by hardware */ + wh->i_fc[1] &= ~IEEE80211_FC1_WEP; + } +#endif + + /* HW may insert 2 padding bytes after 802.11 header */ + if (rxd->flags & htole32(RT2860_RX_L2PAD)) { + u_int hdrlen = ieee80211_hdrsize(wh); + ovbcopy(wh, (caddr_t)wh + 2, hdrlen); + m->m_data += 2; + wh = mtod(m, struct ieee80211_frame *); + } + + ant = rt2860_maxrssi_chain(sc, rxwi); + rssi = rt2860_rssi2dbm(sc, rxwi->rssi[ant], ant); + nf = RT2860_NOISE_FLOOR; + + if (ieee80211_radiotap_active(ic)) { + tap = &sc->sc_rxtap; + tap->wr_flags = 0; + tap->wr_antenna = ant; + tap->wr_antsignal = nf + rssi; + tap->wr_antnoise = nf; + /* in case it can't be found below */ + tap->wr_rate = 2; + phy = le16toh(rxwi->phy); + switch (phy & RT2860_PHY_MODE) { + case RT2860_PHY_CCK: + switch ((phy & RT2860_PHY_MCS) & ~RT2860_PHY_SHPRE) { + case 0: tap->wr_rate = 2; break; + case 1: tap->wr_rate = 4; break; + case 2: tap->wr_rate = 11; break; + case 3: tap->wr_rate = 22; break; + } + if (phy & RT2860_PHY_SHPRE) + tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + break; + case RT2860_PHY_OFDM: + switch (phy & RT2860_PHY_MCS) { + case 0: tap->wr_rate = 12; break; + case 1: tap->wr_rate = 18; break; + case 2: tap->wr_rate = 24; break; + case 3: tap->wr_rate = 36; break; + case 4: tap->wr_rate = 48; break; + case 5: tap->wr_rate = 72; break; + case 6: tap->wr_rate = 96; break; + case 7: tap->wr_rate = 108; break; + } + break; + } + } + + RAL_UNLOCK(sc); + wh = mtod(m, struct ieee80211_frame *); + + /* send the frame to the 802.11 layer */ + ni = ieee80211_find_rxnode(ic, + (struct ieee80211_frame_min *)wh); + if (ni != NULL) { + (void)ieee80211_input(ni, m, rssi - nf, nf); + ieee80211_free_node(ni); + } else + (void)ieee80211_input_all(ic, m, rssi - nf, nf); + + RAL_LOCK(sc); + +skip: rxd->sdl0 &= ~htole16(RT2860_RX_DDONE); + + bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, + BUS_DMASYNC_PREWRITE); + + sc->rxq.cur = (sc->rxq.cur + 1) % RT2860_RX_RING_COUNT; + } + + /* tell HW what we have processed */ + RAL_WRITE(sc, RT2860_RX_CALC_IDX, + (sc->rxq.cur - 1) % RT2860_RX_RING_COUNT); +} + +static void +rt2860_tbtt_intr(struct rt2860_softc *sc) +{ +#if 0 + struct ieee80211com *ic = &sc->sc_ic; + +#ifndef IEEE80211_STA_ONLY + if (ic->ic_opmode == IEEE80211_M_HOSTAP) { + /* one less beacon until next DTIM */ + if (ic->ic_dtim_count == 0) + ic->ic_dtim_count = ic->ic_dtim_period - 1; + else + ic->ic_dtim_count--; + + /* update dynamic parts of beacon */ + rt2860_setup_beacon(sc); + + /* flush buffered multicast frames */ + if (ic->ic_dtim_count == 0) + ieee80211_notify_dtim(ic); + } +#endif + /* check if protection mode has changed */ + if ((sc->sc_ic_flags ^ ic->ic_flags) & IEEE80211_F_USEPROT) { + rt2860_updateprot(ic); + sc->sc_ic_flags = ic->ic_flags; + } +#endif +} + +static void 
+rt2860_gp_intr(struct rt2860_softc *sc) +{ + struct ieee80211com *ic = sc->sc_ifp->if_l2com; + struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); + + DPRINTFN(2, ("GP timeout state=%d\n", vap->iv_state)); + + if (vap->iv_state == IEEE80211_S_RUN) + rt2860_updatestats(sc); +} + +void +rt2860_intr(void *arg) +{ + struct rt2860_softc *sc = arg; + uint32_t r; + + RAL_LOCK(sc); + + r = RAL_READ(sc, RT2860_INT_STATUS); + if (__predict_false(r == 0xffffffff)) { + RAL_UNLOCK(sc); + return; /* device likely went away */ + } + if (r == 0) { + RAL_UNLOCK(sc); + return; /* not for us */ + } + + /* acknowledge interrupts */ + RAL_WRITE(sc, RT2860_INT_STATUS, r); + + if (r & RT2860_TX_RX_COHERENT) + rt2860_intr_coherent(sc); + + if (r & RT2860_MAC_INT_2) /* TX status */ + rt2860_drain_stats_fifo(sc); + + if (r & RT2860_TX_DONE_INT5) + rt2860_tx_intr(sc, 5); + + if (r & RT2860_RX_DONE_INT) + rt2860_rx_intr(sc); + + if (r & RT2860_TX_DONE_INT4) + rt2860_tx_intr(sc, 4); + + if (r & RT2860_TX_DONE_INT3) + rt2860_tx_intr(sc, 3); + + if (r & RT2860_TX_DONE_INT2) + rt2860_tx_intr(sc, 2); + + if (r & RT2860_TX_DONE_INT1) + rt2860_tx_intr(sc, 1); + + if (r & RT2860_TX_DONE_INT0) + rt2860_tx_intr(sc, 0); + + if (r & RT2860_MAC_INT_0) /* TBTT */ + rt2860_tbtt_intr(sc); + + if (r & RT2860_MAC_INT_3) /* Auto wakeup */ + /* TBD wakeup */; + + if (r & RT2860_MAC_INT_4) /* GP timer */ + rt2860_gp_intr(sc); + + RAL_UNLOCK(sc); +} + +static int +rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + struct ieee80211vap *vap = ni->ni_vap; + struct rt2860_tx_ring *ring; + struct rt2860_tx_data *data; + struct rt2860_txd *txd; + struct rt2860_txwi *txwi; + struct ieee80211_frame *wh; + const struct ieee80211_txparam *tp; + struct ieee80211_key *k; + struct mbuf *m1; + bus_dma_segment_t segs[RT2860_MAX_SCATTER]; + bus_dma_segment_t *seg; + u_int hdrlen; + uint16_t qos, dur; + uint8_t type, qsel, mcs, pid, tid, qid; + int i, nsegs, ntxds, pad, rate, ridx, error; + + /* the data pool contains at least one element, pick the first */ + data = SLIST_FIRST(&sc->data_pool); + + wh = mtod(m, struct ieee80211_frame *); + + if (wh->i_fc[1] & IEEE80211_FC1_WEP) { + k = ieee80211_crypto_encap(ni, m); + if (k == NULL) { + m_freem(m); + return ENOBUFS; + } + + /* packet header may have moved, reset our local pointer */ + wh = mtod(m, struct ieee80211_frame *); + } + + hdrlen = ieee80211_anyhdrsize(wh); + type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; + + tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; + if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { + rate = tp->mcastrate; + } else if (m->m_flags & M_EAPOL) { + rate = tp->mgmtrate; + } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { + rate = tp->ucastrate; + } else { + (void) ieee80211_ratectl_rate(ni, NULL, 0); + rate = ni->ni_txrate; + } + rate &= IEEE80211_RATE_VAL; + + qid = M_WME_GETAC(m); + if (IEEE80211_QOS_HAS_SEQ(wh)) { + qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; + tid = qos & IEEE80211_QOS_TID; + } else { + qos = 0; + tid = 0; + } + ring = &sc->txq[qid]; + ridx = ic->ic_rt->rateCodeToIndex[rate]; + + /* get MCS code from rate index */ + mcs = rt2860_rates[ridx].mcs; + + /* setup TX Wireless Information */ + txwi = data->txwi; + txwi->flags = 0; + /* let HW generate seq numbers for non-QoS frames */ + txwi->xflags = qos ? 
0 : RT2860_TX_NSEQ; + if (type == IEEE80211_FC0_TYPE_DATA) + txwi->wcid = IEEE80211_AID(ni->ni_associd); + else + txwi->wcid = 0xff; + txwi->len = htole16(m->m_pkthdr.len); + if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { + txwi->phy = htole16(RT2860_PHY_CCK); + if (ridx != RT2860_RIDX_CCK1 && + (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) + mcs |= RT2860_PHY_SHPRE; + } else + txwi->phy = htole16(RT2860_PHY_OFDM); + txwi->phy |= htole16(mcs); + + /* + * We store the MCS code into the driver-private PacketID field. + * The PacketID is latched into TX_STAT_FIFO when Tx completes so + * that we know at which initial rate the frame was transmitted. + * We add 1 to the MCS code because setting the PacketID field to + * 0 means that we don't want feedback in TX_STAT_FIFO. + */ + pid = (mcs + 1) & 0xf; + txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT); + + /* check if RTS/CTS or CTS-to-self protection is required */ + if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && + (m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold || + ((ic->ic_flags & IEEE80211_F_USEPROT) && + rt2860_rates[ridx].phy == IEEE80211_T_OFDM))) + txwi->txop = RT2860_TX_TXOP_HT; + else + txwi->txop = RT2860_TX_TXOP_BACKOFF; + + if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && + (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != + IEEE80211_QOS_ACKPOLICY_NOACK)) { + txwi->xflags |= RT2860_TX_ACK; + + if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) + dur = rt2860_rates[ridx].sp_ack_dur; + else + dur = rt2860_rates[ridx].lp_ack_dur; + *(uint16_t *)wh->i_dur = htole16(dur); + } + /* ask MAC to insert timestamp into probe responses */ + if ((wh->i_fc[0] & + (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == + (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) + /* NOTE: beacons do not pass through tx_data() */ + txwi->flags |= RT2860_TX_TS; + + if (ieee80211_radiotap_active_vap(vap)) { + struct rt2860_tx_radiotap_header *tap = &sc->sc_txtap; + + tap->wt_flags = 0; + tap->wt_rate = rate; + if (mcs & RT2860_PHY_SHPRE) + tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + ieee80211_radiotap_tx(vap, m); + } + + pad = (hdrlen + 3) & ~3; + + /* copy and trim 802.11 header */ + memcpy(txwi + 1, wh, hdrlen); + m_adj(m, hdrlen); + + error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs, + &nsegs, 0); + if (__predict_false(error != 0 && error != EFBIG)) { + device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", + error); + m_freem(m); + return error; + } + if (__predict_true(error == 0)) { + /* determine how many TXDs are required */ + ntxds = 1 + (nsegs / 2); + + if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { + /* not enough free TXDs, force mbuf defrag */ + bus_dmamap_unload(sc->txwi_dmat, data->map); + error = EFBIG; + } + } + if (__predict_false(error != 0)) { + m1 = m_defrag(m, M_DONTWAIT); + if (m1 == NULL) { + device_printf(sc->sc_dev, + "could not defragment mbuf\n"); + m_freem(m); + return ENOBUFS; + } + m = m1; + + error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, + segs, &nsegs, 0); + if (__predict_false(error != 0)) { + device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", + error); + m_freem(m); + return error; + } + + /* determine how many TXDs are now required */ + ntxds = 1 + (nsegs / 2); + + if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { + /* this is a hopeless case, drop the mbuf! */ + bus_dmamap_unload(sc->txwi_dmat, data->map); + m_freem(m); + return ENOBUFS; + } + } + + qsel = (qid < WME_NUM_AC) ? 
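+	    /*
+	     * qid comes from M_WME_GETAC() above and is always a WME AC
+	     * (0-3) in this path, so the frame goes to one of the EDCA
+	     * queues; RT2860_TX_QSEL_MGMT would select the management
+	     * queue.
+	     */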
RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_MGMT; + + /* first segment is TXWI + 802.11 header */ + txd = &ring->txd[ring->cur]; + txd->sdp0 = htole32(data->paddr); + txd->sdl0 = htole16(sizeof (struct rt2860_txwi) + pad); + txd->flags = qsel; + + /* setup payload segments */ + seg = &segs[0]; + for (i = nsegs; i >= 2; i -= 2) { + txd->sdp1 = htole32(seg->ds_addr); + txd->sdl1 = htole16(seg->ds_len); + seg++; + ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; + /* grab a new Tx descriptor */ + txd = &ring->txd[ring->cur]; + txd->sdp0 = htole32(seg->ds_addr); + txd->sdl0 = htole16(seg->ds_len); + txd->flags = qsel; + seg++; + } + /* finalize last segment */ + if (i > 0) { + txd->sdp1 = htole32(seg->ds_addr); + txd->sdl1 = htole16(seg->ds_len | RT2860_TX_LS1); + } else { + txd->sdl0 |= htole16(RT2860_TX_LS0); + txd->sdl1 = 0; + } + + /* remove from the free pool and link it into the SW Tx slot */ + SLIST_REMOVE_HEAD(&sc->data_pool, next); + data->m = m; + data->ni = ni; + ring->data[ring->cur] = data; + + bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); + + DPRINTFN(4, ("sending frame qid=%d wcid=%d nsegs=%d ridx=%d\n", + qid, txwi->wcid, nsegs, ridx)); + + ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; + ring->queued += ntxds; + if (ring->queued >= RT2860_TX_RING_COUNT) + sc->qfullmsk |= 1 << qid; + + /* kick Tx */ + RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), ring->cur); + + return 0; +} + +static int +rt2860_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, + const struct ieee80211_bpf_params *params) +{ + struct ieee80211com *ic = ni->ni_ic; + struct ifnet *ifp = ic->ic_ifp; + struct rt2860_softc *sc = ifp->if_softc; + int error; + + RAL_LOCK(sc); + + /* prevent management frames from being sent if we're not ready */ + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { + RAL_UNLOCK(sc); + m_freem(m); + ieee80211_free_node(ni); + return ENETDOWN; + } + if (params == NULL) { + /* + * Legacy path; interpret frame contents to decide + * precisely how to send the frame. + */ + error = rt2860_tx(sc, m, ni); + } else { + /* + * Caller supplied explicit parameters to use in + * sending the frame. + */ + error = rt2860_tx_raw(sc, m, ni, params); + } + if (error != 0) { + /* NB: m is reclaimed on tx failure */ + ieee80211_free_node(ni); + ifp->if_oerrors++; + } + sc->sc_tx_timer = 5; + RAL_UNLOCK(sc); + return error; +} + +static int +rt2860_tx_raw(struct rt2860_softc *sc, struct mbuf *m, + struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + struct ieee80211vap *vap = ni->ni_vap; + struct rt2860_tx_ring *ring; + struct rt2860_tx_data *data; + struct rt2860_txd *txd; + struct rt2860_txwi *txwi; + struct ieee80211_frame *wh; + struct mbuf *m1; + bus_dma_segment_t segs[RT2860_MAX_SCATTER]; + bus_dma_segment_t *seg; + u_int hdrlen; + uint16_t dur; + uint8_t type, qsel, mcs, pid, tid, qid; + int i, nsegs, ntxds, pad, rate, ridx, error; + + /* the data pool contains at least one element, pick the first */ + data = SLIST_FIRST(&sc->data_pool); + + wh = mtod(m, struct ieee80211_frame *); + hdrlen = ieee80211_hdrsize(wh); + type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; + + /* Choose a TX rate index. */ + rate = params->ibp_rate0; + ridx = ic->ic_rt->rateCodeToIndex[rate]; + if (ridx == (uint8_t)-1) { + /* XXX fall back to mcast/mgmt rate? 
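+		 * For now the frame is dropped; returning EINVAL makes
+		 * rt2860_raw_xmit() reclaim the node reference and count
+		 * an output error.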
*/ + m_freem(m); + return EINVAL; + } + + qid = params->ibp_pri & 3; + tid = 0; + ring = &sc->txq[qid]; + + /* get MCS code from rate index */ + mcs = rt2860_rates[ridx].mcs; + + /* setup TX Wireless Information */ + txwi = data->txwi; + txwi->flags = 0; + /* let HW generate seq numbers for non-QoS frames */ + txwi->xflags = params->ibp_pri & 3 ? 0 : RT2860_TX_NSEQ; + txwi->wcid = 0xff; + txwi->len = htole16(m->m_pkthdr.len); + if (rt2860_rates[ridx].phy == IEEE80211_T_DS) { + txwi->phy = htole16(RT2860_PHY_CCK); + if (ridx != RT2860_RIDX_CCK1 && + (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) + mcs |= RT2860_PHY_SHPRE; + } else + txwi->phy = htole16(RT2860_PHY_OFDM); + txwi->phy |= htole16(mcs); + + /* + * We store the MCS code into the driver-private PacketID field. + * The PacketID is latched into TX_STAT_FIFO when Tx completes so + * that we know at which initial rate the frame was transmitted. + * We add 1 to the MCS code because setting the PacketID field to + * 0 means that we don't want feedback in TX_STAT_FIFO. + */ + pid = (mcs + 1) & 0xf; + txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT); + + /* check if RTS/CTS or CTS-to-self protection is required */ + if (params->ibp_flags & IEEE80211_BPF_RTS || + params->ibp_flags & IEEE80211_BPF_CTS) + txwi->txop = RT2860_TX_TXOP_HT; + else + txwi->txop = RT2860_TX_TXOP_BACKOFF; + if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) { + txwi->xflags |= RT2860_TX_ACK; + + if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) + dur = rt2860_rates[ridx].sp_ack_dur; + else + dur = rt2860_rates[ridx].lp_ack_dur; + *(uint16_t *)wh->i_dur = htole16(dur); + } + /* ask MAC to insert timestamp into probe responses */ + if ((wh->i_fc[0] & + (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == + (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) + /* NOTE: beacons do not pass through tx_data() */ + txwi->flags |= RT2860_TX_TS; + + if (ieee80211_radiotap_active_vap(vap)) { + struct rt2860_tx_radiotap_header *tap = &sc->sc_txtap; + + tap->wt_flags = 0; + tap->wt_rate = rate; + if (mcs & RT2860_PHY_SHPRE) + tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + ieee80211_radiotap_tx(vap, m); + } + + pad = (hdrlen + 3) & ~3; + + /* copy and trim 802.11 header */ + memcpy(txwi + 1, wh, hdrlen); + m_adj(m, hdrlen); + + error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs, + &nsegs, 0); + if (__predict_false(error != 0 && error != EFBIG)) { + device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", + error); + m_freem(m); + return error; + } + if (__predict_true(error == 0)) { + /* determine how many TXDs are required */ + ntxds = 1 + (nsegs / 2); + + if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { + /* not enough free TXDs, force mbuf defrag */ + bus_dmamap_unload(sc->txwi_dmat, data->map); + error = EFBIG; + } + } + if (__predict_false(error != 0)) { + m1 = m_defrag(m, M_DONTWAIT); + if (m1 == NULL) { + device_printf(sc->sc_dev, + "could not defragment mbuf\n"); + m_freem(m); + return ENOBUFS; + } + m = m1; + + error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, + segs, &nsegs, 0); + if (__predict_false(error != 0)) { + device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", + error); + m_freem(m); + return error; + } + + /* determine how many TXDs are now required */ + ntxds = 1 + (nsegs / 2); + + if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) { + /* this is a hopeless case, drop the mbuf! */ + bus_dmamap_unload(sc->txwi_dmat, data->map); + m_freem(m); + return ENOBUFS; + } + } + + qsel = (qid < WME_NUM_AC) ? 
RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_MGMT; + + /* first segment is TXWI + 802.11 header */ + txd = &ring->txd[ring->cur]; + txd->sdp0 = htole32(data->paddr); + txd->sdl0 = htole16(sizeof (struct rt2860_txwi) + pad); + txd->flags = qsel; + + /* setup payload segments */ + seg = &segs[0]; + for (i = nsegs; i >= 2; i -= 2) { + txd->sdp1 = htole32(seg->ds_addr); + txd->sdl1 = htole16(seg->ds_len); + seg++; + ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; + /* grab a new Tx descriptor */ + txd = &ring->txd[ring->cur]; + txd->sdp0 = htole32(seg->ds_addr); + txd->sdl0 = htole16(seg->ds_len); + txd->flags = qsel; + seg++; + } + /* finalize last segment */ + if (i > 0) { + txd->sdp1 = htole32(seg->ds_addr); + txd->sdl1 = htole16(seg->ds_len | RT2860_TX_LS1); + } else { + txd->sdl0 |= htole16(RT2860_TX_LS0); + txd->sdl1 = 0; + } + + /* remove from the free pool and link it into the SW Tx slot */ + SLIST_REMOVE_HEAD(&sc->data_pool, next); + data->m = m; + data->ni = ni; + ring->data[ring->cur] = data; + + bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); + + DPRINTFN(4, ("sending frame qid=%d wcid=%d nsegs=%d ridx=%d\n", + qid, txwi->wcid, nsegs, ridx)); + + ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT; + ring->queued += ntxds; + if (ring->queued >= RT2860_TX_RING_COUNT) + sc->qfullmsk |= 1 << qid; + + /* kick Tx */ + RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), ring->cur); + + return 0; +} + +static void +rt2860_start(struct ifnet *ifp) +{ + struct rt2860_softc *sc = ifp->if_softc; + + RAL_LOCK(sc); + rt2860_start_locked(ifp); + RAL_UNLOCK(sc); +} + +static void +rt2860_start_locked(struct ifnet *ifp) +{ + struct rt2860_softc *sc = ifp->if_softc; + struct ieee80211_node *ni; + struct mbuf *m; + + RAL_LOCK_ASSERT(sc); + + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || + (ifp->if_drv_flags & IFF_DRV_OACTIVE)) + return; + + for (;;) { + if (SLIST_EMPTY(&sc->data_pool) || sc->qfullmsk != 0) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + IFQ_DRV_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) + break; + ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; + if (rt2860_tx(sc, m, ni) != 0) { + ieee80211_free_node(ni); + ifp->if_oerrors++; + continue; + } + sc->sc_tx_timer = 5; + } +} + +static void +rt2860_watchdog(void *arg) +{ + struct rt2860_softc *sc = arg; + struct ifnet *ifp = sc->sc_ifp; + + RAL_LOCK_ASSERT(sc); + + KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); + + if (sc->sc_invalid) /* card ejected */ + return; + + if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { + if_printf(ifp, "device timeout\n"); + rt2860_stop_locked(sc); + rt2860_init_locked(sc); + ifp->if_oerrors++; + return; + } + callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc); +} + +static int +rt2860_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct rt2860_softc *sc = ifp->if_softc; + struct ieee80211com *ic = ifp->if_l2com; + struct ifreq *ifr = (struct ifreq *)data; + int error = 0, startall = 0; + + switch (cmd) { + case SIOCSIFFLAGS: + RAL_LOCK(sc); + if (ifp->if_flags & IFF_UP) { + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { + rt2860_init_locked(sc); + startall = 1; + } else + rt2860_update_promisc(ifp); + } else { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + rt2860_stop_locked(sc); + } + RAL_UNLOCK(sc); + if (startall) + ieee80211_start_all(ic); + break; + case SIOCGIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, 
cmd); + break; + case SIOCSIFADDR: + error = ether_ioctl(ifp, cmd, data); + break; + default: + error = EINVAL; + break; + } + return error; +} + +/* + * Reading and writing from/to the BBP is different from RT2560 and RT2661. + * We access the BBP through the 8051 microcontroller unit which means that + * the microcode must be loaded first. + */ +void +rt2860_mcu_bbp_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val) +{ + int ntries; + + for (ntries = 0; ntries < 100; ntries++) { + if (!(RAL_READ(sc, RT2860_H2M_BBPAGENT) & RT2860_BBP_CSR_KICK)) + break; + DELAY(1); + } + if (ntries == 100) { + device_printf(sc->sc_dev, + "could not write to BBP through MCU\n"); + return; + } + + RAL_WRITE(sc, RT2860_H2M_BBPAGENT, RT2860_BBP_RW_PARALLEL | + RT2860_BBP_CSR_KICK | reg << 8 | val); + RAL_BARRIER_WRITE(sc); + + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_BBP, 0, 0); + DELAY(1000); +} + +uint8_t +rt2860_mcu_bbp_read(struct rt2860_softc *sc, uint8_t reg) +{ + uint32_t val; + int ntries; + + for (ntries = 0; ntries < 100; ntries++) { + if (!(RAL_READ(sc, RT2860_H2M_BBPAGENT) & RT2860_BBP_CSR_KICK)) + break; + DELAY(1); + } + if (ntries == 100) { + device_printf(sc->sc_dev, + "could not read from BBP through MCU\n"); + return 0; + } + + RAL_WRITE(sc, RT2860_H2M_BBPAGENT, RT2860_BBP_RW_PARALLEL | + RT2860_BBP_CSR_KICK | RT2860_BBP_CSR_READ | reg << 8); + RAL_BARRIER_WRITE(sc); + + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_BBP, 0, 0); + DELAY(1000); + + for (ntries = 0; ntries < 100; ntries++) { + val = RAL_READ(sc, RT2860_H2M_BBPAGENT); + if (!(val & RT2860_BBP_CSR_KICK)) + return val & 0xff; + DELAY(1); + } + device_printf(sc->sc_dev, "could not read from BBP through MCU\n"); + + return 0; +} + +/* + * Write to one of the 4 programmable 24-bit RF registers. + */ +static void +rt2860_rf_write(struct rt2860_softc *sc, uint8_t reg, uint32_t val) +{ + uint32_t tmp; + int ntries; + + for (ntries = 0; ntries < 100; ntries++) { + if (!(RAL_READ(sc, RT2860_RF_CSR_CFG0) & RT2860_RF_REG_CTRL)) + break; + DELAY(1); + } + if (ntries == 100) { + device_printf(sc->sc_dev, "could not write to RF\n"); + return; + } + + /* RF registers are 24-bit on the RT2860 */ + tmp = RT2860_RF_REG_CTRL | 24 << RT2860_RF_REG_WIDTH_SHIFT | + (val & 0x3fffff) << 2 | (reg & 3); + RAL_WRITE(sc, RT2860_RF_CSR_CFG0, tmp); +} + +static uint8_t +rt3090_rf_read(struct rt2860_softc *sc, uint8_t reg) +{ + uint32_t tmp; + int ntries; + + for (ntries = 0; ntries < 100; ntries++) { + if (!(RAL_READ(sc, RT3070_RF_CSR_CFG) & RT3070_RF_KICK)) + break; + DELAY(1); + } + if (ntries == 100) { + device_printf(sc->sc_dev, "could not read RF register\n"); + return 0xff; + } + tmp = RT3070_RF_KICK | reg << 8; + RAL_WRITE(sc, RT3070_RF_CSR_CFG, tmp); + + for (ntries = 0; ntries < 100; ntries++) { + tmp = RAL_READ(sc, RT3070_RF_CSR_CFG); + if (!(tmp & RT3070_RF_KICK)) + break; + DELAY(1); + } + if (ntries == 100) { + device_printf(sc->sc_dev, "could not read RF register\n"); + return 0xff; + } + return tmp & 0xff; +} + +void +rt3090_rf_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val) +{ + uint32_t tmp; + int ntries; + + for (ntries = 0; ntries < 10; ntries++) { + if (!(RAL_READ(sc, RT3070_RF_CSR_CFG) & RT3070_RF_KICK)) + break; + DELAY(10); + } + if (ntries == 10) { + device_printf(sc->sc_dev, "could not write to RF\n"); + return; + } + + tmp = RT3070_RF_WRITE | RT3070_RF_KICK | reg << 8 | val; + RAL_WRITE(sc, RT3070_RF_CSR_CFG, tmp); +} + +/* + * Send a command to the 8051 microcontroller unit. 
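+ * The handshake goes through the H2M mailbox: wait for the H2M_BUSY bit
+ * to clear, post BUSY | token | argument, then kick HOST_CMD.  With
+ * 'wait' set, the token is the command id itself; it comes back in one
+ * of the four H2M_MAILBOX_CID slots and the matching byte of
+ * H2M_MAILBOX_STATUS carries the result (1 means success).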
+ */ +int +rt2860_mcu_cmd(struct rt2860_softc *sc, uint8_t cmd, uint16_t arg, int wait) +{ + int slot, ntries; + uint32_t tmp; + uint8_t cid; + + for (ntries = 0; ntries < 100; ntries++) { + if (!(RAL_READ(sc, RT2860_H2M_MAILBOX) & RT2860_H2M_BUSY)) + break; + DELAY(2); + } + if (ntries == 100) + return EIO; + + cid = wait ? cmd : RT2860_TOKEN_NO_INTR; + RAL_WRITE(sc, RT2860_H2M_MAILBOX, RT2860_H2M_BUSY | cid << 16 | arg); + RAL_BARRIER_WRITE(sc); + RAL_WRITE(sc, RT2860_HOST_CMD, cmd); + + if (!wait) + return 0; + /* wait for the command to complete */ + for (ntries = 0; ntries < 200; ntries++) { + tmp = RAL_READ(sc, RT2860_H2M_MAILBOX_CID); + /* find the command slot */ + for (slot = 0; slot < 4; slot++, tmp >>= 8) + if ((tmp & 0xff) == cid) + break; + if (slot < 4) + break; + DELAY(100); + } + if (ntries == 200) { + /* clear command and status */ + RAL_WRITE(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff); + RAL_WRITE(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff); + return ETIMEDOUT; + } + /* get command status (1 means success) */ + tmp = RAL_READ(sc, RT2860_H2M_MAILBOX_STATUS); + tmp = (tmp >> (slot * 8)) & 0xff; + DPRINTF(("MCU command=0x%02x slot=%d status=0x%02x\n", + cmd, slot, tmp)); + /* clear command and status */ + RAL_WRITE(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff); + RAL_WRITE(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff); + return (tmp == 1) ? 0 : EIO; +} + +static void +rt2860_enable_mrr(struct rt2860_softc *sc) +{ +#define CCK(mcs) (mcs) +#define OFDM(mcs) (1 << 3 | (mcs)) + RAL_WRITE(sc, RT2860_LG_FBK_CFG0, + OFDM(6) << 28 | /* 54->48 */ + OFDM(5) << 24 | /* 48->36 */ + OFDM(4) << 20 | /* 36->24 */ + OFDM(3) << 16 | /* 24->18 */ + OFDM(2) << 12 | /* 18->12 */ + OFDM(1) << 8 | /* 12-> 9 */ + OFDM(0) << 4 | /* 9-> 6 */ + OFDM(0)); /* 6-> 6 */ + + RAL_WRITE(sc, RT2860_LG_FBK_CFG1, + CCK(2) << 12 | /* 11->5.5 */ + CCK(1) << 8 | /* 5.5-> 2 */ + CCK(0) << 4 | /* 2-> 1 */ + CCK(0)); /* 1-> 1 */ +#undef OFDM +#undef CCK +} + +static void +rt2860_set_txpreamble(struct rt2860_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + uint32_t tmp; + + tmp = RAL_READ(sc, RT2860_AUTO_RSP_CFG); + tmp &= ~RT2860_CCK_SHORT_EN; + if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) + tmp |= RT2860_CCK_SHORT_EN; + RAL_WRITE(sc, RT2860_AUTO_RSP_CFG, tmp); +} + +void +rt2860_set_basicrates(struct rt2860_softc *sc, + const struct ieee80211_rateset *rs) +{ +#define RV(r) ((r) & IEEE80211_RATE_VAL) + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + uint32_t mask = 0; + uint8_t rate; + int i; + + for (i = 0; i < rs->rs_nrates; i++) { + rate = rs->rs_rates[i]; + + if (!(rate & IEEE80211_RATE_BASIC)) + continue; + + mask |= 1 << ic->ic_rt->rateCodeToIndex[RV(rate)]; + } + + RAL_WRITE(sc, RT2860_LEGACY_BASIC_RATE, mask); +#undef RV +} + +static void +rt2860_scan_start(struct ieee80211com *ic) +{ + struct ifnet *ifp = ic->ic_ifp; + struct rt2860_softc *sc = ifp->if_softc; + uint32_t tmp; + + tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG); + RAL_WRITE(sc, RT2860_BCN_TIME_CFG, + tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN | + RT2860_TBTT_TIMER_EN)); + rt2860_set_gp_timer(sc, 0); +} + +static void +rt2860_scan_end(struct ieee80211com *ic) +{ + struct ifnet *ifp = ic->ic_ifp; + struct rt2860_softc *sc = ifp->if_softc; + struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); + + if (vap->iv_state == IEEE80211_S_RUN) { + rt2860_enable_tsf_sync(sc); + rt2860_set_gp_timer(sc, 500); + } +} + +static void +rt2860_set_channel(struct ieee80211com *ic) +{ + struct ifnet *ifp = 
ic->ic_ifp; + struct rt2860_softc *sc = ifp->if_softc; + + RAL_LOCK(sc); + rt2860_switch_chan(sc, ic->ic_curchan); + RAL_UNLOCK(sc); +} + +static void +rt2860_select_chan_group(struct rt2860_softc *sc, int group) +{ + uint32_t tmp; + uint8_t agc; + + rt2860_mcu_bbp_write(sc, 62, 0x37 - sc->lna[group]); + rt2860_mcu_bbp_write(sc, 63, 0x37 - sc->lna[group]); + rt2860_mcu_bbp_write(sc, 64, 0x37 - sc->lna[group]); + rt2860_mcu_bbp_write(sc, 86, 0x00); + + if (group == 0) { + if (sc->ext_2ghz_lna) { + rt2860_mcu_bbp_write(sc, 82, 0x62); + rt2860_mcu_bbp_write(sc, 75, 0x46); + } else { + rt2860_mcu_bbp_write(sc, 82, 0x84); + rt2860_mcu_bbp_write(sc, 75, 0x50); + } + } else { + if (sc->ext_5ghz_lna) { + rt2860_mcu_bbp_write(sc, 82, 0xf2); + rt2860_mcu_bbp_write(sc, 75, 0x46); + } else { + rt2860_mcu_bbp_write(sc, 82, 0xf2); + rt2860_mcu_bbp_write(sc, 75, 0x50); + } + } + + tmp = RAL_READ(sc, RT2860_TX_BAND_CFG); + tmp &= ~(RT2860_5G_BAND_SEL_N | RT2860_5G_BAND_SEL_P); + tmp |= (group == 0) ? RT2860_5G_BAND_SEL_N : RT2860_5G_BAND_SEL_P; + RAL_WRITE(sc, RT2860_TX_BAND_CFG, tmp); + + /* enable appropriate Power Amplifiers and Low Noise Amplifiers */ + tmp = RT2860_RFTR_EN | RT2860_TRSW_EN | RT2860_LNA_PE0_EN; + if (sc->nrxchains > 1) + tmp |= RT2860_LNA_PE1_EN; + if (sc->mac_ver == 0x3593 && sc->nrxchains > 2) + tmp |= RT3593_LNA_PE2_EN; + if (group == 0) { /* 2GHz */ + tmp |= RT2860_PA_PE_G0_EN; + if (sc->ntxchains > 1) + tmp |= RT2860_PA_PE_G1_EN; + if (sc->mac_ver == 0x3593 && sc->ntxchains > 2) + tmp |= RT3593_PA_PE_G2_EN; + } else { /* 5GHz */ + tmp |= RT2860_PA_PE_A0_EN; + if (sc->ntxchains > 1) + tmp |= RT2860_PA_PE_A1_EN; + if (sc->mac_ver == 0x3593 && sc->ntxchains > 2) + tmp |= RT3593_PA_PE_A2_EN; + } + RAL_WRITE(sc, RT2860_TX_PIN_CFG, tmp); + + if (sc->mac_ver == 0x3593) { + tmp = RAL_READ(sc, RT2860_GPIO_CTRL); + if (sc->sc_flags & RT2860_PCIE) { + tmp &= ~0x01010000; + if (group == 0) + tmp |= 0x00010000; + } else { + tmp &= ~0x00008080; + if (group == 0) + tmp |= 0x00000080; + } + tmp = (tmp & ~0x00001000) | 0x00000010; + RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp); + } + + /* set initial AGC value */ + if (group == 0) { /* 2GHz band */ + if (sc->mac_ver >= 0x3071) + agc = 0x1c + sc->lna[0] * 2; + else + agc = 0x2e + sc->lna[0]; + } else { /* 5GHz band */ + agc = 0x32 + (sc->lna[group] * 5) / 3; + } + rt2860_mcu_bbp_write(sc, 66, agc); + + DELAY(1000); +} + +static void +rt2860_set_chan(struct rt2860_softc *sc, u_int chan) +{ + const struct rfprog *rfprog = rt2860_rf2850; + uint32_t r2, r3, r4; + int8_t txpow1, txpow2; + u_int i; + + /* find the settings for this channel (we know it exists) */ + for (i = 0; rfprog[i].chan != chan; i++); + + r2 = rfprog[i].r2; + if (sc->ntxchains == 1) + r2 |= 1 << 12; /* 1T: disable Tx chain 2 */ + if (sc->nrxchains == 1) + r2 |= 1 << 15 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */ + else if (sc->nrxchains == 2) + r2 |= 1 << 4; /* 2R: disable Rx chain 3 */ + + /* use Tx power values from EEPROM */ + txpow1 = sc->txpow1[i]; + txpow2 = sc->txpow2[i]; + if (chan > 14) { + if (txpow1 >= 0) + txpow1 = txpow1 << 1 | 1; + else + txpow1 = (7 + txpow1) << 1; + if (txpow2 >= 0) + txpow2 = txpow2 << 1 | 1; + else + txpow2 = (7 + txpow2) << 1; + } + r3 = rfprog[i].r3 | txpow1 << 7; + r4 = rfprog[i].r4 | sc->freq << 13 | txpow2 << 4; + + rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1); + rt2860_rf_write(sc, RT2860_RF2, r2); + rt2860_rf_write(sc, RT2860_RF3, r3); + rt2860_rf_write(sc, RT2860_RF4, r4); + + DELAY(200); + + rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1); + 
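+	/*
+	 * The vendor channel-switch sequence programs R1-R4 three times,
+	 * briefly asserting RF3 bit 0 in the middle pass (apparently to
+	 * strobe the synthesizer), with 200us pauses in between.
+	 */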
rt2860_rf_write(sc, RT2860_RF2, r2);
+	rt2860_rf_write(sc, RT2860_RF3, r3 | 1);
+	rt2860_rf_write(sc, RT2860_RF4, r4);
+
+	DELAY(200);
+
+	rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1);
+	rt2860_rf_write(sc, RT2860_RF2, r2);
+	rt2860_rf_write(sc, RT2860_RF3, r3);
+	rt2860_rf_write(sc, RT2860_RF4, r4);
+}
+
+static void
+rt3090_set_chan(struct rt2860_softc *sc, u_int chan)
+{
+	int8_t txpow1, txpow2;
+	uint8_t rf;
+	int i;
+
+	/* RT3090 is 2GHz only */
+	KASSERT(chan >= 1 && chan <= 14, ("chan %d not supported", chan));
+
+	/* find the settings for this channel (we know it exists) */
+	for (i = 0; rt2860_rf2850[i].chan != chan; i++);
+
+	/* use Tx power values from EEPROM */
+	txpow1 = sc->txpow1[i];
+	txpow2 = sc->txpow2[i];
+
+	rt3090_rf_write(sc, 2, rt3090_freqs[i].n);
+	rf = rt3090_rf_read(sc, 3);
+	rf = (rf & ~0x0f) | rt3090_freqs[i].k;
+	rt3090_rf_write(sc, 3, rf);
+	rf = rt3090_rf_read(sc, 6);
+	rf = (rf & ~0x03) | rt3090_freqs[i].r;
+	rt3090_rf_write(sc, 6, rf);
+
+	/* set Tx0 power */
+	rf = rt3090_rf_read(sc, 12);
+	rf = (rf & ~0x1f) | txpow1;
+	rt3090_rf_write(sc, 12, rf);
+
+	/* set Tx1 power */
+	rf = rt3090_rf_read(sc, 13);
+	rf = (rf & ~0x1f) | txpow2;
+	rt3090_rf_write(sc, 13, rf);
+
+	rf = rt3090_rf_read(sc, 1);
+	rf &= ~0xfc;
+	if (sc->ntxchains == 1)
+		rf |= RT3070_TX1_PD | RT3070_TX2_PD;
+	else if (sc->ntxchains == 2)
+		rf |= RT3070_TX2_PD;
+	if (sc->nrxchains == 1)
+		rf |= RT3070_RX1_PD | RT3070_RX2_PD;
+	else if (sc->nrxchains == 2)
+		rf |= RT3070_RX2_PD;
+	rt3090_rf_write(sc, 1, rf);
+
+	/* set RF offset */
+	rf = rt3090_rf_read(sc, 23);
+	rf = (rf & ~0x7f) | sc->freq;
+	rt3090_rf_write(sc, 23, rf);
+
+	/* program RF filter */
+	rf = rt3090_rf_read(sc, 24);	/* Tx */
+	rf = (rf & ~0x3f) | sc->rf24_20mhz;
+	rt3090_rf_write(sc, 24, rf);
+	rf = rt3090_rf_read(sc, 31);	/* Rx */
+	rf = (rf & ~0x3f) | sc->rf24_20mhz;
+	rt3090_rf_write(sc, 31, rf);
+
+	/* enable RF tuning */
+	rf = rt3090_rf_read(sc, 7);
+	rt3090_rf_write(sc, 7, rf | RT3070_TUNE);
+}
+
+static int
+rt3090_rf_init(struct rt2860_softc *sc)
+{
+#define N(a)	(sizeof (a) / sizeof ((a)[0]))
+	uint32_t tmp;
+	uint8_t rf, bbp;
+	int i;
+
+	rf = rt3090_rf_read(sc, 30);
+	/* toggle RF R30 bit 7 */
+	rt3090_rf_write(sc, 30, rf | 0x80);
+	DELAY(1000);
+	rt3090_rf_write(sc, 30, rf & ~0x80);
+
+	tmp = RAL_READ(sc, RT3070_LDO_CFG0);
+	tmp &= ~0x1f000000;
+	if (sc->patch_dac && sc->mac_rev < 0x0211)
+		tmp |= 0x0d000000;	/* 1.35V */
+	else
+		tmp |= 0x01000000;	/* 1.2V */
+	RAL_WRITE(sc, RT3070_LDO_CFG0, tmp);
+
+	/* patch LNA_PE_G1 */
+	tmp = RAL_READ(sc, RT3070_GPIO_SWITCH);
+	RAL_WRITE(sc, RT3070_GPIO_SWITCH, tmp & ~0x20);
+
+	/* initialize RF registers to default value */
+	for (i = 0; i < N(rt3090_def_rf); i++) {
+		rt3090_rf_write(sc, rt3090_def_rf[i].reg,
+		    rt3090_def_rf[i].val);
+	}
+
+	/* select 20MHz bandwidth */
+	rt3090_rf_write(sc, 31, 0x14);
+
+	rf = rt3090_rf_read(sc, 6);
+	rt3090_rf_write(sc, 6, rf | 0x40);
+
+	if (sc->mac_ver != 0x3593) {
+		/* calibrate filter for 20MHz bandwidth */
+		sc->rf24_20mhz = 0x1f;	/* default value */
+		rt3090_filter_calib(sc, 0x07, 0x16, &sc->rf24_20mhz);
+
+		/* select 40MHz bandwidth */
+		bbp = rt2860_mcu_bbp_read(sc, 4);
+		rt2860_mcu_bbp_write(sc, 4, (bbp & ~0x08) | 0x10);
+		rf = rt3090_rf_read(sc, 31);
+		rt3090_rf_write(sc, 31, rf | 0x20);
+
+		/* calibrate filter for 40MHz bandwidth */
+		sc->rf24_40mhz = 0x2f;	/* default value */
+		rt3090_filter_calib(sc, 0x27, 0x19, &sc->rf24_40mhz);
+
+		/* go back to 20MHz bandwidth */
+		bbp = rt2860_mcu_bbp_read(sc, 4);
+		rt2860_mcu_bbp_write(sc,
4, bbp & ~0x18); + } + if (sc->mac_rev < 0x0211) + rt3090_rf_write(sc, 27, 0x03); + + tmp = RAL_READ(sc, RT3070_OPT_14); + RAL_WRITE(sc, RT3070_OPT_14, tmp | 1); + + if (sc->rf_rev == RT3070_RF_3020) + rt3090_set_rx_antenna(sc, 0); + + bbp = rt2860_mcu_bbp_read(sc, 138); + if (sc->mac_ver == 0x3593) { + if (sc->ntxchains == 1) + bbp |= 0x60; /* turn off DAC1 and DAC2 */ + else if (sc->ntxchains == 2) + bbp |= 0x40; /* turn off DAC2 */ + if (sc->nrxchains == 1) + bbp &= ~0x06; /* turn off ADC1 and ADC2 */ + else if (sc->nrxchains == 2) + bbp &= ~0x04; /* turn off ADC2 */ + } else { + if (sc->ntxchains == 1) + bbp |= 0x20; /* turn off DAC1 */ + if (sc->nrxchains == 1) + bbp &= ~0x02; /* turn off ADC1 */ + } + rt2860_mcu_bbp_write(sc, 138, bbp); + + rf = rt3090_rf_read(sc, 1); + rf &= ~(RT3070_RX0_PD | RT3070_TX0_PD); + rf |= RT3070_RF_BLOCK | RT3070_RX1_PD | RT3070_TX1_PD; + rt3090_rf_write(sc, 1, rf); + + rf = rt3090_rf_read(sc, 15); + rt3090_rf_write(sc, 15, rf & ~RT3070_TX_LO2); + + rf = rt3090_rf_read(sc, 17); + rf &= ~RT3070_TX_LO1; + if (sc->mac_rev >= 0x0211 && !sc->ext_2ghz_lna) + rf |= 0x20; /* fix for long range Rx issue */ + if (sc->txmixgain_2ghz >= 2) + rf = (rf & ~0x7) | sc->txmixgain_2ghz; + rt3090_rf_write(sc, 17, rf); + + rf = rt3090_rf_read(sc, 20); + rt3090_rf_write(sc, 20, rf & ~RT3070_RX_LO1); + + rf = rt3090_rf_read(sc, 21); + rt3090_rf_write(sc, 21, rf & ~RT3070_RX_LO2); + + return 0; +#undef N +} + +void +rt3090_rf_wakeup(struct rt2860_softc *sc) +{ + uint32_t tmp; + uint8_t rf; + + if (sc->mac_ver == 0x3593) { + /* enable VCO */ + rf = rt3090_rf_read(sc, 1); + rt3090_rf_write(sc, 1, rf | RT3593_VCO); + + /* initiate VCO calibration */ + rf = rt3090_rf_read(sc, 3); + rt3090_rf_write(sc, 3, rf | RT3593_VCOCAL); + + /* enable VCO bias current control */ + rf = rt3090_rf_read(sc, 6); + rt3090_rf_write(sc, 6, rf | RT3593_VCO_IC); + + /* initiate res calibration */ + rf = rt3090_rf_read(sc, 2); + rt3090_rf_write(sc, 2, rf | RT3593_RESCAL); + + /* set reference current control to 0.33 mA */ + rf = rt3090_rf_read(sc, 22); + rf &= ~RT3593_CP_IC_MASK; + rf |= 1 << RT3593_CP_IC_SHIFT; + rt3090_rf_write(sc, 22, rf); + + /* enable RX CTB */ + rf = rt3090_rf_read(sc, 46); + rt3090_rf_write(sc, 46, rf | RT3593_RX_CTB); + + rf = rt3090_rf_read(sc, 20); + rf &= ~(RT3593_LDO_RF_VC_MASK | RT3593_LDO_PLL_VC_MASK); + rt3090_rf_write(sc, 20, rf); + } else { + /* enable RF block */ + rf = rt3090_rf_read(sc, 1); + rt3090_rf_write(sc, 1, rf | RT3070_RF_BLOCK); + + /* enable VCO bias current control */ + rf = rt3090_rf_read(sc, 7); + rt3090_rf_write(sc, 7, rf | 0x30); + + rf = rt3090_rf_read(sc, 9); + rt3090_rf_write(sc, 9, rf | 0x0e); + + /* enable RX CTB */ + rf = rt3090_rf_read(sc, 21); + rt3090_rf_write(sc, 21, rf | RT3070_RX_CTB); + + /* fix Tx to Rx IQ glitch by raising RF voltage */ + rf = rt3090_rf_read(sc, 27); + rf &= ~0x77; + if (sc->mac_rev < 0x0211) + rf |= 0x03; + rt3090_rf_write(sc, 27, rf); + } + if (sc->patch_dac && sc->mac_rev < 0x0211) { + tmp = RAL_READ(sc, RT3070_LDO_CFG0); + tmp = (tmp & ~0x1f000000) | 0x0d000000; + RAL_WRITE(sc, RT3070_LDO_CFG0, tmp); + } +} + +int +rt3090_filter_calib(struct rt2860_softc *sc, uint8_t init, uint8_t target, + uint8_t *val) +{ + uint8_t rf22, rf24; + uint8_t bbp55_pb, bbp55_sb, delta; + int ntries; + + /* program filter */ + rf24 = rt3090_rf_read(sc, 24); + rf24 = (rf24 & 0xc0) | init; /* initial filter value */ + rt3090_rf_write(sc, 24, rf24); + + /* enable baseband loopback mode */ + rf22 = rt3090_rf_read(sc, 22); + rt3090_rf_write(sc, 
22, rf22 | RT3070_BB_LOOPBACK); + + /* set power and frequency of passband test tone */ + rt2860_mcu_bbp_write(sc, 24, 0x00); + for (ntries = 0; ntries < 100; ntries++) { + /* transmit test tone */ + rt2860_mcu_bbp_write(sc, 25, 0x90); + DELAY(1000); + /* read received power */ + bbp55_pb = rt2860_mcu_bbp_read(sc, 55); + if (bbp55_pb != 0) + break; + } + if (ntries == 100) + return ETIMEDOUT; + + /* set power and frequency of stopband test tone */ + rt2860_mcu_bbp_write(sc, 24, 0x06); + for (ntries = 0; ntries < 100; ntries++) { + /* transmit test tone */ + rt2860_mcu_bbp_write(sc, 25, 0x90); + DELAY(1000); + /* read received power */ + bbp55_sb = rt2860_mcu_bbp_read(sc, 55); + + delta = bbp55_pb - bbp55_sb; + if (delta > target) + break; + + /* reprogram filter */ + rf24++; + rt3090_rf_write(sc, 24, rf24); + } + if (ntries < 100) { + if (rf24 != init) + rf24--; /* backtrack */ + *val = rf24; + rt3090_rf_write(sc, 24, rf24); + } + + /* restore initial state */ + rt2860_mcu_bbp_write(sc, 24, 0x00); + + /* disable baseband loopback mode */ + rf22 = rt3090_rf_read(sc, 22); + rt3090_rf_write(sc, 22, rf22 & ~RT3070_BB_LOOPBACK); + + return 0; +} + +static void +rt3090_rf_setup(struct rt2860_softc *sc) +{ + uint8_t bbp; + int i; + + if (sc->mac_rev >= 0x0211) { + /* enable DC filter */ + rt2860_mcu_bbp_write(sc, 103, 0xc0); + + /* improve power consumption */ + bbp = rt2860_mcu_bbp_read(sc, 31); + rt2860_mcu_bbp_write(sc, 31, bbp & ~0x03); + } + + RAL_WRITE(sc, RT2860_TX_SW_CFG1, 0); + if (sc->mac_rev < 0x0211) { + RAL_WRITE(sc, RT2860_TX_SW_CFG2, + sc->patch_dac ? 0x2c : 0x0f); + } else + RAL_WRITE(sc, RT2860_TX_SW_CFG2, 0); + + /* initialize RF registers from ROM */ + for (i = 0; i < 10; i++) { + if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff) + continue; + rt3090_rf_write(sc, sc->rf[i].reg, sc->rf[i].val); + } +} + +static void +rt2860_set_leds(struct rt2860_softc *sc, uint16_t which) +{ + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LEDS, + which | (sc->leds & 0x7f), 0); +} + +/* + * Hardware has a general-purpose programmable timer interrupt that can + * periodically raise MAC_INT_4. + */ +static void +rt2860_set_gp_timer(struct rt2860_softc *sc, int ms) +{ + uint32_t tmp; + + /* disable GP timer before reprogramming it */ + tmp = RAL_READ(sc, RT2860_INT_TIMER_EN); + RAL_WRITE(sc, RT2860_INT_TIMER_EN, tmp & ~RT2860_GP_TIMER_EN); + + if (ms == 0) + return; + + tmp = RAL_READ(sc, RT2860_INT_TIMER_CFG); + ms *= 16; /* Unit: 64us */ + tmp = (tmp & 0xffff) | ms << RT2860_GP_TIMER_SHIFT; + RAL_WRITE(sc, RT2860_INT_TIMER_CFG, tmp); + + /* enable GP timer */ + tmp = RAL_READ(sc, RT2860_INT_TIMER_EN); + RAL_WRITE(sc, RT2860_INT_TIMER_EN, tmp | RT2860_GP_TIMER_EN); +} + +static void +rt2860_set_bssid(struct rt2860_softc *sc, const uint8_t *bssid) +{ + RAL_WRITE(sc, RT2860_MAC_BSSID_DW0, + bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); + RAL_WRITE(sc, RT2860_MAC_BSSID_DW1, + bssid[4] | bssid[5] << 8); +} + +static void +rt2860_set_macaddr(struct rt2860_softc *sc, const uint8_t *addr) +{ + RAL_WRITE(sc, RT2860_MAC_ADDR_DW0, + addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RAL_WRITE(sc, RT2860_MAC_ADDR_DW1, + addr[4] | addr[5] << 8 | 0xff << 16); +} + +static void +rt2860_updateslot(struct ifnet *ifp) +{ + struct rt2860_softc *sc = ifp->if_softc; + struct ieee80211com *ic = ifp->if_l2com; + uint32_t tmp; + + tmp = RAL_READ(sc, RT2860_BKOFF_SLOT_CFG); + tmp &= ~0xff; + tmp |= (ic->ic_flags & IEEE80211_F_SHSLOT) ? 
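+	    /* 802.11 slot time in microseconds: 9 (short) or 20 (long) */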
9 : 20; + RAL_WRITE(sc, RT2860_BKOFF_SLOT_CFG, tmp); +} + +static void +rt2860_updateprot(struct ifnet *ifp) +{ + struct rt2860_softc *sc = ifp->if_softc; + struct ieee80211com *ic = ifp->if_l2com; + uint32_t tmp; + + tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL; + /* setup protection frame rate (MCS code) */ + tmp |= IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) ? + rt2860_rates[RT2860_RIDX_OFDM6].mcs : + rt2860_rates[RT2860_RIDX_CCK11].mcs; + + /* CCK frames don't require protection */ + RAL_WRITE(sc, RT2860_CCK_PROT_CFG, tmp); + + if (ic->ic_flags & IEEE80211_F_USEPROT) { + if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) + tmp |= RT2860_PROT_CTRL_RTS_CTS; + else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) + tmp |= RT2860_PROT_CTRL_CTS; + } + RAL_WRITE(sc, RT2860_OFDM_PROT_CFG, tmp); +} + +static void +rt2860_update_promisc(struct ifnet *ifp) +{ + struct rt2860_softc *sc = ifp->if_softc; + uint32_t tmp; + + tmp = RAL_READ(sc, RT2860_RX_FILTR_CFG); + tmp &= ~RT2860_DROP_NOT_MYBSS; + if (!(ifp->if_flags & IFF_PROMISC)) + tmp |= RT2860_DROP_NOT_MYBSS; + RAL_WRITE(sc, RT2860_RX_FILTR_CFG, tmp); +} + +static int +rt2860_updateedca(struct ieee80211com *ic) +{ + struct rt2860_softc *sc = ic->ic_ifp->if_softc; + const struct wmeParams *wmep; + int aci; + + wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; + + /* update MAC TX configuration registers */ + for (aci = 0; aci < WME_NUM_AC; aci++) { + RAL_WRITE(sc, RT2860_EDCA_AC_CFG(aci), + wmep[aci].wmep_logcwmax << 16 | + wmep[aci].wmep_logcwmin << 12 | + wmep[aci].wmep_aifsn << 8 | + wmep[aci].wmep_txopLimit); + } + + /* update SCH/DMA registers too */ + RAL_WRITE(sc, RT2860_WMM_AIFSN_CFG, + wmep[WME_AC_VO].wmep_aifsn << 12 | + wmep[WME_AC_VI].wmep_aifsn << 8 | + wmep[WME_AC_BK].wmep_aifsn << 4 | + wmep[WME_AC_BE].wmep_aifsn); + RAL_WRITE(sc, RT2860_WMM_CWMIN_CFG, + wmep[WME_AC_VO].wmep_logcwmin << 12 | + wmep[WME_AC_VI].wmep_logcwmin << 8 | + wmep[WME_AC_BK].wmep_logcwmin << 4 | + wmep[WME_AC_BE].wmep_logcwmin); + RAL_WRITE(sc, RT2860_WMM_CWMAX_CFG, + wmep[WME_AC_VO].wmep_logcwmax << 12 | + wmep[WME_AC_VI].wmep_logcwmax << 8 | + wmep[WME_AC_BK].wmep_logcwmax << 4 | + wmep[WME_AC_BE].wmep_logcwmax); + RAL_WRITE(sc, RT2860_WMM_TXOP0_CFG, + wmep[WME_AC_BK].wmep_txopLimit << 16 | + wmep[WME_AC_BE].wmep_txopLimit); + RAL_WRITE(sc, RT2860_WMM_TXOP1_CFG, + wmep[WME_AC_VO].wmep_txopLimit << 16 | + wmep[WME_AC_VI].wmep_txopLimit); + + return 0; +} + +#ifdef HW_CRYPTO +static int +rt2860_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, + struct ieee80211_key *k) +{ + struct rt2860_softc *sc = ic->ic_softc; + bus_size_t base; + uint32_t attr; + uint8_t mode, wcid, iv[8]; + + /* defer setting of WEP keys until interface is brought up */ + if ((ic->ic_if.if_flags & (IFF_UP | IFF_RUNNING)) != + (IFF_UP | IFF_RUNNING)) + return 0; + + /* map net80211 cipher to RT2860 security mode */ + switch (k->k_cipher) { + case IEEE80211_CIPHER_WEP40: + mode = RT2860_MODE_WEP40; + break; + case IEEE80211_CIPHER_WEP104: + mode = RT2860_MODE_WEP104; + break; + case IEEE80211_CIPHER_TKIP: + mode = RT2860_MODE_TKIP; + break; + case IEEE80211_CIPHER_CCMP: + mode = RT2860_MODE_AES_CCMP; + break; + default: + return EINVAL; + } + + if (k->k_flags & IEEE80211_KEY_GROUP) { + wcid = 0; /* NB: update WCID0 for group keys */ + base = RT2860_SKEY(0, k->k_id); + } else { + wcid = ((struct rt2860_node *)ni)->wcid; + base = RT2860_PKEY(wcid); + } + + if (k->k_cipher == IEEE80211_CIPHER_TKIP) { + RAL_WRITE_REGION_1(sc, base, k->k_key, 16); +#ifndef 
IEEE80211_STA_ONLY + if (ic->ic_opmode == IEEE80211_M_HOSTAP) { + RAL_WRITE_REGION_1(sc, base + 16, &k->k_key[16], 8); + RAL_WRITE_REGION_1(sc, base + 24, &k->k_key[24], 8); + } else +#endif + { + RAL_WRITE_REGION_1(sc, base + 16, &k->k_key[24], 8); + RAL_WRITE_REGION_1(sc, base + 24, &k->k_key[16], 8); + } + } else + RAL_WRITE_REGION_1(sc, base, k->k_key, k->k_len); + + if (!(k->k_flags & IEEE80211_KEY_GROUP) || + (k->k_flags & IEEE80211_KEY_TX)) { + /* set initial packet number in IV+EIV */ + if (k->k_cipher == IEEE80211_CIPHER_WEP40 || + k->k_cipher == IEEE80211_CIPHER_WEP104) { + uint32_t val = arc4random(); + /* skip weak IVs from Fluhrer/Mantin/Shamir */ + if (val >= 0x03ff00 && (val & 0xf8ff00) == 0x00ff00) + val += 0x000100; + iv[0] = val; + iv[1] = val >> 8; + iv[2] = val >> 16; + iv[3] = k->k_id << 6; + iv[4] = iv[5] = iv[6] = iv[7] = 0; + } else { + if (k->k_cipher == IEEE80211_CIPHER_TKIP) { + iv[0] = k->k_tsc >> 8; + iv[1] = (iv[0] | 0x20) & 0x7f; + iv[2] = k->k_tsc; + } else /* CCMP */ { + iv[0] = k->k_tsc; + iv[1] = k->k_tsc >> 8; + iv[2] = 0; + } + iv[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV; + iv[4] = k->k_tsc >> 16; + iv[5] = k->k_tsc >> 24; + iv[6] = k->k_tsc >> 32; + iv[7] = k->k_tsc >> 40; + } + RAL_WRITE_REGION_1(sc, RT2860_IVEIV(wcid), iv, 8); + } + + if (k->k_flags & IEEE80211_KEY_GROUP) { + /* install group key */ + attr = RAL_READ(sc, RT2860_SKEY_MODE_0_7); + attr &= ~(0xf << (k->k_id * 4)); + attr |= mode << (k->k_id * 4); + RAL_WRITE(sc, RT2860_SKEY_MODE_0_7, attr); + } else { + /* install pairwise key */ + attr = RAL_READ(sc, RT2860_WCID_ATTR(wcid)); + attr = (attr & ~0xf) | (mode << 1) | RT2860_RX_PKEY_EN; + RAL_WRITE(sc, RT2860_WCID_ATTR(wcid), attr); + } + return 0; +} + +static void +rt2860_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni, + struct ieee80211_key *k) +{ + struct rt2860_softc *sc = ic->ic_softc; + uint32_t attr; + uint8_t wcid; + + if (k->k_flags & IEEE80211_KEY_GROUP) { + /* remove group key */ + attr = RAL_READ(sc, RT2860_SKEY_MODE_0_7); + attr &= ~(0xf << (k->k_id * 4)); + RAL_WRITE(sc, RT2860_SKEY_MODE_0_7, attr); + + } else { + /* remove pairwise key */ + wcid = ((struct rt2860_node *)ni)->wcid; + attr = RAL_READ(sc, RT2860_WCID_ATTR(wcid)); + attr &= ~0xf; + RAL_WRITE(sc, RT2860_WCID_ATTR(wcid), attr); + } +} +#endif + +static int8_t +rt2860_rssi2dbm(struct rt2860_softc *sc, uint8_t rssi, uint8_t rxchain) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + struct ieee80211_channel *c = ic->ic_curchan; + int delta; + + if (IEEE80211_IS_CHAN_5GHZ(c)) { + u_int chan = ieee80211_chan2ieee(ic, c); + delta = sc->rssi_5ghz[rxchain]; + + /* determine channel group */ + if (chan <= 64) + delta -= sc->lna[1]; + else if (chan <= 128) + delta -= sc->lna[2]; + else + delta -= sc->lna[3]; + } else + delta = sc->rssi_2ghz[rxchain] - sc->lna[0]; + + return -12 - delta - rssi; +} + +/* + * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word. + * Used to adjust per-rate Tx power registers. 
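+ * Each 4-bit field saturates at 0x0/0xf instead of wrapping, e.g.
+ * b4inc(0x1f802345, +1) yields 0x2f913456 (the 0xf nibble is clamped).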
+ */ +static __inline uint32_t +b4inc(uint32_t b32, int8_t delta) +{ + int8_t i, b4; + + for (i = 0; i < 8; i++) { + b4 = b32 & 0xf; + b4 += delta; + if (b4 < 0) + b4 = 0; + else if (b4 > 0xf) + b4 = 0xf; + b32 = b32 >> 4 | b4 << 28; + } + return b32; +} + +static const char * +rt2860_get_rf(uint8_t rev) +{ + switch (rev) { + case RT2860_RF_2820: return "RT2820"; + case RT2860_RF_2850: return "RT2850"; + case RT2860_RF_2720: return "RT2720"; + case RT2860_RF_2750: return "RT2750"; + case RT3070_RF_3020: return "RT3020"; + case RT3070_RF_2020: return "RT2020"; + case RT3070_RF_3021: return "RT3021"; + case RT3070_RF_3022: return "RT3022"; + case RT3070_RF_3052: return "RT3052"; + case RT3070_RF_3320: return "RT3320"; + case RT3070_RF_3053: return "RT3053"; + default: return "unknown"; + } +} + +static int +rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) +{ + int8_t delta_2ghz, delta_5ghz; + uint32_t tmp; + uint16_t val; + int ridx, ant, i; + + /* check whether the ROM is eFUSE ROM or EEPROM */ + sc->sc_srom_read = rt2860_eeprom_read_2; + if (sc->mac_ver >= 0x3071) { + tmp = RAL_READ(sc, RT3070_EFUSE_CTRL); + DPRINTF(("EFUSE_CTRL=0x%08x\n", tmp)); + if (tmp & RT3070_SEL_EFUSE) + sc->sc_srom_read = rt3090_efuse_read_2; + } + + /* read EEPROM version */ + val = rt2860_srom_read(sc, RT2860_EEPROM_VERSION); + DPRINTF(("EEPROM rev=%d, FAE=%d\n", val & 0xff, val >> 8)); + + /* read MAC address */ + val = rt2860_srom_read(sc, RT2860_EEPROM_MAC01); + macaddr[0] = val & 0xff; + macaddr[1] = val >> 8; + val = rt2860_srom_read(sc, RT2860_EEPROM_MAC23); + macaddr[2] = val & 0xff; + macaddr[3] = val >> 8; + val = rt2860_srom_read(sc, RT2860_EEPROM_MAC45); + macaddr[4] = val & 0xff; + macaddr[5] = val >> 8; + + /* read country code */ + val = rt2860_srom_read(sc, RT2860_EEPROM_COUNTRY); + DPRINTF(("EEPROM region code=0x%04x\n", val)); + + /* read vendor BBP settings */ + for (i = 0; i < 8; i++) { + val = rt2860_srom_read(sc, RT2860_EEPROM_BBP_BASE + i); + sc->bbp[i].val = val & 0xff; + sc->bbp[i].reg = val >> 8; + DPRINTF(("BBP%d=0x%02x\n", sc->bbp[i].reg, sc->bbp[i].val)); + } + if (sc->mac_ver >= 0x3071) { + /* read vendor RF settings */ + for (i = 0; i < 10; i++) { + val = rt2860_srom_read(sc, RT3071_EEPROM_RF_BASE + i); + sc->rf[i].val = val & 0xff; + sc->rf[i].reg = val >> 8; + DPRINTF(("RF%d=0x%02x\n", sc->rf[i].reg, + sc->rf[i].val)); + } + } + + /* read RF frequency offset from EEPROM */ + val = rt2860_srom_read(sc, RT2860_EEPROM_FREQ_LEDS); + sc->freq = ((val & 0xff) != 0xff) ? 
val & 0xff : 0; + DPRINTF(("EEPROM freq offset %d\n", sc->freq & 0xff)); + if ((val >> 8) != 0xff) { + /* read LEDs operating mode */ + sc->leds = val >> 8; + sc->led[0] = rt2860_srom_read(sc, RT2860_EEPROM_LED1); + sc->led[1] = rt2860_srom_read(sc, RT2860_EEPROM_LED2); + sc->led[2] = rt2860_srom_read(sc, RT2860_EEPROM_LED3); + } else { + /* broken EEPROM, use default settings */ + sc->leds = 0x01; + sc->led[0] = 0x5555; + sc->led[1] = 0x2221; + sc->led[2] = 0xa9f8; + } + DPRINTF(("EEPROM LED mode=0x%02x, LEDs=0x%04x/0x%04x/0x%04x\n", + sc->leds, sc->led[0], sc->led[1], sc->led[2])); + + /* read RF information */ + val = rt2860_srom_read(sc, RT2860_EEPROM_ANTENNA); + if (val == 0xffff) { + DPRINTF(("invalid EEPROM antenna info, using default\n")); + if (sc->mac_ver == 0x3593) { + /* default to RF3053 3T3R */ + sc->rf_rev = RT3070_RF_3053; + sc->ntxchains = 3; + sc->nrxchains = 3; + } else if (sc->mac_ver >= 0x3071) { + /* default to RF3020 1T1R */ + sc->rf_rev = RT3070_RF_3020; + sc->ntxchains = 1; + sc->nrxchains = 1; + } else { + /* default to RF2820 1T2R */ + sc->rf_rev = RT2860_RF_2820; + sc->ntxchains = 1; + sc->nrxchains = 2; + } + } else { + sc->rf_rev = (val >> 8) & 0xf; + sc->ntxchains = (val >> 4) & 0xf; + sc->nrxchains = val & 0xf; + } + DPRINTF(("EEPROM RF rev=0x%02x chains=%dT%dR\n", + sc->rf_rev, sc->ntxchains, sc->nrxchains)); + + /* check if RF supports automatic Tx access gain control */ + val = rt2860_srom_read(sc, RT2860_EEPROM_CONFIG); + DPRINTF(("EEPROM CFG 0x%04x\n", val)); + /* check if driver should patch the DAC issue */ + if ((val >> 8) != 0xff) + sc->patch_dac = (val >> 15) & 1; + if ((val & 0xff) != 0xff) { + sc->ext_5ghz_lna = (val >> 3) & 1; + sc->ext_2ghz_lna = (val >> 2) & 1; + /* check if RF supports automatic Tx access gain control */ + sc->calib_2ghz = sc->calib_5ghz = 0; /* XXX (val >> 1) & 1 */; + /* check if we have a hardware radio switch */ + sc->rfswitch = val & 1; + } + if (sc->sc_flags & RT2860_ADVANCED_PS) { + /* read PCIe power save level */ + val = rt2860_srom_read(sc, RT2860_EEPROM_PCIE_PSLEVEL); + if ((val & 0xff) != 0xff) { + sc->pslevel = val & 0x3; + val = rt2860_srom_read(sc, RT2860_EEPROM_REV); + if ((val & 0xff80) != 0x9280) + sc->pslevel = MIN(sc->pslevel, 1); + DPRINTF(("EEPROM PCIe PS Level=%d\n", sc->pslevel)); + } + } + + /* read power settings for 2GHz channels */ + for (i = 0; i < 14; i += 2) { + val = rt2860_srom_read(sc, + RT2860_EEPROM_PWR2GHZ_BASE1 + i / 2); + sc->txpow1[i + 0] = (int8_t)(val & 0xff); + sc->txpow1[i + 1] = (int8_t)(val >> 8); + + val = rt2860_srom_read(sc, + RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2); + sc->txpow2[i + 0] = (int8_t)(val & 0xff); + sc->txpow2[i + 1] = (int8_t)(val >> 8); + } + /* fix broken Tx power entries */ + for (i = 0; i < 14; i++) { + if (sc->txpow1[i] < 0 || sc->txpow1[i] > 31) + sc->txpow1[i] = 5; + if (sc->txpow2[i] < 0 || sc->txpow2[i] > 31) + sc->txpow2[i] = 5; + DPRINTF(("chan %d: power1=%d, power2=%d\n", + rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i])); + } + /* read power settings for 5GHz channels */ + for (i = 0; i < 40; i += 2) { + val = rt2860_srom_read(sc, + RT2860_EEPROM_PWR5GHZ_BASE1 + i / 2); + sc->txpow1[i + 14] = (int8_t)(val & 0xff); + sc->txpow1[i + 15] = (int8_t)(val >> 8); + + val = rt2860_srom_read(sc, + RT2860_EEPROM_PWR5GHZ_BASE2 + i / 2); + sc->txpow2[i + 14] = (int8_t)(val & 0xff); + sc->txpow2[i + 15] = (int8_t)(val >> 8); + } + /* fix broken Tx power entries */ + for (i = 0; i < 40; i++) { + if (sc->txpow1[14 + i] < -7 || sc->txpow1[14 + i] > 15) + 
sc->txpow1[14 + i] = 5; + if (sc->txpow2[14 + i] < -7 || sc->txpow2[14 + i] > 15) + sc->txpow2[14 + i] = 5; + DPRINTF(("chan %d: power1=%d, power2=%d\n", + rt2860_rf2850[14 + i].chan, sc->txpow1[14 + i], + sc->txpow2[14 + i])); + } + + /* read Tx power compensation for each Tx rate */ + val = rt2860_srom_read(sc, RT2860_EEPROM_DELTAPWR); + delta_2ghz = delta_5ghz = 0; + if ((val & 0xff) != 0xff && (val & 0x80)) { + delta_2ghz = val & 0xf; + if (!(val & 0x40)) /* negative number */ + delta_2ghz = -delta_2ghz; + } + val >>= 8; + if ((val & 0xff) != 0xff && (val & 0x80)) { + delta_5ghz = val & 0xf; + if (!(val & 0x40)) /* negative number */ + delta_5ghz = -delta_5ghz; + } + DPRINTF(("power compensation=%d (2GHz), %d (5GHz)\n", + delta_2ghz, delta_5ghz)); + + for (ridx = 0; ridx < 5; ridx++) { + uint32_t reg; + + val = rt2860_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2); + reg = val; + val = rt2860_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2 + 1); + reg |= (uint32_t)val << 16; + + sc->txpow20mhz[ridx] = reg; + sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz); + sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz); + + DPRINTF(("ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, " + "40MHz/5GHz=0x%08x\n", ridx, sc->txpow20mhz[ridx], + sc->txpow40mhz_2ghz[ridx], sc->txpow40mhz_5ghz[ridx])); + } + + /* read factory-calibrated samples for temperature compensation */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI1_2GHZ); + sc->tssi_2ghz[0] = val & 0xff; /* [-4] */ + sc->tssi_2ghz[1] = val >> 8; /* [-3] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI2_2GHZ); + sc->tssi_2ghz[2] = val & 0xff; /* [-2] */ + sc->tssi_2ghz[3] = val >> 8; /* [-1] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI3_2GHZ); + sc->tssi_2ghz[4] = val & 0xff; /* [+0] */ + sc->tssi_2ghz[5] = val >> 8; /* [+1] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI4_2GHZ); + sc->tssi_2ghz[6] = val & 0xff; /* [+2] */ + sc->tssi_2ghz[7] = val >> 8; /* [+3] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI5_2GHZ); + sc->tssi_2ghz[8] = val & 0xff; /* [+4] */ + sc->step_2ghz = val >> 8; + DPRINTF(("TSSI 2GHz: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x " + "0x%02x 0x%02x step=%d\n", sc->tssi_2ghz[0], sc->tssi_2ghz[1], + sc->tssi_2ghz[2], sc->tssi_2ghz[3], sc->tssi_2ghz[4], + sc->tssi_2ghz[5], sc->tssi_2ghz[6], sc->tssi_2ghz[7], + sc->tssi_2ghz[8], sc->step_2ghz)); + /* check that ref value is correct, otherwise disable calibration */ + if (sc->tssi_2ghz[4] == 0xff) + sc->calib_2ghz = 0; + + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI1_5GHZ); + sc->tssi_5ghz[0] = val & 0xff; /* [-4] */ + sc->tssi_5ghz[1] = val >> 8; /* [-3] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI2_5GHZ); + sc->tssi_5ghz[2] = val & 0xff; /* [-2] */ + sc->tssi_5ghz[3] = val >> 8; /* [-1] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI3_5GHZ); + sc->tssi_5ghz[4] = val & 0xff; /* [+0] */ + sc->tssi_5ghz[5] = val >> 8; /* [+1] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI4_5GHZ); + sc->tssi_5ghz[6] = val & 0xff; /* [+2] */ + sc->tssi_5ghz[7] = val >> 8; /* [+3] */ + val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI5_5GHZ); + sc->tssi_5ghz[8] = val & 0xff; /* [+4] */ + sc->step_5ghz = val >> 8; + DPRINTF(("TSSI 5GHz: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x " + "0x%02x 0x%02x step=%d\n", sc->tssi_5ghz[0], sc->tssi_5ghz[1], + sc->tssi_5ghz[2], sc->tssi_5ghz[3], sc->tssi_5ghz[4], + sc->tssi_5ghz[5], sc->tssi_5ghz[6], sc->tssi_5ghz[7], + sc->tssi_5ghz[8], sc->step_5ghz)); + /* check that ref value is correct, otherwise disable calibration */ + if 
(sc->tssi_5ghz[4] == 0xff) + sc->calib_5ghz = 0; + + /* read RSSI offsets and LNA gains from EEPROM */ + val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI1_2GHZ); + sc->rssi_2ghz[0] = val & 0xff; /* Ant A */ + sc->rssi_2ghz[1] = val >> 8; /* Ant B */ + val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI2_2GHZ); + if (sc->mac_ver >= 0x3071) { + /* + * On RT3090 chips (limited to 2 Rx chains), this ROM + * field contains the Tx mixer gain for the 2GHz band. + */ + if ((val & 0xff) != 0xff) + sc->txmixgain_2ghz = val & 0x7; + DPRINTF(("tx mixer gain=%u (2GHz)\n", sc->txmixgain_2ghz)); + } else + sc->rssi_2ghz[2] = val & 0xff; /* Ant C */ + sc->lna[2] = val >> 8; /* channel group 2 */ + + val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI1_5GHZ); + sc->rssi_5ghz[0] = val & 0xff; /* Ant A */ + sc->rssi_5ghz[1] = val >> 8; /* Ant B */ + val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI2_5GHZ); + sc->rssi_5ghz[2] = val & 0xff; /* Ant C */ + sc->lna[3] = val >> 8; /* channel group 3 */ + + val = rt2860_srom_read(sc, RT2860_EEPROM_LNA); + if (sc->mac_ver >= 0x3071) + sc->lna[0] = RT3090_DEF_LNA; + else /* channel group 0 */ + sc->lna[0] = val & 0xff; + sc->lna[1] = val >> 8; /* channel group 1 */ + + /* fix broken 5GHz LNA entries */ + if (sc->lna[2] == 0 || sc->lna[2] == 0xff) { + DPRINTF(("invalid LNA for channel group %d\n", 2)); + sc->lna[2] = sc->lna[1]; + } + if (sc->lna[3] == 0 || sc->lna[3] == 0xff) { + DPRINTF(("invalid LNA for channel group %d\n", 3)); + sc->lna[3] = sc->lna[1]; + } + + /* fix broken RSSI offset entries */ + for (ant = 0; ant < 3; ant++) { + if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) { + DPRINTF(("invalid RSSI%d offset: %d (2GHz)\n", + ant + 1, sc->rssi_2ghz[ant])); + sc->rssi_2ghz[ant] = 0; + } + if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) { + DPRINTF(("invalid RSSI%d offset: %d (5GHz)\n", + ant + 1, sc->rssi_5ghz[ant])); + sc->rssi_5ghz[ant] = 0; + } + } + + return 0; +} + +int +rt2860_bbp_init(struct rt2860_softc *sc) +{ +#define N(a) (sizeof (a) / sizeof ((a)[0])) + int i, ntries; + + /* wait for BBP to wake up */ + for (ntries = 0; ntries < 20; ntries++) { + uint8_t bbp0 = rt2860_mcu_bbp_read(sc, 0); + if (bbp0 != 0 && bbp0 != 0xff) + break; + } + if (ntries == 20) { + device_printf(sc->sc_dev, + "timeout waiting for BBP to wake up\n"); + return ETIMEDOUT; + } + + /* initialize BBP registers to default values */ + for (i = 0; i < N(rt2860_def_bbp); i++) { + rt2860_mcu_bbp_write(sc, rt2860_def_bbp[i].reg, + rt2860_def_bbp[i].val); + } + + /* fix BBP84 for RT2860E */ + if (sc->mac_ver == 0x2860 && sc->mac_rev != 0x0101) + rt2860_mcu_bbp_write(sc, 84, 0x19); + + if (sc->mac_ver >= 0x3071) { + rt2860_mcu_bbp_write(sc, 79, 0x13); + rt2860_mcu_bbp_write(sc, 80, 0x05); + rt2860_mcu_bbp_write(sc, 81, 0x33); + } else if (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) { + rt2860_mcu_bbp_write(sc, 69, 0x16); + rt2860_mcu_bbp_write(sc, 73, 0x12); + } + + return 0; +#undef N +} + +static int +rt2860_txrx_enable(struct rt2860_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + uint32_t tmp; + int ntries; + + /* enable Tx/Rx DMA engine */ + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_TX_EN); + RAL_BARRIER_READ_WRITE(sc); + for (ntries = 0; ntries < 200; ntries++) { + tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); + if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) + break; + DELAY(1000); + } + if (ntries == 200) { + device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); + return ETIMEDOUT; + } + + DELAY(50); + + tmp 
|= RT2860_RX_DMA_EN | RT2860_TX_DMA_EN | + RT2860_WPDMA_BT_SIZE64 << RT2860_WPDMA_BT_SIZE_SHIFT; + RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); + + /* set Rx filter */ + tmp = RT2860_DROP_CRC_ERR | RT2860_DROP_PHY_ERR; + if (ic->ic_opmode != IEEE80211_M_MONITOR) { + tmp |= RT2860_DROP_UC_NOME | RT2860_DROP_DUPL | + RT2860_DROP_CTS | RT2860_DROP_BA | RT2860_DROP_ACK | + RT2860_DROP_VER_ERR | RT2860_DROP_CTRL_RSV | + RT2860_DROP_CFACK | RT2860_DROP_CFEND; + if (ic->ic_opmode == IEEE80211_M_STA) + tmp |= RT2860_DROP_RTS | RT2860_DROP_PSPOLL; + } + RAL_WRITE(sc, RT2860_RX_FILTR_CFG, tmp); + + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, + RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); + + return 0; +} + +static void +rt2860_init(void *arg) +{ + struct rt2860_softc *sc = arg; + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + + RAL_LOCK(sc); + rt2860_init_locked(sc); + RAL_UNLOCK(sc); + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ieee80211_start_all(ic); +} + +static void +rt2860_init_locked(struct rt2860_softc *sc) +{ +#define N(a) (sizeof (a) / sizeof ((a)[0])) + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + uint32_t tmp; + uint8_t bbp1, bbp3; + int i, qid, ridx, ntries, error; + + RAL_LOCK_ASSERT(sc); + + if (sc->rfswitch) { + /* hardware has a radio switch on GPIO pin 2 */ + if (!(RAL_READ(sc, RT2860_GPIO_CTRL) & (1 << 2))) { + device_printf(sc->sc_dev, + "radio is disabled by hardware switch\n"); +#ifdef notyet + rt2860_stop_locked(sc); + return; +#endif + } + } + RAL_WRITE(sc, RT2860_PWR_PIN_CFG, RT2860_IO_RA_PE); + + /* disable DMA */ + tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); + tmp &= 0xff0; + RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); + + /* PBF hardware reset */ + RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe1f); + RAL_BARRIER_WRITE(sc); + RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe00); + + if ((error = rt2860_load_microcode(sc)) != 0) { + device_printf(sc->sc_dev, "could not load 8051 microcode\n"); + rt2860_stop_locked(sc); + return; + } + + rt2860_set_macaddr(sc, IF_LLADDR(ifp)); + + /* init Tx power for all Tx rates (from EEPROM) */ + for (ridx = 0; ridx < 5; ridx++) { + if (sc->txpow20mhz[ridx] == 0xffffffff) + continue; + RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]); + } + + for (ntries = 0; ntries < 100; ntries++) { + tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); + if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) + break; + DELAY(1000); + } + if (ntries == 100) { + device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); + rt2860_stop_locked(sc); + return; + } + tmp &= 0xff0; + RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); + + /* reset Rx ring and all 6 Tx rings */ + RAL_WRITE(sc, RT2860_WPDMA_RST_IDX, 0x1003f); + + /* PBF hardware reset */ + RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe1f); + RAL_BARRIER_WRITE(sc); + RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe00); + + RAL_WRITE(sc, RT2860_PWR_PIN_CFG, RT2860_IO_RA_PE | RT2860_IO_RF_PE); + + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); + RAL_BARRIER_WRITE(sc); + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0); + + for (i = 0; i < N(rt2860_def_mac); i++) + RAL_WRITE(sc, rt2860_def_mac[i].reg, rt2860_def_mac[i].val); + if (sc->mac_ver >= 0x3071) { + /* set delay of PA_PE assertion to 1us (unit of 0.25us) */ + RAL_WRITE(sc, RT2860_TX_SW_CFG0, + 4 << RT2860_DLY_PAPE_EN_SHIFT); + } + + if (!(RAL_READ(sc, RT2860_PCI_CFG) & RT2860_PCI_CFG_PCI)) { + sc->sc_flags |= RT2860_PCIE; + /* PCIe has different clock cycle count than PCI */ + tmp = RAL_READ(sc, RT2860_US_CYC_CNT); + tmp = (tmp & ~0xff) | 
0x7d; + RAL_WRITE(sc, RT2860_US_CYC_CNT, tmp); + } + + /* wait while MAC is busy */ + for (ntries = 0; ntries < 100; ntries++) { + if (!(RAL_READ(sc, RT2860_MAC_STATUS_REG) & + (RT2860_RX_STATUS_BUSY | RT2860_TX_STATUS_BUSY))) + break; + DELAY(1000); + } + if (ntries == 100) { + device_printf(sc->sc_dev, "timeout waiting for MAC\n"); + rt2860_stop_locked(sc); + return; + } + + /* clear Host to MCU mailbox */ + RAL_WRITE(sc, RT2860_H2M_BBPAGENT, 0); + RAL_WRITE(sc, RT2860_H2M_MAILBOX, 0); + + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0, 0); + DELAY(1000); + + if ((error = rt2860_bbp_init(sc)) != 0) { + rt2860_stop_locked(sc); + return; + } + + /* clear RX WCID search table */ + RAL_SET_REGION_4(sc, RT2860_WCID_ENTRY(0), 0, 512); + /* clear pairwise key table */ + RAL_SET_REGION_4(sc, RT2860_PKEY(0), 0, 2048); + /* clear IV/EIV table */ + RAL_SET_REGION_4(sc, RT2860_IVEIV(0), 0, 512); + /* clear WCID attribute table */ + RAL_SET_REGION_4(sc, RT2860_WCID_ATTR(0), 0, 256); + /* clear shared key table */ + RAL_SET_REGION_4(sc, RT2860_SKEY(0, 0), 0, 8 * 32); + /* clear shared key mode */ + RAL_SET_REGION_4(sc, RT2860_SKEY_MODE_0_7, 0, 4); + + /* init Tx rings (4 EDCAs + HCCA + Mgt) */ + for (qid = 0; qid < 6; qid++) { + RAL_WRITE(sc, RT2860_TX_BASE_PTR(qid), sc->txq[qid].paddr); + RAL_WRITE(sc, RT2860_TX_MAX_CNT(qid), RT2860_TX_RING_COUNT); + RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), 0); + } + + /* init Rx ring */ + RAL_WRITE(sc, RT2860_RX_BASE_PTR, sc->rxq.paddr); + RAL_WRITE(sc, RT2860_RX_MAX_CNT, RT2860_RX_RING_COUNT); + RAL_WRITE(sc, RT2860_RX_CALC_IDX, RT2860_RX_RING_COUNT - 1); + + /* setup maximum buffer sizes */ + RAL_WRITE(sc, RT2860_MAX_LEN_CFG, 1 << 12 | + (MCLBYTES - sizeof (struct rt2860_rxwi) - 2)); + + for (ntries = 0; ntries < 100; ntries++) { + tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG); + if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0) + break; + DELAY(1000); + } + if (ntries == 100) { + device_printf(sc->sc_dev, "timeout waiting for DMA engine\n"); + rt2860_stop_locked(sc); + return; + } + tmp &= 0xff0; + RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp); + + /* disable interrupts mitigation */ + RAL_WRITE(sc, RT2860_DELAY_INT_CFG, 0); + + /* write vendor-specific BBP values (from EEPROM) */ + for (i = 0; i < 8; i++) { + if (sc->bbp[i].reg == 0 || sc->bbp[i].reg == 0xff) + continue; + rt2860_mcu_bbp_write(sc, sc->bbp[i].reg, sc->bbp[i].val); + } + + /* select Main antenna for 1T1R devices */ + if (sc->rf_rev == RT3070_RF_2020 || + sc->rf_rev == RT3070_RF_3020 || + sc->rf_rev == RT3070_RF_3320) + rt3090_set_rx_antenna(sc, 0); + + /* send LEDs operating mode to microcontroller */ + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED1, sc->led[0], 0); + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1], 0); + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2], 0); + + if (sc->mac_ver >= 0x3071) + rt3090_rf_init(sc); + + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_SLEEP, 0x02ff, 1); + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_WAKEUP, 0, 1); + + if (sc->mac_ver >= 0x3071) + rt3090_rf_wakeup(sc); + + /* disable non-existing Rx chains */ + bbp3 = rt2860_mcu_bbp_read(sc, 3); + bbp3 &= ~(1 << 3 | 1 << 4); + if (sc->nrxchains == 2) + bbp3 |= 1 << 3; + else if (sc->nrxchains == 3) + bbp3 |= 1 << 4; + rt2860_mcu_bbp_write(sc, 3, bbp3); + + /* disable non-existing Tx chains */ + bbp1 = rt2860_mcu_bbp_read(sc, 1); + if (sc->ntxchains == 1) + bbp1 = (bbp1 & ~(1 << 3 | 1 << 4)); + else if (sc->mac_ver == 0x3593 && sc->ntxchains == 2) + bbp1 = (bbp1 & ~(1 << 4)) | 1 << 3; + else if (sc->mac_ver == 0x3593 && sc->ntxchains == 
3) + bbp1 = (bbp1 & ~(1 << 3)) | 1 << 4; + rt2860_mcu_bbp_write(sc, 1, bbp1); + + if (sc->mac_ver >= 0x3071) + rt3090_rf_setup(sc); + + /* select default channel */ + rt2860_switch_chan(sc, ic->ic_curchan); + + /* reset RF from MCU */ + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0, 0); + + /* set RTS threshold */ + tmp = RAL_READ(sc, RT2860_TX_RTS_CFG); + tmp &= ~0xffff00; + tmp |= IEEE80211_RTS_DEFAULT << 8; + RAL_WRITE(sc, RT2860_TX_RTS_CFG, tmp); + + /* setup initial protection mode */ + rt2860_updateprot(ifp); + + /* turn radio LED on */ + rt2860_set_leds(sc, RT2860_LED_RADIO); + + /* enable Tx/Rx DMA engine */ + if ((error = rt2860_txrx_enable(sc)) != 0) { + rt2860_stop_locked(sc); + return; + } + + /* clear pending interrupts */ + RAL_WRITE(sc, RT2860_INT_STATUS, 0xffffffff); + /* enable interrupts */ + RAL_WRITE(sc, RT2860_INT_MASK, 0x3fffc); + + if (sc->sc_flags & RT2860_ADVANCED_PS) + rt2860_mcu_cmd(sc, RT2860_MCU_CMD_PSLEVEL, sc->pslevel, 0); + + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + ifp->if_drv_flags |= IFF_DRV_RUNNING; + + callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc); +#undef N +} + +static void +rt2860_stop(void *arg) +{ + struct rt2860_softc *sc = arg; + + RAL_LOCK(sc); + rt2860_stop_locked(sc); + RAL_UNLOCK(sc); +} + +static void +rt2860_stop_locked(struct rt2860_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + uint32_t tmp; + int qid; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + rt2860_set_leds(sc, 0); /* turn all LEDs off */ + + callout_stop(&sc->watchdog_ch); + sc->sc_tx_timer = 0; + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + /* disable interrupts */ + RAL_WRITE(sc, RT2860_INT_MASK, 0); + + /* disable GP timer */ + rt2860_set_gp_timer(sc, 0); + + /* disable Rx */ + tmp = RAL_READ(sc, RT2860_MAC_SYS_CTRL); + tmp &= ~(RT2860_MAC_RX_EN | RT2860_MAC_TX_EN); + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, tmp); + + /* reset adapter */ + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST); + RAL_BARRIER_WRITE(sc); + RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0); + + /* reset Tx and Rx rings (and reclaim TXWIs) */ + sc->qfullmsk = 0; + for (qid = 0; qid < 6; qid++) + rt2860_reset_tx_ring(sc, &sc->txq[qid]); + rt2860_reset_rx_ring(sc, &sc->rxq); +} + +int +rt2860_load_microcode(struct rt2860_softc *sc) +{ + const struct firmware *fp; + int ntries, error; + + RAL_LOCK_ASSERT(sc); + + RAL_UNLOCK(sc); + fp = firmware_get("rt2860fw"); + RAL_LOCK(sc); + if (fp == NULL) { + device_printf(sc->sc_dev, + "unable to retrieve rt2860fw firmware image\n"); + return EINVAL; + } + + /* set "host program ram write selection" bit */ + RAL_WRITE(sc, RT2860_SYS_CTRL, RT2860_HST_PM_SEL); + /* write microcode image */ + RAL_WRITE_REGION_1(sc, RT2860_FW_BASE, fp->data, fp->datasize); + /* kick microcontroller unit */ + RAL_WRITE(sc, RT2860_SYS_CTRL, 0); + RAL_BARRIER_WRITE(sc); + RAL_WRITE(sc, RT2860_SYS_CTRL, RT2860_MCU_RESET); + + RAL_WRITE(sc, RT2860_H2M_BBPAGENT, 0); + RAL_WRITE(sc, RT2860_H2M_MAILBOX, 0); + + /* wait until microcontroller is ready */ + RAL_BARRIER_READ_WRITE(sc); + for (ntries = 0; ntries < 1000; ntries++) { + if (RAL_READ(sc, RT2860_SYS_CTRL) & RT2860_MCU_READY) + break; + DELAY(1000); + } + if (ntries == 1000) { + device_printf(sc->sc_dev, + "timeout waiting for MCU to initialize\n"); + error = ETIMEDOUT; + } else + error = 0; + + firmware_put(fp, FIRMWARE_UNLOAD); + return error; +} + +/* + * This function is called periodically to adjust Tx power based on + * temperature variation.
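+ *
+ * In outline (the code below is still under #ifdef NOT_YET): the current
+ * temperature reading in BBP register 49 is compared against the nine
+ * factory-calibrated TSSI samples of the current band to pick a step
+ * count d in [-4, +4]; d is then scaled by the per-band step size and
+ * applied to every per-rate Tx power word, one 4-bit field at a time:
+ *
+ *	d *= step;
+ *	RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx),
+ *	    b4inc(sc->txpow20mhz[ridx], d));
+ *
+ * b4inc() adds its delta to each 4-bit field with saturation, e.g.
+ * b4inc(0x44444444, 2) == 0x66666666, while b4inc(0x11111111, -3)
+ * clamps every field at 0 and yields 0x00000000.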
+ */ +#ifdef NOT_YET +static void +rt2860_calib(struct rt2860_softc *sc) +{ + struct ieee80211com *ic = &sc->sc_ic; + const uint8_t *tssi; + uint8_t step, bbp49; + int8_t ridx, d; + + /* read current temperature */ + bbp49 = rt2860_mcu_bbp_read(sc, 49); + + if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bss->ni_chan)) { + tssi = &sc->tssi_2ghz[4]; + step = sc->step_2ghz; + } else { + tssi = &sc->tssi_5ghz[4]; + step = sc->step_5ghz; + } + + if (bbp49 < tssi[0]) { /* lower than reference */ + /* use higher Tx power than default */ + for (d = 0; d > -4 && bbp49 <= tssi[d - 1]; d--); + } else if (bbp49 > tssi[0]) { /* greater than reference */ + /* use lower Tx power than default */ + for (d = 0; d < +4 && bbp49 >= tssi[d + 1]; d++); + } else { + /* use default Tx power */ + d = 0; + } + d *= step; + + DPRINTF(("BBP49=0x%02x, adjusting Tx power by %d\n", bbp49, d)); + + /* write adjusted Tx power values for each Tx rate */ + for (ridx = 0; ridx < 5; ridx++) { + if (sc->txpow20mhz[ridx] == 0xffffffff) + continue; + RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx), + b4inc(sc->txpow20mhz[ridx], d)); + } +} +#endif + +static void +rt3090_set_rx_antenna(struct rt2860_softc *sc, int aux) +{ + uint32_t tmp; + + if (aux) { + tmp = RAL_READ(sc, RT2860_PCI_EECTRL); + RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp & ~RT2860_C); + tmp = RAL_READ(sc, RT2860_GPIO_CTRL); + RAL_WRITE(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08); + } else { + tmp = RAL_READ(sc, RT2860_PCI_EECTRL); + RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp | RT2860_C); + tmp = RAL_READ(sc, RT2860_GPIO_CTRL); + RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp & ~0x0808); + } +} + +static void +rt2860_switch_chan(struct rt2860_softc *sc, struct ieee80211_channel *c) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + u_int chan, group; + + chan = ieee80211_chan2ieee(ic, c); + if (chan == 0 || chan == IEEE80211_CHAN_ANY) + return; + + if (sc->mac_ver >= 0x3071) + rt3090_set_chan(sc, chan); + else + rt2860_set_chan(sc, chan); + + /* determine channel group */ + if (chan <= 14) + group = 0; + else if (chan <= 64) + group = 1; + else if (chan <= 128) + group = 2; + else + group = 3; + + /* XXX necessary only when group has changed! */ + rt2860_select_chan_group(sc, group); + + DELAY(1000); +} + +static int +rt2860_setup_beacon(struct rt2860_softc *sc, struct ieee80211vap *vap) +{ + struct ieee80211com *ic = vap->iv_ic; + struct ieee80211_beacon_offsets bo; + struct rt2860_txwi txwi; + struct mbuf *m; + int ridx; + + if ((m = ieee80211_beacon_alloc(vap->iv_bss, &bo)) == NULL) + return ENOBUFS; + + memset(&txwi, 0, sizeof txwi); + txwi.wcid = 0xff; + txwi.len = htole16(m->m_pkthdr.len); + /* send beacons at the lowest available rate */ + ridx = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 
+ RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1; + txwi.phy = htole16(rt2860_rates[ridx].mcs); + if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) + txwi.phy |= htole16(RT2860_PHY_OFDM); + txwi.txop = RT2860_TX_TXOP_HT; + txwi.flags = RT2860_TX_TS; + txwi.xflags = RT2860_TX_NSEQ; + + RAL_WRITE_REGION_1(sc, RT2860_BCN_BASE(0), + (uint8_t *)&txwi, sizeof txwi); + RAL_WRITE_REGION_1(sc, RT2860_BCN_BASE(0) + sizeof txwi, + mtod(m, uint8_t *), m->m_pkthdr.len); + + m_freem(m); + + return 0; +} + +static void +rt2860_enable_tsf_sync(struct rt2860_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + struct ieee80211com *ic = ifp->if_l2com; + struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); + uint32_t tmp; + + tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG); + + tmp &= ~0x1fffff; + tmp |= vap->iv_bss->ni_intval * 16; + tmp |= RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN; + if (vap->iv_opmode == IEEE80211_M_STA) { + /* + * Local TSF is always updated with remote TSF on beacon + * reception. + */ + tmp |= 1 << RT2860_TSF_SYNC_MODE_SHIFT; + } + else if (vap->iv_opmode == IEEE80211_M_IBSS || + vap->iv_opmode == IEEE80211_M_MBSS) { + tmp |= RT2860_BCN_TX_EN; + /* + * Local TSF is updated with remote TSF on beacon reception + * only if the remote TSF is greater than local TSF. + */ + tmp |= 2 << RT2860_TSF_SYNC_MODE_SHIFT; + } else if (vap->iv_opmode == IEEE80211_M_HOSTAP) { + tmp |= RT2860_BCN_TX_EN; + /* SYNC with nobody */ + tmp |= 3 << RT2860_TSF_SYNC_MODE_SHIFT; + } + + RAL_WRITE(sc, RT2860_BCN_TIME_CFG, tmp); +} Property changes on: projects/nand/sys/dev/ral/rt2860.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/nand/sys/dev/ral/rt2860reg.h =================================================================== --- projects/nand/sys/dev/ral/rt2860reg.h (nonexistent) +++ projects/nand/sys/dev/ral/rt2860reg.h (revision 235263) @@ -0,0 +1,1255 @@ +/*- + * Copyright (c) 2007 Damien Bergamini + * Copyright (c) 2012 Bernhard Schmidt + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * $OpenBSD: rt2860reg.h,v 1.30 2010/05/10 18:17:10 damien Exp $ + * $FreeBSD$ + */ + +#define RT2860_NOISE_FLOOR -95 + +/* PCI registers */ +#define RT2860_PCI_CFG 0x0000 +#define RT2860_PCI_EECTRL 0x0004 +#define RT2860_PCI_MCUCTRL 0x0008 +#define RT2860_PCI_SYSCTRL 0x000c +#define RT2860_PCIE_JTAG 0x0010 + +#define RT3090_AUX_CTRL 0x010c + +#define RT3070_OPT_14 0x0114 + +/* SCH/DMA registers */ +#define RT2860_INT_STATUS 0x0200 +#define RT2860_INT_MASK 0x0204 +#define RT2860_WPDMA_GLO_CFG 0x0208 +#define RT2860_WPDMA_RST_IDX 0x020c +#define RT2860_DELAY_INT_CFG 0x0210 +#define RT2860_WMM_AIFSN_CFG 0x0214 +#define RT2860_WMM_CWMIN_CFG 0x0218 +#define RT2860_WMM_CWMAX_CFG 0x021c +#define RT2860_WMM_TXOP0_CFG 0x0220 +#define RT2860_WMM_TXOP1_CFG 0x0224 +#define RT2860_GPIO_CTRL 0x0228 +#define RT2860_MCU_CMD_REG 0x022c +#define RT2860_TX_BASE_PTR(qid) (0x0230 + (qid) * 16) +#define RT2860_TX_MAX_CNT(qid) (0x0234 + (qid) * 16) +#define RT2860_TX_CTX_IDX(qid) (0x0238 + (qid) * 16) +#define RT2860_TX_DTX_IDX(qid) (0x023c + (qid) * 16) +#define RT2860_RX_BASE_PTR 0x0290 +#define RT2860_RX_MAX_CNT 0x0294 +#define RT2860_RX_CALC_IDX 0x0298 +#define RT2860_FS_DRX_IDX 0x029c +#define RT2860_USB_DMA_CFG 0x02a0 /* RT2870 only */ +#define RT2860_US_CYC_CNT 0x02a4 + +/* PBF registers */ +#define RT2860_SYS_CTRL 0x0400 +#define RT2860_HOST_CMD 0x0404 +#define RT2860_PBF_CFG 0x0408 +#define RT2860_MAX_PCNT 0x040c +#define RT2860_BUF_CTRL 0x0410 +#define RT2860_MCU_INT_STA 0x0414 +#define RT2860_MCU_INT_ENA 0x0418 +#define RT2860_TXQ_IO(qid) (0x041c + (qid) * 4) +#define RT2860_RX0Q_IO 0x0424 +#define RT2860_BCN_OFFSET0 0x042c +#define RT2860_BCN_OFFSET1 0x0430 +#define RT2860_TXRXQ_STA 0x0434 +#define RT2860_TXRXQ_PCNT 0x0438 +#define RT2860_PBF_DBG 0x043c +#define RT2860_CAP_CTRL 0x0440 + +/* RT3070 registers */ +#define RT3070_RF_CSR_CFG 0x0500 +#define RT3070_EFUSE_CTRL 0x0580 +#define RT3070_EFUSE_DATA0 0x0590 +#define RT3070_EFUSE_DATA1 0x0594 +#define RT3070_EFUSE_DATA2 0x0598 +#define RT3070_EFUSE_DATA3 0x059c +#define RT3090_OSC_CTRL 0x05a4 +#define RT3070_LDO_CFG0 0x05d4 +#define RT3070_GPIO_SWITCH 0x05dc + +/* MAC registers */ +#define RT2860_ASIC_VER_ID 0x1000 +#define RT2860_MAC_SYS_CTRL 0x1004 +#define RT2860_MAC_ADDR_DW0 0x1008 +#define RT2860_MAC_ADDR_DW1 0x100c +#define RT2860_MAC_BSSID_DW0 0x1010 +#define RT2860_MAC_BSSID_DW1 0x1014 +#define RT2860_MAX_LEN_CFG 0x1018 +#define RT2860_BBP_CSR_CFG 0x101c +#define RT2860_RF_CSR_CFG0 0x1020 +#define RT2860_RF_CSR_CFG1 0x1024 +#define RT2860_RF_CSR_CFG2 0x1028 +#define RT2860_LED_CFG 0x102c + +/* undocumented registers */ +#define RT2860_DEBUG 0x10f4 + +/* MAC Timing control registers */ +#define RT2860_XIFS_TIME_CFG 0x1100 +#define RT2860_BKOFF_SLOT_CFG 0x1104 +#define RT2860_NAV_TIME_CFG 0x1108 +#define RT2860_CH_TIME_CFG 0x110c +#define RT2860_PBF_LIFE_TIMER 0x1110 +#define RT2860_BCN_TIME_CFG 0x1114 +#define RT2860_TBTT_SYNC_CFG 0x1118 +#define RT2860_TSF_TIMER_DW0 0x111c +#define RT2860_TSF_TIMER_DW1 0x1120 +#define RT2860_TBTT_TIMER 0x1124 +#define RT2860_INT_TIMER_CFG 0x1128 +#define RT2860_INT_TIMER_EN 0x112c +#define RT2860_CH_IDLE_TIME 0x1130 + +/* MAC Power Save configuration registers */ +#define RT2860_MAC_STATUS_REG 0x1200 +#define RT2860_PWR_PIN_CFG 0x1204 +#define RT2860_AUTO_WAKEUP_CFG 0x1208 + +/* MAC TX configuration registers */ +#define RT2860_EDCA_AC_CFG(aci) (0x1300 + (aci) * 4) +#define RT2860_EDCA_TID_AC_MAP 0x1310 +#define RT2860_TX_PWR_CFG(ridx) (0x1314 + (ridx) * 4) +#define RT2860_TX_PIN_CFG 0x1328 +#define 
RT2860_TX_BAND_CFG 0x132c +#define RT2860_TX_SW_CFG0 0x1330 +#define RT2860_TX_SW_CFG1 0x1334 +#define RT2860_TX_SW_CFG2 0x1338 +#define RT2860_TXOP_THRES_CFG 0x133c +#define RT2860_TXOP_CTRL_CFG 0x1340 +#define RT2860_TX_RTS_CFG 0x1344 +#define RT2860_TX_TIMEOUT_CFG 0x1348 +#define RT2860_TX_RTY_CFG 0x134c +#define RT2860_TX_LINK_CFG 0x1350 +#define RT2860_HT_FBK_CFG0 0x1354 +#define RT2860_HT_FBK_CFG1 0x1358 +#define RT2860_LG_FBK_CFG0 0x135c +#define RT2860_LG_FBK_CFG1 0x1360 +#define RT2860_CCK_PROT_CFG 0x1364 +#define RT2860_OFDM_PROT_CFG 0x1368 +#define RT2860_MM20_PROT_CFG 0x136c +#define RT2860_MM40_PROT_CFG 0x1370 +#define RT2860_GF20_PROT_CFG 0x1374 +#define RT2860_GF40_PROT_CFG 0x1378 +#define RT2860_EXP_CTS_TIME 0x137c +#define RT2860_EXP_ACK_TIME 0x1380 + +/* MAC RX configuration registers */ +#define RT2860_RX_FILTR_CFG 0x1400 +#define RT2860_AUTO_RSP_CFG 0x1404 +#define RT2860_LEGACY_BASIC_RATE 0x1408 +#define RT2860_HT_BASIC_RATE 0x140c +#define RT2860_HT_CTRL_CFG 0x1410 +#define RT2860_SIFS_COST_CFG 0x1414 +#define RT2860_RX_PARSER_CFG 0x1418 + +/* MAC Security configuration registers */ +#define RT2860_TX_SEC_CNT0 0x1500 +#define RT2860_RX_SEC_CNT0 0x1504 +#define RT2860_CCMP_FC_MUTE 0x1508 + +/* MAC HCCA/PSMP configuration registers */ +#define RT2860_TXOP_HLDR_ADDR0 0x1600 +#define RT2860_TXOP_HLDR_ADDR1 0x1604 +#define RT2860_TXOP_HLDR_ET 0x1608 +#define RT2860_QOS_CFPOLL_RA_DW0 0x160c +#define RT2860_QOS_CFPOLL_A1_DW1 0x1610 +#define RT2860_QOS_CFPOLL_QC 0x1614 + +/* MAC Statistics Counters */ +#define RT2860_RX_STA_CNT0 0x1700 +#define RT2860_RX_STA_CNT1 0x1704 +#define RT2860_RX_STA_CNT2 0x1708 +#define RT2860_TX_STA_CNT0 0x170c +#define RT2860_TX_STA_CNT1 0x1710 +#define RT2860_TX_STA_CNT2 0x1714 +#define RT2860_TX_STAT_FIFO 0x1718 + +/* RX WCID search table */ +#define RT2860_WCID_ENTRY(wcid) (0x1800 + (wcid) * 8) + +#define RT2860_FW_BASE 0x2000 +#define RT2870_FW_BASE 0x3000 + +/* Pair-wise key table */ +#define RT2860_PKEY(wcid) (0x4000 + (wcid) * 32) + +/* IV/EIV table */ +#define RT2860_IVEIV(wcid) (0x6000 + (wcid) * 8) + +/* WCID attribute table */ +#define RT2860_WCID_ATTR(wcid) (0x6800 + (wcid) * 4) + +/* Shared Key Table */ +#define RT2860_SKEY(vap, kidx) (0x6c00 + (vap) * 128 + (kidx) * 32) + +/* Shared Key Mode */ +#define RT2860_SKEY_MODE_0_7 0x7000 +#define RT2860_SKEY_MODE_8_15 0x7004 +#define RT2860_SKEY_MODE_16_23 0x7008 +#define RT2860_SKEY_MODE_24_31 0x700c + +/* Shared Memory between MCU and host */ +#define RT2860_H2M_MAILBOX 0x7010 +#define RT2860_H2M_MAILBOX_CID 0x7014 +#define RT2860_H2M_MAILBOX_STATUS 0x701c +#define RT2860_H2M_BBPAGENT 0x7028 +#define RT2860_BCN_BASE(vap) (0x7800 + (vap) * 512) + + +/* possible flags for RT2860_PCI_CFG */ +#define RT2860_PCI_CFG_USB (1 << 17) +#define RT2860_PCI_CFG_PCI (1 << 16) + +/* possible flags for register RT2860_PCI_EECTRL */ +#define RT2860_C (1 << 0) +#define RT2860_S (1 << 1) +#define RT2860_D (1 << 2) +#define RT2860_SHIFT_D 2 +#define RT2860_Q (1 << 3) +#define RT2860_SHIFT_Q 3 + +/* possible flags for registers INT_STATUS/INT_MASK */ +#define RT2860_TX_COHERENT (1 << 17) +#define RT2860_RX_COHERENT (1 << 16) +#define RT2860_MAC_INT_4 (1 << 15) +#define RT2860_MAC_INT_3 (1 << 14) +#define RT2860_MAC_INT_2 (1 << 13) +#define RT2860_MAC_INT_1 (1 << 12) +#define RT2860_MAC_INT_0 (1 << 11) +#define RT2860_TX_RX_COHERENT (1 << 10) +#define RT2860_MCU_CMD_INT (1 << 9) +#define RT2860_TX_DONE_INT5 (1 << 8) +#define RT2860_TX_DONE_INT4 (1 << 7) +#define RT2860_TX_DONE_INT3 (1 << 6) +#define 
RT2860_TX_DONE_INT2 (1 << 5) +#define RT2860_TX_DONE_INT1 (1 << 4) +#define RT2860_TX_DONE_INT0 (1 << 3) +#define RT2860_RX_DONE_INT (1 << 2) +#define RT2860_TX_DLY_INT (1 << 1) +#define RT2860_RX_DLY_INT (1 << 0) + +/* possible flags for register WPDMA_GLO_CFG */ +#define RT2860_HDR_SEG_LEN_SHIFT 8 +#define RT2860_BIG_ENDIAN (1 << 7) +#define RT2860_TX_WB_DDONE (1 << 6) +#define RT2860_WPDMA_BT_SIZE_SHIFT 4 +#define RT2860_WPDMA_BT_SIZE16 0 +#define RT2860_WPDMA_BT_SIZE32 1 +#define RT2860_WPDMA_BT_SIZE64 2 +#define RT2860_WPDMA_BT_SIZE128 3 +#define RT2860_RX_DMA_BUSY (1 << 3) +#define RT2860_RX_DMA_EN (1 << 2) +#define RT2860_TX_DMA_BUSY (1 << 1) +#define RT2860_TX_DMA_EN (1 << 0) + +/* possible flags for register DELAY_INT_CFG */ +#define RT2860_TXDLY_INT_EN (1 << 31) +#define RT2860_TXMAX_PINT_SHIFT 24 +#define RT2860_TXMAX_PTIME_SHIFT 16 +#define RT2860_RXDLY_INT_EN (1 << 15) +#define RT2860_RXMAX_PINT_SHIFT 8 +#define RT2860_RXMAX_PTIME_SHIFT 0 + +/* possible flags for register GPIO_CTRL */ +#define RT2860_GPIO_D_SHIFT 8 +#define RT2860_GPIO_O_SHIFT 0 + +/* possible flags for register USB_DMA_CFG */ +#define RT2860_USB_TX_BUSY (1 << 31) +#define RT2860_USB_RX_BUSY (1 << 30) +#define RT2860_USB_EPOUT_VLD_SHIFT 24 +#define RT2860_USB_TX_EN (1 << 23) +#define RT2860_USB_RX_EN (1 << 22) +#define RT2860_USB_RX_AGG_EN (1 << 21) +#define RT2860_USB_TXOP_HALT (1 << 20) +#define RT2860_USB_TX_CLEAR (1 << 19) +#define RT2860_USB_PHY_WD_EN (1 << 16) +#define RT2860_USB_PHY_MAN_RST (1 << 15) +#define RT2860_USB_RX_AGG_LMT(x) ((x) << 8) /* in unit of 1KB */ +#define RT2860_USB_RX_AGG_TO(x) ((x) & 0xff) /* in unit of 33ns */ + +/* possible flags for register US_CYC_CNT */ +#define RT2860_TEST_EN (1 << 24) +#define RT2860_TEST_SEL_SHIFT 16 +#define RT2860_BT_MODE_EN (1 << 8) +#define RT2860_US_CYC_CNT_SHIFT 0 + +/* possible flags for register SYS_CTRL */ +#define RT2860_HST_PM_SEL (1 << 16) +#define RT2860_CAP_MODE (1 << 14) +#define RT2860_PME_OEN (1 << 13) +#define RT2860_CLKSELECT (1 << 12) +#define RT2860_PBF_CLK_EN (1 << 11) +#define RT2860_MAC_CLK_EN (1 << 10) +#define RT2860_DMA_CLK_EN (1 << 9) +#define RT2860_MCU_READY (1 << 7) +#define RT2860_ASY_RESET (1 << 4) +#define RT2860_PBF_RESET (1 << 3) +#define RT2860_MAC_RESET (1 << 2) +#define RT2860_DMA_RESET (1 << 1) +#define RT2860_MCU_RESET (1 << 0) + +/* possible values for register HOST_CMD */ +#define RT2860_MCU_CMD_SLEEP 0x30 +#define RT2860_MCU_CMD_WAKEUP 0x31 +#define RT2860_MCU_CMD_LEDS 0x50 +#define RT2860_MCU_CMD_LED_RSSI 0x51 +#define RT2860_MCU_CMD_LED1 0x52 +#define RT2860_MCU_CMD_LED2 0x53 +#define RT2860_MCU_CMD_LED3 0x54 +#define RT2860_MCU_CMD_RFRESET 0x72 +#define RT2860_MCU_CMD_ANTSEL 0x73 +#define RT2860_MCU_CMD_BBP 0x80 +#define RT2860_MCU_CMD_PSLEVEL 0x83 + +/* possible flags for register PBF_CFG */ +#define RT2860_TX1Q_NUM_SHIFT 21 +#define RT2860_TX2Q_NUM_SHIFT 16 +#define RT2860_NULL0_MODE (1 << 15) +#define RT2860_NULL1_MODE (1 << 14) +#define RT2860_RX_DROP_MODE (1 << 13) +#define RT2860_TX0Q_MANUAL (1 << 12) +#define RT2860_TX1Q_MANUAL (1 << 11) +#define RT2860_TX2Q_MANUAL (1 << 10) +#define RT2860_RX0Q_MANUAL (1 << 9) +#define RT2860_HCCA_EN (1 << 8) +#define RT2860_TX0Q_EN (1 << 4) +#define RT2860_TX1Q_EN (1 << 3) +#define RT2860_TX2Q_EN (1 << 2) +#define RT2860_RX0Q_EN (1 << 1) + +/* possible flags for register BUF_CTRL */ +#define RT2860_WRITE_TXQ(qid) (1 << (11 - (qid))) +#define RT2860_NULL0_KICK (1 << 7) +#define RT2860_NULL1_KICK (1 << 6) +#define RT2860_BUF_RESET (1 << 5) +#define RT2860_READ_TXQ(qid) (1 
<< (3 - (qid))) +#define RT2860_READ_RX0Q (1 << 0) + +/* possible flags for registers MCU_INT_STA/MCU_INT_ENA */ +#define RT2860_MCU_MAC_INT_8 (1 << 24) +#define RT2860_MCU_MAC_INT_7 (1 << 23) +#define RT2860_MCU_MAC_INT_6 (1 << 22) +#define RT2860_MCU_MAC_INT_4 (1 << 20) +#define RT2860_MCU_MAC_INT_3 (1 << 19) +#define RT2860_MCU_MAC_INT_2 (1 << 18) +#define RT2860_MCU_MAC_INT_1 (1 << 17) +#define RT2860_MCU_MAC_INT_0 (1 << 16) +#define RT2860_DTX0_INT (1 << 11) +#define RT2860_DTX1_INT (1 << 10) +#define RT2860_DTX2_INT (1 << 9) +#define RT2860_DRX0_INT (1 << 8) +#define RT2860_HCMD_INT (1 << 7) +#define RT2860_N0TX_INT (1 << 6) +#define RT2860_N1TX_INT (1 << 5) +#define RT2860_BCNTX_INT (1 << 4) +#define RT2860_MTX0_INT (1 << 3) +#define RT2860_MTX1_INT (1 << 2) +#define RT2860_MTX2_INT (1 << 1) +#define RT2860_MRX0_INT (1 << 0) + +/* possible flags for register TXRXQ_PCNT */ +#define RT2860_RX0Q_PCNT_MASK 0xff000000 +#define RT2860_TX2Q_PCNT_MASK 0x00ff0000 +#define RT2860_TX1Q_PCNT_MASK 0x0000ff00 +#define RT2860_TX0Q_PCNT_MASK 0x000000ff + +/* possible flags for register CAP_CTRL */ +#define RT2860_CAP_ADC_FEQ (1 << 31) +#define RT2860_CAP_START (1 << 30) +#define RT2860_MAN_TRIG (1 << 29) +#define RT2860_TRIG_OFFSET_SHIFT 16 +#define RT2860_START_ADDR_SHIFT 0 + +/* possible flags for register RF_CSR_CFG */ +#define RT3070_RF_KICK (1 << 17) +#define RT3070_RF_WRITE (1 << 16) + +/* possible flags for register EFUSE_CTRL */ +#define RT3070_SEL_EFUSE (1 << 31) +#define RT3070_EFSROM_KICK (1 << 30) +#define RT3070_EFSROM_AIN_MASK 0x03ff0000 +#define RT3070_EFSROM_AIN_SHIFT 16 +#define RT3070_EFSROM_MODE_MASK 0x000000c0 +#define RT3070_EFUSE_AOUT_MASK 0x0000003f + +/* possible flags for register MAC_SYS_CTRL */ +#define RT2860_RX_TS_EN (1 << 7) +#define RT2860_WLAN_HALT_EN (1 << 6) +#define RT2860_PBF_LOOP_EN (1 << 5) +#define RT2860_CONT_TX_TEST (1 << 4) +#define RT2860_MAC_RX_EN (1 << 3) +#define RT2860_MAC_TX_EN (1 << 2) +#define RT2860_BBP_HRST (1 << 1) +#define RT2860_MAC_SRST (1 << 0) + +/* possible flags for register MAC_BSSID_DW1 */ +#define RT2860_MULTI_BCN_NUM_SHIFT 18 +#define RT2860_MULTI_BSSID_MODE_SHIFT 16 + +/* possible flags for register MAX_LEN_CFG */ +#define RT2860_MIN_MPDU_LEN_SHIFT 16 +#define RT2860_MAX_PSDU_LEN_SHIFT 12 +#define RT2860_MAX_PSDU_LEN8K 0 +#define RT2860_MAX_PSDU_LEN16K 1 +#define RT2860_MAX_PSDU_LEN32K 2 +#define RT2860_MAX_PSDU_LEN64K 3 +#define RT2860_MAX_MPDU_LEN_SHIFT 0 + +/* possible flags for registers BBP_CSR_CFG/H2M_BBPAGENT */ +#define RT2860_BBP_RW_PARALLEL (1 << 19) +#define RT2860_BBP_PAR_DUR_112_5 (1 << 18) +#define RT2860_BBP_CSR_KICK (1 << 17) +#define RT2860_BBP_CSR_READ (1 << 16) +#define RT2860_BBP_ADDR_SHIFT 8 +#define RT2860_BBP_DATA_SHIFT 0 + +/* possible flags for register RF_CSR_CFG0 */ +#define RT2860_RF_REG_CTRL (1 << 31) +#define RT2860_RF_LE_SEL1 (1 << 30) +#define RT2860_RF_LE_STBY (1 << 29) +#define RT2860_RF_REG_WIDTH_SHIFT 24 +#define RT2860_RF_REG_0_SHIFT 0 + +/* possible flags for register RF_CSR_CFG1 */ +#define RT2860_RF_DUR_5 (1 << 24) +#define RT2860_RF_REG_1_SHIFT 0 + +/* possible flags for register LED_CFG */ +#define RT2860_LED_POL (1 << 30) +#define RT2860_Y_LED_MODE_SHIFT 28 +#define RT2860_G_LED_MODE_SHIFT 26 +#define RT2860_R_LED_MODE_SHIFT 24 +#define RT2860_LED_MODE_OFF 0 +#define RT2860_LED_MODE_BLINK_TX 1 +#define RT2860_LED_MODE_SLOW_BLINK 2 +#define RT2860_LED_MODE_ON 3 +#define RT2860_SLOW_BLK_TIME_SHIFT 16 +#define RT2860_LED_OFF_TIME_SHIFT 8 +#define RT2860_LED_ON_TIME_SHIFT 0 + +/* possible flags
for register XIFS_TIME_CFG */ +#define RT2860_BB_RXEND_EN (1 << 29) +#define RT2860_EIFS_TIME_SHIFT 20 +#define RT2860_OFDM_XIFS_TIME_SHIFT 16 +#define RT2860_OFDM_SIFS_TIME_SHIFT 8 +#define RT2860_CCK_SIFS_TIME_SHIFT 0 + +/* possible flags for register BKOFF_SLOT_CFG */ +#define RT2860_CC_DELAY_TIME_SHIFT 8 +#define RT2860_SLOT_TIME 0 + +/* possible flags for register NAV_TIME_CFG */ +#define RT2860_NAV_UPD (1 << 31) +#define RT2860_NAV_UPD_VAL_SHIFT 16 +#define RT2860_NAV_CLR_EN (1 << 15) +#define RT2860_NAV_TIMER_SHIFT 0 + +/* possible flags for register CH_TIME_CFG */ +#define RT2860_EIFS_AS_CH_BUSY (1 << 4) +#define RT2860_NAV_AS_CH_BUSY (1 << 3) +#define RT2860_RX_AS_CH_BUSY (1 << 2) +#define RT2860_TX_AS_CH_BUSY (1 << 1) +#define RT2860_CH_STA_TIMER_EN (1 << 0) + +/* possible values for register BCN_TIME_CFG */ +#define RT2860_TSF_INS_COMP_SHIFT 24 +#define RT2860_BCN_TX_EN (1 << 20) +#define RT2860_TBTT_TIMER_EN (1 << 19) +#define RT2860_TSF_SYNC_MODE_SHIFT 17 +#define RT2860_TSF_SYNC_MODE_DIS 0 +#define RT2860_TSF_SYNC_MODE_STA 1 +#define RT2860_TSF_SYNC_MODE_IBSS 2 +#define RT2860_TSF_SYNC_MODE_HOSTAP 3 +#define RT2860_TSF_TIMER_EN (1 << 16) +#define RT2860_BCN_INTVAL_SHIFT 0 + +/* possible flags for register TBTT_SYNC_CFG */ +#define RT2860_BCN_CWMIN_SHIFT 20 +#define RT2860_BCN_AIFSN_SHIFT 16 +#define RT2860_BCN_EXP_WIN_SHIFT 8 +#define RT2860_TBTT_ADJUST_SHIFT 0 + +/* possible flags for register INT_TIMER_CFG */ +#define RT2860_GP_TIMER_SHIFT 16 +#define RT2860_PRE_TBTT_TIMER_SHIFT 0 + +/* possible flags for register INT_TIMER_EN */ +#define RT2860_GP_TIMER_EN (1 << 1) +#define RT2860_PRE_TBTT_INT_EN (1 << 0) + +/* possible flags for register MAC_STATUS_REG */ +#define RT2860_RX_STATUS_BUSY (1 << 1) +#define RT2860_TX_STATUS_BUSY (1 << 0) + +/* possible flags for register PWR_PIN_CFG */ +#define RT2860_IO_ADDA_PD (1 << 3) +#define RT2860_IO_PLL_PD (1 << 2) +#define RT2860_IO_RA_PE (1 << 1) +#define RT2860_IO_RF_PE (1 << 0) + +/* possible flags for register AUTO_WAKEUP_CFG */ +#define RT2860_AUTO_WAKEUP_EN (1 << 15) +#define RT2860_SLEEP_TBTT_NUM_SHIFT 8 +#define RT2860_WAKEUP_LEAD_TIME_SHIFT 0 + +/* possible flags for register TX_PIN_CFG */ +#define RT3593_LNA_PE_G2_POL (1 << 31) +#define RT3593_LNA_PE_A2_POL (1 << 30) +#define RT3593_LNA_PE_G2_EN (1 << 29) +#define RT3593_LNA_PE_A2_EN (1 << 28) +#define RT3593_LNA_PE2_EN (RT3593_LNA_PE_A2_EN | RT3593_LNA_PE_G2_EN) +#define RT3593_PA_PE_G2_POL (1 << 27) +#define RT3593_PA_PE_A2_POL (1 << 26) +#define RT3593_PA_PE_G2_EN (1 << 25) +#define RT3593_PA_PE_A2_EN (1 << 24) +#define RT2860_TRSW_POL (1 << 19) +#define RT2860_TRSW_EN (1 << 18) +#define RT2860_RFTR_POL (1 << 17) +#define RT2860_RFTR_EN (1 << 16) +#define RT2860_LNA_PE_G1_POL (1 << 15) +#define RT2860_LNA_PE_A1_POL (1 << 14) +#define RT2860_LNA_PE_G0_POL (1 << 13) +#define RT2860_LNA_PE_A0_POL (1 << 12) +#define RT2860_LNA_PE_G1_EN (1 << 11) +#define RT2860_LNA_PE_A1_EN (1 << 10) +#define RT2860_LNA_PE1_EN (RT2860_LNA_PE_A1_EN | RT2860_LNA_PE_G1_EN) +#define RT2860_LNA_PE_G0_EN (1 << 9) +#define RT2860_LNA_PE_A0_EN (1 << 8) +#define RT2860_LNA_PE0_EN (RT2860_LNA_PE_A0_EN | RT2860_LNA_PE_G0_EN) +#define RT2860_PA_PE_G1_POL (1 << 7) +#define RT2860_PA_PE_A1_POL (1 << 6) +#define RT2860_PA_PE_G0_POL (1 << 5) +#define RT2860_PA_PE_A0_POL (1 << 4) +#define RT2860_PA_PE_G1_EN (1 << 3) +#define RT2860_PA_PE_A1_EN (1 << 2) +#define RT2860_PA_PE_G0_EN (1 << 1) +#define RT2860_PA_PE_A0_EN (1 << 0) + +/* possible flags for register TX_BAND_CFG */ +#define RT2860_5G_BAND_SEL_N (1 << 
2) +#define RT2860_5G_BAND_SEL_P (1 << 1) +#define RT2860_TX_BAND_SEL (1 << 0) + +/* possible flags for register TX_SW_CFG0 */ +#define RT2860_DLY_RFTR_EN_SHIFT 24 +#define RT2860_DLY_TRSW_EN_SHIFT 16 +#define RT2860_DLY_PAPE_EN_SHIFT 8 +#define RT2860_DLY_TXPE_EN_SHIFT 0 + +/* possible flags for register TX_SW_CFG1 */ +#define RT2860_DLY_RFTR_DIS_SHIFT 16 +#define RT2860_DLY_TRSW_DIS_SHIFT 8 +#define RT2860_DLY_PAPE_DIS_SHIFT 0 + +/* possible flags for register TX_SW_CFG2 */ +#define RT2860_DLY_LNA_EN_SHIFT 24 +#define RT2860_DLY_LNA_DIS_SHIFT 16 +#define RT2860_DLY_DAC_EN_SHIFT 8 +#define RT2860_DLY_DAC_DIS_SHIFT 0 + +/* possible flags for register TXOP_THRES_CFG */ +#define RT2860_TXOP_REM_THRES_SHIFT 24 +#define RT2860_CF_END_THRES_SHIFT 16 +#define RT2860_RDG_IN_THRES 8 +#define RT2860_RDG_OUT_THRES 0 + +/* possible flags for register TXOP_CTRL_CFG */ +#define RT2860_EXT_CW_MIN_SHIFT 16 +#define RT2860_EXT_CCA_DLY_SHIFT 8 +#define RT2860_EXT_CCA_EN (1 << 7) +#define RT2860_LSIG_TXOP_EN (1 << 6) +#define RT2860_TXOP_TRUN_EN_MIMOPS (1 << 4) +#define RT2860_TXOP_TRUN_EN_TXOP (1 << 3) +#define RT2860_TXOP_TRUN_EN_RATE (1 << 2) +#define RT2860_TXOP_TRUN_EN_AC (1 << 1) +#define RT2860_TXOP_TRUN_EN_TIMEOUT (1 << 0) + +/* possible flags for register TX_RTS_CFG */ +#define RT2860_RTS_FBK_EN (1 << 24) +#define RT2860_RTS_THRES_SHIFT 8 +#define RT2860_RTS_RTY_LIMIT_SHIFT 0 + +/* possible flags for register TX_TIMEOUT_CFG */ +#define RT2860_TXOP_TIMEOUT_SHIFT 16 +#define RT2860_RX_ACK_TIMEOUT_SHIFT 8 +#define RT2860_MPDU_LIFE_TIME_SHIFT 4 + +/* possible flags for register TX_RTY_CFG */ +#define RT2860_TX_AUTOFB_EN (1 << 30) +#define RT2860_AGG_RTY_MODE_TIMER (1 << 29) +#define RT2860_NAG_RTY_MODE_TIMER (1 << 28) +#define RT2860_LONG_RTY_THRES_SHIFT 16 +#define RT2860_LONG_RTY_LIMIT_SHIFT 8 +#define RT2860_SHORT_RTY_LIMIT_SHIFT 0 + +/* possible flags for register TX_LINK_CFG */ +#define RT2860_REMOTE_MFS_SHIFT 24 +#define RT2860_REMOTE_MFB_SHIFT 16 +#define RT2860_TX_CFACK_EN (1 << 12) +#define RT2860_TX_RDG_EN (1 << 11) +#define RT2860_TX_MRQ_EN (1 << 10) +#define RT2860_REMOTE_UMFS_EN (1 << 9) +#define RT2860_TX_MFB_EN (1 << 8) +#define RT2860_REMOTE_MFB_LT_SHIFT 0 + +/* possible flags for registers *_PROT_CFG */ +#define RT2860_RTSTH_EN (1 << 26) +#define RT2860_TXOP_ALLOW_GF40 (1 << 25) +#define RT2860_TXOP_ALLOW_GF20 (1 << 24) +#define RT2860_TXOP_ALLOW_MM40 (1 << 23) +#define RT2860_TXOP_ALLOW_MM20 (1 << 22) +#define RT2860_TXOP_ALLOW_OFDM (1 << 21) +#define RT2860_TXOP_ALLOW_CCK (1 << 20) +#define RT2860_TXOP_ALLOW_ALL (0x3f << 20) +#define RT2860_PROT_NAV_SHORT (1 << 18) +#define RT2860_PROT_NAV_LONG (2 << 18) +#define RT2860_PROT_CTRL_RTS_CTS (1 << 16) +#define RT2860_PROT_CTRL_CTS (2 << 16) + +/* possible flags for registers EXP_{CTS,ACK}_TIME */ +#define RT2860_EXP_OFDM_TIME_SHIFT 16 +#define RT2860_EXP_CCK_TIME_SHIFT 0 + +/* possible flags for register RX_FILTR_CFG */ +#define RT2860_DROP_CTRL_RSV (1 << 16) +#define RT2860_DROP_BAR (1 << 15) +#define RT2860_DROP_BA (1 << 14) +#define RT2860_DROP_PSPOLL (1 << 13) +#define RT2860_DROP_RTS (1 << 12) +#define RT2860_DROP_CTS (1 << 11) +#define RT2860_DROP_ACK (1 << 10) +#define RT2860_DROP_CFEND (1 << 9) +#define RT2860_DROP_CFACK (1 << 8) +#define RT2860_DROP_DUPL (1 << 7) +#define RT2860_DROP_BC (1 << 6) +#define RT2860_DROP_MC (1 << 5) +#define RT2860_DROP_VER_ERR (1 << 4) +#define RT2860_DROP_NOT_MYBSS (1 << 3) +#define RT2860_DROP_UC_NOME (1 << 2) +#define RT2860_DROP_PHY_ERR (1 << 1) +#define RT2860_DROP_CRC_ERR (1 << 0) + +/*
possible flags for register AUTO_RSP_CFG */ +#define RT2860_CTRL_PWR_BIT (1 << 7) +#define RT2860_BAC_ACK_POLICY (1 << 6) +#define RT2860_CCK_SHORT_EN (1 << 4) +#define RT2860_CTS_40M_REF_EN (1 << 3) +#define RT2860_CTS_40M_MODE_EN (1 << 2) +#define RT2860_BAC_ACKPOLICY_EN (1 << 1) +#define RT2860_AUTO_RSP_EN (1 << 0) + +/* possible flags for register SIFS_COST_CFG */ +#define RT2860_OFDM_SIFS_COST_SHIFT 8 +#define RT2860_CCK_SIFS_COST_SHIFT 0 + +/* possible flags for register TXOP_HLDR_ET */ +#define RT2860_TXOP_ETM1_EN (1 << 25) +#define RT2860_TXOP_ETM0_EN (1 << 24) +#define RT2860_TXOP_ETM_THRES_SHIFT 16 +#define RT2860_TXOP_ETO_EN (1 << 8) +#define RT2860_TXOP_ETO_THRES_SHIFT 1 +#define RT2860_PER_RX_RST_EN (1 << 0) + +/* possible flags for register TX_STAT_FIFO */ +#define RT2860_TXQ_MCS_SHIFT 16 +#define RT2860_TXQ_WCID_SHIFT 8 +#define RT2860_TXQ_ACKREQ (1 << 7) +#define RT2860_TXQ_AGG (1 << 6) +#define RT2860_TXQ_OK (1 << 5) +#define RT2860_TXQ_PID_SHIFT 1 +#define RT2860_TXQ_VLD (1 << 0) + +/* possible flags for register WCID_ATTR */ +#define RT2860_MODE_NOSEC 0 +#define RT2860_MODE_WEP40 1 +#define RT2860_MODE_WEP104 2 +#define RT2860_MODE_TKIP 3 +#define RT2860_MODE_AES_CCMP 4 +#define RT2860_MODE_CKIP40 5 +#define RT2860_MODE_CKIP104 6 +#define RT2860_MODE_CKIP128 7 +#define RT2860_RX_PKEY_EN (1 << 0) + +/* possible flags for register H2M_MAILBOX */ +#define RT2860_H2M_BUSY (1 << 24) +#define RT2860_TOKEN_NO_INTR 0xff + + +/* possible flags for MCU command RT2860_MCU_CMD_LEDS */ +#define RT2860_LED_RADIO (1 << 13) +#define RT2860_LED_LINK_2GHZ (1 << 14) +#define RT2860_LED_LINK_5GHZ (1 << 15) + + +/* possible flags for RT3020 RF register 1 */ +#define RT3070_RF_BLOCK (1 << 0) +#define RT3070_RX0_PD (1 << 2) +#define RT3070_TX0_PD (1 << 3) +#define RT3070_RX1_PD (1 << 4) +#define RT3070_TX1_PD (1 << 5) +#define RT3070_RX2_PD (1 << 6) +#define RT3070_TX2_PD (1 << 7) + +/* possible flags for RT3020 RF register 7 */ +#define RT3070_TUNE (1 << 0) + +/* possible flags for RT3020 RF register 15 */ +#define RT3070_TX_LO2 (1 << 3) + +/* possible flags for RT3020 RF register 17 */ +#define RT3070_TX_LO1 (1 << 3) + +/* possible flags for RT3020 RF register 20 */ +#define RT3070_RX_LO1 (1 << 3) + +/* possible flags for RT3020 RF register 21 */ +#define RT3070_RX_LO2 (1 << 3) +#define RT3070_RX_CTB (1 << 7) + +/* possible flags for RT3020 RF register 22 */ +#define RT3070_BB_LOOPBACK (1 << 0) + +/* possible flags for RT3053 RF register 1 */ +#define RT3593_VCO (1 << 0) + +/* possible flags for RT3053 RF register 2 */ +#define RT3593_RESCAL (1 << 7) + +/* possible flags for RT3053 RF register 3 */ +#define RT3593_VCOCAL (1 << 7) + +/* possible flags for RT3053 RF register 6 */ +#define RT3593_VCO_IC (1 << 6) + +/* possible flags for RT3053 RF register 20 */ +#define RT3593_LDO_PLL_VC_MASK 0x0e +#define RT3593_LDO_RF_VC_MASK 0xe0 + +/* possible flags for RT3053 RF register 22 */ +#define RT3593_CP_IC_MASK 0xe0 +#define RT3593_CP_IC_SHIFT 5 + +/* possible flags for RT3053 RF register 46 */ +#define RT3593_RX_CTB (1 << 5) + +#define RT3090_DEF_LNA 10 + +/* RT2860 TX descriptor */ +struct rt2860_txd { + uint32_t sdp0; /* Segment Data Pointer 0 */ + uint16_t sdl1; /* Segment Data Length 1 */ +#define RT2860_TX_BURST (1 << 15) +#define RT2860_TX_LS1 (1 << 14) /* SDP1 is the last segment */ + + uint16_t sdl0; /* Segment Data Length 0 */ +#define RT2860_TX_DDONE (1 << 15) +#define RT2860_TX_LS0 (1 << 14) /* SDP0 is the last segment */ + + uint32_t sdp1; /* Segment Data Pointer 1 */ + uint8_t 
reserved[3]; + uint8_t flags; +#define RT2860_TX_QSEL_SHIFT 1 +#define RT2860_TX_QSEL_MGMT (0 << 1) +#define RT2860_TX_QSEL_HCCA (1 << 1) +#define RT2860_TX_QSEL_EDCA (2 << 1) +#define RT2860_TX_WIV (1 << 0) +} __packed; + +/* RT2870 TX descriptor */ +struct rt2870_txd { + uint16_t len; + uint8_t pad; + uint8_t flags; +} __packed; + +/* TX Wireless Information */ +struct rt2860_txwi { + uint8_t flags; +#define RT2860_TX_MPDU_DSITY_SHIFT 5 +#define RT2860_TX_AMPDU (1 << 4) +#define RT2860_TX_TS (1 << 3) +#define RT2860_TX_CFACK (1 << 2) +#define RT2860_TX_MMPS (1 << 1) +#define RT2860_TX_FRAG (1 << 0) + + uint8_t txop; +#define RT2860_TX_TXOP_HT 0 +#define RT2860_TX_TXOP_PIFS 1 +#define RT2860_TX_TXOP_SIFS 2 +#define RT2860_TX_TXOP_BACKOFF 3 + + uint16_t phy; +#define RT2860_PHY_MODE 0xc000 +#define RT2860_PHY_CCK (0 << 14) +#define RT2860_PHY_OFDM (1 << 14) +#define RT2860_PHY_HT (2 << 14) +#define RT2860_PHY_HT_GF (3 << 14) +#define RT2860_PHY_SGI (1 << 8) +#define RT2860_PHY_BW40 (1 << 7) +#define RT2860_PHY_MCS 0x7f +#define RT2860_PHY_SHPRE (1 << 3) + + uint8_t xflags; +#define RT2860_TX_BAWINSIZE_SHIFT 2 +#define RT2860_TX_NSEQ (1 << 1) +#define RT2860_TX_ACK (1 << 0) + + uint8_t wcid; /* Wireless Client ID */ + uint16_t len; +#define RT2860_TX_PID_SHIFT 12 + + uint32_t iv; + uint32_t eiv; +} __packed; + +/* RT2860 RX descriptor */ +struct rt2860_rxd { + uint32_t sdp0; + uint16_t sdl1; /* unused */ + uint16_t sdl0; +#define RT2860_RX_DDONE (1 << 15) +#define RT2860_RX_LS0 (1 << 14) + + uint32_t sdp1; /* unused */ + uint32_t flags; +#define RT2860_RX_DEC (1 << 16) +#define RT2860_RX_AMPDU (1 << 15) +#define RT2860_RX_L2PAD (1 << 14) +#define RT2860_RX_RSSI (1 << 13) +#define RT2860_RX_HTC (1 << 12) +#define RT2860_RX_AMSDU (1 << 11) +#define RT2860_RX_MICERR (1 << 10) +#define RT2860_RX_ICVERR (1 << 9) +#define RT2860_RX_CRCERR (1 << 8) +#define RT2860_RX_MYBSS (1 << 7) +#define RT2860_RX_BC (1 << 6) +#define RT2860_RX_MC (1 << 5) +#define RT2860_RX_UC2ME (1 << 4) +#define RT2860_RX_FRAG (1 << 3) +#define RT2860_RX_NULL (1 << 2) +#define RT2860_RX_DATA (1 << 1) +#define RT2860_RX_BA (1 << 0) +} __packed; + +/* RT2870 RX descriptor */ +struct rt2870_rxd { + /* single 32-bit field */ + uint32_t flags; +} __packed; + +/* RX Wireless Information */ +struct rt2860_rxwi { + uint8_t wcid; + uint8_t keyidx; +#define RT2860_RX_UDF_SHIFT 5 +#define RT2860_RX_BSS_IDX_SHIFT 2 + + uint16_t len; +#define RT2860_RX_TID_SHIFT 12 + + uint16_t seq; + uint16_t phy; + uint8_t rssi[3]; + uint8_t reserved1; + uint8_t snr[2]; + uint16_t reserved2; +} __packed; + + +/* first DMA segment contains TXWI + 802.11 header + 32-bit padding */ +#define RT2860_TXWI_DMASZ \ + (sizeof (struct rt2860_txwi) + \ + sizeof (struct ieee80211_frame) + 6 + \ + sizeof (uint16_t)) + +#define RT2860_RF1 0 +#define RT2860_RF2 2 +#define RT2860_RF3 1 +#define RT2860_RF4 3 + +#define RT2860_RF_2820 1 /* 2T3R */ +#define RT2860_RF_2850 2 /* dual-band 2T3R */ +#define RT2860_RF_2720 3 /* 1T2R */ +#define RT2860_RF_2750 4 /* dual-band 1T2R */ +#define RT3070_RF_3020 5 /* 1T1R */ +#define RT3070_RF_2020 6 /* b/g */ +#define RT3070_RF_3021 7 /* 1T2R */ +#define RT3070_RF_3022 8 /* 2T2R */ +#define RT3070_RF_3052 9 /* dual-band 2T2R */ +#define RT3070_RF_3320 11 /* 1T1R */ +#define RT3070_RF_3053 13 /* dual-band 3T3R */ + +/* USB commands for RT2870 only */ +#define RT2870_RESET 1 +#define RT2870_WRITE_2 2 +#define RT2870_WRITE_REGION_1 6 +#define RT2870_READ_REGION_1 7 +#define RT2870_EEPROM_READ 9 + +#define RT2860_EEPROM_DELAY 1 /* 
minimum hold time (microsecond) */ + +#define RT2860_EEPROM_VERSION 0x01 +#define RT2860_EEPROM_MAC01 0x02 +#define RT2860_EEPROM_MAC23 0x03 +#define RT2860_EEPROM_MAC45 0x04 +#define RT2860_EEPROM_PCIE_PSLEVEL 0x11 +#define RT2860_EEPROM_REV 0x12 +#define RT2860_EEPROM_ANTENNA 0x1a +#define RT2860_EEPROM_CONFIG 0x1b +#define RT2860_EEPROM_COUNTRY 0x1c +#define RT2860_EEPROM_FREQ_LEDS 0x1d +#define RT2860_EEPROM_LED1 0x1e +#define RT2860_EEPROM_LED2 0x1f +#define RT2860_EEPROM_LED3 0x20 +#define RT2860_EEPROM_LNA 0x22 +#define RT2860_EEPROM_RSSI1_2GHZ 0x23 +#define RT2860_EEPROM_RSSI2_2GHZ 0x24 +#define RT2860_EEPROM_RSSI1_5GHZ 0x25 +#define RT2860_EEPROM_RSSI2_5GHZ 0x26 +#define RT2860_EEPROM_DELTAPWR 0x28 +#define RT2860_EEPROM_PWR2GHZ_BASE1 0x29 +#define RT2860_EEPROM_PWR2GHZ_BASE2 0x30 +#define RT2860_EEPROM_TSSI1_2GHZ 0x37 +#define RT2860_EEPROM_TSSI2_2GHZ 0x38 +#define RT2860_EEPROM_TSSI3_2GHZ 0x39 +#define RT2860_EEPROM_TSSI4_2GHZ 0x3a +#define RT2860_EEPROM_TSSI5_2GHZ 0x3b +#define RT2860_EEPROM_PWR5GHZ_BASE1 0x3c +#define RT2860_EEPROM_PWR5GHZ_BASE2 0x53 +#define RT2860_EEPROM_TSSI1_5GHZ 0x6a +#define RT2860_EEPROM_TSSI2_5GHZ 0x6b +#define RT2860_EEPROM_TSSI3_5GHZ 0x6c +#define RT2860_EEPROM_TSSI4_5GHZ 0x6d +#define RT2860_EEPROM_TSSI5_5GHZ 0x6e +#define RT2860_EEPROM_RPWR 0x6f +#define RT2860_EEPROM_BBP_BASE 0x78 +#define RT3071_EEPROM_RF_BASE 0x82 + +#define RT2860_RIDX_CCK1 0 +#define RT2860_RIDX_CCK11 3 +#define RT2860_RIDX_OFDM6 4 +#define RT2860_RIDX_MAX 11 +static const struct rt2860_rate { + uint8_t rate; + uint8_t mcs; + enum ieee80211_phytype phy; + uint8_t ctl_ridx; + uint16_t sp_ack_dur; + uint16_t lp_ack_dur; +} rt2860_rates[] = { + { 2, 0, IEEE80211_T_DS, 0, 314, 314 }, + { 4, 1, IEEE80211_T_DS, 1, 258, 162 }, + { 11, 2, IEEE80211_T_DS, 2, 223, 127 }, + { 22, 3, IEEE80211_T_DS, 3, 213, 117 }, + { 12, 0, IEEE80211_T_OFDM, 4, 60, 60 }, + { 18, 1, IEEE80211_T_OFDM, 4, 52, 52 }, + { 24, 2, IEEE80211_T_OFDM, 6, 48, 48 }, + { 36, 3, IEEE80211_T_OFDM, 6, 44, 44 }, + { 48, 4, IEEE80211_T_OFDM, 8, 44, 44 }, + { 72, 5, IEEE80211_T_OFDM, 8, 40, 40 }, + { 96, 6, IEEE80211_T_OFDM, 8, 40, 40 }, + { 108, 7, IEEE80211_T_OFDM, 8, 40, 40 } +}; + +/* + * Control and status registers access macros. + */ +#define RAL_READ(sc, reg) \ + bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) + +#define RAL_WRITE(sc, reg, val) \ + bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) + +#define RAL_BARRIER_WRITE(sc) \ + bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, 0x1800, \ + BUS_SPACE_BARRIER_WRITE) + +#define RAL_BARRIER_READ_WRITE(sc) \ + bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, 0x1800, \ + BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE) + +#define RAL_WRITE_REGION_1(sc, offset, datap, count) \ + bus_space_write_region_1((sc)->sc_st, (sc)->sc_sh, (offset), \ + (datap), (count)) + +#define RAL_SET_REGION_4(sc, offset, val, count) \ + bus_space_set_region_4((sc)->sc_st, (sc)->sc_sh, (offset), \ + (val), (count)) + +/* + * EEPROM access macro. + */ +#define RT2860_EEPROM_CTL(sc, val) do { \ + RAL_WRITE((sc), RT2860_PCI_EECTRL, (val)); \ + RAL_BARRIER_READ_WRITE((sc)); \ + DELAY(RT2860_EEPROM_DELAY); \ +} while (/* CONSTCOND */0) + +/* + * Default values for MAC registers; values taken from the reference driver. 
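+ *
+ * rt2860_init_locked() replays each { register, value } pair of this
+ * table through RAL_WRITE():
+ *
+ *	for (i = 0; i < N(rt2860_def_mac); i++)
+ *		RAL_WRITE(sc, rt2860_def_mac[i].reg, rt2860_def_mac[i].val);
+ *
+ * so a power-on default can be retuned by editing a single row here.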
+ */ +#define RT2860_DEF_MAC \ + { RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \ + { RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \ + { RT2860_HT_BASIC_RATE, 0x00008003 }, \ + { RT2860_MAC_SYS_CTRL, 0x00000000 }, \ + { RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \ + { RT2860_TX_SW_CFG0, 0x00000000 }, \ + { RT2860_TX_SW_CFG1, 0x00080606 }, \ + { RT2860_TX_LINK_CFG, 0x00001020 }, \ + { RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \ + { RT2860_LED_CFG, 0x7f031e46 }, \ + { RT2860_WMM_AIFSN_CFG, 0x00002273 }, \ + { RT2860_WMM_CWMIN_CFG, 0x00002344 }, \ + { RT2860_WMM_CWMAX_CFG, 0x000034aa }, \ + { RT2860_MAX_PCNT, 0x1f3fbf9f }, \ + { RT2860_TX_RTY_CFG, 0x47d01f0f }, \ + { RT2860_AUTO_RSP_CFG, 0x00000013 }, \ + { RT2860_CCK_PROT_CFG, 0x05740003 }, \ + { RT2860_OFDM_PROT_CFG, 0x05740003 }, \ + { RT2860_GF20_PROT_CFG, 0x01744004 }, \ + { RT2860_GF40_PROT_CFG, 0x03f44084 }, \ + { RT2860_MM20_PROT_CFG, 0x01744004 }, \ + { RT2860_MM40_PROT_CFG, 0x03f54084 }, \ + { RT2860_TXOP_CTRL_CFG, 0x0000583f }, \ + { RT2860_TXOP_HLDR_ET, 0x00000002 }, \ + { RT2860_TX_RTS_CFG, 0x00092b20 }, \ + { RT2860_EXP_ACK_TIME, 0x002400ca }, \ + { RT2860_XIFS_TIME_CFG, 0x33a41010 }, \ + { RT2860_PWR_PIN_CFG, 0x00000003 } + +/* XXX only a few registers differ from above, try to merge? */ +#define RT2870_DEF_MAC \ + { RT2860_BCN_OFFSET0, 0xf8f0e8e0 }, \ + { RT2860_LEGACY_BASIC_RATE, 0x0000013f }, \ + { RT2860_HT_BASIC_RATE, 0x00008003 }, \ + { RT2860_MAC_SYS_CTRL, 0x00000000 }, \ + { RT2860_BKOFF_SLOT_CFG, 0x00000209 }, \ + { RT2860_TX_SW_CFG0, 0x00000000 }, \ + { RT2860_TX_SW_CFG1, 0x00080606 }, \ + { RT2860_TX_LINK_CFG, 0x00001020 }, \ + { RT2860_TX_TIMEOUT_CFG, 0x000a2090 }, \ + { RT2860_LED_CFG, 0x7f031e46 }, \ + { RT2860_WMM_AIFSN_CFG, 0x00002273 }, \ + { RT2860_WMM_CWMIN_CFG, 0x00002344 }, \ + { RT2860_WMM_CWMAX_CFG, 0x000034aa }, \ + { RT2860_MAX_PCNT, 0x1f3fbf9f }, \ + { RT2860_TX_RTY_CFG, 0x47d01f0f }, \ + { RT2860_AUTO_RSP_CFG, 0x00000013 }, \ + { RT2860_CCK_PROT_CFG, 0x05740003 }, \ + { RT2860_OFDM_PROT_CFG, 0x05740003 }, \ + { RT2860_PBF_CFG, 0x00f40006 }, \ + { RT2860_WPDMA_GLO_CFG, 0x00000030 }, \ + { RT2860_GF20_PROT_CFG, 0x01744004 }, \ + { RT2860_GF40_PROT_CFG, 0x03f44084 }, \ + { RT2860_MM20_PROT_CFG, 0x01744004 }, \ + { RT2860_MM40_PROT_CFG, 0x03f44084 }, \ + { RT2860_TXOP_CTRL_CFG, 0x0000583f }, \ + { RT2860_TXOP_HLDR_ET, 0x00000002 }, \ + { RT2860_TX_RTS_CFG, 0x00092b20 }, \ + { RT2860_EXP_ACK_TIME, 0x002400ca }, \ + { RT2860_XIFS_TIME_CFG, 0x33a41010 }, \ + { RT2860_PWR_PIN_CFG, 0x00000003 } + +/* + * Default values for BBP registers; values taken from the reference driver. + */ +#define RT2860_DEF_BBP \ + { 65, 0x2c }, \ + { 66, 0x38 }, \ + { 69, 0x12 }, \ + { 70, 0x0a }, \ + { 73, 0x10 }, \ + { 81, 0x37 }, \ + { 82, 0x62 }, \ + { 83, 0x6a }, \ + { 84, 0x99 }, \ + { 86, 0x00 }, \ + { 91, 0x04 }, \ + { 92, 0x00 }, \ + { 103, 0x00 }, \ + { 105, 0x05 }, \ + { 106, 0x35 } + +/* + * Default settings for RF registers; values derived from the reference driver. 
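+ *
+ * Each row below reads { chan, r1, r2, r3, r4 }: an IEEE channel number
+ * followed by the four synthesizer words loaded into RF registers R1-R4
+ * for that channel (a sketch of the layout only; at channel-switch time
+ * the driver also folds the per-channel EEPROM Tx power settings into
+ * these words before programming the RF).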
+ */ +#define RT2860_RF2850 \ + { 1, 0x100bb3, 0x1301e1, 0x05a014, 0x001402 }, \ + { 2, 0x100bb3, 0x1301e1, 0x05a014, 0x001407 }, \ + { 3, 0x100bb3, 0x1301e2, 0x05a014, 0x001402 }, \ + { 4, 0x100bb3, 0x1301e2, 0x05a014, 0x001407 }, \ + { 5, 0x100bb3, 0x1301e3, 0x05a014, 0x001402 }, \ + { 6, 0x100bb3, 0x1301e3, 0x05a014, 0x001407 }, \ + { 7, 0x100bb3, 0x1301e4, 0x05a014, 0x001402 }, \ + { 8, 0x100bb3, 0x1301e4, 0x05a014, 0x001407 }, \ + { 9, 0x100bb3, 0x1301e5, 0x05a014, 0x001402 }, \ + { 10, 0x100bb3, 0x1301e5, 0x05a014, 0x001407 }, \ + { 11, 0x100bb3, 0x1301e6, 0x05a014, 0x001402 }, \ + { 12, 0x100bb3, 0x1301e6, 0x05a014, 0x001407 }, \ + { 13, 0x100bb3, 0x1301e7, 0x05a014, 0x001402 }, \ + { 14, 0x100bb3, 0x1301e8, 0x05a014, 0x001404 }, \ + { 36, 0x100bb3, 0x130266, 0x056014, 0x001408 }, \ + { 38, 0x100bb3, 0x130267, 0x056014, 0x001404 }, \ + { 40, 0x100bb2, 0x1301a0, 0x056014, 0x001400 }, \ + { 44, 0x100bb2, 0x1301a0, 0x056014, 0x001408 }, \ + { 46, 0x100bb2, 0x1301a1, 0x056014, 0x001402 }, \ + { 48, 0x100bb2, 0x1301a1, 0x056014, 0x001406 }, \ + { 52, 0x100bb2, 0x1301a2, 0x056014, 0x001404 }, \ + { 54, 0x100bb2, 0x1301a2, 0x056014, 0x001408 }, \ + { 56, 0x100bb2, 0x1301a3, 0x056014, 0x001402 }, \ + { 60, 0x100bb2, 0x1301a4, 0x056014, 0x001400 }, \ + { 62, 0x100bb2, 0x1301a4, 0x056014, 0x001404 }, \ + { 64, 0x100bb2, 0x1301a4, 0x056014, 0x001408 }, \ + { 100, 0x100bb2, 0x1301ac, 0x05e014, 0x001400 }, \ + { 102, 0x100bb2, 0x1701ac, 0x15e014, 0x001404 }, \ + { 104, 0x100bb2, 0x1701ac, 0x15e014, 0x001408 }, \ + { 108, 0x100bb3, 0x17028c, 0x15e014, 0x001404 }, \ + { 110, 0x100bb3, 0x13028d, 0x05e014, 0x001400 }, \ + { 112, 0x100bb3, 0x13028d, 0x05e014, 0x001406 }, \ + { 116, 0x100bb3, 0x13028e, 0x05e014, 0x001408 }, \ + { 118, 0x100bb3, 0x13028f, 0x05e014, 0x001404 }, \ + { 120, 0x100bb1, 0x1300e0, 0x05e014, 0x001400 }, \ + { 124, 0x100bb1, 0x1300e0, 0x05e014, 0x001404 }, \ + { 126, 0x100bb1, 0x1300e0, 0x05e014, 0x001406 }, \ + { 128, 0x100bb1, 0x1300e0, 0x05e014, 0x001408 }, \ + { 132, 0x100bb1, 0x1300e1, 0x05e014, 0x001402 }, \ + { 134, 0x100bb1, 0x1300e1, 0x05e014, 0x001404 }, \ + { 136, 0x100bb1, 0x1300e1, 0x05e014, 0x001406 }, \ + { 140, 0x100bb1, 0x1300e2, 0x05e014, 0x001400 }, \ + { 149, 0x100bb1, 0x1300e2, 0x05e014, 0x001409 }, \ + { 151, 0x100bb1, 0x1300e3, 0x05e014, 0x001401 }, \ + { 153, 0x100bb1, 0x1300e3, 0x05e014, 0x001403 }, \ + { 157, 0x100bb1, 0x1300e3, 0x05e014, 0x001407 }, \ + { 159, 0x100bb1, 0x1300e3, 0x05e014, 0x001409 }, \ + { 161, 0x100bb1, 0x1300e4, 0x05e014, 0x001401 }, \ + { 165, 0x100bb1, 0x1300e4, 0x05e014, 0x001405 }, \ + { 167, 0x100bb1, 0x1300f4, 0x05e014, 0x001407 }, \ + { 169, 0x100bb1, 0x1300f4, 0x05e014, 0x001409 }, \ + { 171, 0x100bb1, 0x1300f5, 0x05e014, 0x001401 }, \ + { 173, 0x100bb1, 0x1300f5, 0x05e014, 0x001403 } + +#define RT3070_RF3052 \ + { 0xf1, 2, 2 }, \ + { 0xf1, 2, 7 }, \ + { 0xf2, 2, 2 }, \ + { 0xf2, 2, 7 }, \ + { 0xf3, 2, 2 }, \ + { 0xf3, 2, 7 }, \ + { 0xf4, 2, 2 }, \ + { 0xf4, 2, 7 }, \ + { 0xf5, 2, 2 }, \ + { 0xf5, 2, 7 }, \ + { 0xf6, 2, 2 }, \ + { 0xf6, 2, 7 }, \ + { 0xf7, 2, 2 }, \ + { 0xf8, 2, 4 }, \ + { 0x56, 0, 4 }, \ + { 0x56, 0, 6 }, \ + { 0x56, 0, 8 }, \ + { 0x57, 0, 0 }, \ + { 0x57, 0, 2 }, \ + { 0x57, 0, 4 }, \ + { 0x57, 0, 8 }, \ + { 0x57, 0, 10 }, \ + { 0x58, 0, 0 }, \ + { 0x58, 0, 4 }, \ + { 0x58, 0, 6 }, \ + { 0x58, 0, 8 }, \ + { 0x5b, 0, 8 }, \ + { 0x5b, 0, 10 }, \ + { 0x5c, 0, 0 }, \ + { 0x5c, 0, 4 }, \ + { 0x5c, 0, 6 }, \ + { 0x5c, 0, 8 }, \ + { 0x5d, 0, 0 }, \ + { 0x5d, 0, 2 }, \ + { 0x5d, 0, 4 }, \ + { 0x5d, 0, 8 }, \ + { 
0x5d, 0, 10 }, \ + { 0x5e, 0, 0 }, \ + { 0x5e, 0, 4 }, \ + { 0x5e, 0, 6 }, \ + { 0x5e, 0, 8 }, \ + { 0x5f, 0, 0 }, \ + { 0x5f, 0, 9 }, \ + { 0x5f, 0, 11 }, \ + { 0x60, 0, 1 }, \ + { 0x60, 0, 5 }, \ + { 0x60, 0, 7 }, \ + { 0x60, 0, 9 }, \ + { 0x61, 0, 1 }, \ + { 0x61, 0, 3 }, \ + { 0x61, 0, 5 }, \ + { 0x61, 0, 7 }, \ + { 0x61, 0, 9 } + +#define RT3070_DEF_RF \ + { 4, 0x40 }, \ + { 5, 0x03 }, \ + { 6, 0x02 }, \ + { 7, 0x70 }, \ + { 9, 0x0f }, \ + { 10, 0x41 }, \ + { 11, 0x21 }, \ + { 12, 0x7b }, \ + { 14, 0x90 }, \ + { 15, 0x58 }, \ + { 16, 0xb3 }, \ + { 17, 0x92 }, \ + { 18, 0x2c }, \ + { 19, 0x02 }, \ + { 20, 0xba }, \ + { 21, 0xdb }, \ + { 24, 0x16 }, \ + { 25, 0x01 }, \ + { 29, 0x1f } + +#define RT3572_DEF_RF \ + { 0, 0x70 }, \ + { 1, 0x81 }, \ + { 2, 0xf1 }, \ + { 3, 0x02 }, \ + { 4, 0x4c }, \ + { 5, 0x05 }, \ + { 6, 0x4a }, \ + { 7, 0xd8 }, \ + { 9, 0xc3 }, \ + { 10, 0xf1 }, \ + { 11, 0xb9 }, \ + { 12, 0x70 }, \ + { 13, 0x65 }, \ + { 14, 0xa0 }, \ + { 15, 0x53 }, \ + { 16, 0x4c }, \ + { 17, 0x23 }, \ + { 18, 0xac }, \ + { 19, 0x93 }, \ + { 20, 0xb3 }, \ + { 21, 0xd0 }, \ + { 22, 0x00 }, \ + { 23, 0x3c }, \ + { 24, 0x16 }, \ + { 25, 0x15 }, \ + { 26, 0x85 }, \ + { 27, 0x00 }, \ + { 28, 0x00 }, \ + { 29, 0x9b }, \ + { 30, 0x09 }, \ + { 31, 0x10 } Property changes on: projects/nand/sys/dev/ral/rt2860reg.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/nand/sys/dev/ral/rt2860var.h =================================================================== --- projects/nand/sys/dev/ral/rt2860var.h (nonexistent) +++ projects/nand/sys/dev/ral/rt2860var.h (revision 235263) @@ -0,0 +1,210 @@ +/*- + * Copyright (c) 2007 Damien Bergamini + * Copyright (c) 2012 Bernhard Schmidt + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * $OpenBSD: rt2860var.h,v 1.20 2010/09/07 16:21:42 deraadt Exp $ + * $FreeBSD$ + */ + +#define RT2860_TX_RING_COUNT 64 +#define RT2860_RX_RING_COUNT 128 +#define RT2860_TX_POOL_COUNT (RT2860_TX_RING_COUNT * 2) + +#define RT2860_MAX_SCATTER ((RT2860_TX_RING_COUNT * 2) - 1) + +/* HW supports up to 255 STAs */ +#define RT2860_WCID_MAX 254 +#define RT2860_AID2WCID(aid) ((aid) & 0xff) + +struct rt2860_rx_radiotap_header { + struct ieee80211_radiotap_header wr_ihdr; + uint64_t wr_tsf; + uint8_t wr_flags; + uint8_t wr_rate; + uint16_t wr_chan_freq; + uint16_t wr_chan_flags; + uint8_t wr_antenna; + int8_t wr_antsignal; + int8_t wr_antnoise; +} __packed; + +#define RT2860_RX_RADIOTAP_PRESENT \ + ((1 << IEEE80211_RADIOTAP_TSFT) | \ + (1 << IEEE80211_RADIOTAP_FLAGS) | \ + (1 << IEEE80211_RADIOTAP_RATE) | \ + (1 << IEEE80211_RADIOTAP_CHANNEL) | \ + (1 << IEEE80211_RADIOTAP_ANTENNA) | \ + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \ + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)) + +struct rt2860_tx_radiotap_header { + struct ieee80211_radiotap_header wt_ihdr; + uint8_t wt_flags; + uint8_t wt_rate; + uint16_t wt_chan_freq; + uint16_t wt_chan_flags; +} __packed; + +#define RT2860_TX_RADIOTAP_PRESENT \ + ((1 << IEEE80211_RADIOTAP_FLAGS) | \ + (1 << IEEE80211_RADIOTAP_RATE) | \ + (1 << IEEE80211_RADIOTAP_CHANNEL)) + +struct rt2860_tx_data { + struct rt2860_txwi *txwi; + struct mbuf *m; + struct ieee80211_node *ni; + bus_dmamap_t map; + bus_addr_t paddr; + SLIST_ENTRY(rt2860_tx_data) next; +}; + +struct rt2860_tx_ring { + struct rt2860_txd *txd; + bus_addr_t paddr; + bus_dma_tag_t desc_dmat; + bus_dmamap_t desc_map; + bus_dma_segment_t seg; + struct rt2860_tx_data *data[RT2860_TX_RING_COUNT]; + int cur; + int next; + int queued; +}; + +struct rt2860_rx_data { + struct mbuf *m; + bus_dmamap_t map; +}; + +struct rt2860_rx_ring { + struct rt2860_rxd *rxd; + bus_addr_t paddr; + bus_dma_tag_t desc_dmat; + bus_dmamap_t desc_map; + bus_dma_tag_t data_dmat; + bus_dma_segment_t seg; + unsigned int cur; /* must be unsigned */ + struct rt2860_rx_data data[RT2860_RX_RING_COUNT]; +}; + +struct rt2860_node { + struct ieee80211_node ni; + uint8_t wcid; + uint8_t ridx[IEEE80211_RATE_MAXSIZE]; + uint8_t ctl_ridx[IEEE80211_RATE_MAXSIZE]; +}; + +struct rt2860_vap { + struct ieee80211vap ral_vap; + + int (*ral_newstate)(struct ieee80211vap *, + enum ieee80211_state, int); +}; +#define RT2860_VAP(vap) ((struct rt2860_vap *)(vap)) + +struct rt2860_softc { + struct ifnet *sc_ifp; + device_t sc_dev; + bus_space_tag_t sc_st; + bus_space_handle_t sc_sh; + + struct mtx sc_mtx; + + struct callout watchdog_ch; + + int sc_invalid; + int sc_debug; +/* + * The same in both up to here + * ------------------------------------------------ + */ + + uint16_t (*sc_srom_read)(struct rt2860_softc *, + uint16_t); + void (*sc_node_free)(struct ieee80211_node *); + + int sc_flags; +#define RT2860_ENABLED (1 << 0) +#define RT2860_ADVANCED_PS (1 << 1) +#define RT2860_PCIE (1 << 2) + + struct ieee80211_node *wcid2ni[RT2860_WCID_MAX]; + + struct rt2860_tx_ring txq[6]; + struct rt2860_rx_ring rxq; + + SLIST_HEAD(, rt2860_tx_data) data_pool; + struct rt2860_tx_data data[RT2860_TX_POOL_COUNT]; + bus_dma_tag_t txwi_dmat; + bus_dmamap_t txwi_map; + bus_dma_segment_t txwi_seg; + caddr_t txwi_vaddr; + + int sc_tx_timer; + int mgtqid; + uint8_t qfullmsk; + + uint16_t mac_ver; + uint16_t mac_rev; + uint8_t rf_rev; + uint8_t freq; + uint8_t ntxchains; + uint8_t nrxchains; + uint8_t pslevel; + int8_t txpow1[54]; + int8_t txpow2[54]; + int8_t rssi_2ghz[3]; + int8_t 
rssi_5ghz[3]; + uint8_t lna[4]; + uint8_t rf24_20mhz; + uint8_t rf24_40mhz; + uint8_t patch_dac; + uint8_t rfswitch; + uint8_t ext_2ghz_lna; + uint8_t ext_5ghz_lna; + uint8_t calib_2ghz; + uint8_t calib_5ghz; + uint8_t txmixgain_2ghz; + uint8_t txmixgain_5ghz; + uint8_t tssi_2ghz[9]; + uint8_t tssi_5ghz[9]; + uint8_t step_2ghz; + uint8_t step_5ghz; + struct { + uint8_t reg; + uint8_t val; + } bbp[8], rf[10]; + uint8_t leds; + uint16_t led[3]; + uint32_t txpow20mhz[5]; + uint32_t txpow40mhz_2ghz[5]; + uint32_t txpow40mhz_5ghz[5]; + + struct rt2860_rx_radiotap_header sc_rxtap; + int sc_rxtap_len; + struct rt2860_tx_radiotap_header sc_txtap; + int sc_txtap_len; +}; + +int rt2860_attach(device_t, int); +int rt2860_detach(void *); +void rt2860_shutdown(void *); +void rt2860_suspend(void *); +void rt2860_resume(void *); +void rt2860_intr(void *); + +#define RAL_LOCK(sc) mtx_lock(&(sc)->sc_mtx) +#define RAL_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) +#define RAL_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) Property changes on: projects/nand/sys/dev/ral/rt2860var.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/nand/sys/dev/re/if_re.c =================================================================== --- projects/nand/sys/dev/re/if_re.c (revision 235262) +++ projects/nand/sys/dev/re/if_re.c (revision 235263) @@ -1,4013 +1,4014 @@ /*- * Copyright (c) 1997, 1998-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. * * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7440, so the max MTU possible with this * driver is 7422 bytes. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Tunables. 
*/ static int intr_filter = 0; TUNABLE_INT("hw.re.intr_filter", &intr_filter); static int msi_disable = 0; TUNABLE_INT("hw.re.msi_disable", &msi_disable); static int msix_disable = 0; TUNABLE_INT("hw.re.msix_disable", &msix_disable); static int prefer_iomap = 0; TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static const struct rl_type const re_devs[] = { { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0, "D-Link DGE-530(T) Gigabit Ethernet Adapter" }, { RT_VENDORID, RT_DEVICEID_8139, 0, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8101E, 0, "RealTek 810xE PCIe 10/100baseTX" }, { RT_VENDORID, RT_DEVICEID_8168, 0, "RealTek 8168/8111 B/C/CP/D/DP/E/F PCIe Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, 0, "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169SC, 0, "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, { USR_VENDORID, USR_DEVICEID_997902, 0, "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } }; static const struct rl_hwrev const re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "", RL_MTU }, { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU }, { RL_HWREV_8402, RL_8169, "8402", RL_MTU }, { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K}, { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K}, { 0, 0, NULL, 0 } }; static int 
re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf **); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int re_allocmem (device_t, struct rl_softc *); static __inline void re_discard_rxbuf (struct rl_softc *, int); static int re_newbuf (struct rl_softc *, int); static int re_jumbo_newbuf (struct rl_softc *, int); static int re_rx_list_init (struct rl_softc *); static int re_jrx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); #ifdef RE_FIXUP_RX static __inline void re_fixup_rx (struct mbuf *); #endif static int re_rxeof (struct rl_softc *, int *); static void re_txeof (struct rl_softc *); #ifdef DEVICE_POLLING static int re_poll (struct ifnet *, enum poll_cmd, int); static int re_poll_locked (struct ifnet *, enum poll_cmd, int); #endif static int re_intr (void *); static void re_intr_msi (void *); static void re_tick (void *); static void re_int_task (void *, int); static void re_start (struct ifnet *); static void re_start_locked (struct ifnet *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_init_locked (struct rl_softc *); static void re_stop (struct rl_softc *); static void re_watchdog (struct rl_softc *); static int re_suspend (device_t); static int re_resume (device_t); static int re_shutdown (device_t); static int re_ifmedia_upd (struct ifnet *); static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static void re_miibus_statchg (device_t); static void re_set_jumbo (struct rl_softc *, int); static void re_set_rxmode (struct rl_softc *); static void re_reset (struct rl_softc *); static void re_setwol (struct rl_softc *); static void re_clrwol (struct rl_softc *); static void re_set_linkspeed (struct rl_softc *); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include #endif /* !DEV_NETMAP */ #ifdef RE_DIAG static int re_diag (struct rl_softc *); #endif static void re_add_sysctls (struct rl_softc *); static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), DEVMETHOD_END }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. 
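 */

EE_SET() and EE_CLR() just above are read-modify-write helpers over the single RL_EECMD register: each reads the current command byte, ORs in or masks out the requested bit, and writes it back, leaving the other EEPROM control lines untouched. A toy helper showing how they combine into one clock pulse (the function is illustrative; the DELAY values mirror the timing used by re_eeprom_putbyte() below):

static void
ee_clock_pulse(struct rl_softc *sc)
{
	EE_SET(RL_EE_CLK);	/* raise SK: the EEPROM samples DI */
	DELAY(150);
	EE_CLR(RL_EE_CLK);	/* lower SK, completing this bit */
	DELAY(100);
}

/*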
*/ static void re_eeprom_putbyte(struct rl_softc *sc, int addr) { int d, i; d = addr | (RL_9346_READ << sc->rl_eewidth); /* * Feed in each bit and strobe the clock. */ for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) { int i; u_int16_t word = 0, *ptr; CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); DELAY(100); for (i = 0; i < cnt; i++) { CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); re_eeprom_getword(sc, off + i, &word); CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); ptr = (u_int16_t *)(dest + (i * 2)); *ptr = word; } CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); } static int re_gmii_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return (rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY read failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. */ DELAY(20); return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY write failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. */ DELAY(20); return (0); } static int re_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } rval = CSR_READ_2(sc, re8139_reg); if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { /* 8139C+ has different bit layout. 
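 */

re_gmii_readreg() and re_gmii_writereg() above both use the classic bounded busy-wait: start the PHY access through RL_PHYAR, then poll the BUSY bit up to RL_PHY_TIMEOUT times with a short DELAY() between probes, printing a diagnostic if it never settles. The shared skeleton, factored into a helper purely for illustration (a read completes when BUSY becomes set, a write when it clears):

static int
re_phyar_wait(struct rl_softc *sc, uint32_t mask, uint32_t want)
{
	uint32_t rval;
	int i;

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if ((rval & mask) == want)
			return (0);	/* access completed */
		DELAY(25);		/* give the MDIO cycle time */
	}
	return (ETIMEDOUT);		/* caller reports the failure */
}

/*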
*/ rval &= ~(BMCR_LOOP | BMCR_ISO); } return (rval); } static int re_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; if (sc->rl_type == RL_8139CPLUS) { /* 8139C+ has different bit layout. */ data &= ~(BMCR_LOOP | BMCR_ISO); } break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, re8139_reg, data); return (0); } static void re_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; case IFM_1000_T: if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) break; sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers do not provide any interface to * the Tx/Rx MACs for resolved speed, duplex and flow-control * parameters. */ } /* * Set the RX configuration and 64-bit multicast hash filter. */ static void re_set_rxmode(struct rl_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t hashes[2] = { 0, 0 }; uint32_t h, rxfilt; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= RL_RXCFG_RX_ALLPHYS; /* * Unlike other hardware, we have to explicitly set * RL_RXCFG_RX_MULTI to receive multicast frames in * promiscuous mode. */ rxfilt |= RL_RXCFG_RX_MULTI; hashes[0] = hashes[1] = 0xffffffff; goto done; } if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } if_maddr_runlock(ifp); if (hashes[0] != 0 || hashes[1] != 0) { /* * For some unfathomable reason, RealTek decided to * reverse the order of the multicast hash registers * in the PCI Express parts. This means we have to * write the hash pattern in reverse order for those * devices.
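 */

The hash placement in re_set_rxmode() above is worth spelling out: the big-endian CRC32 of the 6-byte multicast address is shifted right by 26, leaving a 6-bit index (0-63) into a 64-bit filter that the chip exposes as the two 32-bit registers RL_MAR0 and RL_MAR4. Just that arithmetic, isolated into an illustrative helper:

static void
hash_set_bit(uint32_t crc, uint32_t hashes[2])
{
	uint32_t h;

	h = crc >> 26;				/* top 6 CRC bits: 0..63 */
	if (h < 32)
		hashes[0] |= 1U << h;		/* lands in RL_MAR0 */
	else
		hashes[1] |= 1U << (h - 32);	/* lands in RL_MAR4 */
}

/*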
*/ if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; } rxfilt |= RL_RXCFG_RX_MULTI; } done: CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); CSR_WRITE_4(sc, RL_RXCFG, rxfilt); } static void re_reset(struct rl_softc *sc) { int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) CSR_WRITE_1(sc, 0x82, 1); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); } #ifdef RE_DIAG /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. * * There's no way to work around this (short of taking a soldering * iron to the board), however we can detect it. The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. */ static int re_diag(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0, phyaddr; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 == NULL) return (ENOBUFS); RL_LOCK(sc); /* * Initialize the NIC in test mode. This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); sc->rl_flags |= RL_FLAG_LINK; if (sc->rl_type == RL_8169) phyaddr = 1; else phyaddr = 0; re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); for (i = 0; i < RL_TIMEOUT; i++) { status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); if (!(status & BMCR_RESET)) break; } re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); CSR_WRITE_2(sc, RL_ISR, RL_INTRS); DELAY(100000); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us.
*/ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); RL_UNLOCK(sc); /* XXX: re_diag must not be called when in ALTQ mode */ IF_HANDOFF(&ifp->if_snd, m0, ifp); RL_LOCK(sc); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { device_printf(sc->rl_dev, "diagnostic failed, failed to receive packet in" " loopback mode\n"); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap); m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { device_printf(sc->rl_dev, "diagnostic failed, received short packet\n"); error = EIO; goto done; } /* Test that the received packet data matches what we sent. */ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", dst, ":", src, ":", ETHERTYPE_IP); device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); device_printf(sc->rl_dev, "You may have a defective 32-bit " "NIC plugged into a 64-bit PCI slot.\n"); device_printf(sc->rl_dev, "Please re-install the NIC in a " "32-bit slot for proper operation.\n"); device_printf(sc->rl_dev, "Read the re(4) man page for more " "details.\n"); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; sc->rl_flags &= ~RL_FLAG_LINK; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); RL_UNLOCK(sc); return (error); } #endif /* * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(device_t dev) { const struct rl_type *t; uint16_t devid, vendor; uint16_t revid, sdevid; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); sdevid = pci_get_subdevice(dev); if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { /* * Only attach to rev. 3 of the Linksys EG1032 adapter. * Rev. 2 is supported by sk(4). */ return (ENXIO); } } if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid != 0x20) { /* 8139, let rl(4) take care of this device. */ return (ENXIO); } } t = re_devs; for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } /* * Map a single buffer address. 
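 */

The re_dma_map_addr() callback that follows implements the standard busdma contract: bus_dmamap_load() reports the resulting segment list to a caller-supplied callback, which here simply captures the single segment's bus address through the void * cookie. A sketch of the calling side under the same one-segment assumption (the helper name is ours, not the driver's):

static int
load_contig(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
    bus_size_t size, bus_addr_t *paddr)
{
	int error;

	*paddr = 0;
	/* re_dma_map_addr() stores the bus address into *paddr. */
	error = bus_dmamap_load(tag, map, vaddr, size,
	    re_dma_map_addr, paddr, BUS_DMA_NOWAIT);
	if (error != 0 || *paddr == 0)
		return (ENOMEM);	/* load failed or was deferred */
	return (0);
}

/*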
*/ static void re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int re_allocmem(device_t dev, struct rl_softc *sc) { bus_addr_t lowaddr; bus_size_t rx_list_size, tx_list_size; int error; int i; rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); /* * Allocate the parent bus DMA tag appropriate for PCI. * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD * register should be set. However some RealTek chips are known * to be buggy on DAC handling, therefore disable DAC by limiting * DMA address space to 32bit. PCIe variants of RealTek chips * may not have the limitation. */ lowaddr = BUS_SPACE_MAXADDR; if ((sc->rl_flags & RL_FLAG_PCIE) == 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->rl_parent_tag); if (error) { device_printf(dev, "could not allocate parent DMA tag\n"); return (error); } /* * Allocate map for TX mbufs. */ error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, NULL, NULL, &sc->rl_ldata.rl_tx_mtag); if (error) { device_printf(dev, "could not allocate TX DMA tag\n"); return (error); } /* * Allocate map for RX mbufs. */ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, &sc->rl_ldata.rl_jrx_mtag); if (error) { device_printf(dev, "could not allocate jumbo RX DMA tag\n"); return (error); } } error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); if (error) { device_printf(dev, "could not allocate RX DMA tag\n"); return (error); } /* * Allocate map for TX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, tx_list_size, 1, tx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); if (error) { device_printf(dev, "could not allocate TX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_tx_list_map); if (error) { device_printf(dev, "could not allocate TX DMA ring\n"); return (error); } /* Load the map for the TX ring. */ sc->rl_ldata.rl_tx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, tx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { device_printf(dev, "could not load TX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for TX buffers */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); if (error) { device_printf(dev, "could not create DMA map for TX\n"); return (error); } } /* * Allocate map for RX descriptor list. 
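 */

re_allocmem() above repeats one busdma recipe per descriptor ring: create a DMA tag constrained to the ring's size and RL_RING_ALIGN alignment, allocate coherent zeroed memory against it, then load the map to learn the bus address that will be programmed into the chip. The recipe condensed into one illustrative helper (names are ours, not the driver's):

static int
ring_alloc(bus_dma_tag_t parent, bus_size_t size, bus_dma_tag_t *tag,
    void **ring, bus_dmamap_t *map, bus_addr_t *paddr)
{
	int error;

	/* One segment, RL_RING_ALIGN-aligned, 32-bit addressable. */
	error = bus_dma_tag_create(parent, RL_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, tag);
	if (error != 0)
		return (error);
	error = bus_dmamem_alloc(*tag, ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (error != 0)
		return (error);
	*paddr = 0;
	error = bus_dmamap_load(*tag, *map, *ring, size,
	    re_dma_map_addr, paddr, BUS_DMA_NOWAIT);
	if (error != 0 || *paddr == 0)
		return (ENOMEM);
	return (0);
}

/*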
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, rx_list_size, 1, rx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); if (error) { device_printf(dev, "could not create RX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_rx_list_map); if (error) { device_printf(dev, "could not allocate RX DMA ring\n"); return (error); } /* Load the map for the RX ring. */ sc->rl_ldata.rl_rx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, rx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { device_printf(dev, "could not load RX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for RX buffers */ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, &sc->rl_ldata.rl_jrx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for jumbo RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for jumbo RX\n"); return (error); } } } error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for RX\n"); return (error); } } /* Create DMA map for statistics. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, &sc->rl_ldata.rl_stag); if (error) { device_printf(dev, "could not create statistics DMA tag\n"); return (error); } /* Allocate DMA'able memory for statistics. */ error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, (void **)&sc->rl_ldata.rl_stats, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_smap); if (error) { device_printf(dev, "could not allocate statistics DMA memory\n"); return (error); } /* Load the map for statistics. */ sc->rl_ldata.rl_stats_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { device_printf(dev, "could not load statistics DMA memory\n"); return (ENOMEM); } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int re_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t as[ETHER_ADDR_LEN / 2]; struct rl_softc *sc; struct ifnet *ifp; const struct rl_hwrev *hw_rev; u_int32_t cap, ctl; int hwrev; u_int16_t devid, re_did = 0; int error = 0, i, phy, rid; int msic, msixc, reg; uint8_t cfg; sc = device_get_softc(dev); sc->rl_dev = dev; mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); devid = pci_get_device(dev); /* * Prefer memory space register mapping over I/O space. * Because the RTL8169SC does not seem to work when memory * mapping is used, always activate I/O mapping for it. */ if (devid == RT_DEVICEID_8169SC) prefer_iomap = 1; if (prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(1); sc->rl_res_type = SYS_RES_MEMORY; /* RTL8168/8101E seems to use different BARs. */ if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) sc->rl_res_id = PCIR_BAR(2); } else { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; } sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); if (sc->rl_res == NULL && prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); } if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); msic = pci_msi_count(dev); msixc = pci_msix_count(dev); if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { sc->rl_flags |= RL_FLAG_PCIE; sc->rl_expcap = reg; } if (bootverbose) { device_printf(dev, "MSI count : %d\n", msic); device_printf(dev, "MSI-X count : %d\n", msixc); } if (msix_disable > 0) msixc = 0; if (msi_disable > 0) msic = 0; /* Prefer MSI-X to MSI. */ if (msixc > 0) { msixc = 1; rid = PCIR_BAR(4); sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->rl_res_pba == NULL) { device_printf(sc->rl_dev, "could not allocate MSI-X PBA resource\n"); } if (sc->rl_res_pba != NULL && pci_alloc_msix(dev, &msixc) == 0) { if (msixc == 1) { device_printf(dev, "Using %d MSI-X message\n", msixc); sc->rl_flags |= RL_FLAG_MSIX; } else pci_release_msi(dev); } if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { if (sc->rl_res_pba != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); sc->rl_res_pba = NULL; msixc = 0; } } /* Prefer MSI to INTx. */ if (msixc == 0 && msic > 0) { msic = 1; if (pci_alloc_msi(dev, &msic) == 0) { if (msic == RL_MSI_MESSAGES) { device_printf(dev, "Using %d MSI message\n", msic); sc->rl_flags |= RL_FLAG_MSI; /* Explicitly set MSI enable bit. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); cfg |= RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } else pci_release_msi(dev); } if ((sc->rl_flags & RL_FLAG_MSI) == 0) msic = 0; } /* Allocate interrupt */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { rid = 0; sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { sc->rl_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->rl_irq[i] == NULL) { device_printf(dev, "couldn't allocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } } if ((sc->rl_flags & RL_FLAG_MSI) == 0) { CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); if ((cfg & RL_CFG2_MSI) != 0) { device_printf(dev, "turning off MSI enable bit.\n"); cfg &= ~RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } /* Disable ASPM L0S/L1.
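 */

The block above walks the usual interrupt preference order: try a single MSI-X vector (which also needs the PBA memory BAR), fall back to a single MSI message (toggling the chip's MSI enable bit in RL_CFG2), and finally to a shared legacy INTx line, keeping RL_FLAG_MSIX/RL_FLAG_MSI consistent so detach releases the right resources. Reduced to its skeleton (a fragment, not a drop-in replacement; the real code also allocates and, on failure, releases the PBA resource):

	int n = 1;

	/* Each step can be vetoed by the hw.re.msix_disable and
	 * hw.re.msi_disable loader tunables defined earlier. */
	if (msix_disable == 0 && pci_msix_count(dev) > 0 &&
	    pci_alloc_msix(dev, &n) == 0 && n == 1)
		sc->rl_flags |= RL_FLAG_MSIX;
	else if (msi_disable == 0 && pci_msi_count(dev) > 0 &&
	    pci_alloc_msi(dev, &n) == 0 && n == 1)
		sc->rl_flags |= RL_FLAG_MSI;
	/* else: fall back to a shared INTx IRQ at rid 0. */

/*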
*/ if (sc->rl_expcap != 0) { cap = pci_read_config(dev, sc->rl_expcap + PCIR_EXPRESS_LINK_CAP, 2); if ((cap & PCIM_LINK_CAP_ASPM) != 0) { ctl = pci_read_config(dev, sc->rl_expcap + PCIR_EXPRESS_LINK_CTL, 2); if ((ctl & 0x0003) != 0) { ctl &= ~0x0003; pci_write_config(dev, sc->rl_expcap + PCIR_EXPRESS_LINK_CTL, ctl, 2); device_printf(dev, "ASPM disabled\n"); } } else device_printf(dev, "no ASPM capability\n"); } hw_rev = re_hwrevs; hwrev = CSR_READ_4(sc, RL_TXCFG); switch (hwrev & 0x70000000) { case 0x00000000: case 0x10000000: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); hwrev &= (RL_TXCFG_HWREV | 0x80000000); break; default: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); hwrev &= RL_TXCFG_HWREV; break; } device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000); while (hw_rev->rl_desc != NULL) { if (hw_rev->rl_rev == hwrev) { sc->rl_type = hw_rev->rl_type; sc->rl_hwrev = hw_rev; break; } hw_rev++; } if (hw_rev->rl_desc == NULL) { device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); error = ENXIO; goto fail; } switch (hw_rev->rl_rev) { case RL_HWREV_8139CPLUS: sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; break; case RL_HWREV_8100E: case RL_HWREV_8101E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; break; case RL_HWREV_8102E: case RL_HWREV_8102EL: case RL_HWREV_8102EL_SPIN1: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8103E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; break; case RL_HWREV_8401E: case RL_HWREV_8105E: case RL_HWREV_8105E_SPIN1: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8402: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ; break; case RL_HWREV_8168B_SPIN1: case RL_HWREV_8168B_SPIN2: sc->rl_flags |= RL_FLAG_WOLRXENB; /* FALLTHROUGH */ case RL_HWREV_8168B_SPIN3: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; break; case RL_HWREV_8168C_SPIN2: sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168C: if ((hwrev & 0x00700000) == 0x00200000) sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168CP: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168D: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168DP: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168E_VL: case RL_HWREV_8168F: case RL_HWREV_8411: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK; 
break; case RL_HWREV_8169_8110SB: case RL_HWREV_8169_8110SBL: case RL_HWREV_8169_8110SC: case RL_HWREV_8169_8110SCE: sc->rl_flags |= RL_FLAG_PHYWAKE; /* FALLTHROUGH */ case RL_HWREV_8169: case RL_HWREV_8169S: case RL_HWREV_8110S: sc->rl_flags |= RL_FLAG_MACRESET; break; default: break; } if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) { sc->rl_cfg0 = RL_8139_CFG0; sc->rl_cfg1 = RL_8139_CFG1; sc->rl_cfg2 = 0; sc->rl_cfg3 = RL_8139_CFG3; sc->rl_cfg4 = RL_8139_CFG4; sc->rl_cfg5 = RL_8139_CFG5; } else { sc->rl_cfg0 = RL_CFG0; sc->rl_cfg1 = RL_CFG1; sc->rl_cfg2 = RL_CFG2; sc->rl_cfg3 = RL_CFG3; sc->rl_cfg4 = RL_CFG4; sc->rl_cfg5 = RL_CFG5; } /* Reset the adapter. */ RL_LOCK(sc); re_reset(sc); RL_UNLOCK(sc); /* Enable PME. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, sc->rl_cfg1); cfg |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, cfg); cfg = CSR_READ_1(sc, sc->rl_cfg5); cfg &= RL_CFG5_PME_STS; CSR_WRITE_1(sc, sc->rl_cfg5, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((sc->rl_flags & RL_FLAG_PAR) != 0) { /* * XXX Should have a better way to extract station * address from EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } else { sc->rl_eewidth = RL_9356_ADDR_LEN; re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); if (re_did != 0x8129) sc->rl_eewidth = RL_9346_ADDR_LEN; /* * Get station address from the EEPROM. */ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); for (i = 0; i < ETHER_ADDR_LEN / 2; i++) as[i] = le16toh(as[i]); bcopy(as, eaddr, sizeof(eaddr)); } if (sc->rl_type == RL_8169) { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; sc->rl_txstart = RL_GTXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; } else { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; sc->rl_txstart = RL_TXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; } error = re_allocmem(dev, sc); if (error) goto fail; re_add_sysctls(sc); ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Take controller out of deep sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); else CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } /* Take PHY out of power down mode. */ if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); if (hw_rev->rl_rev == RL_HWREV_8401E) CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); } if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { re_gmii_writereg(dev, 1, 0x1f, 0); re_gmii_writereg(dev, 1, 0x0e, 0); } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = re_ioctl; ifp->if_start = re_start; /* * RTL8168/8111C generates wrong IP checksummed frame if the * packet has IP options so disable TX IP checksum offloading. 
*/ if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2) ifp->if_hwassist = CSUM_TCP | CSUM_UDP; else ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; ifp->if_hwassist |= CSUM_TSO; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; ifp->if_capenable = ifp->if_capabilities; ifp->if_init = re_init; IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); #define RE_PHYAD_INTERNAL 0 /* Do MII setup. */ phy = RE_PHYAD_INTERNAL; if (sc->rl_type == RL_8169) phy = 1; error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; if (ifp->if_capabilities & IFCAP_HWCSUM) ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; /* Enable WOL if PM is supported. */ if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0) ifp->if_capabilities |= IFCAP_WOL; ifp->if_capenable = ifp->if_capabilities; ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); /* * Don't enable TSO by default. It is known to generate * corrupted TCP segments (bad TCP options) under certain * circumstances. */ ifp->if_hwassist &= ~CSUM_TSO; ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); #ifdef DEV_NETMAP re_netmap_attach(sc); #endif /* DEV_NETMAP */ #ifdef RE_DIAG /* * Perform hardware diagnostic on the original RTL8169. * Some 32-bit cards were incorrectly wired and would * malfunction if plugged into a 64-bit slot. */ if (hwrev == RL_HWREV_8169) { error = re_diag(sc); if (error) { device_printf(dev, "attach aborted due to hardware diag failure\n"); ether_ifdetach(ifp); goto fail; } } #endif #ifdef RE_TX_MODERATION intr_filter = 1; #endif /* Hook interrupt last to avoid having to lock softc */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, &sc->rl_intrhand[0]); } else { error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, &sc->rl_intrhand[0]); } if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); } fail: if (error) re_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated.
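 */

The attach tail above chooses between two interrupt strategies: with MSI/MSI-X and hw.re.intr_filter left at 0 it registers re_intr_msi() as a regular ithread handler, otherwise re_intr() runs as a filter in primary interrupt context and defers the real work to a taskqueue. The shape of such a filter, schematically (the actual re_intr() appears later in this file and performs additional checks):

static int
intr_filter_sketch(void *arg)
{
	struct rl_softc *sc = arg;
	uint16_t status;

	status = CSR_READ_2(sc, RL_ISR);
	if (status == 0xFFFF || (status & RL_INTRS) == 0)
		return (FILTER_STRAY);	/* device gone or not ours */
	/* Mask the chip and hand the real work to the taskqueue. */
	CSR_WRITE_2(sc, RL_IMR, 0);
	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
	return (FILTER_HANDLED);
}

/*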
*/ static int re_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; int i, rid; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif RL_LOCK(sc); #if 0 sc->suspended = 1; #endif re_stop(sc); RL_UNLOCK(sc); callout_drain(&sc->rl_stat_callout); taskqueue_drain(taskqueue_fast, &sc->rl_inttask); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifdetach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to re_ioctl(), * which will try to call re_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the re_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); } if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); /* * The rest is resource deallocation, so we should already be * stopped here. */ if (sc->rl_intrhand[0] != NULL) { bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); sc->rl_intrhand[0] = NULL; } - if (ifp != NULL) + if (ifp != NULL) { +#ifdef DEV_NETMAP + netmap_detach(ifp); +#endif /* DEV_NETMAP */ if_free(ifp); + } if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) rid = 0; else rid = 1; if (sc->rl_irq[0] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); sc->rl_irq[0] = NULL; } if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) pci_release_msi(dev); if (sc->rl_res_pba) { rid = PCIR_BAR(4); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); } if (sc->rl_res) bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, sc->rl_res); /* Unload and free the RX DMA ring memory and map */ if (sc->rl_ldata.rl_rx_list_tag) { if (sc->rl_ldata.rl_rx_list_map) bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map); if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list) bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->rl_ldata.rl_tx_list_tag) { if (sc->rl_ldata.rl_tx_list_map) bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map); if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list) bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if (sc->rl_ldata.rl_tx_mtag) { for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, sc->rl_ldata.rl_tx_desc[i].tx_dmamap); } bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); } if (sc->rl_ldata.rl_rx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[i].rx_dmamap); } if (sc->rl_ldata.rl_rx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); } if (sc->rl_ldata.rl_jrx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { if 
(sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); } if (sc->rl_ldata.rl_jrx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); } /* Unload and free the stats buffer and map */ if (sc->rl_ldata.rl_stag) { if (sc->rl_ldata.rl_smap) bus_dmamap_unload(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap); if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats) bus_dmamem_free(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); bus_dma_tag_destroy(sc->rl_ldata.rl_stag); } -#ifdef DEV_NETMAP - netmap_detach(ifp); -#endif /* DEV_NETMAP */ if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); mtx_destroy(&sc->rl_mtx); return (0); } static __inline void re_discard_rxbuf(struct rl_softc *sc, int idx) { struct rl_desc *desc; struct rl_rxdesc *rxd; uint32_t cmdstat; if (sc->rl_ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) rxd = &sc->rl_ldata.rl_jrx_desc[idx]; else rxd = &sc->rl_ldata.rl_rx_desc[idx]; desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; cmdstat = rxd->rx_size; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); } static int re_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; #ifdef RE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. * The RealTek chip requires RX buffers to be aligned on 64-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back six bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. 
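* For example, assuming the usual constants (RE_ETHER_ALIGN of 8 and
* ETHER_ALIGN of 2), the m_adj() below leaves the DMA target 64-bit
* aligned for the chip, and re_fixup_rx() later copies the received
* data back by RE_ETHER_ALIGN - ETHER_ALIGN = 6 bytes, so the 14-byte
* Ethernet header ends up at offset 2 and the IP header is longword
* aligned for the host.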
*/ m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_rx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_rx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } static int re_jumbo_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; #ifdef RE_FIXUP_RX m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_jrx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_jrx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } #ifdef RE_FIXUP_RX static __inline void re_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; } #endif static int re_tx_list_init(struct rl_softc *sc) { struct rl_desc *desc; int i; RL_LOCK_ASSERT(sc); bzero(sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; #ifdef DEV_NETMAP re_netmap_tx_init(sc); #endif /* DEV_NETMAP */ /* Set EOR. 
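The end-of-ring bit on the final descriptor makes the chip wrap back to entry 0, so the fixed-size array behaves as a circular ring; re_newbuf() and re_discard_rxbuf() apply the same rule on the RX side.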
*/ desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->rl_ldata.rl_tx_prodidx = 0; sc->rl_ldata.rl_tx_considx = 0; sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; return (0); } static int re_rx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; if ((error = re_newbuf(sc, i)) != 0) return (error); } #ifdef DEV_NETMAP re_netmap_rx_init(sc); #endif /* DEV_NETMAP */ /* Flush the RX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; sc->rl_int_rx_act = 0; return (0); } static int re_jrx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; if ((error = re_jumbo_newbuf(sc, i)) != 0) return (error); } bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; sc->rl_int_rx_act = 0; return (0); } /* * RX handler for C+ and 8169. For the gigE chips, we support * the reception of jumbo frames that have been fragmented * across multiple 2K mbuf cluster buffers. */ static int re_rxeof(struct rl_softc *sc, int *rx_npktsp) { struct mbuf *m; struct ifnet *ifp; int i, rxerr, total_len; struct rl_desc *cur_rx; u_int32_t rxstat, rxvlan; int jumbo, maxpkt = 16, rx_npkts = 0; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; #ifdef DEV_NETMAP if (ifp->if_capenable & IFCAP_NETMAP) { NA(ifp)->rx_rings->nr_kflags |= NKR_PENDINTR; selwakeuppri(&NA(ifp)->rx_rings->si, PI_NET); return 0; } #endif /* DEV_NETMAP */ if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) jumbo = 1; else jumbo = 0; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; i = RL_RX_DESC_NXT(sc, i)) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; cur_rx = &sc->rl_ldata.rl_rx_list[i]; rxstat = le32toh(cur_rx->rl_cmdstat); if ((rxstat & RL_RDESC_STAT_OWN) != 0) break; total_len = rxstat & sc->rl_rxlenmask; rxvlan = le32toh(cur_rx->rl_vlanctl); if (jumbo != 0) m = sc->rl_ldata.rl_jrx_desc[i].rx_m; else m = sc->rl_ldata.rl_rx_desc[i].rx_m; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { /* * RTL8168C or later controllers do not * support multi-fragment packet. */ re_discard_rxbuf(sc, i); continue; } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { if (re_newbuf(sc, i) != 0) { /* * If this is part of a multi-fragment packet, * discard all the pieces. 
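* Recycling this descriptor without its mbuf would orphan the
* partially assembled chain in rl_head/rl_tail, so the whole frame
* is dropped rather than delivering a truncated packet.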
*/ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } m->m_len = RE_RX_DESC_BUFLEN; if (sc->rl_head == NULL) sc->rl_head = sc->rl_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; sc->rl_tail = m; } continue; } /* * NOTE: for the 8139C+, the frame length field * is always 12 bits in size, but for the gigE chips, * it is 13 bits (since the max RX frame length is 16K). * Unfortunately, all 32 bits in the status word * were already used, so to make room for the extra * length bit, RealTek took out the 'frame alignment * error' bit and shifted the other status bits * over one slot. The OWN, EOR, FS and LS bits are * still in the same places. We have already extracted * the frame length and checked the OWN bit, so rather * than using an alternate bit mapping, we shift the * status bits one space to the right so we can evaluate * them using the 8169 status as though it was in the * same format as that of the 8139C+. */ if (sc->rl_type == RL_8169) rxstat >>= 1; /* * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be * set, but if CRC is clear, it will still be a valid frame. */ if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { rxerr = 1; if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && total_len > 8191 && (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) rxerr = 0; if (rxerr != 0) { ifp->if_ierrors++; /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (jumbo != 0) rxerr = re_jumbo_newbuf(sc, i); else rxerr = re_newbuf(sc, i); if (rxerr != 0) { ifp->if_iqdrops++; if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } if (sc->rl_head != NULL) { if (jumbo != 0) m->m_len = total_len; else { m->m_len = total_len % RE_RX_DESC_BUFLEN; if (m->m_len == 0) m->m_len = RE_RX_DESC_BUFLEN; } /* * Special case: if there's 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes is the CRC, which we don't * care about anyway. 
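* For example, if only 2 bytes land in the final buffer of a chain,
* both belong to the 4-byte CRC: the tail mbuf is freed and the
* remaining 2 CRC bytes are trimmed from the previous mbuf instead.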
*/ if (m->m_len <= ETHER_CRC_LEN) { sc->rl_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; } m = sc->rl_head; sc->rl_head = sc->rl_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef RE_FIXUP_RX re_fixup_rx(m); #endif ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { /* Check IP header checksum */ if (rxstat & RL_RDESC_STAT_PROTOID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if ((RL_TCPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || (RL_UDPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } else { /* * RTL8168C/RTL816CP/RTL8111C/RTL8111CP */ if ((rxstat & RL_RDESC_STAT_PROTOID) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (((rxstat & RL_RDESC_STAT_TCP) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || ((rxstat & RL_RDESC_STAT_UDP) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } maxpkt--; if (rxvlan & RL_RDESC_VLANCTL_TAG) { m->m_pkthdr.ether_vtag = bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); m->m_flags |= M_VLANTAG; } RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); rx_npkts++; } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = i; if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; if (maxpkt) return (EAGAIN); return (0); } static void re_txeof(struct rl_softc *sc) { struct ifnet *ifp; struct rl_txdesc *txd; u_int32_t txstat; int cons; cons = sc->rl_ldata.rl_tx_considx; if (cons == sc->rl_ldata.rl_tx_prodidx) return; ifp = sc->rl_ifp; #ifdef DEV_NETMAP if (ifp->if_capenable & IFCAP_NETMAP) { selwakeuppri(&NA(ifp)->tx_rings[0].si, PI_NET); return; } #endif /* DEV_NETMAP */ /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; cons != sc->rl_ldata.rl_tx_prodidx; cons = RL_TX_DESC_NXT(sc, cons)) { txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); if (txstat & RL_TDESC_STAT_OWN) break; /* * We only stash mbufs in the last descriptor * in a fragment chain, which also happens to * be the only place where the TX status bits * are valid. 
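* The OWN bit is the handshake: re_encap() hands descriptors to the
* chip with RL_TDESC_CMD_OWN set, the hardware clears OWN as it
* completes each one, and the OWN test above stops the reap at the
* first descriptor the chip still holds.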
*/ if (txstat & RL_TDESC_CMD_EOF) { txd = &sc->rl_ldata.rl_tx_desc[cons]; bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbufs!", __func__)); m_freem(txd->tx_m); txd->tx_m = NULL; if (txstat & (RL_TDESC_STAT_EXCESSCOL| RL_TDESC_STAT_COLCNT)) ifp->if_collisions++; if (txstat & RL_TDESC_STAT_TXERRSUM) ifp->if_oerrors++; else ifp->if_opackets++; } sc->rl_ldata.rl_tx_free++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->rl_ldata.rl_tx_considx = cons; /* No changes made to the TX ring, so no flush needed */ if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { #ifdef RE_TX_MODERATION /* * If not all descriptors have been reaped yet, reload * the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif } else sc->rl_watchdog_timer = 0; } static void re_tick(void *xsc) { struct rl_softc *sc; struct mii_data *mii; sc = xsc; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); if ((sc->rl_flags & RL_FLAG_LINK) == 0) re_miibus_statchg(sc->rl_dev); /* * Reclaim transmitted frames here. Technically it is not * necessary to do here but it ensures periodic reclamation * regardless of Tx completion interrupt which seems to be * lost on PCIe based controllers under certain situations. */ re_txeof(sc); re_watchdog(sc); callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } #ifdef DEVICE_POLLING static int re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts = 0; RL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = re_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); return (rx_npkts); } static int re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts; RL_LOCK_ASSERT(sc); sc->rxcycles = count; re_rxeof(sc, &rx_npkts); re_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int16_t status; status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) return (rx_npkts); if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); /* * XXX check behaviour on receiver stalls. 
*/ if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static int re_intr(void *arg) { struct rl_softc *sc; uint16_t status; sc = arg; status = CSR_READ_2(sc, RL_ISR); if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) return (FILTER_STRAY); CSR_WRITE_2(sc, RL_IMR, 0); taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); return (FILTER_HANDLED); } static void re_int_task(void *arg, int npending) { struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; int rval = 0; sc = arg; ifp = sc->rl_ifp; RL_LOCK(sc); status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->suspended || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) rval = re_rxeof(sc, NULL); /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. */ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & ( #ifdef RE_TX_MODERATION RL_ISR_TIMEOUT_EXPIRED| #else RL_ISR_TX_OK| #endif RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); RL_UNLOCK(sc); if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); return; } CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); } static void re_intr_msi(void *xsc) { struct rl_softc *sc; struct ifnet *ifp; uint16_t intrs, status; sc = xsc; RL_LOCK(sc); ifp = sc->rl_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif /* Disable interrupts. */ CSR_WRITE_2(sc, RL_IMR, 0); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } intrs = RL_INTRS_CPLUS; status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->rl_int_rx_act > 0) { intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); } if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { re_rxeof(sc, NULL); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (sc->rl_int_rx_mod != 0 && (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { /* Rearm one-shot timer. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); sc->rl_int_rx_act = 1; } else { intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; sc->rl_int_rx_act = 0; } } } /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. 
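* (This handler also moderates RX interrupts in software: after RX
* work it masks the RX interrupt sources, arms the one-shot countdown
* timer with CSR_WRITE_4(sc, RL_TIMERCNT, 1), and services the ring
* again when RL_ISR_TIMEOUT_EXPIRED fires rl_int_rx_mod microseconds
* later, see RL_USECS() in re_init_locked(); the RX sources are
* unmasked once a timer pass finds no new RX events.)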
*/ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); CSR_WRITE_2(sc, RL_IMR, intrs); } RL_UNLOCK(sc); } static int re_encap(struct rl_softc *sc, struct mbuf **m_head) { struct rl_txdesc *txd, *txd_last; bus_dma_segment_t segs[RL_NTXSEGS]; bus_dmamap_t map; struct mbuf *m_new; struct rl_desc *desc; int nsegs, prod; int i, error, ei, si; int padlen; uint32_t cmdstat, csum_flags, vlanctl; RL_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * With some of the RealTek chips, using the checksum offload * support in conjunction with the autopadding feature results * in the transmission of corrupt frames. For example, if we * need to send a really small IP fragment that's less than 60 * bytes in size, and IP header checksumming is enabled, the * resulting ethernet frame that appears on the wire will * have garbled payload. To work around this, if TX IP checksum * offload is enabled, we always manually pad short frames out * to the minimum ethernet frame size. */ if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m_new = m_dup(*m_head, M_DONTWAIT); m_freem(*m_head); if (m_new == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m_new; } if ((*m_head)->m_next != NULL || M_TRAILINGSPACE(*m_head) < padlen) { m_new = m_defrag(*m_head, M_DONTWAIT); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else m_new = *m_head; /* * Manually pad short frames, and zero the pad space * to avoid leaking data. */ bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); m_new->m_pkthdr.len += padlen; m_new->m_len = m_new->m_pkthdr.len; *m_head = m_new; } prod = sc->rl_ldata.rl_tx_prodidx; txd = &sc->rl_ldata.rl_tx_desc[prod]; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check for number of available descriptors. */ if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); return (ENOBUFS); } bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set up checksum offload. Note: checksum offload bits must * appear in all descriptors of a multi-descriptor transmit * attempt. This is according to testing done with an 8169 * chip. This is a requirement. 
*/ vlanctl = 0; csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { csum_flags |= RL_TDESC_CMD_LGSEND; vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVALV2_SHIFT); } else { csum_flags |= RL_TDESC_CMD_LGSEND | ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVAL_SHIFT); } } else { /* * Unconditionally enable IP checksum if TCP or UDP * checksum is required. Otherwise, the TCP/UDP checksum * setting has no effect. */ if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { csum_flags |= RL_TDESC_CMD_IPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) csum_flags |= RL_TDESC_CMD_TCPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) csum_flags |= RL_TDESC_CMD_UDPCSUM; } else { vlanctl |= RL_TDESC_CMD_IPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) vlanctl |= RL_TDESC_CMD_TCPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) vlanctl |= RL_TDESC_CMD_UDPCSUMV2; } } } /* * Set up hardware VLAN tagging. Note: vlan tag info must * appear in all descriptors of a multi-descriptor * transmission attempt. */ if ((*m_head)->m_flags & M_VLANTAG) vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | RL_TDESC_VLANCTL_TAG; si = prod; for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { desc = &sc->rl_ldata.rl_tx_list[prod]; desc->rl_vlanctl = htole32(vlanctl); desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); cmdstat = segs[i].ds_len; if (i != 0) cmdstat |= RL_TDESC_CMD_OWN; if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) cmdstat |= RL_TDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | csum_flags); sc->rl_ldata.rl_tx_free--; } /* Update producer index. */ sc->rl_ldata.rl_tx_prodidx = prod; /* Set EOF on the last descriptor. */ ei = RL_TX_DESC_PRV(sc, prod); desc = &sc->rl_ldata.rl_tx_list[ei]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); desc = &sc->rl_ldata.rl_tx_list[si]; /* Set SOF and transfer ownership of packet to the chip. */ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); /* * Ensure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. (Swap last and first dmamaps.) */ txd_last = &sc->rl_ldata.rl_tx_desc[ei]; map = txd->tx_dmamap; txd->tx_dmamap = txd_last->tx_dmamap; txd_last->tx_dmamap = map; txd_last->tx_m = *m_head; return (0); } static void re_start(struct ifnet *ifp) { struct rl_softc *sc; sc = ifp->if_softc; RL_LOCK(sc); re_start_locked(ifp); RL_UNLOCK(sc); } /* * Main transmit routine for C+ and gigE NICs. */ static void re_start_locked(struct ifnet *ifp) { struct rl_softc *sc; struct mbuf *m_head; int queued; sc = ifp->if_softc; #ifdef DEV_NETMAP /* XXX is this necessary ?
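In netmap mode the TX ring is driven by netmap itself, so all this path does is kick the transmitter when netmap has advanced the producer index; the normal dequeue loop below is bypassed.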
*/ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_kring *kring = &NA(ifp)->tx_rings[0]; if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { /* kick the tx unit */ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif sc->rl_watchdog_timer = 5; } return; } #endif /* DEV_NETMAP */ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) return; for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->rl_ldata.rl_tx_free > 1;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (re_encap(sc, &m_head) != 0) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); queued++; } if (queued == 0) { #ifdef RE_TX_MODERATION if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif return; } /* Flush the TX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the TIMERINT register, and then trigger an * interrupt. Each time we write to the TIMERCNT register, * the timer count is reset to 0. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif /* * Set a timeout in case the chip goes out to lunch. */ sc->rl_watchdog_timer = 5; } static void re_set_jumbo(struct rl_softc *sc, int jumbo) { if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { pci_set_max_read_req(sc->rl_dev, 4096); return; } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); if (jumbo != 0) { CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | RL_CFG3_JUMBO_EN0); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: break; case RL_HWREV_8168E: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) | 0x01); break; default: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); } } else { CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & ~RL_CFG3_JUMBO_EN0); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: break; case RL_HWREV_8168E: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); break; default: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); } } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: pci_set_max_read_req(sc->rl_dev, 4096); break; default: if (jumbo != 0) pci_set_max_read_req(sc->rl_dev, 512); else pci_set_max_read_req(sc->rl_dev, 4096); } } static void re_init(void *xsc) { struct rl_softc *sc = xsc; RL_LOCK(sc); re_init_locked(sc); RL_UNLOCK(sc); } static void re_init_locked(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; uint32_t reg; uint16_t cfg; union { uint32_t align_dummy; u_char eaddr[ETHER_ADDR_LEN]; } eaddr; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ re_stop(sc); /* Put controller into known state. */ re_reset(sc); /* * For C+ mode, initialize the RX descriptors and mbufs. 
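* (The ordering from here on matters: the rings are loaded first, the
* C+ command register is programmed before any other register, as
* noted below, and only then are the MAC address, ring base addresses
* and TX/RX enables written.)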
*/ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { if (ifp->if_mtu > RL_MTU) { if (re_jrx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for jumbo RX buffers\n"); re_stop(sc); return; } /* Disable checksum offloading for jumbo frames. */ ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); } else { if (re_rx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for RX buffers\n"); re_stop(sc); return; } } re_set_jumbo(sc, ifp->if_mtu > RL_MTU); } else { if (re_rx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for RX buffers\n"); re_stop(sc); return; } if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { if (ifp->if_mtu > RL_MTU) pci_set_max_read_req(sc->rl_dev, 512); else pci_set_max_read_req(sc->rl_dev, 4096); } } re_tx_list_init(sc); /* * Enable C+ RX and TX mode, as well as VLAN stripping and * RX checksum offload. We must configure the C+ register * before all others. */ cfg = RL_CPLUSCMD_PCI_MRW; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) cfg |= RL_CPLUSCMD_RXCSUM_ENB; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) cfg |= RL_CPLUSCMD_VLANSTRIP; if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { cfg |= RL_CPLUSCMD_MACSTAT_DIS; /* XXX magic. */ cfg |= 0x0001; } else cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { reg = 0x000fff00; if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) reg |= 0x000000ff; if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) reg |= 0x00f00000; CSR_WRITE_4(sc, 0x7c, reg); /* Disable interrupt mitigation. */ CSR_WRITE_2(sc, 0xe2, 0); } /* * Disable TSO if interface MTU size is greater than MSS * allowed in controller. */ if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ /* Copy MAC address on stack to align. */ bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_4(sc, RL_IDR0, htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); CSR_WRITE_4(sc, RL_IDR4, htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* * Load the addresses of the RX and TX lists into the chip. */ CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX configuration. */ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Set the initial RX configuration. */ re_set_rxmode(sc); /* Configure interrupt moderation. */ if (sc->rl_type == RL_8169) { /* Magic from vendor. */ CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); } #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. 
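* With polling active the rings are reaped from re_poll() off the
* clock tick instead, so IMR stays zero and the chip never raises a
* hardware interrupt.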
*/ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); #ifdef notdef /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); #endif /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. */ #ifdef RE_TX_MODERATION /* * Use the timer interrupt register to moderate TX interrupts, * which dramatically improves the TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); #else /* * Use the timer interrupt register to moderate RX interrupts. */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(sc->rl_int_rx_mod)); } else { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); } #endif /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. */ if (sc->rl_type == RL_8169) { if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { /* * For controllers that use the new jumbo frame scheme, * set the maximum jumbo frame size depending on the * controller revision. */ if (ifp->if_mtu > RL_MTU) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, sc->rl_hwrev->rl_max_mtu + ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + ETHER_CRC_LEN); else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && sc->rl_hwrev->rl_max_mtu == RL_MTU) { /* RTL810x has no jumbo frame support. */ CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); } if (sc->rl_testmode) return; CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | RL_CFG1_DRVLOAD); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->rl_flags &= ~RL_FLAG_LINK; mii_mediachg(mii); sc->rl_watchdog_timer = 0; callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } /* * Set media options. */ static int re_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc; struct mii_data *mii; int error; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); error = mii_mediachg(mii); RL_UNLOCK(sc); return (error); } /* * Report current media status.
*/ static void re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; RL_UNLOCK(sc); } static int re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; uint32_t rev; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) { error = EINVAL; break; } RL_LOCK(sc); if (ifp->if_mtu != ifr->ifr_mtu) { ifp->if_mtu = ifr->ifr_mtu; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); ifp->if_hwassist &= ~CSUM_TSO; } VLAN_CAPABILITIES(ifp); } RL_UNLOCK(sc); break; case SIOCSIFFLAGS: RL_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->rl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) re_set_rxmode(sc); } else re_init_locked(sc); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_stop(sc); } sc->rl_if_flags = ifp->if_flags; RL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_set_rxmode(sc); RL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: { int mask, reinit; mask = ifr->ifr_reqcap ^ ifp->if_capenable; reinit = 0; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(re_poll, ifp); if (error) return (error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ RL_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) { rev = sc->rl_hwrev->rl_rev; if (rev == RL_HWREV_8168C || rev == RL_HWREV_8168C_SPIN2) ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; else ifp->if_hwassist |= RE_CSUM_FEATURES; } else ifp->if_hwassist &= ~RE_CSUM_FEATURES; reinit = 1; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit = 1; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; /* TSO over VLAN requires VLAN hardware tagging. 
*/ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; reinit = 1; } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWTSO)) != 0) reinit = 1; if ((mask & IFCAP_WOL) != 0 && (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } RL_UNLOCK(sc); VLAN_CAPABILITIES(ifp); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void re_watchdog(struct rl_softc *sc) { struct ifnet *ifp; RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) return; ifp = sc->rl_ifp; re_txeof(sc); if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); return; } if_printf(ifp, "watchdog timeout\n"); ifp->if_oerrors++; re_rxeof(sc, NULL); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void re_stop(struct rl_softc *sc) { int i; struct ifnet *ifp; struct rl_txdesc *txd; struct rl_rxdesc *rxd; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* * Disable accepting frames to put RX MAC into idle state. * Otherwise it's possible to get frames while stop command * execution is in progress and controller can DMA the frame * to already freed RX buffer during that period. */ CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | RL_RXCFG_RX_BROAD)); if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_1(sc, sc->rl_txstart) & RL_TXSTART_START) == 0) break; DELAY(20); } if (i == 0) device_printf(sc->rl_dev, "stopping TX poll timed out!\n"); CSR_WRITE_1(sc, RL_COMMAND, 0x00); } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | RL_CMD_RX_ENB); if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_QUEUE_EMPTY) != 0) break; DELAY(100); } if (i == 0) device_printf(sc->rl_dev, "stopping TXQ timed out!\n"); } } else CSR_WRITE_1(sc, RL_COMMAND, 0x00); DELAY(1000); CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_ISR, 0xFFFF); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { txd = &sc->rl_ldata.rl_tx_desc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* Free the RX list buffers. 
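The jumbo buffer set kept by RL_FLAG_JUMBOV2 controllers is maintained separately from the standard one, which is presumably why each gets its own sweep below.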
*/ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_rx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_jrx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int re_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); re_setwol(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int re_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = sc->rl_ifp; /* Take controller out of sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); } /* * Clear WOL matching such that normal Rx filtering * wouldn't interfere with WOL patterns. */ re_clrwol(sc); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int re_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); /* * Mark interface as down since otherwise we will panic if * interrupt comes in later on, which can happen in some * cases. */ sc->rl_ifp->if_flags &= ~IFF_UP; re_setwol(sc); RL_UNLOCK(sc); return (0); } static void re_set_linkspeed(struct rl_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int aneg, i, phyno; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } miisc = LIST_FIRST(&mii->mii_phys); phyno = miisc->mii_phy; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); re_miibus_writereg(sc->rl_dev, phyno, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); re_miibus_writereg(sc->rl_dev, phyno, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until re(4) get a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: return; default: break; } } RL_UNLOCK(sc); pause("relnk", hz); RL_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->rl_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. 
* MAC does not require reprogramming on resolved speed/duplex, * so this is just for completeness. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; } static void re_setwol(struct rl_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->rl_ifp; /* Put controller into sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } if ((ifp->if_capenable & IFCAP_WOL) != 0) { re_set_rxmode(sc); if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) re_set_linkspeed(sc); if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); } /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); /* Enable PME. */ v = CSR_READ_1(sc, sc->rl_cfg1); v &= ~RL_CFG1_PME; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, v); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= RL_CFG3_WOL_MAGIC; CSR_WRITE_1(sc, sc->rl_cfg3, v); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | RL_CFG5_WOL_LANWAKE); if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) v |= RL_CFG5_WOL_UCAST; if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((ifp->if_capenable & IFCAP_WOL) == 0 && (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); /* * It seems that hardware resets its link speed to 100Mbps in * power down mode so switching to 100Mbps in driver is not * needed. */ /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void re_clrwol(struct rl_softc *sc) { int pmc; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); CSR_WRITE_1(sc, sc->rl_cfg3, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); } static void re_add_sysctls(struct rl_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int error; ctx = device_get_sysctl_ctx(sc->rl_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", "Statistics Information"); if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) return; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I", "re RX interrupt moderation"); /* Pull in device tunables. 
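For example, a device.hints entry such as hint.re.0.int_rx_mod="100" would override the default for unit 0 (an illustrative value; out-of-range settings fall back to RL_TIMER_DEFAULT below).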
*/ sc->rl_int_rx_mod = RL_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->rl_dev), device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); if (error == 0) { if (sc->rl_int_rx_mod < RL_TIMER_MIN || sc->rl_int_rx_mod > RL_TIMER_MAX) { device_printf(sc->rl_dev, "int_rx_mod value out of " "range; using default: %d\n", RL_TIMER_DEFAULT); sc->rl_int_rx_mod = RL_TIMER_DEFAULT; } } } static int re_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct rl_softc *sc; struct rl_stats *stats; int error, i, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || req->newptr == NULL) return (error); if (result == 1) { sc = (struct rl_softc *)arg1; RL_LOCK(sc); if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); goto done; } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, RL_DUMPSTATS_HI, RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | RL_DUMPSTATS_START)); for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & RL_DUMPSTATS_START) == 0) break; DELAY(1000); } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); RL_UNLOCK(sc); if (i == 0) { device_printf(sc->rl_dev, "DUMP statistics request timed out\n"); return (ETIMEDOUT); } done: stats = sc->rl_ldata.rl_stats; printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); printf("Tx frames : %ju\n", (uintmax_t)le64toh(stats->rl_tx_pkts)); printf("Rx frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_pkts)); printf("Tx errors : %ju\n", (uintmax_t)le64toh(stats->rl_tx_errs)); printf("Rx errors : %u\n", le32toh(stats->rl_rx_errs)); printf("Rx missed frames : %u\n", (uint32_t)le16toh(stats->rl_missed_pkts)); printf("Rx frame alignment errs : %u\n", (uint32_t)le16toh(stats->rl_rx_framealign_errs)); printf("Tx single collisions : %u\n", le32toh(stats->rl_tx_onecoll)); printf("Tx multiple collisions : %u\n", le32toh(stats->rl_tx_multicolls)); printf("Rx unicast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_ucasts)); printf("Rx broadcast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_bcasts)); printf("Rx multicast frames : %u\n", le32toh(stats->rl_rx_mcasts)); printf("Tx aborts : %u\n", (uint32_t)le16toh(stats->rl_tx_aborts)); printf("Tx underruns : %u\n", (uint32_t)le16toh(stats->rl_rx_underruns)); } return (error); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, RL_TIMER_MAX)); } Index: projects/nand/sys/dev/sf/if_sf.c =================================================================== --- projects/nand/sys/dev/sf/if_sf.c (revision 235262) +++ projects/nand/sys/dev/sf/if_sf.c (revision 235263) @@ -1,2736 +1,2734 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); /* * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. * Programming manual is available from: * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf. * * Written by Bill Paul <wpaul@ctr.columbia.edu> * Department of Electrical Engineering * Columbia University, New York City */ /* * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet * controller designed with flexibility and reduced CPU load in mind. * The Starfire offers high and low priority buffer queues, a * producer/consumer index mechanism and several different buffer * queue and completion queue descriptor types. Any one of a number * of different driver designs can be used, depending on system and * OS requirements. This driver makes use of type2 transmit frame * descriptors to take full advantage of fragmented packet buffers * and two RX buffer queues prioritized on size (one queue for small * frames that will fit into a single mbuf, another with full size * mbuf clusters for everything else). The producer/consumer indexes * and completion queues are also used. * * One downside to the Starfire has to do with alignment: buffer * queues must be aligned on 256-byte boundaries, and receive buffers * must be aligned on longword boundaries. The receive buffer alignment * causes problems on strict alignment architectures, where the * packet payload should be longword aligned. There is no simple way * around this. * * For receive filtering, the Starfire offers 16 perfect filter slots * and a 512-bit hash table. * * The Starfire has no internal transceiver, relying instead on an * external MII-based transceiver. Accessing registers on external * PHYs is done through a special register map rather than with the * usual bitbang MDIO method. * * Accessing the registers on the Starfire is a little tricky. The * Starfire has a 512K internal register space. When programmed for * PCI memory mapped mode, the entire register space can be accessed * directly. However, in I/O space mode, only 256 bytes are directly * mapped into PCI I/O space.
The other registers can be accessed * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA * registers inside the 256-byte I/O window. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" MODULE_DEPEND(sf, pci, 1, 1, 1); MODULE_DEPEND(sf, ether, 1, 1, 1); MODULE_DEPEND(sf, miibus, 1, 1, 1); #undef SF_GFP_DEBUG #define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) /* Define this to activate partial TCP/UDP checksum offload. */ #undef SF_PARTIAL_CSUM_SUPPORT static struct sf_type sf_devs[] = { { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" }, }; static int sf_probe(device_t); static int sf_attach(device_t); static int sf_detach(device_t); static int sf_shutdown(device_t); static int sf_suspend(device_t); static int sf_resume(device_t); static void sf_intr(void *); static void sf_tick(void *); static void sf_stats_update(struct sf_softc *); #ifndef __NO_STRICT_ALIGNMENT static __inline void sf_fixup_rx(struct mbuf *); #endif static int sf_rxeof(struct sf_softc *); static void sf_txeof(struct sf_softc *); static int sf_encap(struct sf_softc *, struct mbuf **); static void sf_start(struct ifnet *); static void sf_start_locked(struct ifnet *); static int sf_ioctl(struct ifnet *, u_long, caddr_t); static void sf_download_fw(struct sf_softc *); static void sf_init(void *); static void sf_init_locked(struct sf_softc *); static void sf_stop(struct sf_softc *); static void sf_watchdog(struct sf_softc *); static int sf_ifmedia_upd(struct ifnet *); static int sf_ifmedia_upd_locked(struct ifnet *); static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void sf_reset(struct sf_softc *); static int sf_dma_alloc(struct sf_softc *); static void sf_dma_free(struct sf_softc *); static int sf_init_rx_ring(struct sf_softc *); static void sf_init_tx_ring(struct sf_softc *); static int sf_newbuf(struct sf_softc *, int); static void sf_rxfilter(struct sf_softc *); static int sf_setperf(struct sf_softc *, int, uint8_t *); static int sf_sethash(struct sf_softc *, caddr_t, int); #ifdef notdef static int sf_setvlan(struct sf_softc *, int, uint32_t); #endif static uint8_t sf_read_eeprom(struct sf_softc *, int); static int sf_miibus_readreg(device_t, int, int); static int sf_miibus_writereg(device_t, int, int, int); static void sf_miibus_statchg(device_t); #ifdef DEVICE_POLLING 
static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static uint32_t csr_read_4(struct sf_softc *, int); static void csr_write_4(struct sf_softc *, int, uint32_t); static void sf_txthresh_adjust(struct sf_softc *); static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS); static device_method_t sf_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sf_probe), DEVMETHOD(device_attach, sf_attach), DEVMETHOD(device_detach, sf_detach), DEVMETHOD(device_shutdown, sf_shutdown), DEVMETHOD(device_suspend, sf_suspend), DEVMETHOD(device_resume, sf_resume), /* MII interface */ DEVMETHOD(miibus_readreg, sf_miibus_readreg), DEVMETHOD(miibus_writereg, sf_miibus_writereg), DEVMETHOD(miibus_statchg, sf_miibus_statchg), DEVMETHOD_END }; static driver_t sf_driver = { "sf", sf_methods, sizeof(struct sf_softc), }; static devclass_t sf_devclass; DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); #define SF_SETBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) #define SF_CLRBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) static uint32_t csr_read_4(struct sf_softc *sc, int reg) { uint32_t val; if (sc->sf_restype == SYS_RES_MEMORY) val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); else { CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); } return (val); } static uint8_t sf_read_eeprom(struct sf_softc *sc, int reg) { uint8_t val; val = (csr_read_4(sc, SF_EEADDR_BASE + (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; return (val); } static void csr_write_4(struct sf_softc *sc, int reg, uint32_t val) { if (sc->sf_restype == SYS_RES_MEMORY) CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); else { CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); } } /* * Copy the address 'mac' into the perfect RX filter entry at * offset 'idx.' The perfect filter only has 16 entries so do * some sanity tests. */ static int sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac) { if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT) return (EINVAL); if (mac == NULL) return (EINVAL); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8)); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8)); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8)); return (0); } /* * Set the bit in the 512-bit hash table that corresponds to the * specified mac address 'mac.' If 'prio' is nonzero, update the * priority hash table instead of the filter hash table. */ static int sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) { uint32_t h; if (mac == NULL) return (EINVAL); h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; if (prio) { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } else { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } return (0); }
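/*
 * Worked example of the hash placement above (illustrative only, not
 * driver code): ether_crc32_be() returns a 32-bit CRC and the driver
 * keeps the top nine bits, so h lies in 0..511.  With, say, h == 427:
 * h >> 4 == 26 selects the 27th 16-bit table entry (the entries are
 * spaced SF_RXFILT_HASH_SKIP apart in register space) and
 * 1 << (h & 0xF) == 1 << 11 is the bit set within that entry, i.e.
 * bit 427 of the 512-bit table.
 */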
#ifdef notdef /* Set a VLAN tag in the receive filter. */ static int sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) { if (idx < 0 || idx >= SF_RXFILT_HASH_CNT) return (EINVAL); csr_write_4(sc, SF_RXFILT_HASH_BASE + (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); return (0); } #endif static int sf_miibus_readreg(device_t dev, int phy, int reg) { struct sf_softc *sc; int i; uint32_t val = 0; sc = device_get_softc(dev); for (i = 0; i < SF_TIMEOUT; i++) { val = csr_read_4(sc, SF_PHY_REG(phy, reg)); if ((val & SF_MII_DATAVALID) != 0) break; } if (i == SF_TIMEOUT) return (0); val &= SF_MII_DATAPORT; if (val == 0xffff) return (0); return (val); } static int sf_miibus_writereg(device_t dev, int phy, int reg, int val) { struct sf_softc *sc; int i; int busy; sc = device_get_softc(dev); csr_write_4(sc, SF_PHY_REG(phy, reg), val); for (i = 0; i < SF_TIMEOUT; i++) { busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); if ((busy & SF_MII_BUSY) == 0) break; } return (0); } static void sf_miibus_statchg(device_t dev) { struct sf_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint32_t val; sc = device_get_softc(dev); mii = device_get_softc(sc->sf_miibus); ifp = sc->sf_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->sf_link = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: case IFM_100_FX: sc->sf_link = 1; break; } } if (sc->sf_link == 0) return; val = csr_read_4(sc, SF_MACCFG_1); val &= ~SF_MACCFG1_FULLDUPLEX; val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { val |= SF_MACCFG1_FULLDUPLEX; csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); #ifdef notyet /* Configure flow-control bits. */ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= SF_MACCFG1_RX_FLOWENB; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) val |= SF_MACCFG1_TX_FLOWENB; #endif } else csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); /* Make sure to reset the MAC for the changes to take effect. */ csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); DELAY(1000); csr_write_4(sc, SF_MACCFG_1, val); val = csr_read_4(sc, SF_TIMER_CTL); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) val |= SF_TIMER_TIMES_TEN; else val &= ~SF_TIMER_TIMES_TEN; csr_write_4(sc, SF_TIMER_CTL, val); } static void sf_rxfilter(struct sf_softc *sc) { struct ifnet *ifp; int i; struct ifmultiaddr *ifma; uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; uint32_t rxfilt; ifp = sc->sf_ifp; /* First zot all the existing filters. */ for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) sf_setperf(sc, i, dummy); for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); rxfilt = csr_read_4(sc, SF_RXFILT); rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); if ((ifp->if_flags & IFF_BROADCAST) != 0) rxfilt |= SF_RXFILT_BROAD; if ((ifp->if_flags & IFF_ALLMULTI) != 0 || (ifp->if_flags & IFF_PROMISC) != 0) { if ((ifp->if_flags & IFF_PROMISC) != 0) rxfilt |= SF_RXFILT_PROMISC; if ((ifp->if_flags & IFF_ALLMULTI) != 0) rxfilt |= SF_RXFILT_ALLMULTI; goto done; } /* Now program new ones. */ i = 1; if_maddr_rlock(ifp); TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first 15 multicast groups * into the perfect filter. For all others, * use the hash table.
*/ if (i < SF_RXFILT_PERFECT_CNT) { sf_setperf(sc, i, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); i++; continue; } sf_sethash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); } if_maddr_runlock(ifp); done: csr_write_4(sc, SF_RXFILT, rxfilt); } /* * Set media options. */ static int sf_ifmedia_upd(struct ifnet *ifp) { struct sf_softc *sc; int error; sc = ifp->if_softc; SF_LOCK(sc); error = sf_ifmedia_upd_locked(ifp); SF_UNLOCK(sc); return (error); } static int sf_ifmedia_upd_locked(struct ifnet *ifp) { struct sf_softc *sc; struct mii_data *mii; struct mii_softc *miisc; sc = ifp->if_softc; mii = device_get_softc(sc->sf_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); return (mii_mediachg(mii)); } /* * Report current media status. */ static void sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; SF_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { SF_UNLOCK(sc); return; } mii = device_get_softc(sc->sf_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; SF_UNLOCK(sc); } static int sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct sf_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (command) { case SIOCSIFFLAGS: SF_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if ((ifp->if_flags ^ sc->sf_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) sf_rxfilter(sc); } else { if (sc->sf_detach == 0) sf_init_locked(sc); } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) sf_stop(sc); } sc->sf_if_flags = ifp->if_flags; SF_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: SF_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) sf_rxfilter(sc); SF_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->sf_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(sf_poll, ifp); if (error != 0) break; SF_LOCK(sc); /* Disable interrupts. */ csr_write_4(sc, SF_IMR, 0); ifp->if_capenable |= IFCAP_POLLING; SF_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. 
*/ SF_LOCK(sc); csr_write_4(sc, SF_IMR, SF_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; SF_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_TXCSUM) != 0) { if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { SF_LOCK(sc); ifp->if_capenable ^= IFCAP_TXCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { ifp->if_hwassist |= SF_CSUM_FEATURES; SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); } else { ifp->if_hwassist &= ~SF_CSUM_FEATURES; SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); } SF_UNLOCK(sc); } } if ((mask & IFCAP_RXCSUM) != 0) { if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { SF_LOCK(sc); ifp->if_capenable ^= IFCAP_RXCSUM; if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); else SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); SF_UNLOCK(sc); } } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void sf_reset(struct sf_softc *sc) { int i; csr_write_4(sc, SF_GEN_ETH_CTL, 0); SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); DELAY(1000); SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); for (i = 0; i < SF_TIMEOUT; i++) { DELAY(10); if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) break; } if (i == SF_TIMEOUT) device_printf(sc->sf_dev, "reset never completed!\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); } /* * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. * We also check the subsystem ID so that we can identify exactly which * NIC has been found, if possible. */ static int sf_probe(device_t dev) { struct sf_type *t; uint16_t vid; uint16_t did; uint16_t sdid; int i; vid = pci_get_vendor(dev); did = pci_get_device(dev); sdid = pci_get_subdevice(dev); t = sf_devs; for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { if (vid == t->sf_vid && did == t->sf_did) { if (sdid == t->sf_sdid) { device_set_desc(dev, t->sf_sname); return (BUS_PROBE_DEFAULT); } } } if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { /* unknown subdevice */ device_set_desc(dev, sf_devs[0].sf_name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sf_attach(device_t dev) { int i; struct sf_softc *sc; struct ifnet *ifp; uint32_t reg; int rid, error = 0; uint8_t eaddr[ETHER_ADDR_LEN]; sc = device_get_softc(dev); sc->sf_dev = dev; mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); /* * Prefer the memory space register mapping over I/O space, as the * hardware requires lots of register accesses to fetch the various * producer/consumer indices during Tx/Rx operation. However, this * requires a large memory space (512K) to map the entire register * space. */
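/*
 * BAR layout assumed by the fallback below (sketch): with a 32-bit
 * memory BAR, BAR0 is memory and BAR1 is the I/O BAR; a 64-bit memory
 * BAR occupies the BAR0+BAR1 pair, pushing the I/O BAR to BAR2.  That
 * is why testing PCIM_BAR_MEM_64 selects PCIR_BAR(2) over PCIR_BAR(1).
 */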
*/ sc->sf_rid = PCIR_BAR(0); sc->sf_restype = SYS_RES_MEMORY; sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, RF_ACTIVE); if (sc->sf_res == NULL) { reg = pci_read_config(dev, PCIR_BAR(0), 4); if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) sc->sf_rid = PCIR_BAR(2); else sc->sf_rid = PCIR_BAR(1); sc->sf_restype = SYS_RES_IOPORT; sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, RF_ACTIVE); if (sc->sf_res == NULL) { device_printf(dev, "couldn't allocate resources\n"); mtx_destroy(&sc->sf_mtx); return (ENXIO); } } if (bootverbose) device_printf(dev, "using %s space register mapping\n", sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (reg == 0) { /* * If cache line size is 0, MWI is not used at all, so set * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 * and 64. */ reg = 16; device_printf(dev, "setting PCI cache line size to %u\n", reg); pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); } else { if (bootverbose) device_printf(dev, "PCI cache line size : %u\n", reg); } /* Enable MWI. */ reg = pci_read_config(dev, PCIR_COMMAND, 2); reg |= PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, reg, 2); /* Allocate interrupt. */ rid = 0; sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sf_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sf_sysctl_stats, "I", "Statistics"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", "sf interrupt moderation"); /* Pull in device tunables. */ sc->sf_int_mod = SF_IM_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "int_mod", &sc->sf_int_mod); if (error == 0) { if (sc->sf_int_mod < SF_IM_MIN || sc->sf_int_mod > SF_IM_MAX) { device_printf(dev, "int_mod value out of range; " "using default: %d\n", SF_IM_DEFAULT); sc->sf_int_mod = SF_IM_DEFAULT; } } /* Reset the adapter. */ sf_reset(sc); /* * Get station address from the EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); /* Allocate DMA resources. */ if (sf_dma_alloc(sc) != 0) { error = ENOSPC; goto fail; } sc->sf_txthresh = SF_MIN_TX_THRESHOLD; ifp = sc->sf_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); error = ENOSPC; goto fail; } /* Do MII setup. */ error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd, sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sf_ioctl; ifp->if_start = sf_start; ifp->if_init = sf_init; IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * With the help of firmware, AIC-6915 supports * Tx/Rx TCP/UDP checksum offload. */ ifp->if_hwassist = SF_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup. 
*/ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, sf_intr, sc, &sc->sf_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) sf_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sf_detach(device_t dev) { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->sf_ifp; #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { SF_LOCK(sc); sc->sf_detach = 1; sf_stop(sc); SF_UNLOCK(sc); callout_drain(&sc->sf_co); if (ifp != NULL) ether_ifdetach(ifp); } if (sc->sf_miibus) { device_delete_child(dev, sc->sf_miibus); sc->sf_miibus = NULL; } bus_generic_detach(dev); if (sc->sf_intrhand != NULL) bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); if (sc->sf_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); if (sc->sf_res != NULL) bus_release_resource(dev, sc->sf_restype, sc->sf_rid, sc->sf_res); sf_dma_free(sc); if (ifp != NULL) if_free(ifp); mtx_destroy(&sc->sf_mtx); return (0); } struct sf_dmamap_arg { bus_addr_t sf_busaddr; }; static void sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sf_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->sf_busaddr = segs[0].ds_addr; } static int sf_dma_alloc(struct sf_softc *sc) { struct sf_dmamap_arg ctx; struct sf_txdesc *txd; struct sf_rxdesc *rxd; bus_addr_t lowaddr; bus_addr_t rx_ring_end, rx_cring_end; bus_addr_t tx_ring_end, tx_cring_end; int error, i; lowaddr = BUS_SPACE_MAXADDR; again: /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->sf_dev), /* parent */ 1, 0, /* alignment, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_parent_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_TX_DLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_TX_DLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_tx_ring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Tx completion ring. 
*/ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_TX_CLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_TX_CLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_tx_cring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx completion ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_RX_DLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_RX_DLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_rx_ring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx ring DMA tag\n"); goto fail; } /* Create tag for Rx completion ring. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_RX_CLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_RX_CLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_rx_cring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx completion ring DMA tag\n"); goto fail; } /* Create tag for Tx buffers. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * SF_MAXTXSEGS, /* maxsize */ SF_MAXTXSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_tx_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RX_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_rx_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); if (error != 0) { device_printf(sc->sf_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.sf_busaddr = 0; error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); if (error != 0 || ctx.sf_busaddr == 0) { device_printf(sc->sf_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; /* * Allocate DMA'able memory and load the DMA map for Tx completion ring. 
*/ error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag, (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map); if (error != 0) { device_printf(sc->sf_dev, "failed to allocate DMA'able memory for " "Tx completion ring\n"); goto fail; } ctx.sf_busaddr = 0; error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring, SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); if (error != 0 || ctx.sf_busaddr == 0) { device_printf(sc->sf_dev, "failed to load DMA'able memory for Tx completion ring\n"); goto fail; } sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag, (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map); if (error != 0) { device_printf(sc->sf_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.sf_busaddr = 0; error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring, SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); if (error != 0 || ctx.sf_busaddr == 0) { device_printf(sc->sf_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr; /* * Allocate DMA'able memory and load the DMA map for Rx completion ring. */ error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag, (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map); if (error != 0) { device_printf(sc->sf_dev, "failed to allocate DMA'able memory for " "Rx completion ring\n"); goto fail; } ctx.sf_busaddr = 0; error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring, SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); if (error != 0 || ctx.sf_busaddr == 0) { device_printf(sc->sf_dev, "failed to load DMA'able memory for Rx completion ring\n"); goto fail; } sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr; /* * The Tx descriptor ring and Tx completion ring should be addressed in * the same 4GB space. The same rule applies to the Rx ring and Rx * completion ring. Unfortunately there is no way to specify this * boundary restriction with bus_dma(9). So just try to allocate * without the restriction and check that the restriction was satisfied. * If not, fall back to 32-bit DMA addressing mode, which always * guarantees the restriction. */ tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE; tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE; rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE; rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE; if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) != SF_ADDR_HI(tx_cring_end)) || (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) != SF_ADDR_HI(tx_ring_end)) || (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) != SF_ADDR_HI(rx_cring_end)) || (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) != SF_ADDR_HI(rx_ring_end))) { device_printf(sc->sf_dev, "switching to 32bit DMA mode\n"); sf_dma_free(sc); /* Limit DMA address space to 32bit and try again. */ lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; }
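/*
 * Illustrative numbers for the check above: a ring loaded at
 * 0xffffe000 with a 0x2000-byte length ends at 0x100000000, so the
 * upper 32 bits of its start (0x0) and end (0x1) differ and the
 * allocation is redone with 32-bit addressing.  Two regions share one
 * 4GB window exactly when SF_ADDR_HI() of all their start and end
 * addresses agree, which is what the four comparisons test pairwise.
 */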
/* Create DMA maps for Tx buffers. */ for (i = 0; i < SF_TX_DLIST_CNT; i++) { txd = &sc->sf_cdata.sf_txdesc[i]; txd->tx_m = NULL; txd->ndesc = 0; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, &sc->sf_cdata.sf_rx_sparemap)) != 0) { device_printf(sc->sf_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < SF_RX_DLIST_CNT; i++) { rxd = &sc->sf_cdata.sf_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void sf_dma_free(struct sf_softc *sc) { struct sf_txdesc *txd; struct sf_rxdesc *rxd; int i; /* Tx ring. */ if (sc->sf_cdata.sf_tx_ring_tag) { if (sc->sf_cdata.sf_tx_ring_map) bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map); if (sc->sf_cdata.sf_tx_ring_map && sc->sf_rdata.sf_tx_ring) bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, sc->sf_rdata.sf_tx_ring, sc->sf_cdata.sf_tx_ring_map); sc->sf_rdata.sf_tx_ring = NULL; sc->sf_cdata.sf_tx_ring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); sc->sf_cdata.sf_tx_ring_tag = NULL; } /* Tx completion ring. */ if (sc->sf_cdata.sf_tx_cring_tag) { if (sc->sf_cdata.sf_tx_cring_map) bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map); if (sc->sf_cdata.sf_tx_cring_map && sc->sf_rdata.sf_tx_cring) bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, sc->sf_rdata.sf_tx_cring, sc->sf_cdata.sf_tx_cring_map); sc->sf_rdata.sf_tx_cring = NULL; sc->sf_cdata.sf_tx_cring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); sc->sf_cdata.sf_tx_cring_tag = NULL; } /* Rx ring. */ if (sc->sf_cdata.sf_rx_ring_tag) { if (sc->sf_cdata.sf_rx_ring_map) bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map); if (sc->sf_cdata.sf_rx_ring_map && sc->sf_rdata.sf_rx_ring) bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, sc->sf_rdata.sf_rx_ring, sc->sf_cdata.sf_rx_ring_map); sc->sf_rdata.sf_rx_ring = NULL; sc->sf_cdata.sf_rx_ring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); sc->sf_cdata.sf_rx_ring_tag = NULL; } /* Rx completion ring. */ if (sc->sf_cdata.sf_rx_cring_tag) { if (sc->sf_cdata.sf_rx_cring_map) bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map); if (sc->sf_cdata.sf_rx_cring_map && sc->sf_rdata.sf_rx_cring) bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, sc->sf_rdata.sf_rx_cring, sc->sf_cdata.sf_rx_cring_map); sc->sf_rdata.sf_rx_cring = NULL; sc->sf_cdata.sf_rx_cring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); sc->sf_cdata.sf_rx_cring_tag = NULL; } /* Tx buffers. */ if (sc->sf_cdata.sf_tx_tag) { for (i = 0; i < SF_TX_DLIST_CNT; i++) { txd = &sc->sf_cdata.sf_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); sc->sf_cdata.sf_tx_tag = NULL; } /* Rx buffers.
*/ if (sc->sf_cdata.sf_rx_tag) { for (i = 0; i < SF_RX_DLIST_CNT; i++) { rxd = &sc->sf_cdata.sf_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->sf_cdata.sf_rx_sparemap) { bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, sc->sf_cdata.sf_rx_sparemap); sc->sf_cdata.sf_rx_sparemap = NULL; } bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag); sc->sf_cdata.sf_rx_tag = NULL; } if (sc->sf_cdata.sf_parent_tag) { bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag); sc->sf_cdata.sf_parent_tag = NULL; } } static int sf_init_rx_ring(struct sf_softc *sc) { struct sf_ring_data *rd; int i; sc->sf_cdata.sf_rxc_cons = 0; rd = &sc->sf_rdata; bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE); bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE); for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sf_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void sf_init_tx_ring(struct sf_softc *sc) { struct sf_ring_data *rd; int i; sc->sf_cdata.sf_tx_prod = 0; sc->sf_cdata.sf_tx_cnt = 0; sc->sf_cdata.sf_txc_cons = 0; rd = &sc->sf_rdata; bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE); bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE); for (i = 0; i < SF_TX_DLIST_CNT; i++) { rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID); sc->sf_cdata.sf_txdesc[i].tx_m = NULL; sc->sf_cdata.sf_txdesc[i].ndesc = 0; } rd->sf_tx_ring[i - 1].sf_tx_ctrl |= htole32(SF_TX_DESC_END); bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int sf_newbuf(struct sf_softc *sc, int idx) { struct sf_rx_rdesc *desc; struct sf_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, sizeof(uint32_t)); if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag, sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->sf_cdata.sf_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap; sc->sf_cdata.sf_rx_sparemap = map; bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; desc = &sc->sf_rdata.sf_rx_ring[idx]; desc->sf_addr = htole64(segs[0].ds_addr); return (0); } #ifndef __NO_STRICT_ALIGNMENT static __inline void sf_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= ETHER_ALIGN; } #endif
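/*
 * Offset arithmetic behind sf_fixup_rx() (illustrative): the chip
 * requires the receive buffer at a longword-aligned address A, so the
 * 14-byte Ethernet header leaves the IP header at A + 14, a
 * 2-byte-misaligned address.  Copying the frame back by ETHER_ALIGN
 * (2) bytes moves the IP header to A + 12, which is longword aligned.
 * The uint16_t loop runs forward with dst == src - 1 (in 16-bit
 * units), an overlapping copy in the safe direction, and the m_adj()
 * done in sf_newbuf() guarantees the headroom for the shift exists.
 */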
/* * The Starfire is programmed to use 'normal' mode for packet reception, * which means we use the consumer/producer model for both the buffer * descriptor queue and the completion descriptor queue. The only problem * with this is that it involves a lot of register accesses: we have to * read the RX completion consumer and producer indexes and the RX buffer * producer index, plus the RX completion consumer and RX buffer producer * indexes have to be updated. It would have been easier if Adaptec had * put each index in a separate register, especially given that the damn * NIC has a 512K register space. * * In spite of all the lovely features that Adaptec crammed into the 6915, * it is marred by one truly stupid design flaw, which is that receive * buffer addresses must be aligned on a longword boundary. This forces * the packet payload to be unaligned, which is suboptimal on the x86 and * completely unusable on the Alpha. Our only recourse is to copy received * packets into properly aligned buffers before handing them off. */ static int sf_rxeof(struct sf_softc *sc) { struct mbuf *m; struct ifnet *ifp; struct sf_rxdesc *rxd; struct sf_rx_rcdesc *cur_cmp; int cons, eidx, prog, rx_npkts; uint32_t status, status2; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; rx_npkts = 0; bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * To reduce register access, directly read Receive completion * queue entry. */ eidx = 0; prog = 0; for (cons = sc->sf_cdata.sf_rxc_cons; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; SF_INC(cons, SF_RX_CLIST_CNT)) { cur_cmp = &sc->sf_rdata.sf_rx_cring[cons]; status = le32toh(cur_cmp->sf_rx_status1); if (status == 0) break; #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif prog++; eidx = (status & SF_RX_CMPDESC_EIDX) >> 16; rxd = &sc->sf_cdata.sf_rxdesc[eidx]; m = rxd->rx_m; /* * Note, if_ipackets and if_ierrors counters * are handled in sf_stats_update(). */ if ((status & SF_RXSTAT1_OK) == 0) { cur_cmp->sf_rx_status1 = 0; continue; } if (sf_newbuf(sc, eidx) != 0) { ifp->if_iqdrops++; cur_cmp->sf_rx_status1 = 0; continue; } /* AIC-6915 supports TCP/UDP checksum offload. */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { status2 = le32toh(cur_cmp->sf_rx_status2); /* * Sometimes AIC-6915 generates an interrupt to * warn of an RxGFP stall with the bad checksum bit * set in the status word. I'm not sure what * condition triggers it, but the received packet's * checksum was correct even though the AIC-6915 * did not agree. This may be an indication of a * firmware bug. To fix the issue, do not rely * on the bad checksum bit in the status word and let * the upper layer verify the integrity of the received * frame. * Another nice feature of AIC-6915 is hardware * assistance of checksum calculation by * providing a partial checksum value for the received * frame. The partial checksum value can be used * to accelerate checksum computation for * fragmented TCP/UDP packets. The upper network * stack already takes advantage of the partial * checksum value in the IP reassembly stage. But * I'm not sure about the correctness of the partial * hardware checksum assistance as frequent * RxGFP stalls are seen on non-fragmented * frames. Due to the complexity of the checksum * computation code in the firmware it's * possible to see another bug in RxGFP, so * ignore checksum assistance for fragmented * frames. This can be changed in the future.
*/ if ((status2 & SF_RXSTAT2_FRAG) == 0) { if ((status2 & (SF_RXSTAT2_TCP | SF_RXSTAT2_UDP)) != 0) { if ((status2 & SF_RXSTAT2_CSUM_OK)) { m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } #ifdef SF_PARTIAL_CSUM_SUPPORT else if ((status2 & SF_RXSTAT2_FRAG) != 0) { if ((status2 & (SF_RXSTAT2_TCP | SF_RXSTAT2_UDP)) != 0) { if ((status2 & SF_RXSTAT2_PCSUM_OK)) { m->m_pkthdr.csum_flags = CSUM_DATA_VALID; m->m_pkthdr.csum_data = (status & SF_RX_CMPDESC_CSUM2); } } } #endif } m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; #ifndef __NO_STRICT_ALIGNMENT sf_fixup_rx(m); #endif m->m_pkthdr.rcvif = ifp; SF_UNLOCK(sc); (*ifp->if_input)(ifp, m); SF_LOCK(sc); rx_npkts++; /* Clear completion status. */ cur_cmp->sf_rx_status1 = 0; } if (prog > 0) { sc->sf_cdata.sf_rxc_cons = cons; bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Update Rx completion Q1 consumer index. */ csr_write_4(sc, SF_CQ_CONSIDX, (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | (cons & SF_CQ_CONSIDX_RXQ1)); /* Update Rx descriptor Q1 ptr. */ csr_write_4(sc, SF_RXDQ_PTR_Q1, (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | (eidx & SF_RXDQ_PRODIDX)); } return (rx_npkts); } /* * Read the transmit status from the completion queue and release * mbufs. Note that the buffer descriptor index in the completion * descriptor is an offset from the start of the transmit buffer * descriptor list in bytes. This is important because the manual * gives the impression that it should match the producer/consumer * index, which is the offset in 8 byte blocks. */ static void sf_txeof(struct sf_softc *sc) { struct sf_txdesc *txd; struct sf_tx_rcdesc *cur_cmp; struct ifnet *ifp; uint32_t status; int cons, idx, prod; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cons = sc->sf_cdata.sf_txc_cons; prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; if (prod == cons) return; for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; status = le32toh(cur_cmp->sf_tx_status1); if (status == 0) break; switch (status & SF_TX_CMPDESC_TYPE) { case SF_TXCMPTYPE_TX: /* Tx complete entry. */ break; case SF_TXCMPTYPE_DMA: /* DMA complete entry. */ idx = status & SF_TX_CMPDESC_IDX; idx = idx / sizeof(struct sf_tx_rdesc); /* * We don't need to check Tx status here. * SF_ISR_TX_LOFIFO intr would handle this. * Note, if_opackets, if_collisions and if_oerrors * counters are handled in sf_stats_update(). */ txd = &sc->sf_cdata.sf_txdesc[idx]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } sc->sf_cdata.sf_tx_cnt -= txd->ndesc; KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, ("%s: Active Tx desc counter was garbled\n", __func__)); txd->ndesc = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; break; default: /* It should not happen. 
*/ device_printf(sc->sf_dev, "unknown Tx completion type : 0x%08x : %d : %d\n", status, cons, prod); break; } cur_cmp->sf_tx_status1 = 0; } sc->sf_cdata.sf_txc_cons = cons; bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sf_cdata.sf_tx_cnt == 0) sc->sf_watchdog_timer = 0; /* Update Tx completion consumer index. */ csr_write_4(sc, SF_CQ_CONSIDX, (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | ((cons << 16) & 0xffff0000)); } static void sf_txthresh_adjust(struct sf_softc *sc) { uint32_t txfctl; device_printf(sc->sf_dev, "Tx underrun -- "); if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { txfctl = csr_read_4(sc, SF_TX_FRAMCTL); /* Increase Tx threshold by 256 bytes. */ sc->sf_txthresh += 16; if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) sc->sf_txthresh = SF_MAX_TX_THRESHOLD; txfctl &= ~SF_TXFRMCTL_TXTHRESH; txfctl |= sc->sf_txthresh; printf("increasing Tx threshold to %d bytes\n", sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); csr_write_4(sc, SF_TX_FRAMCTL, txfctl); } else printf("\n"); } #ifdef DEVICE_POLLING static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct sf_softc *sc; uint32_t status; int rx_npkts; sc = ifp->if_softc; rx_npkts = 0; SF_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SF_UNLOCK(sc); return (rx_npkts); } sc->rxcycles = count; rx_npkts = sf_rxeof(sc); sf_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* Reading the ISR register clears all interrupts. */ status = csr_read_4(sc, SF_ISR); if ((status & SF_ISR_ABNORMALINTR) != 0) { if ((status & SF_ISR_STATSOFLOW) != 0) sf_stats_update(sc); else if ((status & SF_ISR_TX_LOFIFO) != 0) sf_txthresh_adjust(sc); else if ((status & SF_ISR_DMAERR) != 0) { device_printf(sc->sf_dev, "DMA error, resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sf_init_locked(sc); SF_UNLOCK(sc); return (rx_npkts); } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { sc->sf_statistics.sf_tx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "TxGFP is not responding!\n"); #endif } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { sc->sf_statistics.sf_rx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "RxGFP is not responding!\n"); #endif } } } SF_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */
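/*
 * A note on the handler below: SF_ISR is clear-on-read, so the status
 * word must be latched into a local once and all dispatching done
 * from that copy, e.g.
 *
 *	status = csr_read_4(sc, SF_ISR);	(latch and clear)
 *	if (status & SF_ISR_RXDQ1_DMADONE)	(dispatch from the copy)
 *		sf_rxeof(sc);
 *
 * Re-reading the register per condition would silently discard
 * events.
 */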
static void sf_intr(void *arg) { struct sf_softc *sc; struct ifnet *ifp; uint32_t status; int cnt; sc = (struct sf_softc *)arg; SF_LOCK(sc); if (sc->sf_suspended != 0) goto done_locked; /* Reading the ISR register clears all interrupts. */ status = csr_read_4(sc, SF_ISR); if (status == 0 || status == 0xffffffff || (status & SF_ISR_PCIINT_ASSERTED) == 0) goto done_locked; ifp = sc->sf_ifp; #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) goto done_locked; #endif /* Disable interrupts. */ csr_write_4(sc, SF_IMR, 0x00000000); for (cnt = 32; (status & SF_INTRS) != 0;) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; if ((status & SF_ISR_RXDQ1_DMADONE) != 0) sf_rxeof(sc); if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | SF_ISR_TX_QUEUEDONE)) != 0) sf_txeof(sc); if ((status & SF_ISR_ABNORMALINTR) != 0) { if ((status & SF_ISR_STATSOFLOW) != 0) sf_stats_update(sc); else if ((status & SF_ISR_TX_LOFIFO) != 0) sf_txthresh_adjust(sc); else if ((status & SF_ISR_DMAERR) != 0) { device_printf(sc->sf_dev, "DMA error, resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sf_init_locked(sc); SF_UNLOCK(sc); return; } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { sc->sf_statistics.sf_tx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "TxGFP is not responding!\n"); #endif } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { sc->sf_statistics.sf_rx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "RxGFP is not responding!\n"); #endif } } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start_locked(ifp); if (--cnt <= 0) break; /* Reading the ISR register clears all interrupts. */ status = csr_read_4(sc, SF_ISR); } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { /* Re-enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); } done_locked: SF_UNLOCK(sc); } static void sf_download_fw(struct sf_softc *sc) { uint32_t gfpinst; int i, ndx; uint8_t *p; /* * An FP instruction is composed of 48 bits so we have to * write it in two parts. */ p = txfwdata; ndx = 0; for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); gfpinst = p[0] << 8 | p[1]; csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); p += SF_GFP_INST_BYTES; ndx += 2; } if (bootverbose) device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); p = rxfwdata; ndx = 0; for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); gfpinst = p[0] << 8 | p[1]; csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); p += SF_GFP_INST_BYTES; ndx += 2; } if (bootverbose) device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); }
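/*
 * Worked example of the 48-bit split in sf_download_fw() above, with
 * arbitrary instruction bytes (most significant first): for
 * p[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab } the first write is
 * 0x456789ab (p[2]..p[5]) and the second is 0x00000123 (p[0]..p[1]),
 * filling two consecutive 32-bit GFP memory slots per instruction.
 */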
static void sf_init(void *xsc) { struct sf_softc *sc; sc = (struct sf_softc *)xsc; SF_LOCK(sc); sf_init_locked(sc); SF_UNLOCK(sc); } static void sf_init_locked(struct sf_softc *sc) { struct ifnet *ifp; - struct mii_data *mii; uint8_t eaddr[ETHER_ADDR_LEN]; bus_addr_t addr; int i; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; - mii = device_get_softc(sc->sf_miibus); sf_stop(sc); /* Reset the hardware to a known state. */ sf_reset(sc); /* Init all the receive filter registers */ for (i = SF_RXFILT_PERFECT_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); /* Empty stats counter registers. */ for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); /* Init our MAC address. */ bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); csr_write_4(sc, SF_PAR0, eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); sf_setperf(sc, 0, eaddr); if (sf_init_rx_ring(sc) == ENOBUFS) { device_printf(sc->sf_dev, "initialization failed: no memory for rx buffers\n"); sf_stop(sc); return; } sf_init_tx_ring(sc); /* * Use 16-entry perfect address filtering. * Hash only the multicast destination address; accept matching * frames regardless of VLAN ID. */ csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); /* * Set Rx filter. */ sf_rxfilter(sc); /* Init the completion queue indexes. */ csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); /* Init the RX completion queue. */ addr = sc->sf_rdata.sf_rx_cring_paddr; csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); /* Set RX completion queue type 2. */ SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); csr_write_4(sc, SF_RXCQ_CTL_2, 0); /* * Init RX DMA control. * default RxHighPriority Threshold, * default RxBurstSize, 128bytes. */ SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS | (SF_RXDMA_HIGHPRIO_THRESH << 8) | SF_RXDMA_BURST); /* Init the RX buffer descriptor queue. */ addr = sc->sf_rdata.sf_rx_ring_paddr; csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); /* Set RX queue buffer length. */ csr_write_4(sc, SF_RXDQ_CTL_1, ((MCLBYTES - sizeof(uint32_t)) << 16) | SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); csr_write_4(sc, SF_RXDQ_CTL_2, 0); /* Init the TX completion queue */ addr = sc->sf_rdata.sf_tx_cring_paddr; csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); /* Init the TX buffer descriptor queue. */ addr = sc->sf_rdata.sf_tx_ring_paddr; csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); csr_write_4(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); csr_write_4(sc, SF_TXDQ_CTL, SF_TXDMA_HIPRIO_THRESH << 24 | SF_TXSKIPLEN_0BYTES << 16 | SF_TXDDMA_BURST << 8 | SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); /* Set VLAN Type register. */ csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); /* Set TxPause Timer. */ csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); /* Enable autopadding of short TX frames. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); /* Make sure to reset the MAC for the changes to take effect. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); DELAY(1000); SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); /* Enable PCI bus master. */ SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); /* Load StarFire firmware. */ sf_download_fw(sc); /* Initialize interrupt moderation. */ csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if ((ifp->if_capenable & IFCAP_POLLING) != 0) csr_write_4(sc, SF_IMR, 0x00000000); else #endif /* Enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); /* Enable the RX and TX engines. */
csr_write_4(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); else SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); else SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sf_link = 0; sf_ifmedia_upd_locked(ifp); callout_reset(&sc->sf_co, hz, sf_tick, sc); } static int sf_encap(struct sf_softc *sc, struct mbuf **m_head) { struct sf_txdesc *txd; struct sf_tx_rdesc *desc; struct mbuf *m; bus_dmamap_t map; bus_dma_segment_t txsegs[SF_MAXTXSEGS]; int error, i, nsegs, prod, si; int avail, nskip; SF_LOCK_ASSERT(sc); m = *m_head; prod = sc->sf_cdata.sf_tx_prod; txd = &sc->sf_cdata.sf_txdesc[prod]; map = txd->tx_dmamap; error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; if (avail < nsegs) { bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); return (ENOBUFS); } nskip = 0; if (prod + nsegs >= SF_TX_DLIST_CNT) { nskip = SF_TX_DLIST_CNT - prod - 1; if (avail < nsegs + nskip) { bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); return (ENOBUFS); } } bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); si = prod; for (i = 0; i < nsegs; i++) { desc = &sc->sf_rdata.sf_tx_ring[prod]; desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); desc->sf_tx_reserved = 0; desc->sf_addr = htole64(txsegs[i].ds_addr); if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { /* Queue wraps! */ desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); prod = 0; } else SF_INC(prod, SF_TX_DLIST_CNT); } /* Update producer index. */ sc->sf_cdata.sf_tx_prod = prod; sc->sf_cdata.sf_tx_cnt += nsegs + nskip; desc = &sc->sf_rdata.sf_tx_ring[si]; /* Check TCP/UDP checksum offload request. */ if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); txd->tx_dmamap = map; txd->tx_m = m; txd->ndesc = nsegs + nskip; return (0); } static void sf_start(struct ifnet *ifp) { struct sf_softc *sc; sc = ifp->if_softc; SF_LOCK(sc); sf_start_locked(ifp); SF_UNLOCK(sc); } static void sf_start_locked(struct ifnet *ifp) { struct sf_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; SF_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->sf_link == 0) return; /* * Since we don't know in advance when a descriptor wrap will occur, * only start a new frame while the number of active Tx descriptors * leaves room for at least SF_MAXTXSEGS more. */
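/*
 * Worked example of the wrap accounting in sf_encap() above (assuming,
 * say, SF_TX_DLIST_CNT == 256): with prod == 253 and a 4-fragment
 * frame, prod + nsegs == 257 >= 256, so the fragment written at slot
 * 253 is marked SF_TX_DESC_END and the remaining three go to slots
 * 0..2; nskip == 256 - 253 - 1 == 2 covers the untouched slots
 * 254..255, and sf_tx_cnt grows by nsegs + nskip == 6 so the
 * free-space check stays honest.
 */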
for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (sf_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Kick transmit. */ csr_write_4(sc, SF_TXDQ_PRODIDX, sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); /* Set a timeout in case the chip goes out to lunch. */ sc->sf_watchdog_timer = 5; } } static void sf_stop(struct sf_softc *sc) { struct sf_txdesc *txd; struct sf_rxdesc *rxd; struct ifnet *ifp; int i; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sf_link = 0; callout_stop(&sc->sf_co); sc->sf_watchdog_timer = 0; /* Reading the ISR register clears all interrupts. */ csr_read_4(sc, SF_ISR); /* Disable further interrupts. */ csr_write_4(sc, SF_IMR, 0); /* Disable the Tx/Rx engines. */ csr_write_4(sc, SF_GEN_ETH_CTL, 0); /* Give the hardware a chance to drain active DMA cycles. */ DELAY(1000); csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); csr_write_4(sc, SF_RXDQ_CTL_1, 0); csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); csr_write_4(sc, SF_TXCQ_CTL, 0); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_CTL, 0); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < SF_RX_DLIST_CNT; i++) { rxd = &sc->sf_cdata.sf_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < SF_TX_DLIST_CNT; i++) { txd = &sc->sf_cdata.sf_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; txd->ndesc = 0; } } } static void sf_tick(void *xsc) { struct sf_softc *sc; struct mii_data *mii; sc = xsc; SF_LOCK_ASSERT(sc); mii = device_get_softc(sc->sf_miibus); mii_tick(mii); sf_stats_update(sc); sf_watchdog(sc); callout_reset(&sc->sf_co, hz, sf_tick, sc); } /* * Note: it is important that this function not be interrupted. We * use a two-stage register access scheme: if we are interrupted in * between setting the indirect address register and reading from the * indirect data register, the contents of the address register could * be changed out from under us. */
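/*
 * The interleaving the comment above warns about, sketched
 * (illustrative only):
 *
 *	CPU A: write SF_INDIRECTIO_ADDR = reg1
 *	CPU B: write SF_INDIRECTIO_ADDR = reg2	<- clobbers A's setup
 *	CPU A: read  SF_INDIRECTIO_DATA		<- returns reg2's data
 *
 * In this driver the indirect path is only exercised under SF_LOCK or
 * during single-threaded attach, which keeps the pair atomic.
 */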
*/ static void sf_stats_update(struct sf_softc *sc) { struct ifnet *ifp; struct sf_stats now, *stats, *nstats; int i; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; stats = &now; stats->sf_tx_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); stats->sf_tx_single_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); stats->sf_tx_multi_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); stats->sf_tx_crcerrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); stats->sf_tx_bytes = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); stats->sf_tx_deferred = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); stats->sf_tx_late_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); stats->sf_tx_pause_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); stats->sf_tx_control_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); stats->sf_tx_excess_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); stats->sf_tx_excess_defer = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); stats->sf_tx_mcast_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); stats->sf_tx_bcast_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); stats->sf_tx_frames_lost = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); stats->sf_rx_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); stats->sf_rx_crcerrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); stats->sf_rx_alignerrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); stats->sf_rx_bytes = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); stats->sf_rx_pause_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); stats->sf_rx_control_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); stats->sf_rx_unsup_control_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); stats->sf_rx_giants = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS); stats->sf_rx_runts = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); stats->sf_rx_jabbererrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); stats->sf_rx_fragments = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); stats->sf_rx_pkts_64 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); stats->sf_rx_pkts_65_127 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); stats->sf_rx_pkts_128_255 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); stats->sf_rx_pkts_256_511 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); stats->sf_rx_pkts_512_1023 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); stats->sf_rx_pkts_1024_1518 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); stats->sf_rx_frames_lost = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); /* Lower 16bits are valid. */ stats->sf_tx_underruns = (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); /* Empty stats counter registers. 
*/ for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); ifp->if_opackets += (u_long)stats->sf_tx_frames; ifp->if_collisions += (u_long)stats->sf_tx_single_colls + (u_long)stats->sf_tx_multi_colls; ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + (u_long)stats->sf_tx_excess_defer + (u_long)stats->sf_tx_frames_lost; ifp->if_ipackets += (u_long)stats->sf_rx_frames; ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + (u_long)stats->sf_rx_alignerrs + (u_long)stats->sf_rx_giants + (u_long)stats->sf_rx_runts + (u_long)stats->sf_rx_jabbererrs + (u_long)stats->sf_rx_frames_lost; nstats = &sc->sf_statistics; nstats->sf_tx_frames += stats->sf_tx_frames; nstats->sf_tx_single_colls += stats->sf_tx_single_colls; nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; nstats->sf_tx_bytes += stats->sf_tx_bytes; nstats->sf_tx_deferred += stats->sf_tx_deferred; nstats->sf_tx_late_colls += stats->sf_tx_late_colls; nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; nstats->sf_tx_control_frames += stats->sf_tx_control_frames; nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; nstats->sf_rx_frames += stats->sf_rx_frames; nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; nstats->sf_rx_bytes += stats->sf_rx_bytes; nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; nstats->sf_rx_control_frames += stats->sf_rx_control_frames; nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; nstats->sf_rx_giants += stats->sf_rx_giants; nstats->sf_rx_runts += stats->sf_rx_runts; nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; nstats->sf_rx_fragments += stats->sf_rx_fragments; nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; nstats->sf_tx_underruns += stats->sf_tx_underruns; } static void sf_watchdog(struct sf_softc *sc) { struct ifnet *ifp; SF_LOCK_ASSERT(sc); if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) return; ifp = sc->sf_ifp; ifp->if_oerrors++; if (sc->sf_link == 0) { if (bootverbose) if_printf(sc->sf_ifp, "watchdog timeout " "(missed link)\n"); } else if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", sc->sf_cdata.sf_tx_cnt); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sf_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start_locked(ifp); } static int sf_shutdown(device_t dev) { struct sf_softc *sc; sc = device_get_softc(dev); SF_LOCK(sc); sf_stop(sc); SF_UNLOCK(sc); return (0); } static int sf_suspend(device_t dev) { struct sf_softc *sc; sc = device_get_softc(dev); SF_LOCK(sc); sf_stop(sc); sc->sf_suspended = 1; bus_generic_suspend(dev); SF_UNLOCK(sc); return (0); } static int sf_resume(device_t dev) { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); SF_LOCK(sc); bus_generic_resume(dev); ifp = sc->sf_ifp; if ((ifp->if_flags & IFF_UP) != 0) sf_init_locked(sc); sc->sf_suspended = 0; SF_UNLOCK(sc); return (0); } static int 
sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct sf_softc *sc;
	struct sf_stats *stats;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct sf_softc *)arg1;
	stats = &sc->sf_statistics;
	printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->sf_tx_frames);
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->sf_tx_bytes);
	printf("Transmit single collisions : %u\n",
	    stats->sf_tx_single_colls);
	printf("Transmit multiple collisions : %u\n",
	    stats->sf_tx_multi_colls);
	printf("Transmit late collisions : %u\n",
	    stats->sf_tx_late_colls);
	printf("Transmit abort due to excessive collisions : %u\n",
	    stats->sf_tx_excess_colls);
	printf("Transmit CRC errors : %u\n",
	    stats->sf_tx_crcerrs);
	printf("Transmit deferrals : %u\n",
	    stats->sf_tx_deferred);
	printf("Transmit abort due to excessive deferrals : %u\n",
	    stats->sf_tx_excess_defer);
	printf("Transmit pause control frames : %u\n",
	    stats->sf_tx_pause_frames);
	printf("Transmit control frames : %u\n",
	    stats->sf_tx_control_frames);
	printf("Transmit good multicast frames : %u\n",
	    stats->sf_tx_mcast_frames);
	printf("Transmit good broadcast frames : %u\n",
	    stats->sf_tx_bcast_frames);
	printf("Transmit frames lost due to internal transmit errors : %u\n",
	    stats->sf_tx_frames_lost);
	printf("Transmit FIFO underflows : %u\n",
	    stats->sf_tx_underruns);
	printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->sf_rx_frames);
	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->sf_rx_bytes);
	printf("Receive CRC errors : %u\n",
	    stats->sf_rx_crcerrs);
	printf("Receive alignment errors : %u\n",
	    stats->sf_rx_alignerrs);
	printf("Receive pause frames : %u\n",
	    stats->sf_rx_pause_frames);
	printf("Receive control frames : %u\n",
	    stats->sf_rx_control_frames);
	printf("Receive control frames with unsupported opcode : %u\n",
	    stats->sf_rx_unsup_control_frames);
	printf("Receive frames too long : %u\n",
	    stats->sf_rx_giants);
	printf("Receive frames too short : %u\n",
	    stats->sf_rx_runts);
	printf("Receive jabber errors : %u\n",
	    stats->sf_rx_jabbererrs);
	printf("Receive fragmented frames : %u\n",
	    stats->sf_rx_fragments);
	printf("Receive packets 64 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_64);
	printf("Receive packets 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_65_127);
	printf("Receive packets 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_128_255);
	printf("Receive packets 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_256_511);
	printf("Receive packets 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_512_1023);
	printf("Receive packets 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_1024_1518);
	printf("Receive frames lost due to internal receive errors : %u\n",
	    stats->sf_rx_frames_lost);
	printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);

	return (error);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN,
	    SF_IM_MAX));
}
Index: projects/nand/sys/dev/usb/net/if_rue.c
=================================================================== --- projects/nand/sys/dev/usb/net/if_rue.c (revision 235262) +++ projects/nand/sys/dev/usb/net/if_rue.c (revision 235263) @@ -1,915 +1,916 @@ /*- * Copyright (c) 2001-2003, Shunsuke Akiyama . * Copyright (c) 1997, 1998, 1999, 2000 Bill Paul . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek RTL8150 USB to fast ethernet controller driver. * Datasheet is available from * ftp://ftp.realtek.com.tw/lancard/data_sheet/8150/. 
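 *
 * The device is driven entirely through vendor-specific control
 * requests (see rue_read_mem()/rue_write_mem() below) plus one bulk
 * IN, one bulk OUT and one interrupt IN endpoint, as laid out in the
 * rue_config transfer table further down.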
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rue_debug #include #include #include #include #ifdef USB_DEBUG static int rue_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, rue, CTLFLAG_RW, 0, "USB rue"); SYSCTL_INT(_hw_usb_rue, OID_AUTO, debug, CTLFLAG_RW, &rue_debug, 0, "Debug level"); #endif /* * Various supported device vendors/products. */ static const STRUCT_USB_HOST_ID rue_devs[] = { {USB_VPI(USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAKTX, 0)}, {USB_VPI(USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_USBKR100, 0)}, {USB_VPI(USB_VENDOR_OQO, USB_PRODUCT_OQO_ETHER01, 0)}, }; /* prototypes */ static device_probe_t rue_probe; static device_attach_t rue_attach; static device_detach_t rue_detach; static miibus_readreg_t rue_miibus_readreg; static miibus_writereg_t rue_miibus_writereg; static miibus_statchg_t rue_miibus_statchg; static usb_callback_t rue_intr_callback; static usb_callback_t rue_bulk_read_callback; static usb_callback_t rue_bulk_write_callback; static uether_fn_t rue_attach_post; static uether_fn_t rue_init; static uether_fn_t rue_stop; static uether_fn_t rue_start; static uether_fn_t rue_tick; static uether_fn_t rue_setmulti; static uether_fn_t rue_setpromisc; static int rue_read_mem(struct rue_softc *, uint16_t, void *, int); static int rue_write_mem(struct rue_softc *, uint16_t, void *, int); static uint8_t rue_csr_read_1(struct rue_softc *, uint16_t); static uint16_t rue_csr_read_2(struct rue_softc *, uint16_t); static int rue_csr_write_1(struct rue_softc *, uint16_t, uint8_t); static int rue_csr_write_2(struct rue_softc *, uint16_t, uint16_t); static int rue_csr_write_4(struct rue_softc *, int, uint32_t); static void rue_reset(struct rue_softc *); static int rue_ifmedia_upd(struct ifnet *); static void rue_ifmedia_sts(struct ifnet *, struct ifmediareq *); static const struct usb_config rue_config[RUE_N_TRANSFER] = { [RUE_BULK_DT_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = MCLBYTES, .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = rue_bulk_write_callback, .timeout = 10000, /* 10 seconds */ }, [RUE_BULK_DT_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (MCLBYTES + 4), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = rue_bulk_read_callback, .timeout = 0, /* no timeout */ }, [RUE_INTR_DT_RD] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .bufsize = 0, /* use wMaxPacketSize */ .callback = rue_intr_callback, }, }; static device_method_t rue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rue_probe), DEVMETHOD(device_attach, rue_attach), DEVMETHOD(device_detach, rue_detach), /* MII interface */ DEVMETHOD(miibus_readreg, rue_miibus_readreg), DEVMETHOD(miibus_writereg, rue_miibus_writereg), DEVMETHOD(miibus_statchg, rue_miibus_statchg), DEVMETHOD_END }; static driver_t rue_driver = { .name = "rue", .methods = rue_methods, .size = sizeof(struct rue_softc), }; static devclass_t rue_devclass; -DRIVER_MODULE(rue, uhub, rue_driver, rue_devclass, NULL, 0); -DRIVER_MODULE(miibus, rue, miibus_driver, miibus_devclass, 0, 0); +DRIVER_MODULE_ORDERED(rue, uhub, rue_driver, rue_devclass, NULL, NULL, + SI_ORDER_ANY); +DRIVER_MODULE(miibus, rue, miibus_driver, miibus_devclass, NULL, NULL); MODULE_DEPEND(rue, uether, 1, 1, 1); 
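/*
 * A note on the registration above: DRIVER_MODULE_ORDERED() takes the
 * same arguments as DRIVER_MODULE() plus an SI_ORDER_* value, i.e.
 * (sketch of the macro as used above):
 *
 *	DRIVER_MODULE_ORDERED(name, busname, driver, devclass,
 *	    evh, arg, order);
 *
 * Passing SI_ORDER_ANY should make rue register after the miibus
 * driver declared next to it, which matters when both are linked into
 * the same kernel module; see DRIVER_MODULE(9) for the details.
 */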
MODULE_DEPEND(rue, usb, 1, 1, 1);
MODULE_DEPEND(rue, ether, 1, 1, 1);
MODULE_DEPEND(rue, miibus, 1, 1, 1);
MODULE_VERSION(rue, 1);

static const struct usb_ether_methods rue_ue_methods = {
	.ue_attach_post = rue_attach_post,
	.ue_start = rue_start,
	.ue_init = rue_init,
	.ue_stop = rue_stop,
	.ue_tick = rue_tick,
	.ue_setmulti = rue_setmulti,
	.ue_setpromisc = rue_setpromisc,
	.ue_mii_upd = rue_ifmedia_upd,
	.ue_mii_sts = rue_ifmedia_sts,
};

#define	RUE_SETBIT(sc, reg, x) \
	rue_csr_write_1(sc, reg, rue_csr_read_1(sc, reg) | (x))

#define	RUE_CLRBIT(sc, reg, x) \
	rue_csr_write_1(sc, reg, rue_csr_read_1(sc, reg) & ~(x))

static int
rue_read_mem(struct rue_softc *sc, uint16_t addr, void *buf, int len)
{
	struct usb_device_request req;

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = UR_SET_ADDRESS;
	USETW(req.wValue, addr);
	USETW(req.wIndex, 0);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static int
rue_write_mem(struct rue_softc *sc, uint16_t addr, void *buf, int len)
{
	struct usb_device_request req;

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = UR_SET_ADDRESS;
	USETW(req.wValue, addr);
	USETW(req.wIndex, 0);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static uint8_t
rue_csr_read_1(struct rue_softc *sc, uint16_t reg)
{
	uint8_t val;

	rue_read_mem(sc, reg, &val, 1);
	return (val);
}

static uint16_t
rue_csr_read_2(struct rue_softc *sc, uint16_t reg)
{
	uint8_t val[2];

	rue_read_mem(sc, reg, &val, 2);
	return (UGETW(val));
}

static int
rue_csr_write_1(struct rue_softc *sc, uint16_t reg, uint8_t val)
{
	return (rue_write_mem(sc, reg, &val, 1));
}

static int
rue_csr_write_2(struct rue_softc *sc, uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	return (rue_write_mem(sc, reg, &temp, 2));
}

static int
rue_csr_write_4(struct rue_softc *sc, int reg, uint32_t val)
{
	uint8_t temp[4];

	USETDW(temp, val);
	return (rue_write_mem(sc, reg, &temp, 4));
}

static int
rue_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rue_softc *sc = device_get_softc(dev);
	uint16_t rval;
	uint16_t ruereg;
	int locked;

	if (phy != 0)		/* The RTL8150 supports PHY 0 only. */
		return (0);

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		RUE_LOCK(sc);

	switch (reg) {
	case MII_BMCR:
		ruereg = RUE_BMCR;
		break;
	case MII_BMSR:
		ruereg = RUE_BMSR;
		break;
	case MII_ANAR:
		ruereg = RUE_ANAR;
		break;
	case MII_ANER:
		ruereg = RUE_AER;
		break;
	case MII_ANLPAR:
		ruereg = RUE_ANLP;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		rval = 0;
		goto done;
	default:
		if (RUE_REG_MIN <= reg && reg <= RUE_REG_MAX) {
			rval = rue_csr_read_1(sc, reg);
			goto done;
		}
		device_printf(sc->sc_ue.ue_dev, "bad phy register\n");
		rval = 0;
		goto done;
	}
	rval = rue_csr_read_2(sc, ruereg);
done:
	if (!locked)
		RUE_UNLOCK(sc);
	return (rval);
}

static int
rue_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rue_softc *sc = device_get_softc(dev);
	uint16_t ruereg;
	int locked;

	if (phy != 0)		/* The RTL8150 supports PHY 0 only. */
		return (0);

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		RUE_LOCK(sc);

	switch (reg) {
	case MII_BMCR:
		ruereg = RUE_BMCR;
		break;
	case MII_BMSR:
		ruereg = RUE_BMSR;
		break;
	case MII_ANAR:
		ruereg = RUE_ANAR;
		break;
	case MII_ANER:
		ruereg = RUE_AER;
		break;
	case MII_ANLPAR:
		ruereg = RUE_ANLP;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		goto done;
	default:
		if (RUE_REG_MIN <= reg && reg <= RUE_REG_MAX) {
			rue_csr_write_1(sc, reg, data);
			goto done;
		}
		device_printf(sc->sc_ue.ue_dev, "bad phy register\n");
		goto done;
	}
	rue_csr_write_2(sc, ruereg, data);
done:
	if (!locked)
		RUE_UNLOCK(sc);
	return (0);
}

static void
rue_miibus_statchg(device_t dev)
{
	/*
	 * When the code below is enabled the card starts doing weird
	 * things after the link goes from UP to DOWN and back UP.
	 *
	 * It looks like some of the register writes below mess up the
	 * PHY interface.
	 *
	 * No visible regressions were found after commenting this code
	 * out, so it is disabled for good.
	 */
#if 0
	struct rue_softc *sc = device_get_softc(dev);
	struct mii_data *mii = GET_MII(sc);
	uint16_t bmcr;
	int locked;

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		RUE_LOCK(sc);

	RUE_CLRBIT(sc, RUE_CR, (RUE_CR_RE | RUE_CR_TE));

	bmcr = rue_csr_read_2(sc, RUE_BMCR);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		bmcr |= RUE_BMCR_SPD_SET;
	else
		bmcr &= ~RUE_BMCR_SPD_SET;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		bmcr |= RUE_BMCR_DUPLEX;
	else
		bmcr &= ~RUE_BMCR_DUPLEX;

	rue_csr_write_2(sc, RUE_BMCR, bmcr);

	RUE_SETBIT(sc, RUE_CR, (RUE_CR_RE | RUE_CR_TE));

	if (!locked)
		RUE_UNLOCK(sc);
#endif
}

static void
rue_setpromisc(struct usb_ether *ue)
{
	struct rue_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);

	RUE_LOCK_ASSERT(sc, MA_OWNED);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		RUE_SETBIT(sc, RUE_RCR, RUE_RCR_AAP);
	else
		RUE_CLRBIT(sc, RUE_RCR, RUE_RCR_AAP);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
rue_setmulti(struct usb_ether *ue)
{
	struct rue_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);
	uint16_t rxcfg;
	int h = 0;
	uint32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	int mcnt = 0;

	RUE_LOCK_ASSERT(sc, MA_OWNED);

	rxcfg = rue_csr_read_2(sc, RUE_RCR);
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxcfg |= (RUE_RCR_AAM | RUE_RCR_AAP);
		rxcfg &= ~RUE_RCR_AM;
		rue_csr_write_2(sc, RUE_RCR, rxcfg);
		rue_csr_write_4(sc, RUE_MAR0, 0xFFFFFFFF);
		rue_csr_write_4(sc, RUE_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	rue_csr_write_4(sc, RUE_MAR0, 0);
	rue_csr_write_4(sc, RUE_MAR4, 0);

	/* now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH (ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt)
		rxcfg |= RUE_RCR_AM;
	else
		rxcfg &= ~RUE_RCR_AM;
	rxcfg &= ~(RUE_RCR_AAM | RUE_RCR_AAP);
	rue_csr_write_2(sc, RUE_RCR, rxcfg);
	rue_csr_write_4(sc, RUE_MAR0, hashes[0]);
	rue_csr_write_4(sc, RUE_MAR4, hashes[1]);
}

static void
rue_reset(struct rue_softc *sc)
{
	int i;

	rue_csr_write_1(sc, RUE_CR, RUE_CR_SOFT_RST);

	for (i = 0; i != RUE_TIMEOUT; i++) {
		if (uether_pause(&sc->sc_ue, hz / 1000))
			break;
		if (!(rue_csr_read_1(sc, RUE_CR) & RUE_CR_SOFT_RST))
			break;
	}
	if (i == RUE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev, "reset never completed\n");

	uether_pause(&sc->sc_ue, hz / 100);
}

static void
rue_attach_post(struct usb_ether *ue)
{
	struct rue_softc *sc = uether_getsc(ue);

	/* reset the adapter */
	rue_reset(sc);

	/* get station address from the EEPROM */
	rue_read_mem(sc, RUE_EEPROM_IDR0, ue->ue_eaddr, ETHER_ADDR_LEN);
}

/*
 * Probe for a RTL8150 chip.
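 * Matching is done against the rue_devs vendor/product table above;
 * usbd_lookup_id_by_uaa() returns zero when the device is listed
 * there and an error code otherwise, which serves directly as the
 * probe result.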
 */
static int
rue_probe(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);

	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != RUE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != RUE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(rue_devs, sizeof(rue_devs), uaa));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
rue_attach(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct rue_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;
	uint8_t iface_index;
	int error;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = RUE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, rue_config, RUE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		goto detach;
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &rue_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	rue_detach(dev);
	return (ENXIO);			/* failure */
}

static int
rue_detach(device_t dev)
{
	struct rue_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;

	usbd_transfer_unsetup(sc->sc_xfer, RUE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static void
rue_intr_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct rue_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct rue_intrpkt pkt;
	struct usb_page_cache *pc;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		if (ifp && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
		    actlen >= (int)sizeof(pkt)) {

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_out(pc, 0, &pkt, sizeof(pkt));

			ifp->if_ierrors += pkt.rue_rxlost_cnt;
			ifp->if_ierrors += pkt.rue_crcerr_cnt;
			ifp->if_collisions += pkt.rue_col_cnt;
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		return;

	default:			/* Error */
		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

static void
rue_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct rue_softc *sc = usbd_xfer_softc(xfer);
	struct usb_ether *ue = &sc->sc_ue;
	struct ifnet *ifp = uether_getifp(ue);
	struct usb_page_cache *pc;
	uint16_t status;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		if (actlen < 4) {
			ifp->if_ierrors++;
			goto tr_setup;
		}
		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_out(pc, actlen - 4, &status, sizeof(status));
		actlen -= 4;

		/* check whether the received packet is valid */
		status = le16toh(status);
		if ((status & RUE_RXSTAT_VALID) == 0) {
			ifp->if_ierrors++;
			goto tr_setup;
		}
		uether_rxbuf(ue, pc, 0, actlen);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		return;

	default:			/* Error */
		DPRINTF("bulk read error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

static void
rue_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct rue_softc
	    *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	int temp_len;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete\n");
		ifp->if_opackets++;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & RUE_FLAG_LINK) == 0) {
			/* don't send anything if there is no link! */
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;
		if (m->m_pkthdr.len > MCLBYTES)
			m->m_pkthdr.len = MCLBYTES;
		temp_len = m->m_pkthdr.len;

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);

		/*
		 * This is undocumented behavior: the RTL8150 won't send
		 * frames shorter than RUE_MIN_FRAMELEN (60) bytes, so pad
		 * short packets out to the minimum length.
		 */
		if (temp_len < RUE_MIN_FRAMELEN) {
			usbd_frame_zero(pc, temp_len,
			    RUE_MIN_FRAMELEN - temp_len);
			temp_len = RUE_MIN_FRAMELEN;
		}
		usbd_xfer_set_frame_len(xfer, 0, temp_len);

		/*
		 * if there's a BPF listener, bounce a copy
		 * of this frame to him:
		 */
		BPF_MTAP(ifp, m);

		m_freem(m);

		usbd_transfer_submit(xfer);
		return;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error));

		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

static void
rue_tick(struct usb_ether *ue)
{
	struct rue_softc *sc = uether_getsc(ue);
	struct mii_data *mii = GET_MII(sc);

	RUE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
	if ((sc->sc_flags & RUE_FLAG_LINK) == 0 &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->sc_flags |= RUE_FLAG_LINK;
		rue_start(ue);
	}
}

static void
rue_start(struct usb_ether *ue)
{
	struct rue_softc *sc = uether_getsc(ue);

	/*
	 * start the USB transfers, if not already started:
	 */
	usbd_transfer_start(sc->sc_xfer[RUE_INTR_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[RUE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[RUE_BULK_DT_WR]);
}

static void
rue_init(struct usb_ether *ue)
{
	struct rue_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);

	RUE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Cancel pending I/O
	 */
	rue_reset(sc);

	/* Set MAC address */
	rue_write_mem(sc, RUE_IDR0, IF_LLADDR(ifp), ETHER_ADDR_LEN);

	rue_stop(ue);

	/*
	 * Set the initial TX and RX configuration.
	 */
	rue_csr_write_1(sc, RUE_TCR, RUE_TCR_CONFIG);
	rue_csr_write_2(sc, RUE_RCR, RUE_RCR_CONFIG|RUE_RCR_AB);

	/* Set up promiscuous mode. */
	rue_setpromisc(ue);

	/* Load the multicast filter. */
	rue_setmulti(ue);

	/* Enable RX and TX */
	rue_csr_write_1(sc, RUE_CR, (RUE_CR_TE | RUE_CR_RE | RUE_CR_EP3CLREN));

	usbd_xfer_set_stall(sc->sc_xfer[RUE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	rue_start(ue);
}

/*
 * Set media options.
 */
static int
rue_ifmedia_upd(struct ifnet *ifp)
{
	struct rue_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);
	struct mii_softc *miisc;

	RUE_LOCK_ASSERT(sc, MA_OWNED);

	sc->sc_flags &= ~RUE_FLAG_LINK;
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	mii_mediachg(mii);
	return (0);
}

/*
 * Report current media status.
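 * The PHY is polled via mii_pollstat() with the driver lock held, and
 * the current media word and link status are copied into the caller's
 * ifmediareq structure.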
*/ static void rue_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rue_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); RUE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; RUE_UNLOCK(sc); } static void rue_stop(struct usb_ether *ue) { struct rue_softc *sc = uether_getsc(ue); struct ifnet *ifp = uether_getifp(ue); RUE_LOCK_ASSERT(sc, MA_OWNED); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->sc_flags &= ~RUE_FLAG_LINK; /* * stop all the transfers, if not already stopped: */ usbd_transfer_stop(sc->sc_xfer[RUE_BULK_DT_WR]); usbd_transfer_stop(sc->sc_xfer[RUE_BULK_DT_RD]); usbd_transfer_stop(sc->sc_xfer[RUE_INTR_DT_RD]); rue_csr_write_1(sc, RUE_CR, 0x00); rue_reset(sc); } Index: projects/nand/sys/dev/xl/if_xl.c =================================================================== --- projects/nand/sys/dev/xl/if_xl.c (revision 235262) +++ projects/nand/sys/dev/xl/if_xl.c (revision 235263) @@ -1,3293 +1,3294 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include
__FBSDID("$FreeBSD$");

/*
 * 3Com 3c90x Etherlink XL PCI NIC driver
 *
 * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
 * bus-master chips (3c90x cards and embedded controllers) including
 * the following:
 *
 * 3Com 3c900-TPO	10Mbps/RJ-45
 * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
 * 3Com 3c905-TX	10/100Mbps/RJ-45
 * 3Com 3c905-T4	10/100Mbps/RJ-45
 * 3Com 3c900B-TPO	10Mbps/RJ-45
 * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
 * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
 * 3Com 3c900B-FL	10Mbps/Fiber-optic
 * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
 * 3Com 3c905B-TX	10/100Mbps/RJ-45
 * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
 * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
 * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
 * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
 * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
 * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
 * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
 * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
 * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
 * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
 * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
 * Dell on-board 3c920 10/100Mbps/RJ-45
 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
 * Dell Latitude laptop docking station embedded 3c905-TX
 *
 * Written by Bill Paul
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The 3c90x series chips use a bus-master DMA interface for transferring
 * packets to and from the controller chip. Some of the "vortex" cards
 * (3c59x) also supported a bus master mode, however for those chips
 * you could only DMA packets to/from a contiguous memory buffer. For
 * transmission this would mean copying the contents of the queued mbuf
 * chain into an mbuf cluster and then DMAing the cluster. This extra
 * copy would sort of defeat the purpose of the bus master support for
 * any packet that doesn't fit into a single mbuf.
 *
 * By contrast, the 3c90x cards support a fragment-based bus master
 * mode where mbuf chains can be encapsulated using TX descriptors.
 * This is similar to other PCI chips such as the Texas Instruments
 * ThunderLAN and the Intel 82557/82558.
 *
 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
 * bus master chips because they maintain the old PIO interface for
 * backwards compatibility, but starting with the 3c905B and the
 * "cyclone" chips, the compatibility interface has been dropped.
 * Since using bus master DMA is a big win, we use this driver to
 * support the PCI "boomerang" chips even though they work with the
 * "vortex" driver in order to obtain better performance.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

MODULE_DEPEND(xl, pci, 1, 1, 1);
MODULE_DEPEND(xl, ether, 1, 1, 1);
MODULE_DEPEND(xl, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here.
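 * The MODULE_DEPEND() declarations above record these dependencies
 * for the kernel linker, so loading xl as a module pulls in the pci,
 * ether and miibus code automatically.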
*/ #include "miibus_if.h" #include /* * TX Checksumming is disabled by default for two reasons: * - TX Checksumming will occasionally produce corrupt packets * - TX Checksumming seems to reduce performance * * Only 905B/C cards were reported to have this problem, it is possible * that later chips _may_ be immune. */ #define XL905B_TXCSUM_BROKEN 1 #ifdef XL905B_TXCSUM_BROKEN #define XL905B_CSUM_FEATURES 0 #else #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #endif /* * Various supported device vendors/types and their names. */ static const struct xl_type const xl_devs[] = { { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT, "3Com 3c900-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO, "3Com 3c900-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT, "3Com 3c905-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4, "3Com 3c905-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT, "3Com 3c900B-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO, "3Com 3c900B-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC, "3Com 3c900B-TPC Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL, "3Com 3c900B-FL Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT, "3Com 3c905B-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4, "3Com 3c905B-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX, "3Com 3c905B-FX/SC Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO, "3Com 3c905B-COMBO Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT, "3Com 3c905C-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B, "3Com 3c920B-EMB Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM, "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV, "3Com 3c980 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV, "3Com 3c980C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX, "3Com 3cSOHO100-TX OfficeConnect" }, { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT, "3Com 3c450-TX HomeConnect" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_555, "3Com 3c555 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556, "3Com 3c556 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556B, "3Com 3c556B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575A, "3Com 3c575TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575B, "3Com 3c575B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575C, "3Com 3c575C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656, "3Com 3c656 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656B, "3Com 3c656B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_656C, "3Com 3c656C Fast Etherlink XL" }, { 0, 0, NULL } }; static int xl_probe(device_t); static int xl_attach(device_t); static int xl_detach(device_t); static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); static void xl_tick(void *); static void xl_stats_update(struct xl_softc *); static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **); static int xl_rxeof(struct xl_softc *); static void xl_rxeof_task(void *, int); static int xl_rx_resync(struct xl_softc *); static void xl_txeof(struct xl_softc *); static void xl_txeof_90xB(struct xl_softc *); static void xl_txeoc(struct xl_softc *); static void xl_intr(void *); static void xl_start(struct ifnet *); 
static void xl_start_locked(struct ifnet *); static void xl_start_90xB_locked(struct ifnet *); static int xl_ioctl(struct ifnet *, u_long, caddr_t); static void xl_init(void *); static void xl_init_locked(struct xl_softc *); static void xl_stop(struct xl_softc *); static int xl_watchdog(struct xl_softc *); static int xl_shutdown(device_t); static int xl_suspend(device_t); static int xl_resume(device_t); static void xl_setwol(struct xl_softc *); #ifdef DEVICE_POLLING static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static int xl_ifmedia_upd(struct ifnet *); static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int xl_eeprom_wait(struct xl_softc *); static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); static void xl_rxfilter(struct xl_softc *); static void xl_rxfilter_90x(struct xl_softc *); static void xl_rxfilter_90xB(struct xl_softc *); static void xl_setcfg(struct xl_softc *); static void xl_setmode(struct xl_softc *, int); static void xl_reset(struct xl_softc *); static int xl_list_rx_init(struct xl_softc *); static int xl_list_tx_init(struct xl_softc *); static int xl_list_tx_init_90xB(struct xl_softc *); static void xl_wait(struct xl_softc *); static void xl_mediacheck(struct xl_softc *); static void xl_choose_media(struct xl_softc *sc, int *media); static void xl_choose_xcvr(struct xl_softc *, int); static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int); #ifdef notdef static void xl_testpacket(struct xl_softc *); #endif static int xl_miibus_readreg(device_t, int, int); static int xl_miibus_writereg(device_t, int, int, int); static void xl_miibus_statchg(device_t); static void xl_miibus_mediainit(device_t); /* * MII bit-bang glue */ static uint32_t xl_mii_bitbang_read(device_t); static void xl_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops xl_mii_bitbang_ops = { xl_mii_bitbang_read, xl_mii_bitbang_write, { XL_MII_DATA, /* MII_BIT_MDO */ XL_MII_DATA, /* MII_BIT_MDI */ XL_MII_CLK, /* MII_BIT_MDC */ XL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static device_method_t xl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xl_probe), DEVMETHOD(device_attach, xl_attach), DEVMETHOD(device_detach, xl_detach), DEVMETHOD(device_shutdown, xl_shutdown), DEVMETHOD(device_suspend, xl_suspend), DEVMETHOD(device_resume, xl_resume), /* MII interface */ DEVMETHOD(miibus_readreg, xl_miibus_readreg), DEVMETHOD(miibus_writereg, xl_miibus_writereg), DEVMETHOD(miibus_statchg, xl_miibus_statchg), DEVMETHOD(miibus_mediainit, xl_miibus_mediainit), DEVMETHOD_END }; static driver_t xl_driver = { "xl", xl_methods, sizeof(struct xl_softc) }; static devclass_t xl_devclass; -DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0); -DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0); +DRIVER_MODULE_ORDERED(xl, pci, xl_driver, xl_devclass, NULL, NULL, + SI_ORDER_ANY); +DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, NULL, NULL); static void xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; paddr = arg; *paddr = segs->ds_addr; } /* * Murphy's law says that it's possible the chip can wedge and * the 'command in progress' bit may never clear. Hence, we wait * only a finite amount of time to avoid getting caught in an * infinite loop. 
 Normally this delay routine would be a macro,
 * but it isn't called during normal operation so we can afford
 * to make it a function. Suppress the warning when the card is gone.
 */
static void
xl_wait(struct xl_softc *sc)
{
	register int i;

	for (i = 0; i < XL_TIMEOUT; i++) {
		if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
			break;
	}

	if (i == XL_TIMEOUT && bus_child_present(sc->xl_dev))
		device_printf(sc->xl_dev, "command never completed!\n");
}

/*
 * MII access routines are provided for adapters with external
 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
 * Note: if you don't perform the MDIO operations just right,
 * it's possible to end up with code that works correctly with
 * some chips/CPUs/processor speeds/bus speeds/etc but not
 * with others.
 */

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
xl_mii_bitbang_read(device_t dev)
{
	struct xl_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	/* We're already in window 4. */
	val = CSR_READ_2(sc, XL_W4_PHY_MGMT);
	CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
xl_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct xl_softc *sc;

	sc = device_get_softc(dev);

	/* We're already in window 4. */
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val);
	CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

static int
xl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct xl_softc *sc;

	sc = device_get_softc(dev);

	/* Select window 4. */
	XL_SEL_WIN(4);

	return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg));
}

static int
xl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct xl_softc *sc;

	sc = device_get_softc(dev);

	/* Select window 4. */
	XL_SEL_WIN(4);

	mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data);

	return (0);
}

static void
xl_miibus_statchg(device_t dev)
{
	struct xl_softc *sc;
	struct mii_data *mii;
	uint8_t macctl;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->xl_miibus);

	xl_setcfg(sc);

	/* Set ASIC's duplex mode to match the PHY. */
	XL_SEL_WIN(3);
	macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		macctl |= XL_MACCTRL_DUPLEX;
		if (sc->xl_type == XL_TYPE_905B) {
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				macctl |= XL_MACCTRL_FLOW_CONTROL_ENB;
			else
				macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
		}
	} else {
		macctl &= ~XL_MACCTRL_DUPLEX;
		if (sc->xl_type == XL_TYPE_905B)
			macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
	}
	CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
}

/*
 * Special support for the 3c905B-COMBO. This card has 10/100 support
 * plus BNC and AUI ports. This means we will have both an miibus attached
 * plus some non-MII media settings. In order to allow this, we have to
 * add the extra media to the miibus's ifmedia struct, but we can't do
 * that during xl_attach() because the miibus hasn't been attached yet.
 * So instead, we wait until the miibus probe/attach is done, at which
 * point we will get a callback telling us that it's safe to add our
 * extra media.
 */
static void
xl_miibus_mediainit(device_t dev)
{
	struct xl_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->xl_miibus);
	ifm = &mii->mii_media;

	if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
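		 * A 3c905B whose only media option is XL_MEDIAOPT_10FL
		 * is taken to be a 10baseFL adapter, so the 10baseFL
		 * media types are added to the ifmedia list instead of
		 * a plain AUI port.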
*/ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(sc->xl_dev, "found 10baseFL\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(ifm, IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(sc->xl_dev, "found AUI\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(sc->xl_dev, "found BNC\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL); } } /* * The EEPROM is slow: give it time to come ready after issuing * it a command. */ static int xl_eeprom_wait(struct xl_softc *sc) { int i; for (i = 0; i < 100; i++) { if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) DELAY(162); else break; } if (i == 100) { device_printf(sc->xl_dev, "eeprom failed to come ready\n"); return (1); } return (0); } /* * Read a sequence of words from the EEPROM. Note that ethernet address * data is stored in the EEPROM in network byte order. */ static int xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap) { int err = 0, i; u_int16_t word = 0, *ptr; #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) /* * XXX: WARNING! DANGER! * It's easy to accidentally overwrite the rom content! * Note: the 3c575 uses 8bit EEPROM offsets. */ XL_SEL_WIN(0); if (xl_eeprom_wait(sc)) return (1); if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) off += 0x30; for (i = 0; i < cnt; i++) { if (sc->xl_flags & XL_FLAG_8BITROM) CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); else CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); err = xl_eeprom_wait(sc); if (err) break; word = CSR_READ_2(sc, XL_W0_EE_DATA); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return (err ? 1 : 0); } static void xl_rxfilter(struct xl_softc *sc) { if (sc->xl_type == XL_TYPE_905B) xl_rxfilter_90xB(sc); else xl_rxfilter_90x(sc); } /* * NICs older than the 3c905B have only one multicast option, which * is to enable reception of all multicast frames. */ static void xl_rxfilter_90x(struct xl_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int8_t rxfilt; XL_LOCK_ASSERT(sc); ifp = sc->xl_ifp; XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) rxfilt |= XL_RXFILTER_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= XL_RXFILTER_ALLFRAMES; if (ifp->if_flags & IFF_ALLMULTI) rxfilt |= XL_RXFILTER_ALLMULTI; } else { if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; rxfilt |= XL_RXFILTER_ALLMULTI; break; } if_maddr_runlock(ifp); } CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); XL_SEL_WIN(7); } /* * 3c905B adapters have a hash filter that we can program. 
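 * Each multicast address is hashed with ether_crc32_be() and the low
 * 8 bits of the CRC pick the table bit to set through the
 * XL_CMD_RX_SET_HASH command. As a sketch of one entry being
 * programmed (mirroring the loop in xl_rxfilter_90xB() below, with
 * sdl standing for the address's sockaddr_dl):
 *
 *	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0xFF;
 *	CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET);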
*/ static void xl_rxfilter_90xB(struct xl_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; int i, mcnt; u_int16_t h; u_int8_t rxfilt; XL_LOCK_ASSERT(sc); ifp = sc->xl_ifp; XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL | XL_RXFILTER_MULTIHASH); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) rxfilt |= XL_RXFILTER_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= XL_RXFILTER_ALLFRAMES; if (ifp->if_flags & IFF_ALLMULTI) rxfilt |= XL_RXFILTER_ALLMULTI; } else { /* First, zot all the existing hash bits. */ for (i = 0; i < XL_HASHFILT_SIZE; i++) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | i); /* Now program new ones. */ mcnt = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Note: the 3c905B currently only supports a 64-bit * hash table, which means we really only need 6 bits, * but the manual indicates that future chip revisions * will have a 256-bit hash table, hence the routine * is set up to calculate 8 bits of position info in * case we need it some day. * Note II, The Sequel: _CURRENT_ versions of the * 3c905B have a 256 bit hash table. This means we have * to use all 8 bits regardless. On older cards, the * upper 2 bits will be ignored. Grrrr.... */ h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF; CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) rxfilt |= XL_RXFILTER_MULTIHASH; } CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); XL_SEL_WIN(7); } static void xl_setcfg(struct xl_softc *sc) { u_int32_t icfg; /*XL_LOCK_ASSERT(sc);*/ XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); icfg &= ~XL_ICFG_CONNECTOR_MASK; if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); if (sc->xl_media & XL_MEDIAOPT_BTX) icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); } static void xl_setmode(struct xl_softc *sc, int media) { u_int32_t icfg; u_int16_t mediastat; char *pmsg = "", *dmsg = ""; XL_LOCK_ASSERT(sc); XL_SEL_WIN(4); mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); if (sc->xl_media & XL_MEDIAOPT_BT) { if (IFM_SUBTYPE(media) == IFM_10_T) { pmsg = "10baseT transceiver"; sc->xl_xcvr = XL_XCVR_10BT; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (IFM_SUBTYPE(media) == IFM_100_FX) { pmsg = "100baseFX port"; sc->xl_xcvr = XL_XCVR_100BFX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { if (IFM_SUBTYPE(media) == IFM_10_5) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= 
		    ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD);
			mediastat |= XL_MEDIASTAT_SQEENB;
		}
		if (IFM_SUBTYPE(media) == IFM_10_FL) {
			pmsg = "10baseFL transceiver";
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
			    XL_MEDIASTAT_JABGUARD);
			mediastat |= XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (IFM_SUBTYPE(media) == IFM_10_2) {
			pmsg = "AUI port";
			sc->xl_xcvr = XL_XCVR_COAX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
			    XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX ||
	    IFM_SUBTYPE(media) == IFM_100_FX) {
		dmsg = "full";
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		dmsg = "half";
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	if (IFM_SUBTYPE(media) == IFM_10_2)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);

	DELAY(800);
	XL_SEL_WIN(7);

	device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
}

static void
xl_reset(struct xl_softc *sc)
{
	register int i;

	XL_LOCK_ASSERT(sc);

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD : 0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C CardBus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		device_printf(sc->xl_dev, "reset didn't complete\n");

	/* Reset TX and RX. */
	/*
	 * Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
		    CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
		    ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
		    XL_RESETOPT_INVERT_LED : 0) |
		    ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
		    XL_RESETOPT_INVERT_MII : 0));
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}

/*
 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
xl_probe(device_t dev)
{
	const struct xl_type *t;

	t = xl_devs;

	while (t->xl_name != NULL) {
		if ((pci_get_vendor(dev) == t->xl_vid) &&
		    (pci_get_device(dev) == t->xl_did)) {
			device_set_desc(dev, t->xl_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * This routine is a kludge to work around possible hardware faults
 * or manufacturing defects that can cause the media options register
 * (or reset options register, as it's called for the first generation
 * 3c90x adapters) to return an incorrect result.
I have encountered * one Dell Latitude laptop docking station with an integrated 3c905-TX * which doesn't have any of the 'mediaopt' bits set. This screws up * the attach routine pretty badly because it doesn't know what media * to look for. If we find ourselves in this predicament, this routine * will try to guess the media options values and warn the user of a * possible manufacturing defect with his adapter/system/whatever. */ static void xl_mediacheck(struct xl_softc *sc) { /* * If some of the media options bits are set, assume they are * correct. If not, try to figure it out down below. * XXX I should check for 10baseFL, but I don't have an adapter * to test with. */ if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { /* * Check the XCVR value. If it's not in the normal range * of values, we need to fake it up here. */ if (sc->xl_xcvr <= XL_XCVR_AUTO) return; else { device_printf(sc->xl_dev, "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr); device_printf(sc->xl_dev, "choosing new default based on card type\n"); } } else { if (sc->xl_type == XL_TYPE_905B && sc->xl_media & XL_MEDIAOPT_10FL) return; device_printf(sc->xl_dev, "WARNING: no media options bits set in the media options register!!\n"); device_printf(sc->xl_dev, "this could be a manufacturing defect in your adapter or system\n"); device_printf(sc->xl_dev, "attempting to guess media type; you should probably consult your vendor\n"); } xl_choose_xcvr(sc, 1); } static void xl_choose_xcvr(struct xl_softc *sc, int verbose) { u_int16_t devid; /* * Read the device ID from the EEPROM. * This is what's loaded into the PCI device ID register, so it has * to be correct otherwise we wouldn't have gotten this far. */ xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); switch (devid) { case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ sc->xl_media = XL_MEDIAOPT_BT; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing 10BaseT transceiver\n"); break; case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing COMBO (AUI/BNC/TP)\n"); break; case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n"); break; case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ sc->xl_media = XL_MEDIAOPT_10FL; sc->xl_xcvr = XL_XCVR_AUI; if (verbose) device_printf(sc->xl_dev, "guessing 10baseFL\n"); break; case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_555: /* 3c555 */ case TC_DEVICEID_HURRICANE_556: /* 3c556 */ case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ case TC_DEVICEID_HURRICANE_656: /* 3c656 */ case TC_DEVICEID_HURRICANE_656B: /* 3c656B */ case TC_DEVICEID_TORNADO_656C: /* 3c656C */ case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */ case TC_DEVICEID_TORNADO_10_100BT_920B_WNM: /* 3c920B-EMB-WNM */ sc->xl_media = XL_MEDIAOPT_MII; sc->xl_xcvr = XL_XCVR_MII; if (verbose) device_printf(sc->xl_dev, "guessing MII\n"); break; case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ sc->xl_media = XL_MEDIAOPT_BT4; sc->xl_xcvr = XL_XCVR_MII; if 
(verbose) device_printf(sc->xl_dev, "guessing 100baseT4/MII\n"); break; case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */ case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ sc->xl_media = XL_MEDIAOPT_BTX; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 internal\n"); break; case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 plus BNC/AUI\n"); break; default: device_printf(sc->xl_dev, "unknown device ID: %x -- defaulting to 10baseT\n", devid); sc->xl_media = XL_MEDIAOPT_BT; break; } } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int xl_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t sinfo2, xcvr[2]; struct xl_softc *sc; struct ifnet *ifp; int media, pmcap; int error = 0, phy, rid, res, unit; uint16_t did; sc = device_get_softc(dev); sc->xl_dev = dev; unit = device_get_unit(dev); mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); did = pci_get_device(dev); sc->xl_flags = 0; if (did == TC_DEVICEID_HURRICANE_555) sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_556 || did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET | XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_555 || did == TC_DEVICEID_HURRICANE_556) sc->xl_flags |= XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_NO_XCVR_PWR; if (did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_FUNCREG; if (did == TC_DEVICEID_HURRICANE_575A || did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_656) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_575B) sc->xl_flags |= XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_HURRICANE_575C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_656 || did == TC_DEVICEID_HURRICANE_656B) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR | XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_TORNADO_10_100BT_920B || did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM) sc->xl_flags |= XL_FLAG_PHYOK; switch (did) { case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_575A: case TC_DEVICEID_HURRICANE_575B: case TC_DEVICEID_HURRICANE_575C: sc->xl_flags |= XL_FLAG_NO_MMIO; break; default: break; } /* * Map control/status registers. 
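/*
 * The register-mapping code that follows tries the memory-mapped BAR
 * first (unless a quirk flag forbids it) and falls back to port I/O.
 * A userland sketch of the try-preferred-then-fallback shape;
 * alloc_mmio()/alloc_pio() are made-up stand-ins for
 * bus_alloc_resource_any().
 */
#include <stdbool.h>
#include <stdio.h>

static bool
alloc_mmio(void)
{
	return (false);		/* pretend the memory BAR can't be mapped */
}

static bool
alloc_pio(void)
{
	return (true);
}

static int
map_registers(bool mmio_allowed)
{
	if (mmio_allowed && alloc_mmio()) {
		printf("using memory mapped I/O\n");
		return (0);
	}
	if (alloc_pio()) {
		printf("using port I/O\n");
		return (0);
	}
	printf("couldn't map ports/memory\n");
	return (-1);		/* attach would fail with ENXIO here */
}

int
main(void)
{
	return (map_registers(true) == 0 ? 0 : 1);
}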
*/ pci_enable_busmaster(dev); if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); } if (sc->xl_res != NULL) { sc->xl_flags |= XL_FLAG_USE_MMIO; if (bootverbose) device_printf(dev, "using memory mapped I/O\n"); } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); if (sc->xl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "using port I/O\n"); } sc->xl_btag = rman_get_bustag(sc->xl_res); sc->xl_bhandle = rman_get_bushandle(sc->xl_res); if (sc->xl_flags & XL_FLAG_FUNCREG) { rid = XL_PCI_FUNCMEM; sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->xl_fres == NULL) { device_printf(dev, "couldn't map funcreg memory\n"); error = ENXIO; goto fail; } sc->xl_ftag = rman_get_bustag(sc->xl_fres); sc->xl_fhandle = rman_get_bushandle(sc->xl_fres); } /* Allocate interrupt */ rid = 0; sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->xl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Initialize interface name. */ ifp = sc->xl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* Reset the adapter. */ XL_LOCK(sc); xl_reset(sc); XL_UNLOCK(sc); /* * Get station address from the EEPROM. */ if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0); TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc); /* * Now allocate a tag for the DMA descriptor lists and a chunk * of DMA-able memory based on the tag. Also obtain the DMA * addresses of the RX and TX ring, which we'll need later. * All of our lists are allocated as a contiguous block * of memory. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_rx_tag); if (error) { device_printf(dev, "failed to allocate rx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag, (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap); if (error) { device_printf(dev, "no memory for rx list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the rx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_tx_tag); if (error) { device_printf(dev, "failed to allocate tx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag, (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap); if (error) { device_printf(dev, "no memory for tx list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the tx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } /* * Allocate a DMA tag for the mapping of mbufs. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL, NULL, &sc->xl_mtag); if (error) { device_printf(dev, "failed to allocate mbuf dma tag\n"); goto fail; } /* We need a spare DMA map for the RX ring. */ error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap); if (error) goto fail; /* * Figure out the card type. 3c905B adapters have the * 'supportsNoTxLength' bit set in the capabilities * word in the EEPROM. * Note: my 3c575C CardBus card lies. It returns a value * of 0x1578 for its capabilities word, which is somewhat * nonsensical. Another way to distinguish a 3c90x chip * from a 3c90xB/C chip is to check for the 'supportsLargePackets' * bit. This will only be set for 3c90x boomerang chips. */ xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); if (sc->xl_caps & XL_CAPS_NO_TXLENGTH || !(sc->xl_caps & XL_CAPS_LARGE_PKTS)) sc->xl_type = XL_TYPE_905B; else sc->xl_type = XL_TYPE_90X; /* Check availability of WOL. */ if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 && pci_find_cap(dev, PCIY_PMG, &pmcap) == 0) { sc->xl_pmcap = pmcap; sc->xl_flags |= XL_FLAG_WOL; sinfo2 = 0; xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0); if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose) device_printf(dev, "No auxiliary remote wakeup connector!\n"); } /* Set the TX start threshold for best performance.
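/*
 * Each bus_dma step above (tag create, memory alloc, map load) releases
 * exactly the resources that already succeeded before bailing out.  A
 * stripped-down sketch of that unwind discipline using plain malloc;
 * the struct and sizes are illustrative only.
 */
#include <stdlib.h>

struct rings {
	void	*rx_ring;
	void	*tx_ring;
	void	*spare;
};

static int
rings_create(struct rings *r, size_t rxsz, size_t txsz)
{
	r->rx_ring = calloc(1, rxsz);
	if (r->rx_ring == NULL)
		goto fail;
	r->tx_ring = calloc(1, txsz);
	if (r->tx_ring == NULL)
		goto fail;
	r->spare = calloc(1, 64);
	if (r->spare == NULL)
		goto fail;
	return (0);
fail:
	/* free(NULL) is a no-op, so one unwind path covers every step. */
	free(r->tx_ring);
	free(r->rx_ring);
	r->rx_ring = r->tx_ring = r->spare = NULL;
	return (-1);
}

int
main(void)
{
	struct rings r = { NULL, NULL, NULL };

	return (rings_create(&r, 4096, 4096) == 0 ? 0 : 1);
}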
*/ sc->xl_tx_thresh = XL_MIN_FRAMELEN; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xl_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; if (sc->xl_type == XL_TYPE_905B) { ifp->if_hwassist = XL905B_CSUM_FEATURES; #ifdef XL905B_TXCSUM_BROKEN ifp->if_capabilities |= IFCAP_RXCSUM; #else ifp->if_capabilities |= IFCAP_HWCSUM; #endif } if ((sc->xl_flags & XL_FLAG_WOL) != 0) ifp->if_capabilities |= IFCAP_WOL_MAGIC; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_start = xl_start; ifp->if_init = xl_init; IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Now we have to see what sort of media we have. * This includes probing for an MII interface and a * possible PHY. */ XL_SEL_WIN(3); sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); if (bootverbose) device_printf(dev, "media options word: %x\n", sc->xl_media); xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0); sc->xl_xcvr = xcvr[0] | xcvr[1] << 16; sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; xl_mediacheck(sc); if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { if (bootverbose) device_printf(dev, "found MII/AUTO\n"); xl_setcfg(sc); /* * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK. * This is to guard against problems with certain 3Com ASIC * revisions that incorrectly map the internal transceiver * control registers at all MII addresses. */ phy = MII_PHY_ANY; if ((sc->xl_flags & XL_FLAG_PHYOK) == 0) phy = 24; error = mii_attach(dev, &sc->xl_miibus, ifp, xl_ifmedia_upd, xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } goto done; } /* * Sanity check. If the user has selected "auto" and this isn't * a 10/100 card of some kind, we need to force the transceiver * type to something sane. */ if (sc->xl_xcvr == XL_XCVR_AUTO) xl_choose_xcvr(sc, bootverbose); /* * Do ifmedia setup. */ if (sc->xl_media & XL_MEDIAOPT_BT) { if (bootverbose) device_printf(dev, "found 10baseT\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise.
*/ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(dev, "found 10baseFL\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(dev, "found AUI\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(dev, "found BNC\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (bootverbose) device_printf(dev, "found 100baseFX\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL); } media = IFM_ETHER|IFM_100_TX|IFM_FDX; xl_choose_media(sc, &media); if (sc->xl_miibus == NULL) ifmedia_set(&sc->ifmedia, media); done: if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) { XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS); } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, xl_intr, sc, &sc->xl_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) xl_detach(dev); return (error); } /* * Choose a default media. * XXX This is a leaf function only called by xl_attach() and * acquires/releases the non-recursible driver mutex to * satisfy lock assertions. */ static void xl_choose_media(struct xl_softc *sc, int *media) { XL_LOCK(sc); switch (sc->xl_xcvr) { case XL_XCVR_10BT: *media = IFM_ETHER|IFM_10_T; xl_setmode(sc, *media); break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { *media = IFM_ETHER|IFM_10_FL; xl_setmode(sc, *media); } else { *media = IFM_ETHER|IFM_10_5; xl_setmode(sc, *media); } break; case XL_XCVR_COAX: *media = IFM_ETHER|IFM_10_2; xl_setmode(sc, *media); break; case XL_XCVR_AUTO: case XL_XCVR_100BTX: case XL_XCVR_MII: /* Chosen by miibus */ break; case XL_XCVR_100BFX: *media = IFM_ETHER|IFM_100_FX; break; default: device_printf(sc->xl_dev, "unknown XCVR type: %d\n", sc->xl_xcvr); /* * This will probably be wrong, but it prevents * the ifmedia code from panicking. */ *media = IFM_ETHER|IFM_10_T; break; } XL_UNLOCK(sc); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int xl_detach(device_t dev) { struct xl_softc *sc; struct ifnet *ifp; int rid, res; sc = device_get_softc(dev); ifp = sc->xl_ifp; KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized")); #ifdef DEVICE_POLLING if (ifp && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (sc->xl_flags & XL_FLAG_USE_MMIO) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; } /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { XL_LOCK(sc); xl_stop(sc); XL_UNLOCK(sc); taskqueue_drain(taskqueue_swi, &sc->xl_task); callout_drain(&sc->xl_tick_callout); ether_ifdetach(ifp); } if (sc->xl_miibus) device_delete_child(dev, sc->xl_miibus); bus_generic_detach(dev); ifmedia_removeall(&sc->ifmedia); if (sc->xl_intrhand) bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand); if (sc->xl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq); if (sc->xl_fres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, XL_PCI_FUNCMEM, sc->xl_fres); if (sc->xl_res) bus_release_resource(dev, res, rid, sc->xl_res); if (ifp) if_free(ifp); if (sc->xl_mtag) { bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap); bus_dma_tag_destroy(sc->xl_mtag); } if (sc->xl_ldata.xl_rx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); } if (sc->xl_ldata.xl_tx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); } mtx_destroy(&sc->xl_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int xl_list_tx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = NULL; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; } cd->xl_tx_free = &cd->xl_tx_chain[0]; cd->xl_tx_tail = cd->xl_tx_head = NULL; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the transmit descriptors. 
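/*
 * xl_list_tx_init() above (and the 90xB variant that follows) derive
 * each descriptor's bus address as ring base + index * sizeof(desc)
 * and stitch the software chain into a ring.  A compact sketch of that
 * indexing with a made-up descriptor layout and bus address.
 */
#include <stdint.h>
#include <stdio.h>

#define	RING_CNT	8

struct desc {				/* hypothetical hw descriptor */
	uint32_t	next;
	uint32_t	status;
};

struct chain {				/* software bookkeeping */
	struct desc	*ptr;
	uint32_t	 phys;
	struct chain	*next;
};

int
main(void)
{
	static struct desc ring[RING_CNT];
	static struct chain ch[RING_CNT];
	uint32_t base = 0x1000;		/* stand-in for the DMA bus addr */
	int i;

	for (i = 0; i < RING_CNT; i++) {
		ch[i].ptr = &ring[i];
		ch[i].phys = base + i * sizeof(struct desc);
		/* Close the ring: the last entry points at the first. */
		ch[i].next = &ch[(i + 1) % RING_CNT];
	}
	printf("desc 3 at bus address %#x\n", (unsigned)ch[3].phys);
	return (0);
}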
*/ static int xl_list_tx_init_90xB(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0]; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; if (i == 0) cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[XL_TX_LIST_CNT - 1]; else cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[i - 1]; } bzero(ld->xl_tx_list, XL_TX_LIST_SZ); ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); cd->xl_tx_prod = 1; cd->xl_tx_cons = 1; cd->xl_tx_cnt = 0; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int xl_list_rx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i, next; u_int32_t nextptr; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_RX_LIST_CNT; i++) { cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_rx_chain[i].xl_map); if (error) return (error); error = xl_newbuf(sc, &cd->xl_rx_chain[i]); if (error) return (error); if (i == (XL_RX_LIST_CNT - 1)) next = 0; else next = i + 1; nextptr = ld->xl_rx_dmaaddr + next * sizeof(struct xl_list_onefrag); cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next]; ld->xl_rx_list[i].xl_next = htole32(nextptr); } bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE); cd->xl_rx_head = &cd->xl_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * If we fail to do so, we need to leave the old mbuf and * the old DMA map untouched so that it can be reused. */ static int xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c) { struct mbuf *m_new = NULL; bus_dmamap_t map; bus_dma_segment_t segs[1]; int error, nseg; XL_LOCK_ASSERT(sc); m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (ENOBUFS); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; /* Force longword alignment for packet payload. 
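/*
 * The m_adj(m_new, ETHER_ALIGN) that follows eats two bytes at the
 * front of the cluster: the Ethernet header is 14 bytes, so a 2-byte
 * offset leaves the IP header that follows it on a 4-byte boundary.
 * A tiny arithmetic sketch of why 2 is the magic number.
 */
#include <assert.h>
#include <stdio.h>

#define	ETHER_ALIGN	2
#define	ETHER_HDR_LEN	14

int
main(void)
{
	unsigned long buf = 0;	/* cluster start, assumed 4-byte aligned */
	unsigned long ip_off = buf + ETHER_ALIGN + ETHER_HDR_LEN;

	assert(ip_off % 4 == 0);	/* 2 + 14 = 16, a multiple of 4 */
	printf("IP header at offset %lu, 4-byte aligned\n", ip_off);
	return (0);
}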
*/ m_adj(m_new, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new, segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(m_new); device_printf(sc->xl_dev, "can't map mbuf (error %d)\n", error); return (error); } KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg)); bus_dmamap_unload(sc->xl_mtag, c->xl_map); map = c->xl_map; c->xl_map = sc->xl_tmpmap; sc->xl_tmpmap = map; c->xl_mbuf = m_new; c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG); c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr); c->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD); return (0); } static int xl_rx_resync(struct xl_softc *sc) { struct xl_chain_onefrag *pos; int i; XL_LOCK_ASSERT(sc); pos = sc->xl_cdata.xl_rx_head; for (i = 0; i < XL_RX_LIST_CNT; i++) { if (pos->xl_ptr->xl_status) break; pos = pos->xl_next; } if (i == XL_RX_LIST_CNT) return (0); sc->xl_cdata.xl_rx_head = pos; return (EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static int xl_rxeof(struct xl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->xl_ifp; struct xl_chain_onefrag *cur_rx; int total_len; int rx_npkts = 0; u_int32_t rxstat; XL_LOCK_ASSERT(sc); again: bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_POSTREAD); while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = sc->xl_cdata.xl_rx_head; sc->xl_cdata.xl_rx_head = cur_rx->xl_next; total_len = rxstat & XL_RXSTAT_LENMASK; rx_npkts++; /* * Since we have told the chip to allow large frames, * we need to trap giant frame errors in software. We allow * a little more than the normal frame size to account for * frames with VLAN tags. */ if (total_len > XL_MAX_FRAMELEN) rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & XL_RXSTAT_UP_ERROR) { ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* * If the error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. */ if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { device_printf(sc->xl_dev, "bad receive status -- packet dropped\n"); ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* No errors; receive the packet. */ bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map, BUS_DMASYNC_POSTREAD); m = cur_rx->xl_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation. 
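/*
 * xl_newbuf() above loads the fresh mbuf into the driver's single
 * spare DMA map and only then swaps that map with the descriptor's, so
 * a load failure leaves the old mbuf and mapping untouched.  The same
 * commit-by-swap shape in miniature; the names are hypothetical.
 */
#include <stdio.h>

struct slot {
	const char	*buf;		/* stands in for mbuf + DMA map */
};

static int
refill(struct slot *s, const char **spare, const char *fresh)
{
	const char *old;

	if (fresh == NULL)
		return (-1);	/* failure: *s is untouched, still usable */
	old = s->buf;		/* success: commit by swapping */
	s->buf = fresh;
	*spare = old;
	return (0);
}

int
main(void)
{
	const char *spare = "spare";
	struct slot s = { "old" };

	if (refill(&s, &spare, "fresh") == 0)
		printf("slot=%s spare=%s\n", s.buf, spare);
	return (0);
}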
*/ if (xl_newbuf(sc, cur_rx)) { ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; if (ifp->if_capenable & IFCAP_RXCSUM) { /* Do IP checksum checking. */ if (rxstat & XL_RXSTAT_IPCKOK) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & XL_RXSTAT_IPCKERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((rxstat & XL_RXSTAT_TCPCOK && !(rxstat & XL_RXSTAT_TCPCKERR)) || (rxstat & XL_RXSTAT_UDPCKOK && !(rxstat & XL_RXSTAT_UDPCKERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } XL_UNLOCK(sc); (*ifp->if_input)(ifp, m); XL_LOCK(sc); /* * If we are running from the taskqueue, the interface * might have been stopped while we were passing the last * packet up the network stack. */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return (rx_npkts); } /* * Handle the 'end of channel' condition. When the upload * engine hits the end of the RX ring, it will stall. This * is our cue to flush the RX ring, reload the uplist pointer * register and unstall the engine. * XXX This is actually a little goofy. With the ThunderLAN * chip, you get an interrupt when the receiver hits the end * of the receive ring, which tells you exactly when * you need to reload the ring pointer. Here we have to * fake it. I'm mad at myself for not being clever enough * to avoid the use of a goto here. */ if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0]; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); goto again; } return (rx_npkts); } /* * Taskqueue wrapper for xl_rxeof(). */ static void xl_rxeof_task(void *arg, int pending) { struct xl_softc *sc = (struct xl_softc *)arg; XL_LOCK(sc); if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING) xl_rxeof(sc); XL_UNLOCK(sc); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void xl_txeof(struct xl_softc *sc) { struct xl_chain *cur_tx; struct ifnet *ifp = sc->xl_ifp; XL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been downloaded. Note: the 3c905B * sets a special bit in the status word to let us * know that a frame has been downloaded, but the * original 3c900/3c905 adapters don't do that. * Consequently, we have to use a different test if * xl_type != XL_TYPE_905B.
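/*
 * The RX path above translates per-descriptor checksum status bits
 * into mbuf csum_flags: "checked" when the chip examined the header,
 * "valid" only when no error bit is set.  A self-contained sketch of
 * that translation; the bit values here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define	HW_IPCKOK	0x01u	/* hypothetical: IP header was checked */
#define	HW_IPCKERR	0x02u	/* hypothetical: IP checksum was bad */
#define	SW_IP_CHECKED	0x10u
#define	SW_IP_VALID	0x20u

static uint32_t
csum_translate(uint32_t rxstat)
{
	uint32_t flags = 0;

	if (rxstat & HW_IPCKOK)
		flags |= SW_IP_CHECKED;
	if ((rxstat & HW_IPCKERR) == 0)
		flags |= SW_IP_VALID;	/* no error reported by the chip */
	return (flags);
}

int
main(void)
{
	printf("flags=%#x\n", (unsigned)csum_translate(HW_IPCKOK));
	return (0);
}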
while (sc->xl_cdata.xl_tx_head != NULL) { cur_tx = sc->xl_cdata.xl_tx_head; if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) break; sc->xl_cdata.xl_tx_head = cur_tx->xl_next; bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; cur_tx->xl_next = sc->xl_cdata.xl_tx_free; sc->xl_cdata.xl_tx_free = cur_tx; } if (sc->xl_cdata.xl_tx_head == NULL) { sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_tail = NULL; } else { if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } } } static void xl_txeof_90xB(struct xl_softc *sc) { struct xl_chain *cur_tx = NULL; struct ifnet *ifp = sc->xl_ifp; int idx; XL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_POSTREAD); idx = sc->xl_cdata.xl_tx_cons; while (idx != sc->xl_cdata.xl_tx_prod) { cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; if (!(le32toh(cur_tx->xl_ptr->xl_status) & XL_TXSTAT_DL_COMPLETE)) break; if (cur_tx->xl_mbuf != NULL) { bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; } ifp->if_opackets++; sc->xl_cdata.xl_tx_cnt--; XL_INC(idx, XL_TX_LIST_CNT); } if (sc->xl_cdata.xl_tx_cnt == 0) sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_cons = idx; if (cur_tx != NULL) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* * TX 'end of channel' interrupt handler. Actually, we should * only get a 'TX complete' interrupt if there's a transmit error, * so this is really a TX error handler. */ static void xl_txeoc(struct xl_softc *sc) { u_int8_t txstat; XL_LOCK_ASSERT(sc); while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { if (txstat & XL_TXSTATUS_UNDERRUN || txstat & XL_TXSTATUS_JABBER || txstat & XL_TXSTATUS_RECLAIM) { device_printf(sc->xl_dev, "transmission error: 0x%02x\n", txstat); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { if (sc->xl_cdata.xl_tx_cnt) { int i; struct xl_chain *c; i = sc->xl_cdata.xl_tx_cons; c = &sc->xl_cdata.xl_tx_chain[i]; CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys); CSR_WRITE_1(sc, XL_DOWN_POLL, 64); sc->xl_wdog_timer = 5; } } else { if (sc->xl_cdata.xl_tx_head != NULL) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); sc->xl_wdog_timer = 5; } } /* * Remember to set this for the * first generation 3c90X chips. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); if (txstat & XL_TXSTATUS_UNDERRUN && sc->xl_tx_thresh < XL_PACKET_SIZE) { sc->xl_tx_thresh += XL_MIN_FRAMELEN; device_printf(sc->xl_dev, "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } else { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } /* * Write an arbitrary byte to the TX_STATUS register * to clear this interrupt/error and advance to the next.
*/ CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); } } static void xl_intr(void *arg) { struct xl_softc *sc = arg; struct ifnet *ifp = sc->xl_ifp; u_int16_t status; XL_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { XL_UNLOCK(sc); return; } #endif for (;;) { status = CSR_READ_2(sc, XL_STATUS); if ((status & XL_INTRS) == 0 || status == 0xFFFF) break; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; if (status & XL_STAT_UP_COMPLETE) { if (xl_rxeof(sc) == 0) { while (xl_rx_resync(sc)) xl_rxeof(sc); } } if (status & XL_STAT_DOWN_COMPLETE) { if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); } if (status & XL_STAT_TX_COMPLETE) { ifp->if_oerrors++; xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); break; } if (status & XL_STAT_STATSOFLOW) xl_stats_update(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && ifp->if_drv_flags & IFF_DRV_RUNNING) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } XL_UNLOCK(sc); } #ifdef DEVICE_POLLING static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; int rx_npkts = 0; XL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = xl_poll_locked(ifp, cmd, count); XL_UNLOCK(sc); return (rx_npkts); } static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; int rx_npkts; XL_LOCK_ASSERT(sc); sc->rxcycles = count; rx_npkts = xl_rxeof(sc); if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } if (cmd == POLL_AND_CHECK_STATUS) { u_int16_t status; status = CSR_READ_2(sc, XL_STATUS); if (status & XL_INTRS && status != 0xFFFF) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if (status & XL_STAT_TX_COMPLETE) { ifp->if_oerrors++; xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); } if (status & XL_STAT_STATSOFLOW) xl_stats_update(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static void xl_tick(void *xsc) { struct xl_softc *sc = xsc; struct mii_data *mii; XL_LOCK_ASSERT(sc); if (sc->xl_miibus != NULL) { mii = device_get_softc(sc->xl_miibus); mii_tick(mii); } xl_stats_update(sc); if (xl_watchdog(sc) == EJUSTRETURN) return; callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc); } static void xl_stats_update(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; struct xl_stats xl_stats; u_int8_t *p; int i; XL_LOCK_ASSERT(sc); bzero((char *)&xl_stats, sizeof(struct xl_stats)); p = (u_int8_t *)&xl_stats; /* Read all the stats registers. */ XL_SEL_WIN(6); for (i = 0; i < 16; i++) *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); ifp->if_ierrors += xl_stats.xl_rx_overrun; ifp->if_collisions += xl_stats.xl_tx_multi_collision + xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision; /* * Boomerang and cyclone chips have an extra stats counter * in window 4 (BadSSD). We have to read this too in order * to clear out all the stats registers and avoid a statsoflow * interrupt. */ XL_SEL_WIN(4); CSR_READ_1(sc, XL_W4_BADSSD); XL_SEL_WIN(7); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. 
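/*
 * xl_encap() below copies each DMA segment into a fragment slot and
 * ORs a terminator bit into the last fragment's length so the chip
 * knows where the frame ends.  A minimal sketch of that loop; the
 * constants and layout are invented, and nseg is assumed to fit the
 * fragment array, as the real code guarantees via the DMA tag.
 */
#include <stdint.h>
#include <stdio.h>

#define	MAXFRAGS	4
#define	LAST_FRAG	0x80000000u	/* hypothetical terminator bit */

struct frag {
	uint32_t	addr;
	uint32_t	len;
};

static uint32_t
build_frags(struct frag *f, const uint32_t *addrs, const uint32_t *lens,
    int nseg)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < nseg; i++) {
		f[i].addr = addrs[i];
		f[i].len = lens[i];
		total += lens[i];
	}
	f[nseg - 1].len |= LAST_FRAG;	/* mark the end of the chain */
	return (total);
}

int
main(void)
{
	struct frag f[MAXFRAGS];
	uint32_t addrs[2] = { 0x1000, 0x2000 }, lens[2] = { 64, 100 };

	printf("total=%u last len=%#x\n",
	    (unsigned)build_frags(f, addrs, lens, 2), (unsigned)f[1].len);
	return (0);
}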
*/ static int xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head) { struct mbuf *m_new; struct ifnet *ifp = sc->xl_ifp; int error, i, nseg, total_len; u_int32_t status; XL_LOCK_ASSERT(sc); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } /* * Handle special case: we used up all 63 fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (error) { m_new = m_collapse(*m_head, M_DONTWAIT, XL_MAXFRAGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(*m_head); *m_head = NULL; if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } } KASSERT(nseg <= XL_MAXFRAGS, ("%s: too many DMA segments (%d)", __func__, nseg)); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE); total_len = 0; for (i = 0; i < nseg; i++) { KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES, ("segment size too large")); c->xl_ptr->xl_frag[i].xl_addr = htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr); c->xl_ptr->xl_frag[i].xl_len = htole32(sc->xl_cdata.xl_tx_segs[i].ds_len); total_len += sc->xl_cdata.xl_tx_segs[i].ds_len; } c->xl_ptr->xl_frag[nseg - 1].xl_len |= htole32(XL_LAST_FRAG); if (sc->xl_type == XL_TYPE_905B) { status = XL_TXSTAT_RND_DEFEAT; #ifndef XL905B_TXCSUM_BROKEN if ((*m_head)->m_pkthdr.csum_flags) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) status |= XL_TXSTAT_IPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) status |= XL_TXSTAT_TCPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) status |= XL_TXSTAT_UDPCKSUM; } #endif } else status = total_len; c->xl_ptr->xl_status = htole32(status); c->xl_ptr->xl_next = 0; c->xl_mbuf = *m_head; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void xl_start(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; XL_LOCK(sc); if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); XL_UNLOCK(sc); } static void xl_start_locked(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct mbuf *m_head; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; struct xl_chain *prev_tx; int error; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; /* * Check for an available queue slot. If there are none, * punt. */ if (sc->xl_cdata.xl_tx_free == NULL) { xl_txeoc(sc); xl_txeof(sc); if (sc->xl_cdata.xl_tx_free == NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } } start_tx = sc->xl_cdata.xl_tx_free; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->xl_cdata.xl_tx_free != NULL;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ prev_tx = cur_tx; cur_tx = sc->xl_cdata.xl_tx_free; /* Pack the data into the descriptor. 
*/ error = xl_encap(sc, cur_tx, &m_head); if (error) { cur_tx = prev_tx; if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } sc->xl_cdata.xl_tx_free = cur_tx->xl_next; cur_tx->xl_next = NULL; /* Chain it together. */ if (prev != NULL) { prev->xl_next = cur_tx; prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->xl_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); /* * Queue the packets. If the TX channel is clear, update * the downlist pointer register. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); if (sc->xl_cdata.xl_tx_head != NULL) { sc->xl_cdata.xl_tx_tail->xl_next = start_tx; sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = htole32(start_tx->xl_phys); sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &= htole32(~XL_TXSTAT_DL_INTR); sc->xl_cdata.xl_tx_tail = cur_tx; } else { sc->xl_cdata.xl_tx_head = start_tx; sc->xl_cdata.xl_tx_tail = cur_tx; } bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); XL_SEL_WIN(7); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; /* * XXX Under certain conditions, usually on slower machines * where interrupts may be dropped, it's possible for the * adapter to chew up all the buffers in the receive ring * and stall, without us being able to do anything about it. * To guard against this, we need to make a pass over the * RX queue to make sure there aren't any packets pending. * Doing it here means we can flush the receive ring at the * same time the chip is DMAing the transmit descriptors we * just gave it. * * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) * nature of their chips in all their marketing literature; * we may as well take advantage of it. :) */ taskqueue_enqueue(taskqueue_swi, &sc->xl_task); } static void xl_start_90xB_locked(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct mbuf *m_head; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; struct xl_chain *prev_tx; int error, idx; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; idx = sc->xl_cdata.xl_tx_prod; start_tx = &sc->xl_cdata.xl_tx_chain[idx]; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) { if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; prev_tx = cur_tx; cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; /* Pack the data into the descriptor. */ error = xl_encap(sc, cur_tx, &m_head); if (error) { cur_tx = prev_tx; if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } /* Chain it together. */ if (prev != NULL) prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. 
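/*
 * xl_start_locked() above (and the 90xB variant below) request a
 * "download complete" interrupt only in the final descriptor of each
 * batch, so a burst of frames costs one interrupt instead of one per
 * frame.  The idea in isolation; the flag value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define	DESC_INTR	0x8000u		/* hypothetical "interrupt me" bit */

int
main(void)
{
	uint16_t status[4] = { 0, 0, 0, 0 };
	int queued = 4, i;

	status[queued - 1] |= DESC_INTR;	/* one interrupt per batch */
	for (i = 0; i < queued; i++)
		printf("desc %d status %#x\n", i, (unsigned)status[i]);
	return (0);
}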
*/ BPF_MTAP(ifp, cur_tx->xl_mbuf); XL_INC(idx, XL_TX_LIST_CNT); sc->xl_cdata.xl_tx_cnt++; } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); /* Start transmission */ sc->xl_cdata.xl_tx_prod = idx; start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; } static void xl_init(void *xsc) { struct xl_softc *sc = xsc; XL_LOCK(sc); xl_init_locked(sc); XL_UNLOCK(sc); } static void xl_init_locked(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; int error, i; struct mii_data *mii = NULL; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ xl_stop(sc); /* Reset the chip to a known state. */ xl_reset(sc); if (sc->xl_miibus == NULL) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); DELAY(10000); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); /* * Clear WOL status and disable all WOL features, as WOL * would interfere with Rx operation under normal environments. */ if ((sc->xl_flags & XL_FLAG_WOL) != 0) { XL_SEL_WIN(7); CSR_READ_2(sc, XL_W7_BM_PME); CSR_WRITE_2(sc, XL_W7_BM_PME, 0); } /* Init our MAC address */ XL_SEL_WIN(2); for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, IF_LLADDR(sc->xl_ifp)[i]); } /* Clear the station mask. */ for (i = 0; i < 3; i++) CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); #ifdef notdef /* Reset TX and RX. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif /* Init circular RX list. */ error = xl_list_rx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n", error); xl_stop(sc); return; } /* Init TX descriptors. */ if (sc->xl_type == XL_TYPE_905B) error = xl_list_tx_init_90xB(sc); else error = xl_list_tx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n", error); xl_stop(sc); return; } /* * Set the TX freethresh value. * Note that this has no effect on 3c905B "cyclone" * cards but is required for 3c900/3c905 "boomerang" * cards in order to enable the download engine. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); /* * If this is a 3c905B, also set the tx reclaim threshold. * This helps cut down on the number of tx reclaim errors * that could happen on a busy network. The chip multiplies * the register value by 16 to obtain the actual threshold * in bytes, so we divide by 16 when setting the value here. * The existing threshold value can be examined by reading * the register at offset 9 in window 5. */ if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } /* Set RX filter bits. */ xl_rxfilter(sc); /* * Load the address of the RX list.
We have to * stall the upload engine before we can manipulate * the uplist pointer register, then unstall it when * we're finished. We also have to wait for the * stall command to complete before proceeding. * Note that we have to do this after any RX resets * have completed since the uplist register is cleared * by a reset. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { /* Set polling interval */ CSR_WRITE_1(sc, XL_DOWN_POLL, 64); /* Load the address of the TX list */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_chain[0].xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); xl_wait(sc); } /* * If the coax transceiver is on, make sure to enable * the DC-DC converter. */ XL_SEL_WIN(3); if (sc->xl_xcvr == XL_XCVR_COAX) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); /* * increase packet size to allow reception of 802.1q or ISL packets. * For the 3c90x chip, set the 'allow large packets' bit in the MAC * control register. For 3c90xB/C chips, use the RX packet size * register. */ if (sc->xl_type == XL_TYPE_905B) CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE); else { u_int8_t macctl; macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); macctl |= XL_MACCTRL_ALLOW_LARGE_PACK; CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); } /* Clear out the stats counters. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); xl_stats_update(sc); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); /* * Enable interrupts. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); else #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Set the RX early threshold */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); xl_wait(sc); /* XXX Downcall to miibus. */ if (mii != NULL) mii_mediachg(mii); /* Select window 7 for normal operations. */ XL_SEL_WIN(7); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->xl_wdog_timer = 0; callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc); } /* * Set media options. */ static int xl_ifmedia_upd(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct ifmedia *ifm = NULL; struct mii_data *mii = NULL; XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) ifm = &sc->ifmedia; else ifm = &mii->mii_media; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_FX: case IFM_10_FL: case IFM_10_2: case IFM_10_5: xl_setmode(sc, ifm->ifm_media); XL_UNLOCK(sc); return (0); } if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); } else { xl_setmode(sc, ifm->ifm_media); } XL_UNLOCK(sc); return (0); } /* * Report current media status. 
*/ static void xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct xl_softc *sc = ifp->if_softc; u_int32_t icfg; u_int16_t status = 0; struct mii_data *mii = NULL; XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; icfg >>= XL_ICFG_CONNECTOR_BITS; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_status = IFM_AVALID; if ((status & XL_MEDIASTAT_CARRIER) == 0) ifmr->ifm_status |= IFM_ACTIVE; switch (icfg) { case XL_XCVR_10BT: ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { ifmr->ifm_active = IFM_ETHER|IFM_10_FL; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else ifmr->ifm_active = IFM_ETHER|IFM_10_5; break; case XL_XCVR_COAX: ifmr->ifm_active = IFM_ETHER|IFM_10_2; break; /* * XXX MII and BTX/AUTO should be separate cases. */ case XL_XCVR_100BTX: case XL_XCVR_AUTO: case XL_XCVR_MII: if (mii != NULL) { mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } break; case XL_XCVR_100BFX: ifmr->ifm_active = IFM_ETHER|IFM_100_FX; break; default: if_printf(ifp, "unknown XCVR type: %d\n", icfg); break; } XL_UNLOCK(sc); } static int xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct xl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0, mask; struct mii_data *mii = NULL; switch (command) { case SIOCSIFFLAGS: XL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && (ifp->if_flags ^ sc->xl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) xl_rxfilter(sc); else xl_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) xl_stop(sc); } sc->xl_if_flags = ifp->if_flags; XL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX Downcall from if_addmulti() possibly with locks held. */ XL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) xl_rxfilter(sc); XL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); else error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0 && (ifp->if_capabilities & IFCAP_POLLING) != 0) { ifp->if_capenable ^= IFCAP_POLLING; if ((ifp->if_capenable & IFCAP_POLLING) != 0) { error = ether_poll_register(xl_poll, ifp); if (error) break; XL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); ifp->if_capenable |= IFCAP_POLLING; XL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. 
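/*
 * The SIOCSIFCAP handler above computes mask = requested ^ enabled and
 * flips only bits that both changed and are actually supported.  The
 * same XOR dance in isolation, with made-up capability bits.
 */
#include <stdio.h>

#define	CAP_TXCSUM	0x1u
#define	CAP_RXCSUM	0x2u
#define	CAP_POLLING	0x4u

int
main(void)
{
	unsigned capabilities = CAP_TXCSUM | CAP_RXCSUM;	/* supported */
	unsigned capenable = CAP_TXCSUM;			/* current */
	unsigned reqcap = CAP_RXCSUM | CAP_POLLING;		/* requested */
	unsigned mask = reqcap ^ capenable;

	if ((mask & CAP_TXCSUM) && (capabilities & CAP_TXCSUM))
		capenable ^= CAP_TXCSUM;
	if ((mask & CAP_RXCSUM) && (capabilities & CAP_RXCSUM))
		capenable ^= CAP_RXCSUM;
	/* CAP_POLLING changed but is unsupported, so it stays off. */
	printf("capenable=%#x\n", capenable);	/* prints 0x2 */
	return (0);
}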
*/ XL_LOCK(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK | 0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB | XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); XL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ XL_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= XL905B_CSUM_FEATURES; else ifp->if_hwassist &= ~XL905B_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; XL_UNLOCK(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int xl_watchdog(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; u_int16_t status = 0; int misintr; XL_LOCK_ASSERT(sc); if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0) return (0); xl_rxeof(sc); xl_txeoc(sc); misintr = 0; if (sc->xl_type == XL_TYPE_905B) { xl_txeof_90xB(sc); if (sc->xl_cdata.xl_tx_cnt == 0) misintr++; } else { xl_txeof(sc); if (sc->xl_cdata.xl_tx_head == NULL) misintr++; } if (misintr != 0) { device_printf(sc->xl_dev, "watchdog timeout (missed Tx interrupts) -- recovering\n"); return (0); } ifp->if_oerrors++; XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); device_printf(sc->xl_dev, "watchdog timeout\n"); if (status & XL_MEDIASTAT_CARRIER) device_printf(sc->xl_dev, "no carrier - transceiver cable problem?\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } return (EJUSTRETURN); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void xl_stop(struct xl_softc *sc) { register int i; struct ifnet *ifp = sc->xl_ifp; XL_LOCK_ASSERT(sc); sc->xl_wdog_timer = 0; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); DELAY(800); #ifdef foo CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Stop the stats updater. */ callout_stop(&sc->xl_tick_callout); /* * Free data in the RX lists. */ for (i = 0; i < XL_RX_LIST_CNT; i++) { if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf); sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_rx_list != NULL) bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ); /* * Free the TX list buffers. 
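/*
 * xl_watchdog() above is driven by a soft counter: the start routines
 * arm xl_wdog_timer, each tick decrements it, and only the transition
 * to zero triggers recovery, after first re-running the reclaim
 * routines to rule out a merely missed interrupt.  The shape in
 * miniature; tx_pending() is a made-up stand-in for the reclaim check.
 */
#include <stdbool.h>
#include <stdio.h>

static int wdog_timer;		/* 0 means "not armed" */

static bool
tx_pending(void)
{
	return (true);		/* pretend a frame is still unreclaimed */
}

static void
tick(void)
{
	if (wdog_timer == 0 || --wdog_timer != 0)
		return;		/* not armed, or not yet expired */
	if (!tx_pending()) {
		printf("missed Tx interrupt -- recovered\n");
		return;
	}
	printf("watchdog timeout -- reinitializing\n");
}

int
main(void)
{
	int i;

	wdog_timer = 5;		/* armed when a frame is queued */
	for (i = 0; i < 5; i++)
		tick();
	return (0);
}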
*/ for (i = 0; i < XL_TX_LIST_CNT; i++) { if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf); sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_tx_list != NULL) bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int xl_shutdown(device_t dev) { return (xl_suspend(dev)); } static int xl_suspend(device_t dev) { struct xl_softc *sc; sc = device_get_softc(dev); XL_LOCK(sc); xl_stop(sc); xl_setwol(sc); XL_UNLOCK(sc); return (0); } static int xl_resume(device_t dev) { struct xl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->xl_ifp; XL_LOCK(sc); if (ifp->if_flags & IFF_UP) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); } XL_UNLOCK(sc); return (0); } static void xl_setwol(struct xl_softc *sc) { struct ifnet *ifp; u_int16_t cfg, pmstat; if ((sc->xl_flags & XL_FLAG_WOL) == 0) return; ifp = sc->xl_ifp; XL_SEL_WIN(7); /* Clear any pending PME events. */ CSR_READ_2(sc, XL_W7_BM_PME); cfg = 0; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) cfg |= XL_BM_PME_MAGIC; CSR_WRITE_2(sc, XL_W7_BM_PME, cfg); /* Enable RX. */ if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); /* Request PME. */ pmstat = pci_read_config(sc->xl_dev, sc->xl_pmcap + PCIR_POWER_STATUS, 2); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) pmstat |= PCIM_PSTAT_PMEENABLE; else pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->xl_dev, sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2); } Index: projects/nand/sys/fs/ext2fs/ext2_vfsops.c =================================================================== --- projects/nand/sys/fs/ext2fs/ext2_vfsops.c (revision 235262) +++ projects/nand/sys/fs/ext2fs/ext2_vfsops.c (revision 235263) @@ -1,1113 +1,1111 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1989, 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_vfsops.c 8.8 (Berkeley) 4/18/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td); static int ext2_mountfs(struct vnode *, struct mount *); static int ext2_reload(struct mount *mp, struct thread *td); static int ext2_sbupdate(struct ext2mount *, int); static int ext2_cgupdate(struct ext2mount *, int); static vfs_unmount_t ext2_unmount; static vfs_root_t ext2_root; static vfs_statfs_t ext2_statfs; static vfs_sync_t ext2_sync; static vfs_vget_t ext2_vget; static vfs_fhtovp_t ext2_fhtovp; static vfs_mount_t ext2_mount; MALLOC_DEFINE(M_EXT2NODE, "ext2_node", "EXT2 vnode private part"); static MALLOC_DEFINE(M_EXT2MNT, "ext2_mount", "EXT2 mount structure"); static struct vfsops ext2fs_vfsops = { .vfs_fhtovp = ext2_fhtovp, .vfs_mount = ext2_mount, .vfs_root = ext2_root, /* root inode via vget */ .vfs_statfs = ext2_statfs, .vfs_sync = ext2_sync, .vfs_unmount = ext2_unmount, .vfs_vget = ext2_vget, }; VFS_SET(ext2fs_vfsops, ext2fs, 0); static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly); static int compute_sb_data(struct vnode * devvp, struct ext2fs * es, struct m_ext2fs * fs); static const char *ext2_opts[] = { "acls", "async", "noatime", "noclusterr", "noclusterw", "noexec", "export", "force", "from", "multilabel", "suiddir", "nosymfollow", "sync", "union", NULL }; /* * VFS Operations. * * mount system call */ static int ext2_mount(struct mount *mp) { struct vfsoptlist *opts; struct vnode *devvp; struct thread *td; struct ext2mount *ump = 0; struct m_ext2fs *fs; struct nameidata nd, *ndp = &nd; accmode_t accmode; char *path, *fspec; int error, flags, len; td = curthread; opts = mp->mnt_optnew; if (vfs_filteropt(opts, ext2_opts)) return (EINVAL); vfs_getopt(opts, "fspath", (void **)&path, NULL); /* Double-check the length of path.. */ if (strlen(path) >= MAXMNTLEN - 1) return (ENAMETOOLONG); fspec = NULL; error = vfs_getopt(opts, "from", (void **)&fspec, &len); if (!error && fspec[len - 1] != '\0') return (EINVAL); /* * If updating, check whether changing from read-only to * read/write; if there is no device name, that's all we do. 
*/ if (mp->mnt_flag & MNT_UPDATE) { ump = VFSTOEXT2(mp); fs = ump->um_e2fs; error = 0; if (fs->e2fs_ronly == 0 && vfs_flagopt(opts, "ro", NULL, 0)) { error = VFS_SYNC(mp, MNT_WAIT); if (error) return (error); flags = WRITECLOSE; if (mp->mnt_flag & MNT_FORCE) flags |= FORCECLOSE; error = ext2_flushfiles(mp, flags, td); if ( error == 0 && fs->e2fs_wasvalid && ext2_cgupdate(ump, MNT_WAIT) == 0) { fs->e2fs->e2fs_state |= E2FS_ISCLEAN; ext2_sbupdate(ump, MNT_WAIT); } fs->e2fs_ronly = 1; vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY); DROP_GIANT(); g_topology_lock(); g_access(ump->um_cp, 0, -1, 0); g_topology_unlock(); PICKUP_GIANT(); } if (!error && (mp->mnt_flag & MNT_RELOAD)) error = ext2_reload(mp, td); if (error) return (error); devvp = ump->um_devvp; if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) { if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0)) return (EPERM); /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. */ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { VOP_UNLOCK(devvp, 0); return (error); } VOP_UNLOCK(devvp, 0); DROP_GIANT(); g_topology_lock(); error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); PICKUP_GIANT(); if (error) return (error); if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 || (fs->e2fs->e2fs_state & E2FS_ERRORS)) { if (mp->mnt_flag & MNT_FORCE) { printf( "WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt); } else { printf( "WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n", fs->e2fs_fsmnt); return (EPERM); } } fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; (void)ext2_cgupdate(ump, MNT_WAIT); fs->e2fs_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); } if (vfs_flagopt(opts, "export", NULL, 0)) { /* Process export requests in vfs_mount.c. */ return (error); } } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ if (fspec == NULL) return (EINVAL); NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); if ((error = namei(ndp)) != 0) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); devvp = ndp->ni_vp; if (!vn_isdisk(devvp, &error)) { vput(devvp); return (error); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. * * XXXRW: VOP_ACCESS() enough? */ accmode = VREAD; if ((mp->mnt_flag & MNT_RDONLY) == 0) accmode |= VWRITE; error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if ((mp->mnt_flag & MNT_UPDATE) == 0) { error = ext2_mountfs(devvp, mp); } else { if (devvp != ump->um_devvp) { vput(devvp); return (EINVAL); /* needs translation */ } else vput(devvp); } if (error) { vrele(devvp); return (error); } ump = VFSTOEXT2(mp); fs = ump->um_e2fs; /* * Note that this strncpy() is ok because of a check at the start * of ext2_mount(). 
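* (The "fspath" option was already checked against MAXMNTLEN when the * mount options were parsed, and the copy below still NUL-terminates the * buffer explicitly.)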
*/ strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN); fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0'; vfs_mountedfrom(mp, fspec); return (0); } static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly) { if (es->e2fs_magic != E2FS_MAGIC) { printf("ext2fs: %s: wrong magic number %#x (expected %#x)\n", devtoname(dev), es->e2fs_magic, E2FS_MAGIC); return (1); } if (es->e2fs_rev > E2FS_REV0) { if (es->e2fs_features_incompat & ~EXT2F_INCOMPAT_SUPP) { printf( "WARNING: mount of %s denied due to unsupported optional features\n", devtoname(dev)); return (1); } if (!ronly && (es->e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP)) { printf("WARNING: R/W mount of %s denied due to " "unsupported optional features\n", devtoname(dev)); return (1); } } return (0); } /* * This computes the fields of the ext2_sb_info structure from the * data in the ext2_super_block structure read in. */ static int compute_sb_data(struct vnode *devvp, struct ext2fs *es, struct m_ext2fs *fs) { int db_count, error; int i; int logic_sb_block = 1; /* XXX for now */ struct buf *bp; fs->e2fs_bsize = EXT2_MIN_BLOCK_SIZE << es->e2fs_log_bsize; fs->e2fs_bshift = EXT2_MIN_BLOCK_LOG_SIZE + es->e2fs_log_bsize; fs->e2fs_fsbtodb = es->e2fs_log_bsize + 1; fs->e2fs_qbmask = fs->e2fs_bsize - 1; fs->e2fs_blocksize_bits = es->e2fs_log_bsize + 10; fs->e2fs_fsize = EXT2_MIN_FRAG_SIZE << es->e2fs_log_fsize; if (fs->e2fs_fsize) fs->e2fs_fpb = fs->e2fs_bsize / fs->e2fs_fsize; fs->e2fs_bpg = es->e2fs_bpg; fs->e2fs_fpg = es->e2fs_fpg; fs->e2fs_ipg = es->e2fs_ipg; if (es->e2fs_rev == E2FS_REV0) { fs->e2fs_first_inode = EXT2_FIRSTINO; fs->e2fs_isize = E2FS_REV0_INODE_SIZE ; } else { fs->e2fs_first_inode = es->e2fs_first_ino; fs->e2fs_isize = es->e2fs_inode_size; /* * Simple sanity check for superblock inode size value. */ if (EXT2_INODE_SIZE(fs) < E2FS_REV0_INODE_SIZE || EXT2_INODE_SIZE(fs) > fs->e2fs_bsize || (fs->e2fs_isize & (fs->e2fs_isize - 1)) != 0) { printf("ext2fs: invalid inode size %d\n", fs->e2fs_isize); return (EIO); } } /* Check for extra isize in big inodes. */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT4F_ROCOMPAT_EXTRA_ISIZE) && EXT2_INODE_SIZE(fs) < sizeof(struct ext2fs_dinode)) { printf("ext2fs: no space for extra inode timestamps\n"); return (EINVAL); } fs->e2fs_ipb = fs->e2fs_bsize / EXT2_INODE_SIZE(fs); fs->e2fs_itpg = fs->e2fs_ipg /fs->e2fs_ipb; fs->e2fs_descpb = fs->e2fs_bsize / sizeof(struct ext2_gd); /* s_resuid / s_resgid ? */ fs->e2fs_gcount = (es->e2fs_bcount - es->e2fs_first_dblock + EXT2_BLOCKS_PER_GROUP(fs) - 1) / EXT2_BLOCKS_PER_GROUP(fs); db_count = (fs->e2fs_gcount + EXT2_DESC_PER_BLOCK(fs) - 1) / EXT2_DESC_PER_BLOCK(fs); fs->e2fs_gdbcount = db_count; fs->e2fs_gd = malloc(db_count * fs->e2fs_bsize, M_EXT2MNT, M_WAITOK); fs->e2fs_contigdirs = malloc(fs->e2fs_gcount * sizeof(*fs->e2fs_contigdirs), M_EXT2MNT, M_WAITOK); /* * Adjust logic_sb_block. * Godmar thinks: if the blocksize is greater than 1024, then * the superblock is logically part of block zero. 
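* In other words: with a 1024-byte block size the superblock occupies * block 1 and the group descriptors start at block 2, while with larger * blocks the superblock lives inside block 0, so reading the descriptors * at logic_sb_block + i + 1 is correct in both cases.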
*/ if(fs->e2fs_bsize > SBSIZE) logic_sb_block = 0; for (i = 0; i < db_count; i++) { error = bread(devvp , fsbtodb(fs, logic_sb_block + i + 1 ), fs->e2fs_bsize, NOCRED, &bp); if (error) { free(fs->e2fs_gd, M_EXT2MNT); brelse(bp); return (error); } e2fs_cgload((struct ext2_gd *)bp->b_data, &fs->e2fs_gd[ i * fs->e2fs_bsize / sizeof(struct ext2_gd)], fs->e2fs_bsize); brelse(bp); bp = NULL; } fs->e2fs_total_dir = 0; for (i=0; i < fs->e2fs_gcount; i++){ fs->e2fs_total_dir += fs->e2fs_gd[i].ext2bgd_ndirs; fs->e2fs_contigdirs[i] = 0; } if (es->e2fs_rev == E2FS_REV0 || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_LARGEFILE)) fs->e2fs_maxfilesize = 0x7fffffff; else fs->e2fs_maxfilesize = 0x7fffffffffffffff; return (0); } /* * Reload all incore data for a filesystem (used after running fsck on * the root filesystem and finding things to fix). The filesystem must * be mounted read-only. * * Things to do to update the mount: * 1) invalidate all cached meta-data. * 2) re-read superblock from disk. * 3) invalidate all cluster summary information. * 4) invalidate all inactive vnodes. * 5) invalidate all cached file data. * 6) re-read inode data for all active vnodes. * XXX we are missing some steps, in particular # 3, this has to be reviewed. */ static int ext2_reload(struct mount *mp, struct thread *td) { struct vnode *vp, *mvp, *devvp; struct inode *ip; struct buf *bp; struct ext2fs *es; struct m_ext2fs *fs; struct csum *sump; int error, i; int32_t *lp; if ((mp->mnt_flag & MNT_RDONLY) == 0) return (EINVAL); /* * Step 1: invalidate all cached meta-data. */ devvp = VFSTOEXT2(mp)->um_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); if (vinvalbuf(devvp, 0, 0, 0) != 0) panic("ext2_reload: dirty1"); VOP_UNLOCK(devvp, 0); /* * Step 2: re-read superblock from disk. * constants have been adjusted for ext2 */ if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) return (error); es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, devvp->v_rdev, 0) != 0) { brelse(bp); return (EIO); /* XXX needs translation */ } fs = VFSTOEXT2(mp)->um_e2fs; bcopy(bp->b_data, fs->e2fs, sizeof(struct ext2fs)); if((error = compute_sb_data(devvp, es, fs)) != 0) { brelse(bp); return (error); } #ifdef UNKLAR if (fs->fs_sbsize < SBSIZE) bp->b_flags |= B_INVAL; #endif brelse(bp); /* * Step 3: invalidate all cluster summary information. */ if (fs->e2fs_contigsumsize > 0) { lp = fs->e2fs_maxcluster; sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) { *lp++ = fs->e2fs_contigsumsize; sump->cs_init = 0; bzero(sump->cs_sum, fs->e2fs_contigsumsize + 1); } } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Step 4: invalidate all cached file data. */ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } if (vinvalbuf(vp, 0, 0, 0)) panic("ext2_reload: dirty2"); /* * Step 5: re-read inode data for all active vnodes. */ ip = VTOI(vp); error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { VOP_UNLOCK(vp, 0); vrele(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } ext2_ei2i((struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)), ip); brelse(bp); VOP_UNLOCK(vp, 0); vrele(vp); } return (0); } /* * Common code for mount and mountroot. 
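* ext2_mountfs() opens the device through GEOM, validates the superblock * with ext2_check_sb_compat(), fills in the in-core m_ext2fs via * compute_sb_data(), and sets up the cluster summary arrays used by the * block allocator.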
*/ static int ext2_mountfs(struct vnode *devvp, struct mount *mp) { struct ext2mount *ump; struct buf *bp; struct m_ext2fs *fs; struct ext2fs *es; struct cdev *dev = devvp->v_rdev; struct g_consumer *cp; struct bufobj *bo; struct csum *sump; int error; int ronly; int i, size; int32_t *lp; ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0); /* XXX: use VOP_ACESS to check FS perms */ DROP_GIANT(); g_topology_lock(); error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1); g_topology_unlock(); PICKUP_GIANT(); VOP_UNLOCK(devvp, 0); if (error) return (error); /* XXX: should we check for some sectorsize or 512 instead? */ if (((SBSIZE % cp->provider->sectorsize) != 0) || (SBSIZE < cp->provider->sectorsize)) { DROP_GIANT(); g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); PICKUP_GIANT(); return (EINVAL); } bo = &devvp->v_bufobj; bo->bo_private = cp; bo->bo_ops = g_vfs_bufops; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; bp = NULL; ump = NULL; if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) goto out; es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, dev, ronly) != 0) { error = EINVAL; /* XXX needs translation */ goto out; } if ((es->e2fs_state & E2FS_ISCLEAN) == 0 || (es->e2fs_state & E2FS_ERRORS)) { if (ronly || (mp->mnt_flag & MNT_FORCE)) { printf( "WARNING: Filesystem was not properly dismounted\n"); } else { printf( "WARNING: R/W mount denied. Filesystem is not clean - run fsck\n"); error = EPERM; goto out; } } ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO); /* * I don't know whether this is the right strategy. Note that * we dynamically allocate both an ext2_sb_info and an ext2_super_block * while Linux keeps the super block in a locked buffer. */ ump->um_e2fs = malloc(sizeof(struct m_ext2fs), M_EXT2MNT, M_WAITOK); ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs), M_EXT2MNT, M_WAITOK); mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF); bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs)); if ((error = compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs))) goto out; /* * Calculate the maximum contiguous blocks and size of cluster summary * array. In FFS this is done by newfs; however, the superblock * in ext2fs doesn't have these variables, so we can calculate * them here. */ ump->um_e2fs->e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize); if (ump->um_e2fs->e2fs_maxcontig > 0) ump->um_e2fs->e2fs_contigsumsize = MIN(ump->um_e2fs->e2fs_maxcontig, EXT2_MAXCONTIG); else ump->um_e2fs->e2fs_contigsumsize = 0; if (ump->um_e2fs->e2fs_contigsumsize > 0) { size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t); ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK); size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum); ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK); lp = ump->um_e2fs->e2fs_maxcluster; sump = ump->um_e2fs->e2fs_clustersum; for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) { *lp++ = ump->um_e2fs->e2fs_contigsumsize; sump->cs_init = 0; sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) * sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO); } } brelse(bp); bp = NULL; fs = ump->um_e2fs; fs->e2fs_ronly = ronly; /* ronly is set according to mnt_flags */ /* * If the fs is not mounted read-only, make sure the super block is * always written back on a sync(). */ fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 
1 : 0; if (ronly == 0) { fs->e2fs_fmod = 1; /* mark it modified */ fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; /* set fs invalid */ } mp->mnt_data = ump; mp->mnt_stat.f_fsid.val[0] = dev2udev(dev); mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); ump->um_mountp = mp; ump->um_dev = dev; ump->um_devvp = devvp; ump->um_bo = &devvp->v_bufobj; ump->um_cp = cp; /* * Setting these two parameters allows us to use * ufs_bmap without changes! */ ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs); ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1; ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs); if (ronly == 0) ext2_sbupdate(ump, MNT_WAIT); /* * Initialize filesystem stat information in mount struct. */ MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED; MNT_IUNLOCK(mp); return (0); out: if (bp) brelse(bp); if (cp != NULL) { DROP_GIANT(); g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); PICKUP_GIANT(); } if (ump) { mtx_destroy(EXT2_MTX(ump)); free(ump->um_e2fs->e2fs_gd, M_EXT2MNT); free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT); free(ump->um_e2fs->e2fs, M_EXT2MNT); free(ump->um_e2fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; } return (error); } /* * Unmount system call. */ static int ext2_unmount(struct mount *mp, int mntflags) { struct ext2mount *ump; struct m_ext2fs *fs; struct csum *sump; int error, flags, i, ronly; flags = 0; if (mntflags & MNT_FORCE) { if (mp->mnt_flag & MNT_ROOTFS) return (EINVAL); flags |= FORCECLOSE; } if ((error = ext2_flushfiles(mp, flags, curthread)) != 0) return (error); ump = VFSTOEXT2(mp); fs = ump->um_e2fs; ronly = fs->e2fs_ronly; if (ronly == 0 && ext2_cgupdate(ump, MNT_WAIT) == 0) { if (fs->e2fs_wasvalid) fs->e2fs->e2fs_state |= E2FS_ISCLEAN; ext2_sbupdate(ump, MNT_WAIT); } DROP_GIANT(); g_topology_lock(); g_vfs_close(ump->um_cp); g_topology_unlock(); PICKUP_GIANT(); vrele(ump->um_devvp); sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) free(sump->cs_sum, M_EXT2MNT); free(fs->e2fs_clustersum, M_EXT2MNT); free(fs->e2fs_maxcluster, M_EXT2MNT); free(fs->e2fs_gd, M_EXT2MNT); free(fs->e2fs_contigdirs, M_EXT2MNT); free(fs->e2fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); return (error); } /* * Flush out all the files in a filesystem. */ static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td) { int error; error = vflush(mp, 0, flags, td); return (error); } /* * Get file system statistics.
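* The overhead subtracted from f_blocks below accounts for the area * before the first data block plus, per group, the block bitmap, the * inode bitmap and the inode table, and, for every group carrying a * superblock backup, one superblock copy and the group descriptor blocks * (including reserved ones when online resizing is enabled).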
*/ int ext2_statfs(struct mount *mp, struct statfs *sbp) { struct ext2mount *ump; struct m_ext2fs *fs; uint32_t overhead, overhead_per_group, ngdb; int i, ngroups; ump = VFSTOEXT2(mp); fs = ump->um_e2fs; if (fs->e2fs->e2fs_magic != E2FS_MAGIC) panic("ext2fs_statvfs"); /* * Compute the overhead (FS structures) */ overhead_per_group = 1 /* block bitmap */ + 1 /* inode bitmap */ + fs->e2fs_itpg; overhead = fs->e2fs->e2fs_first_dblock + fs->e2fs_gcount * overhead_per_group; if (fs->e2fs->e2fs_rev > E2FS_REV0 && fs->e2fs->e2fs_features_rocompat & EXT2F_ROCOMPAT_SPARSESUPER) { for (i = 0, ngroups = 0; i < fs->e2fs_gcount; i++) { if (cg_has_sb(i)) ngroups++; } } else { ngroups = fs->e2fs_gcount; } ngdb = fs->e2fs_gdbcount; if (fs->e2fs->e2fs_rev > E2FS_REV0 && fs->e2fs->e2fs_features_compat & EXT2F_COMPAT_RESIZE) ngdb += fs->e2fs->e2fs_reserved_ngdb; overhead += ngroups * (1 /* superblock */ + ngdb); sbp->f_bsize = EXT2_FRAG_SIZE(fs); sbp->f_iosize = EXT2_BLOCK_SIZE(fs); sbp->f_blocks = fs->e2fs->e2fs_bcount - overhead; sbp->f_bfree = fs->e2fs->e2fs_fbcount; sbp->f_bavail = sbp->f_bfree - fs->e2fs->e2fs_rbcount; sbp->f_files = fs->e2fs->e2fs_icount; sbp->f_ffree = fs->e2fs->e2fs_ficount; return (0); } /* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Note: we are always called with the filesystem marked `MPBUSY'. */ static int ext2_sync(struct mount *mp, int waitfor) { struct vnode *mvp, *vp; struct thread *td; struct inode *ip; struct ext2mount *ump = VFSTOEXT2(mp); struct m_ext2fs *fs; int error, allerror = 0; td = curthread; fs = ump->um_e2fs; if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) { /* XXX */ printf("fs = %s\n", fs->e2fs_fsmnt); panic("ext2_sync: rofs mod"); } /* * Write back each (modified) inode. */ - MNT_ILOCK(mp); loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && (vp->v_bufobj.bo_dirty.bv_cnt == 0 || waitfor == MNT_LAZY)) { VI_UNLOCK(vp); continue; } error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td); if (error) { - MNT_ILOCK(mp); if (error == ENOENT) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } if ((error = VOP_FSYNC(vp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(vp, 0); vrele(vp); } /* * Force stale file system control information to be flushed. */ if (waitfor != MNT_LAZY) { vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); if ((error = VOP_FSYNC(ump->um_devvp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(ump->um_devvp, 0); } /* * Write back modified superblock. */ if (fs->e2fs_fmod != 0) { fs->e2fs_fmod = 0; fs->e2fs->e2fs_wtime = time_second; if ((error = ext2_cgupdate(ump, waitfor)) != 0) allerror = error; } return (allerror); } /* * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it * in from disk. If it is in core, wait for the lock bit to clear, then * return the inode locked. Detection and handling of mount points must be * done by the calling routine. 
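* The sequence below: consult the vfs hash first; otherwise allocate the * in-core inode and vnode, insert them into the mount list and the hash * (which can lose a race and hand back an existing vnode), then read the * on-disk dinode and finish initialization through ext2_vinit().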
*/ static int ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { struct m_ext2fs *fs; struct inode *ip; struct ext2mount *ump; struct buf *bp; struct vnode *vp; struct cdev *dev; struct thread *td; int i, error; int used_blocks; td = curthread; error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); ump = VFSTOEXT2(mp); dev = ump->um_dev; /* * If this malloc() is performed after the getnewvnode() * it might block, leaving a vnode with a NULL v_data to be * found by ext2_sync() if a sync happens to fire right then, * which will cause a panic because ext2_sync() blindly * dereferences vp->v_data (as well it should). */ ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO); /* Allocate a new vnode/inode. */ if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) { *vpp = NULL; free(ip, M_EXT2NODE); return (error); } vp->v_data = ip; ip->i_vnode = vp; ip->i_e2fs = fs = ump->um_e2fs; ip->i_ump = ump; ip->i_number = ino; lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); error = insmntque(vp, mp); if (error != 0) { free(ip, M_EXT2NODE); *vpp = NULL; return (error); } error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* Read in the disk contents for the inode, copy into the inode. */ if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { /* * The inode does not contain anything useful, so it would * be misleading to leave it on its hash chain. With mode * still zero, it will be unlinked and returned to the free * list by vput(). */ brelse(bp); vput(vp); *vpp = NULL; return (error); } /* convert ext2 inode to dinode */ ext2_ei2i((struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip); ip->i_block_group = ino_to_cg(fs, ino); ip->i_next_alloc_block = 0; ip->i_next_alloc_goal = 0; /* * Now we want to make sure that block pointers for unused * blocks are zeroed out - ext2_balloc depends on this * although for regular files and directories only */ if(S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode)) { used_blocks = (ip->i_size+fs->e2fs_bsize-1) / fs->e2fs_bsize; for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++) ip->i_db[i] = 0; } /* ext2_print_inode(ip); */ bqrelse(bp); /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. */ if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) { vput(vp); *vpp = NULL; return (error); } /* * Finish inode initialization. */ /* * Set up a generation number for this inode if it does not * already have one. This should only happen on old filesystems. */ if (ip->i_gen == 0) { ip->i_gen = random() / 2 + 1; if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) ip->i_flag |= IN_MODIFIED; } *vpp = vp; return (0); } /* * File handle to vnode * * Have to be really careful about stale file handles: * - check that the inode number is valid * - call ext2_vget() to get the locked inode * - check for an unallocated inode (i_mode == 0) * - check that the given client host has export rights and return * those rights via. 
exflagsp and credanonp */ static int ext2_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) { struct inode *ip; struct ufid *ufhp; struct vnode *nvp; struct m_ext2fs *fs; int error; ufhp = (struct ufid *)fhp; fs = VFSTOEXT2(mp)->um_e2fs; if (ufhp->ufid_ino < EXT2_ROOTINO || ufhp->ufid_ino > fs->e2fs_gcount * fs->e2fs->e2fs_ipg) return (ESTALE); error = VFS_VGET(mp, ufhp->ufid_ino, LK_EXCLUSIVE, &nvp); if (error) { *vpp = NULLVP; return (error); } ip = VTOI(nvp); if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen || ip->i_nlink <= 0) { vput(nvp); *vpp = NULLVP; return (ESTALE); } *vpp = nvp; vnode_create_vobject(*vpp, 0, curthread); return (0); } /* * Write a superblock and associated information back to disk. */ static int ext2_sbupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct ext2fs *es = fs->e2fs; struct buf *bp; int error = 0; bp = getblk(mp->um_devvp, SBLOCK, SBSIZE, 0, 0, 0); bcopy((caddr_t)es, bp->b_data, (u_int)sizeof(struct ext2fs)); if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); /* * The buffers for group descriptors, inode bitmaps and block bitmaps * are not busy at this point and are (hopefully) written by the * usual sync mechanism. No need to write them here. */ return (error); } int ext2_cgupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct buf *bp; int i, error = 0, allerror = 0; allerror = ext2_sbupdate(mp, waitfor); for (i = 0; i < fs->e2fs_gdbcount; i++) { bp = getblk(mp->um_devvp, fsbtodb(fs, fs->e2fs->e2fs_first_dblock + 1 /* superblock */ + i), fs->e2fs_bsize, 0, 0, 0); e2fs_cgsave(&fs->e2fs_gd[ i * fs->e2fs_bsize / sizeof(struct ext2_gd)], (struct ext2_gd *)bp->b_data, fs->e2fs_bsize); if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); } if (!allerror && error) allerror = error; return (allerror); } /* * Return the root of a filesystem. */ static int ext2_root(struct mount *mp, int flags, struct vnode **vpp) { struct vnode *nvp; int error; error = VFS_VGET(mp, EXT2_ROOTINO, LK_EXCLUSIVE, &nvp); if (error) return (error); *vpp = nvp; return (0); } Index: projects/nand/sys/geom/eli/g_eli_integrity.c =================================================================== --- projects/nand/sys/geom/eli/g_eli_integrity.c (revision 235262) +++ projects/nand/sys/geom/eli/g_eli_integrity.c (revision 235263) @@ -1,544 +1,545 @@ /*- * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The data layout description when integrity verification is configured. * * One of the most important assumptions here is that authenticated data and its * HMAC have to be stored in the same place (namely in the same sector) to make * it work reliably. * The problem is that file systems only work with sectors whose size is a * multiple of 512 bytes and a power of two. * My idea to implement it is as follows. * Let's store the HMAC in the sector. This is a must. That leaves us 480 bytes * for data. We can't use that directly (i.e. we can't create a provider with a * 480-byte sector size). We need another sector from which we take only 32 bytes * of data and store the HMAC of this data as well. This takes two sectors from the * original provider at the input and leaves us one sector of authenticated data * at the output. Not very efficient, but you get the idea. * Now, let's assume we want to create a provider with a 4096-byte sector. * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we * need nine 512-byte sectors at the input to get one 4096-byte sector at the * output. That's better. With a 4096-byte sector we can use 89% of the size of the * original provider. I find that an acceptable cost. * The reliability comes from the fact that every HMAC stored inside the sector * is calculated only over the data in the same sector, so it's impossible to * write new data and leave the old HMAC, or vice versa. * * And here is the picture: * * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ * |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b | * |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data | * +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ * |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes | * +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused| * +----------+ * da0.eli: +----+----+----+----+----+----+----+----+----+ * |480b|480b|480b|480b|480b|480b|480b|480b|256b| * +----+----+----+----+----+----+----+----+----+ * | 4096 bytes | * +--------------------------------------------+ * * PS. You can use any sector size with geli(8). My example uses 4kB because * it's the most efficient. For 8kB sectors you need 2 extra sectors, * so the cost is the same as for 4kB sectors.
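* * To double-check the arithmetic for the 4kB case: the input is nine * 512-byte sectors (4608 bytes) and the output is 8x480 + 1x256 = 4096 * bytes, so 4096/4608 is roughly 89%, which is where the efficiency * figure above comes from.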
*/ /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ MALLOC_DECLARE(M_ELI); /* * Here we generate the key for the HMAC. Every sector has its own HMAC key, so it is * not possible to copy sectors. * We cannot depend on the fact that every sector has its own IV, because a different * IV doesn't change the HMAC when we use the encrypt-then-authenticate method. */ static void g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key) { SHA256_CTX ctx; /* Copy precalculated SHA256 context. */ bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx)); SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset)); SHA256_Final(key, &ctx); } /* * The function is called after we read and decrypt data. * * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver */ static int g_eli_auth_read_done(struct cryptop *crp) { struct g_eli_softc *sc; struct bio *bp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { bp->bio_completed += crp->crp_olen; G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%jd completed=%jd).", bp->bio_inbed, bp->bio_children, (intmax_t)crp->crp_olen, (intmax_t)bp->bio_completed); } else { G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; g_eli_key_drop(sc, crp->crp_desc->crd_next->crd_key); /* * Do we have all sectors already? */ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error == 0) { u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; u_char *srcdata, *dstdata, *auth; off_t coroff, corsize; /* * Verify data integrity based on calculated and read HMACs. */ /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. */ lsec = sc->sc_bytes_per_sector / encr_secsize; srcdata = bp->bio_driver2; dstdata = bp->bio_data; auth = srcdata + encr_secsize * nsec; coroff = -1; corsize = 0; for (i = 1; i <= nsec; i++) { data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) data_secsize = decr_secsize % data_secsize; if (bcmp(srcdata, auth, sc->sc_alen) != 0) { /* * Corruption detected; remember the offset if * this is the first corrupted sector, and * increase the size. */ if (bp->bio_error == 0) bp->bio_error = -1; if (coroff == -1) { coroff = bp->bio_offset + (dstdata - (u_char *)bp->bio_data); } corsize += data_secsize; } else { /* * No corruption, good. * Report previous corruption if there was one.
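* Consecutive bad sectors are coalesced: coroff remembers where the * corrupt run began and corsize grows until a good sector is seen, so a * single message covers each contiguous damaged range.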
*/ if (coroff != -1) { - G_ELI_DEBUG(0, "%s: %jd bytes " - "corrupted at offset %jd.", + G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " + "bytes of data at offset %jd", sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); coroff = -1; corsize = 0; } bcopy(srcdata + sc->sc_alen, dstdata, data_secsize); } srcdata += encr_secsize; dstdata += data_secsize; auth += sc->sc_alen; } /* Report previous corruption if there was one. */ if (coroff != -1) { - G_ELI_DEBUG(0, "%s: %jd bytes corrupted at offset %jd.", + G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " + "bytes of data at offset %jd", sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); } } free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; if (bp->bio_error != 0) { if (bp->bio_error == -1) bp->bio_error = EINVAL; else { G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", bp->bio_error); } bp->bio_completed = 0; } /* * Read is finished, send it up. */ g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } /* * The function is called after data encryption. * * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver */ static int g_eli_auth_write_done(struct cryptop *crp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *bp, *cbp, *cbp2; u_int nsec; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", bp->bio_inbed, bp->bio_children); } else { G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; g_eli_key_drop(sc, crp->crp_desc->crd_key); /* * All sectors are already encrypted? */ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", bp->bio_error); free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; cbp = bp->bio_driver1; bp->bio_driver1 = NULL; g_destroy_bio(cbp); g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } cp = LIST_FIRST(&sc->sc_geom->consumer); cbp = bp->bio_driver1; bp->bio_driver1 = NULL; cbp->bio_to = cp->provider; cbp->bio_done = g_eli_write_done; /* Number of sectors from decrypted provider, eg. 1. */ nsec = bp->bio_length / bp->bio_to->sectorsize; /* Number of sectors from encrypted provider, eg. 9. */ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; cbp->bio_length = cp->provider->sectorsize * nsec; cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; cbp->bio_data = bp->bio_driver2; /* * We write more than what is requested, so we have to be ready to write * more than MAXPHYS. */ cbp2 = NULL; if (cbp->bio_length > MAXPHYS) { cbp2 = g_duplicate_bio(bp); cbp2->bio_length = cbp->bio_length - MAXPHYS; cbp2->bio_data = cbp->bio_data + MAXPHYS; cbp2->bio_offset = cbp->bio_offset + MAXPHYS; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_write_done; cbp->bio_length = MAXPHYS; } /* * Send encrypted data to the provider. */ G_ELI_LOGREQ(2, cbp, "Sending request."); bp->bio_inbed = 0; bp->bio_children = (cbp2 != NULL ? 
2 : 1); g_io_request(cbp, cp); if (cbp2 != NULL) { G_ELI_LOGREQ(2, cbp2, "Sending request."); g_io_request(cbp2, cp); } return (0); } void g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp) { struct g_consumer *cp; struct bio *cbp, *cbp2; size_t size; off_t nsec; bp->bio_pflags = 0; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp = bp->bio_driver1; bp->bio_driver1 = NULL; cbp->bio_to = cp->provider; cbp->bio_done = g_eli_read_done; /* Number of sectors from decrypted provider, eg. 1. */ nsec = bp->bio_length / bp->bio_to->sectorsize; /* Number of sectors from encrypted provider, eg. 9. */ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; cbp->bio_length = cp->provider->sectorsize * nsec; size = cbp->bio_length; size += sc->sc_alen * nsec; size += sizeof(struct cryptop) * nsec; size += sizeof(struct cryptodesc) * nsec * 2; size += G_ELI_AUTH_SECKEYLEN * nsec; size += sizeof(struct uio) * nsec; size += sizeof(struct iovec) * nsec; cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK); cbp->bio_data = bp->bio_driver2; /* * We read more than what is requested, so we have to be ready to read * more than MAXPHYS. */ cbp2 = NULL; if (cbp->bio_length > MAXPHYS) { cbp2 = g_duplicate_bio(bp); cbp2->bio_length = cbp->bio_length - MAXPHYS; cbp2->bio_data = cbp->bio_data + MAXPHYS; cbp2->bio_offset = cbp->bio_offset + MAXPHYS; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_read_done; cbp->bio_length = MAXPHYS; } /* * Read encrypted data from provider. */ G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); if (cbp2 != NULL) { G_ELI_LOGREQ(2, cbp2, "Sending request."); g_io_request(cbp2, cp); } } /* * This is the main function responsible for cryptography (ie. communication * with crypto(9) subsystem). * * BIO_READ: * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ void g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) { struct g_eli_softc *sc; struct cryptop *crp; struct cryptodesc *crde, *crda; struct uio *uio; struct iovec *iov; u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; off_t dstoff; int err, error; u_char *p, *data, *auth, *authkey, *plaindata; G_ELI_LOGREQ(3, bp, "%s", __func__); bp->bio_pflags = wr->w_number; sc = wr->w_softc; /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. */ lsec = sc->sc_bytes_per_sector / encr_secsize; /* Destination offset, used for IV generation. */ dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; auth = NULL; /* Silence compiler warning. 
*/ plaindata = bp->bio_data; if (bp->bio_cmd == BIO_READ) { data = bp->bio_driver2; auth = data + encr_secsize * nsec; p = auth + sc->sc_alen * nsec; } else { size_t size; size = encr_secsize * nsec; size += sizeof(*crp) * nsec; size += sizeof(*crde) * nsec; size += sizeof(*crda) * nsec; size += G_ELI_AUTH_SECKEYLEN * nsec; size += sizeof(*uio) * nsec; size += sizeof(*iov) * nsec; data = malloc(size, M_ELI, M_WAITOK); bp->bio_driver2 = data; p = data + encr_secsize * nsec; } bp->bio_inbed = 0; bp->bio_children = nsec; error = 0; for (i = 1; i <= nsec; i++, dstoff += encr_secsize) { crp = (struct cryptop *)p; p += sizeof(*crp); crde = (struct cryptodesc *)p; p += sizeof(*crde); crda = (struct cryptodesc *)p; p += sizeof(*crda); authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN; uio = (struct uio *)p; p += sizeof(*uio); iov = (struct iovec *)p; p += sizeof(*iov); data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) data_secsize = decr_secsize % data_secsize; if (bp->bio_cmd == BIO_READ) { /* Remember read HMAC. */ bcopy(data, auth, sc->sc_alen); auth += sc->sc_alen; /* TODO: bzero(9) can be commented out later. */ bzero(data, sc->sc_alen); } else { bcopy(plaindata, data + sc->sc_alen, data_secsize); plaindata += data_secsize; } iov->iov_len = sc->sc_alen + data_secsize; iov->iov_base = data; data += encr_secsize; uio->uio_iov = iov; uio->uio_iovcnt = 1; uio->uio_segflg = UIO_SYSSPACE; uio->uio_resid = iov->iov_len; crp->crp_sid = wr->w_sid; crp->crp_ilen = uio->uio_resid; crp->crp_olen = data_secsize; crp->crp_opaque = (void *)bp; crp->crp_buf = (void *)uio; crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIFSYNC | CRYPTO_F_REL; if (g_eli_batch) crp->crp_flags |= CRYPTO_F_BATCH; if (bp->bio_cmd == BIO_WRITE) { crp->crp_callback = g_eli_auth_write_done; crp->crp_desc = crde; crde->crd_next = crda; crda->crd_next = NULL; } else { crp->crp_callback = g_eli_auth_read_done; crp->crp_desc = crda; crda->crd_next = crde; crde->crd_next = NULL; } crde->crd_skip = sc->sc_alen; crde->crd_len = data_secsize; crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) crde->crd_flags |= CRD_F_KEY_EXPLICIT; if (bp->bio_cmd == BIO_WRITE) crde->crd_flags |= CRD_F_ENCRYPT; crde->crd_alg = sc->sc_ealgo; crde->crd_key = g_eli_key_hold(sc, dstoff, encr_secsize); crde->crd_klen = sc->sc_ekeylen; if (sc->sc_ealgo == CRYPTO_AES_XTS) crde->crd_klen <<= 1; g_eli_crypto_ivgen(sc, dstoff, crde->crd_iv, sizeof(crde->crd_iv)); crda->crd_skip = sc->sc_alen; crda->crd_len = data_secsize; crda->crd_inject = 0; crda->crd_flags = CRD_F_KEY_EXPLICIT; crda->crd_alg = sc->sc_aalgo; g_eli_auth_keygen(sc, dstoff, authkey); crda->crd_key = authkey; crda->crd_klen = G_ELI_AUTH_SECKEYLEN * 8; crp->crp_etype = 0; err = crypto_dispatch(crp); if (err != 0 && error == 0) error = err; } if (bp->bio_error == 0) bp->bio_error = error; } Index: projects/nand/sys/i386/conf/GENERIC =================================================================== --- projects/nand/sys/i386/conf/GENERIC (revision 235262) +++ projects/nand/sys/i386/conf/GENERIC (revision 235263) @@ -1,353 +1,354 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/i386 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always 
see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu I486_CPU cpu I586_CPU cpu I686_CPU ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options NANDFS # NAND Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options MD_ROOT # MD is a potential root device options NFSCL # New Network Filesystem Client options NFSD # New Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_PART_GPT # GUID Partition Tables. +options GEOM_RAID # Soft RAID functionality. options GEOM_LABEL # Provides labelization options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel # Debugging support. Always need this: options KDB # Enable kernel debugger support. # For minimum debugger support (stable branch) use: #options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use this instead: options DDB # Support DDB. options GDB # Support remote GDB. options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options ALQ # To make an SMP kernel, the next two lines are needed options SMP # Symmetric MultiProcessor Kernel device apic # I/O APIC # CPU frequency control device cpufreq # Bus support.
device acpi device eisa device pci # Floppy drives device fdc # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers options ATA_CAM # Handle legacy controllers with CAM options ATA_STATIC_ID # Static device numbering device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahb # EISA AHA1742 family device ahc # AHA2940 and onboard AIC7xxx devices options AHC_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~128k to driver. device ahd # AHA39320/29320 and onboard AIC79xx devices options AHD_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~215k to driver. device esp # AMD Am53C974 (Tekram DC-390(T)) device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion #device ncr # NCR/Symbios Logic device sym # NCR/Symbios Logic (newer chipsets + those of `ncr') device trm # Tekram DC395U/UW/F DC315U adapters device adv # Advansys SCSI adapters device adw # Advansys wide SCSI adapters device aha # Adaptec 154x SCSI adapters device aic # Adaptec 15[012]x SCSI adapters, AIC-6[23]60. device bt # Buslogic/Mylex MultiMaster SCSI adapters device ncv # NCR 53C500 device nsp # Workbit Ninja SCSI-3 device stg # TMC 18C30/18C50 device isci # Intel C600 SAS controller # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) device ses # Enclosure Services (SES and SAF-TE) device ctl # CAM Target Layer # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device asr # DPT SmartRAID V, VI and Adaptec SCSI RAID device ciss # Compaq Smart RAID 5* device dpt # DPT Smartcache III, IV - See NOTES for options device hptmv # Highpoint RocketRAID 182x device hptrr # Highpoint RocketRAID 17xx, 22xx, 23xx, 25xx device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode device agp # support several AGP chipsets # Power management support (see NOTES for more options) #device apm # Add suspend/resume support for the i8254. 
device pmtimer # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device plip # TCP/IP over parallel device ppi # Parallel port interface device #device vpo # Requires scbus and da device puc # Multi I/O cards and multi-channel UARTs # PCI Ethernet NICs. device bxe # Broadcom BCM57710/BCM57711/BCM57711E 10Gb Ethernet device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 Gigabit Ethernet Family device igb # Intel PRO/1000 PCIE Server Gigabit Family device ixgb # Intel PRO/10GbE Ethernet Card device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device ae # Attansic/Atheros L2 FastEthernet device age # Attansic/Atheros L1 Gigabit Ethernet device alc # Atheros AR8131/AR8132 Ethernet device ale # Atheros AR8121/AR8113/AR8114 Ethernet device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn device dc # DEC/Intel 21143 and various workalikes device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device nfe # nVidia nForce MCP on-board Ethernet device nge # NatSemi DP83820 gigabit Ethernet #device nve # nVidia nForce MCP on-board Ethernet Networking device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sge # Silicon Integrated Systems SiS190/191 device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device stge # Sundance/Tamarack TC9021 gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device vte # DM&P Vortex86 RDC R6040 Fast Ethernet device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # ISA Ethernet NICs. pccard NICs included. device cs # Crystal Semiconductor CS89x0 NIC # 'device ed' requires 'device miibus' device ed # NE[12]000, SMC Ultra, 3c503, DS8390 cards device ex # Intel EtherExpress Pro/10 and Pro/10+ device ep # Etherlink III based cards device fe # Fujitsu MB8696x based cards device ie # EtherExpress 8/16, 3C507, StarLAN 10 etc. 
device sn # SMC's 9000 series of Ethernet chips device xe # Xircom pccard Ethernet # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit rate control algorithm device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros NIC's device ath_pci # Atheros pci/cardbus glue device ath_hal # pci/cardbus chip support options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors device ath_rate_sample # SampleRate tx rate control for ath #device bwi # Broadcom BCM430x/BCM431x wireless NICs. #device bwn # Broadcom BCM43xx wireless NICs. device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device malo # Marvell Libertas wireless NICs. device mwl # Marvell 88W8363 802.11n wireless NICs. device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. #device wl # Older non 802.11 Wavelan wireless NIC. device wpi # Intel 3945ABG wireless NICs. # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device faith # IPv6-to-IPv4 relaying (translation) device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # FireWire support device firewire # FireWire bus code # sbp(4) works for some systems but causes boot failure on others #device sbp # SCSI over FireWire (Requires scbus and da) device fwe # Ethernet over FireWire (non-standard!) device fwip # IP over FireWire (RFC 2734,3146) device dcons # Dumb console driver device dcons_crom # Configuration ROM for dcons # Sound support device sound # Generic sound driver (required) device snd_cmi # CMedia CMI8338/CMI8738 device snd_csa # Crystal Semiconductor CS461x/428x device snd_emu10kx # Creative SoundBlaster Live! 
and Audigy device snd_es137x # Ensoniq AudioPCI ES137x device snd_hda # Intel High Definition Audio device snd_ich # Intel, NVidia and other ICH AC'97 Audio device snd_via8233 # VIA VT8233x Audio # MMC/SD device mmc # MMC/SD bus device mmcsd # MMC/SD memory card device sdhci # Generic PCI SD Host Controller device nand device nandsim Index: projects/nand/sys/i386/conf/XENHVM =================================================================== --- projects/nand/sys/i386/conf/XENHVM (revision 235262) +++ projects/nand/sys/i386/conf/XENHVM (revision 235263) Property changes on: projects/nand/sys/i386/conf/XENHVM ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/i386/conf/XENHVM:r235157-235262 Index: projects/nand/sys/modules/ral/Makefile =================================================================== --- projects/nand/sys/modules/ral/Makefile (revision 235262) +++ projects/nand/sys/modules/ral/Makefile (revision 235263) @@ -1,9 +1,9 @@ # $FreeBSD$ .PATH: ${.CURDIR}/../../dev/ral KMOD= if_ral -SRCS= rt2560.c rt2661.c if_ral_pci.c +SRCS= rt2560.c rt2661.c rt2860.c if_ral_pci.c SRCS+= device_if.h bus_if.h pci_if.h .include Index: projects/nand/sys/nfsclient/nfs_subs.c =================================================================== --- projects/nand/sys/nfsclient/nfs_subs.c (revision 235262) +++ projects/nand/sys/nfsclient/nfs_subs.c (revision 235263) @@ -1,1139 +1,1138 @@ /*- * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Rick Macklem at The University of Guelph. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)nfs_subs.c 8.8 (Berkeley) 5/22/95 */ #include __FBSDID("$FreeBSD$"); /* * These functions support the macros and help fiddle mbuf chains for * the nfs op functions. They do things like create the rpc header and * copy data between mbuf chains and uio lists. 
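* For example, nfsm_uiotombuf() below walks a single-iovec uio, appends * its bytes to an mbuf chain (grabbing clusters when the request exceeds * MLEN), and zero-fills the tail out to the next 4-byte XDR boundary * computed by nfsm_rndup().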
*/ #include "opt_kdtrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Note that stdarg.h and the ANSI style va_start macro is used for both * ANSI and traditional C compilers. */ #include #ifdef KDTRACE_HOOKS dtrace_nfsclient_attrcache_flush_probe_func_t dtrace_nfsclient_attrcache_flush_done_probe; uint32_t nfsclient_attrcache_flush_done_id; dtrace_nfsclient_attrcache_get_hit_probe_func_t dtrace_nfsclient_attrcache_get_hit_probe; uint32_t nfsclient_attrcache_get_hit_id; dtrace_nfsclient_attrcache_get_miss_probe_func_t dtrace_nfsclient_attrcache_get_miss_probe; uint32_t nfsclient_attrcache_get_miss_id; dtrace_nfsclient_attrcache_load_probe_func_t dtrace_nfsclient_attrcache_load_done_probe; uint32_t nfsclient_attrcache_load_done_id; #endif /* !KDTRACE_HOOKS */ /* * Data items converted to xdr at startup, since they are constant * This is kinda hokey, but may save a little time doing byte swaps */ u_int32_t nfs_xdrneg1; u_int32_t nfs_true, nfs_false; /* And other global data */ static u_int32_t nfs_xid = 0; static enum vtype nv2tov_type[8]= { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON }; int nfs_ticks; int nfs_pbuf_freecnt = -1; /* start out unlimited */ struct nfs_bufq nfs_bufq; static struct mtx nfs_xid_mtx; struct task nfs_nfsiodnew_task; /* * and the reverse mapping from generic to Version 2 procedure numbers */ int nfsv2_procid[NFS_NPROCS] = { NFSV2PROC_NULL, NFSV2PROC_GETATTR, NFSV2PROC_SETATTR, NFSV2PROC_LOOKUP, NFSV2PROC_NOOP, NFSV2PROC_READLINK, NFSV2PROC_READ, NFSV2PROC_WRITE, NFSV2PROC_CREATE, NFSV2PROC_MKDIR, NFSV2PROC_SYMLINK, NFSV2PROC_CREATE, NFSV2PROC_REMOVE, NFSV2PROC_RMDIR, NFSV2PROC_RENAME, NFSV2PROC_LINK, NFSV2PROC_READDIR, NFSV2PROC_NOOP, NFSV2PROC_STATFS, NFSV2PROC_NOOP, NFSV2PROC_NOOP, NFSV2PROC_NOOP, NFSV2PROC_NOOP, }; LIST_HEAD(nfsnodehashhead, nfsnode); u_int32_t nfs_xid_gen(void) { uint32_t xid; mtx_lock(&nfs_xid_mtx); /* Get a pretty random xid to start with */ if (!nfs_xid) nfs_xid = random(); /* * Skip zero xid if it should ever happen. */ if (++nfs_xid == 0) nfs_xid++; xid = nfs_xid; mtx_unlock(&nfs_xid_mtx); return xid; } /* * Create the header for an rpc request packet * The hsiz is the size of the rest of the nfs request header. * (just used to decide if a cluster is a good idea) */ struct mbuf * nfsm_reqhead(struct vnode *vp, u_long procid, int hsiz) { struct mbuf *mb; MGET(mb, M_WAIT, MT_DATA); if (hsiz >= MINCLSIZE) MCLGET(mb, M_WAIT); mb->m_len = 0; return (mb); } /* * copies a uio scatter/gather list to an mbuf chain. * NOTE: can ony handle iovcnt == 1 */ int nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos) { char *uiocp; struct mbuf *mp, *mp2; int xfer, left, mlen; int uiosiz, clflg, rem; char *cp; KASSERT(uiop->uio_iovcnt == 1, ("nfsm_uiotombuf: iovcnt != 1")); if (siz > MLEN) /* or should it >= MCLBYTES ?? */ clflg = 1; else clflg = 0; rem = nfsm_rndup(siz)-siz; mp = mp2 = *mq; while (siz > 0) { left = uiop->uio_iov->iov_len; uiocp = uiop->uio_iov->iov_base; if (left > siz) left = siz; uiosiz = left; while (left > 0) { mlen = M_TRAILINGSPACE(mp); if (mlen == 0) { MGET(mp, M_WAIT, MT_DATA); if (clflg) MCLGET(mp, M_WAIT); mp->m_len = 0; mp2->m_next = mp; mp2 = mp; mlen = M_TRAILINGSPACE(mp); } xfer = (left > mlen) ? mlen : left; #ifdef notdef /* Not Yet.. 
*/ if (uiop->uio_iov->iov_op != NULL) (*(uiop->uio_iov->iov_op)) (uiocp, mtod(mp, caddr_t)+mp->m_len, xfer); else #endif if (uiop->uio_segflg == UIO_SYSSPACE) bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer); else copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer); mp->m_len += xfer; left -= xfer; uiocp += xfer; uiop->uio_offset += xfer; uiop->uio_resid -= xfer; } uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + uiosiz; uiop->uio_iov->iov_len -= uiosiz; siz -= uiosiz; } if (rem > 0) { if (rem > M_TRAILINGSPACE(mp)) { MGET(mp, M_WAIT, MT_DATA); mp->m_len = 0; mp2->m_next = mp; } cp = mtod(mp, caddr_t)+mp->m_len; for (left = 0; left < rem; left++) *cp++ = '\0'; mp->m_len += rem; *bpos = cp; } else *bpos = mtod(mp, caddr_t)+mp->m_len; *mq = mp; return (0); } /* * Copy a string into mbufs for the hard cases... */ int nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz) { struct mbuf *m1 = NULL, *m2; long left, xfer, len, tlen; u_int32_t *tl; int putsize; putsize = 1; m2 = *mb; left = M_TRAILINGSPACE(m2); if (left > 0) { tl = ((u_int32_t *)(*bpos)); *tl++ = txdr_unsigned(siz); putsize = 0; left -= NFSX_UNSIGNED; m2->m_len += NFSX_UNSIGNED; if (left > 0) { bcopy(cp, (caddr_t) tl, left); siz -= left; cp += left; m2->m_len += left; left = 0; } } /* Loop around adding mbufs */ while (siz > 0) { MGET(m1, M_WAIT, MT_DATA); if (siz > MLEN) MCLGET(m1, M_WAIT); m1->m_len = NFSMSIZ(m1); m2->m_next = m1; m2 = m1; tl = mtod(m1, u_int32_t *); tlen = 0; if (putsize) { *tl++ = txdr_unsigned(siz); m1->m_len -= NFSX_UNSIGNED; tlen = NFSX_UNSIGNED; putsize = 0; } if (siz < m1->m_len) { len = nfsm_rndup(siz); xfer = siz; if (xfer < len) *(tl+(xfer>>2)) = 0; } else { xfer = len = m1->m_len; } bcopy(cp, (caddr_t) tl, xfer); m1->m_len = len+tlen; siz -= xfer; cp += xfer; } *mb = m1; *bpos = mtod(m1, caddr_t)+m1->m_len; return (0); } /* * Called once to initialize data structures... */ int nfs_init(struct vfsconf *vfsp) { int i; nfsmount_zone = uma_zcreate("NFSMOUNT", sizeof(struct nfsmount), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); nfs_true = txdr_unsigned(TRUE); nfs_false = txdr_unsigned(FALSE); nfs_xdrneg1 = txdr_unsigned(-1); nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000; if (nfs_ticks < 1) nfs_ticks = 1; /* Ensure async daemons disabled */ for (i = 0; i < NFS_MAXASYNCDAEMON; i++) { nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE; nfs_iodmount[i] = NULL; } nfs_nhinit(); /* Init the nfsnode table */ /* * Initialize reply list and start timer */ mtx_init(&nfs_iod_mtx, "NFS iod lock", NULL, MTX_DEF); mtx_init(&nfs_xid_mtx, "NFS xid lock", NULL, MTX_DEF); TASK_INIT(&nfs_nfsiodnew_task, 0, nfs_nfsiodnew_tq, NULL); nfs_pbuf_freecnt = nswbuf / 2 + 1; return (0); } int nfs_uninit(struct vfsconf *vfsp) { int i; /* * Tell all nfsiod processes to exit. Clear nfs_iodmax, and wakeup * any sleeping nfsiods so they check nfs_iodmax and exit. * Drain nfsiodnew task before we wait for them to finish. 
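 *
 * (The ordering below matters: nfs_iodmax is cleared first and the
 * nfsiodnew task drained before waiting, so that no new nfsiod can be
 * spawned once the msleep() loop starts counting nfs_numasync down --
 * at least that is the apparent intent of the code that follows.)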
*/ mtx_lock(&nfs_iod_mtx); nfs_iodmax = 0; mtx_unlock(&nfs_iod_mtx); taskqueue_drain(taskqueue_thread, &nfs_nfsiodnew_task); mtx_lock(&nfs_iod_mtx); for (i = 0; i < nfs_numasync; i++) if (nfs_iodwant[i] == NFSIOD_AVAILABLE) wakeup(&nfs_iodwant[i]); /* The last nfsiod to exit will wake us up when nfs_numasync hits 0 */ while (nfs_numasync) msleep(&nfs_numasync, &nfs_iod_mtx, PWAIT, "ioddie", 0); mtx_unlock(&nfs_iod_mtx); nfs_nhuninit(); uma_zdestroy(nfsmount_zone); return (0); } void nfs_dircookie_lock(struct nfsnode *np) { mtx_lock(&np->n_mtx); while (np->n_flag & NDIRCOOKIELK) (void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0); np->n_flag |= NDIRCOOKIELK; mtx_unlock(&np->n_mtx); } void nfs_dircookie_unlock(struct nfsnode *np) { mtx_lock(&np->n_mtx); np->n_flag &= ~NDIRCOOKIELK; wakeup(&np->n_flag); mtx_unlock(&np->n_mtx); } int nfs_upgrade_vnlock(struct vnode *vp) { int old_lock; ASSERT_VOP_LOCKED(vp, "nfs_upgrade_vnlock"); old_lock = VOP_ISLOCKED(vp); if (old_lock != LK_EXCLUSIVE) { KASSERT(old_lock == LK_SHARED, ("nfs_upgrade_vnlock: wrong old_lock %d", old_lock)); /* Upgrade to exclusive lock, this might block */ vn_lock(vp, LK_UPGRADE | LK_RETRY); } return (old_lock); } void nfs_downgrade_vnlock(struct vnode *vp, int old_lock) { if (old_lock != LK_EXCLUSIVE) { KASSERT(old_lock == LK_SHARED, ("wrong old_lock %d", old_lock)); /* Downgrade from exclusive lock. */ vn_lock(vp, LK_DOWNGRADE | LK_RETRY); } } void nfs_printf(const char *fmt, ...) { va_list ap; mtx_lock(&Giant); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); mtx_unlock(&Giant); } /* * Attribute cache routines. * nfs_loadattrcache() - loads or updates the cache contents from attributes * that are on the mbuf list * nfs_getattrcache() - returns valid attributes if found in cache, returns * error otherwise */ /* * Load the attribute cache (that lives in the nfsnode entry) with * the values on the mbuf list and * Iff vap not NULL * copy the attributes to *vaper */ int nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp, struct vattr *vaper, int dontshrink) { struct vnode *vp = *vpp; struct vattr *vap; struct nfs_fattr *fp; struct nfsnode *np = NULL; int32_t t1; caddr_t cp2; int rdev; struct mbuf *md; enum vtype vtyp; u_short vmode; struct timespec mtime, mtime_save; int v3 = NFS_ISV3(vp); int error = 0; md = *mdp; t1 = (mtod(md, caddr_t) + md->m_len) - *dposp; cp2 = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, M_WAIT); if (cp2 == NULL) { error = EBADRPC; goto out; } fp = (struct nfs_fattr *)cp2; if (v3) { vtyp = nfsv3tov_type(fp->fa_type); vmode = fxdr_unsigned(u_short, fp->fa_mode); rdev = makedev(fxdr_unsigned(int, fp->fa3_rdev.specdata1), fxdr_unsigned(int, fp->fa3_rdev.specdata2)); fxdr_nfsv3time(&fp->fa3_mtime, &mtime); } else { vtyp = nfsv2tov_type(fp->fa_type); vmode = fxdr_unsigned(u_short, fp->fa_mode); /* * XXX * * The duplicate information returned in fa_type and fa_mode * is an ambiguity in the NFS version 2 protocol. * * VREG should be taken literally as a regular file. If a * server intends to return some type information differently * in the upper bits of the mode field (e.g. for sockets, or * FIFOs), NFSv2 mandates fa_type to be VNON. Anyway, we * leave the examination of the mode bits even in the VREG * case to avoid breakage for bogus servers, but we make sure * that there are actually type bits set in the upper part of * fa_mode (and failing that, trust the va_type field).
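 *
 * As an illustrative example (a hypothetical server reply, not taken
 * from this code): an NFSv2 server exporting a socket may send fa_type
 * == VNON with fa_mode == (S_IFSOCK | 0755); the check below then
 * recovers VSOCK via IFTOVT(vmode). A bogus server sending fa_type ==
 * VREG with no type bits in fa_mode is still treated as a regular file.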
* * NFSv3 cleared the issue, and requires fa_mode to not * contain any type information (while also introducing sockets * and FIFOs for fa_type). */ if (vtyp == VNON || (vtyp == VREG && (vmode & S_IFMT) != 0)) vtyp = IFTOVT(vmode); rdev = fxdr_unsigned(int32_t, fp->fa2_rdev); fxdr_nfsv2time(&fp->fa2_mtime, &mtime); /* * Really ugly NFSv2 kludge. */ if (vtyp == VCHR && rdev == 0xffffffff) vtyp = VFIFO; } /* * If v_type == VNON it is a new node, so fill in the v_type, * n_mtime fields. Check to see if it represents a special * device, and if so, check for a possible alias. Once the * correct vnode has been obtained, fill in the rest of the * information. */ np = VTONFS(vp); mtx_lock(&np->n_mtx); if (vp->v_type != vtyp) { vp->v_type = vtyp; if (vp->v_type == VFIFO) vp->v_op = &nfs_fifoops; np->n_mtime = mtime; } vap = &np->n_vattr; vap->va_type = vtyp; vap->va_mode = (vmode & 07777); vap->va_rdev = rdev; mtime_save = vap->va_mtime; vap->va_mtime = mtime; vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; if (v3) { vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink); vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); vap->va_size = fxdr_hyper(&fp->fa3_size); vap->va_blocksize = NFS_FABLKSIZE; vap->va_bytes = fxdr_hyper(&fp->fa3_used); vap->va_fileid = fxdr_unsigned(int32_t, fp->fa3_fileid.nfsuquad[1]); fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime); fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime); vap->va_flags = 0; vap->va_filerev = 0; } else { vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink); vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size); vap->va_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize); vap->va_bytes = (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) * NFS_FABLKSIZE; vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid); fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime); vap->va_flags = 0; vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t, fp->fa2_ctime.nfsv2_sec); vap->va_ctime.tv_nsec = 0; vap->va_gen = fxdr_unsigned(u_int32_t, fp->fa2_ctime.nfsv2_usec); vap->va_filerev = 0; } np->n_attrstamp = time_second; if (vap->va_size != np->n_size) { if (vap->va_type == VREG) { if (dontshrink && vap->va_size < np->n_size) { /* * We've been told not to shrink the file; * zero np->n_attrstamp to indicate that * the attributes are stale. */ vap->va_size = np->n_size; np->n_attrstamp = 0; KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); } else if (np->n_flag & NMODIFIED) { /* * We've modified the file: Use the larger * of our size, and the server's size. */ if (vap->va_size < np->n_size) { vap->va_size = np->n_size; } else { np->n_size = vap->va_size; np->n_flag |= NSIZECHANGED; } } else { np->n_size = vap->va_size; np->n_flag |= NSIZECHANGED; } vnode_pager_setsize(vp, np->n_size); } else { np->n_size = vap->va_size; } } /* * The following checks are added to prevent a race between (say) * a READDIR+ and a WRITE. * READDIR+, WRITE requests sent out. * READDIR+ resp, WRITE resp received on client. * However, the WRITE resp was handled before the READDIR+ resp * causing the post op attrs from the write to be loaded first * and the attrs from the READDIR+ to be loaded later. If this * happens, we have stale attrs loaded into the attrcache. * We detect this by checking for the mtime moving backwards. We * invalidate the attrcache when this happens.
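 *
 * (Concretely, the timespeccmp() below treats mtime_save > va_mtime,
 * i.e. a cached mtime newer than the one just received, as evidence of
 * such a reordered reply.)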
*/ if (timespeccmp(&mtime_save, &vap->va_mtime, >)) { /* Size changed or mtime went backwards */ np->n_attrstamp = 0; KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); } if (vaper != NULL) { bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap)); if (np->n_flag & NCHG) { if (np->n_flag & NACC) vaper->va_atime = np->n_atim; if (np->n_flag & NUPD) vaper->va_mtime = np->n_mtim; } } #ifdef KDTRACE_HOOKS if (np->n_attrstamp != 0) KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, &np->n_vattr, 0); #endif mtx_unlock(&np->n_mtx); out: #ifdef KDTRACE_HOOKS if (error) KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, NULL, error); #endif return (error); } #ifdef NFS_ACDEBUG #include SYSCTL_DECL(_vfs_oldnfs); static int nfs_acdebug; SYSCTL_INT(_vfs_oldnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0, "Toggle acdebug (attribute cache debug) flag"); #endif /* * Check the time stamp * If the cache is valid, copy contents to *vap and return 0 * otherwise return an error */ int nfs_getattrcache(struct vnode *vp, struct vattr *vaper) { struct nfsnode *np; struct vattr *vap; struct nfsmount *nmp; int timeo; np = VTONFS(vp); vap = &np->n_vattr; nmp = VFSTONFS(vp->v_mount); #ifdef NFS_ACDEBUG mtx_lock(&Giant); /* nfs_printf() */ #endif mtx_lock(&np->n_mtx); /* XXX n_mtime doesn't seem to be updated on a miss-and-reload */ timeo = (time_second - np->n_mtime.tv_sec) / 10; #ifdef NFS_ACDEBUG if (nfs_acdebug>1) nfs_printf("nfs_getattrcache: initial timeo = %d\n", timeo); #endif if (vap->va_type == VDIR) { if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin) timeo = nmp->nm_acdirmin; else if (timeo > nmp->nm_acdirmax) timeo = nmp->nm_acdirmax; } else { if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin) timeo = nmp->nm_acregmin; else if (timeo > nmp->nm_acregmax) timeo = nmp->nm_acregmax; } #ifdef NFS_ACDEBUG if (nfs_acdebug > 2) nfs_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n", nmp->nm_acregmin, nmp->nm_acregmax, nmp->nm_acdirmin, nmp->nm_acdirmax); if (nfs_acdebug) nfs_printf("nfs_getattrcache: age = %d; final timeo = %d\n", (time_second - np->n_attrstamp), timeo); #endif if ((time_second - np->n_attrstamp) >= timeo) { nfsstats.attrcache_misses++; mtx_unlock(&np->n_mtx); #ifdef NFS_ACDEBUG mtx_unlock(&Giant); /* nfs_printf() */ #endif KDTRACE_NFS_ATTRCACHE_GET_MISS(vp); return (ENOENT); } nfsstats.attrcache_hits++; if (vap->va_size != np->n_size) { if (vap->va_type == VREG) { if (np->n_flag & NMODIFIED) { if (vap->va_size < np->n_size) vap->va_size = np->n_size; else np->n_size = vap->va_size; } else { np->n_size = vap->va_size; } vnode_pager_setsize(vp, np->n_size); } else { np->n_size = vap->va_size; } } bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr)); if (np->n_flag & NCHG) { if (np->n_flag & NACC) vaper->va_atime = np->n_atim; if (np->n_flag & NUPD) vaper->va_mtime = np->n_mtim; } mtx_unlock(&np->n_mtx); #ifdef NFS_ACDEBUG mtx_unlock(&Giant); /* nfs_printf() */ #endif KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap); return (0); } /* * Purge all cached information about an NFS vnode including name * cache entries, the attribute cache, and the access cache. This is * called when an NFS request for a node fails with a stale * filehandle. 
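 *
 * (A hypothetical call site: RPC code that receives ESTALE for a vnode
 * would call nfs_purgecache(vp) before passing the error back up.)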
*/ void nfs_purgecache(struct vnode *vp) { struct nfsnode *np; int i; np = VTONFS(vp); cache_purge(vp); mtx_lock(&np->n_mtx); np->n_attrstamp = 0; KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp); for (i = 0; i < NFS_ACCESSCACHESIZE; i++) np->n_accesscache[i].stamp = 0; KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp); mtx_unlock(&np->n_mtx); } static nfsuint64 nfs_nullcookie = { { 0, 0 } }; /* * This function finds the directory cookie that corresponds to the * logical byte offset given. */ nfsuint64 * nfs_getcookie(struct nfsnode *np, off_t off, int add) { struct nfsdmap *dp, *dp2; int pos; nfsuint64 *retval = NULL; pos = (uoff_t)off / NFS_DIRBLKSIZ; if (pos == 0 || off < 0) { KASSERT(!add, ("nfs getcookie add at <= 0")); return (&nfs_nullcookie); } pos--; dp = LIST_FIRST(&np->n_cookies); if (!dp) { if (add) { dp = malloc(sizeof (struct nfsdmap), M_NFSDIROFF, M_WAITOK); dp->ndm_eocookie = 0; LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list); } else goto out; } while (pos >= NFSNUMCOOKIES) { pos -= NFSNUMCOOKIES; if (LIST_NEXT(dp, ndm_list)) { if (!add && dp->ndm_eocookie < NFSNUMCOOKIES && pos >= dp->ndm_eocookie) goto out; dp = LIST_NEXT(dp, ndm_list); } else if (add) { dp2 = malloc(sizeof (struct nfsdmap), M_NFSDIROFF, M_WAITOK); dp2->ndm_eocookie = 0; LIST_INSERT_AFTER(dp, dp2, ndm_list); dp = dp2; } else goto out; } if (pos >= dp->ndm_eocookie) { if (add) dp->ndm_eocookie = pos + 1; else goto out; } retval = &dp->ndm_cookies[pos]; out: return (retval); } /* * Invalidate cached directory information, except for the actual directory * blocks (which are invalidated separately). * Done mainly to avoid the use of stale offset cookies. */ void nfs_invaldir(struct vnode *vp) { struct nfsnode *np = VTONFS(vp); KASSERT(vp->v_type == VDIR, ("nfs: invaldir not dir")); nfs_dircookie_lock(np); np->n_direofoffset = 0; np->n_cookieverf.nfsuquad[0] = 0; np->n_cookieverf.nfsuquad[1] = 0; if (LIST_FIRST(&np->n_cookies)) LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0; nfs_dircookie_unlock(np); } /* * The write verifier has changed (probably due to a server reboot), so all * B_NEEDCOMMIT blocks will have to be written again. Since they are on the * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT * and B_CLUSTEROK flags. Once done the new write verifier can be set for the * mount point. * * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data * writes are not clusterable. */ void nfs_clearcommit(struct mount *mp) { struct vnode *vp, *nvp; struct buf *bp, *nbp; struct bufobj *bo; - MNT_ILOCK(mp); MNT_VNODE_FOREACH_ALL(vp, mp, nvp) { bo = &vp->v_bufobj; vholdl(vp); VI_UNLOCK(vp); BO_LOCK(bo); TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { if (!BUF_ISLOCKED(bp) && (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) == (B_DELWRI | B_NEEDCOMMIT)) bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); } BO_UNLOCK(bo); vdrop(vp); } } /* * Helper functions for former macros. Some of these should be * moved to their callers. 
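 *
 * For instance, a hypothetical caller decoding the post-op attributes
 * of a v3 reply would use one of the helpers below roughly as:
 *
 *	error = nfsm_postop_attr_xx(&vp, &attrflag, NULL, &md, &dpos);
 *	if (error != 0)
 *		goto nfsmout;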
*/ int nfsm_mtofh_xx(struct vnode *d, struct vnode **v, int v3, int *f, struct mbuf **md, caddr_t *dpos) { struct nfsnode *ttnp; struct vnode *ttvp; nfsfh_t *ttfhp; u_int32_t *tl; int ttfhsize; int t1; if (v3) { tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos); if (tl == NULL) return EBADRPC; *f = fxdr_unsigned(int, *tl); } else *f = 1; if (*f) { t1 = nfsm_getfh_xx(&ttfhp, &ttfhsize, (v3), md, dpos); if (t1 != 0) return t1; t1 = nfs_nget(d->v_mount, ttfhp, ttfhsize, &ttnp, LK_EXCLUSIVE); if (t1 != 0) return t1; *v = NFSTOV(ttnp); } if (v3) { tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos); if (tl == NULL) return EBADRPC; if (*f) *f = fxdr_unsigned(int, *tl); else if (fxdr_unsigned(int, *tl)) nfsm_adv_xx(NFSX_V3FATTR, md, dpos); } if (*f) { ttvp = *v; t1 = nfs_loadattrcache(&ttvp, md, dpos, NULL, 0); if (t1) return t1; *v = ttvp; } return 0; } int nfsm_getfh_xx(nfsfh_t **f, int *s, int v3, struct mbuf **md, caddr_t *dpos) { u_int32_t *tl; if (v3) { tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos); if (tl == NULL) return EBADRPC; *s = fxdr_unsigned(int, *tl); if (*s <= 0 || *s > NFSX_V3FHMAX) return EBADRPC; } else *s = NFSX_V2FH; *f = nfsm_dissect_xx(nfsm_rndup(*s), md, dpos); if (*f == NULL) return EBADRPC; else return 0; } int nfsm_loadattr_xx(struct vnode **v, struct vattr *va, struct mbuf **md, caddr_t *dpos) { int t1; struct vnode *ttvp = *v; t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 0); if (t1 != 0) return t1; *v = ttvp; return 0; } int nfsm_postop_attr_xx(struct vnode **v, int *f, struct vattr *va, struct mbuf **md, caddr_t *dpos) { u_int32_t *tl; int t1; struct vnode *ttvp = *v; tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos); if (tl == NULL) return EBADRPC; *f = fxdr_unsigned(int, *tl); if (*f != 0) { t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 1); if (t1 != 0) { *f = 0; return t1; } *v = ttvp; } return 0; } int nfsm_wcc_data_xx(struct vnode **v, int *f, struct mbuf **md, caddr_t *dpos) { u_int32_t *tl; int ttattrf, ttretf = 0; int t1; tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos); if (tl == NULL) return EBADRPC; if (*tl == nfs_true) { tl = nfsm_dissect_xx(6 * NFSX_UNSIGNED, md, dpos); if (tl == NULL) return EBADRPC; mtx_lock(&(VTONFS(*v))->n_mtx); if (*f) ttretf = (VTONFS(*v)->n_mtime.tv_sec == fxdr_unsigned(u_int32_t, *(tl + 2)) && VTONFS(*v)->n_mtime.tv_nsec == fxdr_unsigned(u_int32_t, *(tl + 3))); mtx_unlock(&(VTONFS(*v))->n_mtx); } t1 = nfsm_postop_attr_xx(v, &ttattrf, NULL, md, dpos); if (t1) return t1; if (*f) *f = ttretf; else *f = ttattrf; return 0; } int nfsm_strtom_xx(const char *a, int s, int m, struct mbuf **mb, caddr_t *bpos) { u_int32_t *tl; int t1; if (s > m) return ENAMETOOLONG; t1 = nfsm_rndup(s) + NFSX_UNSIGNED; if (t1 <= M_TRAILINGSPACE(*mb)) { tl = nfsm_build_xx(t1, mb, bpos); *tl++ = txdr_unsigned(s); *(tl + ((t1 >> 2) - 2)) = 0; bcopy(a, tl, s); } else { t1 = nfsm_strtmbuf(mb, bpos, a, s); if (t1 != 0) return t1; } return 0; } int nfsm_fhtom_xx(struct vnode *v, int v3, struct mbuf **mb, caddr_t *bpos) { u_int32_t *tl; int t1; caddr_t cp; if (v3) { t1 = nfsm_rndup(VTONFS(v)->n_fhsize) + NFSX_UNSIGNED; if (t1 < M_TRAILINGSPACE(*mb)) { tl = nfsm_build_xx(t1, mb, bpos); *tl++ = txdr_unsigned(VTONFS(v)->n_fhsize); *(tl + ((t1 >> 2) - 2)) = 0; bcopy(VTONFS(v)->n_fhp, tl, VTONFS(v)->n_fhsize); } else { t1 = nfsm_strtmbuf(mb, bpos, (const char *)VTONFS(v)->n_fhp, VTONFS(v)->n_fhsize); if (t1 != 0) return t1; } } else { cp = nfsm_build_xx(NFSX_V2FH, mb, bpos); bcopy(VTONFS(v)->n_fhp, cp, NFSX_V2FH); } return 0; } void nfsm_v3attrbuild_xx(struct vattr *va, int full, struct 
mbuf **mb, caddr_t *bpos) { u_int32_t *tl; if (va->va_mode != (mode_t)VNOVAL) { tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos); *tl++ = nfs_true; *tl = txdr_unsigned(va->va_mode); } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = nfs_false; } if (full && va->va_uid != (uid_t)VNOVAL) { tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos); *tl++ = nfs_true; *tl = txdr_unsigned(va->va_uid); } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = nfs_false; } if (full && va->va_gid != (gid_t)VNOVAL) { tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos); *tl++ = nfs_true; *tl = txdr_unsigned(va->va_gid); } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = nfs_false; } if (full && va->va_size != VNOVAL) { tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos); *tl++ = nfs_true; txdr_hyper(va->va_size, tl); } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = nfs_false; } if (va->va_atime.tv_sec != VNOVAL) { if (va->va_atime.tv_sec != time_second) { tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos); *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); txdr_nfsv3time(&va->va_atime, tl); } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); } } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); } if (va->va_mtime.tv_sec != VNOVAL) { if (va->va_mtime.tv_sec != time_second) { tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos); *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); txdr_nfsv3time(&va->va_mtime, tl); } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); } } else { tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos); *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); } } Index: projects/nand/sys/sparc64/include/intr_machdep.h =================================================================== --- projects/nand/sys/sparc64/include/intr_machdep.h (revision 235262) +++ projects/nand/sys/sparc64/include/intr_machdep.h (revision 235263) @@ -1,111 +1,113 @@ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_INTR_MACHDEP_H_ #define _MACHINE_INTR_MACHDEP_H_ #define IRSR_BUSY (1 << 5) #define PIL_MAX (1 << 4) #define IV_MAX (1 << 11) #define IR_FREE (PIL_MAX * 2) #define IH_SHIFT PTR_SHIFT #define IQE_SHIFT 5 #define IV_SHIFT 6 #define PIL_LOW 1 /* stray interrupts */ #define PIL_ITHREAD 2 /* interrupts that use ithreads */ #define PIL_RENDEZVOUS 3 /* smp rendezvous ipi */ #define PIL_AST 4 /* ast ipi */ #define PIL_STOP 5 /* stop cpu ipi */ #define PIL_PREEMPT 6 /* preempt idle thread cpu ipi */ #define PIL_HARDCLOCK 7 /* hardclock broadcast */ #define PIL_FILTER 12 /* filter interrupts */ #define PIL_BRIDGE 13 /* bridge interrupts */ #define PIL_TICK 14 /* tick interrupts */ #ifndef LOCORE #define INTR_BRIDGE INTR_MD1 struct trapframe; typedef void ih_func_t(struct trapframe *); typedef void iv_func_t(void *); struct intr_request { struct intr_request *ir_next; iv_func_t *ir_func; void *ir_arg; u_int ir_vec; u_int ir_pri; }; struct intr_controller { void (*ic_enable)(void *); void (*ic_disable)(void *); void (*ic_assign)(void *); void (*ic_clear)(void *); }; struct intr_vector { iv_func_t *iv_func; void *iv_arg; const struct intr_controller *iv_ic; void *iv_icarg; struct intr_event *iv_event; u_int iv_pri; u_int iv_vec; u_int iv_mid; u_int iv_refcnt; u_int iv_pad[2]; }; extern ih_func_t *intr_handlers[]; extern struct intr_vector intr_vectors[]; void intr_add_cpu(u_int cpu); +#ifdef SMP int intr_bind(int vec, u_char cpu); +#endif int intr_describe(int vec, void *ih, const char *descr); void intr_setup(int level, ih_func_t *ihf, int pri, iv_func_t *ivf, void *iva); void intr_init1(void); void intr_init2(void); int intr_controller_register(int vec, const struct intr_controller *ic, void *icarg); int inthand_add(const char *name, int vec, int (*filt)(void *), void (*handler)(void *), void *arg, int flags, void **cookiep); int inthand_remove(int vec, void *cookie); ih_func_t intr_fast; #endif /* !LOCORE */ #endif /* !_MACHINE_INTR_MACHDEP_H_ */ Index: projects/nand/sys/sparc64/sparc64/intr_machdep.c =================================================================== --- projects/nand/sys/sparc64/sparc64/intr_machdep.c (revision 235262) +++ projects/nand/sys/sparc64/sparc64/intr_machdep.c (revision 235263) @@ -1,573 +1,564 @@ /*- * Copyright (c) 1991 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)isa.c 7.2 (Berkeley) 5/13/91 * from: src/sys/i386/isa/intr_machdep.c,v 1.57 2001/07/20 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_STRAY_LOG 5 CTASSERT((1 << IV_SHIFT) == sizeof(struct intr_vector)); ih_func_t *intr_handlers[PIL_MAX]; uint16_t pil_countp[PIL_MAX]; static uint16_t pil_stray_count[PIL_MAX]; struct intr_vector intr_vectors[IV_MAX]; uint16_t intr_countp[IV_MAX]; static uint16_t intr_stray_count[IV_MAX]; static const char *const pil_names[] = { "stray", "low", /* PIL_LOW */ "ithrd", /* PIL_ITHREAD */ "rndzvs", /* PIL_RENDEZVOUS */ "ast", /* PIL_AST */ "stop", /* PIL_STOP */ "preempt", /* PIL_PREEMPT */ "hardclock", /* PIL_HARDCLOCK */ "stray", "stray", "stray", "stray", "filter", /* PIL_FILTER */ "bridge", /* PIL_BRIDGE */ "tick", /* PIL_TICK */ }; /* protect the intr_vectors table */ static struct sx intr_table_lock; /* protect intrcnt_index */ static struct mtx intrcnt_lock; #ifdef SMP static int assign_cpu; static void intr_assign_next_cpu(struct intr_vector *iv); static void intr_shuffle_irqs(void *arg __unused); #endif static int intr_assign_cpu(void *arg, u_char cpu); static void intr_execute_handlers(void *); static void intr_stray_level(struct trapframe *); static void intr_stray_vector(void *); static int intrcnt_setname(const char *, int); static void intrcnt_updatename(int, const char *, int); static void intrcnt_updatename(int vec, const char *name, int ispil) { static int intrcnt_index, stray_pil_index, stray_vec_index; int name_index; mtx_lock_spin(&intrcnt_lock); if (intrnames[0] == '\0') { /* for bitbucket */ if (bootverbose) printf("initializing intr_countp\n"); intrcnt_setname("???", intrcnt_index++); stray_vec_index = intrcnt_index++; intrcnt_setname("stray", stray_vec_index); for (name_index = 0; name_index < IV_MAX; name_index++) intr_countp[name_index] = stray_vec_index; stray_pil_index = intrcnt_index++; intrcnt_setname("pil", stray_pil_index); for (name_index = 0; name_index < PIL_MAX; name_index++) pil_countp[name_index] = stray_pil_index; } if (name == NULL) name = "???"; if (!ispil && intr_countp[vec] != stray_vec_index) name_index = intr_countp[vec]; else if (ispil && pil_countp[vec] != stray_pil_index) name_index = pil_countp[vec]; else name_index = intrcnt_index++; if (intrcnt_setname(name, name_index)) name_index = 0; if (!ispil) intr_countp[vec] = name_index; else pil_countp[vec] = name_index; mtx_unlock_spin(&intrcnt_lock); } static int intrcnt_setname(const char *name, int index) { if ((MAXCOMLEN + 1) * index >= sintrnames) return (E2BIG); snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); return (0); } void intr_setup(int pri, ih_func_t *ihf, int vec, iv_func_t *ivf, void *iva) { char pilname[MAXCOMLEN + 1]; register_t s; s = intr_disable(); if (vec != -1) { intr_vectors[vec].iv_func = ivf; intr_vectors[vec].iv_arg = iva; intr_vectors[vec].iv_pri = pri; intr_vectors[vec].iv_vec = vec; } intr_handlers[pri] = ihf; intr_restore(s); snprintf(pilname, MAXCOMLEN + 1, "pil%d: %s", pri, pil_names[pri]); intrcnt_updatename(pri, pilname, 1); } static void intr_stray_level(struct trapframe *tf) { uint64_t level; level = tf->tf_level; if (pil_stray_count[level] < MAX_STRAY_LOG) { printf("stray level interrupt %ld\n", level); pil_stray_count[level]++; if (pil_stray_count[level] >= MAX_STRAY_LOG) printf("got %d stray level interrupt %ld's: not " "logging anymore\n", MAX_STRAY_LOG,
level); } } static void intr_stray_vector(void *cookie) { struct intr_vector *iv; u_int vec; iv = cookie; vec = iv->iv_vec; if (intr_stray_count[vec] < MAX_STRAY_LOG) { printf("stray vector interrupt %d\n", vec); intr_stray_count[vec]++; if (intr_stray_count[vec] >= MAX_STRAY_LOG) printf("got %d stray vector interrupt %d's: not " "logging anymore\n", MAX_STRAY_LOG, vec); } } void intr_init1() { int i; /* Mark all interrupts as being stray. */ for (i = 0; i < PIL_MAX; i++) intr_handlers[i] = intr_stray_level; for (i = 0; i < IV_MAX; i++) { intr_vectors[i].iv_func = intr_stray_vector; intr_vectors[i].iv_arg = &intr_vectors[i]; intr_vectors[i].iv_pri = PIL_LOW; intr_vectors[i].iv_vec = i; intr_vectors[i].iv_refcnt = 0; } intr_handlers[PIL_LOW] = intr_fast; } void intr_init2() { sx_init(&intr_table_lock, "intr sources"); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } static int intr_assign_cpu(void *arg, u_char cpu) { #ifdef SMP struct pcpu *pc; struct intr_vector *iv; /* * Don't do anything during early boot. We will pick up the * assignment once the APs are started. */ if (assign_cpu && cpu != NOCPU) { pc = pcpu_find(cpu); if (pc == NULL) return (EINVAL); iv = arg; sx_xlock(&intr_table_lock); iv->iv_mid = pc->pc_mid; iv->iv_ic->ic_assign(iv); sx_xunlock(&intr_table_lock); } return (0); #else return (EOPNOTSUPP); #endif } static void intr_execute_handlers(void *cookie) { struct intr_vector *iv; iv = cookie; if (__predict_false(intr_event_handle(iv->iv_event, NULL) != 0)) intr_stray_vector(iv); } int intr_controller_register(int vec, const struct intr_controller *ic, void *icarg) { struct intr_event *ie; struct intr_vector *iv; int error; if (vec < 0 || vec >= IV_MAX) return (EINVAL); sx_xlock(&intr_table_lock); iv = &intr_vectors[vec]; ie = iv->iv_event; sx_xunlock(&intr_table_lock); if (ie != NULL) return (EEXIST); error = intr_event_create(&ie, iv, 0, vec, NULL, ic->ic_clear, ic->ic_clear, intr_assign_cpu, "vec%d:", vec); if (error != 0) return (error); sx_xlock(&intr_table_lock); if (iv->iv_event != NULL) { sx_xunlock(&intr_table_lock); intr_event_destroy(ie); return (EEXIST); } iv->iv_ic = ic; iv->iv_icarg = icarg; iv->iv_event = ie; iv->iv_mid = PCPU_GET(mid); sx_xunlock(&intr_table_lock); return (0); } int inthand_add(const char *name, int vec, driver_filter_t *filt, driver_intr_t *handler, void *arg, int flags, void **cookiep) { const struct intr_controller *ic; struct intr_event *ie; struct intr_handler *ih; struct intr_vector *iv; int error, filter; if (vec < 0 || vec >= IV_MAX) return (EINVAL); /* * INTR_BRIDGE filters/handlers are special purpose only, allowing * them to be shared just would complicate things unnecessarily. */ if ((flags & INTR_BRIDGE) != 0 && (flags & INTR_EXCL) == 0) return (EINVAL); sx_xlock(&intr_table_lock); iv = &intr_vectors[vec]; ic = iv->iv_ic; ie = iv->iv_event; sx_xunlock(&intr_table_lock); if (ic == NULL || ie == NULL) return (EINVAL); error = intr_event_add_handler(ie, name, filt, handler, arg, intr_priority(flags), flags, cookiep); if (error != 0) return (error); sx_xlock(&intr_table_lock); /* Disable the interrupt while we fiddle with it. */ ic->ic_disable(iv); iv->iv_refcnt++; if (iv->iv_refcnt == 1) intr_setup((flags & INTR_BRIDGE) != 0 ? PIL_BRIDGE : filt != NULL ? PIL_FILTER : PIL_ITHREAD, intr_fast, vec, intr_execute_handlers, iv); else if (filt != NULL) { /* * Check if we need to upgrade from PIL_ITHREAD to PIL_FILTER. 
* Given that apart from the on-board SCCs and UARTs shared * interrupts are rather uncommon on sparc64 this should be * pretty rare in practice. */ filter = 0; TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { if (ih->ih_filter != NULL && ih->ih_filter != filt) { filter = 1; break; } } if (filter == 0) intr_setup(PIL_FILTER, intr_fast, vec, intr_execute_handlers, iv); } intr_stray_count[vec] = 0; intrcnt_updatename(vec, ie->ie_fullname, 0); #ifdef SMP if (assign_cpu) intr_assign_next_cpu(iv); #endif ic->ic_enable(iv); /* Ensure the interrupt is cleared, it might have triggered before. */ if (ic->ic_clear != NULL) ic->ic_clear(iv); sx_xunlock(&intr_table_lock); return (0); } int inthand_remove(int vec, void *cookie) { struct intr_vector *iv; int error; if (vec < 0 || vec >= IV_MAX) return (EINVAL); error = intr_event_remove_handler(cookie); if (error == 0) { /* * XXX: maybe this should be done regardless of whether * intr_event_remove_handler() succeeded? */ sx_xlock(&intr_table_lock); iv = &intr_vectors[vec]; iv->iv_refcnt--; if (iv->iv_refcnt == 0) { /* * Don't disable the interrupt for now, so that * stray interrupts get detected... */ intr_setup(PIL_LOW, intr_fast, vec, intr_stray_vector, iv); } sx_xunlock(&intr_table_lock); } return (error); } /* Add a description to an active interrupt handler. */ int intr_describe(int vec, void *ih, const char *descr) { struct intr_vector *iv; int error; if (vec < 0 || vec >= IV_MAX) return (EINVAL); sx_xlock(&intr_table_lock); iv = &intr_vectors[vec]; if (iv == NULL) { sx_xunlock(&intr_table_lock); return (EINVAL); } error = intr_event_describe_handler(iv->iv_event, ih, descr); if (error) { sx_xunlock(&intr_table_lock); return (error); } intrcnt_updatename(vec, iv->iv_event->ie_fullname, 0); sx_xunlock(&intr_table_lock); return (error); } #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ static cpuset_t intr_cpus; static int current_cpu; static void intr_assign_next_cpu(struct intr_vector *iv) { struct pcpu *pc; sx_assert(&intr_table_lock, SA_XLOCKED); /* * Assign this source to a CPU in a round-robin fashion. */ pc = pcpu_find(current_cpu); if (pc == NULL) return; iv->iv_mid = pc->pc_mid; iv->iv_ic->ic_assign(iv); do { current_cpu++; if (current_cpu > mp_maxid) current_cpu = 0; } while (!CPU_ISSET(current_cpu, &intr_cpus)); } /* Attempt to bind the specified IRQ to the specified CPU. */ int intr_bind(int vec, u_char cpu) { struct intr_vector *iv; int error; if (vec < 0 || vec >= IV_MAX) return (EINVAL); sx_xlock(&intr_table_lock); iv = &intr_vectors[vec]; if (iv == NULL) { sx_xunlock(&intr_table_lock); return (EINVAL); } error = intr_event_bind(iv->iv_event, cpu); sx_xunlock(&intr_table_lock); return (error); } /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. */ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding CPU %d as a target\n", cpu); CPU_SET(cpu, &intr_cpus); } /* * Distribute all the interrupt sources among the available CPUs once the * APs have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct pcpu *pc; struct intr_vector *iv; int i; /* Don't bother on UP. */ if (mp_ncpus == 1) return; sx_xlock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < IV_MAX; i++) { iv = &intr_vectors[i]; if (iv != NULL && iv->iv_refcnt > 0) { /* * If this event is already bound to a CPU, * then assign the source to that CPU instead * of picking one via round-robin.
*/ if (iv->iv_event->ie_cpu != NOCPU && (pc = pcpu_find(iv->iv_event->ie_cpu)) != NULL) { iv->iv_mid = pc->pc_mid; iv->iv_ic->ic_assign(iv); } else intr_assign_next_cpu(iv); } } sx_xunlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL); #else /* !SMP */ - -/* Return EOPNOTSUPP in the UP case. */ -int -intr_bind(int vec __unused, u_char cpu __unused) -{ - - return (EOPNOTSUPP); -} - /* Use an empty stub for compatibility. */ void intr_add_cpu(u_int cpu __unused) { } #endif Index: projects/nand/sys/vm/vm_fault.c =================================================================== --- projects/nand/sys/vm/vm_fault.c (revision 235262) +++ projects/nand/sys/vm/vm_fault.c (revision 235263) @@ -1,1506 +1,1536 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_fault.c 8.4 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
* * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Page fault handling module. */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #include #include /* XXX Temporary for VFS_LOCK_GIANT() */ #define PFBAK 4 #define PFFOR 4 #define PAGEORDER_SIZE (PFBAK+PFFOR) static int prefault_pageorder[] = { -1 * PAGE_SIZE, 1 * PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE, -3 * PAGE_SIZE, 3 * PAGE_SIZE, -4 * PAGE_SIZE, 4 * PAGE_SIZE }; static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *); static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t); -#define VM_FAULT_READ_AHEAD 8 -#define VM_FAULT_READ_BEHIND 7 -#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1) +#define VM_FAULT_READ_BEHIND 8 +#define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX) +#define VM_FAULT_NINCR (VM_FAULT_READ_MAX / VM_FAULT_READ_BEHIND) +#define VM_FAULT_SUM (VM_FAULT_NINCR * (VM_FAULT_NINCR + 1) / 2) +#define VM_FAULT_CACHE_BEHIND (VM_FAULT_READ_BEHIND * VM_FAULT_SUM) struct faultstate { vm_page_t m; vm_object_t object; vm_pindex_t pindex; vm_page_t first_m; vm_object_t first_object; vm_pindex_t first_pindex; vm_map_t map; vm_map_entry_t entry; int lookup_still_valid; struct vnode *vp; int vfslocked; }; +static void vm_fault_cache_behind(const struct faultstate *fs, int distance); + static inline void release_page(struct faultstate *fs) { vm_page_wakeup(fs->m); vm_page_lock(fs->m); vm_page_deactivate(fs->m); vm_page_unlock(fs->m); fs->m = NULL; } static inline void unlock_map(struct faultstate *fs) { if (fs->lookup_still_valid) { vm_map_lookup_done(fs->map, fs->entry); fs->lookup_still_valid = FALSE; } } static void unlock_and_deallocate(struct faultstate *fs) { vm_object_pip_wakeup(fs->object); VM_OBJECT_UNLOCK(fs->object); if (fs->object != fs->first_object) { VM_OBJECT_LOCK(fs->first_object); vm_page_lock(fs->first_m); vm_page_free(fs->first_m); vm_page_unlock(fs->first_m); vm_object_pip_wakeup(fs->first_object); VM_OBJECT_UNLOCK(fs->first_object); fs->first_m = NULL; } vm_object_deallocate(fs->first_object); unlock_map(fs); if (fs->vp != NULL) { vput(fs->vp); fs->vp = NULL; } VFS_UNLOCK_GIANT(fs->vfslocked); fs->vfslocked = 0; } /* * TRYPAGER - used by vm_fault to calculate whether the pager for the * current object *might* contain the page. * * default objects are zero-fill, there is no real pager. */ #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \ ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired)) /* * vm_fault: * * Handle a page fault occurring at the given address, * requiring the given permissions, in the map specified. * If successful, the page is inserted into the * associated physical map. * * NOTE: the given address should be truncated to the * proper page address. * * KERN_SUCCESS is returned if the page fault is handled; otherwise, * a standard error specifying why the fault is fatal is returned. * * The map in question must be referenced, and remains so. * Caller may hold no locks. 
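 *
 * A typical caller is a machine-dependent trap handler; a hypothetical
 * invocation (variable names assumed) looks like:
 *
 *	rv = vm_fault(&p->p_vmspace->vm_map, va, VM_PROT_READ,
 *	    VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		(deliver SIGSEGV to the faulting thread)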
*/ int vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) { struct thread *td; int result; td = curthread; if ((td->td_pflags & TDP_NOFAULTING) != 0) return (KERN_PROTECTION_FAILURE); #ifdef KTRACE if (map != kernel_map && KTRPOINT(td, KTR_FAULT)) ktrfault(vaddr, fault_type); #endif result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags, NULL); #ifdef KTRACE if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND)) ktrfaultend(result); #endif return (result); } int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags, vm_page_t *m_hold) { vm_prot_t prot; - int is_first_object_locked, result; - boolean_t growstack, wired; + long ahead, behind; + int alloc_req, era, faultcount, nera, reqpage, result; + boolean_t growstack, is_first_object_locked, wired; int map_generation; vm_object_t next_object; - vm_page_t marray[VM_FAULT_READ], mt, mt_prev; + vm_page_t marray[VM_FAULT_READ_MAX]; int hardfault; - int faultcount, ahead, behind, alloc_req; struct faultstate fs; struct vnode *vp; int locked, error; hardfault = 0; growstack = TRUE; PCPU_INC(cnt.v_vm_faults); fs.vp = NULL; fs.vfslocked = 0; - faultcount = behind = 0; + faultcount = reqpage = 0; RetryFault:; /* * Find the backing store object and offset into it to begin the * search. */ fs.map = map; result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired); if (result != KERN_SUCCESS) { if (growstack && result == KERN_INVALID_ADDRESS && map != kernel_map) { result = vm_map_growstack(curproc, vaddr); if (result != KERN_SUCCESS) return (KERN_FAILURE); growstack = FALSE; goto RetryFault; } return (result); } map_generation = fs.map->timestamp; if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { panic("vm_fault: fault on nofault entry, addr: %lx", (u_long)vaddr); } /* * Make a reference to this object to prevent its disposal while we * are messing with it. Once we have the reference, the map is free * to be diddled. Since objects reference their shadows (and copies), * they will stay around as well. * * Bump the paging-in-progress count to prevent size changes (e.g. * truncation operations) during I/O. This must be done after * obtaining the vnode lock in order to avoid possible deadlocks. */ VM_OBJECT_LOCK(fs.first_object); vm_object_reference_locked(fs.first_object); vm_object_pip_add(fs.first_object, 1); fs.lookup_still_valid = TRUE; if (wired) fault_type = prot | (fault_type & VM_PROT_COPY); fs.first_m = NULL; /* * Search for the page at object/offset. */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; while (TRUE) { /* * If the object is dead, we stop here */ if (fs.object->flags & OBJ_DEAD) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } /* * See if page is resident */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (fs.m != NULL) { /* * check for page-based copy on write. * We check fs.object == fs.first_object so * as to ensure the legacy COW mechanism is * used when the page in question is part of * a shadow object. Otherwise, vm_page_cowfault() * removes the page from the backing object, * which is not what we want. */ vm_page_lock(fs.m); if ((fs.m->cow) && (fault_type & VM_PROT_WRITE) && (fs.object == fs.first_object)) { vm_page_cowfault(fs.m); unlock_and_deallocate(&fs); goto RetryFault; } /* * Wait/Retry if the page is busy. 
We have to do this * if the page is busy via either VPO_BUSY or * vm_page_t->busy because the vm_pager may be using * vm_page_t->busy for pageouts ( and even pageins if * it is the vnode pager ), and we could end up trying * to pagein and pageout the same page simultaneously. * * We can theoretically allow the busy case on a read * fault if the page is marked valid, but since such * pages are typically already pmap'd, putting that * special case in might be more effort than it is * worth. We cannot under any circumstances mess * around with a vm_page_t->busy page except, perhaps, * to pmap it. */ if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) { /* * Reference the page before unlocking and * sleeping so that the page daemon is less * likely to reclaim it. */ vm_page_aflag_set(fs.m, PGA_REFERENCED); vm_page_unlock(fs.m); if (fs.object != fs.first_object) { if (!VM_OBJECT_TRYLOCK( fs.first_object)) { VM_OBJECT_UNLOCK(fs.object); VM_OBJECT_LOCK(fs.first_object); VM_OBJECT_LOCK(fs.object); } vm_page_lock(fs.first_m); vm_page_free(fs.first_m); vm_page_unlock(fs.first_m); vm_object_pip_wakeup(fs.first_object); VM_OBJECT_UNLOCK(fs.first_object); fs.first_m = NULL; } unlock_map(&fs); if (fs.m == vm_page_lookup(fs.object, fs.pindex)) { vm_page_sleep_if_busy(fs.m, TRUE, "vmpfw"); } vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); PCPU_INC(cnt.v_intrans); vm_object_deallocate(fs.first_object); goto RetryFault; } vm_pageq_remove(fs.m); vm_page_unlock(fs.m); /* * Mark page busy for other processes, and the * pagedaemon. If it still isn't completely valid * (readable), jump to readrest, else break-out ( we * found the page ). */ vm_page_busy(fs.m); if (fs.m->valid != VM_PAGE_BITS_ALL) goto readrest; break; } /* * Page is not resident. If this is the search termination * or the pager might contain the page, allocate a new page. */ if (TRYPAGER || fs.object == fs.first_object) { if (fs.pindex >= fs.object->size) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } /* * Allocate a new page for this object/offset pair. * * Unlocked read of the p_flag is harmless. At * worst, the P_KILLED might be not observed * there, and allocation can fail, causing * restart and new reading of the p_flag. */ fs.m = NULL; if (!vm_page_count_severe() || P_KILLED(curproc)) { #if VM_NRESERVLEVEL > 0 if ((fs.object->flags & OBJ_COLORED) == 0) { fs.object->flags |= OBJ_COLORED; fs.object->pg_color = atop(vaddr) - fs.pindex; } #endif alloc_req = P_KILLED(curproc) ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL; if (fs.object->type != OBJT_VNODE && fs.object->backing_object == NULL) alloc_req |= VM_ALLOC_ZERO; fs.m = vm_page_alloc(fs.object, fs.pindex, alloc_req); } if (fs.m == NULL) { unlock_and_deallocate(&fs); VM_WAITPFAULT; goto RetryFault; } else if (fs.m->valid == VM_PAGE_BITS_ALL) break; } readrest: /* * We have found a valid page or we have allocated a new page. * The page thus may not be valid or may not be entirely * valid. * * Attempt to fault-in the page if there is a chance that the * pager has it, and potentially fault in additional pages * at the same time.
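 *
 * A sketch of the adaptive read-ahead arithmetic applied below
 * (assuming VM_FAULT_READ_BEHIND == 8, as defined above): each
 * consecutive sequential fault grows fs.entry->read_ahead by up to
 * VM_FAULT_READ_BEHIND pages, i.e. 8, 16, 24, ..., until it is
 * clamped at VM_FAULT_READ_AHEAD_MAX, while a non-sequential fault
 * drops the window back to at most VM_FAULT_READ_AHEAD_MIN.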
*/ if (TRYPAGER) { int rv; - int reqpage = 0; u_char behavior = vm_map_entry_behavior(fs.entry); if (behavior == MAP_ENTRY_BEHAV_RANDOM || P_KILLED(curproc)) { + behind = 0; ahead = 0; + } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) { behind = 0; + ahead = atop(fs.entry->end - vaddr) - 1; + if (ahead > VM_FAULT_READ_AHEAD_MAX) + ahead = VM_FAULT_READ_AHEAD_MAX; + if (fs.pindex == fs.entry->next_read) + vm_fault_cache_behind(&fs, + VM_FAULT_READ_MAX); } else { - behind = (vaddr - fs.entry->start) >> PAGE_SHIFT; + /* + * If this is a sequential page fault, then + * arithmetically increase the number of pages + * in the read-ahead window. Otherwise, reset + * the read-ahead window to its smallest size. + */ + behind = atop(vaddr - fs.entry->start); if (behind > VM_FAULT_READ_BEHIND) behind = VM_FAULT_READ_BEHIND; - - ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1; - if (ahead > VM_FAULT_READ_AHEAD) - ahead = VM_FAULT_READ_AHEAD; + ahead = atop(fs.entry->end - vaddr) - 1; + era = fs.entry->read_ahead; + if (fs.pindex == fs.entry->next_read) { + nera = era + behind; + if (nera > VM_FAULT_READ_AHEAD_MAX) + nera = VM_FAULT_READ_AHEAD_MAX; + behind = 0; + if (ahead > nera) + ahead = nera; + if (era == VM_FAULT_READ_AHEAD_MAX) + vm_fault_cache_behind(&fs, + VM_FAULT_CACHE_BEHIND); + } else if (ahead > VM_FAULT_READ_AHEAD_MIN) + ahead = VM_FAULT_READ_AHEAD_MIN; + if (era != ahead) + fs.entry->read_ahead = ahead; } - is_first_object_locked = FALSE; - if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL || - (behavior != MAP_ENTRY_BEHAV_RANDOM && - fs.pindex >= fs.entry->lastr && - fs.pindex < fs.entry->lastr + VM_FAULT_READ)) && - (fs.first_object == fs.object || - (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) && - fs.first_object->type != OBJT_DEVICE && - fs.first_object->type != OBJT_PHYS && - fs.first_object->type != OBJT_SG) { - vm_pindex_t firstpindex; - if (fs.first_pindex < 2 * VM_FAULT_READ) - firstpindex = 0; - else - firstpindex = fs.first_pindex - 2 * VM_FAULT_READ; - mt = fs.first_object != fs.object ? - fs.first_m : fs.m; - KASSERT(mt != NULL, ("vm_fault: missing mt")); - KASSERT((mt->oflags & VPO_BUSY) != 0, - ("vm_fault: mt %p not busy", mt)); - mt_prev = vm_page_prev(mt); - - /* - * note: partially valid pages cannot be - * included in the lookahead - NFS piecemeal - * writes will barf on it badly. - */ - while ((mt = mt_prev) != NULL && - mt->pindex >= firstpindex && - mt->valid == VM_PAGE_BITS_ALL) { - mt_prev = vm_page_prev(mt); - if (mt->busy || - (mt->oflags & VPO_BUSY)) - continue; - vm_page_lock(mt); - if (mt->hold_count || - mt->wire_count) { - vm_page_unlock(mt); - continue; - } - pmap_remove_all(mt); - if (mt->dirty != 0) - vm_page_deactivate(mt); - else - vm_page_cache(mt); - vm_page_unlock(mt); - } - ahead += behind; - behind = 0; - } - if (is_first_object_locked) - VM_OBJECT_UNLOCK(fs.first_object); - /* * Call the pager to retrieve the data, if any, after * releasing the lock on the map. We hold a ref on * fs.object and the pages are VPO_BUSY'd. 
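 *
 * (For reference, the pager call issued further down has the shape
 *
 *	rv = vm_pager_get_pages(fs.object, marray, faultcount, reqpage);
 *
 * where marray[reqpage] is the page this fault was taken on.)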
*/ unlock_map(&fs); vnode_lock: if (fs.object->type == OBJT_VNODE) { vp = fs.object->handle; if (vp == fs.vp) goto vnode_locked; else if (fs.vp != NULL) { vput(fs.vp); fs.vp = NULL; } locked = VOP_ISLOCKED(vp); if (VFS_NEEDSGIANT(vp->v_mount) && !fs.vfslocked) { fs.vfslocked = 1; if (!mtx_trylock(&Giant)) { VM_OBJECT_UNLOCK(fs.object); mtx_lock(&Giant); VM_OBJECT_LOCK(fs.object); goto vnode_lock; } } if (locked != LK_EXCLUSIVE) locked = LK_SHARED; /* Do not sleep for vnode lock while fs.m is busy */ error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread); if (error != 0) { int vfslocked; vfslocked = fs.vfslocked; fs.vfslocked = 0; /* Keep Giant */ vhold(vp); release_page(&fs); unlock_and_deallocate(&fs); error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread); vdrop(vp); fs.vp = vp; fs.vfslocked = vfslocked; KASSERT(error == 0, ("vm_fault: vget failed")); goto RetryFault; } fs.vp = vp; } vnode_locked: KASSERT(fs.vp == NULL || !fs.map->system_map, ("vm_fault: vnode-backed object mapped by system map")); /* * Now we find out if any other pages should be paged * in at this time. This routine checks to see if the * pages surrounding this fault reside in the same * object as the page for this fault. If they do, * then they are also faulted into the object. The * array "marray" returned contains an array of * vm_page_t structs where one of them is the * vm_page_t passed to the routine. The reqpage * return value is the index into the marray for the * vm_page_t passed to the routine. * * fs.m plus the additional pages are VPO_BUSY'd. */ faultcount = vm_fault_additional_pages( fs.m, behind, ahead, marray, &reqpage); rv = faultcount ? vm_pager_get_pages(fs.object, marray, faultcount, reqpage) : VM_PAGER_FAIL; if (rv == VM_PAGER_OK) { /* * Found the page. Leave it busy while we play * with it. */ /* * Relookup in case pager changed page. Pager * is responsible for disposition of old page * if moved. */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (!fs.m) { unlock_and_deallocate(&fs); goto RetryFault; } hardfault++; break; /* break to PAGE HAS BEEN FOUND */ } /* * Remove the bogus page (which does not exist at this * object/offset); before doing so, we must get back * our object lock to preserve our invariant. * * Also wake up any other process that may want to bring * in this page. * * If this is the top-level object, we must leave the * busy page to prevent another process from rushing * past us, and inserting the page in that object at * the same time that we are. */ if (rv == VM_PAGER_ERROR) printf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm); /* * Data outside the range of the pager or an I/O error */ /* * XXX - the check for kernel_map is a kludge to work * around having the machine panic on a kernel space * fault w/ I/O error. */ if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { vm_page_lock(fs.m); vm_page_free(fs.m); vm_page_unlock(fs.m); fs.m = NULL; unlock_and_deallocate(&fs); return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE); } if (fs.object != fs.first_object) { vm_page_lock(fs.m); vm_page_free(fs.m); vm_page_unlock(fs.m); fs.m = NULL; /* * XXX - we cannot just fall out at this * point; m has been freed and is invalid! */ } } /* * We get here if the object has a default pager (or unwiring) * or the pager doesn't have the page. */ if (fs.object == fs.first_object) fs.first_m = fs.m; /* * Move on to the next object. Lock the next object before * unlocking the current one.
*/ fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset); next_object = fs.object->backing_object; if (next_object == NULL) { /* * If there's no object left, fill the page in the top * object with zeros. */ if (fs.object != fs.first_object) { vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; VM_OBJECT_LOCK(fs.object); } fs.first_m = NULL; /* * Zero the page if necessary and mark it valid. */ if ((fs.m->flags & PG_ZERO) == 0) { pmap_zero_page(fs.m); } else { PCPU_INC(cnt.v_ozfod); } PCPU_INC(cnt.v_zfod); fs.m->valid = VM_PAGE_BITS_ALL; break; /* break to PAGE HAS BEEN FOUND */ } else { KASSERT(fs.object != next_object, ("object loop %p", next_object)); VM_OBJECT_LOCK(next_object); vm_object_pip_add(next_object, 1); if (fs.object != fs.first_object) vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); fs.object = next_object; } } KASSERT((fs.m->oflags & VPO_BUSY) != 0, ("vm_fault: not busy after main loop")); /* * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock * is held.] */ /* * If the page is being written, but isn't already owned by the * top-level object, we have to copy it into a new page owned by the * top-level object. */ if (fs.object != fs.first_object) { /* * We only really need to copy if we want to write it. */ if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { /* * This allows pages to be virtually copied from a * backing_object into the first_object, where the * backing object has no other refs to it, and cannot * gain any more refs. Instead of a bcopy, we just * move the page from the backing object to the * first object. Note that we must mark the page * dirty in the first object so that it will go out * to swap when needed. */ is_first_object_locked = FALSE; if ( /* * Only one shadow object */ (fs.object->shadow_count == 1) && /* * No COW refs, except us */ (fs.object->ref_count == 1) && /* * No one else can look this object up */ (fs.object->handle == NULL) && /* * No other ways to look the object up */ ((fs.object->type == OBJT_DEFAULT) || (fs.object->type == OBJT_SWAP)) && (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) && /* * We don't chase down the shadow chain */ fs.object == fs.first_object->backing_object) { /* * get rid of the unnecessary page */ vm_page_lock(fs.first_m); vm_page_free(fs.first_m); vm_page_unlock(fs.first_m); /* * grab the page and put it into the * process's object. The page is * automatically made dirty. */ vm_page_lock(fs.m); vm_page_rename(fs.m, fs.first_object, fs.first_pindex); vm_page_unlock(fs.m); vm_page_busy(fs.m); fs.first_m = fs.m; fs.m = NULL; PCPU_INC(cnt.v_cow_optim); } else { /* * Oh, well, let's copy it. */ pmap_copy_page(fs.m, fs.first_m); fs.first_m->valid = VM_PAGE_BITS_ALL; if (wired && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) { vm_page_lock(fs.first_m); vm_page_wire(fs.first_m); vm_page_unlock(fs.first_m); vm_page_lock(fs.m); vm_page_unwire(fs.m, FALSE); vm_page_unlock(fs.m); } /* * We no longer need the old page or object. */ release_page(&fs); } /* * fs.object != fs.first_object due to above * conditional */ vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); /* * Only use the new page below... */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; if (!is_first_object_locked) VM_OBJECT_LOCK(fs.object); PCPU_INC(cnt.v_cow_faults); } else { prot &= ~VM_PROT_WRITE; } } /* * We must verify that the maps have not changed since our last * lookup.
*/ if (!fs.lookup_still_valid) { vm_object_t retry_object; vm_pindex_t retry_pindex; vm_prot_t retry_prot; if (!vm_map_trylock_read(fs.map)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } fs.lookup_still_valid = TRUE; if (fs.map->timestamp != map_generation) { result = vm_map_lookup_locked(&fs.map, vaddr, fault_type, &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired); /* * If we don't need the page any longer, put it on the inactive * list (the easiest thing to do here). If no one needs it, * pageout will grab it eventually. */ if (result != KERN_SUCCESS) { release_page(&fs); unlock_and_deallocate(&fs); /* * If retry of map lookup would have blocked then * retry fault from start. */ if (result == KERN_FAILURE) goto RetryFault; return (result); } if ((retry_object != fs.first_object) || (retry_pindex != fs.first_pindex)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } /* * Check whether the protection has changed or the object has * been copied while we left the map unlocked. Changing from * read to write permission is OK - we leave the page * write-protected, and catch the write fault. Changing from * write to read permission means that we can't mark the page * write-enabled after all. */ prot &= retry_prot; } } /* * If the page was filled by a pager, update the map entry's * last read offset. Since the pager does not return the * actual set of pages that it read, this update is based on * the requested set. Typically, the requested and actual * sets are the same. * * XXX The following assignment modifies the map * without holding a write lock on it. */ if (hardfault) - fs.entry->lastr = fs.pindex + faultcount - behind; + fs.entry->next_read = fs.pindex + faultcount - reqpage; if ((prot & VM_PROT_WRITE) != 0 || (fault_flags & VM_FAULT_DIRTY) != 0) { vm_object_set_writeable_dirty(fs.object); /* * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC * if the page is already dirty to prevent data written with * the expectation of being synced from not being synced. * Likewise if this entry does not request NOSYNC then make * sure the page isn't marked NOSYNC. Applications sharing * data should use the same flags to avoid ping ponging. */ if (fs.entry->eflags & MAP_ENTRY_NOSYNC) { if (fs.m->dirty == 0) fs.m->oflags |= VPO_NOSYNC; } else { fs.m->oflags &= ~VPO_NOSYNC; } /* * If the fault is a write, we know that this page is being * written NOW so dirty it explicitly to save on * pmap_is_modified() calls later. * * Also tell the backing pager, if any, that it should remove * any swap backing since the page is now dirty. */ if (((fault_type & VM_PROT_WRITE) != 0 && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) || (fault_flags & VM_FAULT_DIRTY) != 0) { vm_page_dirty(fs.m); vm_pager_page_unswapped(fs.m); } } /* * Page had better still be busy */ KASSERT(fs.m->oflags & VPO_BUSY, ("vm_fault: page %p not busy!", fs.m)); /* * Page must be completely valid or it is not fit to * map into user space. vm_pager_get_pages() ensures this. */ KASSERT(fs.m->valid == VM_PAGE_BITS_ALL, ("vm_fault: page %p partially invalid", fs.m)); VM_OBJECT_UNLOCK(fs.object); /* * Put this page into the physical map. We had to do the unlock above * because pmap_enter() may sleep. We don't put the page * back on the active queue until later so that the pageout daemon * won't find it (yet). 
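 *
 * [Illustrative aside, not part of the original source: a worked
 * example of the next_read update above, with hypothetical numbers.
 * vm_fault_additional_pages() returns "faultcount" pages with the
 * faulting page at index "reqpage", so the run of requested pages
 * starts at pindex - reqpage and next_read lands one past its end.]
 *
 *	vm_pindex_t pindex = 100;	// faulting page (hypothetical)
 *	int faultcount = 16, reqpage = 4; // pages 96..111 requested
 *	vm_pindex_t next_read;
 *
 *	next_read = pindex + faultcount - reqpage; // 100 + 16 - 4 == 112
 *	// A later fault at pindex 112 is then treated as sequential.
 *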
*/ pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired); if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0) vm_fault_prefault(fs.map->pmap, vaddr, fs.entry); VM_OBJECT_LOCK(fs.object); vm_page_lock(fs.m); /* * If the page is not wired down, then put it where the pageout daemon * can find it. */ if (fault_flags & VM_FAULT_CHANGE_WIRING) { if (wired) vm_page_wire(fs.m); else vm_page_unwire(fs.m, 1); } else vm_page_activate(fs.m); if (m_hold != NULL) { *m_hold = fs.m; vm_page_hold(fs.m); } vm_page_unlock(fs.m); vm_page_wakeup(fs.m); /* * Unlock everything, and return */ unlock_and_deallocate(&fs); if (hardfault) curthread->td_ru.ru_majflt++; else curthread->td_ru.ru_minflt++; return (KERN_SUCCESS); +} + +/* + * Speed up the reclamation of up to "distance" pages that precede the + * faulting pindex within the first object of the shadow chain. + */ +static void +vm_fault_cache_behind(const struct faultstate *fs, int distance) +{ + vm_object_t first_object, object; + vm_page_t m, m_prev; + vm_pindex_t pindex; + + object = fs->object; + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + first_object = fs->first_object; + if (first_object != object) { + if (!VM_OBJECT_TRYLOCK(first_object)) { + VM_OBJECT_UNLOCK(object); + VM_OBJECT_LOCK(first_object); + VM_OBJECT_LOCK(object); + } + } + if (first_object->type != OBJT_DEVICE && + first_object->type != OBJT_PHYS && first_object->type != OBJT_SG) { + if (fs->first_pindex < distance) + pindex = 0; + else + pindex = fs->first_pindex - distance; + if (pindex < OFF_TO_IDX(fs->entry->offset)) + pindex = OFF_TO_IDX(fs->entry->offset); + m = first_object != object ? fs->first_m : fs->m; + KASSERT((m->oflags & VPO_BUSY) != 0, + ("vm_fault_cache_behind: page %p is not busy", m)); + m_prev = vm_page_prev(m); + while ((m = m_prev) != NULL && m->pindex >= pindex && + m->valid == VM_PAGE_BITS_ALL) { + m_prev = vm_page_prev(m); + if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) + continue; + vm_page_lock(m); + if (m->hold_count == 0 && m->wire_count == 0) { + pmap_remove_all(m); + vm_page_aflag_clear(m, PGA_REFERENCED); + if (m->dirty != 0) + vm_page_deactivate(m); + else + vm_page_cache(m); + } + vm_page_unlock(m); + } + } + if (first_object != object) + VM_OBJECT_UNLOCK(first_object); } /* * vm_fault_prefault provides a quick way of clustering * pagefaults into a process's address space. It is a "cousin" * of vm_map_pmap_enter, except it runs at page fault time instead * of mmap time.
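 *
 * [Illustrative aside, not part of the original source: a worked
 * sketch of the window that vm_fault_cache_behind() above scans,
 * using hypothetical numbers. The lower bound is clamped so the scan
 * never reaches below the mapping's starting offset.]
 *
 *	vm_pindex_t first_pindex = 50, entry_off = 40, pindex;
 *	int distance = 16;
 *
 *	pindex = first_pindex < (vm_pindex_t)distance ?
 *	    0 : first_pindex - distance;	// 50 - 16 == 34
 *	if (pindex < entry_off)
 *		pindex = entry_off;		// clamped up to 40
 *	// Pages 40..49 are the candidates for deactivation or caching.
 *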
*/ static void vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry) { int i; vm_offset_t addr, starta; vm_pindex_t pindex; vm_page_t m; vm_object_t object; if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) return; object = entry->object.vm_object; starta = addra - PFBAK * PAGE_SIZE; if (starta < entry->start) { starta = entry->start; } else if (starta > addra) { starta = 0; } for (i = 0; i < PAGEORDER_SIZE; i++) { vm_object_t backing_object, lobject; addr = addra + prefault_pageorder[i]; if (addr > addra + (PFFOR * PAGE_SIZE)) addr = 0; if (addr < starta || addr >= entry->end) continue; if (!pmap_is_prefaultable(pmap, addr)) continue; pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; lobject = object; VM_OBJECT_LOCK(lobject); while ((m = vm_page_lookup(lobject, pindex)) == NULL && lobject->type == OBJT_DEFAULT && (backing_object = lobject->backing_object) != NULL) { KASSERT((lobject->backing_object_offset & PAGE_MASK) == 0, ("vm_fault_prefault: unaligned object offset")); pindex += lobject->backing_object_offset >> PAGE_SHIFT; VM_OBJECT_LOCK(backing_object); VM_OBJECT_UNLOCK(lobject); lobject = backing_object; } /* * give up when a page is not in memory */ if (m == NULL) { VM_OBJECT_UNLOCK(lobject); break; } if (m->valid == VM_PAGE_BITS_ALL && (m->flags & PG_FICTITIOUS) == 0) pmap_enter_quick(pmap, addr, m, entry->protection); VM_OBJECT_UNLOCK(lobject); } } /* * Hold each of the physical pages that are mapped by the specified range of * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid * and allow the specified types of access, "prot". If all of the implied * pages are successfully held, then the number of held pages is returned * together with pointers to those pages in the array "ma". However, if any * of the pages cannot be held, -1 is returned. */ int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, vm_prot_t prot, vm_page_t *ma, int max_count) { vm_offset_t end, va; vm_page_t *mp; int count; boolean_t pmap_failed; if (len == 0) return (0); end = round_page(addr + len); addr = trunc_page(addr); /* * Check for illegal addresses. */ if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map)) return (-1); count = howmany(end - addr, PAGE_SIZE); if (count > max_count) panic("vm_fault_quick_hold_pages: count > max_count"); /* * Most likely, the physical pages are resident in the pmap, so it is * faster to try pmap_extract_and_hold() first. */ pmap_failed = FALSE; for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { *mp = pmap_extract_and_hold(map->pmap, va, prot); if (*mp == NULL) pmap_failed = TRUE; else if ((prot & VM_PROT_WRITE) != 0 && (*mp)->dirty != VM_PAGE_BITS_ALL) { /* * Explicitly dirty the physical page. Otherwise, the * caller's changes may go unnoticed because they are * performed through an unmanaged mapping or by a DMA * operation. * * The object lock is not held here. * See vm_page_clear_dirty_mask(). */ vm_page_dirty(*mp); } } if (pmap_failed) { /* * One or more pages could not be held by the pmap. Either no * page was mapped at the specified virtual address or that * mapping had insufficient permissions. Attempt to fault in * and hold these pages.
*/ for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) if (*mp == NULL && vm_fault_hold(map, va, prot, VM_FAULT_NORMAL, mp) != KERN_SUCCESS) goto error; } return (count); error: for (mp = ma; mp < ma + count; mp++) if (*mp != NULL) { vm_page_lock(*mp); vm_page_unhold(*mp); vm_page_unlock(*mp); } return (-1); } /* * vm_fault_wire: * * Wire down a range of virtual addresses in a map. */ int vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t fictitious) { vm_offset_t va; int rv; /* * We simulate a fault to get the page and enter it in the physical * map. For user wiring, we only ask for read access on currently * read-only sections. */ for (va = start; va < end; va += PAGE_SIZE) { rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING); if (rv) { if (va != start) vm_fault_unwire(map, start, va, fictitious); return (rv); } } return (KERN_SUCCESS); } /* * vm_fault_unwire: * * Unwire a range of virtual addresses in a map. */ void vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t fictitious) { vm_paddr_t pa; vm_offset_t va; vm_page_t m; pmap_t pmap; pmap = vm_map_pmap(map); /* * Since the pages are wired down, we must be able to get their * mappings from the physical map system. */ for (va = start; va < end; va += PAGE_SIZE) { pa = pmap_extract(pmap, va); if (pa != 0) { pmap_change_wiring(pmap, va, FALSE); if (!fictitious) { m = PHYS_TO_VM_PAGE(pa); vm_page_lock(m); vm_page_unwire(m, TRUE); vm_page_unlock(m); } } } } /* * Routine: * vm_fault_copy_entry * Function: * Create new shadow object backing dst_entry with private copy of * all underlying pages. When src_entry is equal to dst_entry, * function implements COW for wired-down map entry. Otherwise, * it forks wired entry into dst_map. * * In/out conditions: * The source and destination maps must be locked for write. * The source map entry must be wired down (or be a sharing map * entry corresponding to a main map entry that is wired down). */ void vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, vm_map_entry_t dst_entry, vm_map_entry_t src_entry, vm_ooffset_t *fork_charge) { vm_object_t backing_object, dst_object, object, src_object; vm_pindex_t dst_pindex, pindex, src_pindex; vm_prot_t access, prot; vm_offset_t vaddr; vm_page_t dst_m; vm_page_t src_m; boolean_t src_readonly, upgrade; #ifdef lint src_map++; #endif /* lint */ upgrade = src_entry == dst_entry; src_object = src_entry->object.vm_object; src_pindex = OFF_TO_IDX(src_entry->offset); src_readonly = (src_entry->protection & VM_PROT_WRITE) == 0; /* * Create the top-level object for the destination entry. (Doesn't * actually shadow anything - we copy the pages directly.) 
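 *
 * [Illustrative aside, not part of the original source: a
 * hypothetical in-kernel caller of vm_fault_quick_hold_pages(),
 * defined above, wiring down a user buffer before I/O. The buffer
 * variables and the fixed array size are illustrative only.]
 *
 *	vm_page_t ma[16];
 *	vm_offset_t uaddr;	// user buffer address, assumed set
 *	vm_size_t len;		// buffer length, at most 16 pages here
 *	int count, i;
 *
 *	count = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *	    uaddr, len, VM_PROT_READ | VM_PROT_WRITE, ma, 16);
 *	if (count == -1)
 *		return (EFAULT);  // unmapped or insufficient permission
 *	// ... perform the I/O against the held pages ...
 *	for (i = 0; i < count; i++) {
 *		vm_page_lock(ma[i]);
 *		vm_page_unhold(ma[i]);
 *		vm_page_unlock(ma[i]);
 *	}
 *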
*/ dst_object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(dst_entry->end - dst_entry->start)); #if VM_NRESERVLEVEL > 0 dst_object->flags |= OBJ_COLORED; dst_object->pg_color = atop(dst_entry->start); #endif VM_OBJECT_LOCK(dst_object); KASSERT(upgrade || dst_entry->object.vm_object == NULL, ("vm_fault_copy_entry: vm_object not NULL")); dst_entry->object.vm_object = dst_object; dst_entry->offset = 0; dst_object->charge = dst_entry->end - dst_entry->start; if (fork_charge != NULL) { KASSERT(dst_entry->cred == NULL, ("vm_fault_copy_entry: leaked swp charge")); dst_object->cred = curthread->td_ucred; crhold(dst_object->cred); *fork_charge += dst_object->charge; } else { dst_object->cred = dst_entry->cred; dst_entry->cred = NULL; } access = prot = dst_entry->protection; /* * If not an upgrade, then enter the mappings in the pmap as * read and/or execute accesses. Otherwise, enter them as * write accesses. * * A writeable large page mapping is only created if all of * the constituent small page mappings are modified. Marking * PTEs as modified on inception allows promotion to happen * without taking potentially large number of soft faults. */ if (!upgrade) access &= ~VM_PROT_WRITE; /* * Loop through all of the pages in the entry's range, copying each * one from the source object (it should be there) to the destination * object. */ for (vaddr = dst_entry->start, dst_pindex = 0; vaddr < dst_entry->end; vaddr += PAGE_SIZE, dst_pindex++) { /* * Allocate a page in the destination object. */ do { dst_m = vm_page_alloc(dst_object, dst_pindex, VM_ALLOC_NORMAL); if (dst_m == NULL) { VM_OBJECT_UNLOCK(dst_object); VM_WAIT; VM_OBJECT_LOCK(dst_object); } } while (dst_m == NULL); /* * Find the page in the source object, and copy it in. * (Because the source is wired down, the page will be in * memory.) */ VM_OBJECT_LOCK(src_object); object = src_object; pindex = src_pindex + dst_pindex; while ((src_m = vm_page_lookup(object, pindex)) == NULL && src_readonly && (backing_object = object->backing_object) != NULL) { /* * Allow fallback to backing objects if we are reading. */ VM_OBJECT_LOCK(backing_object); pindex += OFF_TO_IDX(object->backing_object_offset); VM_OBJECT_UNLOCK(object); object = backing_object; } if (src_m == NULL) panic("vm_fault_copy_wired: page missing"); pmap_copy_page(src_m, dst_m); VM_OBJECT_UNLOCK(object); dst_m->valid = VM_PAGE_BITS_ALL; VM_OBJECT_UNLOCK(dst_object); /* * Enter it in the pmap. If a wired, copy-on-write * mapping is being replaced by a write-enabled * mapping, then wire that new mapping. */ pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade); /* * Mark it no longer busy, and put it on the active list. */ VM_OBJECT_LOCK(dst_object); if (upgrade) { vm_page_lock(src_m); vm_page_unwire(src_m, 0); vm_page_unlock(src_m); vm_page_lock(dst_m); vm_page_wire(dst_m); vm_page_unlock(dst_m); } else { vm_page_lock(dst_m); vm_page_activate(dst_m); vm_page_unlock(dst_m); } vm_page_wakeup(dst_m); } VM_OBJECT_UNLOCK(dst_object); if (upgrade) { dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); vm_object_deallocate(src_object); } } /* * This routine checks around the requested page for other pages that * might be able to be faulted in. This routine brackets the viable * pages for the pages to be paged in. 
* * Inputs: * m, rbehind, rahead * * Outputs: * marray (array of vm_page_t), reqpage (index of requested page) * * Return value: * number of pages in marray */ static int vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage) vm_page_t m; int rbehind; int rahead; vm_page_t *marray; int *reqpage; { int i,j; vm_object_t object; vm_pindex_t pindex, startpindex, endpindex, tpindex; vm_page_t rtm; int cbehind, cahead; VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); object = m->object; pindex = m->pindex; cbehind = cahead = 0; /* * if the requested page is not available, then give up now */ if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { return 0; } if ((cbehind == 0) && (cahead == 0)) { *reqpage = 0; marray[0] = m; return 1; } if (rahead > cahead) { rahead = cahead; } if (rbehind > cbehind) { rbehind = cbehind; } /* * scan backward for the read behind pages -- in memory */ if (pindex > 0) { if (rbehind > pindex) { rbehind = pindex; startpindex = 0; } else { startpindex = pindex - rbehind; } if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL && rtm->pindex >= startpindex) startpindex = rtm->pindex + 1; /* tpindex is unsigned; beware of numeric underflow. */ for (i = 0, tpindex = pindex - 1; tpindex >= startpindex && tpindex < pindex; i++, tpindex--) { rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); if (rtm == NULL) { /* * Shift the allocated pages to the * beginning of the array. */ for (j = 0; j < i; j++) { marray[j] = marray[j + tpindex + 1 - startpindex]; } break; } marray[tpindex - startpindex] = rtm; } } else { startpindex = 0; i = 0; } marray[i] = m; /* page offset of the required page */ *reqpage = i; tpindex = pindex + 1; i++; /* * scan forward for the read ahead pages */ endpindex = tpindex + rahead; if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex) endpindex = rtm->pindex; if (endpindex > object->size) endpindex = object->size; for (; tpindex < endpindex; i++, tpindex++) { rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); if (rtm == NULL) { break; } marray[i] = rtm; } /* return number of pages */ return i; } /* * Block entry into the machine-independent layer's page fault handler by * the calling thread. Subsequent calls to vm_fault() by that thread will * return KERN_PROTECTION_FAILURE. Enable machine-dependent handling of * spurious page faults. */ int vm_fault_disable_pagefaults(void) { return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR)); } void vm_fault_enable_pagefaults(int save) { curthread_pflags_restore(save); } Index: projects/nand/sys/vm/vm_map.c =================================================================== --- projects/nand/sys/vm/vm_map.c (revision 235262) +++ projects/nand/sys/vm/vm_map.c (revision 235263) @@ -1,4072 +1,4074 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Virtual memory mapping module. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Virtual memory maps provide for the mapping, protection, * and sharing of virtual memory objects. In addition, * this module provides for an efficient virtual copy of * memory from one map to another. * * Synchronization is required prior to most operations. * * Maps consist of an ordered doubly-linked list of simple * entries; a self-adjusting binary search tree of these * entries is used to speed up lookups. * * Since portions of maps are specified by start/end addresses, * which may not align with existing map entries, all * routines merely "clip" entries to these start/end values. * [That is, an entry is split into two, bordering at a * start or end value.] Note that these clippings may not * always be necessary (as the two resulting entries are then * not changed); however, the clipping is done for convenience. * * As mentioned above, virtual copy operations are performed * by copying VM object references from one map to * another, and then marking both regions as copy-on-write. 
*/ static struct mtx map_sleep_mtx; static uma_zone_t mapentzone; static uma_zone_t kmapentzone; static uma_zone_t mapzone; static uma_zone_t vmspace_zone; static struct vm_object kmapentobj; static int vmspace_zinit(void *mem, int size, int flags); static void vmspace_zfini(void *mem, int size); static int vm_map_zinit(void *mem, int size, int flags); static void vm_map_zfini(void *mem, int size); static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max); static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry); #ifdef INVARIANTS static void vm_map_zdtor(void *mem, int size, void *arg); static void vmspace_zdtor(void *mem, int size, void *arg); #endif #define ENTRY_CHARGED(e) ((e)->cred != NULL || \ ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \ !((e)->eflags & MAP_ENTRY_NEEDS_COPY))) /* * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type * stable. */ #define PROC_VMSPACE_LOCK(p) do { } while (0) #define PROC_VMSPACE_UNLOCK(p) do { } while (0) /* * VM_MAP_RANGE_CHECK: [ internal use only ] * * Asserts that the starting and ending region * addresses fall within the valid range of the map. */ #define VM_MAP_RANGE_CHECK(map, start, end) \ { \ if (start < vm_map_min(map)) \ start = vm_map_min(map); \ if (end > vm_map_max(map)) \ end = vm_map_max(map); \ if (start > end) \ start = end; \ } /* * vm_map_startup: * * Initialize the vm_map module. Must be called before * any other vm_map routines. * * Map and entry structures are allocated from the general * purpose memory pool with some exceptions: * * - The kernel map and kmem submap are allocated statically. * - Kernel map entries are allocated out of a static pool. * * These restrictions are necessary since malloc() uses the * maps and requires map entries.
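 *
 * [Illustrative aside, not part of the original source: a worked
 * example of VM_MAP_RANGE_CHECK above, with hypothetical bounds of
 * vm_map_min(map) == 0x1000 and vm_map_max(map) == 0x8000.]
 *
 *	vm_offset_t start = 0x0800, end = 0x9000;
 *
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	// Now start == 0x1000 and end == 0x8000; a range that came out
 *	// reversed would instead collapse to start == end.
 *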
*/ void vm_map_startup(void) { mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, #ifdef INVARIANTS vm_map_zdtor, #else NULL, #endif vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uma_prealloc(mapzone, MAX_KMAP); kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS | UMA_ZONE_VM); uma_prealloc(kmapentzone, MAX_KMAPENT); mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } static void vmspace_zfini(void *mem, int size) { struct vmspace *vm; vm = (struct vmspace *)mem; vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map)); } static int vmspace_zinit(void *mem, int size, int flags) { struct vmspace *vm; vm = (struct vmspace *)mem; vm->vm_map.pmap = NULL; (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags); return (0); } static void vm_map_zfini(void *mem, int size) { vm_map_t map; map = (vm_map_t)mem; mtx_destroy(&map->system_mtx); sx_destroy(&map->lock); } static int vm_map_zinit(void *mem, int size, int flags) { vm_map_t map; map = (vm_map_t)mem; map->nentries = 0; map->size = 0; mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); sx_init(&map->lock, "user map"); return (0); } #ifdef INVARIANTS static void vmspace_zdtor(void *mem, int size, void *arg) { struct vmspace *vm; vm = (struct vmspace *)mem; vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); } static void vm_map_zdtor(void *mem, int size, void *arg) { vm_map_t map; map = (vm_map_t)mem; KASSERT(map->nentries == 0, ("map %p nentries == %d on free.", map, map->nentries)); KASSERT(map->size == 0, ("map %p size == %lu on free.", map, (unsigned long)map->size)); } #endif /* INVARIANTS */ /* * Allocate a vmspace structure, including a vm_map and pmap, * and initialize those structures. The refcnt is set to 1. */ struct vmspace * vmspace_alloc(min, max) vm_offset_t min, max; { struct vmspace *vm; vm = uma_zalloc(vmspace_zone, M_WAITOK); if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) { uma_zfree(vmspace_zone, vm); return (NULL); } CTR1(KTR_VM, "vmspace_alloc: %p", vm); _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max); vm->vm_refcnt = 1; vm->vm_shm = NULL; vm->vm_swrss = 0; vm->vm_tsize = 0; vm->vm_dsize = 0; vm->vm_ssize = 0; vm->vm_taddr = 0; vm->vm_daddr = 0; vm->vm_maxsaddr = 0; return (vm); } void vm_init2(void) { uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count, (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 + maxproc * 2 + maxfiles); vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, #ifdef INVARIANTS vmspace_zdtor, #else NULL, #endif vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); } static void vmspace_container_reset(struct proc *p) { #ifdef RACCT PROC_LOCK(p); racct_set(p, RACCT_DATA, 0); racct_set(p, RACCT_STACK, 0); racct_set(p, RACCT_RSS, 0); racct_set(p, RACCT_MEMLOCK, 0); racct_set(p, RACCT_VMEM, 0); PROC_UNLOCK(p); #endif } static inline void vmspace_dofree(struct vmspace *vm) { CTR1(KTR_VM, "vmspace_free: %p", vm); /* * Make sure any SysV shm is freed; it might not have been in * exit1(). */ shmexit(vm); /* * Lock the map, to wait out all other references to it. * Delete all of the mappings and pages they hold, then call * the pmap module to reclaim anything left.
*/ (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset, vm->vm_map.max_offset); pmap_release(vmspace_pmap(vm)); vm->vm_map.pmap = NULL; uma_zfree(vmspace_zone, vm); } void vmspace_free(struct vmspace *vm) { if (vm->vm_refcnt == 0) panic("vmspace_free: attempt to free already freed vmspace"); if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1) vmspace_dofree(vm); } void vmspace_exitfree(struct proc *p) { struct vmspace *vm; PROC_VMSPACE_LOCK(p); vm = p->p_vmspace; p->p_vmspace = NULL; PROC_VMSPACE_UNLOCK(p); KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace")); vmspace_free(vm); } void vmspace_exit(struct thread *td) { int refcnt; struct vmspace *vm; struct proc *p; /* * Release user portion of address space. * This releases references to vnodes, * which could cause I/O if the file has been unlinked. * Need to do this early enough that we can still sleep. * * The last exiting process to reach this point releases as * much of the environment as it can. vmspace_dofree() is the * slower fallback in case another process had a temporary * reference to the vmspace. */ p = td->td_proc; vm = p->p_vmspace; atomic_add_int(&vmspace0.vm_refcnt, 1); do { refcnt = vm->vm_refcnt; if (refcnt > 1 && p->p_vmspace != &vmspace0) { /* Switch now since other proc might free vmspace */ PROC_VMSPACE_LOCK(p); p->p_vmspace = &vmspace0; PROC_VMSPACE_UNLOCK(p); pmap_activate(td); } } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1)); if (refcnt == 1) { if (p->p_vmspace != vm) { /* vmspace not yet freed, switch back */ PROC_VMSPACE_LOCK(p); p->p_vmspace = vm; PROC_VMSPACE_UNLOCK(p); pmap_activate(td); } pmap_remove_pages(vmspace_pmap(vm)); /* Switch now since this proc will free vmspace */ PROC_VMSPACE_LOCK(p); p->p_vmspace = &vmspace0; PROC_VMSPACE_UNLOCK(p); pmap_activate(td); vmspace_dofree(vm); } vmspace_container_reset(p); } /* Acquire reference to vmspace owned by another process. */ struct vmspace * vmspace_acquire_ref(struct proc *p) { struct vmspace *vm; int refcnt; PROC_VMSPACE_LOCK(p); vm = p->p_vmspace; if (vm == NULL) { PROC_VMSPACE_UNLOCK(p); return (NULL); } do { refcnt = vm->vm_refcnt; if (refcnt <= 0) { /* Avoid 0->1 transition */ PROC_VMSPACE_UNLOCK(p); return (NULL); } } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1)); if (vm != p->p_vmspace) { PROC_VMSPACE_UNLOCK(p); vmspace_free(vm); return (NULL); } PROC_VMSPACE_UNLOCK(p); return (vm); } void _vm_map_lock(vm_map_t map, const char *file, int line) { if (map->system_map) mtx_lock_flags_(&map->system_mtx, 0, file, line); else sx_xlock_(&map->lock, file, line); map->timestamp++; } static void vm_map_process_deferred(void) { struct thread *td; vm_map_entry_t entry; vm_object_t object; td = curthread; while ((entry = td->td_map_def_user) != NULL) { td->td_map_def_user = entry->next; if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) { /* * Decrement the object's writemappings and * possibly the vnode's v_writecount. 
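 *
 * [Illustrative aside, not part of the original source: the
 * compare-and-set loop in vmspace_acquire_ref() above is an instance
 * of a general pattern: never revive a reference count that has
 * already fallen to zero. A generic sketch, where "obj" stands in
 * for any refcounted structure with an int refcnt field:]
 *
 *	do {
 *		refcnt = obj->refcnt;
 *		if (refcnt <= 0)	// teardown already in progress
 *			return (NULL);	// refuse the 0 -> 1 transition
 *	} while (!atomic_cmpset_int(&obj->refcnt, refcnt, refcnt + 1));
 *	// The caller now owns a reference it must later release.
 *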
*/ KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, ("Submap with writecount")); object = entry->object.vm_object; KASSERT(object != NULL, ("No object for writecount")); vnode_pager_release_writecount(object, entry->start, entry->end); } vm_map_entry_deallocate(entry, FALSE); } } void _vm_map_unlock(vm_map_t map, const char *file, int line) { if (map->system_map) mtx_unlock_flags_(&map->system_mtx, 0, file, line); else { sx_xunlock_(&map->lock, file, line); vm_map_process_deferred(); } } void _vm_map_lock_read(vm_map_t map, const char *file, int line) { if (map->system_map) mtx_lock_flags_(&map->system_mtx, 0, file, line); else sx_slock_(&map->lock, file, line); } void _vm_map_unlock_read(vm_map_t map, const char *file, int line) { if (map->system_map) mtx_unlock_flags_(&map->system_mtx, 0, file, line); else { sx_sunlock_(&map->lock, file, line); vm_map_process_deferred(); } } int _vm_map_trylock(vm_map_t map, const char *file, int line) { int error; error = map->system_map ? !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : !sx_try_xlock_(&map->lock, file, line); if (error == 0) map->timestamp++; return (error == 0); } int _vm_map_trylock_read(vm_map_t map, const char *file, int line) { int error; error = map->system_map ? !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : !sx_try_slock_(&map->lock, file, line); return (error == 0); } /* * _vm_map_lock_upgrade: [ internal use only ] * * Tries to upgrade a read (shared) lock on the specified map to a write * (exclusive) lock. Returns the value "0" if the upgrade succeeds and a * non-zero value if the upgrade fails. If the upgrade fails, the map is * returned without a read or write lock held. * * Requires that the map be read locked. */ int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line) { unsigned int last_timestamp; if (map->system_map) { mtx_assert_(&map->system_mtx, MA_OWNED, file, line); } else { if (!sx_try_upgrade_(&map->lock, file, line)) { last_timestamp = map->timestamp; sx_sunlock_(&map->lock, file, line); vm_map_process_deferred(); /* * If the map's timestamp does not change while the * map is unlocked, then the upgrade succeeds. */ sx_xlock_(&map->lock, file, line); if (last_timestamp != map->timestamp) { sx_xunlock_(&map->lock, file, line); return (1); } } } map->timestamp++; return (0); } void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line) { if (map->system_map) { mtx_assert_(&map->system_mtx, MA_OWNED, file, line); } else sx_downgrade_(&map->lock, file, line); } /* * vm_map_locked: * * Returns a non-zero value if the caller holds a write (exclusive) lock * on the specified map and the value "0" otherwise. */ int vm_map_locked(vm_map_t map) { if (map->system_map) return (mtx_owned(&map->system_mtx)); else return (sx_xlocked(&map->lock)); } #ifdef INVARIANTS static void _vm_map_assert_locked(vm_map_t map, const char *file, int line) { if (map->system_map) mtx_assert_(&map->system_mtx, MA_OWNED, file, line); else sx_assert_(&map->lock, SA_XLOCKED, file, line); } #define VM_MAP_ASSERT_LOCKED(map) \ _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE) #else #define VM_MAP_ASSERT_LOCKED(map) #endif /* * _vm_map_unlock_and_wait: * * Atomically releases the lock on the specified map and puts the calling * thread to sleep. The calling thread will remain asleep until either * vm_map_wakeup() is performed on the map or the specified timeout is * exceeded. * * WARNING! This function does not perform deferred deallocations of * objects and map entries. 
Therefore, the calling thread is expected to * reacquire the map lock after reawakening and later perform an ordinary * unlock operation, such as vm_map_unlock(), before completing its * operation on the map. */ int _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line) { mtx_lock(&map_sleep_mtx); if (map->system_map) mtx_unlock_flags_(&map->system_mtx, 0, file, line); else sx_xunlock_(&map->lock, file, line); return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", timo)); } /* * vm_map_wakeup: * * Awaken any threads that have slept on the map using * vm_map_unlock_and_wait(). */ void vm_map_wakeup(vm_map_t map) { /* * Acquire and release map_sleep_mtx to prevent a wakeup() * from being performed (and lost) between the map unlock * and the msleep() in _vm_map_unlock_and_wait(). */ mtx_lock(&map_sleep_mtx); mtx_unlock(&map_sleep_mtx); wakeup(&map->root); } void vm_map_busy(vm_map_t map) { VM_MAP_ASSERT_LOCKED(map); map->busy++; } void vm_map_unbusy(vm_map_t map) { VM_MAP_ASSERT_LOCKED(map); KASSERT(map->busy, ("vm_map_unbusy: not busy")); if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) { vm_map_modflags(map, 0, MAP_BUSY_WAKEUP); wakeup(&map->busy); } } void vm_map_wait_busy(vm_map_t map) { VM_MAP_ASSERT_LOCKED(map); while (map->busy) { vm_map_modflags(map, MAP_BUSY_WAKEUP, 0); if (map->system_map) msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0); else sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0); } map->timestamp++; } long vmspace_resident_count(struct vmspace *vmspace) { return pmap_resident_count(vmspace_pmap(vmspace)); } long vmspace_wired_count(struct vmspace *vmspace) { return pmap_wired_count(vmspace_pmap(vmspace)); } /* * vm_map_create: * * Creates and returns a new empty VM map with * the given physical map structure, and having * the given lower and upper address bounds. */ vm_map_t vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) { vm_map_t result; result = uma_zalloc(mapzone, M_WAITOK); CTR1(KTR_VM, "vm_map_create: %p", result); _vm_map_init(result, pmap, min, max); return (result); } /* * Initialize an existing vm_map structure * such as that in the vmspace structure. */ static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) { map->header.next = map->header.prev = &map->header; map->needs_wakeup = FALSE; map->system_map = 0; map->pmap = pmap; map->min_offset = min; map->max_offset = max; map->flags = 0; map->root = NULL; map->timestamp = 0; map->busy = 0; } void vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) { _vm_map_init(map, pmap, min, max); mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); sx_init(&map->lock, "user map"); } /* * vm_map_entry_dispose: [ internal use only ] * * Inverse of vm_map_entry_create. */ static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) { uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); } /* * vm_map_entry_create: [ internal use only ] * * Allocates a VM map entry for insertion. * No entry fields are filled in. */ static vm_map_entry_t vm_map_entry_create(vm_map_t map) { vm_map_entry_t new_entry; if (map->system_map) new_entry = uma_zalloc(kmapentzone, M_NOWAIT); else new_entry = uma_zalloc(mapentzone, M_WAITOK); if (new_entry == NULL) panic("vm_map_entry_create: kernel resources exhausted"); return (new_entry); } /* * vm_map_entry_set_behavior: * * Set the expected access behavior, either normal, random, or * sequential. 
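 *
 * [Illustrative aside, not part of the original source: a sketch of
 * how a caller such as vm_map_madvise() might translate madvise(2)
 * advice into these behaviors; the switch is paraphrased for
 * illustration, not quoted from that function.]
 *
 *	switch (advice) {
 *	case MADV_NORMAL:
 *		vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_NORMAL);
 *		break;
 *	case MADV_SEQUENTIAL:	// enables aggressive fault read-ahead
 *		vm_map_entry_set_behavior(entry,
 *		    MAP_ENTRY_BEHAV_SEQUENTIAL);
 *		break;
 *	case MADV_RANDOM:	// disables fault read-ahead
 *		vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_RANDOM);
 *		break;
 *	}
 *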
*/ static inline void vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) { entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | (behavior & MAP_ENTRY_BEHAV_MASK); } /* * vm_map_entry_set_max_free: * * Set the max_free field in a vm_map_entry. */ static inline void vm_map_entry_set_max_free(vm_map_entry_t entry) { entry->max_free = entry->adj_free; if (entry->left != NULL && entry->left->max_free > entry->max_free) entry->max_free = entry->left->max_free; if (entry->right != NULL && entry->right->max_free > entry->max_free) entry->max_free = entry->right->max_free; } /* * vm_map_entry_splay: * * The Sleator and Tarjan top-down splay algorithm with the * following variation. Max_free must be computed bottom-up, so * on the downward pass, maintain the left and right spines in * reverse order. Then, make a second pass up each side to fix * the pointers and compute max_free. The time bound is O(log n) * amortized. * * The new root is the vm_map_entry containing "addr", or else an * adjacent entry (lower or higher) if addr is not in the tree. * * The map must be locked, and leaves it so. * * Returns: the new root. */ static vm_map_entry_t vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root) { vm_map_entry_t llist, rlist; vm_map_entry_t ltree, rtree; vm_map_entry_t y; /* Special case of empty tree. */ if (root == NULL) return (root); /* * Pass One: Splay down the tree until we find addr or a NULL * pointer where addr would go. llist and rlist are the two * sides in reverse order (bottom-up), with llist linked by * the right pointer and rlist linked by the left pointer in * the vm_map_entry. Wait until Pass Two to set max_free on * the two spines. */ llist = NULL; rlist = NULL; for (;;) { /* root is never NULL in here. */ if (addr < root->start) { y = root->left; if (y == NULL) break; if (addr < y->start && y->left != NULL) { /* Rotate right and put y on rlist. */ root->left = y->right; y->right = root; vm_map_entry_set_max_free(root); root = y->left; y->left = rlist; rlist = y; } else { /* Put root on rlist. */ root->left = rlist; rlist = root; root = y; } } else if (addr >= root->end) { y = root->right; if (y == NULL) break; if (addr >= y->end && y->right != NULL) { /* Rotate left and put y on llist. */ root->right = y->left; y->left = root; vm_map_entry_set_max_free(root); root = y->right; y->right = llist; llist = y; } else { /* Put root on llist. */ root->right = llist; llist = root; root = y; } } else break; } /* * Pass Two: Walk back up the two spines, flip the pointers * and set max_free. The subtrees of the root go at the * bottom of llist and rlist. */ ltree = root->left; while (llist != NULL) { y = llist->right; llist->right = ltree; vm_map_entry_set_max_free(llist); ltree = llist; llist = y; } rtree = root->right; while (rlist != NULL) { y = rlist->left; rlist->left = rtree; vm_map_entry_set_max_free(rlist); rtree = rlist; rlist = y; } /* * Final assembly: add ltree and rtree as subtrees of root. */ root->left = ltree; root->right = rtree; vm_map_entry_set_max_free(root); return (root); } /* * vm_map_entry_{un,}link: * * Insert/remove entries from maps. 
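 *
 * [Illustrative aside, not part of the original source: a small
 * worked example for vm_map_entry_set_max_free() above. Assume
 * entry->adj_free == 4 pages, entry->left->max_free == 16 and
 * entry->right->max_free == 8.]
 *
 *	vm_map_entry_set_max_free(entry);
 *	// entry->max_free is now 16, so a search for a 12-page gap
 *	// descends into the left subtree and skips the right subtree
 *	// entirely, which is what keeps the first-fit walk O(log n).
 *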
*/ static void vm_map_entry_link(vm_map_t map, vm_map_entry_t after_where, vm_map_entry_t entry) { CTR4(KTR_VM, "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map, map->nentries, entry, after_where); VM_MAP_ASSERT_LOCKED(map); map->nentries++; entry->prev = after_where; entry->next = after_where->next; entry->next->prev = entry; after_where->next = entry; if (after_where != &map->header) { if (after_where != map->root) vm_map_entry_splay(after_where->start, map->root); entry->right = after_where->right; entry->left = after_where; after_where->right = NULL; after_where->adj_free = entry->start - after_where->end; vm_map_entry_set_max_free(after_where); } else { entry->right = map->root; entry->left = NULL; } entry->adj_free = (entry->next == &map->header ? map->max_offset : entry->next->start) - entry->end; vm_map_entry_set_max_free(entry); map->root = entry; } static void vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry) { vm_map_entry_t next, prev, root; VM_MAP_ASSERT_LOCKED(map); if (entry != map->root) vm_map_entry_splay(entry->start, map->root); if (entry->left == NULL) root = entry->right; else { root = vm_map_entry_splay(entry->start, entry->left); root->right = entry->right; root->adj_free = (entry->next == &map->header ? map->max_offset : entry->next->start) - root->end; vm_map_entry_set_max_free(root); } map->root = root; prev = entry->prev; next = entry->next; next->prev = prev; prev->next = next; map->nentries--; CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map, map->nentries, entry); } /* * vm_map_entry_resize_free: * * Recompute the amount of free space following a vm_map_entry * and propagate that value up the tree. Call this function after * resizing a map entry in-place, that is, without a call to * vm_map_entry_link() or _unlink(). * * The map must be locked, and leaves it so. */ static void vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry) { /* * Using splay trees without parent pointers, propagating * max_free up the tree is done by moving the entry to the * root and making the change there. */ if (entry != map->root) map->root = vm_map_entry_splay(entry->start, map->root); entry->adj_free = (entry->next == &map->header ? map->max_offset : entry->next->start) - entry->end; vm_map_entry_set_max_free(entry); } /* * vm_map_lookup_entry: [ internal use only ] * * Finds the map entry containing (or * immediately preceding) the specified address * in the given map; the entry is returned * in the "entry" parameter. The boolean * result indicates whether the address is * actually contained in the map. */ boolean_t vm_map_lookup_entry( vm_map_t map, vm_offset_t address, vm_map_entry_t *entry) /* OUT */ { vm_map_entry_t cur; boolean_t locked; /* * If the map is empty, then the map entry immediately preceding * "address" is the map's header. */ cur = map->root; if (cur == NULL) *entry = &map->header; else if (address >= cur->start && cur->end > address) { *entry = cur; return (TRUE); } else if ((locked = vm_map_locked(map)) || sx_try_upgrade(&map->lock)) { /* * Splay requires a write lock on the map. However, it only * restructures the binary search tree; it does not otherwise * change the map. Thus, the map's timestamp need not change * on a temporary upgrade. */ map->root = cur = vm_map_entry_splay(address, cur); if (!locked) sx_downgrade(&map->lock); /* * If "address" is contained within a map entry, the new root * is that map entry. Otherwise, the new root is a map entry * immediately before or after "address". 
*/ if (address >= cur->start) { *entry = cur; if (cur->end > address) return (TRUE); } else *entry = cur->prev; } else /* * Since the map is only locked for read access, perform a * standard binary search tree lookup for "address". */ for (;;) { if (address < cur->start) { if (cur->left == NULL) { *entry = cur->prev; break; } cur = cur->left; } else if (cur->end > address) { *entry = cur; return (TRUE); } else { if (cur->right == NULL) { *entry = cur; break; } cur = cur->right; } } return (FALSE); } /* * vm_map_insert: * * Inserts the given whole VM object into the target * map at the specified address range. The object's * size should match that of the address range. * * Requires that the map be locked, and leaves it so. * * If object is non-NULL, ref count must be bumped by caller * prior to making call to account for the new entry. */ int vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) { vm_map_entry_t new_entry; vm_map_entry_t prev_entry; vm_map_entry_t temp_entry; vm_eflags_t protoeflags; struct ucred *cred; vm_inherit_t inheritance; boolean_t charge_prev_obj; VM_MAP_ASSERT_LOCKED(map); /* * Check that the start and end points are not bogus. */ if ((start < map->min_offset) || (end > map->max_offset) || (start >= end)) return (KERN_INVALID_ADDRESS); /* * Find the entry prior to the proposed starting address; if it's part * of an existing entry, this range is bogus. */ if (vm_map_lookup_entry(map, start, &temp_entry)) return (KERN_NO_SPACE); prev_entry = temp_entry; /* * Assert that the next entry doesn't overlap the end point. */ if ((prev_entry->next != &map->header) && (prev_entry->next->start < end)) return (KERN_NO_SPACE); protoeflags = 0; charge_prev_obj = FALSE; if (cow & MAP_COPY_ON_WRITE) protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY; if (cow & MAP_NOFAULT) { protoeflags |= MAP_ENTRY_NOFAULT; KASSERT(object == NULL, ("vm_map_insert: paradoxical MAP_NOFAULT request")); } if (cow & MAP_DISABLE_SYNCER) protoeflags |= MAP_ENTRY_NOSYNC; if (cow & MAP_DISABLE_COREDUMP) protoeflags |= MAP_ENTRY_NOCOREDUMP; if (cow & MAP_VN_WRITECOUNT) protoeflags |= MAP_ENTRY_VN_WRITECNT; if (cow & MAP_INHERIT_SHARE) inheritance = VM_INHERIT_SHARE; else inheritance = VM_INHERIT_DEFAULT; cred = NULL; KASSERT((object != kmem_object && object != kernel_object) || ((object == kmem_object || object == kernel_object) && !(protoeflags & MAP_ENTRY_NEEDS_COPY)), ("kmem or kernel object and cow")); if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT)) goto charged; if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) && ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) { if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start)) return (KERN_RESOURCE_SHORTAGE); KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) || object->cred == NULL, ("OVERCOMMIT: vm_map_insert o %p", object)); cred = curthread->td_ucred; crhold(cred); if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY)) charge_prev_obj = TRUE; } charged: /* Expand the kernel pmap, if necessary. */ if (map == kernel_map && end > kernel_vm_end) pmap_growkernel(end); if (object != NULL) { /* * OBJ_ONEMAPPING must be cleared unless this mapping * is trivially proven to be the only mapping for any * of the object's pages. (Object granularity * reference counting is insufficient to recognize * aliases with precision.) 
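 *
 * [Illustrative aside, not part of the original source: a minimal
 * usage sketch for vm_map_lookup_entry() above; "map" and "addr" are
 * assumed to be in scope and the map locked.]
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// addr lies inside *entry:
 *		// entry->start <= addr && addr < entry->end
 *	} else {
 *		// entry is the predecessor, possibly &map->header;
 *		// any entry covering addr would begin at entry->next.
 *	}
 *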
*/ VM_OBJECT_LOCK(object); if (object->ref_count > 1 || object->shadow_count != 0) vm_object_clear_flag(object, OBJ_ONEMAPPING); VM_OBJECT_UNLOCK(object); } else if ((prev_entry != &map->header) && (prev_entry->eflags == protoeflags) && (prev_entry->end == start) && (prev_entry->wired_count == 0) && (prev_entry->cred == cred || (prev_entry->object.vm_object != NULL && (prev_entry->object.vm_object->cred == cred))) && vm_object_coalesce(prev_entry->object.vm_object, prev_entry->offset, (vm_size_t)(prev_entry->end - prev_entry->start), (vm_size_t)(end - prev_entry->end), charge_prev_obj)) { /* * We were able to extend the object. Determine if we * can extend the previous map entry to include the * new range as well. */ if ((prev_entry->inheritance == inheritance) && (prev_entry->protection == prot) && (prev_entry->max_protection == max)) { map->size += (end - prev_entry->end); prev_entry->end = end; vm_map_entry_resize_free(map, prev_entry); vm_map_simplify_entry(map, prev_entry); if (cred != NULL) crfree(cred); return (KERN_SUCCESS); } /* * If we can extend the object but cannot extend the * map entry, we have to create a new map entry. We * must bump the ref count on the extended object to * account for it. object may be NULL. */ object = prev_entry->object.vm_object; offset = prev_entry->offset + (prev_entry->end - prev_entry->start); vm_object_reference(object); if (cred != NULL && object != NULL && object->cred != NULL && !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { /* Object already accounts for this uid. */ crfree(cred); cred = NULL; } } /* * NOTE: if conditionals fail, object can be NULL here. This occurs * in things like the buffer map where we manage kva but do not manage * backing objects. */ /* * Create a new entry */ new_entry = vm_map_entry_create(map); new_entry->start = start; new_entry->end = end; new_entry->cred = NULL; new_entry->eflags = protoeflags; new_entry->object.vm_object = object; new_entry->offset = offset; new_entry->avail_ssize = 0; new_entry->inheritance = inheritance; new_entry->protection = prot; new_entry->max_protection = max; new_entry->wired_count = 0; + new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT; + new_entry->next_read = OFF_TO_IDX(offset); KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry), ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry)); new_entry->cred = cred; /* * Insert the new entry into the list */ vm_map_entry_link(map, prev_entry, new_entry); map->size += new_entry->end - new_entry->start; /* * It may be possible to merge the new entry with the next and/or * previous entries. However, due to MAP_STACK_* being a hack, a * panic can result from merging such entries. */ if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0) vm_map_simplify_entry(map, new_entry); if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) { vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset), end - start, cow & MAP_PREFAULT_PARTIAL); } return (KERN_SUCCESS); } /* * vm_map_findspace: * * Find the first fit (lowest VM address) for "length" free bytes * beginning at address >= start in the given map. * * In a vm_map_entry, "adj_free" is the amount of free space * adjacent (higher address) to this entry, and "max_free" is the * maximum amount of contiguous free space in its subtree. This * allows finding a free region in one path down the tree, so * O(log n) amortized with splay trees. * * The map must be locked, and leaves it so. * * Returns: 0 on success, and starting address in *addr, * 1 if insufficient space. 
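 *
 * [Illustrative aside, not part of the original source: one worked
 * pass through the search below, with hypothetical numbers. Assume
 * the splayed root covers [0x2000, 0x5000) with adj_free == 0x3000,
 * i.e. a free gap up to 0x8000, and the caller asks for length ==
 * 0x1000 starting at start == 0x1000.]
 *
 *	st = (start > map->root->end) ? start : map->root->end;
 *	// st == 0x5000, and 0x5000 + 0x3000 - 0x5000 == 0x3000 is at
 *	// least the requested 0x1000, so *addr = 0x5000 and the
 *	// function returns 0 without descending any further.
 *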
*/ int vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, vm_offset_t *addr) /* OUT */ { vm_map_entry_t entry; vm_offset_t st; /* * Request must fit within min/max VM address and must avoid * address wrap. */ if (start < map->min_offset) start = map->min_offset; if (start + length > map->max_offset || start + length < start) return (1); /* Empty tree means wide open address space. */ if (map->root == NULL) { *addr = start; return (0); } /* * After splay, if start comes before root node, then there * must be a gap from start to the root. */ map->root = vm_map_entry_splay(start, map->root); if (start + length <= map->root->start) { *addr = start; return (0); } /* * Root is the last node that might begin its gap before * start, and this is the last comparison where address * wrap might be a problem. */ st = (start > map->root->end) ? start : map->root->end; if (length <= map->root->end + map->root->adj_free - st) { *addr = st; return (0); } /* With max_free, can immediately tell if no solution. */ entry = map->root->right; if (entry == NULL || length > entry->max_free) return (1); /* * Search the right subtree in the order: left subtree, root, * right subtree (first fit). The previous splay implies that * all regions in the right subtree have addresses > start. */ while (entry != NULL) { if (entry->left != NULL && entry->left->max_free >= length) entry = entry->left; else if (entry->adj_free >= length) { *addr = entry->end; return (0); } else entry = entry->right; } /* Can't get here, so panic if we do. */ panic("vm_map_findspace: max_free corrupt"); } int vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_size_t length, vm_prot_t prot, vm_prot_t max, int cow) { vm_offset_t end; int result; end = start + length; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); (void) vm_map_delete(map, start, end); result = vm_map_insert(map, object, offset, start, end, prot, max, cow); vm_map_unlock(map); return (result); } /* * vm_map_find finds an unallocated region in the target address * map with the given length. The search is defined to be * first-fit from the specified address; the region found is * returned in the same parameter. * * If object is non-NULL, ref count must be bumped by caller * prior to making call to account for the new entry. */ int vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, /* IN/OUT */ vm_size_t length, int find_space, vm_prot_t prot, vm_prot_t max, int cow) { vm_offset_t start; int result; start = *addr; vm_map_lock(map); do { if (find_space != VMFS_NO_SPACE) { if (vm_map_findspace(map, start, length, addr)) { vm_map_unlock(map); return (KERN_NO_SPACE); } switch (find_space) { case VMFS_ALIGNED_SPACE: pmap_align_superpage(object, offset, addr, length); break; #ifdef VMFS_TLB_ALIGNED_SPACE case VMFS_TLB_ALIGNED_SPACE: pmap_align_tlb(addr); break; #endif default: break; } start = *addr; } result = vm_map_insert(map, object, offset, start, start + length, prot, max, cow); } while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE #ifdef VMFS_TLB_ALIGNED_SPACE || find_space == VMFS_TLB_ALIGNED_SPACE #endif )); vm_map_unlock(map); return (result); } /* * vm_map_simplify_entry: * * Simplify the given map entry by merging with either neighbor. This * routine also has the ability to merge with both neighbors. * * The map must be locked. * * This routine guarantees that the passed entry remains valid (though * possibly extended).
When merging, this routine may delete one or * both neighbors. */ void vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry) { vm_map_entry_t next, prev; vm_size_t prevsize, esize; if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) return; prev = entry->prev; if (prev != &map->header) { prevsize = prev->end - prev->start; if ( (prev->end == entry->start) && (prev->object.vm_object == entry->object.vm_object) && (!prev->object.vm_object || (prev->offset + prevsize == entry->offset)) && (prev->eflags == entry->eflags) && (prev->protection == entry->protection) && (prev->max_protection == entry->max_protection) && (prev->inheritance == entry->inheritance) && (prev->wired_count == entry->wired_count) && (prev->cred == entry->cred)) { vm_map_entry_unlink(map, prev); entry->start = prev->start; entry->offset = prev->offset; if (entry->prev != &map->header) vm_map_entry_resize_free(map, entry->prev); /* * If the backing object is a vnode object, * vm_object_deallocate() calls vrele(). * However, vrele() does not lock the vnode * because the vnode has additional * references. Thus, the map lock can be kept * without causing a lock-order reversal with * the vnode lock. * * Since we count the number of virtual page * mappings in object->un_pager.vnp.writemappings, * the writemappings value should not be adjusted * when the entry is disposed of. */ if (prev->object.vm_object) vm_object_deallocate(prev->object.vm_object); if (prev->cred != NULL) crfree(prev->cred); vm_map_entry_dispose(map, prev); } } next = entry->next; if (next != &map->header) { esize = entry->end - entry->start; if ((entry->end == next->start) && (next->object.vm_object == entry->object.vm_object) && (!entry->object.vm_object || (entry->offset + esize == next->offset)) && (next->eflags == entry->eflags) && (next->protection == entry->protection) && (next->max_protection == entry->max_protection) && (next->inheritance == entry->inheritance) && (next->wired_count == entry->wired_count) && (next->cred == entry->cred)) { vm_map_entry_unlink(map, next); entry->end = next->end; vm_map_entry_resize_free(map, entry); /* * See comment above. */ if (next->object.vm_object) vm_object_deallocate(next->object.vm_object); if (next->cred != NULL) crfree(next->cred); vm_map_entry_dispose(map, next); } } } /* * vm_map_clip_start: [ internal use only ] * * Asserts that the given entry begins at or after * the specified address; if necessary, * it splits the entry into two. */ #define vm_map_clip_start(map, entry, startaddr) \ { \ if (startaddr > entry->start) \ _vm_map_clip_start(map, entry, startaddr); \ } /* * This routine is called only when it is known that * the entry must be split. */ static void _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) { vm_map_entry_t new_entry; VM_MAP_ASSERT_LOCKED(map); /* * Split off the front portion -- note that we must insert the new * entry BEFORE this one, so that this entry has the specified * starting address. */ vm_map_simplify_entry(map, entry); /* * If there is no object backing this entry, we might as well create * one now. If we defer it, an object can get created after the map * is clipped, and individual objects will be created for the split-up * map. This is a bit of a hack, but is also about the best place to * put this improvement. 
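 *
 * Worked example (editorial, addresses hypothetical): clipping an
 * entry covering [A, A + 3 pages) at start = A + 1 page leaves a new
 * front entry [A, A + 1 page) keeping the original offset, while
 * this entry becomes [A + 1 page, A + 3 pages) with its offset
 * advanced by one page, so both halves still reference the same
 * object pages as before the split.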
*/ if (entry->object.vm_object == NULL && !map->system_map) { vm_object_t object; object = vm_object_allocate(OBJT_DEFAULT, atop(entry->end - entry->start)); entry->object.vm_object = object; entry->offset = 0; if (entry->cred != NULL) { object->cred = entry->cred; object->charge = entry->end - entry->start; entry->cred = NULL; } } else if (entry->object.vm_object != NULL && ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && entry->cred != NULL) { VM_OBJECT_LOCK(entry->object.vm_object); KASSERT(entry->object.vm_object->cred == NULL, ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry)); entry->object.vm_object->cred = entry->cred; entry->object.vm_object->charge = entry->end - entry->start; VM_OBJECT_UNLOCK(entry->object.vm_object); entry->cred = NULL; } new_entry = vm_map_entry_create(map); *new_entry = *entry; new_entry->end = start; entry->offset += (start - entry->start); entry->start = start; if (new_entry->cred != NULL) crhold(entry->cred); vm_map_entry_link(map, entry->prev, new_entry); if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { vm_object_reference(new_entry->object.vm_object); /* * The object->un_pager.vnp.writemappings for the * object of MAP_ENTRY_VN_WRITECNT type entry shall be * kept as is here. The virtual pages are * re-distributed among the clipped entries, so the sum is * left the same. */ } } /* * vm_map_clip_end: [ internal use only ] * * Asserts that the given entry ends at or before * the specified address; if necessary, * it splits the entry into two. */ #define vm_map_clip_end(map, entry, endaddr) \ { \ if ((endaddr) < (entry->end)) \ _vm_map_clip_end((map), (entry), (endaddr)); \ } /* * This routine is called only when it is known that * the entry must be split. */ static void _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) { vm_map_entry_t new_entry; VM_MAP_ASSERT_LOCKED(map); /* * If there is no object backing this entry, we might as well create * one now. If we defer it, an object can get created after the map * is clipped, and individual objects will be created for the split-up * map. This is a bit of a hack, but is also about the best place to * put this improvement. */ if (entry->object.vm_object == NULL && !map->system_map) { vm_object_t object; object = vm_object_allocate(OBJT_DEFAULT, atop(entry->end - entry->start)); entry->object.vm_object = object; entry->offset = 0; if (entry->cred != NULL) { object->cred = entry->cred; object->charge = entry->end - entry->start; entry->cred = NULL; } } else if (entry->object.vm_object != NULL && ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && entry->cred != NULL) { VM_OBJECT_LOCK(entry->object.vm_object); KASSERT(entry->object.vm_object->cred == NULL, ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry)); entry->object.vm_object->cred = entry->cred; entry->object.vm_object->charge = entry->end - entry->start; VM_OBJECT_UNLOCK(entry->object.vm_object); entry->cred = NULL; } /* * Create a new entry and insert it AFTER the specified entry */ new_entry = vm_map_entry_create(map); *new_entry = *entry; new_entry->start = entry->end = end; new_entry->offset += (end - entry->start); if (new_entry->cred != NULL) crhold(entry->cred); vm_map_entry_link(map, entry, new_entry); if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { vm_object_reference(new_entry->object.vm_object); } } /* * vm_map_submap: [ kernel use only ] * * Mark the given range as handled by a subordinate map. 
* * This range must have been created with vm_map_find, * and no other operations may have been performed on this * range prior to calling vm_map_submap. * * Only a limited number of operations can be performed * within this range after calling vm_map_submap: * vm_fault * [Don't try vm_map_copy!] * * To remove a submapping, one must first remove the * range from the superior map, and then destroy the * submap (if desired). [Better yet, don't try it.] */ int vm_map_submap( vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap) { vm_map_entry_t entry; int result = KERN_INVALID_ARGUMENT; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); if (vm_map_lookup_entry(map, start, &entry)) { vm_map_clip_start(map, entry, start); } else entry = entry->next; vm_map_clip_end(map, entry, end); if ((entry->start == start) && (entry->end == end) && ((entry->eflags & MAP_ENTRY_COW) == 0) && (entry->object.vm_object == NULL)) { entry->object.sub_map = submap; entry->eflags |= MAP_ENTRY_IS_SUB_MAP; result = KERN_SUCCESS; } vm_map_unlock(map); return (result); } /* * The maximum number of pages to map */ #define MAX_INIT_PT 96 /* * vm_map_pmap_enter: * * Preload read-only mappings for the given object's resident pages into * the given map. This eliminates the soft faults on process startup and * immediately after an mmap(2). Because these are speculative mappings, * cached pages are not reactivated and mapped. */ void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags) { vm_offset_t start; vm_page_t p, p_start; vm_pindex_t psize, tmpidx; if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL) return; VM_OBJECT_LOCK(object); if (object->type == OBJT_DEVICE || object->type == OBJT_SG) { pmap_object_init_pt(map->pmap, addr, object, pindex, size); goto unlock_return; } psize = atop(size); if ((flags & MAP_PREFAULT_PARTIAL) && psize > MAX_INIT_PT && object->resident_page_count > MAX_INIT_PT) goto unlock_return; if (psize + pindex > object->size) { if (object->size < pindex) goto unlock_return; psize = object->size - pindex; } start = 0; p_start = NULL; p = vm_page_find_least(object, pindex); /* * Assert: the variable p is either (1) the page with the * least pindex greater than or equal to the parameter pindex * or (2) NULL. */ for (; p != NULL && (tmpidx = p->pindex - pindex) < psize; p = TAILQ_NEXT(p, listq)) { /* * don't allow a madvise to blow away our really * free pages allocating pv entries. */ if ((flags & MAP_PREFAULT_MADVISE) && cnt.v_free_count < cnt.v_free_reserved) { psize = tmpidx; break; } if (p->valid == VM_PAGE_BITS_ALL) { if (p_start == NULL) { start = addr + ptoa(tmpidx); p_start = p; } } else if (p_start != NULL) { pmap_enter_object(map->pmap, start, addr + ptoa(tmpidx), p_start, prot); p_start = NULL; } } if (p_start != NULL) pmap_enter_object(map->pmap, start, addr + ptoa(psize), p_start, prot); unlock_return: VM_OBJECT_UNLOCK(object); } /* * vm_map_protect: * * Sets the protection of the specified address * region in the target map. If "set_max" is * specified, the maximum protection is to be set; * otherwise, only the current protection is affected.
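 *
 * Usage sketch (editorial; range names assumed): an mprotect(2)-like
 * downgrade to read-only leaves max_protection intact,
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * while the same call with set_max = TRUE would also clamp
 * max_protection, so write access could never be restored later.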
*/ int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_prot_t new_prot, boolean_t set_max) { vm_map_entry_t current, entry; vm_object_t obj; struct ucred *cred; vm_prot_t old_prot; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); if (vm_map_lookup_entry(map, start, &entry)) { vm_map_clip_start(map, entry, start); } else { entry = entry->next; } /* * Make a first pass to check for protection violations. */ current = entry; while ((current != &map->header) && (current->start < end)) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { vm_map_unlock(map); return (KERN_INVALID_ARGUMENT); } if ((new_prot & current->max_protection) != new_prot) { vm_map_unlock(map); return (KERN_PROTECTION_FAILURE); } current = current->next; } /* * Do an accounting pass for private read-only mappings that * now will do cow due to allowed write (e.g. debugger sets * breakpoint on text segment) */ for (current = entry; (current != &map->header) && (current->start < end); current = current->next) { vm_map_clip_end(map, current, end); if (set_max || ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 || ENTRY_CHARGED(current)) { continue; } cred = curthread->td_ucred; obj = current->object.vm_object; if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) { if (!swap_reserve(current->end - current->start)) { vm_map_unlock(map); return (KERN_RESOURCE_SHORTAGE); } crhold(cred); current->cred = cred; continue; } VM_OBJECT_LOCK(obj); if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) { VM_OBJECT_UNLOCK(obj); continue; } /* * Charge for the whole object allocation now, since * we cannot distinguish between non-charged and * charged clipped mapping of the same object later. */ KASSERT(obj->charge == 0, ("vm_map_protect: object %p overcharged\n", obj)); if (!swap_reserve(ptoa(obj->size))) { VM_OBJECT_UNLOCK(obj); vm_map_unlock(map); return (KERN_RESOURCE_SHORTAGE); } crhold(cred); obj->cred = cred; obj->charge = ptoa(obj->size); VM_OBJECT_UNLOCK(obj); } /* * Go back and fix up protections. [Note that clipping is not * necessary the second time.] */ current = entry; while ((current != &map->header) && (current->start < end)) { old_prot = current->protection; if (set_max) current->protection = (current->max_protection = new_prot) & old_prot; else current->protection = new_prot; if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED)) == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) && (current->protection & VM_PROT_WRITE) != 0 && (old_prot & VM_PROT_WRITE) == 0) { vm_fault_copy_entry(map, map, current, current, NULL); } /* * When restricting access, update the physical map. Worry * about copy-on-write here. */ if ((old_prot & ~current->protection) != 0) { #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ VM_PROT_ALL) pmap_protect(map->pmap, current->start, current->end, current->protection & MASK(current)); #undef MASK } vm_map_simplify_entry(map, current); current = current->next; } vm_map_unlock(map); return (KERN_SUCCESS); } /* * vm_map_madvise: * * This routine traverses a process's map handling the madvise * system call. Advisories are classified as either those affecting * the vm_map_entry structure, or those affecting the underlying * objects. */ int vm_map_madvise( vm_map_t map, vm_offset_t start, vm_offset_t end, int behav) { vm_map_entry_t current, entry; int modify_map = 0; /* * Some madvise calls directly modify the vm_map_entry, in which case * we need to use an exclusive lock on the map and we need to perform * various clipping operations.
Otherwise we only need a read-lock * on the map. */ switch(behav) { case MADV_NORMAL: case MADV_SEQUENTIAL: case MADV_RANDOM: case MADV_NOSYNC: case MADV_AUTOSYNC: case MADV_NOCORE: case MADV_CORE: modify_map = 1; vm_map_lock(map); break; case MADV_WILLNEED: case MADV_DONTNEED: case MADV_FREE: vm_map_lock_read(map); break; default: return (KERN_INVALID_ARGUMENT); } /* * Locate starting entry and clip if necessary. */ VM_MAP_RANGE_CHECK(map, start, end); if (vm_map_lookup_entry(map, start, &entry)) { if (modify_map) vm_map_clip_start(map, entry, start); } else { entry = entry->next; } if (modify_map) { /* * madvise behaviors that are implemented in the vm_map_entry. * * We clip the vm_map_entry so that behavioral changes are * limited to the specified address range. */ for (current = entry; (current != &map->header) && (current->start < end); current = current->next ) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) continue; vm_map_clip_end(map, current, end); switch (behav) { case MADV_NORMAL: vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL); break; case MADV_SEQUENTIAL: vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL); break; case MADV_RANDOM: vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM); break; case MADV_NOSYNC: current->eflags |= MAP_ENTRY_NOSYNC; break; case MADV_AUTOSYNC: current->eflags &= ~MAP_ENTRY_NOSYNC; break; case MADV_NOCORE: current->eflags |= MAP_ENTRY_NOCOREDUMP; break; case MADV_CORE: current->eflags &= ~MAP_ENTRY_NOCOREDUMP; break; default: break; } vm_map_simplify_entry(map, current); } vm_map_unlock(map); } else { vm_pindex_t pstart, pend; /* * madvise behaviors that are implemented in the underlying * vm_object. * * Since we don't clip the vm_map_entry, we have to clip * the vm_object pindex and count. */ for (current = entry; (current != &map->header) && (current->start < end); current = current->next ) { vm_offset_t useStart; if (current->eflags & MAP_ENTRY_IS_SUB_MAP) continue; pstart = OFF_TO_IDX(current->offset); pend = pstart + atop(current->end - current->start); useStart = current->start; if (current->start < start) { pstart += atop(start - current->start); useStart = start; } if (current->end > end) pend -= atop(current->end - end); if (pstart >= pend) continue; vm_object_madvise(current->object.vm_object, pstart, pend, behav); if (behav == MADV_WILLNEED) { vm_map_pmap_enter(map, useStart, current->protection, current->object.vm_object, pstart, ptoa(pend - pstart), MAP_PREFAULT_MADVISE ); } } vm_map_unlock_read(map); } return (0); } /* * vm_map_inherit: * * Sets the inheritance of the specified address * range in the target map. Inheritance * affects how the map will be shared with * child maps at the time of vmspace_fork. */ int vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_inherit_t new_inheritance) { vm_map_entry_t entry; vm_map_entry_t temp_entry; switch (new_inheritance) { case VM_INHERIT_NONE: case VM_INHERIT_COPY: case VM_INHERIT_SHARE: break; default: return (KERN_INVALID_ARGUMENT); } vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); if (vm_map_lookup_entry(map, start, &temp_entry)) { entry = temp_entry; vm_map_clip_start(map, entry, start); } else entry = temp_entry->next; while ((entry != &map->header) && (entry->start < end)) { vm_map_clip_end(map, entry, end); entry->inheritance = new_inheritance; vm_map_simplify_entry(map, entry); entry = entry->next; } vm_map_unlock(map); return (KERN_SUCCESS); } /* * vm_map_unwire: * * Implements both kernel and user unwiring. 
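 *
 * Usage sketch (editorial; a munlock(2)-style caller is assumed):
 *
 *	rv = vm_map_unwire(map, trunc_page(addr),
 *	    round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * Kernel callers pass VM_MAP_WIRE_SYSTEM instead so that only
 * system wirings are dropped.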
*/ int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) { vm_map_entry_t entry, first_entry, tmp_entry; vm_offset_t saved_start; unsigned int last_timestamp; int rv; boolean_t need_wakeup, result, user_unwire; user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); if (!vm_map_lookup_entry(map, start, &first_entry)) { if (flags & VM_MAP_WIRE_HOLESOK) first_entry = first_entry->next; else { vm_map_unlock(map); return (KERN_INVALID_ADDRESS); } } last_timestamp = map->timestamp; entry = first_entry; while (entry != &map->header && entry->start < end) { if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { /* * We have not yet clipped the entry. */ saved_start = (start >= entry->start) ? start : entry->start; entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; if (vm_map_unlock_and_wait(map, 0)) { /* * Allow interruption of user unwiring? */ } vm_map_lock(map); if (last_timestamp+1 != map->timestamp) { /* * Look again for the entry because the map was * modified while it was unlocked. * Specifically, the entry may have been * clipped, merged, or deleted. */ if (!vm_map_lookup_entry(map, saved_start, &tmp_entry)) { if (flags & VM_MAP_WIRE_HOLESOK) tmp_entry = tmp_entry->next; else { if (saved_start == start) { /* * First_entry has been deleted. */ vm_map_unlock(map); return (KERN_INVALID_ADDRESS); } end = saved_start; rv = KERN_INVALID_ADDRESS; goto done; } } if (entry == first_entry) first_entry = tmp_entry; else first_entry = NULL; entry = tmp_entry; } last_timestamp = map->timestamp; continue; } vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); /* * Mark the entry in case the map lock is released. (See * above.) */ entry->eflags |= MAP_ENTRY_IN_TRANSITION; /* * Check the map for holes in the specified region. * If VM_MAP_WIRE_HOLESOK was specified, skip this check. */ if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && (entry->end < end && (entry->next == &map->header || entry->next->start > entry->end))) { end = entry->end; rv = KERN_INVALID_ADDRESS; goto done; } /* * If system unwiring, require that the entry is system wired. */ if (!user_unwire && vm_map_entry_system_wired_count(entry) == 0) { end = entry->end; rv = KERN_INVALID_ARGUMENT; goto done; } entry = entry->next; } rv = KERN_SUCCESS; done: need_wakeup = FALSE; if (first_entry == NULL) { result = vm_map_lookup_entry(map, start, &first_entry); if (!result && (flags & VM_MAP_WIRE_HOLESOK)) first_entry = first_entry->next; else KASSERT(result, ("vm_map_unwire: lookup failed")); } entry = first_entry; while (entry != &map->header && entry->start < end) { if (rv == KERN_SUCCESS && (!user_unwire || (entry->eflags & MAP_ENTRY_USER_WIRED))) { if (user_unwire) entry->eflags &= ~MAP_ENTRY_USER_WIRED; entry->wired_count--; if (entry->wired_count == 0) { /* * Retain the map lock. */ vm_fault_unwire(map, entry->start, entry->end, entry->object.vm_object != NULL && (entry->object.vm_object->type == OBJT_DEVICE || entry->object.vm_object->type == OBJT_SG)); } } KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, ("vm_map_unwire: in-transition flag missing")); entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; need_wakeup = TRUE; } vm_map_simplify_entry(map, entry); entry = entry->next; } vm_map_unlock(map); if (need_wakeup) vm_map_wakeup(map); return (rv); } /* * vm_map_wire: * * Implements both kernel and user wiring. 
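 *
 * Usage sketch (editorial; an mlock(2)-style caller is assumed):
 *
 *	rv = vm_map_wire(map, trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * Adding VM_MAP_WIRE_WRITE additionally requires VM_PROT_WRITE on
 * every entry in the range, as the protection check below enforces.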
*/ int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags) { vm_map_entry_t entry, first_entry, tmp_entry; vm_offset_t saved_end, saved_start; unsigned int last_timestamp; int rv; boolean_t fictitious, need_wakeup, result, user_wire; vm_prot_t prot; prot = 0; if (flags & VM_MAP_WIRE_WRITE) prot |= VM_PROT_WRITE; user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); if (!vm_map_lookup_entry(map, start, &first_entry)) { if (flags & VM_MAP_WIRE_HOLESOK) first_entry = first_entry->next; else { vm_map_unlock(map); return (KERN_INVALID_ADDRESS); } } last_timestamp = map->timestamp; entry = first_entry; while (entry != &map->header && entry->start < end) { if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { /* * We have not yet clipped the entry. */ saved_start = (start >= entry->start) ? start : entry->start; entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; if (vm_map_unlock_and_wait(map, 0)) { /* * Allow interruption of user wiring? */ } vm_map_lock(map); if (last_timestamp + 1 != map->timestamp) { /* * Look again for the entry because the map was * modified while it was unlocked. * Specifically, the entry may have been * clipped, merged, or deleted. */ if (!vm_map_lookup_entry(map, saved_start, &tmp_entry)) { if (flags & VM_MAP_WIRE_HOLESOK) tmp_entry = tmp_entry->next; else { if (saved_start == start) { /* * first_entry has been deleted. */ vm_map_unlock(map); return (KERN_INVALID_ADDRESS); } end = saved_start; rv = KERN_INVALID_ADDRESS; goto done; } } if (entry == first_entry) first_entry = tmp_entry; else first_entry = NULL; entry = tmp_entry; } last_timestamp = map->timestamp; continue; } vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); /* * Mark the entry in case the map lock is released. (See * above.) */ entry->eflags |= MAP_ENTRY_IN_TRANSITION; if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || (entry->protection & prot) != prot) { entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; if ((flags & VM_MAP_WIRE_HOLESOK) == 0) { end = entry->end; rv = KERN_INVALID_ADDRESS; goto done; } goto next_entry; } if (entry->wired_count == 0) { entry->wired_count++; saved_start = entry->start; saved_end = entry->end; fictitious = entry->object.vm_object != NULL && (entry->object.vm_object->type == OBJT_DEVICE || entry->object.vm_object->type == OBJT_SG); /* * Release the map lock, relying on the in-transition * mark. Mark the map busy for fork. */ vm_map_busy(map); vm_map_unlock(map); rv = vm_fault_wire(map, saved_start, saved_end, fictitious); vm_map_lock(map); vm_map_unbusy(map); if (last_timestamp + 1 != map->timestamp) { /* * Look again for the entry because the map was * modified while it was unlocked. The entry * may have been clipped, but NOT merged or * deleted. */ result = vm_map_lookup_entry(map, saved_start, &tmp_entry); KASSERT(result, ("vm_map_wire: lookup failed")); if (entry == first_entry) first_entry = tmp_entry; else first_entry = NULL; entry = tmp_entry; while (entry->end < saved_end) { if (rv != KERN_SUCCESS) { KASSERT(entry->wired_count == 1, ("vm_map_wire: bad count")); entry->wired_count = -1; } entry = entry->next; } } last_timestamp = map->timestamp; if (rv != KERN_SUCCESS) { KASSERT(entry->wired_count == 1, ("vm_map_wire: bad count")); /* * Assign an out-of-range value to represent * the failure to wire this entry. 
*/ entry->wired_count = -1; end = entry->end; goto done; } } else if (!user_wire || (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { entry->wired_count++; } /* * Check the map for holes in the specified region. * If VM_MAP_WIRE_HOLESOK was specified, skip this check. */ next_entry: if (((flags & VM_MAP_WIRE_HOLESOK) == 0) && (entry->end < end && (entry->next == &map->header || entry->next->start > entry->end))) { end = entry->end; rv = KERN_INVALID_ADDRESS; goto done; } entry = entry->next; } rv = KERN_SUCCESS; done: need_wakeup = FALSE; if (first_entry == NULL) { result = vm_map_lookup_entry(map, start, &first_entry); if (!result && (flags & VM_MAP_WIRE_HOLESOK)) first_entry = first_entry->next; else KASSERT(result, ("vm_map_wire: lookup failed")); } entry = first_entry; while (entry != &map->header && entry->start < end) { if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) goto next_entry_done; if (rv == KERN_SUCCESS) { if (user_wire) entry->eflags |= MAP_ENTRY_USER_WIRED; } else if (entry->wired_count == -1) { /* * Wiring failed on this entry. Thus, unwiring is * unnecessary. */ entry->wired_count = 0; } else { if (!user_wire || (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) entry->wired_count--; if (entry->wired_count == 0) { /* * Retain the map lock. */ vm_fault_unwire(map, entry->start, entry->end, entry->object.vm_object != NULL && (entry->object.vm_object->type == OBJT_DEVICE || entry->object.vm_object->type == OBJT_SG)); } } next_entry_done: KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION, ("vm_map_wire: in-transition flag missing")); entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED); if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; need_wakeup = TRUE; } vm_map_simplify_entry(map, entry); entry = entry->next; } vm_map_unlock(map); if (need_wakeup) vm_map_wakeup(map); return (rv); } /* * vm_map_sync * * Push any dirty cached pages in the address range to their pager. * If syncio is TRUE, dirty pages are written synchronously. * If invalidate is TRUE, any cached pages are freed as well. * * If the size of the region from start to end is zero, we are * supposed to flush all modified pages within the region containing * start. Unfortunately, a region can be split or coalesced with * neighboring regions, making it difficult to determine what the * original region was. Therefore, we approximate this requirement by * flushing the current region containing start. * * Returns an error if any part of the specified range is not mapped. */ int vm_map_sync( vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t syncio, boolean_t invalidate) { vm_map_entry_t current; vm_map_entry_t entry; vm_size_t size; vm_object_t object; vm_ooffset_t offset; unsigned int last_timestamp; boolean_t failed; vm_map_lock_read(map); VM_MAP_RANGE_CHECK(map, start, end); if (!vm_map_lookup_entry(map, start, &entry)) { vm_map_unlock_read(map); return (KERN_INVALID_ADDRESS); } else if (start == end) { start = entry->start; end = entry->end; } /* * Make a first pass to check for user-wired memory and holes. 
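 *
 * For reference (editorial sketch): an msync(MS_SYNC)-like flush of
 * a range is a call of the form
 *
 *	rv = vm_map_sync(map, start, end, TRUE, FALSE);
 *
 * and passing invalidate = TRUE additionally frees the cached pages,
 * subject to the user-wired check made in this first pass.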
*/ for (current = entry; current != &map->header && current->start < end; current = current->next) { if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) { vm_map_unlock_read(map); return (KERN_INVALID_ARGUMENT); } if (end > current->end && (current->next == &map->header || current->end != current->next->start)) { vm_map_unlock_read(map); return (KERN_INVALID_ADDRESS); } } if (invalidate) pmap_remove(map->pmap, start, end); failed = FALSE; /* * Make a second pass, cleaning/uncaching pages from the indicated * objects as we go. */ for (current = entry; current != &map->header && current->start < end;) { offset = current->offset + (start - current->start); size = (end <= current->end ? end : current->end) - start; if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { vm_map_t smap; vm_map_entry_t tentry; vm_size_t tsize; smap = current->object.sub_map; vm_map_lock_read(smap); (void) vm_map_lookup_entry(smap, offset, &tentry); tsize = tentry->end - offset; if (tsize < size) size = tsize; object = tentry->object.vm_object; offset = tentry->offset + (offset - tentry->start); vm_map_unlock_read(smap); } else { object = current->object.vm_object; } vm_object_reference(object); last_timestamp = map->timestamp; vm_map_unlock_read(map); if (!vm_object_sync(object, offset, size, syncio, invalidate)) failed = TRUE; start += size; vm_object_deallocate(object); vm_map_lock_read(map); if (last_timestamp == map->timestamp || !vm_map_lookup_entry(map, start, &current)) current = current->next; } vm_map_unlock_read(map); return (failed ? KERN_FAILURE : KERN_SUCCESS); } /* * vm_map_entry_unwire: [ internal use only ] * * Make the region specified by this entry pageable. * * The map in question should be locked. * [This is the reason for this routine's existence.] */ static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) { vm_fault_unwire(map, entry->start, entry->end, entry->object.vm_object != NULL && (entry->object.vm_object->type == OBJT_DEVICE || entry->object.vm_object->type == OBJT_SG)); entry->wired_count = 0; } static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) { if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) vm_object_deallocate(entry->object.vm_object); uma_zfree(system_map ? kmapentzone : mapentzone, entry); } /* * vm_map_entry_delete: [ internal use only ] * * Deallocate the given entry from the target map. */ static void vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) { vm_object_t object; vm_pindex_t offidxstart, offidxend, count, size1; vm_ooffset_t size; vm_map_entry_unlink(map, entry); object = entry->object.vm_object; size = entry->end - entry->start; map->size -= size; if (entry->cred != NULL) { swap_release_by_cred(size, entry->cred); crfree(entry->cred); } if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && (object != NULL)) { KASSERT(entry->cred == NULL || object->cred == NULL || (entry->eflags & MAP_ENTRY_NEEDS_COPY), ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); count = OFF_TO_IDX(size); offidxstart = OFF_TO_IDX(entry->offset); offidxend = offidxstart + count; VM_OBJECT_LOCK(object); if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING || object == kernel_object || object == kmem_object)) { vm_object_collapse(object); /* * The option OBJPR_NOTMAPPED can be passed here * because vm_map_delete() already performed * pmap_remove() on the only mapping to this range * of pages.
*/ vm_object_page_remove(object, offidxstart, offidxend, OBJPR_NOTMAPPED); if (object->type == OBJT_SWAP) swap_pager_freespace(object, offidxstart, count); if (offidxend >= object->size && offidxstart < object->size) { size1 = object->size; object->size = offidxstart; if (object->cred != NULL) { size1 -= object->size; KASSERT(object->charge >= ptoa(size1), ("vm_map_entry_delete: object->charge < 0")); swap_release_by_cred(ptoa(size1), object->cred); object->charge -= ptoa(size1); } } } VM_OBJECT_UNLOCK(object); } else entry->object.vm_object = NULL; if (map->system_map) vm_map_entry_deallocate(entry, TRUE); else { entry->next = curthread->td_map_def_user; curthread->td_map_def_user = entry; } } /* * vm_map_delete: [ internal use only ] * * Deallocates the given address range from the target * map. */ int vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end) { vm_map_entry_t entry; vm_map_entry_t first_entry; VM_MAP_ASSERT_LOCKED(map); /* * Find the start of the region, and clip it */ if (!vm_map_lookup_entry(map, start, &first_entry)) entry = first_entry->next; else { entry = first_entry; vm_map_clip_start(map, entry, start); } /* * Step through all entries in this region */ while ((entry != &map->header) && (entry->start < end)) { vm_map_entry_t next; /* * Wait for wiring or unwiring of an entry to complete. * Also wait for any system wirings to disappear on * user maps. */ if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || (vm_map_pmap(map) != kernel_pmap && vm_map_entry_system_wired_count(entry) != 0)) { unsigned int last_timestamp; vm_offset_t saved_start; vm_map_entry_t tmp_entry; saved_start = entry->start; entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; last_timestamp = map->timestamp; (void) vm_map_unlock_and_wait(map, 0); vm_map_lock(map); if (last_timestamp + 1 != map->timestamp) { /* * Look again for the entry because the map was * modified while it was unlocked. * Specifically, the entry may have been * clipped, merged, or deleted. */ if (!vm_map_lookup_entry(map, saved_start, &tmp_entry)) entry = tmp_entry->next; else { entry = tmp_entry; vm_map_clip_start(map, entry, saved_start); } } continue; } vm_map_clip_end(map, entry, end); next = entry->next; /* * Unwire before removing addresses from the pmap; otherwise, * unwiring will put the entries back in the pmap. */ if (entry->wired_count != 0) { vm_map_entry_unwire(map, entry); } pmap_remove(map->pmap, entry->start, entry->end); /* * Delete the entry only after removing all pmap * entries pointing to its pages. (Otherwise, its * page frames may be reallocated, and any modify bits * will be set in the wrong object!) */ vm_map_entry_delete(map, entry); entry = next; } return (KERN_SUCCESS); } /* * vm_map_remove: * * Remove the given address range from the target map. * This is the exported form of vm_map_delete. */ int vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end) { int result; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); result = vm_map_delete(map, start, end); vm_map_unlock(map); return (result); } /* * vm_map_check_protection: * * Assert that the target map allows the specified privilege on the * entire address region given. The entire region must be allocated. * * WARNING! This code does not and should not check whether the * contents of the region are accessible. For example, a smaller file * might be mapped into a larger address space. * * NOTE! This code is also called by munmap(). * * The map must be locked. A read lock is sufficient.
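 *
 * Usage sketch (editorial): because a read lock suffices, a typical
 * check is bracketed as
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);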
*/ boolean_t vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_prot_t protection) { vm_map_entry_t entry; vm_map_entry_t tmp_entry; if (!vm_map_lookup_entry(map, start, &tmp_entry)) return (FALSE); entry = tmp_entry; while (start < end) { if (entry == &map->header) return (FALSE); /* * No holes allowed! */ if (start < entry->start) return (FALSE); /* * Check protection associated with entry. */ if ((entry->protection & protection) != protection) return (FALSE); /* go to next entry */ start = entry->end; entry = entry->next; } return (TRUE); } /* * vm_map_copy_entry: * * Copies the contents of the source entry to the destination * entry. The entries *must* be aligned properly. */ static void vm_map_copy_entry( vm_map_t src_map, vm_map_t dst_map, vm_map_entry_t src_entry, vm_map_entry_t dst_entry, vm_ooffset_t *fork_charge) { vm_object_t src_object; vm_map_entry_t fake_entry; vm_offset_t size; struct ucred *cred; int charged; VM_MAP_ASSERT_LOCKED(dst_map); if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP) return; if (src_entry->wired_count == 0) { /* * If the source entry is marked needs_copy, it is already * write-protected. */ if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { pmap_protect(src_map->pmap, src_entry->start, src_entry->end, src_entry->protection & ~VM_PROT_WRITE); } /* * Make a copy of the object. */ size = src_entry->end - src_entry->start; if ((src_object = src_entry->object.vm_object) != NULL) { VM_OBJECT_LOCK(src_object); charged = ENTRY_CHARGED(src_entry); if ((src_object->handle == NULL) && (src_object->type == OBJT_DEFAULT || src_object->type == OBJT_SWAP)) { vm_object_collapse(src_object); if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) { vm_object_split(src_entry); src_object = src_entry->object.vm_object; } } vm_object_reference_locked(src_object); vm_object_clear_flag(src_object, OBJ_ONEMAPPING); if (src_entry->cred != NULL && !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { KASSERT(src_object->cred == NULL, ("OVERCOMMIT: vm_map_copy_entry: cred %p", src_object)); src_object->cred = src_entry->cred; src_object->charge = size; } VM_OBJECT_UNLOCK(src_object); dst_entry->object.vm_object = src_object; if (charged) { cred = curthread->td_ucred; crhold(cred); dst_entry->cred = cred; *fork_charge += size; if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) { crhold(cred); src_entry->cred = cred; *fork_charge += size; } } src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY); dst_entry->offset = src_entry->offset; if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) { /* * MAP_ENTRY_VN_WRITECNT cannot * indicate write reference from * src_entry, since the entry is * marked as needs copy. Allocate a * fake entry that is used to * decrement object->un_pager.vnp.writecount * at the appropriate time. Attach * fake_entry to the deferred list. 
*/ fake_entry = vm_map_entry_create(dst_map); fake_entry->eflags = MAP_ENTRY_VN_WRITECNT; src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT; vm_object_reference(src_object); fake_entry->object.vm_object = src_object; fake_entry->start = src_entry->start; fake_entry->end = src_entry->end; fake_entry->next = curthread->td_map_def_user; curthread->td_map_def_user = fake_entry; } } else { dst_entry->object.vm_object = NULL; dst_entry->offset = 0; if (src_entry->cred != NULL) { dst_entry->cred = curthread->td_ucred; crhold(dst_entry->cred); *fork_charge += size; } } pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, dst_entry->end - dst_entry->start, src_entry->start); } else { /* * Of course, wired down pages can't be set copy-on-write. * Cause wired pages to be copied into the new map by * simulating faults (the new pages are pageable) */ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry, fork_charge); } } /* * vmspace_map_entry_forked: * Update the newly-forked vmspace each time a map entry is inherited * or copied. The values for vm_dsize and vm_tsize are approximate * (and mostly-obsolete ideas in the face of mmap(2) et al.) */ static void vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2, vm_map_entry_t entry) { vm_size_t entrysize; vm_offset_t newend; entrysize = entry->end - entry->start; vm2->vm_map.size += entrysize; if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { vm2->vm_ssize += btoc(entrysize); } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { newend = MIN(entry->end, (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)); vm2->vm_dsize += btoc(newend - entry->start); } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { newend = MIN(entry->end, (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)); vm2->vm_tsize += btoc(newend - entry->start); } } /* * vmspace_fork: * Create a new process vmspace structure and vm_map * based on those of an existing process. The new map * is based on the old map, according to the inheritance * values on the regions in that map. * * XXX It might be worth coalescing the entries added to the new vmspace. * * The source map must not be locked. */ struct vmspace * vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge) { struct vmspace *vm2; vm_map_t new_map, old_map; vm_map_entry_t new_entry, old_entry; vm_object_t object; int locked; old_map = &vm1->vm_map; /* Copy immutable fields of vm1 to vm2. */ vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); if (vm2 == NULL) return (NULL); vm2->vm_taddr = vm1->vm_taddr; vm2->vm_daddr = vm1->vm_daddr; vm2->vm_maxsaddr = vm1->vm_maxsaddr; vm_map_lock(old_map); if (old_map->busy) vm_map_wait_busy(old_map); new_map = &vm2->vm_map; locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */ KASSERT(locked, ("vmspace_fork: lock failed")); old_entry = old_map->header.next; while (old_entry != &old_map->header) { if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) panic("vm_map_fork: encountered a submap"); switch (old_entry->inheritance) { case VM_INHERIT_NONE: break; case VM_INHERIT_SHARE: /* * Clone the entry, creating the shared object if necessary. 
*/ object = old_entry->object.vm_object; if (object == NULL) { object = vm_object_allocate(OBJT_DEFAULT, atop(old_entry->end - old_entry->start)); old_entry->object.vm_object = object; old_entry->offset = 0; if (old_entry->cred != NULL) { object->cred = old_entry->cred; object->charge = old_entry->end - old_entry->start; old_entry->cred = NULL; } } /* * Add the reference before calling vm_object_shadow * to ensure that a shadow object is created. */ vm_object_reference(object); if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) { vm_object_shadow(&old_entry->object.vm_object, &old_entry->offset, old_entry->end - old_entry->start); old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; /* Transfer the second reference too. */ vm_object_reference( old_entry->object.vm_object); /* * As in vm_map_simplify_entry(), the * vnode lock will not be acquired in * this call to vm_object_deallocate(). */ vm_object_deallocate(object); object = old_entry->object.vm_object; } VM_OBJECT_LOCK(object); vm_object_clear_flag(object, OBJ_ONEMAPPING); if (old_entry->cred != NULL) { KASSERT(object->cred == NULL, ("vmspace_fork both cred")); object->cred = old_entry->cred; object->charge = old_entry->end - old_entry->start; old_entry->cred = NULL; } VM_OBJECT_UNLOCK(object); /* * Clone the entry, referencing the shared object. */ new_entry = vm_map_entry_create(new_map); *new_entry = *old_entry; new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION); new_entry->wired_count = 0; if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) { object = new_entry->object.vm_object; KASSERT(((struct vnode *)object->handle)-> v_writecount > 0, ("vmspace_fork: v_writecount")); KASSERT(object->un_pager.vnp.writemappings > 0, ("vmspace_fork: vnp.writecount")); vnode_pager_update_writecount(object, new_entry->start, new_entry->end); } /* * Insert the entry into the new map -- we know we're * inserting at the end of the new map. */ vm_map_entry_link(new_map, new_map->header.prev, new_entry); vmspace_map_entry_forked(vm1, vm2, new_entry); /* * Update the physical map */ pmap_copy(new_map->pmap, old_map->pmap, new_entry->start, (old_entry->end - old_entry->start), old_entry->start); break; case VM_INHERIT_COPY: /* * Clone the entry and link into the map. */ new_entry = vm_map_entry_create(new_map); *new_entry = *old_entry; /* * Copied entry is COW over the old object. */ new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT); new_entry->wired_count = 0; new_entry->object.vm_object = NULL; new_entry->cred = NULL; vm_map_entry_link(new_map, new_map->header.prev, new_entry); vmspace_map_entry_forked(vm1, vm2, new_entry); vm_map_copy_entry(old_map, new_map, old_entry, new_entry, fork_charge); break; } old_entry = old_entry->next; } /* * Use inlined vm_map_unlock() to postpone handling the deferred * map entries, which cannot be done until both old_map and * new_map locks are released. */ sx_xunlock(&old_map->lock); sx_xunlock(&new_map->lock); vm_map_process_deferred(); return (vm2); } int vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize, vm_prot_t prot, vm_prot_t max, int cow) { vm_map_entry_t new_entry, prev_entry; vm_offset_t bot, top; vm_size_t init_ssize; int orient, rv; rlim_t vmemlim; /* * The stack orientation is piggybacked with the cow argument. * Extract it into orient and mask the cow argument so that we * don't pass it around further. * NOTE: We explicitly allow bi-directional stacks.
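 *
 * Usage sketch (editorial; addrbos and max_ssize hypothetical): a
 * conventional downward-growing user stack reservation is
 *
 *	rv = vm_map_stack(map, addrbos, max_ssize, VM_PROT_ALL,
 *	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
 *
 * which maps only init_ssize bytes at the top of the range and
 * leaves the remainder in avail_ssize for vm_map_growstack().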
*/ orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP); cow &= ~orient; KASSERT(orient != 0, ("No stack grow direction")); if (addrbos < vm_map_min(map) || addrbos > vm_map_max(map) || addrbos + max_ssize < addrbos) return (KERN_NO_SPACE); init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz; PROC_LOCK(curthread->td_proc); vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM); PROC_UNLOCK(curthread->td_proc); vm_map_lock(map); /* If addr is already mapped, no go */ if (vm_map_lookup_entry(map, addrbos, &prev_entry)) { vm_map_unlock(map); return (KERN_NO_SPACE); } /* If we would blow our VMEM resource limit, no go */ if (map->size + init_ssize > vmemlim) { vm_map_unlock(map); return (KERN_NO_SPACE); } /* * If we can't accommodate max_ssize in the current mapping, no go. * However, we need to be aware that subsequent user mappings might * map into the space we have reserved for stack, and currently this * space is not protected. * * Hopefully we will at least detect this condition when we try to * grow the stack. */ if ((prev_entry->next != &map->header) && (prev_entry->next->start < addrbos + max_ssize)) { vm_map_unlock(map); return (KERN_NO_SPACE); } /* * We initially map a stack of only init_ssize. We will grow as * needed later. Depending on the orientation of the stack (i.e. * the grow direction) we either map at the top of the range, the * bottom of the range or in the middle. * * Note: we would normally expect prot and max to be VM_PROT_ALL, * and cow to be 0. Possibly we should eliminate these as input * parameters, and just pass these values here in the insert call. */ if (orient == MAP_STACK_GROWS_DOWN) bot = addrbos + max_ssize - init_ssize; else if (orient == MAP_STACK_GROWS_UP) bot = addrbos; else bot = round_page(addrbos + max_ssize/2 - init_ssize/2); top = bot + init_ssize; rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow); /* Now set the avail_ssize amount. */ if (rv == KERN_SUCCESS) { if (prev_entry != &map->header) vm_map_clip_end(map, prev_entry, bot); new_entry = prev_entry->next; if (new_entry->end != top || new_entry->start != bot) panic("Bad entry start/end for new stack entry"); new_entry->avail_ssize = max_ssize - init_ssize; if (orient & MAP_STACK_GROWS_DOWN) new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; if (orient & MAP_STACK_GROWS_UP) new_entry->eflags |= MAP_ENTRY_GROWS_UP; } vm_map_unlock(map); return (rv); } static int stack_guard_page = 0; TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page); SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW, &stack_guard_page, 0, "Insert stack guard page ahead of the growable segments."); /* Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the * desired address is already mapped, or if we successfully grow * the stack. Also returns KERN_SUCCESS if addr is outside the * stack range (this is strange, but preserves compatibility with * the grow function in vm_machdep.c).
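 *
 * Caller sketch (editorial; the fault-path shape is assumed, not
 * taken from this file): the page-fault handler probes for stack
 * growth before failing a fault on an unmapped user address:
 *
 *	if (map != kernel_map &&
 *	    vm_map_growstack(curproc, va) != KERN_SUCCESS)
 *		return (KERN_FAILURE);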
*/ int vm_map_growstack(struct proc *p, vm_offset_t addr) { vm_map_entry_t next_entry, prev_entry; vm_map_entry_t new_entry, stack_entry; struct vmspace *vm = p->p_vmspace; vm_map_t map = &vm->vm_map; vm_offset_t end; size_t grow_amount, max_grow; rlim_t stacklim, vmemlim; int is_procstack, rv; struct ucred *cred; #ifdef notyet uint64_t limit; #endif #ifdef RACCT int error; #endif Retry: PROC_LOCK(p); stacklim = lim_cur(p, RLIMIT_STACK); vmemlim = lim_cur(p, RLIMIT_VMEM); PROC_UNLOCK(p); vm_map_lock_read(map); /* If addr is already in the entry range, no need to grow. */ if (vm_map_lookup_entry(map, addr, &prev_entry)) { vm_map_unlock_read(map); return (KERN_SUCCESS); } next_entry = prev_entry->next; if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) { /* * This entry does not grow upwards. Since the address lies * beyond this entry, the next entry (if one exists) has to * be a downward growable entry. The entry list header is * never a growable entry, so it suffices to check the flags. */ if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) { vm_map_unlock_read(map); return (KERN_SUCCESS); } stack_entry = next_entry; } else { /* * This entry grows upward. If the next entry does not at * least grow downwards, this is the entry we need to grow. * Otherwise we have two possible choices and we have to * select one. */ if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) { /* * We have two choices: grow the entry closest to * the address to minimize the amount of growth. */ if (addr - prev_entry->end <= next_entry->start - addr) stack_entry = prev_entry; else stack_entry = next_entry; } else stack_entry = prev_entry; } if (stack_entry == next_entry) { KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo")); KASSERT(addr < stack_entry->start, ("foo")); end = (prev_entry != &map->header) ? prev_entry->end : stack_entry->start - stack_entry->avail_ssize; grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE); max_grow = stack_entry->start - end; } else { KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo")); KASSERT(addr >= stack_entry->end, ("foo")); end = (next_entry != &map->header) ? next_entry->start : stack_entry->end + stack_entry->avail_ssize; grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE); max_grow = end - stack_entry->end; } if (grow_amount > stack_entry->avail_ssize) { vm_map_unlock_read(map); return (KERN_NO_SPACE); } /* * If there is no longer enough space between the entries, no go; * adjust the available space. Note: this should only happen if the * user has mapped into the stack area after the stack was created, * and is probably an error. * * This also effectively destroys any guard page the user might have * intended by limiting the stack size. */ if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) { if (vm_map_lock_upgrade(map)) goto Retry; stack_entry->avail_ssize = max_grow; vm_map_unlock(map); return (KERN_NO_SPACE); } is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0; /* * If this is the main process stack, see if we're over the stack * limit.
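 *
 * Worked example (editorial; 4KB pages and sgrowsiz = 32KB assumed):
 * a fault 5 pages below a downward stack entry gives grow_amount =
 * roundup(5 * 4KB, PAGE_SIZE) = 20KB above; the code below rounds
 * that up to the 32KB sgrowsiz unit and then clips it against both
 * avail_ssize and RLIMIT_STACK before actually growing.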
*/ if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { vm_map_unlock_read(map); return (KERN_NO_SPACE); } #ifdef RACCT PROC_LOCK(p); if (is_procstack && racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) { PROC_UNLOCK(p); vm_map_unlock_read(map); return (KERN_NO_SPACE); } PROC_UNLOCK(p); #endif /* Round up the grow amount modulo SGROWSIZ */ grow_amount = roundup (grow_amount, sgrowsiz); if (grow_amount > stack_entry->avail_ssize) grow_amount = stack_entry->avail_ssize; if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) { grow_amount = trunc_page((vm_size_t)stacklim) - ctob(vm->vm_ssize); } #ifdef notyet PROC_LOCK(p); limit = racct_get_available(p, RACCT_STACK); PROC_UNLOCK(p); if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit)) grow_amount = limit - ctob(vm->vm_ssize); #endif /* If we would blow our VMEM resource limit, no go */ if (map->size + grow_amount > vmemlim) { vm_map_unlock_read(map); rv = KERN_NO_SPACE; goto out; } #ifdef RACCT PROC_LOCK(p); if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) { PROC_UNLOCK(p); vm_map_unlock_read(map); rv = KERN_NO_SPACE; goto out; } PROC_UNLOCK(p); #endif if (vm_map_lock_upgrade(map)) goto Retry; if (stack_entry == next_entry) { /* * Growing downward. */ /* Get the preliminary new entry start value */ addr = stack_entry->start - grow_amount; /* * If this puts us into the previous entry, cut back our * growth to the available space. Also, see the note above. */ if (addr < end) { stack_entry->avail_ssize = max_grow; addr = end; if (stack_guard_page) addr += PAGE_SIZE; } rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start, p->p_sysent->sv_stackprot, VM_PROT_ALL, 0); /* Adjust the available stack space by the amount we grew. */ if (rv == KERN_SUCCESS) { if (prev_entry != &map->header) vm_map_clip_end(map, prev_entry, addr); new_entry = prev_entry->next; KASSERT(new_entry == stack_entry->prev, ("foo")); KASSERT(new_entry->end == stack_entry->start, ("foo")); KASSERT(new_entry->start == addr, ("foo")); grow_amount = new_entry->end - new_entry->start; new_entry->avail_ssize = stack_entry->avail_ssize - grow_amount; stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN; new_entry->eflags |= MAP_ENTRY_GROWS_DOWN; } } else { /* * Growing upward. */ addr = stack_entry->end + grow_amount; /* * If this puts us into the next entry, cut back our growth * to the available space. Also, see the note above. */ if (addr > end) { stack_entry->avail_ssize = end - stack_entry->end; addr = end; if (stack_guard_page) addr -= PAGE_SIZE; } grow_amount = addr - stack_entry->end; cred = stack_entry->cred; if (cred == NULL && stack_entry->object.vm_object != NULL) cred = stack_entry->object.vm_object->cred; if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred)) rv = KERN_NO_SPACE; /* Grow the underlying object if applicable. */ else if (stack_entry->object.vm_object == NULL || vm_object_coalesce(stack_entry->object.vm_object, stack_entry->offset, (vm_size_t)(stack_entry->end - stack_entry->start), (vm_size_t)grow_amount, cred != NULL)) { map->size += (addr - stack_entry->end); /* Update the current entry. */ stack_entry->end = addr; stack_entry->avail_ssize -= grow_amount; vm_map_entry_resize_free(map, stack_entry); rv = KERN_SUCCESS; if (next_entry != &map->header) vm_map_clip_start(map, next_entry, addr); } else rv = KERN_FAILURE; } if (rv == KERN_SUCCESS && is_procstack) vm->vm_ssize += btoc(grow_amount); vm_map_unlock(map); /* * Heed the MAP_WIREFUTURE flag if it was set for this process. 
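 *
 * Context note (editorial): MAP_WIREFUTURE is the map-level flag
 * set on behalf of mlockall(2)'s MCL_FUTURE, so a process that ran
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * has each newly grown stack page wired here up front rather than
 * taking a wiring fault on it later.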
*/ if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) { vm_map_wire(map, (stack_entry == next_entry) ? addr : addr - grow_amount, (stack_entry == next_entry) ? stack_entry->start : addr, (p->p_flag & P_SYSTEM) ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); } out: #ifdef RACCT if (rv != KERN_SUCCESS) { PROC_LOCK(p); error = racct_set(p, RACCT_VMEM, map->size); KASSERT(error == 0, ("decreasing RACCT_VMEM failed")); error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize)); KASSERT(error == 0, ("decreasing RACCT_STACK failed")); PROC_UNLOCK(p); } #endif return (rv); } /* * Unshare the specified VM space for exec. If other processes are * mapped to it, then create a new one. The new vmspace is null. */ int vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser) { struct vmspace *oldvmspace = p->p_vmspace; struct vmspace *newvmspace; newvmspace = vmspace_alloc(minuser, maxuser); if (newvmspace == NULL) return (ENOMEM); newvmspace->vm_swrss = oldvmspace->vm_swrss; /* * This code is written like this for prototype purposes. The * goal is to avoid running down the vmspace here, but let the * other processes that are still using the vmspace finally * run it down. Even though there is little or no chance of blocking * here, it is a good idea to keep this form for future mods. */ PROC_VMSPACE_LOCK(p); p->p_vmspace = newvmspace; PROC_VMSPACE_UNLOCK(p); if (p == curthread->td_proc) pmap_activate(curthread); vmspace_free(oldvmspace); return (0); } /* * Unshare the specified VM space for forcing COW. This * is called by rfork, for the (RFMEM|RFPROC) == 0 case. */ int vmspace_unshare(struct proc *p) { struct vmspace *oldvmspace = p->p_vmspace; struct vmspace *newvmspace; vm_ooffset_t fork_charge; if (oldvmspace->vm_refcnt == 1) return (0); fork_charge = 0; newvmspace = vmspace_fork(oldvmspace, &fork_charge); if (newvmspace == NULL) return (ENOMEM); if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) { vmspace_free(newvmspace); return (ENOMEM); } PROC_VMSPACE_LOCK(p); p->p_vmspace = newvmspace; PROC_VMSPACE_UNLOCK(p); if (p == curthread->td_proc) pmap_activate(curthread); vmspace_free(oldvmspace); return (0); } /* * vm_map_lookup: * * Finds the VM object, offset, and * protection for a given virtual address in the * specified map, assuming a page fault of the * type specified. * * Leaves the map in question locked for read; return * values are guaranteed until a vm_map_lookup_done * call is performed. Note that the map argument * is in/out; the returned map must be used in * the call to vm_map_lookup_done. * * A handle (out_entry) is returned for use in * vm_map_lookup_done, to make that fast. * * If a lookup is requested with "write protection" * specified, the map may be changed to perform virtual * copying operations, although the data referenced will * remain the same. */ int vm_map_lookup(vm_map_t *var_map, /* IN/OUT */ vm_offset_t vaddr, vm_prot_t fault_typea, vm_map_entry_t *out_entry, /* OUT */ vm_object_t *object, /* OUT */ vm_pindex_t *pindex, /* OUT */ vm_prot_t *out_prot, /* OUT */ boolean_t *wired) /* OUT */ { vm_map_entry_t entry; vm_map_t map = *var_map; vm_prot_t prot; vm_prot_t fault_type = fault_typea; vm_object_t eobject; vm_size_t size; struct ucred *cred; RetryLookup:; vm_map_lock_read(map); /* * Lookup the faulting address. */ if (!vm_map_lookup_entry(map, vaddr, out_entry)) { vm_map_unlock_read(map); return (KERN_INVALID_ADDRESS); } entry = *out_entry; /* * Handle submaps.
*/ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { vm_map_t old_map = map; *var_map = map = entry->object.sub_map; vm_map_unlock_read(old_map); goto RetryLookup; } /* * Check whether this task is allowed to have this page. */ prot = entry->protection; fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) { vm_map_unlock_read(map); return (KERN_PROTECTION_FAILURE); } if ((entry->eflags & MAP_ENTRY_USER_WIRED) && (entry->eflags & MAP_ENTRY_COW) && (fault_type & VM_PROT_WRITE)) { vm_map_unlock_read(map); return (KERN_PROTECTION_FAILURE); } /* * If this page is not pageable, we have to get it for all possible * accesses. */ *wired = (entry->wired_count != 0); if (*wired) fault_type = entry->protection; size = entry->end - entry->start; /* * If the entry was copy-on-write, we either perform the copy now (for * a write fault) or demote the permissions allowed, as handled below. */ if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { /* * If we want to write the page, we may as well handle that * now since we've got the map locked. * * If we don't need to write the page, we just demote the * permissions allowed. */ if ((fault_type & VM_PROT_WRITE) != 0 || (fault_typea & VM_PROT_COPY) != 0) { /* * Make a new object, and place it in the object * chain. Note that no new references have appeared * -- one just moved from the map to the new * object. */ if (vm_map_lock_upgrade(map)) goto RetryLookup; if (entry->cred == NULL) { /* * The debugger owner is charged for * the memory. */ cred = curthread->td_ucred; crhold(cred); if (!swap_reserve_by_cred(size, cred)) { crfree(cred); vm_map_unlock(map); return (KERN_RESOURCE_SHORTAGE); } entry->cred = cred; } vm_object_shadow(&entry->object.vm_object, &entry->offset, size); entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; eobject = entry->object.vm_object; if (eobject->cred != NULL) { /* * The object was not shadowed. */ swap_release_by_cred(size, entry->cred); crfree(entry->cred); entry->cred = NULL; } else if (entry->cred != NULL) { VM_OBJECT_LOCK(eobject); eobject->cred = entry->cred; eobject->charge = size; VM_OBJECT_UNLOCK(eobject); entry->cred = NULL; } vm_map_lock_downgrade(map); } else { /* * We're attempting to read a copy-on-write page -- * don't allow writes. */ prot &= ~VM_PROT_WRITE; } } /* * Create an object if necessary. */ if (entry->object.vm_object == NULL && !map->system_map) { if (vm_map_lock_upgrade(map)) goto RetryLookup; entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, atop(size)); entry->offset = 0; if (entry->cred != NULL) { VM_OBJECT_LOCK(entry->object.vm_object); entry->object.vm_object->cred = entry->cred; entry->object.vm_object->charge = size; VM_OBJECT_UNLOCK(entry->object.vm_object); entry->cred = NULL; } vm_map_lock_downgrade(map); } /* * Return the object/offset from this entry. If the entry was * copy-on-write or empty, it has been fixed up. */ *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); *object = entry->object.vm_object; *out_prot = prot; return (KERN_SUCCESS); } /* * vm_map_lookup_locked: * * Lookup the faulting address. A version of vm_map_lookup that returns * KERN_FAILURE instead of blocking on map lock or memory allocation. */ int vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */ vm_offset_t vaddr, vm_prot_t fault_typea, vm_map_entry_t *out_entry, /* OUT */ vm_object_t *object, /* OUT */ vm_pindex_t *pindex, /* OUT */ vm_prot_t *out_prot, /* OUT */ boolean_t *wired) /* OUT */ { vm_map_entry_t entry; vm_map_t map = *var_map; vm_prot_t prot; vm_prot_t fault_type = fault_typea; /* * Lookup the faulting address.
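 *
 * Note that, unlike vm_map_lookup() above, this variant never acquires
 * the map lock itself -- the caller is expected to hold it already --
 * and every case that the blocking variant handles by upgrading the
 * lock or allocating an object fails here with KERN_FAILURE.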
*/ if (!vm_map_lookup_entry(map, vaddr, out_entry)) return (KERN_INVALID_ADDRESS); entry = *out_entry; /* * Fail if the entry refers to a submap. */ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) return (KERN_FAILURE); /* * Check whether this task is allowed to have this page. */ prot = entry->protection; fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; if ((fault_type & prot) != fault_type) return (KERN_PROTECTION_FAILURE); if ((entry->eflags & MAP_ENTRY_USER_WIRED) && (entry->eflags & MAP_ENTRY_COW) && (fault_type & VM_PROT_WRITE)) return (KERN_PROTECTION_FAILURE); /* * If this page is not pageable, we have to get it for all possible * accesses. */ *wired = (entry->wired_count != 0); if (*wired) fault_type = entry->protection; if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { /* * Fail if the entry was copy-on-write for a write fault. */ if (fault_type & VM_PROT_WRITE) return (KERN_FAILURE); /* * We're attempting to read a copy-on-write page -- * don't allow writes. */ prot &= ~VM_PROT_WRITE; } /* * Fail if an object should be created. */ if (entry->object.vm_object == NULL && !map->system_map) return (KERN_FAILURE); /* * Return the object/offset from this entry. If the entry was * copy-on-write or empty, it has been fixed up. */ *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); *object = entry->object.vm_object; *out_prot = prot; return (KERN_SUCCESS); } /* * vm_map_lookup_done: * * Releases locks acquired by a vm_map_lookup * (according to the handle returned by that lookup). */ void vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) { /* * Unlock the main-level map */ vm_map_unlock_read(map); } #include "opt_ddb.h" #ifdef DDB #include <sys/kernel.h> #include <ddb/ddb.h> /* * vm_map_print: [ debug ] */ DB_SHOW_COMMAND(map, vm_map_print) { static int nlines; /* XXX convert args. */ vm_map_t map = (vm_map_t)addr; boolean_t full = have_addr; vm_map_entry_t entry; db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n", (void *)map, (void *)map->pmap, map->nentries, map->timestamp); nlines++; if (!full && db_indent) return; db_indent += 2; for (entry = map->header.next; entry != &map->header; entry = entry->next) { db_iprintf("map entry %p: start=%p, end=%p\n", (void *)entry, (void *)entry->start, (void *)entry->end); nlines++; { static char *inheritance_name[4] = {"share", "copy", "none", "donate_copy"}; db_iprintf(" prot=%x/%x/%s", entry->protection, entry->max_protection, inheritance_name[(int)(unsigned char)entry->inheritance]); if (entry->wired_count != 0) db_printf(", wired"); } if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { db_printf(", share=%p, offset=0x%jx\n", (void *)entry->object.sub_map, (uintmax_t)entry->offset); nlines++; if ((entry->prev == &map->header) || (entry->prev->object.sub_map != entry->object.sub_map)) { db_indent += 2; vm_map_print((db_expr_t)(intptr_t) entry->object.sub_map, full, 0, (char *)0); db_indent -= 2; } } else { if (entry->cred != NULL) db_printf(", ruid %d", entry->cred->cr_ruid); db_printf(", object=%p, offset=0x%jx", (void *)entry->object.vm_object, (uintmax_t)entry->offset); if (entry->object.vm_object && entry->object.vm_object->cred) db_printf(", obj ruid %d charge %jx", entry->object.vm_object->cred->cr_ruid, (uintmax_t)entry->object.vm_object->charge); if (entry->eflags & MAP_ENTRY_COW) db_printf(", copy (%s)", (entry->eflags & MAP_ENTRY_NEEDS_COPY) ?
"needed" : "done"); db_printf("\n"); nlines++; if ((entry->prev == &map->header) || (entry->prev->object.vm_object != entry->object.vm_object)) { db_indent += 2; vm_object_print((db_expr_t)(intptr_t) entry->object.vm_object, full, 0, (char *)0); nlines += 4; db_indent -= 2; } } } db_indent -= 2; if (db_indent == 0) nlines = 0; } DB_SHOW_COMMAND(procvm, procvm) { struct proc *p; if (have_addr) { p = (struct proc *) addr; } else { p = curproc; } db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n", (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map, (void *)vmspace_pmap(p->p_vmspace)); vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL); } #endif /* DDB */ Index: projects/nand/sys/vm/vm_map.h =================================================================== --- projects/nand/sys/vm/vm_map.h (revision 235262) +++ projects/nand/sys/vm/vm_map.h (revision 235263) @@ -1,389 +1,398 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)vm_map.h 8.9 (Berkeley) 5/17/95 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
* * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * * $FreeBSD$ */ /* * Virtual memory map module definitions. */ #ifndef _VM_MAP_ #define _VM_MAP_ #include <sys/lock.h> #include <sys/sx.h> #include <sys/_mutex.h> /* * Types defined: * * vm_map_t the high-level address map data structure. * vm_map_entry_t an entry in an address map. */ typedef u_char vm_flags_t; typedef u_int vm_eflags_t; /* * Objects which live in maps may be either VM objects, or * another map (called a "sharing map") which denotes read-write * sharing with other maps. */ union vm_map_object { struct vm_object *vm_object; /* object object */ struct vm_map *sub_map; /* belongs to another map */ }; /* * Address map entries consist of start and end addresses, * a VM object (or sharing map) and offset into that object, * and user-exported inheritance and protection information. * Also included is control information for virtual copy operations. */ struct vm_map_entry { struct vm_map_entry *prev; /* previous entry */ struct vm_map_entry *next; /* next entry */ struct vm_map_entry *left; /* left child in binary search tree */ struct vm_map_entry *right; /* right child in binary search tree */ vm_offset_t start; /* start address */ vm_offset_t end; /* end address */ vm_offset_t avail_ssize; /* amt can grow if this is a stack */ vm_size_t adj_free; /* amount of adjacent free space */ vm_size_t max_free; /* max free space in subtree */ union vm_map_object object; /* object I point to */ vm_ooffset_t offset; /* offset into object */ vm_eflags_t eflags; /* map entry flags */ vm_prot_t protection; /* protection code */ vm_prot_t max_protection; /* maximum protection */ vm_inherit_t inheritance; /* inheritance */ + uint8_t read_ahead; /* pages in the read-ahead window */ int wired_count; /* can be paged if = 0 */ - vm_pindex_t lastr; /* last read */ + vm_pindex_t next_read; /* index of the next sequential read */ struct ucred *cred; /* tmp storage for creator ref */ }; #define MAP_ENTRY_NOSYNC 0x0001 #define MAP_ENTRY_IS_SUB_MAP 0x0002 #define MAP_ENTRY_COW 0x0004 #define MAP_ENTRY_NEEDS_COPY 0x0008 #define MAP_ENTRY_NOFAULT 0x0010 #define MAP_ENTRY_USER_WIRED 0x0020 #define MAP_ENTRY_BEHAV_NORMAL 0x0000 /* default behavior */ #define MAP_ENTRY_BEHAV_SEQUENTIAL 0x0040 /* expect sequential access */ #define MAP_ENTRY_BEHAV_RANDOM 0x0080 /* expect random access */ #define MAP_ENTRY_BEHAV_RESERVED 0x00C0 /* future use */ #define MAP_ENTRY_BEHAV_MASK 0x00C0 #define MAP_ENTRY_IN_TRANSITION 0x0100 /* entry being changed */ #define MAP_ENTRY_NEEDS_WAKEUP 0x0200 /* waiters in transition */ #define MAP_ENTRY_NOCOREDUMP 0x0400 /* don't include in a core */ #define MAP_ENTRY_GROWS_DOWN 0x1000 /* Top-down stacks */ #define MAP_ENTRY_GROWS_UP 0x2000 /* Bottom-up stacks */ #define MAP_ENTRY_WIRE_SKIPPED 0x4000 #define MAP_ENTRY_VN_WRITECNT 0x8000 /* writeable vnode mapping */ #ifdef _KERNEL static __inline u_char vm_map_entry_behavior(vm_map_entry_t entry) { return (entry->eflags & MAP_ENTRY_BEHAV_MASK); } static __inline int vm_map_entry_user_wired_count(vm_map_entry_t entry) { if (entry->eflags & MAP_ENTRY_USER_WIRED) return (1); return (0); } static __inline int vm_map_entry_system_wired_count(vm_map_entry_t entry) { return (entry->wired_count - vm_map_entry_user_wired_count(entry)); } #endif /*
_KERNEL */ /* * A map is a set of map entries. These map entries are * organized both as a binary search tree and as a doubly-linked * list. Both structures are ordered based upon the start and * end addresses contained within each map entry. Sleator and * Tarjan's top-down splay algorithm is employed to control * height imbalance in the binary search tree. * * List of locks * (c) const until freed */ struct vm_map { struct vm_map_entry header; /* List of entries */ struct sx lock; /* Lock for map data */ struct mtx system_mtx; int nentries; /* Number of entries */ vm_size_t size; /* virtual size */ u_int timestamp; /* Version number */ u_char needs_wakeup; u_char system_map; /* (c) Am I a system map? */ vm_flags_t flags; /* flags for this vm_map */ vm_map_entry_t root; /* Root of a binary search tree */ pmap_t pmap; /* (c) Physical map */ #define min_offset header.start /* (c) */ #define max_offset header.end /* (c) */ int busy; }; /* * vm_flags_t values */ #define MAP_WIREFUTURE 0x01 /* wire all future pages */ #define MAP_BUSY_WAKEUP 0x02 #ifdef _KERNEL static __inline vm_offset_t vm_map_max(vm_map_t map) { return (map->max_offset); } static __inline vm_offset_t vm_map_min(vm_map_t map) { return (map->min_offset); } static __inline pmap_t vm_map_pmap(vm_map_t map) { return (map->pmap); } static __inline void vm_map_modflags(vm_map_t map, vm_flags_t set, vm_flags_t clear) { map->flags = (map->flags | set) & ~clear; } #endif /* _KERNEL */ /* * Shareable process virtual address space. * * List of locks * (c) const until freed */ struct vmspace { struct vm_map vm_map; /* VM address map */ struct shmmap_state *vm_shm; /* SYS5 shared memory private data XXX */ segsz_t vm_swrss; /* resident set size before last swap */ segsz_t vm_tsize; /* text size (pages) XXX */ segsz_t vm_dsize; /* data size (pages) XXX */ segsz_t vm_ssize; /* stack size (pages) */ caddr_t vm_taddr; /* (c) user virtual address of text */ caddr_t vm_daddr; /* (c) user virtual address of data */ caddr_t vm_maxsaddr; /* user VA at max stack growth */ volatile int vm_refcnt; /* number of references */ /* * Keep the PMAP last, so that CPU-specific variations of that * structure on a single architecture don't result in offset * variations of the machine-independent fields in the vmspace. */ struct pmap vm_pmap; /* private physical map */ }; #ifdef _KERNEL static __inline pmap_t vmspace_pmap(struct vmspace *vmspace) { return &vmspace->vm_pmap; } #endif /* _KERNEL */ #ifdef _KERNEL /* * Macros: vm_map_lock, etc. * Function: * Perform locking on the data portion of a map. Note that * these macros mimic procedure calls returning void. The * semicolon is supplied by the user of these macros, not * by the macros themselves. The macros can safely be used * as unbraced elements in a higher level statement. 
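 *
 * A sketch of the typical pattern, as used by vm_map_lookup() in
 * vm_map.c ("retry" is a placeholder label):
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_upgrade(map))
 *		goto retry;	(a failed upgrade drops the lock)
 *	... modify the map under the exclusive lock ...
 *	vm_map_lock_downgrade(map);
 *	...
 *	vm_map_unlock_read(map);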
*/ void _vm_map_lock(vm_map_t map, const char *file, int line); void _vm_map_unlock(vm_map_t map, const char *file, int line); int _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line); void _vm_map_lock_read(vm_map_t map, const char *file, int line); void _vm_map_unlock_read(vm_map_t map, const char *file, int line); int _vm_map_trylock(vm_map_t map, const char *file, int line); int _vm_map_trylock_read(vm_map_t map, const char *file, int line); int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line); void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line); int vm_map_locked(vm_map_t map); void vm_map_wakeup(vm_map_t map); void vm_map_busy(vm_map_t map); void vm_map_unbusy(vm_map_t map); void vm_map_wait_busy(vm_map_t map); #define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE) #define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE) #define vm_map_unlock_and_wait(map, timo) \ _vm_map_unlock_and_wait(map, timo, LOCK_FILE, LOCK_LINE) #define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE) #define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE) #define vm_map_trylock(map) _vm_map_trylock(map, LOCK_FILE, LOCK_LINE) #define vm_map_trylock_read(map) \ _vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE) #define vm_map_lock_upgrade(map) \ _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE) #define vm_map_lock_downgrade(map) \ _vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE) long vmspace_resident_count(struct vmspace *vmspace); long vmspace_wired_count(struct vmspace *vmspace); #endif /* _KERNEL */ /* XXX: number of kernel maps and entries to statically allocate */ #define MAX_KMAP 10 #define MAX_KMAPENT 128 /* * Copy-on-write flags for vm_map operations */ #define MAP_INHERIT_SHARE 0x0001 #define MAP_COPY_ON_WRITE 0x0002 #define MAP_NOFAULT 0x0004 #define MAP_PREFAULT 0x0008 #define MAP_PREFAULT_PARTIAL 0x0010 #define MAP_DISABLE_SYNCER 0x0020 #define MAP_DISABLE_COREDUMP 0x0100 #define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */ #define MAP_VN_WRITECOUNT 0x0400 #define MAP_STACK_GROWS_DOWN 0x1000 #define MAP_STACK_GROWS_UP 0x2000 #define MAP_ACC_CHARGED 0x4000 #define MAP_ACC_NO_CHARGE 0x8000 /* * vm_fault option flags */ #define VM_FAULT_NORMAL 0 /* Nothing special */ #define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */ #define VM_FAULT_DIRTY 2 /* Dirty the page; use w/VM_PROT_COPY */ + +/* + * Initially, mappings are slightly sequential. The maximum window size must + * account for the map entry's "read_ahead" field being defined as a uint8_t. + */ +#define VM_FAULT_READ_AHEAD_MIN 7 +#define VM_FAULT_READ_AHEAD_INIT 15 +#define VM_FAULT_READ_AHEAD_MAX min(atop(MAXPHYS) - 1, UINT8_MAX) /* * The following "find_space" options are supported by vm_map_find() */ #define VMFS_NO_SPACE 0 /* don't find; use the given range */ #define VMFS_ANY_SPACE 1 /* find a range with any alignment */ #define VMFS_ALIGNED_SPACE 2 /* find a superpage-aligned range */ #if defined(__mips__) #define VMFS_TLB_ALIGNED_SPACE 3 /* find a TLB entry aligned range */ #endif /* * vm_map_wire and vm_map_unwire option flags */ #define VM_MAP_WIRE_SYSTEM 0 /* wiring in a kernel map */ #define VM_MAP_WIRE_USER 1 /* wiring in a user map */ #define VM_MAP_WIRE_NOHOLES 0 /* region must not have holes */ #define VM_MAP_WIRE_HOLESOK 2 /* region may have holes */ #define VM_MAP_WIRE_WRITE 4 /* Validate writable.
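 *
 * For scale (assuming the common MAXPHYS of 128 KiB and 4 KiB pages):
 * atop() converts a byte count to a page count, so the
 * VM_FAULT_READ_AHEAD_MAX added above evaluates to
 * min(32 - 1, 255) = 31 pages.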
*/ #ifdef _KERNEL boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t); vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t); int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t); int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, int, vm_prot_t, vm_prot_t, int); int vm_map_fixed(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int); int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *); int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t); void vm_map_init(vm_map_t, pmap_t, vm_offset_t, vm_offset_t); int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int); int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, vm_pindex_t *, vm_prot_t *, boolean_t *); int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *, vm_pindex_t *, vm_prot_t *, boolean_t *); void vm_map_lookup_done (vm_map_t, vm_map_entry_t); boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *); void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t); int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t); void vm_map_startup (void); int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t); int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int); void vm_map_simplify_entry (vm_map_t, vm_map_entry_t); void vm_init2 (void); int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int); int vm_map_growstack (struct proc *p, vm_offset_t addr); int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags); int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags); long vmspace_swap_count(struct vmspace *vmspace); #endif /* _KERNEL */ #endif /* _VM_MAP_ */ Index: projects/nand/sys =================================================================== --- projects/nand/sys (revision 235262) +++ projects/nand/sys (revision 235263) Property changes on: projects/nand/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r235157-235262 Index: projects/nand/tools/build/mk/OptionalObsoleteFiles.inc =================================================================== --- projects/nand/tools/build/mk/OptionalObsoleteFiles.inc (revision 235262) +++ projects/nand/tools/build/mk/OptionalObsoleteFiles.inc (revision 235263) @@ -1,4023 +1,4025 @@ # # $FreeBSD$ # # This file adds support for the WITHOUT_* and WITH_* knobs in src.conf(5) to # the check-old and delete-old* targets.
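# Each knob below follows the same pattern; as an illustrative sketch
# (MK_FOO and the paths are placeholders, not a real knob):
#
#   .if ${MK_FOO} == no
#   OLD_FILES+=usr/bin/foo
#   OLD_FILES+=usr/share/man/man1/foo.1.gz
#   .endif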
# .if ${MK_ACCT} == no OLD_FILES+=etc/periodic/daily/310.accounting OLD_FILES+=usr/sbin/accton OLD_FILES+=usr/sbin/sa OLD_FILES+=usr/share/man/man8/accton.8.gz OLD_FILES+=usr/share/man/man8/sa.8.gz .endif .if ${MK_ACPI} == no OLD_FILES+=usr/sbin/acpiconf OLD_FILES+=usr/sbin/acpidb OLD_FILES+=usr/sbin/acpidump OLD_FILES+=usr/sbin/iasl OLD_FILES+=usr/share/man/man8/acpiconf.8.gz OLD_FILES+=usr/share/man/man8/acpidb.8.gz OLD_FILES+=usr/share/man/man8/acpidump.8.gz OLD_FILES+=usr/share/man/man8/iasl.8.gz .endif .if ${MK_AMD} == no OLD_FILES+=etc/amd.map OLD_FILES+=usr/bin/pawd OLD_FILES+=usr/sbin/amd OLD_FILES+=usr/sbin/amq OLD_FILES+=usr/sbin/fixmount OLD_FILES+=usr/sbin/fsinfo OLD_FILES+=usr/sbin/hlfsd OLD_FILES+=usr/sbin/mk-amd-map OLD_FILES+=usr/sbin/wire-test OLD_FILES+=usr/share/examples/etc/amd.map OLD_FILES+=usr/share/info/am-utils.info.gz OLD_FILES+=usr/share/man/man1/pawd.1.gz OLD_FILES+=usr/share/man/man5/amd.conf.5.gz OLD_FILES+=usr/share/man/man8/amd.8.gz OLD_FILES+=usr/share/man/man8/amq.8.gz OLD_FILES+=usr/share/man/man8/fixmount.8.gz OLD_FILES+=usr/share/man/man8/fsinfo.8.gz OLD_FILES+=usr/share/man/man8/hlfsd.8.gz OLD_FILES+=usr/share/man/man8/mk-amd-map.8.gz OLD_FILES+=usr/share/man/man8/wire-test.8.gz .endif .if ${MK_APM} == no OLD_FILES+=etc/apmd.conf OLD_FILES+=usr/sbin/apm OLD_FILES+=usr/share/examples/etc/apmd.conf OLD_FILES+=usr/share/man/man8/amd64/apm.8.gz OLD_FILES+=usr/share/man/man8/amd64/apmconf.8.gz .endif .if ${MK_AT} == no OLD_FILES+=usr/bin/at OLD_FILES+=usr/bin/atq OLD_FILES+=usr/bin/atrm OLD_FILES+=usr/bin/batch OLD_FILES+=usr/libexec/atrun OLD_FILES+=usr/share/man/man1/at.1.gz OLD_FILES+=usr/share/man/man1/atq.1.gz OLD_FILES+=usr/share/man/man1/atrm.1.gz OLD_FILES+=usr/share/man/man1/batch.1.gz OLD_FILES+=usr/share/man/man8/atrun.8.gz .endif .if ${MK_ATM} == no OLD_FILES+=rescue/atmconfig OLD_FILES+=sbin/atmconfig OLD_FILES+=usr/bin/sscop OLD_FILES+=usr/include/bsnmp/snmp_atm.h OLD_FILES+=usr/include/netnatm/addr.h OLD_FILES+=usr/include/netnatm/api/atmapi.h OLD_FILES+=usr/include/netnatm/api/ccatm.h OLD_FILES+=usr/include/netnatm/api/unisap.h OLD_FILES+=usr/include/netnatm/msg/uni_config.h OLD_FILES+=usr/include/netnatm/msg/uni_hdr.h OLD_FILES+=usr/include/netnatm/msg/uni_ie.h OLD_FILES+=usr/include/netnatm/msg/uni_msg.h OLD_FILES+=usr/include/netnatm/msg/unimsglib.h OLD_FILES+=usr/include/netnatm/msg/uniprint.h OLD_FILES+=usr/include/netnatm/msg/unistruct.h OLD_FILES+=usr/include/netnatm/saal/sscfu.h OLD_FILES+=usr/include/netnatm/saal/sscfudef.h OLD_FILES+=usr/include/netnatm/saal/sscop.h OLD_FILES+=usr/include/netnatm/saal/sscopdef.h OLD_FILES+=usr/include/netnatm/sig/uni.h OLD_FILES+=usr/include/netnatm/sig/unidef.h OLD_FILES+=usr/include/netnatm/sig/unisig.h OLD_FILES+=usr/include/netnatm/unimsg.h OLD_FILES+=usr/lib/libngatm.a OLD_FILES+=usr/lib/libngatm.so OLD_LIBS+=usr/lib/libngatm.so.4 OLD_FILES+=usr/lib/libngatm_p.a OLD_FILES+=usr/lib/snmp_atm.so OLD_LIBS+=usr/lib/snmp_atm.so.6 .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libngatm.a OLD_FILES+=usr/lib32/libngatm.so OLD_LIBS+=usr/lib32/libngatm.so.4 OLD_FILES+=usr/lib32/libngatm_p.a .endif OLD_FILES+=usr/share/doc/atm/atmconfig.help OLD_FILES+=usr/share/doc/atm/atmconfig_device.help OLD_FILES+=usr/share/man/man1/sscop.1.gz OLD_FILES+=usr/share/man/man3/libngatm.3.gz OLD_FILES+=usr/share/man/man3/snmp_atm.3.gz OLD_FILES+=usr/share/man/man3/uniaddr.3.gz OLD_FILES+=usr/share/man/man3/unifunc.3.gz OLD_FILES+=usr/share/man/man3/unimsg.3.gz 
OLD_FILES+=usr/share/man/man3/unisap.3.gz OLD_FILES+=usr/share/man/man3/unistruct.3.gz OLD_FILES+=usr/share/man/man8/atmconfig.8.gz OLD_FILES+=usr/share/snmp/defs/atm_freebsd.def OLD_FILES+=usr/share/snmp/defs/atm_tree.def OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt .endif .if ${MK_AUDIT} == no OLD_FILES+=usr/sbin/audit OLD_FILES+=usr/sbin/auditd OLD_FILES+=usr/sbin/auditreduce OLD_FILES+=usr/sbin/praudit OLD_FILES+=usr/share/man/man1/auditreduce.1.gz OLD_FILES+=usr/share/man/man1/praudit.1.gz OLD_FILES+=usr/share/man/man8/audit.8.gz OLD_FILES+=usr/share/man/man8/auditd.8.gz .endif .if ${MK_AUTHPF} == no OLD_FILES+=usr/sbin/authpf OLD_FILES+=usr/share/man/man8/authpf.8.gz .endif .if ${MK_BIND} == no OLD_FILES+=etc/periodic/daily/470.status-named OLD_FILES+=usr/bin/dig OLD_FILES+=usr/bin/host OLD_FILES+=usr/bin/nslookup OLD_FILES+=usr/bin/nsupdate OLD_FILES+=usr/include/lwres/context.h OLD_FILES+=usr/include/lwres/int.h OLD_FILES+=usr/include/lwres/ipv6.h OLD_FILES+=usr/include/lwres/lang.h OLD_FILES+=usr/include/lwres/list.h OLD_FILES+=usr/include/lwres/lwbuffer.h OLD_FILES+=usr/include/lwres/lwpacket.h OLD_FILES+=usr/include/lwres/lwres.h OLD_FILES+=usr/include/lwres/result.h OLD_FILES+=usr/include/lwres/version.h OLD_FILES+=usr/include/lwres/net.h OLD_FILES+=usr/include/lwres/netdb.h OLD_FILES+=usr/include/lwres/platform.h OLD_DIRS+=usr/include/lwres OLD_FILES+=usr/lib/liblwres.a OLD_FILES+=usr/lib/liblwres_p.a OLD_LIBS+=usr/lib/liblwres.so.50 OLD_FILES+=usr/lib/liblwres.so OLD_FILES+=usr/sbin/named OLD_FILES+=usr/sbin/lwresd OLD_FILES+=usr/sbin/named-checkconf OLD_FILES+=usr/sbin/named-checkzone OLD_FILES+=usr/sbin/named.reload OLD_FILES+=usr/sbin/named.reconfig OLD_FILES+=usr/sbin/rndc OLD_FILES+=usr/sbin/rndc-confgen OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html OLD_DIRS+=usr/share/doc/bind9/arm OLD_FILES+=usr/share/doc/bind9/misc/dnssec OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl OLD_FILES+=usr/share/doc/bind9/misc/ipv6 OLD_FILES+=usr/share/doc/bind9/misc/migration OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9 OLD_FILES+=usr/share/doc/bind9/misc/options OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance OLD_FILES+=usr/share/doc/bind9/misc/roadmap OLD_FILES+=usr/share/doc/bind9/misc/sdb OLD_DIRS+=usr/share/doc/bind9/misc OLD_DIRS+=usr/share/doc/bind9 OLD_FILES+=usr/share/doc/bind9/CHANGES OLD_FILES+=usr/share/doc/bind9/COPYRIGHT OLD_FILES+=usr/share/doc/bind9/FAQ OLD_FILES+=usr/share/doc/bind9/README OLD_FILES+=usr/share/man/man1/dig.1.gz OLD_FILES+=usr/share/man/man1/host.1.gz OLD_FILES+=usr/share/man/man1/nslookup.1.gz OLD_FILES+=usr/share/man/man3/lwres.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz OLD_FILES+=usr/share/man/man3/lwres_config.3.gz OLD_FILES+=usr/share/man/man3/lwres_context.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz 
OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz 
OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz OLD_FILES+=usr/share/man/man5/named.conf.5.gz OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz OLD_FILES+=usr/share/man/man1/nsupdate.1.gz OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz OLD_FILES+=usr/share/man/man8/named.8.gz OLD_FILES+=usr/share/man/man8/lwresd.8.gz OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz OLD_FILES+=usr/share/man/man8/named.reload.8.gz OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz OLD_FILES+=usr/share/man/man8/rndc.8.gz OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz OLD_DIRS+=var/named/dev OLD_DIRS+=var/named/var/stats OLD_DIRS+=var/named/var/run/named OLD_DIRS+=var/named/var/run OLD_DIRS+=var/named/var/log OLD_DIRS+=var/named/var/dump OLD_DIRS+=var/named/var .endif .if ${MK_BIND_DNSSEC} == no || ${MK_BIND} == no OLD_FILES+=usr/sbin/dnssec-keygen OLD_FILES+=usr/sbin/dnssec-signzone .endif .if ${MK_BIND_ETC} == no || ${MK_BIND} == no OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev OLD_FILES+=var/named/etc/namedb/make-localhost #OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out OLD_FILES+=var/named/etc/namedb/named.root OLD_DIRS+=var/named/etc/namedb/slave OLD_DIRS+=var/named/etc/namedb/master OLD_DIRS+=var/named/etc/namedb/dynamic #OLD_DIRS+=var/named/etc/namedb #OLD_DIRS+=var/named/etc .endif #.if ${MK_BIND_LIBS} == no || ${MK_BIND} == no # to be filled in and removed above #.endif #.if ${MK_BIND_LIBS_LWRES} == no || ${MK_BIND} == no # to be filled in and removed above #.endif #.if ${MK_BIND_NAMED} == no || ${MK_BIND} == no # to be filled in and removed above #.endif .if ${MK_BLUETOOTH} == no OLD_FILES+=etc/bluetooth/hcsecd.conf OLD_FILES+=etc/bluetooth/hosts OLD_FILES+=etc/bluetooth/protocols OLD_FILES+=usr/bin/bthost OLD_FILES+=usr/bin/btsockstat OLD_FILES+=usr/bin/rfcomm_sppd OLD_FILES+=usr/include/bluetooth.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bluetooth.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bt3c.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_hci_raw.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_l2cap.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_rfcomm.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_sco.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_h4.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_hci.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_l2cap.h OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_ubt.h OLD_DIRS+=usr/include/netgraph/bluetooth/include OLD_DIRS+=usr/include/netgraph/bluetooth OLD_FILES+=usr/include/sdp.h 
OLD_FILES+=usr/lib/libbluetooth.a OLD_FILES+=usr/lib/libbluetooth.so OLD_LIBS+=usr/lib/libbluetooth.so.4 OLD_FILES+=usr/lib/libbluetooth_p.a OLD_FILES+=usr/lib/libsdp.a OLD_FILES+=usr/lib/libsdp.so OLD_LIBS+=usr/lib/libsdp.so.4 OLD_FILES+=usr/lib/libsdp_p.a .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libbluetooth.a OLD_FILES+=usr/lib32/libbluetooth.so OLD_LIBS+=usr/lib32/libbluetooth.so.4 OLD_FILES+=usr/lib32/libbluetooth_p.a OLD_FILES+=usr/lib32/libsdp.a OLD_FILES+=usr/lib32/libsdp.so OLD_LIBS+=usr/lib32/libsdp.so.4 OLD_FILES+=usr/lib32/libsdp_p.a .endif OLD_FILES+=usr/sbin/bcmfw OLD_FILES+=usr/sbin/bt3cfw OLD_FILES+=usr/sbin/bthidcontrol OLD_FILES+=usr/sbin/bthidd OLD_FILES+=usr/sbin/btpand OLD_FILES+=usr/sbin/hccontrol OLD_FILES+=usr/sbin/hcsecd OLD_FILES+=usr/sbin/hcseriald OLD_FILES+=usr/sbin/l2control OLD_FILES+=usr/sbin/l2ping OLD_FILES+=usr/sbin/rfcomm_pppd OLD_FILES+=usr/sbin/sdpcontrol OLD_FILES+=usr/sbin/sdpd OLD_FILES+=usr/share/man/man1/bthost.1.gz OLD_FILES+=usr/share/man/man1/btsockstat.1.gz OLD_FILES+=usr/share/man/man1/rfcomm_sppd.1.gz OLD_FILES+=usr/share/man/man3/SDP_GET128.3.gz OLD_FILES+=usr/share/man/man3/SDP_GET16.3.gz OLD_FILES+=usr/share/man/man3/SDP_GET32.3.gz OLD_FILES+=usr/share/man/man3/SDP_GET64.3.gz OLD_FILES+=usr/share/man/man3/SDP_GET8.3.gz OLD_FILES+=usr/share/man/man3/SDP_PUT128.3.gz OLD_FILES+=usr/share/man/man3/SDP_PUT16.3.gz OLD_FILES+=usr/share/man/man3/SDP_PUT32.3.gz OLD_FILES+=usr/share/man/man3/SDP_PUT64.3.gz OLD_FILES+=usr/share/man/man3/SDP_PUT8.3.gz OLD_FILES+=usr/share/man/man3/bdaddr_any.3.gz OLD_FILES+=usr/share/man/man3/bdaddr_copy.3.gz OLD_FILES+=usr/share/man/man3/bdaddr_same.3.gz OLD_FILES+=usr/share/man/man3/bluetooth.3.gz OLD_FILES+=usr/share/man/man3/bt_aton.3.gz OLD_FILES+=usr/share/man/man3/bt_devaddr.3.gz OLD_FILES+=usr/share/man/man3/bt_devclose.3.gz OLD_FILES+=usr/share/man/man3/bt_devenum.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_clr.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_set.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_tst.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_clr.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_set.3.gz OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_tst.3.gz OLD_FILES+=usr/share/man/man3/bt_devinfo.3.gz OLD_FILES+=usr/share/man/man3/bt_devinquiry.3.gz OLD_FILES+=usr/share/man/man3/bt_devname.3.gz OLD_FILES+=usr/share/man/man3/bt_devopen.3.gz OLD_FILES+=usr/share/man/man3/bt_devreq.3.gz OLD_FILES+=usr/share/man/man3/bt_devsend.3.gz OLD_FILES+=usr/share/man/man3/bt_endhostent.3.gz OLD_FILES+=usr/share/man/man3/bt_endprotoent.3.gz OLD_FILES+=usr/share/man/man3/bt_gethostbyaddr.3.gz OLD_FILES+=usr/share/man/man3/bt_gethostbyname.3.gz OLD_FILES+=usr/share/man/man3/bt_gethostent.3.gz OLD_FILES+=usr/share/man/man3/bt_getprotobyname.3.gz OLD_FILES+=usr/share/man/man3/bt_getprotobynumber.3.gz OLD_FILES+=usr/share/man/man3/bt_getprotoent.3.gz OLD_FILES+=usr/share/man/man3/bt_ntoa.3.gz OLD_FILES+=usr/share/man/man3/bt_sethostent.3.gz OLD_FILES+=usr/share/man/man3/bt_setprotoent.3.gz OLD_FILES+=usr/share/man/man3/sdp.3.gz OLD_FILES+=usr/share/man/man3/sdp_attr2desc.3.gz OLD_FILES+=usr/share/man/man3/sdp_change_service.3.gz OLD_FILES+=usr/share/man/man3/sdp_close.3.gz OLD_FILES+=usr/share/man/man3/sdp_error.3.gz OLD_FILES+=usr/share/man/man3/sdp_open.3.gz OLD_FILES+=usr/share/man/man3/sdp_open_local.3.gz OLD_FILES+=usr/share/man/man3/sdp_register_service.3.gz 
OLD_FILES+=usr/share/man/man3/sdp_search.3.gz OLD_FILES+=usr/share/man/man3/sdp_unregister_service.3.gz OLD_FILES+=usr/share/man/man3/sdp_uuid2desc.3.gz OLD_FILES+=usr/share/man/man5/hcsecd.conf.5.gz OLD_FILES+=usr/share/man/man8/bcmfw.8.gz OLD_FILES+=usr/share/man/man8/bt3cfw.8.gz OLD_FILES+=usr/share/man/man8/bthidcontrol.8.gz OLD_FILES+=usr/share/man/man8/bthidd.8.gz OLD_FILES+=usr/share/man/man8/btpand.8.gz OLD_FILES+=usr/share/man/man8/hccontrol.8.gz OLD_FILES+=usr/share/man/man8/hcsecd.8.gz OLD_FILES+=usr/share/man/man8/hcseriald.8.gz OLD_FILES+=usr/share/man/man8/l2control.8.gz OLD_FILES+=usr/share/man/man8/l2ping.8.gz OLD_FILES+=usr/share/man/man8/rfcomm_pppd.8.gz OLD_FILES+=usr/share/man/man8/sdpcontrol.8.gz OLD_FILES+=usr/share/man/man8/sdpd.8.gz .endif #.if ${MK_BOOT} == no # to be filled in #.endif .if ${MK_CALENDAR} == no OLD_FILES+=etc/periodic/daily/300.calendar OLD_FILES+=usr/bin/calendar OLD_FILES+=usr/share/calendar/calendar.all OLD_FILES+=usr/share/calendar/calendar.australia OLD_FILES+=usr/share/calendar/calendar.birthday OLD_FILES+=usr/share/calendar/calendar.christian OLD_FILES+=usr/share/calendar/calendar.computer OLD_FILES+=usr/share/calendar/calendar.croatian OLD_FILES+=usr/share/calendar/calendar.dutch OLD_FILES+=usr/share/calendar/calendar.freebsd OLD_FILES+=usr/share/calendar/calendar.french OLD_FILES+=usr/share/calendar/calendar.german OLD_FILES+=usr/share/calendar/calendar.history OLD_FILES+=usr/share/calendar/calendar.holiday OLD_FILES+=usr/share/calendar/calendar.hungarian OLD_FILES+=usr/share/calendar/calendar.judaic OLD_FILES+=usr/share/calendar/calendar.lotr OLD_FILES+=usr/share/calendar/calendar.music OLD_FILES+=usr/share/calendar/calendar.newzealand OLD_FILES+=usr/share/calendar/calendar.russian OLD_FILES+=usr/share/calendar/calendar.southafrica OLD_FILES+=usr/share/calendar/calendar.ukrainian OLD_FILES+=usr/share/calendar/calendar.usholiday OLD_FILES+=usr/share/calendar/calendar.world OLD_DIRS+=usr/share/calendar/de_AT.ISO_8859-15 OLD_FILES+=usr/share/calendar/de_AT.ISO_8859-15/calendar.feiertag OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.all OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.feiertag OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.geschichte OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.kirche OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.literatur OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.musik OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.wissenschaft OLD_FILES+=usr/share/calendar/de_DE.ISO8859-15 OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.all OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.fetes OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.french OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.jferies OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.proverbes OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-15 OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.all OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.praznici OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.all OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.nevnapok OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.unnepek OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.all OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.common OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.holiday OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.military OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk 
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.orthodox OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.pagan OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.all OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.holiday OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.misc OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.orthodox OLD_FILES+=usr/share/man/man1/calendar.1.gz .endif .if ${MK_CDDL} == no OLD_LIBS+=lib/libavl.so.2 OLD_LIBS+=lib/libctf.so.2 OLD_LIBS+=lib/libdtrace.so.2 OLD_LIBS+=lib/libnvpair.so.2 OLD_LIBS+=lib/libumem.so.2 OLD_LIBS+=lib/libuutil.so.2 OLD_FILES+=usr/bin/ctfconvert OLD_FILES+=usr/bin/ctfdump OLD_FILES+=usr/bin/ctfmerge OLD_FILES+=usr/bin/sgsmsg OLD_FILES+=usr/lib/dtrace/drti.o OLD_FILES+=usr/lib/dtrace/errno.d OLD_FILES+=usr/lib/dtrace/psinfo.d OLD_FILES+=usr/lib/dtrace/signal.d OLD_FILES+=usr/lib/dtrace/unistd.d OLD_FILES+=usr/lib/libavl.a OLD_FILES+=usr/lib/libavl.so OLD_FILES+=usr/lib/libavl_p.a OLD_FILES+=usr/lib/libctf.a OLD_FILES+=usr/lib/libctf.so OLD_FILES+=usr/lib/libctf_p.a OLD_FILES+=usr/lib/libdtrace.a OLD_FILES+=usr/lib/libdtrace.so OLD_FILES+=usr/lib/libdtrace_p.a OLD_FILES+=usr/lib/libnvpair.a OLD_FILES+=usr/lib/libnvpair.so OLD_FILES+=usr/lib/libnvpair_p.a OLD_FILES+=usr/lib/libumem.a OLD_FILES+=usr/lib/libumem.so OLD_FILES+=usr/lib/libumem_p.a OLD_FILES+=usr/lib/libuutil.a OLD_FILES+=usr/lib/libuutil.so OLD_FILES+=usr/lib/libuutil_p.a .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/dtrace/drti.o OLD_FILES+=usr/lib32/libavl.a OLD_FILES+=usr/lib32/libavl.so OLD_LIBS+=usr/lib32/libavl.so.2 OLD_FILES+=usr/lib32/libavl_p.a OLD_FILES+=usr/lib32/libctf.a OLD_FILES+=usr/lib32/libctf.so OLD_LIBS+=usr/lib32/libctf.so.2 OLD_FILES+=usr/lib32/libctf_p.a OLD_FILES+=usr/lib32/libdtrace.a OLD_FILES+=usr/lib32/libdtrace.so OLD_LIBS+=usr/lib32/libdtrace.so.2 OLD_FILES+=usr/lib32/libdtrace_p.a OLD_FILES+=usr/lib32/libnvpair.a OLD_FILES+=usr/lib32/libnvpair.so OLD_LIBS+=usr/lib32/libnvpair.so.2 OLD_FILES+=usr/lib32/libnvpair_p.a OLD_FILES+=usr/lib32/libumem.a OLD_FILES+=usr/lib32/libumem.so OLD_LIBS+=usr/lib32/libumem.so.2 OLD_FILES+=usr/lib32/libumem_p.a OLD_FILES+=usr/lib32/libuutil.a OLD_FILES+=usr/lib32/libuutil.so OLD_LIBS+=usr/lib32/libuutil.so.2 OLD_FILES+=usr/lib32/libuutil_p.a .endif OLD_FILES+=usr/sbin/dtrace OLD_FILES+=usr/sbin/lockstat OLD_FILES+=usr/share/man/man1/dtrace.1.gz .endif .if ${MK_ZFS} == no OLD_FILES+=boot/gptzfsboot OLD_FILES+=boot/zfsboot OLD_FILES+=boot/zfsloader OLD_FILES+=etc/periodic/daily/404.status-zfs OLD_FILES+=etc/periodic/daily/800.scrub-zfs OLD_LIBS+=lib/libzfs.so.2 OLD_LIBS+=lib/libzpool.so.2 OLD_FILES+=rescue/zfs OLD_FILES+=rescue/zpool OLD_FILES+=sbin/zfs OLD_FILES+=sbin/zpool OLD_FILES+=usr/bin/zinject OLD_FILES+=usr/bin/ztest OLD_FILES+=usr/lib/libzfs.a OLD_FILES+=usr/lib/libzfs.so OLD_FILES+=usr/lib/libzfs_p.a OLD_FILES+=usr/lib/libzpool.a OLD_FILES+=usr/lib/libzpool.so .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libzfs.a OLD_FILES+=usr/lib32/libzfs.so OLD_LIBS+=usr/lib32/libzfs.so.2 OLD_FILES+=usr/lib32/libzfs_p.a OLD_FILES+=usr/lib32/libzpool.a OLD_FILES+=usr/lib32/libzpool.so OLD_LIBS+=usr/lib32/libzpool.so.2 .endif OLD_FILES+=usr/sbin/zdb OLD_FILES+=usr/share/man/man8/zdb.8.gz OLD_FILES+=usr/share/man/man8/zfs.8.gz OLD_FILES+=usr/share/man/man8/zpool.8.gz .endif .if ${MK_CLANG} == no OLD_FILES+=usr/bin/clang OLD_FILES+=usr/bin/clang++ OLD_FILES+=usr/bin/clang-cpp OLD_FILES+=usr/bin/clang-tblgen OLD_FILES+=usr/bin/tblgen 
OLD_FILES+=usr/include/clang/2.8/emmintrin.h OLD_FILES+=usr/include/clang/2.8/mm_malloc.h OLD_FILES+=usr/include/clang/2.8/mmintrin.h OLD_FILES+=usr/include/clang/2.8/pmmintrin.h OLD_FILES+=usr/include/clang/2.8/tmmintrin.h OLD_FILES+=usr/include/clang/2.8/xmmintrin.h OLD_DIRS+=usr/include/clang/2.8 OLD_FILES+=usr/include/clang/2.9/emmintrin.h OLD_FILES+=usr/include/clang/2.9/mm_malloc.h OLD_FILES+=usr/include/clang/2.9/mmintrin.h OLD_FILES+=usr/include/clang/2.9/pmmintrin.h OLD_FILES+=usr/include/clang/2.9/tmmintrin.h OLD_FILES+=usr/include/clang/2.9/xmmintrin.h OLD_DIRS+=usr/include/clang/2.9 OLD_FILES+=usr/include/clang/3.0/altivec.h OLD_FILES+=usr/include/clang/3.0/avxintrin.h OLD_FILES+=usr/include/clang/3.0/emmintrin.h OLD_FILES+=usr/include/clang/3.0/immintrin.h OLD_FILES+=usr/include/clang/3.0/mm3dnow.h OLD_FILES+=usr/include/clang/3.0/mm_malloc.h OLD_FILES+=usr/include/clang/3.0/mmintrin.h OLD_FILES+=usr/include/clang/3.0/nmmintrin.h OLD_FILES+=usr/include/clang/3.0/pmmintrin.h OLD_FILES+=usr/include/clang/3.0/smmintrin.h OLD_FILES+=usr/include/clang/3.0/tmmintrin.h OLD_FILES+=usr/include/clang/3.0/wmmintrin.h OLD_FILES+=usr/include/clang/3.0/x86intrin.h OLD_FILES+=usr/include/clang/3.0/xmmintrin.h OLD_DIRS+=usr/include/clang/3.0 OLD_DIRS+=usr/include/clang OLD_FILES+=usr/share/doc/llvm/clang/LICENSE.TXT OLD_DIRS+=usr/share/doc/llvm/clang OLD_FILES+=usr/share/doc/llvm/COPYRIGHT.regex OLD_FILES+=usr/share/doc/llvm/LICENSE.TXT OLD_DIRS+=usr/share/doc/llvm OLD_FILES+=usr/share/man/man1/clang.1.gz OLD_FILES+=usr/share/man/man1/clang++.1.gz OLD_FILES+=usr/share/man/man1/clang-cpp.1.gz OLD_FILES+=usr/share/man/man1/tblgen.1.gz .endif .if ${MK_CLANG_EXTRAS} == no OLD_FILES+=usr/bin/bugpoint OLD_FILES+=usr/bin/llc OLD_FILES+=usr/bin/lli OLD_FILES+=usr/bin/llvm-ar OLD_FILES+=usr/bin/llvm-as OLD_FILES+=usr/bin/llvm-bcanalyzer OLD_FILES+=usr/bin/llvm-diff OLD_FILES+=usr/bin/llvm-dis OLD_FILES+=usr/bin/llvm-extract OLD_FILES+=usr/bin/llvm-ld OLD_FILES+=usr/bin/llvm-link OLD_FILES+=usr/bin/llvm-mc OLD_FILES+=usr/bin/llvm-nm OLD_FILES+=usr/bin/llvm-objdump OLD_FILES+=usr/bin/llvm-prof OLD_FILES+=usr/bin/llvm-ranlib OLD_FILES+=usr/bin/llvm-rtdyld OLD_FILES+=usr/bin/llvm-stub OLD_FILES+=usr/bin/macho-dump OLD_FILES+=usr/bin/opt OLD_FILES+=usr/share/man/man1/bugpoint.1.gz OLD_FILES+=usr/share/man/man1/llc.1.gz OLD_FILES+=usr/share/man/man1/lli.1.gz OLD_FILES+=usr/share/man/man1/llvm-ar.1.gz OLD_FILES+=usr/share/man/man1/llvm-as.1.gz OLD_FILES+=usr/share/man/man1/llvm-bcanalyzer.1.gz OLD_FILES+=usr/share/man/man1/llvm-diff.1.gz OLD_FILES+=usr/share/man/man1/llvm-dis.1.gz OLD_FILES+=usr/share/man/man1/llvm-extract.1.gz OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz OLD_FILES+=usr/share/man/man1/llvm-link.1.gz OLD_FILES+=usr/share/man/man1/llvm-nm.1.gz OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz OLD_FILES+=usr/share/man/man1/llvm-ranlib.1.gz OLD_FILES+=usr/share/man/man1/opt.1.gz .endif .if ${MK_CPP} == no OLD_FILES+=usr/bin/cpp OLD_FILES+=usr/share/man/man1/cpp.1.gz .endif #.if ${MK_CRYPT} == no # to be filled in #.endif .if ${MK_CTM} == no OLD_FILES+=usr/sbin/ctm OLD_FILES+=usr/sbin/ctm_dequeue OLD_FILES+=usr/sbin/ctm_rmail OLD_FILES+=usr/sbin/ctm_smail OLD_FILES+=usr/share/man/man1/ctm.1.gz OLD_FILES+=usr/share/man/man1/ctm_dequeue.1.gz OLD_FILES+=usr/share/man/man1/ctm_rmail.1.gz OLD_FILES+=usr/share/man/man1/ctm_smail.1.gz OLD_FILES+=usr/share/man/man5/ctm.5.gz .endif .if ${MK_CVS} == no OLD_FILES+=usr/bin/cvs OLD_FILES+=usr/bin/cvsbug OLD_FILES+=usr/share/examples/cvs/contrib/README 
OLD_FILES+=usr/share/examples/cvs/contrib/clmerge OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man OLD_FILES+=usr/share/examples/cvs/contrib/descend.man OLD_FILES+=usr/share/examples/cvs/contrib/easy-import OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc OLD_FILES+=usr/share/examples/cvs/contrib/log OLD_FILES+=usr/share/examples/cvs/contrib/log_accum OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log OLD_FILES+=usr/share/examples/cvs/contrib/rcslock OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs OLD_FILES+=usr/share/info/cvs.info.gz OLD_FILES+=usr/share/info/cvsclient.info.gz OLD_FILES+=usr/share/man/man1/cvs.1.gz OLD_FILES+=usr/share/man/man5/cvs.5.gz OLD_FILES+=usr/share/man/man8/cvsbug.8.gz .endif # devd(8) and gperf(1) not listed here on purpose .if ${MK_CXX} == no OLD_FILES+=usr/bin/CC OLD_FILES+=usr/bin/c++ OLD_FILES+=usr/bin/c++filt OLD_FILES+=usr/bin/g++ OLD_FILES+=usr/include/c++/4.2/algorithm OLD_FILES+=usr/include/c++/4.2/backward/algo.h OLD_FILES+=usr/include/c++/4.2/backward/algobase.h OLD_FILES+=usr/include/c++/4.2/backward/alloc.h OLD_FILES+=usr/include/c++/4.2/backward/backward_warning.h OLD_FILES+=usr/include/c++/4.2/backward/bvector.h OLD_FILES+=usr/include/c++/4.2/backward/complex.h OLD_FILES+=usr/include/c++/4.2/backward/defalloc.h OLD_FILES+=usr/include/c++/4.2/backward/deque.h OLD_FILES+=usr/include/c++/4.2/backward/fstream.h OLD_FILES+=usr/include/c++/4.2/backward/function.h OLD_FILES+=usr/include/c++/4.2/backward/hash_map.h OLD_FILES+=usr/include/c++/4.2/backward/hash_set.h OLD_FILES+=usr/include/c++/4.2/backward/hashtable.h OLD_FILES+=usr/include/c++/4.2/backward/heap.h OLD_FILES+=usr/include/c++/4.2/backward/iomanip.h OLD_FILES+=usr/include/c++/4.2/backward/iostream.h OLD_FILES+=usr/include/c++/4.2/backward/istream.h OLD_FILES+=usr/include/c++/4.2/backward/iterator.h OLD_FILES+=usr/include/c++/4.2/backward/list.h OLD_FILES+=usr/include/c++/4.2/backward/map.h OLD_FILES+=usr/include/c++/4.2/backward/multimap.h OLD_FILES+=usr/include/c++/4.2/backward/multiset.h OLD_FILES+=usr/include/c++/4.2/backward/new.h OLD_FILES+=usr/include/c++/4.2/backward/ostream.h OLD_FILES+=usr/include/c++/4.2/backward/pair.h OLD_FILES+=usr/include/c++/4.2/backward/queue.h OLD_FILES+=usr/include/c++/4.2/backward/rope.h OLD_FILES+=usr/include/c++/4.2/backward/set.h OLD_FILES+=usr/include/c++/4.2/backward/slist.h OLD_FILES+=usr/include/c++/4.2/backward/stack.h OLD_FILES+=usr/include/c++/4.2/backward/stream.h OLD_FILES+=usr/include/c++/4.2/backward/streambuf.h OLD_FILES+=usr/include/c++/4.2/backward/strstream OLD_FILES+=usr/include/c++/4.2/backward/tempbuf.h OLD_FILES+=usr/include/c++/4.2/backward/tree.h OLD_FILES+=usr/include/c++/4.2/backward/vector.h OLD_FILES+=usr/include/c++/4.2/bits/allocator.h OLD_FILES+=usr/include/c++/4.2/bits/atomic_word.h OLD_FILES+=usr/include/c++/4.2/bits/basic_file.h OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.h OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/4.2/bits/basic_string.h OLD_FILES+=usr/include/c++/4.2/bits/basic_string.tcc OLD_FILES+=usr/include/c++/4.2/bits/boost_concept_check.h 
OLD_FILES+=usr/include/c++/4.2/bits/c++allocator.h OLD_FILES+=usr/include/c++/4.2/bits/c++config.h OLD_FILES+=usr/include/c++/4.2/bits/c++io.h OLD_FILES+=usr/include/c++/4.2/bits/c++locale.h OLD_FILES+=usr/include/c++/4.2/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/4.2/bits/char_traits.h OLD_FILES+=usr/include/c++/4.2/bits/cmath.tcc OLD_FILES+=usr/include/c++/4.2/bits/codecvt.h OLD_FILES+=usr/include/c++/4.2/bits/compatibility.h OLD_FILES+=usr/include/c++/4.2/bits/concept_check.h OLD_FILES+=usr/include/c++/4.2/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/4.2/bits/cpu_defines.h OLD_FILES+=usr/include/c++/4.2/bits/ctype_base.h OLD_FILES+=usr/include/c++/4.2/bits/ctype_inline.h OLD_FILES+=usr/include/c++/4.2/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/4.2/bits/cxxabi_tweaks.h OLD_FILES+=usr/include/c++/4.2/bits/deque.tcc OLD_FILES+=usr/include/c++/4.2/bits/fstream.tcc OLD_FILES+=usr/include/c++/4.2/bits/functexcept.h OLD_FILES+=usr/include/c++/4.2/bits/gslice.h OLD_FILES+=usr/include/c++/4.2/bits/gslice_array.h OLD_FILES+=usr/include/c++/4.2/bits/gthr-default.h OLD_FILES+=usr/include/c++/4.2/bits/gthr-posix.h OLD_FILES+=usr/include/c++/4.2/bits/gthr-single.h OLD_FILES+=usr/include/c++/4.2/bits/gthr-tpf.h OLD_FILES+=usr/include/c++/4.2/bits/gthr.h OLD_FILES+=usr/include/c++/4.2/bits/indirect_array.h OLD_FILES+=usr/include/c++/4.2/bits/ios_base.h OLD_FILES+=usr/include/c++/4.2/bits/istream.tcc OLD_FILES+=usr/include/c++/4.2/bits/list.tcc OLD_FILES+=usr/include/c++/4.2/bits/locale_classes.h OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.h OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/4.2/bits/localefwd.h OLD_FILES+=usr/include/c++/4.2/bits/mask_array.h OLD_FILES+=usr/include/c++/4.2/bits/messages_members.h OLD_FILES+=usr/include/c++/4.2/bits/os_defines.h OLD_FILES+=usr/include/c++/4.2/bits/ostream.tcc OLD_FILES+=usr/include/c++/4.2/bits/ostream_insert.h OLD_FILES+=usr/include/c++/4.2/bits/postypes.h OLD_FILES+=usr/include/c++/4.2/bits/slice_array.h OLD_FILES+=usr/include/c++/4.2/bits/sstream.tcc OLD_FILES+=usr/include/c++/4.2/bits/stl_algo.h OLD_FILES+=usr/include/c++/4.2/bits/stl_algobase.h OLD_FILES+=usr/include/c++/4.2/bits/stl_bvector.h OLD_FILES+=usr/include/c++/4.2/bits/stl_construct.h OLD_FILES+=usr/include/c++/4.2/bits/stl_deque.h OLD_FILES+=usr/include/c++/4.2/bits/stl_function.h OLD_FILES+=usr/include/c++/4.2/bits/stl_heap.h OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator.h OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/4.2/bits/stl_list.h OLD_FILES+=usr/include/c++/4.2/bits/stl_map.h OLD_FILES+=usr/include/c++/4.2/bits/stl_multimap.h OLD_FILES+=usr/include/c++/4.2/bits/stl_multiset.h OLD_FILES+=usr/include/c++/4.2/bits/stl_numeric.h OLD_FILES+=usr/include/c++/4.2/bits/stl_pair.h OLD_FILES+=usr/include/c++/4.2/bits/stl_queue.h OLD_FILES+=usr/include/c++/4.2/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/4.2/bits/stl_relops.h OLD_FILES+=usr/include/c++/4.2/bits/stl_set.h OLD_FILES+=usr/include/c++/4.2/bits/stl_stack.h OLD_FILES+=usr/include/c++/4.2/bits/stl_tempbuf.h OLD_FILES+=usr/include/c++/4.2/bits/stl_tree.h OLD_FILES+=usr/include/c++/4.2/bits/stl_uninitialized.h OLD_FILES+=usr/include/c++/4.2/bits/stl_vector.h OLD_FILES+=usr/include/c++/4.2/bits/stream_iterator.h OLD_FILES+=usr/include/c++/4.2/bits/streambuf.tcc OLD_FILES+=usr/include/c++/4.2/bits/streambuf_iterator.h 
OLD_FILES+=usr/include/c++/4.2/bits/stringfwd.h OLD_FILES+=usr/include/c++/4.2/bits/time_members.h OLD_FILES+=usr/include/c++/4.2/bits/valarray_after.h OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.h OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.tcc OLD_FILES+=usr/include/c++/4.2/bits/valarray_before.h OLD_FILES+=usr/include/c++/4.2/bits/vector.tcc OLD_FILES+=usr/include/c++/4.2/bitset OLD_FILES+=usr/include/c++/4.2/cassert OLD_FILES+=usr/include/c++/4.2/cctype OLD_FILES+=usr/include/c++/4.2/cerrno OLD_FILES+=usr/include/c++/4.2/cfloat OLD_FILES+=usr/include/c++/4.2/ciso646 OLD_FILES+=usr/include/c++/4.2/climits OLD_FILES+=usr/include/c++/4.2/clocale OLD_FILES+=usr/include/c++/4.2/cmath OLD_FILES+=usr/include/c++/4.2/complex OLD_FILES+=usr/include/c++/4.2/csetjmp OLD_FILES+=usr/include/c++/4.2/csignal OLD_FILES+=usr/include/c++/4.2/cstdarg OLD_FILES+=usr/include/c++/4.2/cstddef OLD_FILES+=usr/include/c++/4.2/cstdio OLD_FILES+=usr/include/c++/4.2/cstdlib OLD_FILES+=usr/include/c++/4.2/cstring OLD_FILES+=usr/include/c++/4.2/ctime OLD_FILES+=usr/include/c++/4.2/cwchar OLD_FILES+=usr/include/c++/4.2/cwctype OLD_FILES+=usr/include/c++/4.2/cxxabi.h OLD_FILES+=usr/include/c++/4.2/debug/bitset OLD_FILES+=usr/include/c++/4.2/debug/debug.h OLD_FILES+=usr/include/c++/4.2/debug/deque OLD_FILES+=usr/include/c++/4.2/debug/formatter.h OLD_FILES+=usr/include/c++/4.2/debug/functions.h OLD_FILES+=usr/include/c++/4.2/debug/hash_map OLD_FILES+=usr/include/c++/4.2/debug/hash_map.h OLD_FILES+=usr/include/c++/4.2/debug/hash_multimap.h OLD_FILES+=usr/include/c++/4.2/debug/hash_multiset.h OLD_FILES+=usr/include/c++/4.2/debug/hash_set OLD_FILES+=usr/include/c++/4.2/debug/hash_set.h OLD_FILES+=usr/include/c++/4.2/debug/list OLD_FILES+=usr/include/c++/4.2/debug/macros.h OLD_FILES+=usr/include/c++/4.2/debug/map OLD_FILES+=usr/include/c++/4.2/debug/map.h OLD_FILES+=usr/include/c++/4.2/debug/multimap.h OLD_FILES+=usr/include/c++/4.2/debug/multiset.h OLD_FILES+=usr/include/c++/4.2/debug/safe_base.h OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.h OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.tcc OLD_FILES+=usr/include/c++/4.2/debug/safe_sequence.h OLD_FILES+=usr/include/c++/4.2/debug/set OLD_FILES+=usr/include/c++/4.2/debug/set.h OLD_FILES+=usr/include/c++/4.2/debug/string OLD_FILES+=usr/include/c++/4.2/debug/vector OLD_FILES+=usr/include/c++/4.2/deque OLD_FILES+=usr/include/c++/4.2/exception OLD_FILES+=usr/include/c++/4.2/exception_defines.h OLD_FILES+=usr/include/c++/4.2/ext/algorithm OLD_FILES+=usr/include/c++/4.2/ext/array_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/atomicity.h OLD_FILES+=usr/include/c++/4.2/ext/bitmap_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/codecvt_specializations.h OLD_FILES+=usr/include/c++/4.2/ext/concurrence.h OLD_FILES+=usr/include/c++/4.2/ext/debug_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/functional OLD_FILES+=usr/include/c++/4.2/ext/hash_fun.h OLD_FILES+=usr/include/c++/4.2/ext/hash_map OLD_FILES+=usr/include/c++/4.2/ext/hash_set OLD_FILES+=usr/include/c++/4.2/ext/hashtable.h OLD_FILES+=usr/include/c++/4.2/ext/iterator OLD_FILES+=usr/include/c++/4.2/ext/malloc_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/memory OLD_FILES+=usr/include/c++/4.2/ext/mt_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/new_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/numeric OLD_FILES+=usr/include/c++/4.2/ext/numeric_traits.h OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/assoc_container.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/basic_tree_policy_base.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/null_node_metadata.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_types.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/bin_search_tree_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_dtor_entry_dealtor.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_key_dtor_entry_dealtor.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/node_iterators.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/point_iterators.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/r_erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/rotate_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/binary_heap_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_point_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_cmp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_pred.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/resize_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/binomial_heap_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/binomial_heap_base_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/debug_fn_imps.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cc_ht_map_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cmp_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cond_key_dtor_entry_dealtor.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/entry_list_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/size_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/standard_policies.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cond_dealtor.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/container_base_dispatch.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/eq_by_less.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/hash_eq_fn.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/gp_ht_map_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/iterator_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_no_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_store_hash_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/standard_policies.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mask_range_hashing_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mod_range_hashing_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/linear_probe_fn_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mask_based_range_hashing.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mod_based_range_hashing.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/probe_fn_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/quadratic_probe_fn_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_hash_fn.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_probe_fn.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_probe_fn.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_range_hashing.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_hash_fn.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_probe_fn.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_point_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/debug_fn_imps.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/left_child_next_sibling_heap_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/node.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/null_metadata.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/constructor_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/entry_metadata_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/lu_map_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_metadata.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_policy_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/mtf_lu_policy_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/sample_update_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/map_debug_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/cond_dtor.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/node_iterators.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/ov_tree_map_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/insert_fn_imps.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/pairing_heap_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/child_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/cond_dtor_entry_dealtor.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/const_child_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/head.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/insert_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/internal_node.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/iterators_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/leaf.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_iterators.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_metadata_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/pat_trie_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/point_iterators.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/policy_access_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/r_erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/rotate_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_join_branch_bag.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/synth_e_access_traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/update_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/priority_queue_base_dispatch.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/node.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/rb_tree_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/insert_fn_imps.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc_binomial_heap_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/cc_hash_max_collision_check_resize_trigger_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_exponential_size_policy_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_size_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_prime_size_policy_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_standard_resize_policy_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_trigger.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_size_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/info_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/node.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_tree_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/standard_policies.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/constructors_destructor_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/debug_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/erase_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/find_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/insert_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/split_join_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/thin_heap_.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/trace_fn_imps.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/node_metadata_selector.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/null_node_update_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/order_statistics_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/sample_tree_node_update.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_trace_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/node_metadata_selector.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/null_node_update_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/order_statistics_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/prefix_search_node_update_imp.hpp 
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_e_access_traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_node_update.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/string_trie_e_access_traits_imp.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/trie_policy_base.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/type_utils.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/types_traits.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_point_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/point_iterator.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/exception.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/hash_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/list_update_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/priority_queue.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tag_and_trait.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tree_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/trie_policy.hpp OLD_FILES+=usr/include/c++/4.2/ext/pod_char_traits.h OLD_FILES+=usr/include/c++/4.2/ext/pool_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/rb_tree OLD_FILES+=usr/include/c++/4.2/ext/rc_string_base.h OLD_FILES+=usr/include/c++/4.2/ext/rope OLD_FILES+=usr/include/c++/4.2/ext/ropeimpl.h OLD_FILES+=usr/include/c++/4.2/ext/slist OLD_FILES+=usr/include/c++/4.2/ext/sso_string_base.h OLD_FILES+=usr/include/c++/4.2/ext/stdio_filebuf.h OLD_FILES+=usr/include/c++/4.2/ext/stdio_sync_filebuf.h OLD_FILES+=usr/include/c++/4.2/ext/throw_allocator.h OLD_FILES+=usr/include/c++/4.2/ext/type_traits.h OLD_FILES+=usr/include/c++/4.2/ext/typelist.h OLD_FILES+=usr/include/c++/4.2/ext/vstring.h OLD_FILES+=usr/include/c++/4.2/ext/vstring.tcc OLD_FILES+=usr/include/c++/4.2/ext/vstring_fwd.h OLD_FILES+=usr/include/c++/4.2/ext/vstring_util.h OLD_FILES+=usr/include/c++/4.2/fstream OLD_FILES+=usr/include/c++/4.2/functional OLD_FILES+=usr/include/c++/4.2/iomanip OLD_FILES+=usr/include/c++/4.2/ios OLD_FILES+=usr/include/c++/4.2/iosfwd OLD_FILES+=usr/include/c++/4.2/iostream OLD_FILES+=usr/include/c++/4.2/istream OLD_FILES+=usr/include/c++/4.2/iterator OLD_FILES+=usr/include/c++/4.2/limits OLD_FILES+=usr/include/c++/4.2/list OLD_FILES+=usr/include/c++/4.2/locale OLD_FILES+=usr/include/c++/4.2/map OLD_FILES+=usr/include/c++/4.2/memory OLD_FILES+=usr/include/c++/4.2/new OLD_FILES+=usr/include/c++/4.2/numeric OLD_FILES+=usr/include/c++/4.2/ostream OLD_FILES+=usr/include/c++/4.2/queue OLD_FILES+=usr/include/c++/4.2/set OLD_FILES+=usr/include/c++/4.2/sstream OLD_FILES+=usr/include/c++/4.2/stack OLD_FILES+=usr/include/c++/4.2/stdexcept OLD_FILES+=usr/include/c++/4.2/streambuf OLD_FILES+=usr/include/c++/4.2/string OLD_FILES+=usr/include/c++/4.2/tr1/array OLD_FILES+=usr/include/c++/4.2/tr1/bind_iterate.h OLD_FILES+=usr/include/c++/4.2/tr1/bind_repeat.h OLD_FILES+=usr/include/c++/4.2/tr1/boost_shared_ptr.h OLD_FILES+=usr/include/c++/4.2/tr1/cctype OLD_FILES+=usr/include/c++/4.2/tr1/cfenv OLD_FILES+=usr/include/c++/4.2/tr1/cfloat OLD_FILES+=usr/include/c++/4.2/tr1/cinttypes OLD_FILES+=usr/include/c++/4.2/tr1/climits OLD_FILES+=usr/include/c++/4.2/tr1/cmath OLD_FILES+=usr/include/c++/4.2/tr1/common.h OLD_FILES+=usr/include/c++/4.2/tr1/complex OLD_FILES+=usr/include/c++/4.2/tr1/cstdarg 
OLD_FILES+=usr/include/c++/4.2/tr1/cstdbool OLD_FILES+=usr/include/c++/4.2/tr1/cstdint OLD_FILES+=usr/include/c++/4.2/tr1/cstdio OLD_FILES+=usr/include/c++/4.2/tr1/cstdlib OLD_FILES+=usr/include/c++/4.2/tr1/ctgmath OLD_FILES+=usr/include/c++/4.2/tr1/ctime OLD_FILES+=usr/include/c++/4.2/tr1/ctype.h OLD_FILES+=usr/include/c++/4.2/tr1/cwchar OLD_FILES+=usr/include/c++/4.2/tr1/cwctype OLD_FILES+=usr/include/c++/4.2/tr1/fenv.h OLD_FILES+=usr/include/c++/4.2/tr1/float.h OLD_FILES+=usr/include/c++/4.2/tr1/functional OLD_FILES+=usr/include/c++/4.2/tr1/functional_hash.h OLD_FILES+=usr/include/c++/4.2/tr1/functional_iterate.h OLD_FILES+=usr/include/c++/4.2/tr1/hashtable OLD_FILES+=usr/include/c++/4.2/tr1/hashtable_policy.h OLD_FILES+=usr/include/c++/4.2/tr1/inttypes.h OLD_FILES+=usr/include/c++/4.2/tr1/limits.h OLD_FILES+=usr/include/c++/4.2/tr1/math.h OLD_FILES+=usr/include/c++/4.2/tr1/memory OLD_FILES+=usr/include/c++/4.2/tr1/mu_iterate.h OLD_FILES+=usr/include/c++/4.2/tr1/random OLD_FILES+=usr/include/c++/4.2/tr1/random.tcc OLD_FILES+=usr/include/c++/4.2/tr1/ref_fwd.h OLD_FILES+=usr/include/c++/4.2/tr1/ref_wrap_iterate.h OLD_FILES+=usr/include/c++/4.2/tr1/repeat.h OLD_FILES+=usr/include/c++/4.2/tr1/stdarg.h OLD_FILES+=usr/include/c++/4.2/tr1/stdbool.h OLD_FILES+=usr/include/c++/4.2/tr1/stdint.h OLD_FILES+=usr/include/c++/4.2/tr1/stdio.h OLD_FILES+=usr/include/c++/4.2/tr1/stdlib.h OLD_FILES+=usr/include/c++/4.2/tr1/tgmath.h OLD_FILES+=usr/include/c++/4.2/tr1/tuple OLD_FILES+=usr/include/c++/4.2/tr1/tuple_defs.h OLD_FILES+=usr/include/c++/4.2/tr1/tuple_iterate.h OLD_FILES+=usr/include/c++/4.2/tr1/type_traits OLD_FILES+=usr/include/c++/4.2/tr1/type_traits_fwd.h OLD_FILES+=usr/include/c++/4.2/tr1/unordered_map OLD_FILES+=usr/include/c++/4.2/tr1/unordered_set OLD_FILES+=usr/include/c++/4.2/tr1/utility OLD_FILES+=usr/include/c++/4.2/tr1/wchar.h OLD_FILES+=usr/include/c++/4.2/tr1/wctype.h OLD_FILES+=usr/include/c++/4.2/typeinfo OLD_FILES+=usr/include/c++/4.2/utility OLD_FILES+=usr/include/c++/4.2/valarray OLD_FILES+=usr/include/c++/4.2/vector OLD_FILES+=usr/lib/libstdc++.a # Keep libs to allow bootstrapping g++(1) with gperf(1) #OLD_LIBS+=usr/lib/libstdc++.so #OLD_LIBS+=usr/lib/libstdc++.so.6 OLD_FILES+=usr/lib/libstdc++_p.a OLD_FILES+=usr/lib/libsupc++.a OLD_FILES+=usr/lib/libsupc++_p.a OLD_FILES+=usr/libexec/cc1plus .endif .if ${MK_DICT} == no OLD_FILES+=usr/share/dict/README OLD_FILES+=usr/share/dict/freebsd OLD_FILES+=usr/share/dict/propernames OLD_FILES+=usr/share/dict/web2 OLD_FILES+=usr/share/dict/web2a OLD_FILES+=usr/share/dict/words .endif #.if ${MK_EXAMPLES} == no # to be filled in #.endif .if ${MK_FLOPPY} == no OLD_FILES+=usr/sbin/fdcontrol OLD_FILES+=usr/sbin/fdformat OLD_FILES+=usr/sbin/fdread OLD_FILES+=usr/sbin/fdwrite OLD_FILES+=usr/share/man/man1/fdformat.1.gz OLD_FILES+=usr/share/man/man1/fdread.1.gz OLD_FILES+=usr/share/man/man1/fdwrite.1.gz OLD_FILES+=usr/share/man/man8/fdcontrol.8.gz .endif .if ${MK_FREEBSD_UPDATE} == no OLD_FILES+=etc/freebsd-update.conf OLD_FILES+=usr/sbin/freebsd-update OLD_FILES+=usr/share/examples/etc/freebsd-update.conf OLD_FILES+=usr/share/man/man8/freebsd-update.8.gz .endif .if ${MK_GAMES} == no OLD_FILES+=usr/games/bcd OLD_FILES+=usr/games/caesar OLD_FILES+=usr/games/factor OLD_FILES+=usr/games/fortune OLD_FILES+=usr/games/grdc OLD_FILES+=usr/games/morse OLD_FILES+=usr/games/number OLD_FILES+=usr/games/pom OLD_FILES+=usr/games/ppt OLD_FILES+=usr/games/primes OLD_FILES+=usr/games/random OLD_FILES+=usr/games/rot13 OLD_FILES+=usr/games/strfile 
OLD_FILES+=usr/games/unstr OLD_FILES+=usr/share/games/fortune/fortunes OLD_FILES+=usr/share/games/fortune/fortunes-o OLD_FILES+=usr/share/games/fortune/fortunes-o.dat OLD_FILES+=usr/share/games/fortune/fortunes.dat OLD_FILES+=usr/share/games/fortune/freebsd-tips OLD_FILES+=usr/share/games/fortune/freebsd-tips.dat OLD_FILES+=usr/share/games/fortune/gerrold.limerick OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat OLD_FILES+=usr/share/games/fortune/limerick OLD_FILES+=usr/share/games/fortune/limerick.dat OLD_FILES+=usr/share/games/fortune/murphy OLD_FILES+=usr/share/games/fortune/murphy-o OLD_FILES+=usr/share/games/fortune/murphy-o.dat OLD_FILES+=usr/share/games/fortune/murphy.dat OLD_FILES+=usr/share/games/fortune/startrek OLD_FILES+=usr/share/games/fortune/startrek.dat OLD_FILES+=usr/share/games/fortune/zippy OLD_FILES+=usr/share/games/fortune/zippy.dat OLD_FILES+=usr/share/man/man6/bcd.6.gz OLD_FILES+=usr/share/man/man6/caesar.6.gz OLD_FILES+=usr/share/man/man6/factor.6.gz OLD_FILES+=usr/share/man/man6/fortune.6.gz OLD_FILES+=usr/share/man/man6/grdc.6.gz OLD_FILES+=usr/share/man/man6/morse.6.gz OLD_FILES+=usr/share/man/man6/number.6.gz OLD_FILES+=usr/share/man/man6/pom.6.gz OLD_FILES+=usr/share/man/man6/ppt.6.gz OLD_FILES+=usr/share/man/man6/primes.6.gz OLD_FILES+=usr/share/man/man6/random.6.gz OLD_FILES+=usr/share/man/man6/rot13.6.gz OLD_FILES+=usr/share/man/man8/strfile.8.gz OLD_FILES+=usr/share/man/man8/unstr.8.gz .endif .if ${MK_GCC} == no OLD_FILES+=usr/bin/c++filt OLD_FILES+=usr/bin/g++ OLD_FILES+=usr/bin/gcc OLD_FILES+=usr/bin/gcov OLD_FILES+=usr/bin/gcpp .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/gcc/4.2/emmintrin.h OLD_FILES+=usr/include/gcc/4.2/mm_malloc.h OLD_FILES+=usr/include/gcc/4.2/mmintrin.h OLD_FILES+=usr/include/gcc/4.2/pmmintrin.h OLD_FILES+=usr/include/gcc/4.2/tmmintrin.h OLD_FILES+=usr/include/gcc/4.2/xmmintrin.h .elif ${TARGET_ARCH} == "ia64" OLD_FILES+=usr/include/gcc/4.2/ia64intrin.h .elif ${TARGET_ARCH} == "arm" OLD_FILES+=usr/include/gcc/4.2/mmintrin.h .elif ${TARGET_ARCH} == "powerpc" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/include/gcc/4.2/altivec.h OLD_FILES+=usr/include/gcc/4.2/ppc-asm.h OLD_FILES+=usr/include/gcc/4.2/spe.h .endif OLD_DIRS+=usr/include/gcc/4.2 OLD_DIRS+=usr/include/gcc OLD_FILES+=usr/libexec/cc1 OLD_FILES+=usr/libexec/cc1plus OLD_FILES+=usr/share/info/cpp.info.gz OLD_FILES+=usr/share/info/cppinternals.info.gz OLD_FILES+=usr/share/info/gcc.info.gz OLD_FILES+=usr/share/info/gccint.info.gz OLD_FILES+=usr/share/man/man1/g++.1.gz OLD_FILES+=usr/share/man/man1/gcc.1.gz OLD_FILES+=usr/share/man/man1/gcov.1.gz OLD_FILES+=usr/share/man/man1/gcpp.1.gz .endif .if ${MK_GCOV} == no OLD_FILES+=usr/bin/gcov OLD_FILES+=usr/share/man/man1/gcov.1.gz .endif .if ${MK_GDB} == no OLD_FILES+=usr/bin/gdb OLD_FILES+=usr/bin/gdbserver OLD_FILES+=usr/bin/gdbtui OLD_FILES+=usr/bin/kgdb OLD_FILES+=usr/share/info/gdb.info.gz OLD_FILES+=usr/share/info/gdbint.info.gz OLD_FILES+=usr/share/info/stabs.info.gz OLD_FILES+=usr/share/man/man1/gdb.1.gz OLD_FILES+=usr/share/man/man1/gdbserver.1.gz OLD_FILES+=usr/share/man/man1/kgdb.1.gz .endif .if ${MK_GPIB} == no OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h OLD_FILES+=usr/include/dev/ieee488/ugpib.h OLD_FILES+=usr/include/dev/ieee488/upd7210.h OLD_FILES+=usr/include/gpib/gpib.h OLD_FILES+=usr/lib/libgpib.a OLD_FILES+=usr/lib/libgpib.so OLD_LIBS+=usr/lib/libgpib.so.3 OLD_FILES+=usr/lib/libgpib_p.a .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" 
OLD_FILES+=usr/lib32/libgpib.a OLD_FILES+=usr/lib32/libgpib.so OLD_LIBS+=usr/lib32/libgpib.so.3 OLD_FILES+=usr/lib32/libgpib_p.a .endif .endif .if ${MK_GPIO} == no OLD_FILES+=usr/sbin/gpioctl OLD_FILES+=usr/share/man/man8/gpioctl.8.gz .endif # Also includes vgrind(1) .if ${MK_GROFF} == no OLD_FILES+=usr/bin/addftinfo OLD_FILES+=usr/bin/afmtodit OLD_FILES+=usr/bin/eqn OLD_FILES+=usr/bin/grn OLD_FILES+=usr/bin/grodvi OLD_FILES+=usr/bin/groff OLD_FILES+=usr/bin/grog OLD_FILES+=usr/bin/grolbp OLD_FILES+=usr/bin/grolj4 OLD_FILES+=usr/bin/grops OLD_FILES+=usr/bin/grotty OLD_FILES+=usr/bin/hpftodit OLD_FILES+=usr/bin/indxbib OLD_FILES+=usr/bin/lkbib OLD_FILES+=usr/bin/lookbib OLD_FILES+=usr/bin/mmroff OLD_FILES+=usr/bin/neqn OLD_FILES+=usr/bin/nroff OLD_FILES+=usr/bin/pfbtops OLD_FILES+=usr/bin/pic OLD_FILES+=usr/bin/post-grohtml OLD_FILES+=usr/bin/pre-grohtml OLD_FILES+=usr/bin/psroff OLD_FILES+=usr/bin/refer OLD_FILES+=usr/bin/soelim OLD_FILES+=usr/bin/tbl OLD_FILES+=usr/bin/tfmtodit OLD_FILES+=usr/bin/troff OLD_FILES+=usr/bin/vgrind OLD_FILES+=usr/libexec/vfontedpr OLD_FILES+=usr/share/groff_font/devX100-12/CB OLD_FILES+=usr/share/groff_font/devX100-12/CBI OLD_FILES+=usr/share/groff_font/devX100-12/CI OLD_FILES+=usr/share/groff_font/devX100-12/CR OLD_FILES+=usr/share/groff_font/devX100-12/DESC OLD_FILES+=usr/share/groff_font/devX100-12/HB OLD_FILES+=usr/share/groff_font/devX100-12/HBI OLD_FILES+=usr/share/groff_font/devX100-12/HI OLD_FILES+=usr/share/groff_font/devX100-12/HR OLD_FILES+=usr/share/groff_font/devX100-12/NB OLD_FILES+=usr/share/groff_font/devX100-12/NBI OLD_FILES+=usr/share/groff_font/devX100-12/NI OLD_FILES+=usr/share/groff_font/devX100-12/NR OLD_FILES+=usr/share/groff_font/devX100-12/S OLD_FILES+=usr/share/groff_font/devX100-12/TB OLD_FILES+=usr/share/groff_font/devX100-12/TBI OLD_FILES+=usr/share/groff_font/devX100-12/TI OLD_FILES+=usr/share/groff_font/devX100-12/TR OLD_DIRS+=usr/share/groff_font/devX100-12 OLD_FILES+=usr/share/groff_font/devX100/CB OLD_FILES+=usr/share/groff_font/devX100/CBI OLD_FILES+=usr/share/groff_font/devX100/CI OLD_FILES+=usr/share/groff_font/devX100/CR OLD_FILES+=usr/share/groff_font/devX100/DESC OLD_FILES+=usr/share/groff_font/devX100/HB OLD_FILES+=usr/share/groff_font/devX100/HBI OLD_FILES+=usr/share/groff_font/devX100/HI OLD_FILES+=usr/share/groff_font/devX100/HR OLD_FILES+=usr/share/groff_font/devX100/NB OLD_FILES+=usr/share/groff_font/devX100/NBI OLD_FILES+=usr/share/groff_font/devX100/NI OLD_FILES+=usr/share/groff_font/devX100/NR OLD_FILES+=usr/share/groff_font/devX100/S OLD_FILES+=usr/share/groff_font/devX100/TB OLD_FILES+=usr/share/groff_font/devX100/TBI OLD_FILES+=usr/share/groff_font/devX100/TI OLD_FILES+=usr/share/groff_font/devX100/TR OLD_DIRS+=usr/share/groff_font/devX100 OLD_FILES+=usr/share/groff_font/devX75-12/CB OLD_FILES+=usr/share/groff_font/devX75-12/CBI OLD_FILES+=usr/share/groff_font/devX75-12/CI OLD_FILES+=usr/share/groff_font/devX75-12/CR OLD_FILES+=usr/share/groff_font/devX75-12/DESC OLD_FILES+=usr/share/groff_font/devX75-12/HB OLD_FILES+=usr/share/groff_font/devX75-12/HBI OLD_FILES+=usr/share/groff_font/devX75-12/HI OLD_FILES+=usr/share/groff_font/devX75-12/HR OLD_FILES+=usr/share/groff_font/devX75-12/NB OLD_FILES+=usr/share/groff_font/devX75-12/NBI OLD_FILES+=usr/share/groff_font/devX75-12/NI OLD_FILES+=usr/share/groff_font/devX75-12/NR OLD_FILES+=usr/share/groff_font/devX75-12/S OLD_FILES+=usr/share/groff_font/devX75-12/TB OLD_FILES+=usr/share/groff_font/devX75-12/TBI OLD_FILES+=usr/share/groff_font/devX75-12/TI 
OLD_FILES+=usr/share/groff_font/devX75-12/TR OLD_DIRS+=usr/share/groff_font/devX75-12 OLD_FILES+=usr/share/groff_font/devX75/CB OLD_FILES+=usr/share/groff_font/devX75/CBI OLD_FILES+=usr/share/groff_font/devX75/CI OLD_FILES+=usr/share/groff_font/devX75/CR OLD_FILES+=usr/share/groff_font/devX75/DESC OLD_FILES+=usr/share/groff_font/devX75/HB OLD_FILES+=usr/share/groff_font/devX75/HBI OLD_FILES+=usr/share/groff_font/devX75/HI OLD_FILES+=usr/share/groff_font/devX75/HR OLD_FILES+=usr/share/groff_font/devX75/NB OLD_FILES+=usr/share/groff_font/devX75/NBI OLD_FILES+=usr/share/groff_font/devX75/NI OLD_FILES+=usr/share/groff_font/devX75/NR OLD_FILES+=usr/share/groff_font/devX75/S OLD_FILES+=usr/share/groff_font/devX75/TB OLD_FILES+=usr/share/groff_font/devX75/TBI OLD_FILES+=usr/share/groff_font/devX75/TI OLD_FILES+=usr/share/groff_font/devX75/TR OLD_DIRS+=usr/share/groff_font/devX75 OLD_FILES+=usr/share/groff_font/devascii/B OLD_FILES+=usr/share/groff_font/devascii/BI OLD_FILES+=usr/share/groff_font/devascii/CW OLD_FILES+=usr/share/groff_font/devascii/DESC OLD_FILES+=usr/share/groff_font/devascii/I OLD_FILES+=usr/share/groff_font/devascii/L OLD_FILES+=usr/share/groff_font/devascii/R OLD_FILES+=usr/share/groff_font/devascii/S OLD_DIRS+=usr/share/groff_font/devascii OLD_FILES+=usr/share/groff_font/devcp1047/B OLD_FILES+=usr/share/groff_font/devcp1047/BI OLD_FILES+=usr/share/groff_font/devcp1047/CW OLD_FILES+=usr/share/groff_font/devcp1047/DESC OLD_FILES+=usr/share/groff_font/devcp1047/I OLD_FILES+=usr/share/groff_font/devcp1047/L OLD_FILES+=usr/share/groff_font/devcp1047/R OLD_FILES+=usr/share/groff_font/devcp1047/S OLD_DIRS+=usr/share/groff_font/devcp1047 OLD_FILES+=usr/share/groff_font/devdvi/CW OLD_FILES+=usr/share/groff_font/devdvi/CWEC OLD_FILES+=usr/share/groff_font/devdvi/CWI OLD_FILES+=usr/share/groff_font/devdvi/CWIEC OLD_FILES+=usr/share/groff_font/devdvi/CWITC OLD_FILES+=usr/share/groff_font/devdvi/CWTC OLD_FILES+=usr/share/groff_font/devdvi/CompileFonts OLD_FILES+=usr/share/groff_font/devdvi/DESC OLD_FILES+=usr/share/groff_font/devdvi/EX OLD_FILES+=usr/share/groff_font/devdvi/HB OLD_FILES+=usr/share/groff_font/devdvi/HBEC OLD_FILES+=usr/share/groff_font/devdvi/HBI OLD_FILES+=usr/share/groff_font/devdvi/HBIEC OLD_FILES+=usr/share/groff_font/devdvi/HBITC OLD_FILES+=usr/share/groff_font/devdvi/HBTC OLD_FILES+=usr/share/groff_font/devdvi/HI OLD_FILES+=usr/share/groff_font/devdvi/HIEC OLD_FILES+=usr/share/groff_font/devdvi/HITC OLD_FILES+=usr/share/groff_font/devdvi/HR OLD_FILES+=usr/share/groff_font/devdvi/HREC OLD_FILES+=usr/share/groff_font/devdvi/HRTC OLD_FILES+=usr/share/groff_font/devdvi/MI OLD_FILES+=usr/share/groff_font/devdvi/Makefile OLD_FILES+=usr/share/groff_font/devdvi/S OLD_FILES+=usr/share/groff_font/devdvi/SA OLD_FILES+=usr/share/groff_font/devdvi/SB OLD_FILES+=usr/share/groff_font/devdvi/SC OLD_FILES+=usr/share/groff_font/devdvi/TB OLD_FILES+=usr/share/groff_font/devdvi/TBEC OLD_FILES+=usr/share/groff_font/devdvi/TBI OLD_FILES+=usr/share/groff_font/devdvi/TBIEC OLD_FILES+=usr/share/groff_font/devdvi/TBITC OLD_FILES+=usr/share/groff_font/devdvi/TBTC OLD_FILES+=usr/share/groff_font/devdvi/TI OLD_FILES+=usr/share/groff_font/devdvi/TIEC OLD_FILES+=usr/share/groff_font/devdvi/TITC OLD_FILES+=usr/share/groff_font/devdvi/TR OLD_FILES+=usr/share/groff_font/devdvi/TREC OLD_FILES+=usr/share/groff_font/devdvi/TRTC OLD_FILES+=usr/share/groff_font/devdvi/ec.map OLD_FILES+=usr/share/groff_font/devdvi/msam.map OLD_FILES+=usr/share/groff_font/devdvi/msbm.map 
OLD_FILES+=usr/share/groff_font/devdvi/tc.map OLD_FILES+=usr/share/groff_font/devdvi/texb.map OLD_FILES+=usr/share/groff_font/devdvi/texex.map OLD_FILES+=usr/share/groff_font/devdvi/texi.map OLD_FILES+=usr/share/groff_font/devdvi/texmi.map OLD_FILES+=usr/share/groff_font/devdvi/texr.map OLD_FILES+=usr/share/groff_font/devdvi/texsy.map OLD_FILES+=usr/share/groff_font/devdvi/textex.map OLD_FILES+=usr/share/groff_font/devdvi/textt.map OLD_DIRS+=usr/share/groff_font/devdvi OLD_FILES+=usr/share/groff_font/devhtml/B OLD_FILES+=usr/share/groff_font/devhtml/BI OLD_FILES+=usr/share/groff_font/devhtml/CB OLD_FILES+=usr/share/groff_font/devhtml/CBI OLD_FILES+=usr/share/groff_font/devhtml/CI OLD_FILES+=usr/share/groff_font/devhtml/CR OLD_FILES+=usr/share/groff_font/devhtml/DESC OLD_FILES+=usr/share/groff_font/devhtml/I OLD_FILES+=usr/share/groff_font/devhtml/R OLD_FILES+=usr/share/groff_font/devhtml/S OLD_DIRS+=usr/share/groff_font/devhtml OLD_FILES+=usr/share/groff_font/devkoi8-r/B OLD_FILES+=usr/share/groff_font/devkoi8-r/BI OLD_FILES+=usr/share/groff_font/devkoi8-r/CW OLD_FILES+=usr/share/groff_font/devkoi8-r/DESC OLD_FILES+=usr/share/groff_font/devkoi8-r/I OLD_FILES+=usr/share/groff_font/devkoi8-r/L OLD_FILES+=usr/share/groff_font/devkoi8-r/R OLD_FILES+=usr/share/groff_font/devkoi8-r/S OLD_DIRS+=usr/share/groff_font/devkoi8-r OLD_FILES+=usr/share/groff_font/devlatin1/B OLD_FILES+=usr/share/groff_font/devlatin1/BI OLD_FILES+=usr/share/groff_font/devlatin1/CW OLD_FILES+=usr/share/groff_font/devlatin1/DESC OLD_FILES+=usr/share/groff_font/devlatin1/I OLD_FILES+=usr/share/groff_font/devlatin1/L OLD_FILES+=usr/share/groff_font/devlatin1/R OLD_FILES+=usr/share/groff_font/devlatin1/S OLD_DIRS+=usr/share/groff_font/devlatin1 OLD_FILES+=usr/share/groff_font/devlbp/CB OLD_FILES+=usr/share/groff_font/devlbp/CI OLD_FILES+=usr/share/groff_font/devlbp/CR OLD_FILES+=usr/share/groff_font/devlbp/DESC OLD_FILES+=usr/share/groff_font/devlbp/EB OLD_FILES+=usr/share/groff_font/devlbp/EI OLD_FILES+=usr/share/groff_font/devlbp/ER OLD_FILES+=usr/share/groff_font/devlbp/HB OLD_FILES+=usr/share/groff_font/devlbp/HBI OLD_FILES+=usr/share/groff_font/devlbp/HI OLD_FILES+=usr/share/groff_font/devlbp/HNB OLD_FILES+=usr/share/groff_font/devlbp/HNBI OLD_FILES+=usr/share/groff_font/devlbp/HNI OLD_FILES+=usr/share/groff_font/devlbp/HNR OLD_FILES+=usr/share/groff_font/devlbp/HR OLD_FILES+=usr/share/groff_font/devlbp/TB OLD_FILES+=usr/share/groff_font/devlbp/TBI OLD_FILES+=usr/share/groff_font/devlbp/TI OLD_FILES+=usr/share/groff_font/devlbp/TR OLD_DIRS+=usr/share/groff_font/devlbp OLD_FILES+=usr/share/groff_font/devlj4/AB OLD_FILES+=usr/share/groff_font/devlj4/ABI OLD_FILES+=usr/share/groff_font/devlj4/AI OLD_FILES+=usr/share/groff_font/devlj4/ALBB OLD_FILES+=usr/share/groff_font/devlj4/ALBR OLD_FILES+=usr/share/groff_font/devlj4/AOB OLD_FILES+=usr/share/groff_font/devlj4/AOI OLD_FILES+=usr/share/groff_font/devlj4/AOR OLD_FILES+=usr/share/groff_font/devlj4/AR OLD_FILES+=usr/share/groff_font/devlj4/CB OLD_FILES+=usr/share/groff_font/devlj4/CBI OLD_FILES+=usr/share/groff_font/devlj4/CI OLD_FILES+=usr/share/groff_font/devlj4/CLARENDON OLD_FILES+=usr/share/groff_font/devlj4/CORONET OLD_FILES+=usr/share/groff_font/devlj4/CR OLD_FILES+=usr/share/groff_font/devlj4/DESC OLD_FILES+=usr/share/groff_font/devlj4/GB OLD_FILES+=usr/share/groff_font/devlj4/GBI OLD_FILES+=usr/share/groff_font/devlj4/GI OLD_FILES+=usr/share/groff_font/devlj4/GR OLD_FILES+=usr/share/groff_font/devlj4/LGB OLD_FILES+=usr/share/groff_font/devlj4/LGI 
OLD_FILES+=usr/share/groff_font/devlj4/LGR OLD_FILES+=usr/share/groff_font/devlj4/MARIGOLD OLD_FILES+=usr/share/groff_font/devlj4/OB OLD_FILES+=usr/share/groff_font/devlj4/OBI OLD_FILES+=usr/share/groff_font/devlj4/OI OLD_FILES+=usr/share/groff_font/devlj4/OR OLD_FILES+=usr/share/groff_font/devlj4/S OLD_FILES+=usr/share/groff_font/devlj4/SYMBOL OLD_FILES+=usr/share/groff_font/devlj4/TB OLD_FILES+=usr/share/groff_font/devlj4/TBI OLD_FILES+=usr/share/groff_font/devlj4/TI OLD_FILES+=usr/share/groff_font/devlj4/TNRB OLD_FILES+=usr/share/groff_font/devlj4/TNRBI OLD_FILES+=usr/share/groff_font/devlj4/TNRI OLD_FILES+=usr/share/groff_font/devlj4/TNRR OLD_FILES+=usr/share/groff_font/devlj4/TR OLD_FILES+=usr/share/groff_font/devlj4/UB OLD_FILES+=usr/share/groff_font/devlj4/UBI OLD_FILES+=usr/share/groff_font/devlj4/UCB OLD_FILES+=usr/share/groff_font/devlj4/UCBI OLD_FILES+=usr/share/groff_font/devlj4/UCI OLD_FILES+=usr/share/groff_font/devlj4/UCR OLD_FILES+=usr/share/groff_font/devlj4/UI OLD_FILES+=usr/share/groff_font/devlj4/UR OLD_FILES+=usr/share/groff_font/devlj4/WINGDINGS OLD_DIRS+=usr/share/groff_font/devlj4 OLD_FILES+=usr/share/groff_font/devps/AB OLD_FILES+=usr/share/groff_font/devps/ABI OLD_FILES+=usr/share/groff_font/devps/AI OLD_FILES+=usr/share/groff_font/devps/AR OLD_FILES+=usr/share/groff_font/devps/BMB OLD_FILES+=usr/share/groff_font/devps/BMBI OLD_FILES+=usr/share/groff_font/devps/BMI OLD_FILES+=usr/share/groff_font/devps/BMR OLD_FILES+=usr/share/groff_font/devps/CB OLD_FILES+=usr/share/groff_font/devps/CBI OLD_FILES+=usr/share/groff_font/devps/CI OLD_FILES+=usr/share/groff_font/devps/CR OLD_FILES+=usr/share/groff_font/devps/DESC OLD_FILES+=usr/share/groff_font/devps/EURO OLD_FILES+=usr/share/groff_font/devps/HB OLD_FILES+=usr/share/groff_font/devps/HBI OLD_FILES+=usr/share/groff_font/devps/HI OLD_FILES+=usr/share/groff_font/devps/HNB OLD_FILES+=usr/share/groff_font/devps/HNBI OLD_FILES+=usr/share/groff_font/devps/HNI OLD_FILES+=usr/share/groff_font/devps/HNR OLD_FILES+=usr/share/groff_font/devps/HR OLD_FILES+=usr/share/groff_font/devps/Makefile OLD_FILES+=usr/share/groff_font/devps/NB OLD_FILES+=usr/share/groff_font/devps/NBI OLD_FILES+=usr/share/groff_font/devps/NI OLD_FILES+=usr/share/groff_font/devps/NR OLD_FILES+=usr/share/groff_font/devps/PB OLD_FILES+=usr/share/groff_font/devps/PBI OLD_FILES+=usr/share/groff_font/devps/PI OLD_FILES+=usr/share/groff_font/devps/PR OLD_FILES+=usr/share/groff_font/devps/S OLD_FILES+=usr/share/groff_font/devps/SS OLD_FILES+=usr/share/groff_font/devps/TB OLD_FILES+=usr/share/groff_font/devps/TBI OLD_FILES+=usr/share/groff_font/devps/TI OLD_FILES+=usr/share/groff_font/devps/TR OLD_FILES+=usr/share/groff_font/devps/ZCMI OLD_FILES+=usr/share/groff_font/devps/ZD OLD_FILES+=usr/share/groff_font/devps/ZDR OLD_FILES+=usr/share/groff_font/devps/afmname OLD_FILES+=usr/share/groff_font/devps/dingbats.map OLD_FILES+=usr/share/groff_font/devps/dingbats.rmap OLD_FILES+=usr/share/groff_font/devps/download OLD_FILES+=usr/share/groff_font/devps/freeeuro.pfa OLD_FILES+=usr/share/groff_font/devps/lgreekmap OLD_FILES+=usr/share/groff_font/devps/prologue OLD_FILES+=usr/share/groff_font/devps/symbol.sed OLD_FILES+=usr/share/groff_font/devps/symbolchars OLD_FILES+=usr/share/groff_font/devps/symbolsl.afm OLD_FILES+=usr/share/groff_font/devps/symbolsl.pfa OLD_FILES+=usr/share/groff_font/devps/text.enc OLD_FILES+=usr/share/groff_font/devps/textmap OLD_FILES+=usr/share/groff_font/devps/zapfdr.pfa OLD_DIRS+=usr/share/groff_font/devps OLD_FILES+=usr/share/groff_font/devutf8/B 
OLD_FILES+=usr/share/groff_font/devutf8/BI OLD_FILES+=usr/share/groff_font/devutf8/CW OLD_FILES+=usr/share/groff_font/devutf8/DESC OLD_FILES+=usr/share/groff_font/devutf8/I OLD_FILES+=usr/share/groff_font/devutf8/L OLD_FILES+=usr/share/groff_font/devutf8/R OLD_FILES+=usr/share/groff_font/devutf8/S OLD_DIRS+=usr/share/groff_font/devutf8 OLD_DIRS+=usr/share/groff_font OLD_FILES+=usr/share/info/groff.info.gz OLD_FILES+=usr/share/man/man1/addftinfo.1.gz OLD_FILES+=usr/share/man/man1/afmtodit.1.gz OLD_FILES+=usr/share/man/man1/eqn.1.gz OLD_FILES+=usr/share/man/man1/grn.1.gz OLD_FILES+=usr/share/man/man1/grodvi.1.gz OLD_FILES+=usr/share/man/man1/groff.1.gz OLD_FILES+=usr/share/man/man1/grog.1.gz OLD_FILES+=usr/share/man/man1/grolbp.1.gz OLD_FILES+=usr/share/man/man1/grolj4.1.gz OLD_FILES+=usr/share/man/man1/grops.1.gz OLD_FILES+=usr/share/man/man1/grotty.1.gz OLD_FILES+=usr/share/man/man1/hpftodit.1.gz OLD_FILES+=usr/share/man/man1/indxbib.1.gz OLD_FILES+=usr/share/man/man1/lkbib.1.gz OLD_FILES+=usr/share/man/man1/lookbib.1.gz OLD_FILES+=usr/share/man/man1/mmroff.1.gz OLD_FILES+=usr/share/man/man1/neqn.1.gz OLD_FILES+=usr/share/man/man1/nroff.1.gz OLD_FILES+=usr/share/man/man1/pfbtops.1.gz OLD_FILES+=usr/share/man/man1/pic.1.gz OLD_FILES+=usr/share/man/man1/psroff.1.gz OLD_FILES+=usr/share/man/man1/refer.1.gz OLD_FILES+=usr/share/man/man1/soelim.1.gz OLD_FILES+=usr/share/man/man1/tbl.1.gz OLD_FILES+=usr/share/man/man1/tfmtodit.1.gz OLD_FILES+=usr/share/man/man1/troff.1.gz OLD_FILES+=usr/share/man/man1/vgrind.1.gz OLD_FILES+=usr/share/man/man5/groff_font.5.gz OLD_FILES+=usr/share/man/man5/groff_out.5.gz OLD_FILES+=usr/share/man/man5/groff_tmac.5.gz OLD_FILES+=usr/share/man/man5/lj4_font.5.gz OLD_FILES+=usr/share/man/man5/tmac.5.gz OLD_FILES+=usr/share/man/man5/vgrindefs.5.gz OLD_FILES+=usr/share/man/man7/ditroff.7.gz OLD_FILES+=usr/share/man/man7/groff.7.gz OLD_FILES+=usr/share/man/man7/groff_char.7.gz OLD_FILES+=usr/share/man/man7/groff_diff.7.gz OLD_FILES+=usr/share/man/man7/groff_man.7.gz OLD_FILES+=usr/share/man/man7/groff_mdoc.7.gz OLD_FILES+=usr/share/man/man7/groff_me.7.gz OLD_FILES+=usr/share/man/man7/groff_mm.7.gz OLD_FILES+=usr/share/man/man7/groff_mmse.7.gz OLD_FILES+=usr/share/man/man7/groff_ms.7.gz OLD_FILES+=usr/share/man/man7/groff_trace.7.gz OLD_FILES+=usr/share/man/man7/groff_www.7.gz OLD_FILES+=usr/share/man/man7/man.7.gz OLD_FILES+=usr/share/man/man7/mdoc.7.gz OLD_FILES+=usr/share/man/man7/mdoc.samples.7.gz OLD_FILES+=usr/share/man/man7/me.7.gz OLD_FILES+=usr/share/man/man7/mm.7.gz OLD_FILES+=usr/share/man/man7/mmse.7.gz OLD_FILES+=usr/share/man/man7/ms.7.gz OLD_FILES+=usr/share/man/man7/orig_me.7.gz OLD_FILES+=usr/share/man/man7/roff.7.gz OLD_FILES+=usr/share/me/acm.me OLD_FILES+=usr/share/me/chars.me OLD_FILES+=usr/share/me/deltext.me OLD_FILES+=usr/share/me/eqn.me OLD_FILES+=usr/share/me/float.me OLD_FILES+=usr/share/me/footnote.me OLD_FILES+=usr/share/me/index.me OLD_FILES+=usr/share/me/letterhead.me OLD_FILES+=usr/share/me/local.me OLD_FILES+=usr/share/me/null.me OLD_FILES+=usr/share/me/refer.me OLD_FILES+=usr/share/me/revisions OLD_FILES+=usr/share/me/sh.me OLD_FILES+=usr/share/me/tbl.me OLD_FILES+=usr/share/me/thesis.me OLD_DIRS+=usr/share/me OLD_FILES+=usr/share/misc/vgrindefs OLD_FILES+=usr/share/misc/vgrindefs.db OLD_FILES+=usr/share/tmac/X.tmac OLD_FILES+=usr/share/tmac/Xps.tmac OLD_FILES+=usr/share/tmac/a4.tmac OLD_FILES+=usr/share/tmac/an-old.tmac OLD_FILES+=usr/share/tmac/an.tmac OLD_FILES+=usr/share/tmac/andoc.tmac OLD_FILES+=usr/share/tmac/composite.tmac 
OLD_FILES+=usr/share/tmac/cp1047.tmac OLD_FILES+=usr/share/tmac/devtag.tmac OLD_FILES+=usr/share/tmac/doc.tmac OLD_FILES+=usr/share/tmac/dvi.tmac OLD_FILES+=usr/share/tmac/e.tmac OLD_FILES+=usr/share/tmac/ec.tmac OLD_FILES+=usr/share/tmac/eqnrc OLD_FILES+=usr/share/tmac/europs.tmac OLD_FILES+=usr/share/tmac/html-end.tmac OLD_FILES+=usr/share/tmac/html.tmac OLD_FILES+=usr/share/tmac/hyphen.ru OLD_FILES+=usr/share/tmac/hyphen.us OLD_FILES+=usr/share/tmac/hyphenex.us OLD_FILES+=usr/share/tmac/koi8-r.tmac OLD_FILES+=usr/share/tmac/latin1.tmac OLD_FILES+=usr/share/tmac/latin2.tmac OLD_FILES+=usr/share/tmac/latin9.tmac OLD_FILES+=usr/share/tmac/lbp.tmac OLD_FILES+=usr/share/tmac/lj4.tmac OLD_FILES+=usr/share/tmac/m.tmac OLD_FILES+=usr/share/tmac/man.local OLD_FILES+=usr/share/tmac/man.tmac OLD_FILES+=usr/share/tmac/mandoc.tmac OLD_FILES+=usr/share/tmac/mdoc.local OLD_FILES+=usr/share/tmac/mdoc.tmac OLD_FILES+=usr/share/tmac/mdoc/doc-common OLD_FILES+=usr/share/tmac/mdoc/doc-ditroff OLD_FILES+=usr/share/tmac/mdoc/doc-nroff OLD_FILES+=usr/share/tmac/mdoc/doc-syms OLD_FILES+=usr/share/tmac/mdoc/fr.ISO8859-1 OLD_FILES+=usr/share/tmac/mdoc/ru.KOI8-R OLD_DIRS+=usr/share/tmac/mdoc OLD_FILES+=usr/share/tmac/me.tmac OLD_FILES+=usr/share/tmac/mm/0.MT OLD_FILES+=usr/share/tmac/mm/4.MT OLD_FILES+=usr/share/tmac/mm/5.MT OLD_FILES+=usr/share/tmac/mm/locale OLD_FILES+=usr/share/tmac/mm/mm.tmac OLD_FILES+=usr/share/tmac/mm/mmse.tmac OLD_FILES+=usr/share/tmac/mm/ms.cov OLD_FILES+=usr/share/tmac/mm/se_locale OLD_FILES+=usr/share/tmac/mm/se_ms.cov OLD_DIRS+=usr/share/tmac/mm OLD_FILES+=usr/share/tmac/ms.tmac OLD_FILES+=usr/share/tmac/mse.tmac OLD_FILES+=usr/share/tmac/papersize.tmac OLD_FILES+=usr/share/tmac/pic.tmac OLD_FILES+=usr/share/tmac/ps.tmac OLD_FILES+=usr/share/tmac/psatk.tmac OLD_FILES+=usr/share/tmac/psold.tmac OLD_FILES+=usr/share/tmac/pspic.tmac OLD_FILES+=usr/share/tmac/s.tmac OLD_FILES+=usr/share/tmac/safer.tmac OLD_FILES+=usr/share/tmac/tmac.orig_me OLD_FILES+=usr/share/tmac/tmac.vgrind OLD_FILES+=usr/share/tmac/trace.tmac OLD_FILES+=usr/share/tmac/troffrc OLD_FILES+=usr/share/tmac/troffrc-end OLD_FILES+=usr/share/tmac/tty-char.tmac OLD_FILES+=usr/share/tmac/tty.tmac OLD_FILES+=usr/share/tmac/unicode.tmac OLD_FILES+=usr/share/tmac/www.tmac OLD_DIRS+=usr/share/tmac .endif .if ${MK_GSSAPI} == no OLD_FILES+=usr/lib/libgssapi.a OLD_FILES+=usr/lib/libgssapi.so OLD_LIBS+=usr/lib/libgssapi.so.10 OLD_FILES+=usr/lib/libgssapi_p.a OLD_FILES+=usr/lib/librpcsec_gss.a OLD_FILES+=usr/lib/librpcsec_gss.so OLD_LIBS+=usr/lib/librpcsec_gss.so.1 .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libgssapi.a OLD_FILES+=usr/lib32/libgssapi.so OLD_LIBS+=usr/lib32/libgssapi.so.10 OLD_FILES+=usr/lib32/libgssapi_p.a OLD_FILES+=usr/lib32/librpcsec_gss.a OLD_FILES+=usr/lib32/librpcsec_gss.so OLD_LIBS+=usr/lib32/librpcsec_gss.so.1 .endif OLD_FILES+=usr/sbin/gssd OLD_FILES+=usr/share/man/man3/gss_accept_sec_context.3.gz OLD_FILES+=usr/share/man/man3/gss_acquire_cred.3.gz OLD_FILES+=usr/share/man/man3/gss_add_cred.3.gz OLD_FILES+=usr/share/man/man3/gss_add_oid_set_member.3.gz OLD_FILES+=usr/share/man/man3/gss_canonicalize_name.3.gz OLD_FILES+=usr/share/man/man3/gss_compare_name.3.gz OLD_FILES+=usr/share/man/man3/gss_context_time.3.gz OLD_FILES+=usr/share/man/man3/gss_create_empty_oid_set.3.gz OLD_FILES+=usr/share/man/man3/gss_delete_sec_context.3.gz OLD_FILES+=usr/share/man/man3/gss_display_name.3.gz OLD_FILES+=usr/share/man/man3/gss_display_status.3.gz 
OLD_FILES+=usr/share/man/man3/gss_duplicate_name.3.gz OLD_FILES+=usr/share/man/man3/gss_export_name.3.gz OLD_FILES+=usr/share/man/man3/gss_export_sec_context.3.gz OLD_FILES+=usr/share/man/man3/gss_get_mic.3.gz OLD_FILES+=usr/share/man/man3/gss_import_name.3.gz OLD_FILES+=usr/share/man/man3/gss_import_sec_context.3.gz OLD_FILES+=usr/share/man/man3/gss_indicate_mechs.3.gz OLD_FILES+=usr/share/man/man3/gss_init_sec_context.3.gz OLD_FILES+=usr/share/man/man3/gss_inquire_context.3.gz OLD_FILES+=usr/share/man/man3/gss_inquire_cred.3.gz OLD_FILES+=usr/share/man/man3/gss_inquire_cred_by_mech.3.gz OLD_FILES+=usr/share/man/man3/gss_inquire_mechs_for_name.3.gz OLD_FILES+=usr/share/man/man3/gss_inquire_names_for_mech.3.gz OLD_FILES+=usr/share/man/man3/gss_process_context_token.3.gz OLD_FILES+=usr/share/man/man3/gss_release_buffer.3.gz OLD_FILES+=usr/share/man/man3/gss_release_cred.3.gz OLD_FILES+=usr/share/man/man3/gss_release_name.3.gz OLD_FILES+=usr/share/man/man3/gss_release_oid_set.3.gz OLD_FILES+=usr/share/man/man3/gss_seal.3.gz OLD_FILES+=usr/share/man/man3/gss_sign.3.gz OLD_FILES+=usr/share/man/man3/gss_test_oid_set_member.3.gz OLD_FILES+=usr/share/man/man3/gss_unseal.3.gz OLD_FILES+=usr/share/man/man3/gss_unwrap.3.gz OLD_FILES+=usr/share/man/man3/gss_verify.3.gz OLD_FILES+=usr/share/man/man3/gss_verify_mic.3.gz OLD_FILES+=usr/share/man/man3/gss_wrap.3.gz OLD_FILES+=usr/share/man/man3/gss_wrap_size_limit.3.gz OLD_FILES+=usr/share/man/man3/gssapi.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_get_error.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_get_mech_info.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_get_mechanisms.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_get_principal_name.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_get_versions.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_getcred.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_is_installed.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_max_data_length.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_mech_to_oid.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_oid_to_mech.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_qop_to_num.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_seccreate.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_set_callback.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_set_defaults.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_set_svc_name.3.gz OLD_FILES+=usr/share/man/man3/rpc_gss_svc_max_data_length.3.gz OLD_FILES+=usr/share/man/man3/rpcsec_gss.3.gz OLD_FILES+=usr/share/man/man5/mech.5.gz OLD_FILES+=usr/share/man/man5/qop.5.gz OLD_FILES+=usr/share/man/man8/gssd.8.gz .endif .if ${MK_HESIOD} == no OLD_FILES+=usr/bin/hesinfo OLD_FILES+=usr/include/hesiod.h OLD_FILES+=usr/share/man/man1/hesinfo.1.gz OLD_FILES+=usr/share/man/man3/hesiod.3.gz OLD_FILES+=usr/share/man/man5/hesiod.conf.5.gz .endif #.if ${MK_HTML} == no # to be filled in #.endif .if ${MK_IDEA} == no OLD_FILES+=usr/include/openssl/idea.h .endif .if ${MK_INET6} == no OLD_FILES+=sbin/ping6 OLD_FILES+=sbin/rtsol OLD_FILES+=usr/sbin/faithd OLD_FILES+=usr/sbin/ip6addrctl OLD_FILES+=usr/sbin/mld6query OLD_FILES+=usr/sbin/ndp OLD_FILES+=usr/sbin/rip6query OLD_FILES+=usr/sbin/route6d OLD_FILES+=usr/sbin/rrenumd OLD_FILES+=usr/sbin/rtadvd OLD_FILES+=usr/sbin/rtsold OLD_FILES+=usr/sbin/traceroute6 OLD_FILES+=usr/share/man/man5/rrenumd.conf.5.gz OLD_FILES+=usr/share/man/man5/rtadvd.conf.5.gz OLD_FILES+=usr/share/man/man8/ip6addrctl.8.gz OLD_FILES+=usr/share/man/man8/mld6query.8.gz OLD_FILES+=usr/share/man/man8/ndp.8.gz OLD_FILES+=usr/share/man/man8/ping6.8.gz OLD_FILES+=usr/share/man/man8/rip6query.8.gz 
OLD_FILES+=usr/share/man/man8/route6d.8.gz OLD_FILES+=usr/share/man/man8/rrenumd.8.gz OLD_FILES+=usr/share/man/man8/rtadvd.8.gz OLD_FILES+=usr/share/man/man8/rtsol.8.gz OLD_FILES+=usr/share/man/man8/rtsold.8.gz OLD_FILES+=usr/share/man/man8/traceroute6.8.gz .endif .if ${MK_INET6_SUPPORT} == no OLD_FILES+=rescue/ping6 .endif #.if ${MK_INFO} == no # to be filled in #.endif .if ${MK_IPFILTER} == no OLD_FILES+=etc/periodic/security/510.ipfdenied OLD_FILES+=rescue/ipf OLD_FILES+=sbin/ipf OLD_FILES+=sbin/ipfs OLD_FILES+=sbin/ipfstat OLD_FILES+=sbin/ipftest OLD_FILES+=sbin/ipmon OLD_FILES+=sbin/ipnat OLD_FILES+=sbin/ippool OLD_FILES+=sbin/ipresend OLD_FILES+=usr/include/netinet/ip_auth.h OLD_FILES+=usr/include/netinet/ip_compat.h OLD_FILES+=usr/include/netinet/ip_fil.h OLD_FILES+=usr/include/netinet/ip_frag.h OLD_FILES+=usr/include/netinet/ip_htable.h OLD_FILES+=usr/include/netinet/ip_lookup.h OLD_FILES+=usr/include/netinet/ip_nat.h OLD_FILES+=usr/include/netinet/ip_pool.h OLD_FILES+=usr/include/netinet/ip_proxy.h OLD_FILES+=usr/include/netinet/ip_rules.h OLD_FILES+=usr/include/netinet/ip_scan.h OLD_FILES+=usr/include/netinet/ip_state.h OLD_FILES+=usr/include/netinet/ip_sync.h OLD_FILES+=usr/include/netinet/ipl.h OLD_FILES+=usr/share/examples/ipfilter/README OLD_FILES+=usr/share/examples/ipfilter/BASIC.NAT OLD_FILES+=usr/share/examples/ipfilter/BASIC_1.FW OLD_FILES+=usr/share/examples/ipfilter/BASIC_2.FW OLD_FILES+=usr/share/examples/ipfilter/example.1 OLD_FILES+=usr/share/examples/ipfilter/example.2 OLD_FILES+=usr/share/examples/ipfilter/example.3 OLD_FILES+=usr/share/examples/ipfilter/example.4 OLD_FILES+=usr/share/examples/ipfilter/example.5 OLD_FILES+=usr/share/examples/ipfilter/example.6 OLD_FILES+=usr/share/examples/ipfilter/example.7 OLD_FILES+=usr/share/examples/ipfilter/example.8 OLD_FILES+=usr/share/examples/ipfilter/example.9 OLD_FILES+=usr/share/examples/ipfilter/example.10 OLD_FILES+=usr/share/examples/ipfilter/example.11 OLD_FILES+=usr/share/examples/ipfilter/example.12 OLD_FILES+=usr/share/examples/ipfilter/example.13 OLD_FILES+=usr/share/examples/ipfilter/example.sr OLD_FILES+=usr/share/examples/ipfilter/firewall OLD_FILES+=usr/share/examples/ipfilter/ftp-proxy OLD_FILES+=usr/share/examples/ipfilter/ftppxy OLD_FILES+=usr/share/examples/ipfilter/nat-setup OLD_FILES+=usr/share/examples/ipfilter/nat.eg OLD_FILES+=usr/share/examples/ipfilter/server OLD_FILES+=usr/share/examples/ipfilter/tcpstate OLD_FILES+=usr/share/examples/ipfilter/example.14 OLD_FILES+=usr/share/examples/ipfilter/firewall.1 OLD_FILES+=usr/share/examples/ipfilter/firewall.2 OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.permissive OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.restrictive OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.sample OLD_FILES+=usr/share/examples/ipfilter/ipnat.conf.sample OLD_FILES+=usr/share/examples/ipfilter/ipf-howto.txt OLD_FILES+=usr/share/examples/ipfilter/examples.txt OLD_FILES+=usr/share/examples/ipfilter/rules.txt OLD_FILES+=usr/share/man/man1/ipftest.1.gz OLD_FILES+=usr/share/man/man1/ipresend.1.gz OLD_FILES+=usr/share/man/man4/ipf.4.gz OLD_FILES+=usr/share/man/man4/ipl.4.gz OLD_FILES+=usr/share/man/man4/ipfilter.4.gz OLD_FILES+=usr/share/man/man4/ipnat.4.gz OLD_FILES+=usr/share/man/man5/ipf.5.gz OLD_FILES+=usr/share/man/man5/ipf.conf.5.gz OLD_FILES+=usr/share/man/man5/ipf6.conf.5.gz OLD_FILES+=usr/share/man/man5/ipnat.5.gz OLD_FILES+=usr/share/man/man5/ipnat.conf.5.gz OLD_FILES+=usr/share/man/man5/ippool.5.gz OLD_FILES+=usr/share/man/man8/ipf.8.gz 
OLD_FILES+=usr/share/man/man8/ipfs.8.gz OLD_FILES+=usr/share/man/man8/ipfstat.8.gz OLD_FILES+=usr/share/man/man8/ipmon.8.gz OLD_FILES+=usr/share/man/man8/ipnat.8.gz OLD_FILES+=usr/share/man/man8/ippool.8.gz OLD_FILES+=etc/periodic/security/610.ipf6denied .endif .if ${MK_IPFW} == no OLD_FILES+=etc/periodic/security/500.ipfwdenied OLD_FILES+=etc/periodic/security/550.ipfwlimit OLD_FILES+=sbin/ipfw OLD_FILES+=sbin/natd OLD_FILES+=usr/sbin/ipfwpcap OLD_FILES+=usr/share/man/man8/ipfw.8.gz OLD_FILES+=usr/share/man/man8/ipfwpcap.8.gz OLD_FILES+=usr/share/man/man8/natd.8.gz .endif .if ${MK_IPX} == no OLD_LIBS+=lib/libipx.so.5 OLD_FILES+=usr/lib/libipx.a OLD_FILES+=usr/lib/libipx.so OLD_FILES+=usr/lib/libipx_p.a .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libipx.a OLD_FILES+=usr/lib32/libipx.so OLD_LIBS+=usr/lib32/libipx.so.5 OLD_FILES+=usr/lib32/libipx_p.a .endif OLD_FILES+=usr/sbin/IPXrouted OLD_FILES+=usr/share/man/man3/ipx.3.gz OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz .endif .if ${MK_JAIL} == no OLD_FILES+=usr/sbin/jail OLD_FILES+=usr/sbin/jexec OLD_FILES+=usr/sbin/jls OLD_FILES+=usr/share/man/man8/jail.8.gz OLD_FILES+=usr/share/man/man8/jexec.8.gz OLD_FILES+=usr/share/man/man8/jls.8.gz .endif
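# The OLD_FILES, OLD_LIBS and OLD_DIRS lists built up in this file are
# consumed by the check-old/delete-old/delete-old-libs targets.  A
# minimal sketch of the style of loop those targets run (simplified;
# the real targets also report and prompt, order OLD_DIRS removal, and
# keep OLD_LIBS separate so stale libraries survive until nothing
# dynamically linked still needs them):
#
# delete-old-files-sketch:
# .for file in ${OLD_FILES}
#	-rm -f "${DESTDIR}/${file}"
# .endfor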
.if ${MK_KERBEROS} == no OLD_FILES+=usr/bin/compile_et OLD_FILES+=usr/bin/hxtool OLD_FILES+=usr/bin/kadmin OLD_FILES+=usr/bin/kdestroy OLD_FILES+=usr/bin/kf OLD_FILES+=usr/bin/kgetcred OLD_FILES+=usr/bin/kinit OLD_FILES+=usr/bin/klist OLD_FILES+=usr/bin/kpasswd OLD_FILES+=usr/bin/krb5-config OLD_FILES+=usr/bin/ksu OLD_FILES+=usr/bin/kswitch OLD_FILES+=usr/bin/string2key OLD_FILES+=usr/bin/verify_krb5_conf OLD_FILES+=usr/include/asn1-common.h OLD_FILES+=usr/include/asn1_err.h OLD_FILES+=usr/include/base64.h OLD_FILES+=usr/include/cms_asn1.h OLD_FILES+=usr/include/crmf_asn1.h OLD_FILES+=usr/include/der-private.h OLD_FILES+=usr/include/der-protos.h OLD_FILES+=usr/include/der.h OLD_FILES+=usr/include/digest_asn1.h OLD_FILES+=usr/include/getarg.h OLD_FILES+=usr/include/gssapi/gssapi_krb5.h OLD_FILES+=usr/include/hdb-protos.h OLD_FILES+=usr/include/hdb.h OLD_FILES+=usr/include/hdb_asn1.h OLD_FILES+=usr/include/hdb_err.h OLD_FILES+=usr/include/heim_asn1.h OLD_FILES+=usr/include/heim_err.h OLD_FILES+=usr/include/heim_threads.h OLD_FILES+=usr/include/heimbase.h OLD_FILES+=usr/include/heimntlm-protos.h OLD_FILES+=usr/include/heimntlm.h OLD_FILES+=usr/include/hex.h OLD_FILES+=usr/include/hx509-private.h OLD_FILES+=usr/include/hx509-protos.h OLD_FILES+=usr/include/hx509.h OLD_FILES+=usr/include/hx509_err.h OLD_FILES+=usr/include/k524_err.h OLD_FILES+=usr/include/kadm5/admin.h OLD_FILES+=usr/include/kadm5/kadm5-private.h OLD_FILES+=usr/include/kadm5/kadm5-protos.h OLD_FILES+=usr/include/kadm5/kadm5-pwcheck.h OLD_FILES+=usr/include/kadm5/kadm5_err.h OLD_FILES+=usr/include/kadm5/private.h OLD_FILES+=usr/include/kafs.h OLD_FILES+=usr/include/kdc-protos.h OLD_FILES+=usr/include/kdc.h OLD_FILES+=usr/include/krb5-private.h OLD_FILES+=usr/include/krb5-protos.h OLD_FILES+=usr/include/krb5-types.h OLD_FILES+=usr/include/krb5.h OLD_FILES+=usr/include/krb5/ccache_plugin.h OLD_FILES+=usr/include/krb5/locate_plugin.h OLD_FILES+=usr/include/krb5/send_to_kdc_plugin.h OLD_FILES+=usr/include/krb5/windc_plugin.h OLD_FILES+=usr/include/krb5_asn1.h OLD_FILES+=usr/include/krb5_ccapi.h OLD_FILES+=usr/include/krb5_err.h OLD_FILES+=usr/include/kx509_asn1.h OLD_FILES+=usr/include/ntlm_err.h OLD_FILES+=usr/include/ocsp_asn1.h OLD_FILES+=usr/include/parse_bytes.h OLD_FILES+=usr/include/parse_time.h OLD_FILES+=usr/include/parse_units.h OLD_FILES+=usr/include/pkcs10_asn1.h OLD_FILES+=usr/include/pkcs12_asn1.h OLD_FILES+=usr/include/pkcs8_asn1.h OLD_FILES+=usr/include/pkcs9_asn1.h OLD_FILES+=usr/include/pkinit_asn1.h OLD_FILES+=usr/include/resolve.h OLD_FILES+=usr/include/rfc2459_asn1.h OLD_FILES+=usr/include/roken-common.h OLD_FILES+=usr/include/rtbl.h OLD_FILES+=usr/include/wind.h OLD_FILES+=usr/include/wind_err.h OLD_FILES+=usr/include/xdbm.h OLD_FILES+=usr/lib/libasn1.a OLD_FILES+=usr/lib/libasn1.so OLD_LIBS+=usr/lib/libasn1.so.11 OLD_FILES+=usr/lib/libasn1_p.a OLD_FILES+=usr/lib/libcom_err.a OLD_FILES+=usr/lib/libcom_err.so OLD_LIBS+=usr/lib/libcom_err.so.5 OLD_FILES+=usr/lib/libcom_err_p.a OLD_FILES+=usr/lib/libgssapi_krb5.a OLD_FILES+=usr/lib/libgssapi_krb5.so OLD_LIBS+=usr/lib/libgssapi_krb5.so.10 OLD_FILES+=usr/lib/libgssapi_krb5_p.a OLD_FILES+=usr/lib/libgssapi_ntlm.a OLD_FILES+=usr/lib/libgssapi_ntlm.so OLD_LIBS+=usr/lib/libgssapi_ntlm.so.10 OLD_FILES+=usr/lib/libgssapi_ntlm_p.a OLD_FILES+=usr/lib/libgssapi_spnego.a OLD_FILES+=usr/lib/libgssapi_spnego.so OLD_LIBS+=usr/lib/libgssapi_spnego.so.10 OLD_FILES+=usr/lib/libgssapi_spnego_p.a OLD_FILES+=usr/lib/libhdb.a OLD_FILES+=usr/lib/libhdb.so OLD_LIBS+=usr/lib/libhdb.so.11 OLD_FILES+=usr/lib/libhdb_p.a OLD_FILES+=usr/lib/libheimbase.a OLD_FILES+=usr/lib/libheimbase.so OLD_LIBS+=usr/lib/libheimbase.so.11 OLD_FILES+=usr/lib/libheimbase_p.a OLD_FILES+=usr/lib/libheimntlm.a OLD_FILES+=usr/lib/libheimntlm.so OLD_LIBS+=usr/lib/libheimntlm.so.11 OLD_FILES+=usr/lib/libheimntlm_p.a OLD_FILES+=usr/lib/libheimsqlite.a OLD_FILES+=usr/lib/libheimsqlite.so OLD_LIBS+=usr/lib/libheimsqlite.so.11 OLD_FILES+=usr/lib/libheimsqlite_p.a OLD_FILES+=usr/lib/libhx509.a OLD_FILES+=usr/lib/libhx509.so OLD_LIBS+=usr/lib/libhx509.so.11 OLD_FILES+=usr/lib/libhx509_p.a OLD_FILES+=usr/lib/libkadm5clnt.a OLD_FILES+=usr/lib/libkadm5clnt.so OLD_LIBS+=usr/lib/libkadm5clnt.so.11 OLD_FILES+=usr/lib/libkadm5clnt_p.a OLD_FILES+=usr/lib/libkadm5srv.a OLD_FILES+=usr/lib/libkadm5srv.so OLD_LIBS+=usr/lib/libkadm5srv.so.11 OLD_FILES+=usr/lib/libkadm5srv_p.a OLD_FILES+=usr/lib/libkafs5.a OLD_FILES+=usr/lib/libkafs5.so OLD_LIBS+=usr/lib/libkafs5.so.11 OLD_FILES+=usr/lib/libkafs5_p.a OLD_FILES+=usr/lib/libkdc.a OLD_FILES+=usr/lib/libkdc.so OLD_LIBS+=usr/lib/libkdc.so.11 OLD_FILES+=usr/lib/libkdc_p.a OLD_FILES+=usr/lib/libkrb5.a OLD_FILES+=usr/lib/libkrb5.so OLD_LIBS+=usr/lib/libkrb5.so.11 OLD_FILES+=usr/lib/libkrb5_p.a OLD_FILES+=usr/lib/libroken.a OLD_FILES+=usr/lib/libroken.so OLD_LIBS+=usr/lib/libroken.so.11 OLD_FILES+=usr/lib/libroken_p.a OLD_FILES+=usr/lib/libwind.a OLD_FILES+=usr/lib/libwind.so OLD_LIBS+=usr/lib/libwind.so.11 OLD_FILES+=usr/lib/libwind_p.a OLD_FILES+=usr/lib/pam_krb5.so OLD_LIBS+=usr/lib/pam_krb5.so.5 OLD_FILES+=usr/lib/pam_ksu.so OLD_LIBS+=usr/lib/pam_ksu.so.5 .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libasn1.a OLD_FILES+=usr/lib32/libasn1.so OLD_LIBS+=usr/lib32/libasn1.so.11 OLD_FILES+=usr/lib32/libasn1_p.a OLD_FILES+=usr/lib32/libgssapi_krb5.a OLD_FILES+=usr/lib32/libgssapi_krb5.so OLD_LIBS+=usr/lib32/libgssapi_krb5.so.10 OLD_FILES+=usr/lib32/libgssapi_krb5_p.a OLD_FILES+=usr/lib32/libgssapi_ntlm.a OLD_FILES+=usr/lib32/libgssapi_ntlm.so OLD_LIBS+=usr/lib32/libgssapi_ntlm.so.10 OLD_FILES+=usr/lib32/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib32/libgssapi_spnego.a OLD_FILES+=usr/lib32/libgssapi_spnego.so OLD_LIBS+=usr/lib32/libgssapi_spnego.so.10 OLD_FILES+=usr/lib32/libgssapi_spnego_p.a OLD_FILES+=usr/lib32/libhdb.a OLD_FILES+=usr/lib32/libhdb.so OLD_LIBS+=usr/lib32/libhdb.so.11 OLD_FILES+=usr/lib32/libhdb_p.a OLD_FILES+=usr/lib32/libheimbase.a OLD_FILES+=usr/lib32/libheimbase.so OLD_LIBS+=usr/lib32/libheimbase.so.11 OLD_FILES+=usr/lib32/libheimbase_p.a OLD_FILES+=usr/lib32/libheimntlm.a OLD_FILES+=usr/lib32/libheimntlm.so OLD_LIBS+=usr/lib32/libheimntlm.so.11 OLD_FILES+=usr/lib32/libheimntlm_p.a OLD_FILES+=usr/lib32/libheimsqlite.a OLD_FILES+=usr/lib32/libheimsqlite.so OLD_LIBS+=usr/lib32/libheimsqlite.so.11 OLD_FILES+=usr/lib32/libheimsqlite_p.a OLD_FILES+=usr/lib32/libhx509.a OLD_FILES+=usr/lib32/libhx509.so OLD_LIBS+=usr/lib32/libhx509.so.11 OLD_FILES+=usr/lib32/libhx509_p.a OLD_FILES+=usr/lib32/libkadm5clnt.a OLD_FILES+=usr/lib32/libkadm5clnt.so OLD_LIBS+=usr/lib32/libkadm5clnt.so.11 OLD_FILES+=usr/lib32/libkadm5clnt_p.a OLD_FILES+=usr/lib32/libkadm5srv.a OLD_FILES+=usr/lib32/libkadm5srv.so OLD_LIBS+=usr/lib32/libkadm5srv.so.11 OLD_FILES+=usr/lib32/libkadm5srv_p.a OLD_FILES+=usr/lib32/libkafs5.a OLD_FILES+=usr/lib32/libkafs5.so OLD_LIBS+=usr/lib32/libkafs5.so.11 OLD_FILES+=usr/lib32/libkafs5_p.a OLD_FILES+=usr/lib32/libkdc.a OLD_FILES+=usr/lib32/libkdc.so OLD_LIBS+=usr/lib32/libkdc.so.11 OLD_FILES+=usr/lib32/libkdc_p.a OLD_FILES+=usr/lib32/libkrb5.a OLD_FILES+=usr/lib32/libkrb5.so OLD_LIBS+=usr/lib32/libkrb5.so.11 OLD_FILES+=usr/lib32/libkrb5_p.a OLD_FILES+=usr/lib32/libroken.a OLD_FILES+=usr/lib32/libroken.so OLD_LIBS+=usr/lib32/libroken.so.11 OLD_FILES+=usr/lib32/libroken_p.a OLD_FILES+=usr/lib32/libwind.a OLD_FILES+=usr/lib32/libwind.so OLD_LIBS+=usr/lib32/libwind.so.11 OLD_FILES+=usr/lib32/libwind_p.a OLD_FILES+=usr/lib32/pam_krb5.so OLD_LIBS+=usr/lib32/pam_krb5.so.5 OLD_FILES+=usr/lib32/pam_ksu.so OLD_LIBS+=usr/lib32/pam_ksu.so.5 .endif OLD_FILES+=usr/libexec/digest-service OLD_FILES+=usr/libexec/hprop OLD_FILES+=usr/libexec/hpropd OLD_FILES+=usr/libexec/ipropd-master OLD_FILES+=usr/libexec/ipropd-slave OLD_FILES+=usr/libexec/kadmind OLD_FILES+=usr/libexec/kcm OLD_FILES+=usr/libexec/kdc OLD_FILES+=usr/libexec/kdigest OLD_FILES+=usr/libexec/kfd OLD_FILES+=usr/libexec/kimpersonate OLD_FILES+=usr/libexec/kpasswdd OLD_FILES+=usr/sbin/kstash OLD_FILES+=usr/sbin/ktutil OLD_FILES+=usr/sbin/iprop-log OLD_FILES+=usr/share/info/heimdal.info.gz OLD_FILES+=usr/share/man/man1/kdestroy.1.gz OLD_FILES+=usr/share/man/man1/kf.1.gz OLD_FILES+=usr/share/man/man1/kinit.1.gz OLD_FILES+=usr/share/man/man1/klist.1.gz OLD_FILES+=usr/share/man/man1/kpasswd.1.gz OLD_FILES+=usr/share/man/man1/krb5-config.1.gz OLD_FILES+=usr/share/man/man1/kswitch.1.gz OLD_FILES+=usr/share/man/man3/HDB.3.gz OLD_FILES+=usr/share/man/man3/hdb__del.3.gz OLD_FILES+=usr/share/man/man3/hdb__get.3.gz OLD_FILES+=usr/share/man/man3/hdb__put.3.gz OLD_FILES+=usr/share/man/man3/hdb_auth_status.3.gz OLD_FILES+=usr/share/man/man3/hdb_check_constrained_delegation.3.gz OLD_FILES+=usr/share/man/man3/hdb_check_pkinit_ms_upn_match.3.gz OLD_FILES+=usr/share/man/man3/hdb_check_s4u2self.3.gz OLD_FILES+=usr/share/man/man3/hdb_close.3.gz OLD_FILES+=usr/share/man/man3/hdb_destroy.3.gz OLD_FILES+=usr/share/man/man3/hdb_entry_ex.3.gz OLD_FILES+=usr/share/man/man3/hdb_fetch_kvno.3.gz OLD_FILES+=usr/share/man/man3/hdb_firstkey.3.gz OLD_FILES+=usr/share/man/man3/hdb_free.3.gz OLD_FILES+=usr/share/man/man3/hdb_get_realms.3.gz OLD_FILES+=usr/share/man/man3/hdb_lock.3.gz 
OLD_FILES+=usr/share/man/man3/hdb_name.3.gz OLD_FILES+=usr/share/man/man3/hdb_nextkey.3.gz OLD_FILES+=usr/share/man/man3/hdb_open.3.gz OLD_FILES+=usr/share/man/man3/hdb_password.3.gz OLD_FILES+=usr/share/man/man3/hdb_remove.3.gz OLD_FILES+=usr/share/man/man3/hdb_rename.3.gz OLD_FILES+=usr/share/man/man3/hdb_store.3.gz OLD_FILES+=usr/share/man/man3/hdb_unlock.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm1_master.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm2_master.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_lm2.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm1.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm2.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_decode_targetinfo.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_targetinfo.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type1.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type2.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type3.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_free_buf.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_free_targetinfo.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type1.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type2.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type3.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_keyex_unwrap.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_nt_key.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_ntlmv2_key.3.gz OLD_FILES+=usr/share/man/man3/heim_ntlm_verify_ntlm2.3.gz OLD_FILES+=usr/share/man/man3/hx509.3.gz OLD_FILES+=usr/share/man/man3/hx509_bitstring_print.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_sign.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_sign_self.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_crl_dp_uri.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_eku.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_hostname.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_jid.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_ms_upn.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_otherName.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_pkinit.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_rfc822name.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_init.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_ca.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_domaincontroller.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter_lifetime.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notBefore.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_proxy.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_serialnumber.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_spki.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_subject.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_template.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_unique.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_subject_expand.3.gz OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_template_units.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_binary.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_check_eku.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_cmp.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_find_subjectAltName_otherName.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI.3.gz 
OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI_AlgorithmIdentifier.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_attribute.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_base_subject.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_friendly_name.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer_unique_id.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_notAfter.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_notBefore.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_serialnumber.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject_unique_id.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_init.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_init_data.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_keyusage_print.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_ref.3.gz OLD_FILES+=usr/share/man/man3/hx509_cert_set_friendly_name.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_add.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_append.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_end_seq.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_filter.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_find.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_info.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_init.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_iter_f.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_merge.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_next_cert.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_start_seq.3.gz OLD_FILES+=usr/share/man/man3/hx509_certs_store.3.gz OLD_FILES+=usr/share/man/man3/hx509_ci_print_names.3.gz OLD_FILES+=usr/share/man/man3/hx509_clear_error_string.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms_create_signed_1.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms_envelope_1.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms_unenvelope.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms_unwrap_ContentInfo.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms_verify_signed.3.gz OLD_FILES+=usr/share/man/man3/hx509_cms_wrap_ContentInfo.3.gz OLD_FILES+=usr/share/man/man3/hx509_context_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_context_init.3.gz OLD_FILES+=usr/share/man/man3/hx509_context_set_missing_revoke.3.gz OLD_FILES+=usr/share/man/man3/hx509_crl_add_revoked_certs.3.gz OLD_FILES+=usr/share/man/man3/hx509_crl_alloc.3.gz OLD_FILES+=usr/share/man/man3/hx509_crl_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_crl_lifetime.3.gz OLD_FILES+=usr/share/man/man3/hx509_crl_sign.3.gz OLD_FILES+=usr/share/man/man3/hx509_crypto.3.gz OLD_FILES+=usr/share/man/man3/hx509_env.3.gz OLD_FILES+=usr/share/man/man3/hx509_env_add.3.gz OLD_FILES+=usr/share/man/man3/hx509_env_add_binding.3.gz OLD_FILES+=usr/share/man/man3/hx509_env_find.3.gz OLD_FILES+=usr/share/man/man3/hx509_env_find_binding.3.gz OLD_FILES+=usr/share/man/man3/hx509_env_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_env_lfind.3.gz OLD_FILES+=usr/share/man/man3/hx509_err.3.gz OLD_FILES+=usr/share/man/man3/hx509_error.3.gz OLD_FILES+=usr/share/man/man3/hx509_free_error_string.3.gz OLD_FILES+=usr/share/man/man3/hx509_free_octet_string_list.3.gz OLD_FILES+=usr/share/man/man3/hx509_general_name_unparse.3.gz OLD_FILES+=usr/share/man/man3/hx509_get_error_string.3.gz OLD_FILES+=usr/share/man/man3/hx509_get_one_cert.3.gz OLD_FILES+=usr/share/man/man3/hx509_keyset.3.gz OLD_FILES+=usr/share/man/man3/hx509_lock.3.gz 
OLD_FILES+=usr/share/man/man3/hx509_misc.3.gz OLD_FILES+=usr/share/man/man3/hx509_name.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_binary.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_cmp.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_copy.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_expand.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_is_null_p.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_to_Name.3.gz OLD_FILES+=usr/share/man/man3/hx509_name_to_string.3.gz OLD_FILES+=usr/share/man/man3/hx509_ocsp_request.3.gz OLD_FILES+=usr/share/man/man3/hx509_ocsp_verify.3.gz OLD_FILES+=usr/share/man/man3/hx509_oid_print.3.gz OLD_FILES+=usr/share/man/man3/hx509_oid_sprint.3.gz OLD_FILES+=usr/share/man/man3/hx509_parse_name.3.gz OLD_FILES+=usr/share/man/man3/hx509_peer.3.gz OLD_FILES+=usr/share/man/man3/hx509_peer_info_add_cms_alg.3.gz OLD_FILES+=usr/share/man/man3/hx509_peer_info_alloc.3.gz OLD_FILES+=usr/share/man/man3/hx509_peer_info_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cert.3.gz OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cms_algs.3.gz OLD_FILES+=usr/share/man/man3/hx509_print.3.gz OLD_FILES+=usr/share/man/man3/hx509_print_cert.3.gz OLD_FILES+=usr/share/man/man3/hx509_print_stdout.3.gz OLD_FILES+=usr/share/man/man3/hx509_query.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_alloc.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_match_cmp_func.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_match_eku.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_match_friendly_name.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_match_issuer_serial.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_match_option.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_statistic_file.3.gz OLD_FILES+=usr/share/man/man3/hx509_query_unparse_stats.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke_add_crl.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke_add_ocsp.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke_init.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke_ocsp_print.3.gz OLD_FILES+=usr/share/man/man3/hx509_revoke_verify.3.gz OLD_FILES+=usr/share/man/man3/hx509_set_error_string.3.gz OLD_FILES+=usr/share/man/man3/hx509_set_error_stringv.3.gz OLD_FILES+=usr/share/man/man3/hx509_unparse_der_name.3.gz OLD_FILES+=usr/share/man/man3/hx509_validate_cert.3.gz OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_add_flags.3.gz OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_free.3.gz OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_init.3.gz OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_set_print.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_attach_anchors.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_attach_revoke.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_ctx_f_allow_default_trustanchors.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_destroy_ctx.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_hostname.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_init_ctx.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_path.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_set_max_depth.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_set_proxy_certificate.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_set_strict_rfc3280_verification.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_set_time.3.gz OLD_FILES+=usr/share/man/man3/hx509_verify_signature.3.gz 
OLD_FILES+=usr/share/man/man3/hx509_xfree.3.gz OLD_FILES+=usr/share/man/man3/k_afs_cell_of_file.3.gz OLD_FILES+=usr/share/man/man3/k_hasafs.3.gz OLD_FILES+=usr/share/man/man3/k_pioctl.3.gz OLD_FILES+=usr/share/man/man3/k_setpag.3.gz OLD_FILES+=usr/share/man/man3/k_unlog.3.gz OLD_FILES+=usr/share/man/man3/kadm5_pwcheck.3.gz OLD_FILES+=usr/share/man/man3/kafs.3.gz OLD_FILES+=usr/share/man/man3/kafs5.3.gz OLD_FILES+=usr/share/man/man3/kafs_set_verbose.3.gz OLD_FILES+=usr/share/man/man3/kafs_settoken.3.gz OLD_FILES+=usr/share/man/man3/kafs_settoken5.3.gz OLD_FILES+=usr/share/man/man3/kafs_settoken_rxkad.3.gz OLD_FILES+=usr/share/man/man3/krb5.3.gz OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc.3.gz OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc_ccache.3.gz OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal_ext.3.gz OLD_FILES+=usr/share/man/man3/krb5_524_conv_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_acc_ops.3.gz OLD_FILES+=usr/share/man/man3/krb5_acl_match_file.3.gz OLD_FILES+=usr/share/man/man3/krb5_acl_match_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_add_et_list.3.gz OLD_FILES+=usr/share/man/man3/krb5_add_extra_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_add_ignore_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_addlog_dest.3.gz OLD_FILES+=usr/share/man/man3/krb5_addlog_func.3.gz OLD_FILES+=usr/share/man/man3/krb5_addr2sockaddr.3.gz OLD_FILES+=usr/share/man/man3/krb5_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_address_compare.3.gz OLD_FILES+=usr/share/man/man3/krb5_address_order.3.gz OLD_FILES+=usr/share/man/man3/krb5_address_prefixlen_boundary.3.gz OLD_FILES+=usr/share/man/man3/krb5_address_search.3.gz OLD_FILES+=usr/share/man/man3/krb5_afslog.3.gz OLD_FILES+=usr/share/man/man3/krb5_afslog_uid.3.gz OLD_FILES+=usr/share/man/man3/krb5_allow_weak_crypto.3.gz OLD_FILES+=usr/share/man/man3/krb5_aname_to_localname.3.gz OLD_FILES+=usr/share/man/man3/krb5_anyaddr.3.gz OLD_FILES+=usr/share/man/man3/krb5_appdefault.3.gz OLD_FILES+=usr/share/man/man3/krb5_appdefault_boolean.3.gz OLD_FILES+=usr/share/man/man3/krb5_appdefault_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_appdefault_time.3.gz OLD_FILES+=usr/share/man/man3/krb5_append_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_genaddrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getaddrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getflags.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getlocalsubkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getrcache.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getremotesubkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_getuserkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_initivector.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs_from_fd.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setflags.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setivector.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setlocalsubkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setrcache.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setremotesubkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_con_setuserkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_context.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_getauthenticator.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_getcksumtype.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_getkeytype.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_getlocalseqnumber.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_getremoteseqnumber.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_setcksumtype.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_setkeytype.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_setlocalseqnumber.3.gz OLD_FILES+=usr/share/man/man3/krb5_auth_setremoteseqnumber.3.gz OLD_FILES+=usr/share/man/man3/krb5_build_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_build_principal_ext.3.gz OLD_FILES+=usr/share/man/man3/krb5_build_principal_va.3.gz OLD_FILES+=usr/share/man/man3/krb5_build_principal_va_ext.3.gz OLD_FILES+=usr/share/man/man3/krb5_c_enctype_compare.3.gz OLD_FILES+=usr/share/man/man3/krb5_c_make_checksum.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_cache_end_seq_get.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_cache_get_first.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_cache_match.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_cache_next.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_clear_mcred.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_close.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_copy_cache.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_copy_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_copy_match_f.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_default_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_destroy.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_end_seq_get.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_gen_new.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_config.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_friendly_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_full_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_kdc_offset.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_lifetime.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_ops.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_prefix_ops.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_type.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_get_version.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_initialize.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_last_change_time.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_move.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_new_unique.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_next_cred.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_register.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_remove_cred.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_resolve.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_retrieve_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_config.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_set_default_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_set_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_set_friendly_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_set_kdc_offset.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_start_seq_get.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_store_cred.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_support_switch.3.gz OLD_FILES+=usr/share/man/man3/krb5_cc_switch.3.gz OLD_FILES+=usr/share/man/man3/krb5_ccache.3.gz OLD_FILES+=usr/share/man/man3/krb5_ccache_intro.3.gz OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_new.3.gz OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_next.3.gz OLD_FILES+=usr/share/man/man3/krb5_cccol_last_change_time.3.gz OLD_FILES+=usr/share/man/man3/krb5_change_password.3.gz OLD_FILES+=usr/share/man/man3/krb5_check_transited.3.gz OLD_FILES+=usr/share/man/man3/krb5_checksum_is_collision_proof.3.gz OLD_FILES+=usr/share/man/man3/krb5_checksum_is_keyed.3.gz OLD_FILES+=usr/share/man/man3/krb5_checksumsize.3.gz OLD_FILES+=usr/share/man/man3/krb5_cksumtype_to_enctype.3.gz OLD_FILES+=usr/share/man/man3/krb5_clear_error_message.3.gz OLD_FILES+=usr/share/man/man3/krb5_clear_error_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_closelog.3.gz OLD_FILES+=usr/share/man/man3/krb5_compare_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_file_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_free_strings.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_bool.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_bool_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_list.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_string_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_strings.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_time.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_get_time_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_parse_file_multi.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_parse_string_multi.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_list.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_string_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_strings.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_time.3.gz OLD_FILES+=usr/share/man/man3/krb5_config_vget_time_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_context.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_creds_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_data.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_host_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_copy_ticket.3.gz OLD_FILES+=usr/share/man/man3/krb5_create_checksum.3.gz OLD_FILES+=usr/share/man/man3/krb5_create_checksum_iov.3.gz OLD_FILES+=usr/share/man/man3/krb5_credential.3.gz OLD_FILES+=usr/share/man/man3/krb5_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_creds_get_ticket_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto.3.gz 
OLD_FILES+=usr/share/man/man3/krb5_crypto_destroy.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_fx_cf2.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_getblocksize.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_getconfoundersize.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_getenctype.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_getpadsize.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_init.3.gz OLD_FILES+=usr/share/man/man3/krb5_crypto_iov.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_alloc.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_cmp.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_copy.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_ct_cmp.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_realloc.3.gz OLD_FILES+=usr/share/man/man3/krb5_data_zero.3.gz OLD_FILES+=usr/share/man/man3/krb5_decrypt.3.gz OLD_FILES+=usr/share/man/man3/krb5_decrypt_EncryptedData.3.gz OLD_FILES+=usr/share/man/man3/krb5_decrypt_iov_ivec.3.gz OLD_FILES+=usr/share/man/man3/krb5_deprecated.3.gz OLD_FILES+=usr/share/man/man3/krb5_digest.3.gz OLD_FILES+=usr/share/man/man3/krb5_digest_probe.3.gz OLD_FILES+=usr/share/man/man3/krb5_eai_to_heim_errno.3.gz OLD_FILES+=usr/share/man/man3/krb5_encrypt.3.gz OLD_FILES+=usr/share/man/man3/krb5_encrypt_EncryptedData.3.gz OLD_FILES+=usr/share/man/man3/krb5_encrypt_iov_ivec.3.gz OLD_FILES+=usr/share/man/man3/krb5_enctype_disable.3.gz OLD_FILES+=usr/share/man/man3/krb5_enctype_enable.3.gz OLD_FILES+=usr/share/man/man3/krb5_enctype_valid.3.gz OLD_FILES+=usr/share/man/man3/krb5_enctypes_compatible_keys.3.gz OLD_FILES+=usr/share/man/man3/krb5_error.3.gz OLD_FILES+=usr/share/man/man3/krb5_expand_hostname.3.gz OLD_FILES+=usr/share/man/man3/krb5_expand_hostname_realms.3.gz OLD_FILES+=usr/share/man/man3/krb5_fcc_ops.3.gz OLD_FILES+=usr/share/man/man3/krb5_fileformats.3.gz OLD_FILES+=usr/share/man/man3/krb5_find_padata.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_config_files.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_context.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_cred_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_creds_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_data.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_data_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_error_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_host_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_keyblock.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_keyblock_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_krbhst.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_ticket.3.gz OLD_FILES+=usr/share/man/man3/krb5_free_unparsed_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_fwd_tgt_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_generate_random_block.3.gz OLD_FILES+=usr/share/man/man3/krb5_generate_subkey.3.gz OLD_FILES+=usr/share/man/man3/krb5_generate_subkey_extended.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_all_client_addrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_all_server_addrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc_opt.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_credentials.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_config_files.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_default_in_tkt_etypes.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_default_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_default_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_default_realms.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_dns_canonicalize_hostname.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_extra_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_fcache_version.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_forwarded_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_host_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_ignore_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_in_cred.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_keytab.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_password.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_skey.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keyblock.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keytab.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_alloc.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_get_error.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_init.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_password.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_kdc_sec_offset.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_krb524hst.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_krb_admin_hst.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_krb_changepw_hst.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_krbhst.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_max_time_skew.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_use_admin_kdc.3.gz OLD_FILES+=usr/share/man/man3/krb5_get_validated_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_getportbyname.3.gz OLD_FILES+=usr/share/man/man3/krb5_h_addr2addr.3.gz OLD_FILES+=usr/share/man/man3/krb5_h_addr2sockaddr.3.gz OLD_FILES+=usr/share/man/man3/krb5_h_errno_to_heim_errno.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_context.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_get.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_get_error.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_init.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_intro.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_keytab.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_password.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_service.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_creds_step.3.gz OLD_FILES+=usr/share/man/man3/krb5_init_ets.3.gz OLD_FILES+=usr/share/man/man3/krb5_initlog.3.gz OLD_FILES+=usr/share/man/man3/krb5_introduction.3.gz OLD_FILES+=usr/share/man/man3/krb5_is_config_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_is_thread_safe.3.gz OLD_FILES+=usr/share/man/man3/krb5_kerberos_enctypes.3.gz OLD_FILES+=usr/share/man/man3/krb5_keyblock_get_enctype.3.gz OLD_FILES+=usr/share/man/man3/krb5_keyblock_init.3.gz OLD_FILES+=usr/share/man/man3/krb5_keyblock_zero.3.gz OLD_FILES+=usr/share/man/man3/krb5_keytab.3.gz OLD_FILES+=usr/share/man/man3/krb5_keytab_intro.3.gz OLD_FILES+=usr/share/man/man3/krb5_keytab_key_proc.3.gz OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes.3.gz OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_keytype_to_string.3.gz 
OLD_FILES+=usr/share/man/man3/krb5_krbhst_format_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_krbhst_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_krbhst_get_addrinfo.3.gz OLD_FILES+=usr/share/man/man3/krb5_krbhst_init.3.gz OLD_FILES+=usr/share/man/man3/krb5_krbhst_next.3.gz OLD_FILES+=usr/share/man/man3/krb5_krbhst_next_as_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_krbhst_reset.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_add_entry.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_close.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_compare.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_copy_entry_contents.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_default_modify_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_default_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_destroy.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_end_seq_get.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_free_entry.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_get_entry.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_get_full_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_get_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_get_type.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_have_content.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_next_entry.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_read_service_key.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_register.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_remove_entry.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_resolve.3.gz OLD_FILES+=usr/share/man/man3/krb5_kt_start_seq_get.3.gz OLD_FILES+=usr/share/man/man3/krb5_kuserok.3.gz OLD_FILES+=usr/share/man/man3/krb5_log.3.gz OLD_FILES+=usr/share/man/man3/krb5_log_msg.3.gz OLD_FILES+=usr/share/man/man3/krb5_make_addrport.3.gz OLD_FILES+=usr/share/man/man3/krb5_make_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_max_sockaddr_size.3.gz OLD_FILES+=usr/share/man/man3/krb5_mcc_ops.3.gz OLD_FILES+=usr/share/man/man3/krb5_mk_req.3.gz OLD_FILES+=usr/share/man/man3/krb5_mk_safe.3.gz OLD_FILES+=usr/share/man/man3/krb5_openlog.3.gz OLD_FILES+=usr/share/man/man3/krb5_pac.3.gz OLD_FILES+=usr/share/man/man3/krb5_pac_get_buffer.3.gz OLD_FILES+=usr/share/man/man3/krb5_pac_verify.3.gz OLD_FILES+=usr/share/man/man3/krb5_parse_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_parse_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_parse_name_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_parse_nametype.3.gz OLD_FILES+=usr/share/man/man3/krb5_password_key_proc.3.gz OLD_FILES+=usr/share/man/man3/krb5_plugin_register.3.gz OLD_FILES+=usr/share/man/man3/krb5_prepend_config_files_default.3.gz OLD_FILES+=usr/share/man/man3/krb5_princ_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_princ_set_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_compare.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_compare_any_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_get_comp_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_get_num_comp.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_get_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_get_type.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_intro.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_is_krbtgt.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_match.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_set_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_principal_set_type.3.gz OLD_FILES+=usr/share/man/man3/krb5_print_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_random_to_key.3.gz OLD_FILES+=usr/share/man/man3/krb5_rcache.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_error.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_req_ctx.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_ctx_alloc.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_keytab.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_pac_check.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_ctx_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_get_server.3.gz OLD_FILES+=usr/share/man/man3/krb5_rd_safe.3.gz OLD_FILES+=usr/share/man/man3/krb5_realm_compare.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_addrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_authdata.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_creds_tag.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_data.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_int16.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_int32.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_int8.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_keyblock.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_stringz.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_times.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_uint16.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_uint32.3.gz OLD_FILES+=usr/share/man/man3/krb5_ret_uint8.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_config_files.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_default_in_tkt_etypes.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_default_realm.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_dns_canonicalize_hostname.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_error_message.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_error_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_extra_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_fcache_version.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_home_dir_access.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_ignore_addresses.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_kdc_sec_offset.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_max_time_skew.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_password.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_real_time.3.gz OLD_FILES+=usr/share/man/man3/krb5_set_use_admin_kdc.3.gz OLD_FILES+=usr/share/man/man3/krb5_sname_to_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_sock_to_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_sockaddr2address.3.gz OLD_FILES+=usr/share/man/man3/krb5_sockaddr2port.3.gz OLD_FILES+=usr/share/man/man3/krb5_sockaddr_uninteresting.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_clear_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_emem.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_free.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_from_data.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_from_fd.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_from_mem.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_from_readonly_mem.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_get_byteorder.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_get_eof_code.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_is_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_read.3.gz 
OLD_FILES+=usr/share/man/man3/krb5_storage_seek.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_set_byteorder.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_set_eof_code.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_set_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_set_max_alloc.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_to_data.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_truncate.3.gz OLD_FILES+=usr/share/man/man3/krb5_storage_write.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_address.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_addrs.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_authdata.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_creds_tag.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_data.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_int16.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_int32.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_int8.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_keyblock.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_principal.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_stringz.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_times.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_uint16.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_uint32.3.gz OLD_FILES+=usr/share/man/man3/krb5_store_uint8.3.gz OLD_FILES+=usr/share/man/man3/krb5_string_to_key.3.gz OLD_FILES+=usr/share/man/man3/krb5_string_to_keytype.3.gz OLD_FILES+=usr/share/man/man3/krb5_support.3.gz OLD_FILES+=usr/share/man/man3/krb5_ticket.3.gz OLD_FILES+=usr/share/man/man3/krb5_ticket_get_authorization_data_type.3.gz OLD_FILES+=usr/share/man/man3/krb5_ticket_get_client.3.gz OLD_FILES+=usr/share/man/man3/krb5_ticket_get_endtime.3.gz OLD_FILES+=usr/share/man/man3/krb5_ticket_get_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_ticket_get_server.3.gz OLD_FILES+=usr/share/man/man3/krb5_timeofday.3.gz OLD_FILES+=usr/share/man/man3/krb5_unparse_name.3.gz OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed.3.gz OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_short.3.gz OLD_FILES+=usr/share/man/man3/krb5_unparse_name_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_unparse_name_short.3.gz OLD_FILES+=usr/share/man/man3/krb5_us_timeofday.3.gz OLD_FILES+=usr/share/man/man3/krb5_v4compat.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_checksum.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_checksum_iov.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_init_creds.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_opt_init.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_flags.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_keytab.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_secure.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_service.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_user.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_user_lrealm.3.gz OLD_FILES+=usr/share/man/man3/krb5_verify_user_opt.3.gz OLD_FILES+=usr/share/man/man3/krb5_vlog.3.gz OLD_FILES+=usr/share/man/man3/krb5_vlog_msg.3.gz OLD_FILES+=usr/share/man/man3/krb5_vset_error_string.3.gz OLD_FILES+=usr/share/man/man3/krb5_vwarn.3.gz OLD_FILES+=usr/share/man/man3/krb_afslog.3.gz OLD_FILES+=usr/share/man/man3/krb_afslog_uid.3.gz OLD_FILES+=usr/share/man/man3/ntlm_buf.3.gz OLD_FILES+=usr/share/man/man3/ntlm_core.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type1.3.gz OLD_FILES+=usr/share/man/man3/ntlm_type2.3.gz OLD_FILES+=usr/share/man/man3/ntlm_type3.3.gz OLD_FILES+=usr/share/man/man5/krb5.conf.5.gz OLD_FILES+=usr/share/man/man8/hprop.8.gz OLD_FILES+=usr/share/man/man8/hpropd.8.gz OLD_FILES+=usr/share/man/man8/iprop-log.8.gz OLD_FILES+=usr/share/man/man8/iprop.8.gz OLD_FILES+=usr/share/man/man8/kadmin.8.gz OLD_FILES+=usr/share/man/man8/kadmind.8.gz OLD_FILES+=usr/share/man/man8/kcm.8.gz OLD_FILES+=usr/share/man/man8/kdc.8.gz OLD_FILES+=usr/share/man/man8/kdigest.8.gz OLD_FILES+=usr/share/man/man8/kerberos.8.gz OLD_FILES+=usr/share/man/man8/kimpersonate.8.gz OLD_FILES+=usr/share/man/man8/kpasswdd.8.gz OLD_FILES+=usr/share/man/man8/kstash.8.gz OLD_FILES+=usr/share/man/man8/ktutil.8.gz OLD_FILES+=usr/share/man/man8/pam_krb5.8.gz OLD_FILES+=usr/share/man/man8/pam_ksu.8.gz OLD_FILES+=usr/share/man/man8/string2key.8.gz OLD_FILES+=usr/share/man/man8/verify_krb5_conf.8.gz .endif #.if ${MK_LIB32} == no # to be filled in #.endif #.if ${MK_LIBTHR} == no # to be filled in #.endif #.if ${MK_LOCALES} == no # to be filled in #.endif .if ${MK_LOCATE} == no OLD_FILES+=etc/locate.rc OLD_FILES+=etc/periodic/weekly/310.locate OLD_FILES+=usr/bin/locate OLD_FILES+=usr/libexec/locate.bigram OLD_FILES+=usr/libexec/locate.code OLD_FILES+=usr/libexec/locate.concatdb OLD_FILES+=usr/libexec/locate.mklocatedb OLD_FILES+=usr/libexec/locate.updatedb OLD_FILES+=usr/share/man/man1/locate.1.gz OLD_FILES+=usr/share/man/man8/locate.updatedb.8.gz OLD_FILES+=usr/share/man/man8/updatedb.8.gz .endif .if ${MK_LPR} == no OLD_FILES+=etc/hosts.lpd OLD_FILES+=etc/printcap OLD_FILES+=usr/bin/lp OLD_FILES+=usr/bin/lpq OLD_FILES+=usr/bin/lpr OLD_FILES+=usr/bin/lprm OLD_FILES+=usr/libexec/lpr/ru/bjc-240.sh.sample OLD_FILES+=usr/libexec/lpr/ru/koi2alt OLD_FILES+=usr/libexec/lpr/ru/koi2855 OLD_FILES+=usr/libexec/lpr/lpf OLD_FILES+=usr/sbin/chkprintcap OLD_FILES+=usr/sbin/lpc OLD_FILES+=usr/sbin/lpd OLD_FILES+=usr/sbin/lptest OLD_FILES+=usr/sbin/pac OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz OLD_FILES+=usr/share/examples/etc/hosts.lpd OLD_FILES+=usr/share/examples/etc/printcap OLD_FILES+=usr/share/man/man1/lp.1.gz OLD_FILES+=usr/share/man/man1/lpq.1.gz OLD_FILES+=usr/share/man/man1/lpr.1.gz OLD_FILES+=usr/share/man/man1/lprm.1.gz OLD_FILES+=usr/share/man/man1/lptest.1.gz OLD_FILES+=usr/share/man/man5/printcap.5.gz OLD_FILES+=usr/share/man/man8/chkprintcap.8.gz OLD_FILES+=usr/share/man/man8/lpc.8.gz OLD_FILES+=usr/share/man/man8/lpd.8.gz OLD_FILES+=usr/share/man/man8/pac.8.gz .endif .if ${MK_MAIL} == no OLD_FILES+=etc/periodic/daily/130.clean-msgs OLD_FILES+=usr/bin/Mail OLD_FILES+=usr/bin/biff OLD_FILES+=usr/bin/from OLD_FILES+=usr/bin/mail OLD_FILES+=usr/bin/mailx OLD_FILES+=usr/bin/msgs OLD_FILES+=usr/libexec/comsat OLD_FILES+=usr/share/examples/etc/mail.rc OLD_FILES+=usr/share/man/man1/Mail.1.gz OLD_FILES+=usr/share/man/man1/biff.1.gz OLD_FILES+=usr/share/man/man1/from.1.gz OLD_FILES+=usr/share/man/man1/mail.1.gz OLD_FILES+=usr/share/man/man1/mailx.1.gz OLD_FILES+=usr/share/man/man1/msgs.1.gz OLD_FILES+=usr/share/man/man8/comsat.8.gz OLD_FILES+=usr/share/misc/mail.help OLD_FILES+=usr/share/misc/mail.tildehelp .endif .if ${MK_MAILWRAPPER} == no OLD_FILES+=etc/mail/mailer.conf .if ${MK_SENDMAIL} == no OLD_FILES+=usr/sbin/mailwrapper .endif OLD_FILES+=usr/share/man/man8/mailwrapper.8.gz .endif #.if ${MK_MAN} == no # This should add a dependency to a special target which removes all man pages. # Listing all of them here is overkill. #.endif
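# A minimal sketch of what such a special target could look like
# (hypothetical target name; it assumes compressed pages installed
# under ${DESTDIR}/usr/share/man and is deliberately left commented
# out, not wired into the build):
#
# delete-old-man-pages:
#	find ${DESTDIR}/usr/share/man -type f -name '*.gz' -delete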
.if ${MK_NCP} == no OLD_FILES+=usr/bin/ncplist OLD_FILES+=usr/bin/ncplogin OLD_FILES+=usr/bin/ncplogout OLD_FILES+=usr/include/fs/nwfs/nwfs.h OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h #OLD_DIRS+=usr/include/fs/nwfs OLD_FILES+=usr/include/netncp/ncp.h OLD_FILES+=usr/include/netncp/ncp_cfg.h OLD_FILES+=usr/include/netncp/ncp_conn.h OLD_FILES+=usr/include/netncp/ncp_file.h OLD_FILES+=usr/include/netncp/ncp_lib.h OLD_FILES+=usr/include/netncp/ncp_ncp.h OLD_FILES+=usr/include/netncp/ncp_nls.h OLD_FILES+=usr/include/netncp/ncp_rcfile.h OLD_FILES+=usr/include/netncp/ncp_rq.h OLD_FILES+=usr/include/netncp/ncp_sock.h OLD_FILES+=usr/include/netncp/ncp_subr.h OLD_FILES+=usr/include/netncp/ncp_user.h OLD_FILES+=usr/include/netncp/ncpio.h OLD_FILES+=usr/include/netncp/nwerror.h #OLD_DIRS+=usr/include/netncp OLD_FILES+=usr/lib/libncp.a OLD_FILES+=usr/lib/libncp.so OLD_LIBS+=usr/lib/libncp.so.4 OLD_FILES+=usr/lib/libncp_p.a .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libncp.a OLD_FILES+=usr/lib32/libncp.so OLD_LIBS+=usr/lib32/libncp.so.4 OLD_FILES+=usr/lib32/libncp_p.a .endif OLD_FILES+=usr/sbin/mount_nwfs OLD_FILES+=usr/share/man/man1/ncplist.1.gz OLD_FILES+=usr/share/man/man1/ncplogin.1.gz OLD_FILES+=usr/share/man/man1/ncplogout.1.gz OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz .endif .if ${MK_NDIS} == no OLD_FILES+=usr/sbin/ndiscvt OLD_FILES+=usr/sbin/ndisgen OLD_FILES+=usr/share/man/man8/ndiscvt.8.gz OLD_FILES+=usr/share/man/man8/ndisgen.8.gz OLD_FILES+=usr/share/misc/windrv_stub.c .endif .if ${MK_NETCAT} == no OLD_FILES+=usr/bin/nc OLD_FILES+=usr/share/man/man1/nc.1.gz .endif #.if ${MK_NIS} == no # to be filled in #.endif #.if ${MK_NLS} == no # to be filled in #.endif .if ${MK_NTP} == no OLD_FILES+=etc/ntp.conf OLD_FILES+=etc/periodic/daily/480.status-ntpd OLD_FILES+=usr/bin/ntpq OLD_FILES+=usr/sbin/ntp-keygen OLD_FILES+=usr/sbin/ntpd OLD_FILES+=usr/sbin/ntpdate OLD_FILES+=usr/sbin/ntpdc OLD_FILES+=usr/sbin/ntptime OLD_FILES+=usr/sbin/sntp OLD_FILES+=usr/share/doc/ntp/accopt.html OLD_FILES+=usr/share/doc/ntp/assoc.html OLD_FILES+=usr/share/doc/ntp/audio.html OLD_FILES+=usr/share/doc/ntp/authopt.html OLD_FILES+=usr/share/doc/ntp/build.html OLD_FILES+=usr/share/doc/ntp/clockopt.html OLD_FILES+=usr/share/doc/ntp/config.html OLD_FILES+=usr/share/doc/ntp/confopt.html OLD_FILES+=usr/share/doc/ntp/copyright.html OLD_FILES+=usr/share/doc/ntp/debug.html OLD_FILES+=usr/share/doc/ntp/driver1.html OLD_FILES+=usr/share/doc/ntp/driver10.html OLD_FILES+=usr/share/doc/ntp/driver11.html OLD_FILES+=usr/share/doc/ntp/driver12.html OLD_FILES+=usr/share/doc/ntp/driver16.html OLD_FILES+=usr/share/doc/ntp/driver18.html OLD_FILES+=usr/share/doc/ntp/driver19.html OLD_FILES+=usr/share/doc/ntp/driver2.html OLD_FILES+=usr/share/doc/ntp/driver20.html OLD_FILES+=usr/share/doc/ntp/driver22.html OLD_FILES+=usr/share/doc/ntp/driver26.html OLD_FILES+=usr/share/doc/ntp/driver27.html OLD_FILES+=usr/share/doc/ntp/driver28.html OLD_FILES+=usr/share/doc/ntp/driver29.html OLD_FILES+=usr/share/doc/ntp/driver3.html OLD_FILES+=usr/share/doc/ntp/driver30.html OLD_FILES+=usr/share/doc/ntp/driver32.html OLD_FILES+=usr/share/doc/ntp/driver33.html OLD_FILES+=usr/share/doc/ntp/driver34.html OLD_FILES+=usr/share/doc/ntp/driver35.html OLD_FILES+=usr/share/doc/ntp/driver36.html OLD_FILES+=usr/share/doc/ntp/driver37.html OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html OLD_FILES+=usr/share/doc/ntp/driver6.html OLD_FILES+=usr/share/doc/ntp/driver7.html OLD_FILES+=usr/share/doc/ntp/driver8.html OLD_FILES+=usr/share/doc/ntp/driver9.html OLD_FILES+=usr/share/doc/ntp/extern.html OLD_FILES+=usr/share/doc/ntp/hints.html OLD_FILES+=usr/share/doc/ntp/howto.html OLD_FILES+=usr/share/doc/ntp/index.html OLD_FILES+=usr/share/doc/ntp/kern.html OLD_FILES+=usr/share/doc/ntp/ldisc.html OLD_FILES+=usr/share/doc/ntp/measure.html OLD_FILES+=usr/share/doc/ntp/miscopt.html OLD_FILES+=usr/share/doc/ntp/monopt.html OLD_FILES+=usr/share/doc/ntp/mx4200data.html OLD_FILES+=usr/share/doc/ntp/notes.html OLD_FILES+=usr/share/doc/ntp/ntpd.html OLD_FILES+=usr/share/doc/ntp/ntpdate.html OLD_FILES+=usr/share/doc/ntp/ntpdc.html OLD_FILES+=usr/share/doc/ntp/ntpq.html OLD_FILES+=usr/share/doc/ntp/ntptime.html OLD_FILES+=usr/share/doc/ntp/ntptrace.html OLD_FILES+=usr/share/doc/ntp/parsedata.html OLD_FILES+=usr/share/doc/ntp/parsenew.html OLD_FILES+=usr/share/doc/ntp/patches.html OLD_FILES+=usr/share/doc/ntp/porting.html OLD_FILES+=usr/share/doc/ntp/pps.html OLD_FILES+=usr/share/doc/ntp/prefer.html OLD_FILES+=usr/share/doc/ntp/quick.html OLD_FILES+=usr/share/doc/ntp/rdebug.html OLD_FILES+=usr/share/doc/ntp/refclock.html OLD_FILES+=usr/share/doc/ntp/release.html OLD_FILES+=usr/share/doc/ntp/tickadj.html OLD_FILES+=usr/share/examples/etc/ntp.conf OLD_FILES+=usr/share/man/man5/ntp.conf.5.gz OLD_FILES+=usr/share/man/man5/ntp.keys.5.gz OLD_FILES+=usr/share/man/man8/ntp-keygen.8.gz OLD_FILES+=usr/share/man/man8/ntpd.8.gz OLD_FILES+=usr/share/man/man8/ntpdate.8.gz OLD_FILES+=usr/share/man/man8/ntpdc.8.gz OLD_FILES+=usr/share/man/man8/ntpq.8.gz OLD_FILES+=usr/share/man/man8/ntptime.8.gz .endif #.if ${MK_OBJC} == no # to be filled in #.endif #.if ${MK_OPENSSH} == no # to be filled in #.endif #.if ${MK_OPENSSL} == no # to be filled in #.endif .if ${MK_PF} == no OLD_FILES+=etc/periodic/security/520.pfdenied OLD_FILES+=etc/pf.os OLD_FILES+=sbin/pfctl OLD_FILES+=sbin/pflogd OLD_FILES+=usr/libexec/tftp-proxy OLD_FILES+=usr/sbin/ftp-proxy OLD_FILES+=usr/share/examples/etc/pf.os OLD_FILES+=usr/share/examples/pf/ackpri OLD_FILES+=usr/share/examples/pf/faq-example1 OLD_FILES+=usr/share/examples/pf/faq-example2 OLD_FILES+=usr/share/examples/pf/faq-example3 OLD_FILES+=usr/share/examples/pf/pf.conf OLD_FILES+=usr/share/examples/pf/queue1 OLD_FILES+=usr/share/examples/pf/queue2 OLD_FILES+=usr/share/examples/pf/queue3 OLD_FILES+=usr/share/examples/pf/queue4 OLD_FILES+=usr/share/examples/pf/spamd OLD_FILES+=usr/share/man/man4/pf.4.gz OLD_FILES+=usr/share/man/man4/pflog.4.gz OLD_FILES+=usr/share/man/man4/pfsync.4.gz OLD_FILES+=usr/share/man/man5/pf.conf.5.gz OLD_FILES+=usr/share/man/man5/pf.os.5.gz OLD_FILES+=usr/share/man/man8/ftp-proxy.8.gz OLD_FILES+=usr/share/man/man8/pfctl.8.gz OLD_FILES+=usr/share/man/man8/pflogd.8.gz OLD_FILES+=usr/share/man/man8/tftp-proxy.8.gz .endif .if ${MK_PKGTOOLS} == no OLD_FILES+=etc/periodic/daily/490.status-pkg-changes OLD_FILES+=etc/periodic/security/460.chkportsum OLD_FILES+=etc/periodic/weekly/400.status-pkg OLD_FILES+=usr/include/pkg.h OLD_FILES+=usr/lib/libpkg.a OLD_FILES+=usr/lib/libpkg.so OLD_LIBS+=usr/lib/libpkg.so.0 OLD_FILES+=usr/sbin/pkg_add OLD_FILES+=usr/sbin/pkg_create OLD_FILES+=usr/sbin/pkg_delete OLD_FILES+=usr/sbin/pkg_info OLD_FILES+=usr/sbin/pkg_updating OLD_FILES+=usr/sbin/pkg_version OLD_FILES+=usr/share/man/man1/pkg_add.1.gz OLD_FILES+=usr/share/man/man1/pkg_create.1.gz OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz 
OLD_FILES+=usr/share/man/man1/pkg_info.1.gz OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz OLD_FILES+=usr/share/man/man1/pkg_version.1.gz .endif .if ${MK_PORTSNAP} == no OLD_FILES+=etc/portsnap.conf OLD_FILES+=usr/libexec/make_index OLD_FILES+=usr/libexec/phttpget OLD_FILES+=usr/sbin/portsnap OLD_FILES+=usr/share/examples/etc/portsnap.conf OLD_FILES+=usr/share/man/man8/portsnap.8.gz .endif .if ${MK_PPP} == no OLD_FILES+=etc/ppp/ppp.conf OLD_FILES+=usr/sbin/ppp OLD_FILES+=usr/sbin/pppctl OLD_FILES+=usr/share/man/man8/ppp.8.gz OLD_FILES+=usr/share/man/man8/pppctl.8.gz .endif .if ${MK_PROFILE} == no OLD_FILES+=usr/lib/libalias_cuseeme_p.a OLD_FILES+=usr/lib/libalias_dummy_p.a OLD_FILES+=usr/lib/libalias_ftp_p.a OLD_FILES+=usr/lib/libalias_irc_p.a OLD_FILES+=usr/lib/libalias_nbt_p.a OLD_FILES+=usr/lib/libalias_p.a OLD_FILES+=usr/lib/libalias_pptp_p.a OLD_FILES+=usr/lib/libalias_skinny_p.a OLD_FILES+=usr/lib/libalias_smedia_p.a OLD_FILES+=usr/lib/libarchive_p.a OLD_FILES+=usr/lib/libasn1_p.a OLD_FILES+=usr/lib/libbegemot_p.a OLD_FILES+=usr/lib/libbluetooth_p.a OLD_FILES+=usr/lib/libbsdxml_p.a OLD_FILES+=usr/lib/libbsm_p.a OLD_FILES+=usr/lib/libbsnmp_p.a OLD_FILES+=usr/lib/libbz2_p.a OLD_FILES+=usr/lib/libc_p.a OLD_FILES+=usr/lib/libcalendar_p.a OLD_FILES+=usr/lib/libcam_p.a OLD_FILES+=usr/lib/libcom_err_p.a OLD_FILES+=usr/lib/libcompat_p.a OLD_FILES+=usr/lib/libcrypt_p.a OLD_FILES+=usr/lib/libcrypto_p.a OLD_FILES+=usr/lib/libcurses_p.a OLD_FILES+=usr/lib/libcursesw_p.a OLD_FILES+=usr/lib/libdevinfo_p.a OLD_FILES+=usr/lib/libdevstat_p.a OLD_FILES+=usr/lib/libdialog_p.a OLD_FILES+=usr/lib/libedit_p.a OLD_FILES+=usr/lib/libelf_p.a OLD_FILES+=usr/lib/libfetch_p.a OLD_FILES+=usr/lib/libfl_p.a OLD_FILES+=usr/lib/libform_p.a OLD_FILES+=usr/lib/libformw_p.a OLD_FILES+=usr/lib/libftpio_p.a OLD_FILES+=usr/lib/libgcc_p.a OLD_FILES+=usr/lib/libgeom_p.a OLD_FILES+=usr/lib/libgnuregex_p.a OLD_FILES+=usr/lib/libgpib_p.a OLD_FILES+=usr/lib/libgssapi_krb5_p.a OLD_FILES+=usr/lib/libgssapi_p.a OLD_FILES+=usr/lib/libhdb_p.a OLD_FILES+=usr/lib/libheimbase_p.a OLD_FILES+=usr/lib/libheimsqlite_p.a OLD_FILES+=usr/lib/libhistory_p.a OLD_FILES+=usr/lib/libipsec_p.a OLD_FILES+=usr/lib/libipx_p.a OLD_FILES+=usr/lib/libjail_p.a OLD_FILES+=usr/lib/libkadm5clnt_p.a OLD_FILES+=usr/lib/libkadm5srv_p.a OLD_FILES+=usr/lib/libkafs5_p.a OLD_FILES+=usr/lib/libkdc_p.a OLD_FILES+=usr/lib/libkiconv_p.a OLD_FILES+=usr/lib/libkrb5_p.a OLD_FILES+=usr/lib/libkvm_p.a OLD_FILES+=usr/lib/libl_p.a OLD_FILES+=usr/lib/libln_p.a OLD_FILES+=usr/lib/liblwres_p.a OLD_FILES+=usr/lib/libm_p.a OLD_FILES+=usr/lib/libmagic_p.a OLD_FILES+=usr/lib/libmd_p.a OLD_FILES+=usr/lib/libmemstat_p.a OLD_FILES+=usr/lib/libmenu_p.a OLD_FILES+=usr/lib/libmenuw_p.a OLD_FILES+=usr/lib/libmilter_p.a OLD_FILES+=usr/lib/libmp_p.a OLD_FILES+=usr/lib/libncp_p.a OLD_FILES+=usr/lib/libncurses_p.a OLD_FILES+=usr/lib/libncursesw_p.a OLD_FILES+=usr/lib/libnetgraph_p.a OLD_FILES+=usr/lib/libngatm_p.a OLD_FILES+=usr/lib/libobjc_p.a OLD_FILES+=usr/lib/libopie_p.a OLD_FILES+=usr/lib/libpanel_p.a OLD_FILES+=usr/lib/libpanelw_p.a OLD_FILES+=usr/lib/libpcap_p.a OLD_FILES+=usr/lib/libpmc_p.a OLD_FILES+=usr/lib/libpthread_p.a OLD_FILES+=usr/lib/libradius_p.a OLD_FILES+=usr/lib/libreadline_p.a OLD_FILES+=usr/lib/libroken_p.a OLD_FILES+=usr/lib/librpcsvc_p.a OLD_FILES+=usr/lib/librt_p.a OLD_FILES+=usr/lib/libsbuf_p.a OLD_FILES+=usr/lib/libsdp_p.a OLD_FILES+=usr/lib/libsmb_p.a OLD_FILES+=usr/lib/libssh_p.a OLD_FILES+=usr/lib/libssl_p.a OLD_FILES+=usr/lib/libstdc++_p.a 
OLD_FILES+=usr/lib/libsupc++_p.a OLD_FILES+=usr/lib/libtacplus_p.a OLD_FILES+=usr/lib/libtermcap_p.a OLD_FILES+=usr/lib/libtermcapw_p.a OLD_FILES+=usr/lib/libtermlib_p.a OLD_FILES+=usr/lib/libtermlibw_p.a OLD_FILES+=usr/lib/libthr_p.a OLD_FILES+=usr/lib/libthread_db_p.a OLD_FILES+=usr/lib/libtinfo_p.a OLD_FILES+=usr/lib/libtinfow_p.a OLD_FILES+=usr/lib/libufs_p.a OLD_FILES+=usr/lib/libugidfw_p.a OLD_FILES+=usr/lib/libusbhid_p.a OLD_FILES+=usr/lib/libutil_p.a OLD_FILES+=usr/lib/libvgl_p.a OLD_FILES+=usr/lib/libwind_p.a OLD_FILES+=usr/lib/libwrap_p.a OLD_FILES+=usr/lib/liby_p.a OLD_FILES+=usr/lib/libypclnt_p.a OLD_FILES+=usr/lib/libz_p.a .endif .if ${MK_RCMDS} == no OLD_FILES+=bin/rcp OLD_FILES+=etc/periodic/daily/140.clean-rwho OLD_FILES+=etc/periodic/daily/430.status-rwho OLD_FILES+=rescue/rcp OLD_FILES+=usr/bin/rlogin OLD_FILES+=usr/bin/rsh OLD_FILES+=usr/bin/ruptime OLD_FILES+=usr/bin/rwho OLD_FILES+=usr/libexec/rlogind OLD_FILES+=usr/libexec/rshd OLD_FILES+=usr/sbin/rwhod OLD_FILES+=usr/share/man/man1/rcp.1.gz OLD_FILES+=usr/share/man/man1/rlogin.1.gz OLD_FILES+=usr/share/man/man1/rsh.1.gz OLD_FILES+=usr/share/man/man1/ruptime.1.gz OLD_FILES+=usr/share/man/man1/rwho.1.gz OLD_FILES+=usr/share/man/man8/rlogind.8.gz OLD_FILES+=usr/share/man/man8/rshd.8.gz OLD_FILES+=usr/share/man/man8/rwhod.8.gz .endif .if ${MK_RCS} == no OLD_FILES+=usr/bin/ci OLD_FILES+=usr/bin/co OLD_FILES+=usr/bin/ident OLD_FILES+=usr/bin/merge OLD_FILES+=usr/bin/rcs OLD_FILES+=usr/bin/rcsclean OLD_FILES+=usr/bin/rcsdiff OLD_FILES+=usr/bin/rcsfreeze OLD_FILES+=usr/bin/rcsmerge OLD_FILES+=usr/bin/rlog OLD_FILES+=usr/share/man/man1/ci.1.gz OLD_FILES+=usr/share/man/man1/co.1.gz OLD_FILES+=usr/share/man/man1/ident.1.gz OLD_FILES+=usr/share/man/man1/merge.1.gz OLD_FILES+=usr/share/man/man1/rcs.1.gz OLD_FILES+=usr/share/man/man1/rcsclean.1.gz OLD_FILES+=usr/share/man/man1/rcsdiff.1.gz OLD_FILES+=usr/share/man/man1/rcsfreeze.1.gz OLD_FILES+=usr/share/man/man1/rcsintro.1.gz OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz OLD_FILES+=usr/share/man/man1/rlog.1.gz OLD_FILES+=usr/share/man/man5/rcsfile.5.gz .endif #.if ${MK_RESCUE} == no # to be filled in or replaced with a special target #.endif .if ${MK_ROUTED} == no OLD_FILES+=sbin/routed OLD_FILES+=sbin/rtquery OLD_FILES+=usr/share/man/man8/routed.8.gz OLD_FILES+=usr/share/man/man8/rtquery.8.gz .endif .if ${MK_SENDMAIL} == no OLD_FILES+=etc/periodic/daily/150.clean-hoststat OLD_FILES+=etc/periodic/daily/210.backup-aliases OLD_FILES+=etc/periodic/daily/440.status-mailq OLD_FILES+=etc/periodic/daily/460.status-mail-rejects OLD_FILES+=etc/periodic/daily/500.queuerun .if ${MK_MAILWRAPPER} == no OLD_FILES+=bin/rmail .endif OLD_FILES+=usr/bin/vacation OLD_FILES+=usr/include/libmilter/mfapi.h OLD_FILES+=usr/include/libmilter/mfdef.h OLD_FILES+=usr/lib/libmilter.a OLD_LIBS+=usr/lib/libmilter.so.5 OLD_FILES+=usr/lib/libmilter_p.a .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64" OLD_FILES+=usr/lib32/libmilter.a OLD_LIBS+=usr/lib32/libmilter.so.5 OLD_FILES+=usr/lib32/libmilter_p.a .endif OLD_FILES+=usr/libexec/mail.local OLD_FILES+=usr/libexec/sendmail/sendmail OLD_FILES+=usr/libexec/smrsh OLD_FILES+=usr/sbin/editmap OLD_FILES+=usr/sbin/mailstats OLD_FILES+=usr/sbin/makemap OLD_FILES+=usr/sbin/praliases OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz OLD_FILES+=usr/share/man/man1/mailq.1.gz OLD_FILES+=usr/share/man/man1/newaliases.1.gz OLD_FILES+=usr/share/man/man1/vacation.1.gz OLD_FILES+=usr/share/man/man5/aliases.5.gz OLD_FILES+=usr/share/man/man8/editmap.8.gz 
OLD_FILES+=usr/share/man/man8/hoststat.8.gz OLD_FILES+=usr/share/man/man8/mail.local.8.gz OLD_FILES+=usr/share/man/man8/mailstats.8.gz OLD_FILES+=usr/share/man/man8/makemap.8.gz OLD_FILES+=usr/share/man/man8/praliases.8.gz OLD_FILES+=usr/share/man/man8/purgestat.8.gz OLD_FILES+=usr/share/man/man8/rmail.8.gz OLD_FILES+=usr/share/man/man8/sendmail.8.gz OLD_FILES+=usr/share/man/man8/smrsh.8.gz OLD_FILES+=usr/share/sendmail/cf/README OLD_FILES+=usr/share/sendmail/cf/cf/Makefile OLD_FILES+=usr/share/sendmail/cf/cf/README OLD_FILES+=usr/share/sendmail/cf/cf/chez.cs.mc OLD_FILES+=usr/share/sendmail/cf/cf/clientproto.mc OLD_FILES+=usr/share/sendmail/cf/cf/cs-hpux10.mc OLD_FILES+=usr/share/sendmail/cf/cf/cs-hpux9.mc OLD_FILES+=usr/share/sendmail/cf/cf/cs-osf1.mc OLD_FILES+=usr/share/sendmail/cf/cf/cs-solaris2.mc OLD_FILES+=usr/share/sendmail/cf/cf/cs-sunos4.1.mc OLD_FILES+=usr/share/sendmail/cf/cf/cs-ultrix4.mc OLD_FILES+=usr/share/sendmail/cf/cf/cyrusproto.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-bsd4.4.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-hpux10.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-hpux9.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-linux.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-mpeix.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-nextstep3.3.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-osf1.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-solaris.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-sunos4.1.mc OLD_FILES+=usr/share/sendmail/cf/cf/generic-ultrix4.mc OLD_FILES+=usr/share/sendmail/cf/cf/huginn.cs.mc OLD_FILES+=usr/share/sendmail/cf/cf/knecht.mc OLD_FILES+=usr/share/sendmail/cf/cf/mail.cs.mc OLD_FILES+=usr/share/sendmail/cf/cf/mail.eecs.mc OLD_FILES+=usr/share/sendmail/cf/cf/mailspool.cs.mc OLD_FILES+=usr/share/sendmail/cf/cf/python.cs.mc OLD_FILES+=usr/share/sendmail/cf/cf/s2k-osf1.mc OLD_FILES+=usr/share/sendmail/cf/cf/s2k-ultrix4.mc OLD_FILES+=usr/share/sendmail/cf/cf/submit.cf OLD_FILES+=usr/share/sendmail/cf/cf/submit.mc OLD_FILES+=usr/share/sendmail/cf/cf/tcpproto.mc OLD_FILES+=usr/share/sendmail/cf/cf/ucbarpa.mc OLD_FILES+=usr/share/sendmail/cf/cf/ucbvax.mc OLD_FILES+=usr/share/sendmail/cf/cf/uucpproto.mc OLD_FILES+=usr/share/sendmail/cf/cf/vangogh.cs.mc OLD_FILES+=usr/share/sendmail/cf/domain/Berkeley.EDU.m4 OLD_FILES+=usr/share/sendmail/cf/domain/CS.Berkeley.EDU.m4 OLD_FILES+=usr/share/sendmail/cf/domain/EECS.Berkeley.EDU.m4 OLD_FILES+=usr/share/sendmail/cf/domain/S2K.Berkeley.EDU.m4 OLD_FILES+=usr/share/sendmail/cf/domain/berkeley-only.m4 OLD_FILES+=usr/share/sendmail/cf/domain/generic.m4 OLD_FILES+=usr/share/sendmail/cf/feature/accept_unqualified_senders.m4 OLD_FILES+=usr/share/sendmail/cf/feature/accept_unresolvable_domains.m4 OLD_FILES+=usr/share/sendmail/cf/feature/access_db.m4 OLD_FILES+=usr/share/sendmail/cf/feature/allmasquerade.m4 OLD_FILES+=usr/share/sendmail/cf/feature/always_add_domain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/authinfo.m4 OLD_FILES+=usr/share/sendmail/cf/feature/badmx.m4 OLD_FILES+=usr/share/sendmail/cf/feature/bestmx_is_local.m4 OLD_FILES+=usr/share/sendmail/cf/feature/bitdomain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/blacklist_recipients.m4 OLD_FILES+=usr/share/sendmail/cf/feature/block_bad_helo.m4 OLD_FILES+=usr/share/sendmail/cf/feature/compat_check.m4 OLD_FILES+=usr/share/sendmail/cf/feature/conncontrol.m4 OLD_FILES+=usr/share/sendmail/cf/feature/delay_checks.m4 OLD_FILES+=usr/share/sendmail/cf/feature/dnsbl.m4 OLD_FILES+=usr/share/sendmail/cf/feature/domaintable.m4 
OLD_FILES+=usr/share/sendmail/cf/feature/enhdnsbl.m4 OLD_FILES+=usr/share/sendmail/cf/feature/generics_entire_domain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/genericstable.m4 OLD_FILES+=usr/share/sendmail/cf/feature/greet_pause.m4 OLD_FILES+=usr/share/sendmail/cf/feature/ldap_routing.m4 OLD_FILES+=usr/share/sendmail/cf/feature/limited_masquerade.m4 OLD_FILES+=usr/share/sendmail/cf/feature/local_lmtp.m4 OLD_FILES+=usr/share/sendmail/cf/feature/local_no_masquerade.m4 OLD_FILES+=usr/share/sendmail/cf/feature/local_procmail.m4 OLD_FILES+=usr/share/sendmail/cf/feature/lookupdotdomain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/loose_relay_check.m4 OLD_FILES+=usr/share/sendmail/cf/feature/mailertable.m4 OLD_FILES+=usr/share/sendmail/cf/feature/masquerade_entire_domain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/masquerade_envelope.m4 OLD_FILES+=usr/share/sendmail/cf/feature/msp.m4 OLD_FILES+=usr/share/sendmail/cf/feature/mtamark.m4 OLD_FILES+=usr/share/sendmail/cf/feature/no_default_msa.m4 OLD_FILES+=usr/share/sendmail/cf/feature/nocanonify.m4 OLD_FILES+=usr/share/sendmail/cf/feature/notsticky.m4 OLD_FILES+=usr/share/sendmail/cf/feature/nouucp.m4 OLD_FILES+=usr/share/sendmail/cf/feature/nullclient.m4 OLD_FILES+=usr/share/sendmail/cf/feature/preserve_local_plus_detail.m4 OLD_FILES+=usr/share/sendmail/cf/feature/preserve_luser_host.m4 OLD_FILES+=usr/share/sendmail/cf/feature/promiscuous_relay.m4 OLD_FILES+=usr/share/sendmail/cf/feature/queuegroup.m4 OLD_FILES+=usr/share/sendmail/cf/feature/ratecontrol.m4 OLD_FILES+=usr/share/sendmail/cf/feature/redirect.m4 OLD_FILES+=usr/share/sendmail/cf/feature/relay_based_on_MX.m4 OLD_FILES+=usr/share/sendmail/cf/feature/relay_entire_domain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/relay_hosts_only.m4 OLD_FILES+=usr/share/sendmail/cf/feature/relay_local_from.m4 OLD_FILES+=usr/share/sendmail/cf/feature/relay_mail_from.m4 OLD_FILES+=usr/share/sendmail/cf/feature/require_rdns.m4 OLD_FILES+=usr/share/sendmail/cf/feature/smrsh.m4 OLD_FILES+=usr/share/sendmail/cf/feature/stickyhost.m4 OLD_FILES+=usr/share/sendmail/cf/feature/use_client_ptr.m4 OLD_FILES+=usr/share/sendmail/cf/feature/use_ct_file.m4 OLD_FILES+=usr/share/sendmail/cf/feature/use_cw_file.m4 OLD_FILES+=usr/share/sendmail/cf/feature/uucpdomain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/virtuser_entire_domain.m4 OLD_FILES+=usr/share/sendmail/cf/feature/virtusertable.m4 OLD_FILES+=usr/share/sendmail/cf/hack/cssubdomain.m4 OLD_FILES+=usr/share/sendmail/cf/m4/cf.m4 OLD_FILES+=usr/share/sendmail/cf/m4/cfhead.m4 OLD_FILES+=usr/share/sendmail/cf/m4/proto.m4 OLD_FILES+=usr/share/sendmail/cf/m4/version.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/cyrus.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/cyrusv2.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/fax.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/local.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/mail11.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/phquery.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/pop.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/procmail.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/qpage.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/smtp.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/usenet.m4 OLD_FILES+=usr/share/sendmail/cf/mailer/uucp.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/a-ux.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/aix3.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/aix4.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/aix5.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/altos.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/amdahl-uts.m4 
OLD_FILES+=usr/share/sendmail/cf/ostype/bsd4.3.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/bsd4.4.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi1.0.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi2.0.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/darwin.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/dgux.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/domainos.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/dragonfly.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/dynix3.2.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd4.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd5.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd6.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/gnu.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/hpux10.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/hpux11.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/hpux9.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/irix4.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/irix5.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/irix6.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/isc4.1.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/linux.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/maxion.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/mklinux.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/mpeix.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/nextstep.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/openbsd.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/osf1.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/powerux.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/ptx2.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/qnx.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/riscos4.5.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/sco-uw-2.1.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/sco3.2.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/sinix.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.ml.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.pre5.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/solaris8.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/sunos3.5.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/sunos4.1.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/svr4.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/ultrix4.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/unicos.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/unicosmk.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/unicosmp.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/unixware7.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/unknown.m4 OLD_FILES+=usr/share/sendmail/cf/ostype/uxpds.m4 OLD_FILES+=usr/share/sendmail/cf/sendmail.schema OLD_FILES+=usr/share/sendmail/cf/sh/makeinfo.sh OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.cogsci.m4 OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.old.arpa.m4 OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.ucbarpa.m4 OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.ucbvax.m4 .endif #.if ${MK_SHAREDOCS} == no # to be filled in #.endif #.if ${MK_SYSCONS} == no # to be filled in #.endif .if ${MK_TCSH} == no OLD_FILES+=bin/csh OLD_FILES+=bin/tcsh OLD_FILES+=rescue/csh OLD_FILES+=rescue/tcsh OLD_FILES+=usr/share/examples/tcsh/complete.tcsh OLD_FILES+=usr/share/examples/tcsh/csh-mode.el OLD_FILES+=usr/share/man/man1/csh.1.gz OLD_FILES+=usr/share/man/man1/tcsh.1.gz OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/tcsh.cat 
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/tcsh.cat OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/es_ES.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/et_EE.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/fi_FI.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/it_CH.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/it_CH.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/it_IT.ISO8859-1/tcsh.cat OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/tcsh.cat OLD_FILES+=usr/share/nls/ja_JP.eucJP/tcsh.cat OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/tcsh.cat OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/tcsh.cat .endif .if ${MK_TELNET} == no OLD_FILES+=usr/bin/telnet OLD_FILES+=usr/libexec/telnetd OLD_FILES+=usr/share/man/man1/telnet.1.gz OLD_FILES+=usr/share/man/man8/telnetd.8.gz .endif #.if ${MK_TOOLCHAIN} == no # to be filled in #.endif #.if ${MK_USB} == no # to be filled in #.endif .if ${MK_UTMPX} == no OLD_FILES+=etc/periodic/monthly/200.accounting OLD_FILES+=usr/bin/last OLD_FILES+=usr/bin/users OLD_FILES+=usr/bin/who OLD_FILES+=usr/bin/wtmpcvt OLD_FILES+=usr/sbin/ac OLD_FILES+=usr/sbin/lastlogin OLD_FILES+=usr/sbin/utx OLD_FILES+=usr/sbin/utxrm OLD_FILES+=usr/share/man/man1/last.1.gz OLD_FILES+=usr/share/man/man1/users.1.gz OLD_FILES+=usr/share/man/man1/who.1.gz OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz OLD_FILES+=usr/share/man/man8/ac.8.gz OLD_FILES+=usr/share/man/man8/lastlogin.8.gz OLD_FILES+=usr/share/man/man8/utx.8.gz OLD_FILES+=usr/share/man/man8/utxrm.8.gz .endif .if ${MK_WIRELESS} == no OLD_FILES+=etc/regdomain.xml OLD_FILES+=usr/sbin/ancontrol OLD_FILES+=usr/sbin/hostapd OLD_FILES+=usr/sbin/hostapd_cli OLD_FILES+=usr/sbin/ndis_events OLD_FILES+=usr/sbin/wlandebug OLD_FILES+=usr/sbin/wlconfig OLD_FILES+=usr/sbin/wpa_cli OLD_FILES+=usr/sbin/wpa_passphrase OLD_FILES+=usr/sbin/wpa_supplicant OLD_FILES+=usr/share/examples/etc/regdomain.xml OLD_FILES+=usr/share/examples/etc/wpa_supplicant.conf OLD_FILES+=usr/share/examples/hostapd/hostapd.conf OLD_FILES+=usr/share/examples/hostapd/hostapd.eap_user OLD_FILES+=usr/share/examples/hostapd/hostapd.wpa_psk OLD_FILES+=usr/share/man/man5/hostapd.conf.5.gz OLD_FILES+=usr/share/man/man5/wpa_supplicant.conf.5.gz OLD_FILES+=usr/share/man/man8/ancontrol.8.gz OLD_FILES+=usr/share/man/man8/hostapd.8.gz OLD_FILES+=usr/share/man/man8/hostapd_cli.8.gz OLD_FILES+=usr/share/man/man8/i386/wlconfig.8.gz OLD_FILES+=usr/share/man/man8/ndis_events.8.gz OLD_FILES+=usr/share/man/man8/wlandebug.8.gz OLD_FILES+=usr/share/man/man8/wpa_cli.8.gz OLD_FILES+=usr/share/man/man8/wpa_passphrase.8.gz OLD_FILES+=usr/share/man/man8/wpa_supplicant.8.gz .endif Index: projects/nand/usr.bin/calendar/calendar.1 =================================================================== --- projects/nand/usr.bin/calendar/calendar.1 (revision 235262) +++ projects/nand/usr.bin/calendar/calendar.1 (revision 235263) @@ -1,327 +1,327 @@ .\" Copyright (c) 1989, 1990, 1993 .\" The Regents of the University of California. All rights reserved. 
.\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)calendar.1 8.1 (Berkeley) 6/29/93 .\" $FreeBSD$ .\" .Dd June 13, 2002 .Dt CALENDAR 1 .Os .Sh NAME .Nm calendar .Nd reminder service .Sh SYNOPSIS .Nm .Op Fl A Ar num .Op Fl a .Op Fl B Ar num .Op Fl D Ar moon|sun .Op Fl d .Op Fl F Ar friday .Op Fl f Ar calendarfile .Op Fl l Ar longitude .Oo .Bk -words .Fl t Ar dd Ns .Sm off .Op . Ar mm Op . Ar year .Sm on .Ek .Oc .Op Fl U Ar UTC-offset .Op Fl W Ar num .Sh DESCRIPTION The .Nm utility checks the current directory for a file named .Pa calendar and displays lines that fall into the specified date range. On the day before a weekend (normally Friday), events for the next three days are displayed. .Pp The following options are available: .Bl -tag -width Ds .It Fl A Ar num Print lines from today and the next .Ar num days (forward, future). .It Fl a Process the ``calendar'' files of all users and mail the results to them. This requires super-user privileges. .It Fl B Ar num Print lines from today and the previous .Ar num days (backward, past). .It Fl D Ar moon|sun Print UTC offset, longitude and moon or sun information. .It Fl d Debug option: print current date information. .It Fl F Ar friday Specify which day of the week is ``Friday'' (the day before the weekend begins). Default is 5. .It Fl f Pa calendarfile Use .Pa calendarfile as the default calendar file. .It Fl l Ar longitude Perform lunar and solar calculations from this longitude. If neither longitude nor UTC offset is specified, the calculations will be based on the difference between UTC time and localtime. If both are specified, UTC offset overrides longitude. .It Xo Fl t .Sm off .Ar dd .Op . Ar mm Op . Ar year .Sm on .Xc For test purposes only: set date directly to argument values. .It Fl U Ar UTC-offset Perform lunar and solar calculations from this UTC offset. If neither UTC offset nor longitude is specified, the calculations will be based on the difference between UTC time and localtime. If both are specified, UTC offset overrides longitude. 
.It Fl W Ar num
Print lines from today and the next
.Ar num
days (forward, future).
Ignore weekends when calculating the number of days.
.El
.Sh FILE FORMAT
To handle calendars in your national code table you can specify
.Dq LANG=<locale_name>
in the calendar file as early as possible.
.Pp
To handle the local name of sequences, you can specify them as:
.Dq SEQUENCE=<first> <second> <third> <fourth> <fifth> <last>
in the calendar file as early as possible.
.Pp
The names of the following special days are recognized:
.Bl -tag -width 123456789012345 -compact
.It Easter
Catholic Easter.
.It Paskha
Orthodox Easter.
.It NewMoon
The lunar New Moon.
.It FullMoon
The lunar Full Moon.
.It MarEquinox
The solar equinox in March.
.It JunSolstice
The solar solstice in June.
.It SepEquinox
The solar equinox in September.
.It DecSolstice
The solar solstice in December.
.It ChineseNewYear
The first day of the Chinese year.
.El
These names may be reassigned to their local names via an assignment like
.Dq Easter=Pasen
in the calendar file.
.Pp
Other lines should begin with a month and day.
They may be entered in almost any format, either numeric or as character
strings.
If the proper locale is set, national month and weekday names can be used.
A single asterisk (``*'') matches every month.
A day without a month matches that day of every week.
A month without a day matches the first of that month.
Two numbers default to the month followed by the day.
Lines with leading tabs default to the last entered date, allowing
multiple line specifications for a single date.
.Pp
The names of the recognized special days may be followed by a
positive or negative integer, like:
.Dq Easter+3
or
.Dq Paskha-4 .
.Pp
Weekdays may be followed by ``-4'' ...\& ``+5'' (aliases for last, first,
second, third, fourth) for moving events like ``the last Monday in April''.
.Pp
By convention, dates followed by an asterisk are not fixed, i.e., change
from year to year.
.Pp
Day descriptions start after the first <tab> character in the line;
if the line does not contain a <tab> character, it is not displayed.
If the first character in the line is a <tab> character, it is treated
as a continuation of the previous line.
.Pp
The ``calendar'' file is preprocessed by
.Xr cpp 1 ,
allowing the inclusion of shared files such as lists of company holidays
or meetings.
If the shared file is not referenced by a full pathname,
.Xr cpp 1
searches in the current (or home) directory first, and then in the
directory
.Pa /usr/share/calendar .
Empty lines and lines protected by the C commenting syntax
.Pq Li /* ... */
are ignored.
.Pp
Some possible calendar entries (<tab> characters highlighted by
\fB\et\fR sequence)
.Bd -unfilled -offset indent
LANG=C
Easter=Ostern
#include <calendar.usholiday>
#include <calendar.birthday>
6/15\fB\et\fRJune 15 (if ambiguous, will default to month/day).
Jun. 15\fB\et\fRJune 15.
15 June\fB\et\fRJune 15.
Thursday\fB\et\fREvery Thursday.
June\fB\et\fREvery June 1st.
15 *\fB\et\fR15th of every month.
2010/4/15\fB\et\fR15 April 2010
May Sun+2\fB\et\fRsecond Sunday in May (Muttertag)
04/SunLast\fB\et\fRlast Sunday in April,
\fB\et\fRsummer time in Europe
Easter\fB\et\fREaster
Ostern-2\fB\et\fRGood Friday (2 days before Easter)
Paskha\fB\et\fROrthodox Easter
.Ed
.Sh FILES
.Bl -tag -width calendar.christian -compact
.It Pa calendar
-file in current directory
+file in current directory.
.It Pa ~/.calendar
.Pa calendar
HOME directory.
A chdir is done into this directory if it exists.
.It Pa ~/.calendar/calendar
calendar file to use if no calendar file exists in the current directory.
.It Pa ~/.calendar/nomail
do not send mail if this file exists.
.El
.Pp
The following default calendar files are provided in
.Pa /usr/share/calendar :
.Pp
.Bl -tag -width calendar.southafrica -compact
.It Pa calendar.all
File which includes all the default files.
.It Pa calendar.australia
Calendar of events in Australia.
.It Pa calendar.birthday
Births and deaths of famous (and not-so-famous) people.
.It Pa calendar.christian
Christian holidays.
This calendar should be updated yearly by the local system administrator
so that roving holidays are set correctly for the current year.
.It Pa calendar.computer
Days of special significance to computer people.
.It Pa calendar.croatian
Calendar of events in Croatia.
.It Pa calendar.dutch
Calendar of events in the Netherlands.
.It Pa calendar.freebsd
Birthdays of
.Fx
committers.
.It Pa calendar.french
Calendar of events in France.
.It Pa calendar.german
Calendar of events in Germany.
.It Pa calendar.history
Everything else, mostly U.S.\& historical events.
.It Pa calendar.holiday
Other holidays, including the not-well-known, obscure, and
.Em really
obscure.
.It Pa calendar.judaic
Jewish holidays.
The entries for this calendar have been obtained from the port
deskutils/hebcal.
.It Pa calendar.music
Musical events, births, and deaths.
Strongly oriented toward rock 'n' roll.
.It Pa calendar.newzealand
Calendar of events in New Zealand.
.It Pa calendar.russian
Russian calendar.
.It Pa calendar.southafrica
Calendar of events in South Africa.
.It Pa calendar.usholiday
U.S.\& holidays.
This calendar should be updated yearly by the local system administrator
so that roving holidays are set correctly for the current year.
.It Pa calendar.world
Includes all calendar files except for national files.
.El
.Sh COMPATIBILITY
The
.Nm
program previously selected lines which had the correct date anywhere
in the line.
This is no longer true: the date is only recognized when it occurs
at the beginning of a line.
.Sh SEE ALSO
.Xr at 1 ,
.Xr cpp 1 ,
.Xr mail 1 ,
.Xr cron 8
.Sh HISTORY
A
.Nm
command appeared in
.At v7 .
.Sh NOTES
Chinese New Year is calculated at 120 degrees east of Greenwich,
which roughly corresponds with the east coast of China.
For people west of China, this may mean that the start of Chinese New Year
and the day of the related new moon differ.
.Pp
The phases of the moon and the longitude of the sun are calculated against
the local position, which corresponds to 30 degrees times the time
difference towards Greenwich.
.Pp
The new and full moons happen on the day indicated; they may fall in the
early night or in the late evening.
This does not mean that they begin during the night on that date.
.Pp
Because of minor differences between the output of the formulas used and
other sources on the Internet, Druids and Werewolves should double-check
the start and end time of solar and lunar events.
.Sh BUGS
The
.Nm
utility does not handle Jewish holidays.
.Pp
It is not possible to properly specify the local position needed for
solar and lunar calculations.
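.\" A rough usage sketch of the options documented above, kept in comment
.\" form; the file names and dates are hypothetical:
.\"
.\"   calendar -A 7                       # today plus the next seven days
.\"   calendar -f ~/.calendar/birthdays   # read an alternate calendar file
.\"   calendar -t 24.12.2012 -B 2         # pretend today is 24 Dec 2012 and
.\"                                       # show the two previous days as well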
Index: projects/nand/usr.bin/calendar =================================================================== --- projects/nand/usr.bin/calendar (revision 235262) +++ projects/nand/usr.bin/calendar (revision 235263) Property changes on: projects/nand/usr.bin/calendar ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.bin/calendar:r235157-235262 Index: projects/nand/usr.bin/clang/clang/clang.1 =================================================================== --- projects/nand/usr.bin/clang/clang/clang.1 (revision 235262) +++ projects/nand/usr.bin/clang/clang/clang.1 (revision 235263) @@ -1,503 +1,503 @@ .\" $FreeBSD$ .\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .ie \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . nr % 0 . rr F .\} .el \{\ . de IX .. .\} .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . 
\" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "CLANG 1" .TH CLANG 1 "2012-04-05" "clang 3.1" "Clang Tools Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" clang \- the Clang C, C++, and Objective\-C compiler .SH "SYNOPSIS" .IX Header "SYNOPSIS" \&\fBclang\fR [\fB\-c\fR|\fB\-S\fR|\fB\-E\fR] \fB\-std=\fR\fIstandard\fR \fB\-g\fR [\fB\-O0\fR|\fB\-O1\fR|\fB\-O2\fR|\fB\-Os\fR|\fB\-Oz\fR|\fB\-O3\fR|\fB\-O4\fR] \fB\-W\fR\fIwarnings...\fR \fB\-pedantic\fR \fB\-I\fR\fIdir...\fR \fB\-L\fR\fIdir...\fR \fB\-D\fR\fImacro[=defn]\fR \fB\-f\fR\fIfeature-option...\fR \fB\-m\fR\fImachine-option...\fR \fB\-o\fR \fIoutput-file\fR \fB\-stdlib=\fR\fIlibrary\fR \fIinput-filenames\fR .SH "DESCRIPTION" .IX Header "DESCRIPTION" \&\fBclang\fR is a C, \*(C+, and Objective-C compiler which encompasses preprocessing, parsing, optimization, code generation, assembly, and linking. Depending on which high-level mode setting is passed, Clang will stop before doing a full link. While Clang is highly integrated, it is important to understand the stages of compilation, to understand how to invoke it. These stages are: .IP "\fBDriver\fR" 4 .IX Item "Driver" The \fBclang\fR executable is actually a small driver which controls the overall execution of other tools such as the compiler, assembler and linker. Typically you do not need to interact with the driver, but you transparently use it to run the other tools. .IP "\fBPreprocessing\fR" 4 .IX Item "Preprocessing" This stage handles tokenization of the input source file, macro expansion, #include expansion and handling of other preprocessor directives. The output of this stage is typically called a \*(L".i\*(R" (for C), \*(L".ii\*(R" (for \*(C+), \*(L".mi\*(R" (for Objective-C) , or \*(L".mii\*(R" (for Objective\-\*(C+) file. .IP "\fBParsing and Semantic Analysis\fR" 4 .IX Item "Parsing and Semantic Analysis" This stage parses the input file, translating preprocessor tokens into a parse tree. Once in the form of a parser tree, it applies semantic analysis to compute types for expressions as well and determine whether the code is well formed. This stage is responsible for generating most of the compiler warnings as well as parse errors. The output of this stage is an \*(L"Abstract Syntax Tree\*(R" (\s-1AST\s0). 
.IP "\fBCode Generation and Optimization\fR" 4 .IX Item "Code Generation and Optimization" This stage translates an \s-1AST\s0 into low-level intermediate code (known as \*(L"\s-1LLVM\s0 \&\s-1IR\s0\*(R") and ultimately to machine code. This phase is responsible for optimizing the generated code and handling target-specific code generation. The output of this stage is typically called a \*(L".s\*(R" file or \*(L"assembly\*(R" file. .Sp Clang also supports the use of an integrated assembler, in which the code generator produces object files directly. This avoids the overhead of generating the \*(L".s\*(R" file and of calling the target assembler. .IP "\fBAssembler\fR" 4 .IX Item "Assembler" This stage runs the target assembler to translate the output of the compiler into a target object file. The output of this stage is typically called a \*(L".o\*(R" file or \*(L"object\*(R" file. .IP "\fBLinker\fR" 4 .IX Item "Linker" This stage runs the target linker to merge multiple object files into an executable or dynamic library. The output of this stage is typically called an \&\*(L"a.out\*(R", \*(L".dylib\*(R" or \*(L".so\*(R" file. .PP The Clang compiler supports a large number of options to control each of these stages. In addition to compilation of code, Clang also supports other tools: .PP \&\fBClang Static Analyzer\fR .PP The Clang Static Analyzer is a tool that scans source code to try to find bugs through code analysis. This tool uses many parts of Clang and is built into the same driver. .SH "OPTIONS" .IX Header "OPTIONS" .SS "Stage Selection Options" .IX Subsection "Stage Selection Options" .IP "\fB\-E\fR" 4 .IX Item "-E" Run the preprocessor stage. .IP "\fB\-fsyntax\-only\fR" 4 .IX Item "-fsyntax-only" Run the preprocessor, parser and type checking stages. .IP "\fB\-S\fR" 4 .IX Item "-S" Run the previous stages as well as \s-1LLVM\s0 generation and optimization stages and target-specific code generation, producing an assembly file. .IP "\fB\-c\fR" 4 .IX Item "-c" Run all of the above, plus the assembler, generating a target \*(L".o\*(R" object file. .IP "\fBno stage selection option\fR" 4 .IX Item "no stage selection option" If no stage selection option is specified, all stages above are run, and the linker is run to combine the results into an executable or shared library. .IP "\fB\-\-analyze\fR" 4 .IX Item "--analyze" Run the Clang Static Analyzer. .SS "Language Selection and Mode Options" .IX Subsection "Language Selection and Mode Options" .IP "\fB\-x\fR \fIlanguage\fR" 4 .IX Item "-x language" Treat subsequent input files as having type \fIlanguage\fR. .IP "\fB\-std\fR=\fIlanguage\fR" 4 .IX Item "-std=language" Specify the language standard to compile for. .IP "\fB\-stdlib\fR=\fIlanguage\fR" 4 .IX Item "-stdlib=language" Specify the \*(C+ standard library to use; supported options are libstdc++ and libc++. .IP "\fB\-ansi\fR" 4 .IX Item "-ansi" Same as \fB\-std=c89\fR. .IP "\fB\-ObjC++\fR" 4 .IX Item "-ObjC++" Treat source input files as Objective\-\*(C+ inputs. .IP "\fB\-ObjC\fR" 4 .IX Item "-ObjC" Treat source input files as Objective-C inputs. .IP "\fB\-trigraphs\fR" 4 .IX Item "-trigraphs" Enable trigraphs. .IP "\fB\-ffreestanding\fR" 4 .IX Item "-ffreestanding" Indicate that the file should be compiled for a freestanding, not a hosted, environment. .IP "\fB\-fno\-builtin\fR" 4 .IX Item "-fno-builtin" Disable special handling and optimizations of builtin functions like strlen and malloc. 
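.\" A rough sketch of the stage selection options described above, kept in
.\" comment form; the file names are hypothetical and each command stops
.\" after the named stage:
.\"
.\"   clang -E hello.c -o hello.i    # preprocess only
.\"   clang -S hello.i -o hello.s    # compile to assembly
.\"   clang -c hello.s -o hello.o    # assemble to an object file
.\"   clang hello.o -o hello         # link into an executable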
.IP "\fB\-fmath\-errno\fR" 4 .IX Item "-fmath-errno" Indicate that math functions should be treated as updating errno. .IP "\fB\-fpascal\-strings\fR" 4 .IX Item "-fpascal-strings" Enable support for Pascal-style strings with \*(L"\epfoo\*(R". .IP "\fB\-fms\-extensions\fR" 4 .IX Item "-fms-extensions" Enable support for Microsoft extensions. .IP "\fB\-fmsc\-version=\fR" 4 .IX Item "-fmsc-version=" Set _MSC_VER. Defaults to 1300 on Windows. Not set otherwise. .IP "\fB\-fborland\-extensions\fR" 4 .IX Item "-fborland-extensions" Enable support for Borland extensions. .IP "\fB\-fwritable\-strings\fR" 4 .IX Item "-fwritable-strings" Make all string literals default to writable. This disables uniquing of strings and other optimizations. .IP "\fB\-flax\-vector\-conversions\fR" 4 .IX Item "-flax-vector-conversions" Allow loose type checking rules for implicit vector conversions. .IP "\fB\-fblocks\fR" 4 .IX Item "-fblocks" Enable the \*(L"Blocks\*(R" language feature. .IP "\fB\-fobjc\-gc\-only\fR" 4 .IX Item "-fobjc-gc-only" Indicate that Objective-C code should be compiled in GC-only mode, which only works when Objective-C Garbage Collection is enabled. .IP "\fB\-fobjc\-gc\fR" 4 .IX Item "-fobjc-gc" Indicate that Objective-C code should be compiled in hybrid-GC mode, which works with both \s-1GC\s0 and non-GC mode. .IP "\fB\-fobjc\-abi\-version\fR=\fIversion\fR" 4 .IX Item "-fobjc-abi-version=version" Select the Objective-C \s-1ABI\s0 version to use. Available versions are 1 (legacy \&\*(L"fragile\*(R" \s-1ABI\s0), 2 (non-fragile \s-1ABI\s0 1), and 3 (non-fragile \s-1ABI\s0 2). .IP "\fB\-fobjc\-nonfragile\-abi\-version\fR=\fIversion\fR" 4 .IX Item "-fobjc-nonfragile-abi-version=version" Select the Objective-C non-fragile \s-1ABI\s0 version to use by default. This will only be used as the Objective-C \s-1ABI\s0 when the non-fragile \s-1ABI\s0 is enabled (either via \&\-fobjc\-nonfragile\-abi, or because it is the platform default). .IP "\fB\-fobjc\-nonfragile\-abi\fR" 4 .IX Item "-fobjc-nonfragile-abi" Enable use of the Objective-C non-fragile \s-1ABI\s0. On platforms for which this is the default \s-1ABI\s0, it can be disabled with \fB\-fno\-objc\-nonfragile\-abi\fR. .SS "Target Selection Options" .IX Subsection "Target Selection Options" Clang fully supports cross compilation as an inherent part of its design. Depending on how your version of Clang is configured, it may have support for a number of cross compilers, or may only support a native target. .IP "\fB\-arch\fR \fIarchitecture\fR" 4 .IX Item "-arch architecture" Specify the architecture to build for. .IP "\fB\-mmacosx\-version\-min\fR=\fIversion\fR" 4 .IX Item "-mmacosx-version-min=version" When building for Mac \s-1OS/X\s0, specify the minimum version supported by your application. .IP "\fB\-miphoneos\-version\-min\fR" 4 .IX Item "-miphoneos-version-min" When building for iPhone \s-1OS\s0, specify the minimum version supported by your application. .IP "\fB\-march\fR=\fIcpu\fR" 4 .IX Item "-march=cpu" Specify that Clang should generate code for a specific processor family member and later. For example, if you specify \-march=i486, the compiler is allowed to generate instructions that are valid on i486 and later processors, but which may not exist on earlier ones. .SS "Code Generation Options" .IX Subsection "Code Generation Options" .IP "\fB\-O0\fR \fB\-O1\fR \fB\-O2\fR \fB\-Os\fR \fB\-Oz\fR \fB\-O3\fR \fB\-O4\fR" 4 .IX Item "-O0 -O1 -O2 -Os -Oz -O3 -O4" Specify which optimization level to use. 
\fB\-O0\fR means \*(L"no optimization\*(R": this level compiles the fastest and
generates the most debuggable code.
\fB\-O2\fR is a moderate level of optimization which enables most
optimizations.
\fB\-Os\fR is like \&\fB\-O2\fR with extra optimizations to reduce code size.
\fB\-Oz\fR is like \fB\-Os\fR (and thus \fB\-O2\fR), but reduces code size
further.
\fB\-O3\fR is like \fB\-O2\fR, except that it enables optimizations that take
longer to perform or that may generate larger code (in an attempt to make the
program run faster).
On supported platforms, \fB\-O4\fR enables link-time optimization; object
files are stored in the \s-1LLVM\s0 bitcode file format and whole program
optimization is done at link time.
\fB\-O1\fR is somewhere between \fB\-O0\fR and \fB\-O2\fR.
.IP "\fB\-g\fR" 4
.IX Item "-g"
Generate debug information.
Note that Clang debug information works best at \&\fB\-O0\fR.
At higher optimization levels, only line number information is currently
available.
.IP "\fB\-fexceptions\fR" 4
.IX Item "-fexceptions"
Enable generation of unwind information; this allows exceptions to be thrown
through Clang compiled stack frames.
This is on by default in x86\-64.
.IP "\fB\-ftrapv\fR" 4
.IX Item "-ftrapv"
Generate code to catch integer overflow errors.
Signed integer overflow is undefined in C; with this flag, extra code is
generated to detect this and abort when it happens.
.IP "\fB\-fvisibility\fR" 4
.IX Item "-fvisibility"
This flag sets the default visibility level.
.IP "\fB\-fcommon\fR" 4
.IX Item "-fcommon"
This flag specifies that variables without initializers get common linkage.
It can be disabled with \fB\-fno\-common\fR.
.IP "\fB\-flto\fR \fB\-emit\-llvm\fR" 4
.IX Item "-flto -emit-llvm"
Generate output files in \s-1LLVM\s0 formats, suitable for link time
optimization.
When used with \fB\-S\fR this generates \s-1LLVM\s0 intermediate language
assembly files; otherwise this generates \s-1LLVM\s0 bitcode format object
files (which may be passed to the linker depending on the stage selection
options).
.SS "Driver Options"
.IX Subsection "Driver Options"
.IP "\fB\-###\fR" 4
.IX Item "-###"
Print the commands to run for this compilation.
.IP "\fB\-\-help\fR" 4
.IX Item "--help"
Display available options.
.IP "\fB\-Qunused\-arguments\fR" 4
.IX Item "-Qunused-arguments"
Don't emit warnings for unused driver arguments.
.IP "\fB\-Wa,\fR\fIargs\fR" 4
.IX Item "-Wa,args"
Pass the comma-separated arguments in \fIargs\fR to the assembler.
.IP "\fB\-Wl,\fR\fIargs\fR" 4
.IX Item "-Wl,args"
Pass the comma-separated arguments in \fIargs\fR to the linker.
.IP "\fB\-Wp,\fR\fIargs\fR" 4
.IX Item "-Wp,args"
Pass the comma-separated arguments in \fIargs\fR to the preprocessor.
.IP "\fB\-Xanalyzer\fR \fIarg\fR" 4
.IX Item "-Xanalyzer arg"
Pass \fIarg\fR to the static analyzer.
.IP "\fB\-Xassembler\fR \fIarg\fR" 4
.IX Item "-Xassembler arg"
Pass \fIarg\fR to the assembler.
.IP "\fB\-Xlinker\fR \fIarg\fR" 4
.IX Item "-Xlinker arg"
Pass \fIarg\fR to the linker.
.IP "\fB\-Xpreprocessor\fR \fIarg\fR" 4
.IX Item "-Xpreprocessor arg"
Pass \fIarg\fR to the preprocessor.
.IP "\fB\-o\fR \fIfile\fR" 4
.IX Item "-o file"
Write output to \fIfile\fR.
.IP "\fB\-print\-file\-name\fR=\fIfile\fR" 4
.IX Item "-print-file-name=file"
Print the full library path of \fIfile\fR.
.IP "\fB\-print\-libgcc\-file\-name\fR" 4
.IX Item "-print-libgcc-file-name"
Print the library path for \*(L"libgcc.a\*(R".
.IP "\fB\-print\-prog\-name\fR=\fIname\fR" 4
.IX Item "-print-prog-name=name"
Print the full program path of \fIname\fR.
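.\" A rough sketch of some driver options from this section, kept in comment
.\" form; the file and library paths are hypothetical:
.\"
.\"   clang -### -c hello.c          # print the commands, do not run them
.\"   clang -save-temps -c hello.c   # keep hello.i and hello.s around
.\"   clang hello.c -Wl,-rpath,/opt/lib -o hello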
.IP "\fB\-print\-search\-dirs\fR" 4 .IX Item "-print-search-dirs" Print the paths used for finding libraries and programs. .IP "\fB\-save\-temps\fR" 4 .IX Item "-save-temps" Save intermediate compilation results. .IP "\fB\-integrated\-as\fR \fB\-no\-integrated\-as\fR" 4 .IX Item "-integrated-as -no-integrated-as" Used to enable and disable, respectively, the use of the integrated assembler. Whether the integrated assembler is on by default is target dependent. .IP "\fB\-time\fR" 4 .IX Item "-time" Time individual commands. .IP "\fB\-ftime\-report\fR" 4 .IX Item "-ftime-report" Print timing summary of each stage of compilation. .IP "\fB\-v\fR" 4 .IX Item "-v" Show commands to run and use verbose output. .SS "Diagnostics Options" .IX Subsection "Diagnostics Options" .IP "\fB\-fshow\-column\fR \fB\-fshow\-source\-location\fR \fB\-fcaret\-diagnostics\fR \fB\-fdiagnostics\-fixit\-info\fR \fB\-fdiagnostics\-parseable\-fixits\fR \fB\-fdiagnostics\-print\-source\-range\-info\fR \fB\-fprint\-source\-range\-info\fR \fB\-fdiagnostics\-show\-option\fR \fB\-fmessage\-length\fR" 4 .IX Item "-fshow-column -fshow-source-location -fcaret-diagnostics -fdiagnostics-fixit-info -fdiagnostics-parseable-fixits -fdiagnostics-print-source-range-info -fprint-source-range-info -fdiagnostics-show-option -fmessage-length" These options control how Clang prints out information about diagnostics (errors and warnings). Please see the Clang User's Manual for more information. .SS "Preprocessor Options" .IX Subsection "Preprocessor Options" .IP "\fB\-D\fR\fImacroname=value\fR" 4 .IX Item "-Dmacroname=value" Adds an implicit #define into the predefines buffer which is read before the source file is preprocessed. .IP "\fB\-U\fR\fImacroname\fR" 4 .IX Item "-Umacroname" Adds an implicit #undef into the predefines buffer which is read before the source file is preprocessed. .IP "\fB\-include\fR \fIfilename\fR" 4 .IX Item "-include filename" Adds an implicit #include into the predefines buffer which is read before the source file is preprocessed. .IP "\fB\-I\fR\fIdirectory\fR" 4 .IX Item "-Idirectory" Add the specified directory to the search path for include files. .IP "\fB\-F\fR\fIdirectory\fR" 4 .IX Item "-Fdirectory" Add the specified directory to the search path for framework include files. .IP "\fB\-nostdinc\fR" 4 .IX Item "-nostdinc" Do not search the standard system directories or compiler builtin directories for include files. .IP "\fB\-nostdlibinc\fR" 4 .IX Item "-nostdlibinc" Do not search the standard system directories for include files, but do search compiler builtin include directories. .IP "\fB\-nobuiltininc\fR" 4 .IX Item "-nobuiltininc" Do not search clang's builtin directory for include files. .SH "ENVIRONMENT" .IX Header "ENVIRONMENT" .IP "\fB\s-1TMPDIR\s0\fR, \fB\s-1TEMP\s0\fR, \fB\s-1TMP\s0\fR" 4 .IX Item "TMPDIR, TEMP, TMP" These environment variables are checked, in order, for the location to write temporary files used during the compilation process. .IP "\fB\s-1CPATH\s0\fR" 4 .IX Item "CPATH" If this environment variable is present, it is treated as a delimited list of paths to be added to the default system include path list. The -delimiter is the platform dependent delimitor, as used in the \fI\s-1PATH\s0\fR +delimiter is the platform dependent delimiter, as used in the \fI\s-1PATH\s0\fR environment variable. .Sp Empty components in the environment variable are ignored. 
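.\" A rough sketch of \s-1CPATH\s0 as described above, kept in comment form;
.\" the directories are hypothetical and ':' is the delimiter on
.\" \s-1POSIX\s0 platforms:
.\"
.\"   env CPATH=/opt/foo/include:/opt/bar/include clang -c hello.c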
.IP "\fBC_INCLUDE_PATH\fR, \fB\s-1OBJC_INCLUDE_PATH\s0\fR, \fB\s-1CPLUS_INCLUDE_PATH\s0\fR, \fB\s-1OBJCPLUS_INCLUDE_PATH\s0\fR" 4
.IX Item "C_INCLUDE_PATH, OBJC_INCLUDE_PATH, CPLUS_INCLUDE_PATH, OBJCPLUS_INCLUDE_PATH"
These environment variables specify additional paths, as for \s-1CPATH\s0,
which are only used when processing the appropriate language.
.IP "\fB\s-1MACOSX_DEPLOYMENT_TARGET\s0\fR" 4
.IX Item "MACOSX_DEPLOYMENT_TARGET"
If \-mmacosx\-version\-min is unspecified, the default deployment target is
read from this environment variable.
This option only affects darwin targets.
.SH "BUGS"
.IX Header "BUGS"
To report bugs, please visit <http://llvm.org/bugs/>.
Most bug reports should include preprocessed source files (use the \fB\-E\fR
option) and the full output of the compiler, along with information to
reproduce.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
.Vb 1
\& as(1), ld(1)
.Ve
.SH "AUTHOR"
.IX Header "AUTHOR"
Maintained by the Clang / \s-1LLVM\s0 Team (<http://clang.llvm.org>).
Index: projects/nand/usr.bin/clang/tblgen/tblgen.1
===================================================================
--- projects/nand/usr.bin/clang/tblgen/tblgen.1 (revision 235262)
+++ projects/nand/usr.bin/clang/tblgen/tblgen.1 (revision 235263)
@@ -1,233 +1,233 @@
.\" $FreeBSD$
.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. \*(C+ will
.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
.\" nothing in troff, for use with C<>.
.tr \(*W-
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" Escape single quotes in literal strings from groff's Unicode transform.
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.ie \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.el \{\
. de IX
..
.\}
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "TBLGEN 1" .TH TBLGEN 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" tblgen \- Target Description To C++ Code Generator .SH "SYNOPSIS" .IX Header "SYNOPSIS" \&\fBtblgen\fR [\fIoptions\fR] [\fIfilename\fR] .SH "DESCRIPTION" .IX Header "DESCRIPTION" \&\fBtblgen\fR translates from target description (.td) files into \*(C+ code that can be included in the definition of an \s-1LLVM\s0 target library. Most users of \s-1LLVM\s0 will not need to use this program. It is only for assisting with writing an \s-1LLVM\s0 target backend. .PP The input and output of \fBtblgen\fR is beyond the scope of this short introduction. Please see the \fICodeGeneration\fR page in the \s-1LLVM\s0 documentation. .PP The \fIfilename\fR argument specifies the name of a Target Description (.td) file to read as input. .SH "OPTIONS" .IX Header "OPTIONS" .IP "\fB\-help\fR" 4 .IX Item "-help" Print a summary of command line options. .IP "\fB\-o\fR \fIfilename\fR" 4 .IX Item "-o filename" Specify the output file name. If \fIfilename\fR is \f(CW\*(C`\-\*(C'\fR, then \fBtblgen\fR sends its output to standard output. .IP "\fB\-I\fR \fIdirectory\fR" 4 .IX Item "-I directory" Specify where to find other target description files for inclusion. The \&\fIdirectory\fR value should be a full or partial path to a directory that contains target description files. .IP "\fB\-asmparsernum\fR \fIN\fR" 4 .IX Item "-asmparsernum N" Make \-gen\-asm\-parser emit assembly parser number \fIN\fR. .IP "\fB\-asmwriternum\fR \fIN\fR" 4 .IX Item "-asmwriternum N" Make \-gen\-asm\-writer emit assembly writer number \fIN\fR. .IP "\fB\-class\fR \fIclass Name\fR" 4 .IX Item "-class class Name" Print the enumeration list for this class. .IP "\fB\-print\-records\fR" 4 .IX Item "-print-records" Print all records to standard output (default). .IP "\fB\-print\-enums\fR" 4 .IX Item "-print-enums" -Print enumeration values for a class +Print enumeration values for a class. .IP "\fB\-print\-sets\fR" 4 .IX Item "-print-sets" Print expanded sets for testing \s-1DAG\s0 exprs. .IP "\fB\-gen\-emitter\fR" 4 .IX Item "-gen-emitter" Generate machine code emitter. .IP "\fB\-gen\-register\-info\fR" 4 .IX Item "-gen-register-info" Generate registers and register classes info.
.IP "\fB\-gen\-instr\-info\fR" 4 .IX Item "-gen-instr-info" Generate instruction descriptions. .IP "\fB\-gen\-asm\-writer\fR" 4 .IX Item "-gen-asm-writer" Generate the assembly writer. .IP "\fB\-gen\-disassembler\fR" 4 .IX Item "-gen-disassembler" Generate disassembler. .IP "\fB\-gen\-pseudo\-lowering\fR" 4 .IX Item "-gen-pseudo-lowering" Generate pseudo instruction lowering. .IP "\fB\-gen\-dag\-isel\fR" 4 .IX Item "-gen-dag-isel" Generate a \s-1DAG\s0 (Directed Acycle Graph) instruction selector. .IP "\fB\-gen\-asm\-matcher\fR" 4 .IX Item "-gen-asm-matcher" Generate assembly instruction matcher. .IP "\fB\-gen\-dfa\-packetizer\fR" 4 .IX Item "-gen-dfa-packetizer" Generate \s-1DFA\s0 Packetizer for \s-1VLIW\s0 targets. .IP "\fB\-gen\-fast\-isel\fR" 4 .IX Item "-gen-fast-isel" Generate a \*(L"fast\*(R" instruction selector. .IP "\fB\-gen\-subtarget\fR" 4 .IX Item "-gen-subtarget" Generate subtarget enumerations. .IP "\fB\-gen\-intrinsic\fR" 4 .IX Item "-gen-intrinsic" Generate intrinsic information. .IP "\fB\-gen\-tgt\-intrinsic\fR" 4 .IX Item "-gen-tgt-intrinsic" Generate target intrinsic information. .IP "\fB\-gen\-enhanced\-disassembly\-info\fR" 4 .IX Item "-gen-enhanced-disassembly-info" Generate enhanced disassembly info. .IP "\fB\-version\fR" 4 .IX Item "-version" Show the version number of this program. .SH "EXIT STATUS" .IX Header "EXIT STATUS" If \fBtblgen\fR succeeds, it will exit with 0. Otherwise, if an error occurs, it will exit with a non-zero value. .SH "AUTHORS" .IX Header "AUTHORS" Maintained by The \s-1LLVM\s0 Team (). Index: projects/nand/usr.bin/csup/csup.1 =================================================================== --- projects/nand/usr.bin/csup/csup.1 (revision 235262) +++ projects/nand/usr.bin/csup/csup.1 (revision 235263) @@ -1,996 +1,996 @@ .\" Copyright 1996-2003 John D. Polstra. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.\" .\" $Id: cvsup.1,v 1.70 2003/03/04 18:23:46 jdp Exp $ .\" $FreeBSD$ .\" .Dd February 1, 2006 .Dt CSUP 1 .Os FreeBSD .Sh NAME .Nm csup .Nd network distribution package for CVS repositories .Sh SYNOPSIS .Nm .Op Fl 146aksvzZ .Op Fl A Ar addr .Op Fl b Ar base .Op Fl c Ar collDir .Op Fl d Ar delLimit .Op Fl h Ar host .Op Fl i Ar pattern .Op Fl l Ar lockfile .Op Fl L Ar verbosity .Op Fl p Ar port .Op Fl r Ar maxRetries .Ar supfile .Sh DESCRIPTION .Nm is a software package for updating collections of files across a network. It is a rewrite of the .Nm CVSup software in C. This manual page describes the usage of the .Nm client program. .Pp Unlike more traditional network distribution packages, such as .Nm rdist and .Nm sup , .Nm has specific optimizations for distributing CVS repositories. .Nm takes advantage of the properties of CVS repositories and the files they contain (in particular, RCS files), enabling it to perform updates much faster than traditional systems. .Pp .Nm is a general-purpose network file updating package. It is extremely fast, even for collections of files which have nothing to do with CVS or RCS. .Sh OPTIONS The client program .Nm requires at least a single argument, .Ar supfile . It names a file describing one or more collections of files to be transferred and/or updated from the server. The .Ar supfile has a format similar to the corresponding file used by .Nm sup . In most cases, .Nm can use existing .Nm sup Ar supfiles . .Pp The following options are supported by .Nm : .Bl -tag -width Fl .It Fl 1 Disables automatic retries when transient failures occur. Without this option, a transient failure such as a dropped network connection causes .Nm to retry repeatedly, using randomized exponential backoff to space the retries. This option is equivalent to .Fl r Cm 0 . .It Fl 4 Forces .Nm to use IPv4 addresses only. .It Fl 6 Forces .Nm to use IPv6 addresses only. .It Fl a Requires the server to authenticate itself (prove its identity) to the client. If authentication of the server fails, the update is canceled. See .Sx AUTHENTICATION , below. .It Fl A Ar addr Specifies a local address to bind to when connecting to the server. The local address might be a hostname or a numeric host address string consisting of a dotted decimal IPv4 address or an IPv6 address. This may be useful on hosts which have multiple IP addresses. .It Fl b Ar base Specifies the base directory under which .Nm will maintain its bookkeeping files, overriding any .Cm base specifications in the .Ar supfile . .It Fl c Ar collDir Specifies the subdirectory of .Ar base where the information about the collections is maintained. The default is .Pa sup . .It Fl d Ar delLimit Specifies the maximum number of files that may be deleted in a single update run. Any attempt to exceed the limit results in a fatal error. This can provide some protection against temporary configuration mistakes on the server. The default limit is infinity. .It Fl h Ar host Specifies the server host to contact, overriding any .Cm host specifications in the .Ar supfile . .It Fl i Ar pattern Causes .Nm to include only files and directories matching .Ar pattern in the update. If a directory matches the pattern, then the entire subtree rooted at the directory is included. If this option is specified multiple times, the patterns are combined using the .Ql or operation. If no .Fl i options are given, the default is to update all files in each collection. .Pp The .Ar pattern is a standard file name pattern. 
It is interpreted relative to the collection's prefix directory. Slash characters are matched only by explicit slashes in the pattern. Leading periods in file names are not treated specially. .It Fl k Causes .Nm to keep the temporary copies of any incorrectly edited files, in the event of checksum mismatches. This option is for debugging, to help determine why the files were edited incorrectly. Regardless of whether this option is specified, the permanent versions of faulty files are replaced with correct versions obtained by transferring the files in their entirety. Such transfers are called fixups. .It Fl l Ar lockfile Creates and locks the .Ar lockfile while the update is in progress. If .Ar lockfile is already locked, .Nm fails without performing automatic retries. This option is useful when .Nm is executed periodically from .Nm cron . It prevents a job from interfering with an earlier job that is perhaps taking extra long because of network problems. .Pp The process-ID is written to the lock file in text form when the lock is successfully acquired. Upon termination of the update, the lock file is removed. .It Fl L Ar verbosity Sets the verbosity level for output. A level of 0 causes .Nm to be completely silent unless errors occur. A level of 1 (the default) causes each updated file to be listed. A level of 2 provides more detailed information about the updates performed on each file. All messages are directed to the standard output. .It Fl p Ar port Sets the TCP port to which .Nm attempts to connect on the server host. The default port is 5999. .It Fl r Ar maxRetries Limits the number of automatic retries that will be attempted when transient errors such as lost network connections are encountered. By default, .Nm will retry indefinitely until an update is successfully completed. The retries are spaced using randomized exponential backoff. Note that .Fl r Cm 0 is equivalent to the .Fl 1 option. .It Fl s Suppresses the check of each client file's status against what is recorded in the list file. Instead, the list file is assumed to be accurate. This option greatly reduces the amount of disk activity and results in faster updates with less load on the client host. However, it should only be used if the client's files are never modified locally in any way. Mirror sites may find this option beneficial to reduce the disk load on their systems. For safety, even mirror sites should run .Nm occasionally (perhaps once a day) without the .Fl s option. .Pp Without the .Fl s option, .Nm performs a .Xr stat 2 call on each file and verifies that its attributes match those recorded in the list file. This ensures that any file changes made outside of .Nm are detected and corrected. .Pp If the .Fl s option is used when one or more files have been modified locally, the results are undefined. Local file damage may remain uncorrected, updates may be missed, or .Nm may abort prematurely. .It Fl v Prints the version number and exits, without contacting the server. .It Fl z Enables compression for all collections, as if the .Cm compress keyword were added to every collection in the .Ar supfile . .It Fl Z Disables compression for all collections, as if the .Cm compress keyword were removed from every collection in the .Ar supfile . .El .Pp The .Ar supfile is a text file which specifies the file collections to be updated. Comments begin with .Ql # and extend to the end of the line. Lines that are empty except for comments and white space are ignored.
Each remaining line begins with the name of a server-defined collection of files. Following the collection name on the line are zero or more keywords or keyword=value pairs. .Pp Default settings may be specified in lines whose collection name is .Cm *default . Such defaults will apply to subsequent lines in the .Ar supfile . Multiple .Cm *default lines may be present. New values augment or override any defaults specified earlier in the .Ar supfile . Values specified explicitly for a collection override any default values. .Pp The most commonly used keywords are: .Bl -tag -width Fl .It Cm release= Ns Ar releaseName This specifies the release of the files within a collection. Like collection names, release names are defined by the server configuration files. Usually there is only one release in each collection, but there may be any number. Collections which come from a CVS repository often use .Cm release=cvs by convention. Non-CVS collections conventionally use .Cm release=current . .It Cm base= Ns Ar base This specifies a directory under which .Nm will maintain its bookkeeping files, describing the state of each collection on the client machine. The .Ar base directory must already exist; .Nm will not create it. The default .Ar base directory is -.Pa /usr/local/etc/csup . +.Pa /usr/local/etc/cvsup . .It Cm prefix= Ns Ar prefix This is the directory under which updated files will be placed. By default, it is the same as .Ar base . If it is not an absolute pathname, it is interpreted relative to .Ar base . The .Ar prefix directory must already exist; .Nm will not create it. .Pp As a special case, if .Ar prefix is a symbolic link pointing to a nonexistent file named .Ql SKIP , then .Nm will skip the collection. The parameters associated with the collection are still checked for validity, but none of its files will be updated. This feature allows a site to use a standard .Ar supfile on several machines, yet control which collections get updated on a per-machine basis. .It Cm host= Ns Ar hostname This specifies the server machine from which all files will be taken. .Nm requires that all collections in a single run come from the same host. If you wish to update collections from several different hosts, you must run .Nm several times. .It Cm delete The presence of this keyword gives .Nm permission to delete files. If it is missing, no files will be deleted. .Pp The presence of the .Cm delete keyword puts .Nm into so-called .Em exact mode. In exact mode, .Nm does its best to make the client's files correspond to those on the server. This includes deleting individual deltas and symbolic tags from RCS files, as well as deleting entire files. In exact mode, .Nm verifies every edited file with a checksum, to ensure that the edits have produced a file identical to the master copy on the server. If the checksum test fails for a file, then .Nm falls back upon transferring the entire file. .Pp In general, .Nm deletes only files which are known to the server. Extra files present in the client's tree are left alone, even in exact mode. More precisely, .Nm is willing to delete two classes of files: .Bl -bullet -compact .It Files that were previously created or updated by .Nm itself. .It Checked-out versions of files which are marked as dead on the server. .El .It Cm use-rel-suffix Causes .Nm to append a suffix constructed from the release and tag to the name of each list file that it maintains. See .Sx THE LIST FILE for details. .It Cm compress This enables compression of all data sent across the network. 
Compression is quite effective, normally eliminating 65% to 75% of the bytes that would otherwise need to be transferred. However, it is costly in terms of CPU time on both the client and the server. On local area networks, compression is generally counter-productive; it actually slows down file updates. On links with speeds of 56K bits/second or less, compression is almost always beneficial. For network links with speeds between these two extremes, let experimentation be your guide. .Pp The .Fl z command line option enables the .Cm compress keyword for all collections, regardless of what is specified in the supfile. Likewise, the .Fl Z command line option disables the .Cm compress option for all collections. .Nm uses a looser checksum for RCS files, which ignores harmless differences in white space. Different versions of CVS and RCS produce a variety of differences in white space for the same RCS files. Thus the strict checksum can report spurious mismatches for files which are logically identical. This can lead to numerous unneeded .Dq fixups , and thus to slow updates. .It Cm umask= Ns Ar n Causes .Nm to use a umask value of .Ar n (an octal number) when updating the files in the collection. This option is ignored if .Cm preserve is specified. .El .Pp Some additional, more specialized keywords are described below. Unrecognized keywords are silently ignored for backward compatibility with .Nm sup . .Sh CVS MODE .Nm CVSup supports two primary modes of operation. They are called .Em CVS mode and .Em checkout mode. .Pp In CVS mode, the client receives copies of the actual RCS files making up the master CVS repository. CVS mode is the default mode of operation. It is appropriate when the user wishes to maintain a full copy of the CVS repository on the client machine. .Pp CVS mode is also appropriate for file collections which are not based upon a CVS repository. The files are simply transferred verbatim, without interpretation. .Sh CHECKOUT MODE In checkout mode, the client receives specific revisions of files, checked out directly from the server's CVS repository. Checkout mode allows the client to receive any version from the repository, without requiring any extra disk space on the server for storing multiple versions in checked-out form. Checkout mode provides much flexibility beyond that basic functionality, however. The client can specify any CVS symbolic tag, or any date, or both, and .Nm will provide the corresponding checked-out versions of the files in the repository. .Pp Checkout mode is selected on a per-collection basis, by the presence of one or both of the following keywords in the .Ar supfile : .Bl -tag -width Fl .It Cm tag= Ns Ar tagname This specifies a symbolic tag that should be used to select the revisions that are checked out from the CVS repository. The tag may refer to either a branch or a specific revision. It must be symbolic; numeric revision numbers are not supported. .Pp For the FreeBSD source repository, the most commonly used tags will be: .Bl -tag -width RELENG_6 .It Li RELENG_6 The .Ql stable branch. .It Li \&. The main branch (the .Ql current release). This is the default, if only the .Cm date keyword is given. .El .Sm off .It Xo Cm date= .Op Ar cc .Ar yy.mm.dd.hh.mm.ss .Xc .Sm on This specifies a date that should be used to select the revisions that are checked out from the CVS repository. The client will receive the revisions that were in effect at the specified date and time. .Pp At present, the date format is inflexible. 
All 17 or 19 characters must be specified, exactly as shown. For the years 2000 and beyond, specify the century .Ar cc . For earlier years, specify only the last two digits .Ar yy . Dates and times are considered to be GMT. The default date is .Ql \&. , which means .Dq as late as possible . .El .Pp To enable checkout mode, you must specify at least one of these keywords. If both are missing, .Nm defaults to CVS mode. .Pp If both a branch tag and a date are specified, then the revisions on the given branch, as of the given date, will be checked out. It is permitted, but not particularly useful, to specify a date with a specific release tag. .Pp In checkout mode, the tag and/or date may be changed between updates. For example, suppose that a collection has been transferred using the specification .Ql tag=. . The user could later change the specification to .Ql tag=RELENG_3 . This would cause .Nm to edit the checked-out files in such a way as to transform them from the .Ql current versions to the .Ql stable versions. In general, .Nm is willing to transform any tag/date combination into any other tag/date combination, by applying the intervening RCS deltas to the existing files. .Pp When transforming a collection of checked-out files from one tag to another, it is important to specify the .Cm list keyword in the .Ar supfile , to ensure that the same list file is used both before and after the transformation. The list file is described in .Sx THE LIST FILE , below. .Sh THE LIST FILE For efficiency, .Nm maintains a bookkeeping file for each collection, called the list file. The list file contains information about which files and revisions the client currently possesses. It also contains information used for verifying that the list file is consistent with the actual files in the client's tree. .Pp The list file is not strictly necessary. If it is deleted, or becomes inconsistent with the actual client files, .Nm falls back upon a less efficient method of identifying the client's files and performing its updates. Depending on .Nm csup Ns No 's mode of operation, the fallback method employs time stamps, checksums, or analysis of RCS files. .Pp Because the list file is not essential, .Nm is able to .Dq adopt an existing file tree acquired by FTP or from a CD-ROM. .Nm identifies the client's versions of the files, updates them as necessary, and creates a list file for future use. Adopting a foreign file tree is not as fast as performing a normal update. It also produces a heavier load on the server. .Pp The list file is stored in a collection-specific directory; see .Sx FILES for details. Its name always begins with .Ql checkouts . If the keyword .Cm use-rel-suffix is specified in the .Ar supfile , a suffix, formed from the release and tag, is appended to the name. The default suffix can be overridden by specifying an explicit suffix in the .Ar supfile : .Bl -tag -width Fl .It Cm list= Ns Ar suffix This specifies a suffix for the name of the list file. A leading dot is provided automatically. For example, .Ql list=stable would produce a list file named .Pa checkouts.stable , regardless of the release, tag, or .Cm use-rel-suffix keyword. .El .Sh REFUSE FILES The user can specify sets of files that he does not wish to receive. The files are specified as file name patterns in so-called .Em refuse files. The patterns are separated by whitespace, and multiple patterns are permitted on each line. Files and directories matching the patterns are neither updated nor deleted; they are simply ignored. 
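.Pp
As a purely illustrative sketch (the subtree names are arbitrary examples, not part of any real collection), a refuse file containing the single line
.Bd -literal -offset indent
ports/* www/*
.Ed
.Pp
would cause both subtrees to be ignored, since, as noted above, several whitespace-separated patterns may share one line.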
.Pp There is currently no provision for comments in refuse files. .Pp The patterns are similar to those of .Xr sh 1 , except that there is no special treatment for slashes or for filenames that begin with a period. For example, the pattern .Ql *.c will match any file name ending with .Ql \&.c including those in subdirectories, such as .Ql foo/bar/lam.c . All patterns are interpreted relative to the collection's prefix directory. .Pp If the files are coming from a CVS repository, as is usually the case, then they will be RCS files. These have a .Ql \&,v suffix which must be taken into account in the patterns. For example, the FreeBSD documentation files are in a sub-directory of .Ar base called .Ql doc . If .Ql Makefile from that directory is not required then the line .Pp .Bl -item -compact -offset indent .It .Pa doc/Makefile .El .Pp will not work because the file on the server is called -.Ql Makefile,v. +.Ql Makefile,v . A better solution would be .Pp .Bl -item -compact -offset indent .It .Pa doc/Makefile* .El .Pp which will match whether .Ql Makefile is an RCS file or not. .Pp As another example, to receive the FreeBSD documentation files without the Japanese, Russian, and Chinese translations, create a refuse file containing the following lines: .Pp .Bl -item -compact -offset indent .It .Pa doc/ja* .It .Pa doc/ru* .It .Pa doc/zh* .El .Pp As many as three refuse files are examined for each .Ar supfile line. There can be a global refuse file named .Sm off .Ar base / Ar collDir Pa /refuse .Sm on which applies to all collections and releases. There can be a per-collection refuse file named .Sm off .Xo Ar base / Ar collDir / Ar collection .Pa /refuse .Xc .Sm on which applies to a specific collection. Finally, there can be a per-release and tag refuse file which applies only to a given release/tag combination within a collection. The name of the latter is formed by suffixing the name of the per-collection refuse file in the same manner as described above for the list file. None of the refuse files are required to exist. .Pp .Nm has a built-in default value of .Ar /usr/local/etc/cvsup for .Ar base and .Ar sup for .Ar collDir but it is possible to override both of these. The value of .Ar base can be changed using the .Fl b option or a .Ar base=pathname entry in the .Ar supfile . (If both are used the .Fl b option will override the .Ar supfile entry.) The value of .Ar collDir can only be changed with the .Fl c option; there is no .Ar supfile command to change it. .Pp As an example, suppose that the .Ar base and .Ar collDir both have their default values, and that the collection and release are .Ql src-all and .Ql cvs , respectively. Assume further that checkout mode is being used with .Ql tag=RELENG_3 . 
The three possible refuse files would then be named: .Pp .Bl -item -compact -offset indent .It .Pa /usr/local/etc/cvsup/sup/refuse .It .Pa /usr/local/etc/cvsup/sup/src-all/refuse .It .Pa /usr/local/etc/cvsup/sup/src-all/refuse.cvs:RELENG_3 .El .Pp If the .Ar supfile includes the command .Ar base=/foo the refuse files would be: .Pp .Bl -item -compact -offset indent .It .Pa /foo/sup/refuse .It .Pa /foo/sup/src-all/refuse .It .Pa /foo/sup/src-all/refuse.cvs:RELENG_3 .El .Pp If .Fl b .Ar /bar is used (even with .Ar base=/foo in the .Ar supfile ) : .Pp .Bl -item -compact -offset indent .It .Pa /bar/sup/refuse .It .Pa /bar/sup/src-all/refuse .It .Pa /bar/sup/src-all/refuse.cvs:RELENG_3 .El .Pp and with .Fl c .Ar stool as well: .Pp .Bl -item -compact -offset indent .It .Pa /bar/stool/refuse .It .Pa /bar/stool/src-all/refuse .It .Pa /bar/stool/src-all/refuse.cvs:RELENG_3 .El .Sh AUTHENTICATION .Nm implements an optional authentication mechanism which can be used by the client and server to verify each other's identities. Public CVSup servers normally do not enable authentication. .Nm users may ignore this section unless they have been informed that authentication is required by the administrator of their server. .Pp The authentication subsystem uses a challenge-response protocol which is immune to packet sniffing and replay attacks. No passwords are sent over the network in either direction. Both the client and the server can independently verify the identities of each other. .Pp The file .Li $ Ns Ev HOME Ns Pa /.csup/auth holds the information used for authentication. This file contains a record for each server that the client is allowed to access. Each record occupies one line in the file. Lines beginning with .Ql # are ignored, as are lines containing only white space. White space is significant everywhere else in the file. Fields are separated by .Ql \&: characters. .Pp Each record of the file has the following form: .Bd -literal -offset indent .Sm off .Xo Ar serverName No : Ar clientName No : .Ar password No : Ar comment .Xc .Sm on .Ed .Pp All fields must be present even if some of them are empty. .Ar ServerName is the name of the server to which the record applies. By convention, it is the canonical fully-qualified domain name of the server, e.g., .Ql CVSup177.FreeBSD.ORG . This must agree with the server's own idea of its name. The name is case-insensitive. .Pp .Ar ClientName is the name the client uses to gain access to the server. By convention, e-mail addresses are used for all client names, e.g., .Ql BillyJoe@FreeBSD.org . Client names are case-insensitive. .Pp .Ar Password is a secret string of characters that the client uses to prove its identity. It may not contain any .Ql \&: or newline characters. .Pp .Ar Comment may contain any additional information to identify the record. It is not interpreted by the program. .Pp To set up authentication for a given server, one must perform the following steps: .Bl -enum .It Obtain the official .Ar serverName from the administrator of the server or from some other source. .It Choose an appropriate .Ar clientName . It should be in the form of a valid e-mail address, to make it easy for the server administrator to contact the user if necessary. .It Choose an arbitrary secret .Ar password . .It Run the .Nm cpasswd utility, and type in the .Ar password when prompted for it. The utility will print out a line to send to the server administrator, and instruct you how to modify your .Li $ Ns Ev HOME Ns Pa /.csup/auth file. 
You should use a secure channel to send the line to the server administrator. .El .Pp Since .Li $ Ns Ev HOME Ns Pa /.csup/auth contains passwords, you should ensure that it is not readable by anyone except yourself. .Pp Authentication works independently in both directions. The server administrator controls whether you must prove your identity. You control whether to check the server's identity, by means of the .Fl a command line option. .Sh csup AND FIREWALLS In its default mode, .Nm will work through any firewall which permits outbound connections to port 5999 of the server host. .Sh USING csup WITH SOCKS .Nm can be used through a SOCKS proxy server with the standard .Nm runsocks command. Your .Nm executable needs to be dynamically-linked with the system libraries for .Nm runsocks to work properly. .Sh USING ssh PORT FORWARDING As an alternative to SOCKS, a user behind a firewall can penetrate it with the TCP port forwarding provided by the Secure Shell package .Nm ssh . The user must have a login account on the .Nm CVSup server host in order to do this. The procedure is as follows: .Bl -enum .It Establish a connection to the server host with .Nm ssh , like this: .Bd -literal ssh -f -x -L 5999:localhost:5999 serverhost sleep 60 .Ed .Pp Replace .Ar serverhost with the hostname of the CVSup server, but type .Ql localhost literally. This sets up the required port forwarding. You must start .Nm before the 60-second .Nm sleep finishes. Once the update has begun, .Nm ssh will keep the forwarded channels open as long as they are needed. .It Run .Nm on the local host, including the arguments .Ql -h localhost on the command line. .El .Sh FILES .Bl -tag -width base/sup/collection/checkouts*xx -compact .It Pa /usr/local/etc/cvsup Default .Ar base directory. .It Pa sup Default .Ar collDir subdirectory. .Sm off .It Xo Ar base / Ar collDir / Ar collection .Pa /checkouts* .Xc .Sm on List files. .El .Sh SEE ALSO .Xr cpasswd 1 , .Xr cvs 1 , .Xr rcsintro 1 , .Xr ssh 1 . .Sh AUTHORS .An -nosplit .An Maxime Henrion Aq mux@FreeBSD.org is the author of .Nm , the rewrite of .Nm CVSup in C. .An John Polstra Aq jdp@polstra.com is the author of .Nm CVSup . .Sh LEGALITIES CVSup is a registered trademark of John D. Polstra. .Pp .Nm is released under a 2-clause BSD license. .Sh BUGS An RCS file is not recognized as such unless its name ends with .Ql \&,v . .Pp Any directory named .Ql Attic is assumed to be a CVS Attic, and is treated specially. Index: projects/nand/usr.bin/csup =================================================================== --- projects/nand/usr.bin/csup (revision 235262) +++ projects/nand/usr.bin/csup (revision 235263) Property changes on: projects/nand/usr.bin/csup ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.bin/csup:r235157-235262 Index: projects/nand/usr.bin/fetch/fetch.1 =================================================================== --- projects/nand/usr.bin/fetch/fetch.1 (revision 235262) +++ projects/nand/usr.bin/fetch/fetch.1 (revision 235263) @@ -1,310 +1,310 @@ .\"- .\" Copyright (c) 2000-2012 Dag-Erling Smørgrav .\" All rights reserved. .\" Portions Copyright (c) 1999 Massachusetts Institute of Technology; used .\" by permission. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1.
Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer .\" in this position and unchanged. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd February 28, 2012 .Dt FETCH 1 .Os .Sh NAME .Nm fetch .Nd retrieve a file by Uniform Resource Locator .Sh SYNOPSIS .Nm .Op Fl 146AadFlMmnPpqRrsUv .Op Fl B Ar bytes .Op Fl i Ar file .Op Fl N Ar file .Op Fl o Ar file .Op Fl S Ar bytes .Op Fl T Ar seconds .Op Fl w Ar seconds .Ar URL ... .Nm .Op Fl 146AadFlMmnPpqRrsUv .Op Fl B Ar bytes .Op Fl i Ar file .Op Fl N Ar file .Op Fl o Ar file .Op Fl S Ar bytes .Op Fl T Ar seconds .Op Fl w Ar seconds .Fl h Ar host Fl f Ar file Oo Fl c Ar dir Oc .Sh DESCRIPTION The .Nm utility provides a command-line interface to the .Xr fetch 3 library. Its purpose is to retrieve the file(s) pointed to by the URL(s) on the command line. .Pp The following options are available: .Bl -tag -width Fl .It Fl 1 Stop and return exit code 0 at the first successfully retrieved file. .It Fl 4 Forces .Nm to use IPv4 addresses only. .It Fl 6 Forces .Nm to use IPv6 addresses only. .It Fl A Do not automatically follow ``temporary'' (302) redirects. Some broken Web sites will return a redirect instead of a not-found error when the requested object does not exist. .It Fl a Automatically retry the transfer upon soft failures. .It Fl B Ar bytes Specify the read buffer size in bytes. The default is 4096 bytes. Attempts to set a buffer size lower than this will be silently ignored. The number of reads actually performed is reported at verbosity level two or higher (see the .Fl v flag). .It Fl c Ar dir The file to retrieve is in directory .Ar dir on the remote host. This option is deprecated and is provided for backward compatibility only. .It Fl d Use a direct connection even if a proxy is configured. .It Fl F In combination with the .Fl r flag, forces a restart even if the local and remote files have different modification times. Implies .Fl R . .It Fl f Ar file The file to retrieve is named .Ar file on the remote host. This option is deprecated and is provided for backward compatibility only. .It Fl h Ar host The file to retrieve is located on the host .Ar host . This option is deprecated and is provided for backward compatibility only. .It Fl i Ar file If-Modified-Since mode: the remote file will only be retrieved if it is newer than .Ar file on the local host. 
(HTTP only) .It Fl l If the target is a file-scheme URL, make a symbolic link to the target rather than trying to copy it. .It Fl M .It Fl m Mirror mode: if the file already exists locally and has the same size and modification time as the remote file, it will not be fetched. Note that the .Fl m and .Fl r flags are mutually exclusive. .It Fl N Ar file Use .Ar file instead of .Pa ~/.netrc to look up login names and passwords for FTP sites. See .Xr ftp 1 for a description of the file format. This feature is experimental. .It Fl n Do not preserve the modification time of the transferred file. .It Fl o Ar file Set the output file name to .Ar file . By default, a ``pathname'' is extracted from the specified URI, and its basename is used as the name of the output file. A .Ar file argument of .Sq Li \&- indicates that results are to be directed to the standard output. If the .Ar file argument is a directory, fetched file(s) will be placed within the directory, with name(s) selected as in the default behaviour. .It Fl P .It Fl p Use passive FTP. These flags have no effect, since passive FTP is the default, but are provided for compatibility with earlier versions where active FTP was the default. To force active mode, set the .Ev FTP_PASSIVE_MODE environment variable to .Ql NO . .It Fl q Quiet mode. .It Fl R The output files are precious, and should not be deleted under any circumstances, even if the transfer failed or was incomplete. .It Fl r Restart a previously interrupted transfer. Note that the .Fl m and .Fl r flags are mutually exclusive. .It Fl S Ar bytes Require the file size reported by the server to match the specified value. If it does not, a message is printed and the file is not fetched. If the server does not support reporting file sizes, this option is ignored and the file is fetched unconditionally. .It Fl s Print the size in bytes of each requested file, without fetching it. .It Fl T Ar seconds Set timeout value to .Ar seconds . Overrides the environment variables .Ev FTP_TIMEOUT for FTP transfers or .Ev HTTP_TIMEOUT for HTTP transfers if set. .It Fl U When using passive FTP, allocate the port for the data connection from the low (default) port range. See .Xr ip 4 for details on how to specify which port range this corresponds to. .It Fl v Increase verbosity level. .It Fl w Ar seconds When the .Fl a flag is specified, wait this many seconds between successive retries. .El .Pp .Ar URL .Bd -literal <scheme>:(//(<user>(:<pwd>)?@)?<host>(:<port>)?)?/(<document>)? .Ed .Pp If .Nm receives a .Dv SIGINFO signal (see the .Cm status argument for .Xr stty 1 ) , the current transfer rate statistics will be written to the standard error output, in the same format as the standard completion message. .Sh ENVIRONMENT .Bl -tag -width HTTP_TIMEOUT .It Ev FTP_TIMEOUT Maximum time, in seconds, to wait before aborting an FTP connection. .It Ev HTTP_TIMEOUT Maximum time, in seconds, to wait before aborting an HTTP connection. .El .Pp See .Xr fetch 3 for a description of additional environment variables, including .Ev FETCH_BIND_ADDRESS , .Ev FTP_LOGIN , .Ev FTP_PASSIVE_MODE , .Ev FTP_PASSWORD , .Ev FTP_PROXY , .Ev ftp_proxy , .Ev HTTP_AUTH , .Ev HTTP_PROXY , .Ev http_proxy , .Ev HTTP_PROXY_AUTH , .Ev HTTP_REFERER , .Ev HTTP_USER_AGENT , .Ev NETRC , -.Ev NO_PROXY and +.Ev NO_PROXY No and .Ev no_proxy . .Sh EXIT STATUS The .Nm command returns zero on success, or one on failure. If multiple URLs are listed on the command line, .Nm will attempt to retrieve each one of them in turn, and will return zero only if they were all successfully retrieved.
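.Pp
For illustration (the host below is a placeholder, not a real site), the following invocations fetch a file under its default name, write a document to the standard output, and mirror a previously fetched file using the options described above:
.Bd -literal -offset indent
# www.example.com is a hypothetical host used only for illustration
fetch http://www.example.com/pub/files.tar.gz
fetch -o - http://www.example.com/index.html
fetch -m http://www.example.com/pub/files.tar.gz
.Ed
.Pp
As described above, listing several URLs in one invocation returns zero only if every one of them is retrieved successfully.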
.Pp If the .Fl i argument is used and the remote file is not newer than the specified file then the command will still return success, although no file is transferred. .Sh SEE ALSO .Xr fetch 3 .Sh HISTORY The .Nm command appeared in .Fx 2.1.5 . This implementation first appeared in .Fx 4.1 . .Sh AUTHORS .An -nosplit The original implementation of .Nm was done by .An Jean-Marc Zucconi Aq jmz@FreeBSD.org . It was extensively re-worked for .Fx 2.2 by .An Garrett Wollman Aq wollman@FreeBSD.org , and later completely rewritten to use the .Xr fetch 3 library by .An Dag-Erling Sm\(/orgrav Aq des@FreeBSD.org . .Sh NOTES The .Fl b and .Fl t options are no longer supported and will generate warnings. They were workarounds for bugs in other OSes which this implementation does not trigger. .Pp One cannot both use the .Fl h , .Fl c and .Fl f options and specify URLs on the command line. Index: projects/nand/usr.bin/gprof/gprof.1 =================================================================== --- projects/nand/usr.bin/gprof/gprof.1 (revision 235262) +++ projects/nand/usr.bin/gprof/gprof.1 (revision 235263) @@ -1,328 +1,328 @@ .\" Copyright (c) 1983, 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)gprof.1 8.1 (Berkeley) 6/6/93 .\" $FreeBSD$ .\" .Dd December 25, 2008 .Dt GPROF 1 .Os .Sh NAME .Nm gprof .Nd display call graph profile data .Sh SYNOPSIS .Nm .Op Fl abKlLsuz .Op Fl C Ar count .Op Fl e Ar name .Op Fl E Ar name .Op Fl f Ar name .Op Fl F Ar name .Op Fl k Ar fromname toname .Op Ar a.out Op Ar a.out.gmon ... .Sh DESCRIPTION The .Nm utility produces an execution profile of C, Pascal, or Fortran77 programs. The effect of called routines is incorporated in the profile of each caller. The profile data is taken from the call graph profile file which is created by programs that are compiled with the .Fl pg option of .Xr cc 1 , .Xr pc 1 , and .Xr f77 1 . The .Fl pg option also links in versions of the library routines that are compiled for profiling. 
By convention these libraries have their name suffixed with .Pa _p , i.e., the profiled version of .Pa libc.a is .Pa libc_p.a and if you specify libraries directly to the compiler or linker you can use .Fl l Ns Ar c_p instead of .Fl l Ns Ar c . The .Nm utility reads the given object file (the default is -.Pa a.out) +.Pa a.out ) and establishes the relation between its symbol table and the call graph profile. The default graph profile file name is the name of the executable with the suffix .Pa .gmon appended. If more than one profile file is specified, the .Nm output shows the sum of the profile information in the given profile files. .Pp The .Nm utility calculates the amount of time spent in each routine. Next, these times are propagated along the edges of the call graph. Cycles are discovered, and calls into a cycle are made to share the time of the cycle. The first listing shows the functions sorted according to the time they represent including the time of their call graph descendants. Below each function entry is shown its (direct) call graph children, and how their times are propagated to this function. A similar display above the function shows how this function's time and the time of its descendants is propagated to its (direct) call graph parents. .Pp Cycles are also shown, with an entry for the cycle as a whole and a listing of the members of the cycle and their contributions to the time and call counts of the cycle. .Pp Second, a flat profile is given, similar to that provided by .Xr prof 1 . This listing gives the total execution times, the call counts, the time that the call spent in the routine itself, and the time that the call spent in the routine itself including its descendants. The units for the per-call times are normally milliseconds, but they are nanoseconds if the profiling clock frequency is 10 million or larger. If a function appears never to be called, its total self time is printed as a percentage in the self time per call column. The very high profiling clock frequencies needed to get sufficient accuracy in the per-call times for short-lived programs are only implemented for .Dq high resolution (non-statistical) kernel profiling. .Pp Finally, an index of the function names is provided. .Pp The following options are available: .Bl -tag -width indent .It Fl a Suppress the printing of statically declared functions. If this option is given, all relevant information about the static function (e.g., time samples, calls to other functions, calls from other functions) belongs to the function loaded just before the static function in the .Pa a.out file. .It Fl b Suppress the printing of a description of each field in the profile. .It Fl C Ar count Find a minimal set of arcs that can be broken to eliminate all cycles with .Ar count or more members. Caution: the algorithm used to break cycles is exponential, so using this option may cause .Nm to run for a very long time. .It Fl e Ar name Suppress the printing of the graph profile entry for routine .Ar name and all its descendants (unless they have other ancestors that are not suppressed). More than one .Fl e option may be given. Only one .Ar name may be given with each .Fl e option. .It Fl E Ar name Suppress the printing of the graph profile entry for routine .Ar name (and its descendants) as .Fl e , above, and also excludes the time spent in .Ar name (and its descendants) from the total and percentage time computations. (For example, .Fl E .Ar mcount .Fl E .Ar mcleanup is the default.)
.It Fl f Ar name Print the graph profile entry of only the specified routine .Ar name and its descendants. More than one .Fl f option may be given. Only one .Ar name may be given with each .Fl f option. .It Fl F Ar name Print the graph profile entry of only the routine .Ar name and its descendants (as .Fl f , above) and also uses only the times of the printed routines in total time and percentage computations. More than one .Fl F option may be given. Only one .Ar name may be given with each .Fl F option. The .Fl F option overrides the .Fl E option. .It Fl k Ar fromname Ar toname Will delete any arcs from routine .Ar fromname to routine .Ar toname . This can be used to break undesired cycles. More than one .Fl k option may be given. Only one pair of routine names may be given with each .Fl k option. .It Fl K Gather information about symbols from the currently-running kernel using the .Xr sysctl 3 and .Xr kldsym 2 interfaces. This forces the .Pa a.out argument to be ignored, and allows for symbols in .Xr kld 4 modules to be used. .It Fl l Suppress the printing of the call-graph profile. .It Fl L Suppress the printing of the flat profile. .It Fl s A profile file .Pa gmon.sum is produced that represents the sum of the profile information in all the specified profile files. This summary profile file may be given to later executions of gprof (probably also with a .Fl s ) to accumulate profile data across several runs of an .Pa a.out file. .It Fl u Suppress the printing of functions whose names are not visible to C programs. For the ELF object format, this means names that contain the .Ql .\& character. For the a.out object format, it means names that do not begin with a .Ql _ character. All relevant information about such functions belongs to the (non-suppressed) function with the next lowest address. This is useful for eliminating "functions" that are just labels inside other functions. .It Fl z Display routines that have zero usage (as shown by call counts and accumulated time). .El .Sh FILES .Bl -tag -width a.out.gmon -compact .It Pa a.out The namelist and text space. .It Pa a.out.gmon Dynamic call graph and profile. .It Pa gmon.sum Summarized dynamic call graph and profile. .El .Sh SEE ALSO .Xr cc 1 , .Xr profil 2 , .Xr clocks 7 .\" .Xr monitor 3 , .\" .Xr prof 1 .Rs .%T "An Execution Profiler for Modular Programs" .%A S. Graham .%A P. Kessler .%A M. McKusick .%J "Software - Practice and Experience" .%V 13 .%P pp. 671-685 .%D 1983 .Re .Rs .%T "gprof: A Call Graph Execution Profiler" .%A S. Graham .%A P. Kessler .%A M. McKusick .%J "Proceedings of the SIGPLAN '82 Symposium on Compiler Construction, SIGPLAN Notices" .%V 17 .%N 6 .%P pp. 120-126 .%D June 1982 .Re .Sh HISTORY The .Nm profiler appeared in .Bx 4.2 . .Sh BUGS The granularity of the sampling is shown, but remains statistical at best. We assume that the time for each execution of a function can be expressed by the total time for the function divided by the number of times the function is called. Thus the time propagated along the call graph arcs to the function's parents is directly proportional to the number of times that arc is traversed. .Pp Parents that are not themselves profiled will have the time of their profiled children propagated to them, but they will appear to be spontaneously invoked in the call graph listing, and will not have their time propagated further. Similarly, signal catchers, even though profiled, will appear to be spontaneous (although for more obscure reasons). 
Any profiled children of signal catchers should have their times propagated properly, unless the signal catcher was invoked during the execution of the profiling routine, in which case all is lost. .Pp The profiled program must call .Xr exit 3 or return normally for the profiling information to be saved in the graph profile file. Index: projects/nand/usr.bin/ipcrm/ipcrm.1 =================================================================== --- projects/nand/usr.bin/ipcrm/ipcrm.1 (revision 235262) +++ projects/nand/usr.bin/ipcrm/ipcrm.1 (revision 235263) @@ -1,119 +1,119 @@ .\" Copyright (c) 1994 Adam Glass .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. The name of the Author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY Adam Glass ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL Adam Glass BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd December 12, 2007 .Dt IPCRM 1 .Os .Sh NAME .Nm ipcrm .Nd "remove the specified message queues, semaphore sets, and shared segments" .Sh SYNOPSIS .Nm .Op Fl W .Op Fl v .Op Fl q Ar msqid .Op Fl m Ar shmid .Op Fl s Ar semid .Op Fl Q Ar msgkey .Op Fl M Ar shmkey .Op Fl S Ar semkey .Ar ... .Sh DESCRIPTION The .Nm utility removes the specified message queues, semaphores and shared memory segments. These System V IPC objects can be specified by their creation ID or any associated key. .Pp The following options are generic: .Bl -tag -width indent .It Fl v If specified once with -W or with -1 for an object, it will show all removed objects. If specified twice with -W or with -1 for an object, it will show all removed objects and all failed removals. .It Fl W Try to wipe all specified message queues, semaphores and shared memory segments. .It Fl y Use the .Xr kvm 3 interface instead of the .Xr sysctl 3 interface to extract the required information. If .Nm is to operate on the running system, using .Xr kvm 3 will require read privileges to .Pa /dev/kmem . .El .Pp The following options are used to specify which IPC objects will be removed. Any number and combination of these options can be used: .Bl -tag -width indent .It Fl q Ar msqid Remove the message queue associated with the ID .Ar msqid from the system. .It Fl m Ar shmid Mark the shared memory segment associated with ID .Ar shmid for removal. This marked segment will be destroyed after the last detach. .It Fl s Ar semid Remove the semaphore set associated with ID .Ar semid from the system. .It Fl Q Ar msgkey Remove the message queue associated with key .Ar msgkey from the system.
.It Fl M Ar shmkey Mark the shared memory segment associated with key .Ar shmkey for removal. This marked segment will be destroyed after the last detach. .It Fl S Ar semkey Remove the semaphore set associated with key .Ar semkey from the system. .El .Pp The identifiers and keys associated with these System V IPC objects can be determined by using .Xr ipcs 1 . If the identifier or the key is -1, it will remove all these objects. .Sh SEE ALSO .Xr ipcs 1 .Sh HISTORY The wiping of all System V IPC objects was first implemented in -.Fx 6.4 and 7.1. +.Fx 6.4 No and 7.1. .Sh AUTHORS The original author was Adam Glass. The wiping of all System V IPC objects was thought up by Callum Gibson and extended and implemented by Edwin Groothuis. Index: projects/nand/usr.bin/join/join.1 =================================================================== --- projects/nand/usr.bin/join/join.1 (revision 235262) +++ projects/nand/usr.bin/join/join.1 (revision 235263) @@ -1,223 +1,223 @@ .\" Copyright (c) 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" This code is derived from software contributed to Berkeley by .\" the Institute of Electrical and Electronics Engineers, Inc. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)join.1 8.3 (Berkeley) 4/28/95 .\" $FreeBSD$ .\" .Dd July 5, 2004 .Dt JOIN 1 .Os .Sh NAME .Nm join .Nd relational database operator .Sh SYNOPSIS .Nm .Oo .Fl a Ar file_number | Fl v Ar file_number .Oc .Op Fl e Ar string .Op Fl o Ar list .Op Fl t Ar char .Op Fl 1 Ar field .Op Fl 2 Ar field .Ar file1 .Ar file2 .Sh DESCRIPTION The .Nm utility performs an .Dq equality join on the specified files and writes the result to the standard output. The .Dq join field is the field in each file by which the files are compared. The first field in each line is used by default. There is one line in the output for each pair of lines in .Ar file1 and .Ar file2 which have identical join fields. Each output line consists of the join field, the remaining fields from .Ar file1 and then the remaining fields from .Ar file2 . 
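.Pp
A small, hypothetical illustration of this behaviour, joining two sorted files on the default first field:
.Bd -literal -offset indent
# names and shells are hypothetical input files
$ cat names
ann Ann
bob Bob
$ cat shells
ann /bin/sh
bob /bin/csh
$ join names shells
ann Ann /bin/sh
bob Bob /bin/csh
.Ed
.Pp
Both inputs are already ordered on the join field, as the collating requirements below explain.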
.Pp The default field separators are tab and space characters. In this case, multiple tabs and spaces count as a single field separator, and leading tabs and spaces are ignored. The default output field separator is a single space character. .Pp Many of the options use file and field numbers. Both file numbers and field numbers are 1 based, i.e., the first file on the command line is file number 1 and the first field is field number 1. The following options are available: .Bl -tag -width indent .It Fl a Ar file_number In addition to the default output, produce a line for each unpairable line in file .Ar file_number . .It Fl e Ar string Replace empty output fields with .Ar string . .It Fl o Ar list The .Fl o option specifies the fields that will be output from each file for each line with matching join fields. Each element of .Ar list has either the form .Ar file_number . Ns Ar field , where .Ar file_number is a file number and .Ar field is a field number, or the form .Ql 0 .Pq zero , representing the join field. The elements of list must be either comma .Pq Ql \&, or whitespace separated. -(The letter requires quoting to protect it from the shell, or, a simpler +(The latter requires quoting to protect it from the shell, or, a simpler approach is to use multiple .Fl o options.) .It Fl t Ar char Use character .Ar char as a field delimiter for both input and output. Every occurrence of .Ar char in a line is significant. .It Fl v Ar file_number Do not display the default output, but display a line for each unpairable line in file .Ar file_number . The options .Fl v Cm 1 and .Fl v Cm 2 may be specified at the same time. .It Fl 1 Ar field Join on the .Ar field Ns 'th field of .Ar file1 . .It Fl 2 Ar field Join on the .Ar field Ns 'th field of .Ar file2 . .El .Pp When the default field delimiter characters are used, the files to be joined should be ordered in the collating sequence of .Xr sort 1 , using the .Fl b option, on the fields on which they are to be joined, otherwise .Nm may not report all field matches. When the field delimiter characters are specified by the .Fl t option, the collating sequence should be the same as .Xr sort 1 without the .Fl b option. .Pp If one of the arguments .Ar file1 or .Ar file2 is .Sq Fl , the standard input is used. .Sh EXIT STATUS .Ex -std .Sh COMPATIBILITY For compatibility with historic versions of .Nm , the following options are available: .Bl -tag -width indent .It Fl a In addition to the default output, produce a line for each unpairable line in both .Ar file1 and .Ar file2 . .It Fl j1 Ar field Join on the .Ar field Ns 'th field of .Ar file1 . .It Fl j2 Ar field Join on the .Ar field Ns 'th field of .Ar file2 . .It Fl j Ar field Join on the .Ar field Ns 'th field of both .Ar file1 and .Ar file2 . .It Fl o Ar list ... Historical implementations of .Nm permitted multiple arguments to the .Fl o option. These arguments were of the form .Ar file_number . Ns Ar field_number as described for the current .Fl o option. This has obvious difficulties in the presence of files named .Pa 1.2 . .El .Pp These options are available only so historic shell scripts do not require modification and should not be used. .Sh SEE ALSO .Xr awk 1 , .Xr comm 1 , .Xr paste 1 , .Xr sort 1 , .Xr uniq 1 .Sh STANDARDS The .Nm command conforms to .St -p1003.1-2001 .
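A worked illustration of the -o and -t behavior described in join.1 above (a sketch only; the file names, delimiter, and data are hypothetical):

    # names.db contains:        uids.db contains:
    #   1|alice                   1|1001
    #   2|bob                     2|1002
    sort -t '|' -k 1,1 names.db > names.sorted
    sort -t '|' -k 1,1 uids.db  > uids.sorted
    # print field 2 of file 1 and field 2 of file 2 for every matching key
    join -t '|' -o 1.2,2.2 names.sorted uids.sorted
    # output: alice|1001
    #         bob|1002

As the text above notes for the -t case, the inputs are sorted without sort's -b flag.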
Index: projects/nand/usr.bin/limits/limits.1 =================================================================== --- projects/nand/usr.bin/limits/limits.1 (revision 235262) +++ projects/nand/usr.bin/limits/limits.1 (revision 235263) @@ -1,412 +1,412 @@ .\" Copyright (c) 1996 David Nugent .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, is permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice immediately at the beginning of the file, without modification, .\" this list of conditions, and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. This work was done expressly for inclusion into FreeBSD. Other use .\" is permitted provided this notation is included. .\" 4. Absolutely no warranty of function or purpose is made by the author .\" David Nugent. .\" 5. Modifications may be freely made to this file providing the above .\" conditions are met. .\" .\" $FreeBSD$ .\" -.Dd January 23, 2011 +.Dd January 23, 2012 .Dt LIMITS 1 .Os .Sh NAME .Nm limits .Nd set or display process resource limits .Sh SYNOPSIS .Nm .Op Fl C Ar class | Fl P Ar pid | Fl U Ar user .Op Fl SHB .Op Fl ea .Op Fl bcdflmnstuvpw Op Ar val .Nm .Op Fl C Ar class | Fl U Ar user .Op Fl SHB .Op Fl bcdflmnstuvpw Op Ar val .Op Fl E .Oo .Op Ar name Ns = Ns Ar value ... .Ar command .Oc .Sh DESCRIPTION The .Nm utility either prints or sets kernel resource limits, and may optionally set environment variables like .Xr env 1 and run a program with the selected resources. Three uses of the .Nm utility are possible: .Bl -tag -width indent .It Xo .Nm .Op Ar limitflags .Op Ar name Ns = Ns Ar value ... .Ar command .Xc This usage sets limits according to .Ar limitflags , optionally sets environment variables given as .Ar name Ns = Ns Ar value pairs, and then runs the specified .Ar command . .It Nm Op Ar limitflags This usage determines values of resource settings according to .Ar limitflags , does not attempt to set them and outputs these values to standard output. By default, this will output the current kernel resource settings active for the calling process. Using the .Fl C Ar class or .Fl U Ar user options, you may also display the current resource settings modified by the appropriate login class resource limit entries from the .Xr login.conf 5 login capabilities database. .It Nm Fl e Op Ar limitflags This usage determines values of resource settings according to .Ar limitflags , but does not set them itself. Like the previous usage, it outputs these values to standard output, except that it will emit them in .Ic eval format, suitable for the calling shell. The calling shell is determined by examining the entries in the .Pa /proc file system for the parent process. If the shell is known (i.e., it is one of .Nm sh , csh , bash , tcsh , ksh , pdksh or .Nm rc ) , .Nm emits .Ic limit or .Ic ulimit commands in the format understood by that shell. If the name of the shell cannot be determined, then the .Ic ulimit format used by .Xr sh 1 is used. 
.Pp This is very useful for setting limits used by scripts, or prior to launching daemons and other background tasks with specific resource limit settings, and provides the benefit of allowing global configuration of maximum resource usage by maintaining a central database of settings in the login class database. .Pp Within a shell script, .Nm will normally be used with eval within backticks as follows: .Pp .Dl "eval `limits -e -C daemon`" .Pp which causes the output of .Nm to be evaluated and set by the current shell. .El .Pp The value of .Ar limitflags specified in the above contains one or more of the following options: .Bl -tag -width ".Fl C Ar class" .It Fl C Ar class Use current resource values, modified by the resource entries applicable for the login class .Ar class . .It Fl U Ar user Use current resource values, modified by the resource entries applicable to the login class the .Ar user belongs to. If the user does not belong to any class, then the resource capabilities for the .Dq Li default class are used, if it exists, or the .Dq Li root class if the user is a superuser account. .It Fl P Ar pid Select or set limits for the process identified by the .Ar pid . .It Fl S Select display or setting of .Dq soft (or current) resource limits. If specific limits settings follow this switch, only soft limits are affected unless overridden later with either the .Fl H or .Fl B options. .It Fl H Select display or setting of .Dq hard (or maximum) resource limits. If specific limits settings follow this switch, only hard limits are affected until overridden later with either the .Fl S or .Fl B options. .It Fl B Select display or setting of both .Dq soft (current) and .Dq hard (maximum) resource limits. If specific limits settings follow this switch, both soft and hard limits are affected until overridden later with either the .Fl S or .Fl H options. -.Fl e +.It Fl e Select .Dq "eval mode" formatting for output. This is valid only in display mode and cannot be used when running a command. The exact syntax used for output depends upon the type of shell from which .Nm is invoked. .It Fl b Op Ar val Select or set the .Va sbsize resource limit. .It Fl c Op Ar val Select or set (if .Ar val is specified) the .Va coredumpsize resource limit. A value of 0 disables core dumps. .It Fl d Op Ar val Select or set (if .Ar val is specified) the .Va datasize resource limit. .It Fl f Op Ar val Select or set the .Va filesize resource limit. .It Fl l Op Ar val Select or set the .Va memorylocked resource limit. .It Fl m Op Ar val Select or set the .Va memoryuse size limit. .It Fl n Op Ar val Select or set the .Va openfiles resource limit. The system-wide limit on the maximum number of open files per process can be viewed by examining the .Va kern.maxfilesperproc .Xr sysctl 8 variable. The total number of simultaneously open files in the entire system is limited to the value displayed by the .Va kern.maxfiles .Xr sysctl 8 variable. .It Fl s Op Ar val Select or set the .Va stacksize resource limit. .It Fl t Op Ar val Select or set the .Va cputime resource limit. .It Fl u Op Ar val Select or set the .Va maxproc resource limit. The system-wide limit on the maximum number of processes allowed per UID can be viewed by examining the .Va kern.maxprocperuid .Xr sysctl 8 variable. The maximum number of processes that can be running simultaneously in the entire system is limited to the value of the .Va kern.maxproc .Xr sysctl 8 variable. .It Fl v Op Ar val Select or set the .Va virtualmem resource limit.
This limit encompasses the entire VM space for the user process and is inclusive of text, data, bss, stack, .Xr brk 2 , .Xr sbrk 2 and .Xr mmap 2 Ns 'd space. .It Fl p Op Ar val Select or set the .Va pseudoterminals resource limit. .It Fl w Op Ar val Select or set the .Va swapuse resource limit. .El .Pp Valid values for .Ar val in the above set of options consist of either the string .Dq Li infinity , .Dq Li inf , .Dq Li unlimited or .Dq Li unlimit for an infinite (or kernel-defined maximum) limit, or a numeric value optionally followed by a suffix. Values which relate to size default to a value in bytes, or one of the following suffixes may be used as a multiplier: .Pp .Bl -tag -offset indent -width 4n -compact .It Li b 512 byte blocks. .It Li k kilobytes (1024 bytes). .It Li m megabytes (1024*1024 bytes). .It Li g gigabytes. .It Li t terabytes. .El .Pp The .Va cputime resource defaults to a number of seconds, but a multiplier may be used, and as with size values, multiple values separated by a valid suffix are added together: .Pp .Bl -tag -offset indent -width 4n -compact .It Li s seconds. .It Li m minutes. .It Li h hours. .It Li d days. .It Li w weeks. .It Li y 365 day years. .El .Bl -tag -width ".Fl C Ar class" .It Fl E Cause .Nm to completely ignore the environment it inherits. .It Fl a Force all resource settings to be displayed even if other specific resource settings have been specified. For example, if you wish to disable core dumps when starting up the Usenet News system, but wish to set all other resource settings as well that apply to the .Dq Li news account, you might use: .Pp .Dl "eval `limits -U news -aBec 0`" .Pp As with the .Xr setrlimit 2 call, only the superuser may raise process .Dq hard resource limits. Non-root users may, however, lower them or change .Dq soft resource limits to any value below the hard limit. When invoked to execute a program, the failure of .Nm to raise a hard limit is considered a fatal error. .El .Sh EXIT STATUS The .Nm utility exits with .Dv EXIT_FAILURE if usage is incorrect in any way; i.e., an invalid option, or set/display options are selected in the same invocation, .Fl e is used when running a program, etc. When run in display or eval mode, .Nm exits with a status of .Dv EXIT_SUCCESS . When run in command mode and execution of the command succeeds, the exit status will be whatever the executed program returns. .Sh SEE ALSO .Xr csh 1 , .Xr env 1 , .Xr limit 1 , .Xr sh 1 , .Xr getrlimit 2 , .Xr setrlimit 2 , .Xr login_cap 3 , .Xr login.conf 5 , .Xr rctl 8 , .Xr sysctl 8 .Sh BUGS The .Nm utility does not handle commands with equal .Pq Ql = signs in their names, for obvious reasons. .Pp When eval output is selected, the .Pa /proc file system must be installed and mounted for the shell to be correctly determined, and therefore for the output syntax to be correct for the running shell. The default output is valid for .Xr sh 1 , so this means that any usage of .Nm in eval mode prior to mounting .Pa /proc may only occur in standard Bourne shell scripts. .Pp The .Nm utility makes no effort to ensure that resource settings emitted or displayed are valid and settable by the current user. Only a superuser account may raise hard limits, and when doing so the .Fx kernel will silently lower limits to values less than specified if the values given are too high.
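A short sketch of the command-execution usage described in limits.1 above (the daemon path, limit values, and environment variable are hypothetical):

    # run a program with a 64 MB soft datasize limit and core dumps
    # disabled, setting one environment variable as env(1) would
    limits -S -d 64m -c 0 LANG=C /usr/local/sbin/exampled

    # display the hard limits that the "daemon" login class would receive
    limits -H -C daemon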
Index: projects/nand/usr.bin/ncplogin/ncplogout.1 =================================================================== --- projects/nand/usr.bin/ncplogin/ncplogout.1 (revision 235262) +++ projects/nand/usr.bin/ncplogin/ncplogout.1 (revision 235263) @@ -1,56 +1,56 @@ .\" $FreeBSD$ .Dd September 15, 1999 .Dt NCPLOGOUT 1 .Os .Sh NAME .Nm ncplogout .Nd schedule permanent connection to close .Sh SYNOPSIS .Nm .Op Fl S Ar server .Op Fl U Ar user .Op Fl c Ar handle .Nm .Op Fl c Ar handle .No / Ns Ar server Ns : Ns Ar user .Sh DESCRIPTION The .Nm utility will schedule a connection created by the .Xr ncplogin 1 command to be closed. If the connection is busy (i.e., used by other processes) it will be closed when the last process using it is terminated. This command is similar to the .Tn DOS .Pa logout.exe command. .Pp The options are: .Bl -tag -width indent .It Fl S Ar server Specify the name of the -.Tn Netware +.Tn NetWare server whose connection should be terminated. Can be omitted if there is only one connection active. .It Fl U Ar user Specify the name of the user to use when identifying the connection. Can be omitted if there is only one connection active. .It Fl c Ar handle Close a connection by handle. A list of available handles can be obtained with the following command: .Pp .Dl "ncplist c" .El .Sh HISTORY The .Nm utility first appeared in .Fx 4.0 . .Sh AUTHORS .An Boris Popov Aq bp@butya.kz , .Aq bp@FreeBSD.org .Sh BUGS Please report any bugs to the author. Index: projects/nand/usr.bin/procstat =================================================================== --- projects/nand/usr.bin/procstat (revision 235262) +++ projects/nand/usr.bin/procstat (revision 235263) Property changes on: projects/nand/usr.bin/procstat ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.bin/procstat:r235157-235262 Index: projects/nand/usr.bin/tftp/tftp.1 =================================================================== --- projects/nand/usr.bin/tftp/tftp.1 (revision 235262) +++ projects/nand/usr.bin/tftp/tftp.1 (revision 235263) @@ -1,272 +1,272 @@ .\" Copyright (c) 1990, 1993, 1994 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)tftp.1 8.2 (Berkeley) 4/18/94 .\" $FreeBSD$ .\" .Dd June 22, 2011 .Dt TFTP 1 .Os .Sh NAME .Nm tftp .Nd trivial file transfer program .Sh SYNOPSIS .Nm .Op Ar host Op Ar port .Sh DESCRIPTION The .Nm utility is the user interface to the Internet .Tn TFTP (Trivial File Transfer Protocol), which allows users to transfer files to and from a remote machine. The remote .Ar host may be specified on the command line, in which case .Nm uses .Ar host as the default host for future transfers (see the .Cm connect command below). .Sh COMMANDS Once .Nm is running, it issues the prompt .Dq Li tftp> and recognizes the following commands: .Pp .Bl -tag -width verbose -compact .It Cm \&? Ar command-name ... Print help information. .Pp .It Cm ascii Shorthand for "mode ascii" .Pp .It Cm binary Shorthand for "mode binary" .Pp .It Cm blocksize Ar [size] Sets the TFTP blksize option in TFTP Read Request or Write Request packets to .Ar [size] as specified in RFC 2348. Valid values are between 8 and 65464. If no blocksize is specified, then by default a blocksize of 512 bytes will be used. .Pp .It Cm blocksize2 Ar [size] Sets the TFTP blksize2 option in TFTP Read Request or Write Request packets to .Ar [size] . Values are restricted to powers of 2 between 8 and 32768. This is a non-standard TFTP option. .Pp .It Cm connect Ar host Op Ar port Set the .Ar host (and optionally .Ar port ) for transfers. Note that the .Tn TFTP protocol, unlike the .Tn FTP protocol, does not maintain connections between transfers; thus, the .Cm connect command does not actually create a connection, but merely remembers what host is to be used for transfers. You do not have to use the .Cm connect command; the remote host can be specified as part of the .Cm get or .Cm put commands. .Pp .It Cm debug Ar level Enable or disable debugging levels during verbose output. The value of .Ar level can be one of -.Cm packet, simple, options, +.Cm packet , simple , options , or -.Cm access. +.Cm access . .Pp .It Cm get Oo Ar host : Oc Ns Ar file Op Ar localname .It Cm get Xo .Oo Ar host1 : Oc Ns Ar file1 .Oo Ar host2 : Oc Ns Ar file2 ... .Oo Ar hostN : Oc Ns Ar fileN .Xc Get one or more files from the remote host. When using the .Ar host argument, the .Ar host will be used as default host for future transfers. If .Ar localname is specified, the file is stored locally as .Ar localname , otherwise the original filename is used. Note that it is not possible to download two files at a time, only one, three, or more than three files at a time. .Pp To specify an IPv6 numeric address for a host, wrap it using square brackets like .Dq Li [3ffe:2900:e00c:ffee::1234] : Ns Ar file to disambiguate the colons used in the IPv6 address from the colon separating the host and the filename. .Pp .It Cm mode Ar transfer-mode Set the mode for transfers; .Ar transfer-mode may be one of .Em ascii or .Em binary . The default is .Em ascii . .Pp .It Cm packetdrop [arg] Randomly drop .Ar arg out of 100 packets during a transfer.
This is a debugging feature. .Pp .It Cm put Ar file Op Oo Ar host : Oc Ns Ar remotename .It Cm put Ar file1 file2 ... fileN Op Oo Ar host : Oc Ns Ar remote-directory Put a file or set of files to the remote host. When .Ar remotename is specified, the file is stored remotely as .Ar remotename , otherwise the original filename is used. If the .Ar remote-directory argument is used, the remote host is assumed to be a .Ux machine. To specify an IPv6 numeric address for a .Ar host , see the example under the .Cm get command. .Pp .It Cm options Ar [arg] Enable or disable support for TFTP options. The valid values of .Ar arg are .Cm on (enable RFC 2347 options), .Cm off (disable RFC 2347 options), and .Cm extra (toggle support for non-RFC defined options). .Pp .It Cm quit Exit .Nm . An end of file also exits. .Pp .It Cm rexmt Ar retransmission-timeout Set the per-packet retransmission timeout, in seconds. .Pp .It Cm rollover [arg] Specify the rollover option in TFTP Read Request or Write Request packets. After 65535 packets have been transmitted, set the block counter to .Ar arg . Valid values of .Ar arg are 0 and 1. This is a non-standard TFTP option. .Pp .It Cm status Show current status. .Pp .It Cm timeout Ar total-transmission-timeout Set the total transmission timeout, in seconds. .Pp .It Cm trace Toggle packet tracing. .Pp .It Cm verbose Toggle verbose mode. .El .Sh SEE ALSO .Xr tftpd 8 .Pp The following RFCs are supported: .Rs .%T RFC 1350: The TFTP Protocol (Revision 2) .Re .Rs .%T RFC 2347: TFTP Option Extension .Re .Rs .%T RFC 2348: TFTP Blocksize Option .Re .Rs .%T RFC 2349: TFTP Timeout Interval and Transfer Size Options .Re .Rs .%T RFC 3617: Uniform Resource Identifier (URI) Scheme and Applicability Statement for the Trivial File Transfer Protocol (TFTP) .Re .Pp The non-standard .Cm rollover and .Cm blksize2 TFTP options are mentioned here: .Rs .%T Extending TFTP .%U http://www.compuphase.com/tftp.htm .Re .Sh HISTORY The .Nm command appeared in .Bx 4.3 . .Pp Edwin Groothuis performed a major rewrite of the .Xr tftpd 8 and .Nm code to support RFC2348. .Sh NOTES Because there is no user-login or validation within the .Tn TFTP protocol, the remote site will probably have some sort of file-access restrictions in place. The exact methods are specific to each site and therefore difficult to document here. .Pp Files larger than 33488896 octets (65535 blocks) cannot be transferred without client and server supporting the TFTP blocksize option (RFC2348), or the non-standard TFTP rollover option. Index: projects/nand/usr.bin/unzip/unzip.1 =================================================================== --- projects/nand/usr.bin/unzip/unzip.1 (revision 235262) +++ projects/nand/usr.bin/unzip/unzip.1 (revision 235263) @@ -1,179 +1,189 @@ .\"- .\" Copyright (c) 2007-2008 Dag-Erling Smørgrav .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution.
.\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd October 21, 2010 +.Dd May 10, 2012 .Dt UNZIP 1 .Os .Sh NAME .Nm unzip .Nd extract files from a ZIP archive .Sh SYNOPSIS .Nm .Op Fl aCcfjLlnopqtuv .Op Fl d Ar dir .Ar zipfile .Sh DESCRIPTION .\" ... The following options are available: .Bl -tag -width Fl .It Fl a When extracting a text file, convert DOS-style line endings to Unix-style line endings. .It Fl C Match file names case-insensitively. .It Fl c Extract to stdout/screen. When extracting files from the zipfile, they are written to stdout. This is similar to .Fl p , but doesn't suppress normal output. .It Fl d Ar dir Extract files into the specified directory rather than the current directory. .It Fl f Update existing. Extract only files from the zipfile if a file with the same name already exists on disk and is older than the former. Otherwise, the file is silently skipped. .It Fl j Ignore directories stored in the zipfile; instead, extract all files directly into the extraction directory. .It Fl L Convert the names of the extracted files and directories to lowercase. .It Fl l List, rather than extract, the contents of the zipfile. .It Fl n No overwrite. When extracting a file from the zipfile, if a file with the same name already exists on disk, the file is silently skipped. .It Fl o Overwrite. When extracting a file from the zipfile, if a file with the same name already exists on disk, the existing file is replaced with the file from the zipfile. .It Fl p Extract to stdout. When extracting files from the zipfile, they are written to stdout. The normal output is suppressed as if .Fl q was specified. .It Fl q Quiet: print less information while extracting. .It Fl t Test: do not extract anything, but verify the checksum of every file in the archive. .It Fl u Update. When extracting a file from the zipfile, if a file with the same name already exists on disk, the existing file is replaced with the file from the zipfile if and only if the latter is newer than the former. Otherwise, the file is silently skipped. .It Fl v List verbosely, rather than extract, the contents of the zipfile. This differs from .Fl l by using the long listing. Note that most of the data is currently fake and does not reflect the content of the archive. .It Fl x Ar pattern Exclude files matching the pattern .Ar pattern . +.It Fl Z Ar mode +Emulate +.Xr zipinfo 1L +mode. +Enabling +.Xr zipinfo 1L +mode changes the way in which additional arguments are parsed. +Currently only +.Xr zipinfo 1L +mode 1 is supported, which lists the file names one per line. .El .Pp Note that only one of .Fl n , .Fl o , and .Fl u may be specified. If specified filename is .Qq - , then data is read from .Va stdin . 
.Sh ENVIRONMENT If the .Ev UNZIP_DEBUG environment variable is defined, the .Fl q command-line option has no effect, and additional debugging information will be printed to .Va stderr . .Sh COMPATIBILITY The .Nm utility aims to be sufficiently compatible with other implementations to serve as a drop-in replacement in the context of the .Xr ports 7 system. No attempt has been made to replicate functionality which is not required for that purpose. .Pp For compatibility reasons, command-line options will be recognized if they are listed not only before but also after the name of the zipfile. .Pp Normally, the .Fl a option should only affect files which are marked as text files in the zipfile's central directory. Since the .Xr archive 3 library reads zipfiles sequentially, and does not use the central directory, that information is not available to the .Nm utility. Instead, the .Nm utility will assume that a file is a text file if no non-ASCII characters are present within the first block of data decompressed for that file. If non-ASCII characters appear in subsequent blocks of data, a warning will be issued. .Pp The .Nm utility is only able to process ZIP archives handled by .Xr libarchive 3 . Depending on the installed version of .Xr libarchive , this may or may not include self-extracting archives. .Sh SEE ALSO .Xr libarchive 3 .Sh HISTORY The .Nm utility appeared in .Fx 8.0 . .Sh AUTHORS The .Nm utility and this manual page were written by .An Dag-Erling Sm\(/orgrav Aq des@FreeBSD.org . It uses the .Xr archive 3 library developed by .An Tim Kientzle Aq kientzle@FreeBSD.org . Index: projects/nand/usr.bin/who/who.1 =================================================================== --- projects/nand/usr.bin/who/who.1 (revision 235262) +++ projects/nand/usr.bin/who/who.1 (revision 235263) @@ -1,157 +1,157 @@ .\" Copyright (c) 1986, 1991, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
.\" .\" @(#)who.1 8.2 (Berkeley) 12/30/93 .\" $FreeBSD$ .\" .Dd February 11, 2012 .Dt WHO 1 .Os .Sh NAME .Nm who .Nd display who is on the system .Sh SYNOPSIS .Nm .Op Fl abHmqsTu .Op Cm am I .Op Ar file .Sh DESCRIPTION The .Nm utility displays information about currently logged in users. By default, this includes the login name, tty name, date and time of login and remote hostname if not local. .Pp The options are as follows: .Bl -tag -width indent .It Fl a Equivalent to .Fl bTu , with the exception that output isn't restricted to the time and date of the last system reboot. .It Fl b Write the time and date of the last system reboot. .It Fl H Write column headings above the output. .It Fl m Show information about the terminal attached to standard input only. .It Fl q .Dq "Quick mode" : List the names and number of logged in users in columns. All other command line options are ignored. .It Fl s Show the name, line and time fields only. This is the default. .It Fl T Indicate whether each user is accepting messages. One of the following characters is written: .Pp .Bl -tag -width 1n -compact .It Li + User is accepting messages. .It Li \&- User is not accepting messages. .It Li \&? An error occurred. .El .It Fl u Show idle time for each user in hours and minutes as .Ar hh Ns : Ns Ar mm , .Ql \&. -if the user has been idle less that a minute, and +if the user has been idle less than a minute, and .Dq Li old if the user has been idle more than 24 hours. .It Cm am I Equivalent to .Fl m . .El .Pp By default, .Nm gathers information from the file .Pa /var/run/utx.active . An alternate .Ar file may be specified which is usually .Pa /var/log/utx.log (or .Pa /var/log/utx.log.[0-6] depending on site policy as .Pa utx.log can grow quite large and daily versions may or may not be kept around after compression by .Xr ac 8 ) . The .Pa utx.log file contains a record of every login, logout, crash, shutdown and date change since .Pa utx.log was last truncated or created. .Pp If .Pa /var/log/utx.log is being used as the file, the user name may be empty or one of the special characters '|', '}' and '~'. Logouts produce an output line without any user name. For more information on the special characters, see .Xr getutxent 3 . .Sh ENVIRONMENT The .Ev COLUMNS , LANG , LC_ALL and .Ev LC_TIME environment variables affect the execution of .Nm as described in .Xr environ 7 . .Sh FILES .Bl -tag -width /var/log/utx.log.[0-6] -compact .It Pa /var/run/utx.active .It Pa /var/log/utx.log .It Pa /var/log/utx.log.[0-6] .El .Sh EXIT STATUS .Ex -std .Sh SEE ALSO .Xr last 1 , .Xr users 1 , .Xr w 1 , .Xr getutxent 3 .Sh STANDARDS The .Nm utility conforms to .St -p1003.1-2001 . .Sh HISTORY A .Nm command appeared in .At v1 . Index: projects/nand/usr.bin/whois/whois.1 =================================================================== --- projects/nand/usr.bin/whois/whois.1 (revision 235262) +++ projects/nand/usr.bin/whois/whois.1 (revision 235263) @@ -1,268 +1,268 @@ .\" Copyright (c) 1985, 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. 
Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" From: @(#)whois.1 8.1 (Berkeley) 6/6/93 .\" $FreeBSD$ .\" .Dd October 2, 2009 .Dt WHOIS 1 .Os .Sh NAME .Nm whois .Nd "Internet domain name and network number directory service" .Sh SYNOPSIS .Nm .Op Fl aAbfgiIklmQrR .Op Fl c Ar country-code | Fl h Ar host .Op Fl p Ar port .Ar name ... .Sh DESCRIPTION The .Nm utility looks up records in the databases maintained by several Network Information Centers .Pq Tn NICs . .Pp The options are as follows: .Bl -tag -width indent .It Fl a Use the American Registry for Internet Numbers .Pq Tn ARIN database. It contains network numbers used in those parts of the world covered neither by .Tn APNIC , AfriNIC , LACNIC , nor by .Tn RIPE . .Pp (Hint: All point of contact handles in the .Tn ARIN whois database end with .Qq Li -ARIN . ) .Pp .It Fl A Use the Asia/Pacific Network Information Center .Pq Tn APNIC database. It contains network numbers used in East Asia, Australia, New Zealand, and the Pacific islands. .It Fl b Use the Network Abuse Clearinghouse database. It contains addresses to which network abuse should be reported, indexed by domain name. .It Fl c Ar country-code This is the equivalent of using the .Fl h option with an argument of .Qq Ar country-code Ns Li .whois-servers.net . .It Fl f Use the African Network Information Centre .Pq Tn AfriNIC database. It contains network numbers used in Africa and the islands of the western Indian Ocean. .It Fl g Use the US non-military federal government database, which contains points of contact for subdomains of .Pa .GOV . .It Fl h Ar host Use the specified host instead of the default variant. Either a host name or an IP address may be specified. .Pp By default .Nm constructs the name of a whois server to use from the top-level domain .Pq Tn TLD of the supplied (single) argument by appending .Qq Li .whois-servers.net . This effectively allows a suitable whois server to be selected automatically for a large number of .Tn TLDs . .Pp In the event that an IP address is specified, the whois server will default to the American Registry for Internet Numbers .Pq Tn ARIN . If a query to .Tn ARIN references .Tn APNIC , AfriNIC , LACNIC , or .Tn RIPE , that server will be queried also, provided that the .Fl Q option is not specified. .Pp If the query is not a domain name or IP address, .Nm will fall back to .Pa whois.crsnic.net .
.It Fl i Use the Network Solutions Registry for Internet Numbers .Pq Pa whois.networksolutions.com database. It contains network numbers and domain contact information for most of .Pa .COM , .NET , .ORG and .Pa .EDU domains. .Pp .Sy NOTE ! The registration of these domains is now done by a number of independent and competing registrars and this database holds no information on the domains registered by organizations other than Network Solutions, Inc. Also, note that the .Tn InterNIC database .Pq Pa whois.internic.net is no longer handled by Network Solutions, Inc. For details, see .Pa http://www.internic.net/ . .Pp (Hint: Contact information, identified by the term .Em handle , can be looked up by prefixing .Qq Li "handle " to the .Tn NIC handle in the query.) .It Fl I Use the Internet Assigned Numbers Authority .Pq Tn IANA database. It contains network information for top-level domains. .It Fl k Use the National Internet Development Agency of Korea's .Pq Tn KRNIC database. It contains network numbers and domain contact information for Korea. .It Fl l Use the Latin American and Caribbean IP address Regional Registry .Pq Tn LACNIC database. It contains network numbers used in much of Latin America and the Caribbean. .It Fl m Use the Route Arbiter Database .Pq Tn RADB database. It contains route policy specifications for a large number of operators' networks. .It Fl p Ar port Connect to the whois server on .Ar port . If this option is not specified, .Nm defaults to port 43. .It Fl Q Do a quick lookup. This means that .Nm will not attempt to look up the name in the authoritative whois server (if one is listed). This option has no effect when combined with any other options. .It Fl r Use the R\(aaeseaux IP Europ\(aaeens .Pq Tn RIPE database. It contains network numbers and domain contact information for Europe. .It Fl R Use the Russia Network Information Center .Pq Tn RIPN database. It contains network numbers and domain contact information for subdomains of .Pa .RU . This option is deprecated; use the .Fl c option with an argument of .Qq Li RU instead. +.El .Pp The operands specified to .Nm are treated independently and may be used as queries on different whois servers. -.El .Sh EXIT STATUS .Ex -std .Sh EXAMPLES Most types of data, such as domain names and .Tn IP addresses, can be used as arguments to .Nm without any options, and .Nm will choose the correct whois server to query. Some exceptions, where .Nm will not be able to handle data correctly, are detailed below. .Pp To obtain contact information about an administrator located in the Russian .Tn TLD domain .Qq Li RU , use the .Fl c option as shown in the following example, where .Ar CONTACT-ID is substituted with the actual contact identifier. .Pp .Dl "whois -c RU CONTACT-ID" .Pp (Note: This example is specific to the .Tn TLD .Qq Li RU , but other .Tn TLDs can be queried by using a similar syntax.) .Pp The following example demonstrates how to query a whois server using a non-standard port, where .Dq Li query-data is the query to be sent to .Dq Li whois.example.com on port .Dq Li rwhois (written numerically as 4321). .Pp .Dl "whois -h whois.example.com -p rwhois query-data" .Sh SEE ALSO .Rs .%A Ken Harrenstien .%A Vic White .%T NICNAME/WHOIS .%D 1 March 1982 .%O RFC 812 .Re .Sh HISTORY The .Nm command appeared in .Bx 4.3 .
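A sketch of the automatic server selection described in whois.1 above (the operands are examples only):

    whois freebsd.org        # TLD "org": queries org.whois-servers.net
    whois 192.0.2.1          # IP address: queries ARIN, following referrals
    whois -c RU CONTACT-ID   # equivalent to -h RU.whois-servers.net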
Index: projects/nand/usr.sbin/bsdinstall/scripts/mirrorselect =================================================================== --- projects/nand/usr.sbin/bsdinstall/scripts/mirrorselect (revision 235262) +++ projects/nand/usr.sbin/bsdinstall/scripts/mirrorselect (revision 235263) @@ -1,216 +1,216 @@ #!/bin/sh #- # Copyright (c) 2011 Nathan Whitehorn # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # $FreeBSD$ : ${DIALOG_OK=0} : ${DIALOG_CANCEL=1} : ${DIALOG_HELP=2} : ${DIALOG_EXTRA=3} : ${DIALOG_ITEM_HELP=4} : ${DIALOG_ESC=255} exec 3>&1 MIRROR=`dialog --backtitle "FreeBSD Installer" \ --title "Mirror Selection" --extra-button --extra-label "Other" \ --menu "Please select the site closest to you or \"other\" if you'd like to specify a different choice. Also note that not every site listed here carries more than the base distribution kits. Only Primary sites are guaranteed to carry the full range of possible distributions. Select a site that's close!" 
\ 0 0 0 \ ftp://ftp.freebsd.org "Main Site"\ ftp://snapshots.jp.freebsd.org "Snapshots Server Japan"\ ftp://snapshots.se.freebsd.org "Snapshots Server Sweden"\ ftp://ftp.freebsd.org "IPv6 Main Site"\ ftp://ftp3.ie.freebsd.org "IPv6 Ireland"\ ftp://ftp.il.freebsd.org "IPv6 Israel"\ ftp://ftp2.jp.freebsd.org "IPv6 Japan"\ ftp://ftp4.se.freebsd.org "IPv6 Sweden"\ ftp://ftp4.us.freebsd.org "IPv6 USA"\ ftp://ftp2.tr.freebsd.org "IPv6 Turkey"\ ftp://ftp1.freebsd.org "Primary"\ ftp://ftp2.freebsd.org "Primary #2"\ ftp://ftp3.freebsd.org "Primary #3"\ ftp://ftp4.freebsd.org "Primary #4"\ ftp://ftp5.freebsd.org "Primary #5"\ ftp://ftp6.freebsd.org "Primary #6"\ ftp://ftp7.freebsd.org "Primary #7"\ ftp://ftp8.freebsd.org "Primary #8"\ ftp://ftp9.freebsd.org "Primary #9"\ ftp://ftp10.freebsd.org "Primary #10"\ ftp://ftp11.freebsd.org "Primary #11"\ ftp://ftp12.freebsd.org "Primary #12"\ ftp://ftp13.freebsd.org "Primary #13"\ ftp://ftp14.freebsd.org "Primary #14"\ ftp://ftp.ar.freebsd.org "Argentina"\ ftp://ftp.au.freebsd.org "Australia"\ ftp://ftp2.au.freebsd.org "Australia #2"\ ftp://ftp3.au.freebsd.org "Australia #3"\ ftp://ftp.at.freebsd.org "Austria"\ ftp://ftp2.at.freebsd.org "Austria #2"\ ftp://ftp.br.freebsd.org "Brazil"\ ftp://ftp2.br.freebsd.org "Brazil #2"\ ftp://ftp3.br.freebsd.org "Brazil #3"\ ftp://ftp4.br.freebsd.org "Brazil #4"\ ftp://ftp5.br.freebsd.org "Brazil #5"\ ftp://ftp6.br.freebsd.org "Brazil #6"\ ftp://ftp7.br.freebsd.org "Brazil #7"\ ftp://ftp.ca.freebsd.org "Canada"\ ftp://ftp.cn.freebsd.org "China"\ ftp://ftp2.cn.freebsd.org "China #2"\ ftp://ftp.hr.freebsd.org "Croatia"\ ftp://ftp.cz.freebsd.org "Czech Republic"\ ftp://ftp.dk.freebsd.org "Denmark"\ ftp://ftp2.dk.freebsd.org "Denmark #2"\ ftp://ftp.ee.freebsd.org "Estonia"\ ftp://ftp.fi.freebsd.org "Finland"\ ftp://ftp.fr.freebsd.org "France"\ ftp://ftp2.fr.freebsd.org "IPv6 France #2"\ ftp://ftp3.fr.freebsd.org "France #3"\ - ftp://ftp4.fr.freebsd.org "France #4"\ + ftp://ftp4.fr.freebsd.org "IPv6 France #4"\ ftp://ftp5.fr.freebsd.org "France #5"\ ftp://ftp6.fr.freebsd.org "France #6"\ ftp://ftp8.fr.freebsd.org "IPv6 France #8"\ ftp://ftp.de.freebsd.org "Germany"\ ftp://ftp2.de.freebsd.org "Germany #2"\ ftp://ftp3.de.freebsd.org "Germany #3"\ ftp://ftp4.de.freebsd.org "Germany #4"\ ftp://ftp5.de.freebsd.org "Germany #5"\ ftp://ftp6.de.freebsd.org "Germany #6"\ ftp://ftp7.de.freebsd.org "Germany #7"\ ftp://ftp8.de.freebsd.org "Germany #8"\ ftp://ftp.gr.freebsd.org "Greece"\ ftp://ftp2.gr.freebsd.org "Greece #2"\ ftp://ftp.hu.freebsd.org "Hungary"\ ftp://ftp.is.freebsd.org "Iceland"\ ftp://ftp.ie.freebsd.org "Ireland"\ ftp://ftp2.ie.freebsd.org "Ireland #2"\ ftp://ftp3.ie.freebsd.org "Ireland #3"\ ftp://ftp.il.freebsd.org "Israel"\ ftp://ftp.it.freebsd.org "Italy"\ ftp://ftp.jp.freebsd.org "Japan"\ ftp://ftp2.jp.freebsd.org "Japan #2"\ ftp://ftp3.jp.freebsd.org "Japan #3"\ ftp://ftp4.jp.freebsd.org "Japan #4"\ ftp://ftp5.jp.freebsd.org "Japan #5"\ ftp://ftp6.jp.freebsd.org "Japan #6"\ ftp://ftp7.jp.freebsd.org "Japan #7"\ ftp://ftp8.jp.freebsd.org "Japan #8"\ ftp://ftp9.jp.freebsd.org "Japan #9"\ ftp://ftp.kr.freebsd.org "Korea"\ ftp://ftp2.kr.freebsd.org "Korea #2"\ ftp://ftp.lt.freebsd.org "Lithuania"\ ftp://ftp.nl.freebsd.org "Netherlands"\ ftp://ftp2.nl.freebsd.org "Netherlands #2"\ ftp://ftp.no.freebsd.org "Norway"\ ftp://ftp3.no.freebsd.org "Norway #3"\ ftp://ftp.pl.freebsd.org "Poland"\ ftp://ftp2.pl.freebsd.org "Poland #2"\ ftp://ftp5.pl.freebsd.org "Poland #5"\ ftp://ftp.pt.freebsd.org "Portugal"\ 
ftp://ftp2.pt.freebsd.org "Portugal #2"\ ftp://ftp4.pt.freebsd.org "Portugal #4"\ ftp://ftp.ro.freebsd.org "Romania"\ ftp://ftp.ru.freebsd.org "Russia"\ ftp://ftp2.ru.freebsd.org "Russia #2"\ ftp://ftp3.ru.freebsd.org "Russia #3"\ ftp://ftp4.ru.freebsd.org "Russia #4"\ ftp://ftp.sg.freebsd.org "Singapore"\ ftp://ftp.sk.freebsd.org "Slovak Republic"\ ftp://ftp.si.freebsd.org "Slovenia"\ ftp://ftp2.si.freebsd.org "Slovenia #2"\ ftp://ftp.za.freebsd.org "South Africa"\ ftp://ftp2.za.freebsd.org "South Africa #2"\ ftp://ftp3.za.freebsd.org "South Africa #3"\ ftp://ftp4.za.freebsd.org "South Africa #4"\ ftp://ftp.es.freebsd.org "Spain"\ ftp://ftp2.es.freebsd.org "Spain #2"\ ftp://ftp3.es.freebsd.org "Spain #3"\ ftp://ftp.se.freebsd.org "Sweden"\ ftp://ftp2.se.freebsd.org "Sweden #2"\ ftp://ftp3.se.freebsd.org "Sweden #3"\ ftp://ftp4.se.freebsd.org "Sweden #4"\ ftp://ftp5.se.freebsd.org "Sweden #5"\ ftp://ftp.ch.freebsd.org "Switzerland"\ ftp://ftp2.ch.freebsd.org "Switzerland #2"\ ftp://ftp.tw.freebsd.org "Taiwan"\ ftp://ftp2.tw.freebsd.org "Taiwan #2"\ ftp://ftp3.tw.freebsd.org "Taiwan #3"\ ftp://ftp4.tw.freebsd.org "Taiwan #4"\ ftp://ftp6.tw.freebsd.org "Taiwan #6"\ ftp://ftp11.tw.freebsd.org "Taiwan #11"\ ftp://ftp.tr.freebsd.org "Turkey"\ ftp://ftp2.tr.freebsd.org "Turkey #2"\ ftp://ftp.uk.freebsd.org "UK"\ ftp://ftp2.uk.freebsd.org "UK #2"\ ftp://ftp3.uk.freebsd.org "UK #3"\ ftp://ftp4.uk.freebsd.org "UK #4"\ ftp://ftp5.uk.freebsd.org "UK #5"\ ftp://ftp6.uk.freebsd.org "UK #6"\ ftp://ftp.ua.freebsd.org "Ukraine"\ ftp://ftp2.ua.freebsd.org "Ukraine #2"\ ftp://ftp5.ua.freebsd.org "Ukraine #5"\ ftp://ftp6.ua.freebsd.org "Ukraine #6"\ ftp://ftp7.ua.freebsd.org "Ukraine #7"\ ftp://ftp8.ua.freebsd.org "Ukraine #8"\ ftp://ftp1.us.freebsd.org "USA #1"\ ftp://ftp2.us.freebsd.org "USA #2"\ ftp://ftp3.us.freebsd.org "USA #3"\ ftp://ftp4.us.freebsd.org "USA #4"\ ftp://ftp5.us.freebsd.org "USA #5"\ ftp://ftp6.us.freebsd.org "USA #6"\ ftp://ftp7.us.freebsd.org "USA #7"\ ftp://ftp8.us.freebsd.org "USA #8"\ ftp://ftp9.us.freebsd.org "USA #9"\ ftp://ftp10.us.freebsd.org "USA #10"\ ftp://ftp11.us.freebsd.org "USA #11"\ ftp://ftp12.us.freebsd.org "USA #12"\ ftp://ftp13.us.freebsd.org "USA #13"\ ftp://ftp14.us.freebsd.org "USA #14"\ ftp://ftp15.us.freebsd.org "USA #15"\ 2>&1 1>&3` MIRROR_BUTTON=$? exec 3>&- BSDINSTALL_DISTSITE="$MIRROR/pub/FreeBSD/releases/`uname -m`/`uname -p`/`uname -r`" case $MIRROR_BUTTON in $DIALOG_CANCEL) exit 1 ;; $DIALOG_OK) ;; $DIALOG_EXTRA) exec 3>&1 BSDINSTALL_DISTSITE=`dialog --backtitle "FreeBSD Installer" \ --title "Mirror Selection" \ --inputbox "Please enter the URL to an alternate FreeBSD mirror:" \ 0 0 "$BSDINSTALL_DISTSITE" 2>&1 1>&3` MIRROR_BUTTON=$? 
exec 3>&- test $MIRROR_BUTTON -eq 0 || exec $0 $@ ;; esac export BSDINSTALL_DISTSITE echo $BSDINSTALL_DISTSITE >&2 Index: projects/nand/usr.sbin/jail =================================================================== --- projects/nand/usr.sbin/jail (revision 235262) +++ projects/nand/usr.sbin/jail (revision 235263) Property changes on: projects/nand/usr.sbin/jail ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/jail:r235157-235262 Index: projects/nand/usr.sbin/ndiscvt =================================================================== --- projects/nand/usr.sbin/ndiscvt (revision 235262) +++ projects/nand/usr.sbin/ndiscvt (revision 235263) Property changes on: projects/nand/usr.sbin/ndiscvt ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/ndiscvt:r235157-235262 Index: projects/nand/usr.sbin/pkg_install/updating/pkg_updating.1 =================================================================== --- projects/nand/usr.sbin/pkg_install/updating/pkg_updating.1 (revision 235262) +++ projects/nand/usr.sbin/pkg_install/updating/pkg_updating.1 (revision 235263) @@ -1,90 +1,90 @@ .\" .\" FreeBSD updating - Scan the installed ports and show all UPDATING entries .\" that affect one of the installed ports. Alternatively a list of portnames .\" can be passed to pkg_updating .\" .\" "THE BEER-WARE LICENSE" (Revision 42): .\" <beat@chruetertee.ch> wrote this file. As long as you retain this notice you .\" can do whatever you want with this stuff. If we meet some day, and you think .\" this stuff is worth it, you can buy me a beer in return. Beat Gätzi .\" .\" $FreeBSD$ .\" .Dd May 30, 2008 .Dt PKG_UPDATING 1 .Os .Sh NAME .Nm pkg_updating .Nd a utility for displaying UPDATING entries of software packages .Sh SYNOPSIS .Nm .Op Fl h .Op Fl d Ar date .Op Fl f Ar file .Op Ar pkg-name ... .Nm .Sh DESCRIPTION The .Nm command scans the installed ports and shows all UPDATING entries that affect one of the installed ports. Alternatively, a list of pkg-names can be passed. .Sh OPTIONS The following command line options are supported: .Bl -tag -width indent .It Ar pkg-name ... UPDATING entries for the named packages are displayed. .It Fl d , -date Ar date Only entries newer than .Ar date are shown. Use a YYYYMMDD date format. .It Fl f , -file Ar file Defines an alternative location of the UPDATING .Ar file . .It Fl h , -help Print help message. .El .Sh ENVIRONMENT .Bl -tag -width PKG_DBDIR .It Ev PKG_DBDIR Specifies an alternative location for the installed package database. .It Ev PORTSDIR Location of the ports tree. .El .Sh FILES .Bl -tag -width /var/db/pkg -compact .It Pa /var/db/pkg Default location of the installed package database. .It Pa /usr/ports -The default ports directory and default location of the UPDATING file +The default ports directory and default location of the UPDATING file.
.El .Sh EXAMPLES Shows all entries of all installed ports: .Dl % pkg_updating .Pp Shows all entries of all installed ports since 2007-01-01: .Dl % pkg_updating -d 20070101 .Pp Shows all entries for all apache and mysql ports: .Dl % pkg_updating apache mysql .Pp Shows all apache entries since 2006-01-01: .Dl % pkg_updating -d 20060101 apache .Pp Defines that the UPDATING file is in /tmp and shows all entries of all installed ports: .Dl % pkg_updating -f /tmp/UPDATING .Pp Fetch UPDATING file from ftp mirror and show all entries of all installed ports: .Dl % pkg_updating -f ftp://ftp.freebsd.org/pub/FreeBSD/ports/packages/UPDATING .Sh SEE ALSO .Xr pkg_add 1 , .Xr pkg_create 1 , .Xr pkg_delete 1 , .Xr pkg_version 1 .Sh AUTHORS .An Beat G\(:atzi Aq beat@chruetertee.ch .Sh CONTRIBUTORS .An Martin Tournoij Aq carpetsmoker@xs4all.nl .Sh BUGS Sure to be some. Index: projects/nand/usr.sbin/portsnap/portsnap/portsnap.sh =================================================================== --- projects/nand/usr.sbin/portsnap/portsnap/portsnap.sh (revision 235262) +++ projects/nand/usr.sbin/portsnap/portsnap/portsnap.sh (revision 235263) @@ -1,1092 +1,1095 @@ #!/bin/sh #- # Copyright 2004-2005 Colin Percival # All rights reserved # # Redistribution and use in source and binary forms, with or without # modification, are permitted providing that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # $FreeBSD$ #### Usage function -- called from command-line handling code. # Usage instructions. Options not listed: # --debug -- don't filter output from utilities # --no-stats -- don't show progress statistics while fetching files usage() { cat < serverlist_tried # Check that host(1) exists (i.e., that the system wasn't built with the # WITHOUT_BIND set) and don't try to find a mirror if it doesn't exist. if ! which -s host; then : > serverlist_full return 1 fi echo -n "Looking up ${SERVERNAME} mirrors... " # Issue the SRV query and pull out the Priority, Weight, and Target fields. # BIND 9 prints "$name has SRV record ..." while BIND 8 prints # "$name server selection ..."; we allow either format. MLIST="_http._tcp.${SERVERNAME}" host -t srv "${MLIST}" | sed -nE "s/${MLIST} (has SRV record|server selection) //p" | cut -f 1,2,4 -d ' ' | sed -e 's/\.$//' | sort > serverlist_full # If no records, give up -- we'll just use the server name we were given. if [ `wc -l < serverlist_full` -eq 0 ]; then echo "none found." return 1 fi # Report how many mirrors we found. 
echo `wc -l < serverlist_full` "mirrors found." # Generate a random seed for use in picking mirrors. If HTTP_PROXY # is set, this will be used to generate the seed; otherwise, the seed # will be random. if [ -n "${HTTP_PROXY}${http_proxy}" ]; then RANDVALUE=`sha256 -qs "${HTTP_PROXY}${http_proxy}" | tr -d 'a-f' | cut -c 1-9` else RANDVALUE=`jot -r 1 0 999999999` fi } # Pick a mirror. Returns 1 if we have run out of mirrors to try. fetch_pick_server() { # Generate a list of not-yet-tried mirrors sort serverlist_tried | comm -23 serverlist_full - > serverlist # Have we run out of mirrors? if [ `wc -l < serverlist` -eq 0 ]; then echo "No mirrors remaining, giving up." return 1 fi # Find the highest priority level (lowest numeric value). SRV_PRIORITY=`cut -f 1 -d ' ' serverlist | sort -n | head -1` # Add up the weights of the response lines at that priority level. SRV_WSUM=0; while read X; do case "$X" in ${SRV_PRIORITY}\ *) SRV_W=`echo $X | cut -f 2 -d ' '` SRV_WSUM=$(($SRV_WSUM + $SRV_W)) ;; esac done < serverlist # If all the weights are 0, pretend that they are all 1 instead. if [ ${SRV_WSUM} -eq 0 ]; then SRV_WSUM=`grep -E "^${SRV_PRIORITY} " serverlist | wc -l` SRV_W_ADD=1 else SRV_W_ADD=0 fi # Pick a value between 0 and the sum of the weights - 1 SRV_RND=`expr ${RANDVALUE} % ${SRV_WSUM}` # Read through the list of mirrors and set SERVERNAME. Write the line # corresponding to the mirror we selected into serverlist_tried so that # we won't try it again. while read X; do case "$X" in ${SRV_PRIORITY}\ *) SRV_W=`echo $X | cut -f 2 -d ' '` SRV_W=$(($SRV_W + $SRV_W_ADD)) if [ $SRV_RND -lt $SRV_W ]; then SERVERNAME=`echo $X | cut -f 3 -d ' '` echo "$X" >> serverlist_tried break else SRV_RND=$(($SRV_RND - $SRV_W)) fi ;; esac done < serverlist } # Check that we have a public key with an appropriate hash, or # fetch the key if it doesn't exist. Returns 1 if the key has # not yet been fetched. fetch_key() { if [ -r pub.ssl ] && [ `${SHA256} -q pub.ssl` = ${KEYPRINT} ]; then return 0 fi echo -n "Fetching public key from ${SERVERNAME}... " rm -f pub.ssl fetch ${QUIETFLAG} http://${SERVERNAME}/pub.ssl \ 2>${QUIETREDIR} || true if ! [ -r pub.ssl ]; then echo "failed." return 1 fi if ! [ `${SHA256} -q pub.ssl` = ${KEYPRINT} ]; then echo "key has incorrect hash." rm -f pub.ssl return 1 fi echo "done." } # Fetch a snapshot tag fetch_tag() { rm -f snapshot.ssl tag.new echo ${NDEBUG} "Fetching snapshot tag from ${SERVERNAME}... " fetch ${QUIETFLAG} http://${SERVERNAME}/$1.ssl \ 2>${QUIETREDIR} || true if ! [ -r $1.ssl ]; then echo "failed." return 1 fi openssl rsautl -pubin -inkey pub.ssl -verify \ < $1.ssl > tag.new 2>${QUIETREDIR} || true rm $1.ssl if ! [ `wc -l < tag.new` = 1 ] || ! grep -qE "^portsnap\|[0-9]{10}\|[0-9a-f]{64}" tag.new; then echo "invalid snapshot tag." return 1 fi echo "done." SNAPSHOTDATE=`cut -f 2 -d '|' < tag.new` SNAPSHOTHASH=`cut -f 3 -d '|' < tag.new` } # Sanity-check the date on a snapshot tag fetch_snapshot_tagsanity() { if [ `date "+%s"` -gt `expr ${SNAPSHOTDATE} + 31536000` ]; then echo "Snapshot appears to be more than a year old!" echo "(Is the system clock correct?)" echo "Cowardly refusing to proceed any further." return 1 fi if [ `date "+%s"` -lt `expr ${SNAPSHOTDATE} - 86400` ]; then echo -n "Snapshot appears to have been created more than " echo "one day into the future!" echo "(Is the system clock correct?)" echo "Cowardly refusing to proceed any further." 
return 1 fi } # Sanity-check the date on a snapshot update tag fetch_update_tagsanity() { fetch_snapshot_tagsanity || return 1 if [ ${OLDSNAPSHOTDATE} -gt ${SNAPSHOTDATE} ]; then echo -n "Latest snapshot on server is " echo "older than what we already have!" echo -n "Cowardly refusing to downgrade from " date -r ${OLDSNAPSHOTDATE} echo "to `date -r ${SNAPSHOTDATE}`." return 1 fi } # Compare old and new tags; return 1 if update is unnecessary fetch_update_neededp() { if [ ${OLDSNAPSHOTDATE} -eq ${SNAPSHOTDATE} ]; then echo -n "Latest snapshot on server matches " echo "what we already have." echo "No updates needed." rm tag.new return 1 fi if [ ${OLDSNAPSHOTHASH} = ${SNAPSHOTHASH} ]; then echo -n "Ports tree hasn't changed since " echo "last snapshot." echo "No updates needed." rm tag.new return 1 fi return 0 } # Fetch snapshot metadata file fetch_metadata() { rm -f ${SNAPSHOTHASH} tINDEX.new echo ${NDEBUG} "Fetching snapshot metadata... " fetch ${QUIETFLAG} http://${SERVERNAME}/t/${SNAPSHOTHASH} \ 2>${QUIETREDIR} || return if [ "`${SHA256} -q ${SNAPSHOTHASH}`" != ${SNAPSHOTHASH} ]; then echo "snapshot metadata corrupt." return 1 fi mv ${SNAPSHOTHASH} tINDEX.new echo "done." } # Warn user about bogus metadata fetch_metadata_freakout() { echo echo "Portsnap metadata is correctly signed, but contains" echo "at least one line which appears bogus." echo "Cowardly refusing to proceed any further." } # Sanity-check a snapshot metadata file fetch_metadata_sanity() { if grep -qvE "^[0-9A-Z.]+\|[0-9a-f]{64}$" tINDEX.new; then fetch_metadata_freakout return 1 fi if [ `look INDEX tINDEX.new | wc -l` != 1 ]; then echo echo "Portsnap metadata appears bogus." echo "Cowardly refusing to proceed any further." return 1 fi } # Take a list of ${oldhash}|${newhash} and output a list of needed patches fetch_make_patchlist() { IFS='|' echo "" 1>${QUIETREDIR} grep -vE "^([0-9a-f]{64})\|\1$" | while read X Y; do printf "Processing: $X $Y ...\r" 1>${QUIETREDIR} if [ -f "files/${Y}.gz" -o ! -f "files/${X}.gz" ]; then continue; fi echo "${X}|${Y}" done echo "" 1>${QUIETREDIR} IFS= } # Print user-friendly progress statistics fetch_progress() { LNC=0 while read x; do LNC=$(($LNC + 1)) if [ $(($LNC % 10)) = 0 ]; then echo -n $LNC elif [ $(($LNC % 2)) = 0 ]; then echo -n . fi done echo -n " " } # Sanity-check an index file fetch_index_sanity() { if grep -qvE "^[-_+./@0-9A-Za-z]+\|[0-9a-f]{64}$" INDEX.new || fgrep -q "./" INDEX.new; then fetch_metadata_freakout return 1 fi } # Verify a list of files fetch_snapshot_verify() { while read F; do if [ "`gunzip -c snap/${F} | ${SHA256} -q`" != ${F} ]; then echo "snapshot corrupt." return 1 fi done return 0 } # Fetch a snapshot tarball, extract, and verify. fetch_snapshot() { while ! fetch_tag snapshot; do fetch_pick_server || return 1 done fetch_snapshot_tagsanity || return 1 fetch_metadata || return 1 fetch_metadata_sanity || return 1 rm -rf snap/ # Don't ask fetch(1) to be quiet -- downloading a snapshot of ~ 35MB will # probably take a while, so the progress reports that fetch(1) generates # will be useful for keeping the user's attention from drifting. echo "Fetching snapshot generated at `date -r ${SNAPSHOTDATE}`:" fetch -r http://${SERVERNAME}/s/${SNAPSHOTHASH}.tgz || return 1 echo -n "Extracting snapshot... " tar -xz --numeric-owner -f ${SNAPSHOTHASH}.tgz snap/ || return 1 rm ${SNAPSHOTHASH}.tgz echo "done." echo -n "Verifying snapshot integrity... "
# Verify the metadata files cut -f 2 -d '|' tINDEX.new | fetch_snapshot_verify || return 1 # Extract the index rm -f INDEX.new gunzip -c snap/`look INDEX tINDEX.new | cut -f 2 -d '|'`.gz > INDEX.new fetch_index_sanity || return 1 # Verify the snapshot contents cut -f 2 -d '|' INDEX.new | fetch_snapshot_verify || return 1 echo "done." # Move files into their proper locations rm -f tag INDEX tINDEX rm -rf files mv tag.new tag mv tINDEX.new tINDEX mv INDEX.new INDEX mv snap/ files/ return 0 } # Update a compressed snapshot fetch_update() { rm -f patchlist diff OLD NEW filelist INDEX.new OLDSNAPSHOTDATE=`cut -f 2 -d '|' < tag` OLDSNAPSHOTHASH=`cut -f 3 -d '|' < tag` while ! fetch_tag latest; do fetch_pick_server || return 1 done fetch_update_tagsanity || return 1 fetch_update_neededp || return 0 fetch_metadata || return 1 fetch_metadata_sanity || return 1 echo -n "Updating from `date -r ${OLDSNAPSHOTDATE}` " echo "to `date -r ${SNAPSHOTDATE}`." # Generate a list of wanted metadata patches join -t '|' -o 1.2,2.2 tINDEX tINDEX.new | fetch_make_patchlist > patchlist # Attempt to fetch metadata patches echo -n "Fetching `wc -l < patchlist | tr -d ' '` " echo ${NDEBUG} "metadata patches.${DDSTATS}" tr '|' '-' < patchlist | lam -s "tp/" - -s ".gz" | xargs ${XARGST} ${PHTTPGET} ${SERVERNAME} \ 2>${STATSREDIR} | fetch_progress echo "done." # Attempt to apply metadata patches echo -n "Applying metadata patches... " IFS='|' while read X Y; do if [ ! -f "${X}-${Y}.gz" ]; then continue; fi gunzip -c < ${X}-${Y}.gz > diff gunzip -c < files/${X}.gz > OLD cut -c 2- diff | join -t '|' -v 2 - OLD > ptmp grep '^\+' diff | cut -c 2- | sort -k 1,1 -t '|' -m - ptmp > NEW if [ `${SHA256} -q NEW` = ${Y} ]; then mv NEW files/${Y} gzip -n files/${Y} fi rm -f diff OLD NEW ${X}-${Y}.gz ptmp done < patchlist 2>${QUIETREDIR} IFS= echo "done." # Update metadata without patches join -t '|' -v 2 tINDEX tINDEX.new | cut -f 2 -d '|' /dev/stdin patchlist | while read Y; do if [ ! -f "files/${Y}.gz" ]; then echo ${Y}; fi done > filelist echo -n "Fetching `wc -l < filelist | tr -d ' '` " echo ${NDEBUG} "metadata files... " lam -s "f/" - -s ".gz" < filelist | xargs ${XARGST} ${PHTTPGET} ${SERVERNAME} \ 2>${QUIETREDIR} while read Y; do echo -n "Verifying ${Y}... " 1>${QUIETREDIR} if [ `gunzip -c < ${Y}.gz | ${SHA256} -q` = ${Y} ]; then mv ${Y}.gz files/${Y}.gz else echo "metadata is corrupt." return 1 fi echo "ok." 1>${QUIETREDIR} done < filelist echo "done." # Extract the index echo -n "Extracting index... " 1>${QUIETREDIR} gunzip -c files/`look INDEX tINDEX.new | cut -f 2 -d '|'`.gz > INDEX.new fetch_index_sanity || return 1 # If we have decided to refuse certain updates, construct a hybrid index which # is equal to the old index for parts of the tree which we don't want to # update, and equal to the new index for parts of the tree which get updates. # This means that we should always have a "complete snapshot" of the ports # tree -- with the caveat that it isn't actually a snapshot. if [ ! -z "${REFUSE}" ]; then echo "Refusing to download updates for ${REFUSE}" \ >${QUIETREDIR} grep -Ev "${REFUSE}" INDEX.new > INDEX.tmp grep -E "${REFUSE}" INDEX | sort -m -k 1,1 -t '|' - INDEX.tmp > INDEX.new rm -f INDEX.tmp fi # Generate a list of wanted ports patches echo -n "Generating list of wanted patches..." 1>${QUIETREDIR} join -t '|' -o 1.2,2.2 INDEX INDEX.new | fetch_make_patchlist > patchlist echo " done."
1>${QUIETREDIR} # Attempt to fetch ports patches echo -n "Fetching `wc -l < patchlist | tr -d ' '` " echo ${NDEBUG} "patches.${DDSTATS}" tr '|' '-' < patchlist | lam -s "bp/" - | xargs ${XARGST} ${PHTTPGET} ${SERVERNAME} \ 2>${STATSREDIR} | fetch_progress echo "done." # Attempt to apply ports patches PATCHCNT=`wc -l < patchlist` echo "Applying patches... " IFS='|' I=0 while read X Y; do I=$(($I + 1)) F="${X}-${Y}" if [ ! -f "${F}" ]; then printf " Skipping ${F} (${I} of ${PATCHCNT}).\r" continue; fi echo " Processing ${F}..." 1>${QUIETREDIR} gunzip -c < files/${X}.gz > OLD ${BSPATCH} OLD NEW ${X}-${Y} if [ `${SHA256} -q NEW` = ${Y} ]; then mv NEW files/${Y} gzip -n files/${Y} fi rm -f diff OLD NEW ${X}-${Y} done < patchlist 2>${QUIETREDIR} IFS= echo "done." # Update ports without patches join -t '|' -v 2 INDEX INDEX.new | cut -f 2 -d '|' /dev/stdin patchlist | while read Y; do if [ ! -f "files/${Y}.gz" ]; then echo ${Y}; fi done > filelist echo -n "Fetching `wc -l < filelist | tr -d ' '` " echo ${NDEBUG} "new ports or files... " lam -s "f/" - -s ".gz" < filelist | xargs ${XARGST} ${PHTTPGET} ${SERVERNAME} \ 2>${QUIETREDIR} I=0 while read Y; do I=$(($I + 1)) printf " Processing ${Y} (${I} of ${PATCHCNT}).\r" 1>${QUIETREDIR} if [ `gunzip -c < ${Y}.gz | ${SHA256} -q` = ${Y} ]; then mv ${Y}.gz files/${Y}.gz else echo "snapshot is corrupt." return 1 fi done < filelist echo "done." # Remove files which are no longer needed cut -f 2 -d '|' tINDEX INDEX | sort > oldfiles cut -f 2 -d '|' tINDEX.new INDEX.new | sort | comm -13 - oldfiles | lam -s "files/" - -s ".gz" | xargs rm -f rm patchlist filelist oldfiles # We're done! mv INDEX.new INDEX mv tINDEX.new tINDEX mv tag.new tag return 0 } # Do the actual work involved in "fetch" / "cron". fetch_run() { fetch_pick_server_init && fetch_pick_server while ! fetch_key; do fetch_pick_server || return 1 done if ! [ -d files -a -r tag -a -r INDEX -a -r tINDEX ]; then fetch_snapshot || return 1 fi fetch_update || return 1 } # Build a ports INDEX file extract_make_index() { if ! look $1 ${WORKDIR}/tINDEX > /dev/null; then echo -n "$1 not provided by portsnap server; " echo "$2 not being generated." else gunzip -c "${WORKDIR}/files/`look $1 ${WORKDIR}/tINDEX | cut -f 2 -d '|'`.gz" | cat - ${LOCALDESC} | ${MKINDEX} /dev/stdin > ${PORTSDIR}/$2 fi } # Create INDEX, INDEX-5, INDEX-6 extract_indices() { echo -n "Building new INDEX files... " for PAIR in ${INDEXPAIRS}; do INDEXFILE=`echo ${PAIR} | cut -f 1 -d '|'` DESCRIBEFILE=`echo ${PAIR} | cut -f 2 -d '|'` extract_make_index ${DESCRIBEFILE} ${INDEXFILE} || return 1 done echo "done." } # Create .portsnap.INDEX; if we are REFUSEing to touch certain directories, # merge the values from any existing .portsnap.INDEX file. extract_metadata() { if [ -z "${REFUSE}" ]; then sort ${WORKDIR}/INDEX > ${PORTSDIR}/.portsnap.INDEX elif [ -f ${PORTSDIR}/.portsnap.INDEX ]; then grep -E "${REFUSE}" ${PORTSDIR}/.portsnap.INDEX \ > ${PORTSDIR}/.portsnap.INDEX.tmp grep -vE "${REFUSE}" ${WORKDIR}/INDEX | sort | sort -m - ${PORTSDIR}/.portsnap.INDEX.tmp \ > ${PORTSDIR}/.portsnap.INDEX rm -f ${PORTSDIR}/.portsnap.INDEX.tmp else grep -vE "${REFUSE}" ${WORKDIR}/INDEX | sort \ > ${PORTSDIR}/.portsnap.INDEX fi } # Do the actual work involved in "extract" extract_run() { local IFS='|' mkdir -p ${PORTSDIR} || return 1 if ! if ! [ -z "${EXTRACTPATH}" ]; then grep "^${EXTRACTPATH}" ${WORKDIR}/INDEX elif !
[ -z "${REFUSE}" ]; then grep -vE "${REFUSE}" ${WORKDIR}/INDEX else cat ${WORKDIR}/INDEX fi | while read FILE HASH; do echo ${PORTSDIR}/${FILE} if ! [ -r "${WORKDIR}/files/${HASH}.gz" ]; then echo "files/${HASH}.gz not found -- snapshot corrupt." return 1 fi case ${FILE} in */) rm -rf ${PORTSDIR}/${FILE%/} mkdir -p ${PORTSDIR}/${FILE} tar -xz --numeric-owner -f ${WORKDIR}/files/${HASH}.gz \ -C ${PORTSDIR}/${FILE} ;; *) rm -f ${PORTSDIR}/${FILE} tar -xz --numeric-owner -f ${WORKDIR}/files/${HASH}.gz \ -C ${PORTSDIR} ${FILE} ;; esac done; then return 1 fi if [ ! -z "${EXTRACTPATH}" ]; then return 0; fi extract_metadata extract_indices } update_run_extract() { local IFS='|' # Install new files echo "Extracting new files:" if ! if ! [ -z "${REFUSE}" ]; then grep -vE "${REFUSE}" ${WORKDIR}/INDEX | sort else sort ${WORKDIR}/INDEX fi | comm -13 ${PORTSDIR}/.portsnap.INDEX - | while read FILE HASH; do echo ${PORTSDIR}/${FILE} if ! [ -r "${WORKDIR}/files/${HASH}.gz" ]; then echo "files/${HASH}.gz not found -- snapshot corrupt." return 1 fi case ${FILE} in */) mkdir -p ${PORTSDIR}/${FILE} tar -xz --numeric-owner -f ${WORKDIR}/files/${HASH}.gz \ -C ${PORTSDIR}/${FILE} ;; *) tar -xz --numeric-owner -f ${WORKDIR}/files/${HASH}.gz \ -C ${PORTSDIR} ${FILE} ;; esac done; then return 1 fi } # Do the actual work involved in "update" update_run() { if ! [ -z "${INDEXONLY}" ]; then extract_indices >/dev/null || return 1 return 0 fi if sort ${WORKDIR}/INDEX | cmp -s ${PORTSDIR}/.portsnap.INDEX -; then echo "Ports tree is already up to date." return 0 fi # If we are REFUSEing to touch certain directories, don't remove files # from those directories (even if they are out of date) echo -n "Removing old files and directories... " if ! [ -z "${REFUSE}" ]; then sort ${WORKDIR}/INDEX | comm -23 ${PORTSDIR}/.portsnap.INDEX - | cut -f 1 -d '|' | grep -vE "${REFUSE}" | lam -s "${PORTSDIR}/" - | sed -e 's|/$||' | xargs rm -rf else sort ${WORKDIR}/INDEX | comm -23 ${PORTSDIR}/.portsnap.INDEX - | cut -f 1 -d '|' | lam -s "${PORTSDIR}/" - | sed -e 's|/$||' | xargs rm -rf fi echo "done." update_run_extract || return 1 extract_metadata extract_indices } #### Main functions -- call parameter-handling and core functions # Using the command line, configuration file, and defaults, # set all the parameters which are needed later. get_params() { init_params parse_cmdline $@ sanity_conffile default_conffile parse_conffile default_params } # Fetch command. Make sure that we're being called # interactively, then run fetch_check_params and fetch_run cmd_fetch() { if [ ! -t 0 ]; then echo -n "`basename $0` fetch should not " echo "be run non-interactively." echo "Run `basename $0` cron instead." exit 1 fi fetch_check_params fetch_run || exit 1 } # Cron command. Make sure the parameters are sensible; wait # rand(3600) seconds; then fetch updates. While fetching updates, # send output to a temporary file; only print that file if the # fetching failed. cmd_cron() { fetch_check_params sleep `jot -r 1 0 3600` TMPFILE=`mktemp /tmp/portsnap.XXXXXX` || exit 1 if ! fetch_run >> ${TMPFILE}; then cat ${TMPFILE} rm ${TMPFILE} exit 1 fi rm ${TMPFILE} } # Extract command. Make sure the parameters are sensible, # then extract the ports tree (or part thereof). cmd_extract() { extract_check_params extract_run || exit 1 } # Update command. Make sure the parameters are sensible, # then update the ports tree. cmd_update() { update_check_params update_run || exit 1 } # Alfred command. 
Run 'fetch' or 'cron' depending on # whether stdin is a terminal; then run 'update' or # 'extract' depending on whether ${PORTSDIR} exists. cmd_alfred() { if [ -t 0 ]; then cmd_fetch else cmd_cron fi if [ -d ${PORTSDIR} ]; then cmd_update else cmd_extract fi } #### Entry point # Make sure we find utilities from the base system export PATH=/sbin:/bin:/usr/sbin:/usr/bin:${PATH} # Set LC_ALL in order to avoid problems with character ranges like [A-Z]. export LC_ALL=C get_params $@ for COMMAND in ${COMMANDS}; do cmd_${COMMAND} done Index: projects/nand/usr.sbin/rtadvctl =================================================================== --- projects/nand/usr.sbin/rtadvctl (revision 235262) +++ projects/nand/usr.sbin/rtadvctl (revision 235263) Property changes on: projects/nand/usr.sbin/rtadvctl ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/rtadvctl:r235157-235262 Index: projects/nand/usr.sbin/rtadvd =================================================================== --- projects/nand/usr.sbin/rtadvd (revision 235262) +++ projects/nand/usr.sbin/rtadvd (revision 235263) Property changes on: projects/nand/usr.sbin/rtadvd ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/rtadvd:r235157-235262 Index: projects/nand/usr.sbin/rtsold =================================================================== --- projects/nand/usr.sbin/rtsold (revision 235262) +++ projects/nand/usr.sbin/rtsold (revision 235263) Property changes on: projects/nand/usr.sbin/rtsold ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/rtsold:r235157-235262 Index: projects/nand/usr.sbin/zic =================================================================== --- projects/nand/usr.sbin/zic (revision 235262) +++ projects/nand/usr.sbin/zic (revision 235263) Property changes on: projects/nand/usr.sbin/zic ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/usr.sbin/zic:r235157-235262 Index: projects/nand =================================================================== --- projects/nand (revision 235262) +++ projects/nand (revision 235263) Property changes on: projects/nand ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r235157-235262
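The weighted mirror selection implemented by fetch_pick_server in the portsnap.sh hunk above follows the SRV selection rules of RFC 2782: entries at the numerically lowest priority compete, and one of them is chosen at random with probability proportional to its weight. The following is a minimal standalone sketch of that technique, not part of the commit; the file name "servers" (holding "priority weight target" lines like serverlist_full) and the function name pick_mirror are hypothetical, and the tried-server bookkeeping of the real script is omitted.

#!/bin/sh
# Sketch of RFC 2782-style weighted selection (hypothetical names).
pick_mirror() {
	# Only entries at the best (numerically lowest) priority compete.
	PRI=`cut -f 1 -d ' ' servers | sort -n | head -1`

	# Sum the weights at that priority level.
	WSUM=0
	while read P W T; do
		if [ "${P}" = "${PRI}" ]; then
			WSUM=$((${WSUM} + ${W}))
		fi
	done < servers

	# If every weight is zero, treat each entry as weight 1 so the
	# choice degenerates to a uniform pick, as the real script does.
	if [ ${WSUM} -eq 0 ]; then
		WSUM=`grep -c "^${PRI} " servers`
		WADD=1
	else
		WADD=0
	fi

	# Pick a point in [0, WSUM) and walk the list until the running
	# total of weights passes it; heavier entries are hit more often.
	R=`jot -r 1 0 $((${WSUM} - 1))`
	while read P W T; do
		if [ "${P}" != "${PRI}" ]; then continue; fi
		W=$((${W} + ${WADD}))
		if [ ${R} -lt ${W} ]; then
			echo "${T}"
			return 0
		fi
		R=$((${R} - ${W}))
	done < servers
}

Run against a three-line servers file such as "1 40 a.example.net", "1 10 b.example.net", "2 50 c.example.net", pick_mirror prints a.example.net roughly four times as often as b.example.net and never c.example.net, which is the load-spreading behaviour fetch_pick_server relies on when choosing among mirrors.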