Index: user/ngie/more-tests2/lib/clang/clang.build.mk =================================================================== --- user/ngie/more-tests2/lib/clang/clang.build.mk (revision 288954) +++ user/ngie/more-tests2/lib/clang/clang.build.mk (revision 288955) @@ -1,260 +1,262 @@ # $FreeBSD$ .include CLANG_SRCS= ${LLVM_SRCS}/tools/clang CFLAGS+= -I${LLVM_SRCS}/include -I${CLANG_SRCS}/include \ -I${LLVM_SRCS}/${SRCDIR} ${INCDIR:C/^/-I${LLVM_SRCS}\//} -I. \ -I${LLVM_SRCS}/../../lib/clang/include \ -DLLVM_ON_UNIX -DLLVM_ON_FREEBSD \ -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS #-DNDEBUG .if ${MK_CLANG_FULL} != "no" CFLAGS+= -DCLANG_ENABLE_ARCMT \ -DCLANG_ENABLE_STATIC_ANALYZER .endif # MK_CLANG_FULL # LLVM is not strict aliasing safe as of 12/31/2011 CFLAGS+= -fno-strict-aliasing TARGET_ARCH?= ${MACHINE_ARCH} BUILD_ARCH?= ${MACHINE_ARCH} .if ${TARGET_ARCH:Marm*hf*} != "" TARGET_ABI= gnueabihf .elif ${TARGET_ARCH:Marm*} != "" TARGET_ABI= gnueabi .else TARGET_ABI= unknown .endif TARGET_TRIPLE?= ${TARGET_ARCH:C/amd64/x86_64/:C/armv6hf/armv6/:C/arm64/aarch64/}-${TARGET_ABI}-freebsd11.0 BUILD_TRIPLE?= ${BUILD_ARCH:C/amd64/x86_64/:C/armv6hf/armv6/:C/arm64/aarch64/}-unknown-freebsd11.0 CFLAGS+= -DLLVM_DEFAULT_TARGET_TRIPLE=\"${TARGET_TRIPLE}\" \ -DLLVM_HOST_TRIPLE=\"${BUILD_TRIPLE}\" \ -DDEFAULT_SYSROOT=\"${TOOLS_PREFIX}\" CXXFLAGS+= -std=c++11 -fno-exceptions -fno-rtti CXXFLAGS.clang+= -stdlib=libc++ .PATH: ${LLVM_SRCS}/${SRCDIR} .if empty(TOOLSDIR) || !exists(${TOOLSDIR}/usr/bin/clang-tblgen) .if ${MACHINE} == "host" && defined(BOOTSTRAPPING_TOOLS) .if !empty(LEGACY_TOOLS) && exists(${LEGACY_TOOLS}/usr/bin/tblgen) TOOLSDIR= ${LEGACY_TOOLS} .endif .endif .if ${MK_STAGING} == "yes" && exists(${STAGE_HOST_OBJTOP:Uno}/usr/bin/tblgen) TOOLSDIR= ${STAGE_HOST_OBJTOP} .endif .if exists(${LEGACY_TOOLS:Uno}/usr/bin/tblgen) TOOLSDIR= ${LEGACY_TOOLS} .endif .endif TOOLSDIR?= .if !empty(TOOLSDIR) && exists(${TOOLSDIR}/usr/bin/clang-tblgen) TBLGEN= ${TOOLSDIR}/usr/bin/tblgen CLANG_TBLGEN= ${TOOLSDIR}/usr/bin/clang-tblgen .endif TBLGEN?= tblgen CLANG_TBLGEN?= clang-tblgen Intrinsics.inc.h: ${LLVM_SRCS}/include/llvm/IR/Intrinsics.td ${TBLGEN} -gen-intrinsic \ -I ${LLVM_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${LLVM_SRCS}/include/llvm/IR/Intrinsics.td .for arch in \ AArch64/AArch64 ARM/ARM Mips/Mips PowerPC/PPC Sparc/Sparc X86/X86 . for hdr in \ AsmMatcher/-gen-asm-matcher \ AsmWriter1/-gen-asm-writer,-asmwriternum=1 \ AsmWriter/-gen-asm-writer \ CallingConv/-gen-callingconv \ CodeEmitter/-gen-emitter \ DAGISel/-gen-dag-isel \ DisassemblerTables/-gen-disassembler \ FastISel/-gen-fast-isel \ InstrInfo/-gen-instr-info \ MCCodeEmitter/-gen-emitter \ MCPseudoLowering/-gen-pseudo-lowering \ RegisterInfo/-gen-register-info \ SubtargetInfo/-gen-subtarget ${arch:T}Gen${hdr:H:C/$/.inc.h/}: ${LLVM_SRCS}/lib/Target/${arch:H}/${arch:T}.td ${TBLGEN} ${hdr:T:C/,/ /g} \ -I ${LLVM_SRCS}/include -I ${LLVM_SRCS}/lib/Target/${arch:H} \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${LLVM_SRCS}/lib/Target/${arch:H}/${arch:T}.td . 
endfor .endfor Attrs.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-classes \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrDump.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-dump \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrHasAttributeImpl.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-has-attribute-impl \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrImpl.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-impl \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrList.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-list \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrParsedAttrImpl.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-parsed-attr-impl \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrParsedAttrKinds.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-parsed-attr-kinds \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrParsedAttrList.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-parsed-attr-list \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrParserStringSwitches.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-parser-string-switches \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrPCHRead.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-pch-read \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrPCHWrite.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-pch-write \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrSpellingListIndex.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-spelling-index \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrTemplateInstantiate.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-template-instantiate \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td AttrVisitor.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td ${CLANG_TBLGEN} -gen-clang-attr-ast-visitor \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/Attr.td CommentCommandInfo.inc.h: ${CLANG_SRCS}/include/clang/AST/CommentCommands.td ${CLANG_TBLGEN} -gen-clang-comment-command-info \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/AST/CommentCommands.td CommentCommandList.inc.h: ${CLANG_SRCS}/include/clang/AST/CommentCommands.td ${CLANG_TBLGEN} -gen-clang-comment-command-list \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/AST/CommentCommands.td 
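# Illustrative note (comment only; not part of the original change): the
# nested ".for arch"/".for hdr" loop above expands into one tblgen rule per
# target/generator pair.  For example, the AArch64 + InstrInfo iteration is
# equivalent to writing out:
#
#   AArch64GenInstrInfo.inc.h: ${LLVM_SRCS}/lib/Target/AArch64/AArch64.td
#	${TBLGEN} -gen-instr-info \
#	    -I ${LLVM_SRCS}/include -I ${LLVM_SRCS}/lib/Target/AArch64 \
#	    -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \
#	    ${LLVM_SRCS}/lib/Target/AArch64/AArch64.td
#
# ${arch:H}/${arch:T} pick the head/tail of a pair such as "PowerPC/PPC"
# (source directory vs. .td file prefix), and ${hdr:T:C/,/ /g} turns a
# comma-separated generator spec such as "-gen-asm-writer,-asmwriternum=1"
# back into separate options.  The -d flag emits a make(1) dependency file
# alongside each generated header; those .d files are what the "sinclude"
# lines further down pull in (now guarded by .if !make(depend) in this
# revision).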
CommentHTMLNamedCharacterReferences.inc.h: \ ${CLANG_SRCS}/include/clang/AST/CommentHTMLNamedCharacterReferences.td ${CLANG_TBLGEN} -gen-clang-comment-html-named-character-references \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/AST/CommentHTMLNamedCharacterReferences.td CommentHTMLTags.inc.h: ${CLANG_SRCS}/include/clang/AST/CommentHTMLTags.td ${CLANG_TBLGEN} -gen-clang-comment-html-tags \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/AST/CommentHTMLTags.td CommentHTMLTagsProperties.inc.h: \ ${CLANG_SRCS}/include/clang/AST/CommentHTMLTags.td ${CLANG_TBLGEN} -gen-clang-comment-html-tags-properties \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/AST/CommentHTMLTags.td CommentNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/CommentNodes.td ${CLANG_TBLGEN} -gen-clang-comment-nodes \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/CommentNodes.td DeclNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/DeclNodes.td ${CLANG_TBLGEN} -gen-clang-decl-nodes \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/DeclNodes.td StmtNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/StmtNodes.td ${CLANG_TBLGEN} -gen-clang-stmt-nodes \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/StmtNodes.td arm_neon.h: ${CLANG_SRCS}/include/clang/Basic/arm_neon.td ${CLANG_TBLGEN} -gen-arm-neon \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/arm_neon.td arm_neon.inc.h: ${CLANG_SRCS}/include/clang/Basic/arm_neon.td ${CLANG_TBLGEN} -gen-arm-neon-sema \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Basic/arm_neon.td DiagnosticGroups.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td ${CLANG_TBLGEN} -gen-clang-diag-groups \ -I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/\.h$/.d/} \ -o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td DiagnosticIndexName.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td ${CLANG_TBLGEN} -gen-clang-diags-index-name \ -I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/\.h$/.d/} \ -o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td .for hdr in AST Analysis Comment Common Driver Frontend Lex Parse Sema Serialization Diagnostic${hdr}Kinds.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td ${CLANG_TBLGEN} -gen-clang-diags-defs -clang-component=${hdr} \ -I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/\.h$/.d/} \ -o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td .endfor # XXX: Atrocious hack, need to clean this up later .if defined(LIB) && ${LIB} == "llvmlibdriver" Options.inc.h: ${LLVM_SRCS}/lib/LibDriver/Options.td ${TBLGEN} -gen-opt-parser-defs \ -I ${LLVM_SRCS}/include \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${LLVM_SRCS}/lib/LibDriver/Options.td .else Options.inc.h: ${CLANG_SRCS}/include/clang/Driver/Options.td ${TBLGEN} -gen-opt-parser-defs \ -I ${LLVM_SRCS}/include -I ${CLANG_SRCS}/include/clang/Driver \ -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/include/clang/Driver/Options.td .endif Checkers.inc.h: ${CLANG_SRCS}/lib/StaticAnalyzer/Checkers/Checkers.td ${CLANG_TBLGEN} -gen-clang-sa-checkers \ -I ${CLANG_SRCS}/include -d ${.TARGET:C/\.h$/.d/} -o ${.TARGET} \ ${CLANG_SRCS}/lib/StaticAnalyzer/Checkers/Checkers.td -.for dep in ${TGHDRS:C/$/.inc.d/} -. sinclude "${dep}" -.endfor +.if !make(depend) +. for dep in ${TGHDRS:C/$/.inc.d/} +. sinclude "${dep}" +. 
endfor +.endif SRCS+= ${TGHDRS:C/$/.inc.h/} DPSRCS+= ${TGHDRS:C/$/.inc.h/} CLEANFILES+= ${TGHDRS:C/$/.inc.h/} ${TGHDRS:C/$/.inc.d/} # if we are not doing explicit 'make depend', there is # nothing to cause these to be generated. beforebuild: ${SRCS:M*.inc.h} Index: user/ngie/more-tests2/lib/libarchive/test/Makefile =================================================================== --- user/ngie/more-tests2/lib/libarchive/test/Makefile (revision 288954) +++ user/ngie/more-tests2/lib/libarchive/test/Makefile (nonexistent) @@ -1,256 +0,0 @@ -# $FreeBSD$ - -LIBARCHIVEDIR= ${.CURDIR}/../../../contrib/libarchive - -MAN= - -PROG=libarchive_test -INTERNALPROG=yes # Don't install this; it's just for testing -LDADD= -L ${.OBJDIR}/.. -larchive -LIBADD= z bz2 lzma md crypto bsdxml -CFLAGS+= -g -CFLAGS+= -I${.CURDIR}/.. -I${.OBJDIR} -CFLAGS+= -I${LIBARCHIVEDIR}/libarchive -I${LIBARCHIVEDIR}/test_utils -CFLAGS+= -DHAVE_LIBLZMA=1 -DHAVE_LZMA_H=1 - -# Uncomment to link against dmalloc -#LDADD+= -L/usr/local/lib -ldmalloc -#CFLAGS+= -I/usr/local/include -DUSE_DMALLOC - -.PATH: ${LIBARCHIVEDIR}/libarchive/test -TESTS= \ - test_acl_freebsd_nfs4.c \ - test_acl_freebsd_posix1e.c \ - test_acl_nfs4.c \ - test_acl_pax.c \ - test_acl_posix1e.c \ - test_archive_api_feature.c \ - test_archive_clear_error.c \ - test_archive_cmdline.c \ - test_archive_crypto.c \ - test_archive_getdate.c \ - test_archive_match_time.c \ - test_archive_match_owner.c \ - test_archive_match_path.c \ - test_archive_pathmatch.c \ - test_archive_read_close_twice.c \ - test_archive_read_close_twice_open_fd.c \ - test_archive_read_close_twice_open_filename.c \ - test_archive_read_multiple_data_objects.c \ - test_archive_read_next_header_empty.c \ - test_archive_read_next_header_raw.c \ - test_archive_read_open2.c \ - test_archive_read_set_filter_option.c \ - test_archive_read_set_format_option.c \ - test_archive_read_set_option.c \ - test_archive_read_set_options.c \ - test_archive_read_support.c \ - test_archive_set_error.c \ - test_archive_string.c \ - test_archive_string_conversion.c \ - test_archive_write_add_filter_by_name.c \ - test_archive_write_set_filter_option.c \ - test_archive_write_set_format_by_name.c \ - test_archive_write_set_format_option.c \ - test_archive_write_set_option.c \ - test_archive_write_set_options.c \ - test_bad_fd.c \ - test_compat_bzip2.c \ - test_compat_cpio.c \ - test_compat_gtar.c \ - test_compat_gzip.c \ - test_compat_lzip.c \ - test_compat_lzma.c \ - test_compat_lzop.c \ - test_compat_mac.c \ - test_compat_pax_libarchive_2x.c \ - test_compat_solaris_tar_acl.c \ - test_compat_solaris_pax_sparse.c \ - test_compat_tar_hardlink.c \ - test_compat_uudecode.c \ - test_compat_xz.c \ - test_compat_zip.c \ - test_empty_write.c \ - test_entry.c \ - test_entry_strmode.c \ - test_extattr_freebsd.c \ - test_filter_count.c \ - test_fuzz.c \ - test_gnutar_filename_encoding.c \ - test_link_resolver.c \ - test_open_fd.c \ - test_open_failure.c \ - test_open_file.c \ - test_open_filename.c \ - test_pax_filename_encoding.c \ - test_read_data_large.c \ - test_read_disk.c \ - test_read_disk_directory_traversals.c \ - test_read_disk_entry_from_file.c \ - test_read_extract.c \ - test_read_file_nonexistent.c \ - test_read_filter_grzip.c \ - test_read_filter_lrzip.c \ - test_read_filter_lzop.c \ - test_read_filter_lzop_multiple_parts.c \ - test_read_filter_program.c \ - test_read_filter_program_signature.c \ - test_read_filter_uudecode.c \ - test_read_format_7zip.c \ - test_read_format_ar.c \ - test_read_format_cab.c \ - 
test_read_format_cab_filename.c \ - test_read_format_cpio_afio.c \ - test_read_format_cpio_bin.c \ - test_read_format_cpio_bin_Z.c \ - test_read_format_cpio_bin_be.c \ - test_read_format_cpio_bin_bz2.c \ - test_read_format_cpio_bin_gz.c \ - test_read_format_cpio_bin_lzip.c \ - test_read_format_cpio_bin_lzma.c \ - test_read_format_cpio_bin_xz.c \ - test_read_format_cpio_filename.c \ - test_read_format_cpio_odc.c \ - test_read_format_cpio_svr4_gzip.c \ - test_read_format_cpio_svr4c_Z.c \ - test_read_format_cpio_svr4_bzip2_rpm.c \ - test_read_format_cpio_svr4_gzip_rpm.c \ - test_read_format_empty.c \ - test_read_format_gtar_filename.c \ - test_read_format_gtar_gz.c \ - test_read_format_gtar_lzma.c \ - test_read_format_gtar_sparse.c \ - test_read_format_gtar_sparse_skip_entry.c \ - test_read_format_iso_Z.c \ - test_read_format_iso_multi_extent.c \ - test_read_format_iso_xorriso.c \ - test_read_format_isorr_rr_moved.c \ - test_read_format_isojoliet_bz2.c \ - test_read_format_isojoliet_long.c \ - test_read_format_isojoliet_rr.c \ - test_read_format_isojoliet_versioned.c \ - test_read_format_isorr_bz2.c \ - test_read_format_isorr_ce.c \ - test_read_format_isorr_new_bz2.c \ - test_read_format_isozisofs_bz2.c \ - test_read_format_lha.c \ - test_read_format_lha_filename.c \ - test_read_format_mtree.c \ - test_read_format_pax_bz2.c \ - test_read_format_rar.c \ - test_read_format_raw.c \ - test_read_format_tar.c \ - test_read_format_tar_empty_filename.c \ - test_read_format_tar_filename.c \ - test_read_format_tbz.c \ - test_read_format_tgz.c \ - test_read_format_tlz.c \ - test_read_format_txz.c \ - test_read_format_tz.c \ - test_read_format_ustar_filename.c \ - test_read_format_xar.c \ - test_read_format_zip.c \ - test_read_format_zip_comment_stored.c \ - test_read_format_zip_filename.c \ - test_read_format_zip_mac_metadata.c \ - test_read_format_zip_sfx.c \ - test_read_large.c \ - test_read_pax_truncated.c \ - test_read_position.c \ - test_read_set_format.c \ - test_read_truncated.c \ - test_read_truncated_filter.c \ - test_sparse_basic.c \ - test_tar_filenames.c \ - test_tar_large.c \ - test_ustar_filenames.c \ - test_ustar_filename_encoding.c \ - test_write_disk.c \ - test_write_disk_appledouble.c \ - test_write_disk_failures.c \ - test_write_disk_hardlink.c \ - test_write_disk_hfs_compression.c \ - test_write_disk_lookup.c \ - test_write_disk_mac_metadata.c \ - test_write_disk_no_hfs_compression.c \ - test_write_disk_perms.c \ - test_write_disk_secure.c \ - test_write_disk_sparse.c \ - test_write_disk_symlink.c \ - test_write_disk_times.c \ - test_write_filter_b64encode.c \ - test_write_filter_bzip2.c \ - test_write_filter_compress.c \ - test_write_filter_gzip.c \ - test_write_filter_gzip_timestamp.c \ - test_write_filter_lrzip.c \ - test_write_filter_lzip.c \ - test_write_filter_lzma.c \ - test_write_filter_lzop.c \ - test_write_filter_program.c \ - test_write_filter_uuencode.c \ - test_write_filter_xz.c \ - test_write_format_7zip.c \ - test_write_format_7zip_empty.c \ - test_write_format_7zip_large.c \ - test_write_format_ar.c \ - test_write_format_cpio.c \ - test_write_format_cpio_empty.c \ - test_write_format_cpio_newc.c \ - test_write_format_cpio_odc.c \ - test_write_format_gnutar.c \ - test_write_format_iso9660.c \ - test_write_format_iso9660_boot.c \ - test_write_format_iso9660_empty.c \ - test_write_format_iso9660_filename.c \ - test_write_format_iso9660_zisofs.c \ - test_write_format_mtree.c \ - test_write_format_mtree_absolute_path.c \ - test_write_format_mtree_classic.c \ - 
test_write_format_mtree_classic_indent.c \ - test_write_format_mtree_fflags.c \ - test_write_format_mtree_no_separator.c \ - test_write_format_mtree_quoted_filename.c \ - test_write_format_pax.c \ - test_write_format_shar_empty.c \ - test_write_format_tar.c \ - test_write_format_tar_empty.c \ - test_write_format_tar_sparse.c \ - test_write_format_tar_ustar.c \ - test_write_format_tar_v7tar.c \ - test_write_format_xar.c \ - test_write_format_xar_empty.c \ - test_write_format_zip.c \ - test_write_format_zip_empty.c \ - test_write_format_zip_no_compression.c \ - test_write_zip_set_compression_store.c \ - test_write_open_memory.c \ - test_zip_filename_encoding.c - -# Build the test program. -SRCS= \ - ${TESTS} \ - list.h \ - main.c \ - read_open_memory.c - -.PATH: ${LIBARCHIVEDIR}/test_utils -SRCS+= test_utils.c - -# Build libarchive_test and run it. -check test: libarchive_test - ./libarchive_test -r ${LIBARCHIVEDIR}/libarchive/test - -# list.h is just a list of all tests, as indicated by DEFINE_TEST macro lines -list.h: ${TESTS} Makefile - (cd ${LIBARCHIVEDIR}/libarchive/test; cat test_*.c) | \ - grep DEFINE_TEST > ${.OBJDIR}/list.h - -CLEANFILES += *.out *.o *.core *~ list.h .dirstamp .depend -CLEANDIRS += .deps .libs - -cleantest: - -chmod -R +w /tmp/libarchive_test.* - rm -rf /tmp/libarchive_test.* - -.include Property changes on: user/ngie/more-tests2/lib/libarchive/test/Makefile ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: user/ngie/more-tests2/lib/libarchive/test =================================================================== --- user/ngie/more-tests2/lib/libarchive/test (revision 288954) +++ user/ngie/more-tests2/lib/libarchive/test (nonexistent) Property changes on: user/ngie/more-tests2/lib/libarchive/test ___________________________________________________________________ Deleted: svn:ignore ## -1,3 +0,0 ## -.depend -libarchive_test -list.h Index: user/ngie/more-tests2/lib/libarchive/Makefile =================================================================== --- user/ngie/more-tests2/lib/libarchive/Makefile (revision 288954) +++ user/ngie/more-tests2/lib/libarchive/Makefile (revision 288955) @@ -1,408 +1,405 @@ # $FreeBSD$ .include LIBARCHIVEDIR= ${.CURDIR}/../../contrib/libarchive LIB= archive LIBADD= z bz2 lzma bsdxml CFLAGS+= -DHAVE_BZLIB_H=1 -DHAVE_LIBLZMA=1 -DHAVE_LZMA_H=1 # FreeBSD SHLIB_MAJOR value is managed as part of the FreeBSD system. # It has no real relation to the libarchive version number. SHLIB_MAJOR= 6 CFLAGS+= -DPLATFORM_CONFIG_H=\"${.CURDIR}/config_freebsd.h\" CFLAGS+= -I${.OBJDIR} .if ${MK_OPENSSL} != "no" CFLAGS+= -DWITH_OPENSSL LIBADD+= crypto .else LIBADD+= md .endif .if ${MK_ICONV} != "no" # TODO: This can be changed back to CFLAGS once iconv works correctly # with statically linked binaries. SHARED_CFLAGS+= -DHAVE_ICONV=1 -DHAVE_ICONV_H=1 -DICONV_CONST= .endif .if ${MACHINE_ARCH:Marm*} != "" || ${MACHINE_ARCH:Mmips*} != "" || \ ${MACHINE_ARCH:Msparc64*} != "" NO_WCAST_ALIGN= yes .if ${MACHINE_ARCH:M*64*} == "" CFLAGS+= -DPPMD_32BIT .endif .endif NO_WCAST_ALIGN.clang= .PATH: ${LIBARCHIVEDIR}/libarchive # Headers to be installed in /usr/include INCS= archive.h archive_entry.h # Sources to be compiled. 
SRCS= archive_acl.c \ archive_check_magic.c \ archive_cmdline.c \ archive_crypto.c \ archive_entry.c \ archive_entry_copy_stat.c \ archive_entry_link_resolver.c \ archive_entry_sparse.c \ archive_entry_stat.c \ archive_entry_strmode.c \ archive_entry_xattr.c \ archive_getdate.c \ archive_match.c \ archive_options.c \ archive_pathmatch.c \ archive_ppmd7.c \ archive_rb.c \ archive_read.c \ archive_read_append_filter.c \ archive_read_data_into_fd.c \ archive_read_disk_entry_from_file.c \ archive_read_disk_posix.c \ archive_read_disk_set_standard_lookup.c \ archive_read_extract.c \ archive_read_open_fd.c \ archive_read_open_file.c \ archive_read_open_filename.c \ archive_read_open_memory.c \ archive_read_set_format.c \ archive_read_set_options.c \ archive_read_support_filter_all.c \ archive_read_support_filter_bzip2.c \ archive_read_support_filter_compress.c \ archive_read_support_filter_gzip.c \ archive_read_support_filter_grzip.c \ archive_read_support_filter_lrzip.c \ archive_read_support_filter_lzop.c \ archive_read_support_filter_none.c \ archive_read_support_filter_program.c \ archive_read_support_filter_rpm.c \ archive_read_support_filter_uu.c \ archive_read_support_filter_xz.c \ archive_read_support_format_7zip.c \ archive_read_support_format_all.c \ archive_read_support_format_ar.c \ archive_read_support_format_by_code.c \ archive_read_support_format_cab.c \ archive_read_support_format_cpio.c \ archive_read_support_format_empty.c \ archive_read_support_format_iso9660.c \ archive_read_support_format_lha.c \ archive_read_support_format_mtree.c \ archive_read_support_format_rar.c \ archive_read_support_format_raw.c \ archive_read_support_format_tar.c \ archive_read_support_format_xar.c \ archive_read_support_format_zip.c \ archive_string.c \ archive_string_sprintf.c \ archive_util.c \ archive_virtual.c \ archive_write.c \ archive_write_add_filter.c \ archive_write_disk_acl.c \ archive_write_disk_set_standard_lookup.c \ archive_write_disk_posix.c \ archive_write_open_fd.c \ archive_write_open_file.c \ archive_write_open_filename.c \ archive_write_open_memory.c \ archive_write_add_filter_b64encode.c \ archive_write_add_filter_by_name.c \ archive_write_add_filter_bzip2.c \ archive_write_add_filter_compress.c \ archive_write_add_filter_grzip.c \ archive_write_add_filter_gzip.c \ archive_write_add_filter_lrzip.c \ archive_write_add_filter_lzop.c \ archive_write_add_filter_none.c \ archive_write_add_filter_program.c \ archive_write_add_filter_uuencode.c \ archive_write_add_filter_xz.c \ archive_write_set_format.c \ archive_write_set_format_7zip.c \ archive_write_set_format_ar.c \ archive_write_set_format_by_name.c \ archive_write_set_format_cpio.c \ archive_write_set_format_cpio_newc.c \ archive_write_set_format_gnutar.c \ archive_write_set_format_iso9660.c \ archive_write_set_format_mtree.c \ archive_write_set_format_pax.c \ archive_write_set_format_shar.c \ archive_write_set_format_ustar.c \ archive_write_set_format_v7tar.c \ archive_write_set_format_xar.c \ archive_write_set_format_zip.c \ archive_write_set_options.c \ filter_fork_posix.c # Man pages to be installed. 
MAN= archive_entry.3 \ archive_entry_acl.3 \ archive_entry_linkify.3 \ archive_entry_paths.3 \ archive_entry_perms.3 \ archive_entry_stat.3 \ archive_entry_time.3 \ archive_read.3 \ archive_read_data.3 \ archive_read_disk.3 \ archive_read_extract.3 \ archive_read_filter.3 \ archive_read_format.3 \ archive_read_free.3 \ archive_read_header.3 \ archive_read_new.3 \ archive_read_open.3 \ archive_read_set_options.3 \ archive_util.3 \ archive_write.3 \ archive_write_blocksize.3 \ archive_write_data.3 \ archive_write_disk.3 \ archive_write_filter.3 \ archive_write_finish_entry.3 \ archive_write_format.3 \ archive_write_free.3 \ archive_write_header.3 \ archive_write_new.3 \ archive_write_open.3 \ archive_write_set_options.3 \ cpio.5 \ libarchive.3 \ libarchive_changes.3 \ libarchive_internals.3 \ libarchive-formats.5 \ tar.5 # Symlink the man pages under each function name. MLINKS+= archive_entry.3 archive_entry_clear.3 MLINKS+= archive_entry.3 archive_entry_clone.3 MLINKS+= archive_entry.3 archive_entry_free.3 MLINKS+= archive_entry.3 archive_entry_new.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_add_entry.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_add_entry_w.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_clear.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_count.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_next.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_next_w.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_reset.3 MLINKS+= archive_entry_acl.3 archive_entry_acl_text_w.3 MLINKS+= archive_entry_linkify.3 archive_entry_linkresolver.3 MLINKS+= archive_entry_linkify.3 archive_entry_linkresolver_new.3 MLINKS+= archive_entry_linkify.3 archive_entry_linkresolver_set_strategy.3 MLINKS+= archive_entry_linkify.3 archive_entry_linkresolver_free.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_hardlink.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_hardlink_w.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_link.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_link_w.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_pathname.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_pathname_w.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_sourcepath.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_symlink.3 MLINKS+= archive_entry_paths.3 archive_entry_copy_symlink_w.3 MLINKS+= archive_entry_paths.3 archive_entry_hardlink.3 MLINKS+= archive_entry_paths.3 archive_entry_hardlink_w.3 MLINKS+= archive_entry_paths.3 archive_entry_pathname.3 MLINKS+= archive_entry_paths.3 archive_entry_pathname_w.3 MLINKS+= archive_entry_paths.3 archive_entry_set_hardlink.3 MLINKS+= archive_entry_paths.3 archive_entry_set_link.3 MLINKS+= archive_entry_paths.3 archive_entry_set_pathname.3 MLINKS+= archive_entry_paths.3 archive_entry_set_symlink.3 MLINKS+= archive_entry_paths.3 archive_entry_symlink.3 MLINKS+= archive_entry_paths.3 archive_entry_symlink_w.3 MLINKS+= archive_entry_paths.3 archive_entry_update_symlink_utf8.3 MLINKS+= archive_entry_paths.3 archive_entry_update_hardlink_utf8.3 MLINKS+= archive_entry_perms.3 archive_entry_copy_fflags_text.3 MLINKS+= archive_entry_perms.3 archive_entry_copy_fflags_text_w.3 MLINKS+= archive_entry_perms.3 archive_entry_copy_gname.3 MLINKS+= archive_entry_perms.3 archive_entry_copy_gname_w.3 MLINKS+= archive_entry_perms.3 archive_entry_copy_uname.3 MLINKS+= archive_entry_perms.3 archive_entry_copy_uname_w.3 MLINKS+= archive_entry_perms.3 archive_entry_fflags.3 MLINKS+= archive_entry_perms.3 archive_entry_fflags_text.3 MLINKS+= 
archive_entry_perms.3 archive_entry_gid.3 MLINKS+= archive_entry_perms.3 archive_entry_gname.3 MLINKS+= archive_entry_perms.3 archive_entry_gname_w.3 MLINKS+= archive_entry_perms.3 archive_entry_set_fflags.3 MLINKS+= archive_entry_perms.3 archive_entry_set_gid.3 MLINKS+= archive_entry_perms.3 archive_entry_set_gname.3 MLINKS+= archive_entry_perms.3 archive_entry_perm.3 MLINKS+= archive_entry_perms.3 archive_entry_set_perm.3 MLINKS+= archive_entry_perms.3 archive_entry_set_uid.3 MLINKS+= archive_entry_perms.3 archive_entry_set_uname.3 MLINKS+= archive_entry_perms.3 archive_entry_strmode.3 MLINKS+= archive_entry_perms.3 archive_entry_uid.3 MLINKS+= archive_entry_perms.3 archive_entry_uname.3 MLINKS+= archive_entry_perms.3 archive_entry_uname_w.3 MLINKS+= archive_entry_perms.3 archive_entry_update_gname_utf8.3 MLINKS+= archive_entry_perms.3 archive_entry_update_uname_utf8.3 MLINKS+= archive_entry_stat.3 archive_entry_copy_stat.3 MLINKS+= archive_entry_stat.3 archive_entry_dev.3 MLINKS+= archive_entry_stat.3 archive_entry_dev_is_set.3 MLINKS+= archive_entry_stat.3 archive_entry_devmajor.3 MLINKS+= archive_entry_stat.3 archive_entry_devminor.3 MLINKS+= archive_entry_stat.3 archive_entry_filetype.3 MLINKS+= archive_entry_stat.3 archive_entry_ino.3 MLINKS+= archive_entry_stat.3 archive_entry_ino64.3 MLINKS+= archive_entry_stat.3 archive_entry_ino_is_set.3 MLINKS+= archive_entry_stat.3 archive_entry_mode.3 MLINKS+= archive_entry_stat.3 archive_entry_nlink.3 MLINKS+= archive_entry_stat.3 archive_entry_rdev.3 MLINKS+= archive_entry_stat.3 archive_entry_rdevmajor.3 MLINKS+= archive_entry_stat.3 archive_entry_rdevminor.3 MLINKS+= archive_entry_stat.3 archive_entry_set_dev.3 MLINKS+= archive_entry_stat.3 archive_entry_set_devmajor.3 MLINKS+= archive_entry_stat.3 archive_entry_set_devminor.3 MLINKS+= archive_entry_stat.3 archive_entry_set_filetype.3 MLINKS+= archive_entry_stat.3 archive_entry_set_ino.3 MLINKS+= archive_entry_stat.3 archive_entry_set_ino64.3 MLINKS+= archive_entry_stat.3 archive_entry_set_mode.3 MLINKS+= archive_entry_stat.3 archive_entry_set_nlink.3 MLINKS+= archive_entry_stat.3 archive_entry_set_rdev.3 MLINKS+= archive_entry_stat.3 archive_entry_set_rdevmajor.3 MLINKS+= archive_entry_stat.3 archive_entry_set_rdevminor.3 MLINKS+= archive_entry_stat.3 archive_entry_set_size.3 MLINKS+= archive_entry_stat.3 archive_entry_size.3 MLINKS+= archive_entry_stat.3 archive_entry_size_is_set.3 MLINKS+= archive_entry_stat.3 archive_entry_unset_size.3 MLINKS+= archive_entry_time.3 archive_entry_atime.3 MLINKS+= archive_entry_time.3 archive_entry_atime_is_set.3 MLINKS+= archive_entry_time.3 archive_entry_atime_nsec.3 MLINKS+= archive_entry_time.3 archive_entry_birthtime.3 MLINKS+= archive_entry_time.3 archive_entry_birthtime_is_set.3 MLINKS+= archive_entry_time.3 archive_entry_birthtime_nsec.3 MLINKS+= archive_entry_time.3 archive_entry_ctime.3 MLINKS+= archive_entry_time.3 archive_entry_ctime_is_set.3 MLINKS+= archive_entry_time.3 archive_entry_ctime_nsec.3 MLINKS+= archive_entry_time.3 archive_entry_mtime.3 MLINKS+= archive_entry_time.3 archive_entry_mtime_is_set.3 MLINKS+= archive_entry_time.3 archive_entry_mtime_nsec.3 MLINKS+= archive_entry_time.3 archive_entry_set_atime.3 MLINKS+= archive_entry_time.3 archive_entry_set_birthtime.3 MLINKS+= archive_entry_time.3 archive_entry_set_ctime.3 MLINKS+= archive_entry_time.3 archive_entry_set_mtime.3 MLINKS+= archive_entry_time.3 archive_entry_unset_atime.3 MLINKS+= archive_entry_time.3 archive_entry_unset_birthtime.3 MLINKS+= archive_entry_time.3 
archive_entry_unset_ctime.3 MLINKS+= archive_entry_time.3 archive_entry_unset_mtime.3 MLINKS+= archive_read_data.3 archive_read_data_block.3 MLINKS+= archive_read_data.3 archive_read_data_into_fd.3 MLINKS+= archive_read_data.3 archive_read_data_skip.3 MLINKS+= archive_read_header.3 archive_read_next_header.3 MLINKS+= archive_read_header.3 archive_read_next_header2.3 MLINKS+= archive_read_extract.3 archive_read_extract2.3 MLINKS+= archive_read_extract.3 archive_read_extract_set_progress_callback.3 MLINKS+= archive_read_extract.3 archive_read_extract_set_skip_file.3 MLINKS+= archive_read_open.3 archive_read_open2.3 MLINKS+= archive_read_open.3 archive_read_open_FILE.3 MLINKS+= archive_read_open.3 archive_read_open_fd.3 MLINKS+= archive_read_open.3 archive_read_open_file.3 MLINKS+= archive_read_open.3 archive_read_open_filename.3 MLINKS+= archive_read_open.3 archive_read_open_memory.3 MLINKS+= archive_read_free.3 archive_read_close.3 MLINKS+= archive_read_free.3 archive_read_finish.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_all.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_bzip2.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_compress.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_gzip.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_lzma.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_none.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_xz.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_program.3 MLINKS+= archive_read_filter.3 archive_read_support_filter_program_signature.3 MLINKS+= archive_read_format.3 archive_read_support_format_7zip.3 MLINKS+= archive_read_format.3 archive_read_support_format_all.3 MLINKS+= archive_read_format.3 archive_read_support_format_ar.3 MLINKS+= archive_read_format.3 archive_read_support_format_by_code.3 MLINKS+= archive_read_format.3 archive_read_support_format_cab.3 MLINKS+= archive_read_format.3 archive_read_support_format_cpio.3 MLINKS+= archive_read_format.3 archive_read_support_format_empty.3 MLINKS+= archive_read_format.3 archive_read_support_format_iso9660.3 MLINKS+= archive_read_format.3 archive_read_support_format_lha.3 MLINKS+= archive_read_format.3 archive_read_support_format_mtree.3 MLINKS+= archive_read_format.3 archive_read_support_format_rar.3 MLINKS+= archive_read_format.3 archive_read_support_format_raw.3 MLINKS+= archive_read_format.3 archive_read_support_format_tar.3 MLINKS+= archive_read_format.3 archive_read_support_format_xar.3 MLINKS+= archive_read_format.3 archive_read_support_format_zip.3 MLINKS+= archive_read_disk.3 archive_read_disk_entry_from_file.3 MLINKS+= archive_read_disk.3 archive_read_disk_gname.3 MLINKS+= archive_read_disk.3 archive_read_disk_new.3 MLINKS+= archive_read_disk.3 archive_read_disk_set_gname_lookup.3 MLINKS+= archive_read_disk.3 archive_read_disk_set_standard_lookup.3 MLINKS+= archive_read_disk.3 archive_read_disk_set_symlink_hybrid.3 MLINKS+= archive_read_disk.3 archive_read_disk_set_symlink_logical.3 MLINKS+= archive_read_disk.3 archive_read_disk_set_symlink_physical.3 MLINKS+= archive_read_disk.3 archive_read_disk_set_uname_lookup.3 MLINKS+= archive_read_disk.3 archive_read_disk_uname.3 MLINKS+= archive_read_set_options.3 archive_read_set_filter_option.3 MLINKS+= archive_read_set_options.3 archive_read_set_format_option.3 MLINKS+= archive_read_set_options.3 archive_read_set_option.3 MLINKS+= archive_util.3 archive_clear_error.3 MLINKS+= archive_util.3 archive_compression.3 MLINKS+= archive_util.3 
archive_compression_name.3 MLINKS+= archive_util.3 archive_copy_error.3 MLINKS+= archive_util.3 archive_errno.3 MLINKS+= archive_util.3 archive_error_string.3 MLINKS+= archive_util.3 archive_file_count.3 MLINKS+= archive_util.3 archive_filter_code.3 MLINKS+= archive_util.3 archive_filter_count.3 MLINKS+= archive_util.3 archive_filter_name.3 MLINKS+= archive_util.3 archive_format.3 MLINKS+= archive_util.3 archive_format_name.3 MLINKS+= archive_util.3 archive_position.3 MLINKS+= archive_util.3 archive_set_error.3 MLINKS+= archive_write_blocksize.3 archive_write_get_bytes_in_last_block.3 MLINKS+= archive_write_blocksize.3 archive_write_get_bytes_per_block.3 MLINKS+= archive_write_blocksize.3 archive_write_set_bytes_in_last_block.3 MLINKS+= archive_write_blocksize.3 archive_write_set_bytes_per_block.3 MLINKS+= archive_write_disk.3 archive_write_data_block.3 MLINKS+= archive_write_disk.3 archive_write_disk_new.3 MLINKS+= archive_write_disk.3 archive_write_disk_set_group_lookup.3 MLINKS+= archive_write_disk.3 archive_write_disk_set_options.3 MLINKS+= archive_write_disk.3 archive_write_disk_set_skip_file.3 MLINKS+= archive_write_disk.3 archive_write_disk_set_standard_lookup.3 MLINKS+= archive_write_disk.3 archive_write_disk_set_user_lookup.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_bzip2.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_compress.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_gzip.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_lzip.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_lzma.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_none.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_program.3 MLINKS+= archive_write_filter.3 archive_write_add_filter_xz.3 MLINKS+= archive_write_format.3 archive_write_set_format_cpio.3 MLINKS+= archive_write_format.3 archive_write_set_format_pax.3 MLINKS+= archive_write_format.3 archive_write_set_format_pax_restricted.3 MLINKS+= archive_write_format.3 archive_write_set_format_shar.3 MLINKS+= archive_write_format.3 archive_write_set_format_shar_dump.3 MLINKS+= archive_write_format.3 archive_write_set_format_ustar.3 MLINKS+= archive_write_free.3 archive_write_close.3 MLINKS+= archive_write_free.3 archive_write_fail.3 MLINKS+= archive_write_free.3 archive_write_finish.3 MLINKS+= archive_write_open.3 archive_write_open_FILE.3 MLINKS+= archive_write_open.3 archive_write_open_fd.3 MLINKS+= archive_write_open.3 archive_write_open_file.3 MLINKS+= archive_write_open.3 archive_write_open_filename.3 MLINKS+= archive_write_open.3 archive_write_open_memory.3 MLINKS+= archive_write_set_options.3 archive_write_set_filter_option.3 MLINKS+= archive_write_set_options.3 archive_write_set_format_option.3 MLINKS+= archive_write_set_options.3 archive_write_set_option.3 MLINKS+= libarchive.3 archive.3 -.PHONY: check test clean-test -check test: - cd ${.CURDIR}/test && make obj && make test - -clean-test: - cd ${.CURDIR}/test && make clean +.if ${MK_TESTS} != "no" +#SUBDIR+= tests +.endif .include Index: user/ngie/more-tests2/lib/libarchive/tests/Makefile =================================================================== --- user/ngie/more-tests2/lib/libarchive/tests/Makefile (nonexistent) +++ user/ngie/more-tests2/lib/libarchive/tests/Makefile (revision 288955) @@ -0,0 +1,256 @@ +# $FreeBSD$ + +LIBARCHIVEDIR= ${SRCTOP}/contrib/libarchive + +MAN= + +PROG=libarchive_test +INTERNALPROG=yes # Don't install this; it's just for testing +LDADD= -L ${.OBJDIR}/.. 
-larchive +LIBADD= z bz2 lzma md crypto bsdxml +CFLAGS+= -g +CFLAGS+= -I${.CURDIR}/.. -I${.OBJDIR} +CFLAGS+= -I${LIBARCHIVEDIR}/libarchive -I${LIBARCHIVEDIR}/test_utils +CFLAGS+= -DHAVE_LIBLZMA=1 -DHAVE_LZMA_H=1 + +# Uncomment to link against dmalloc +#LDADD+= -L/usr/local/lib -ldmalloc +#CFLAGS+= -I/usr/local/include -DUSE_DMALLOC + +.PATH: ${LIBARCHIVEDIR}/libarchive/test +TESTS= \ + test_acl_freebsd_nfs4.c \ + test_acl_freebsd_posix1e.c \ + test_acl_nfs4.c \ + test_acl_pax.c \ + test_acl_posix1e.c \ + test_archive_api_feature.c \ + test_archive_clear_error.c \ + test_archive_cmdline.c \ + test_archive_crypto.c \ + test_archive_getdate.c \ + test_archive_match_time.c \ + test_archive_match_owner.c \ + test_archive_match_path.c \ + test_archive_pathmatch.c \ + test_archive_read_close_twice.c \ + test_archive_read_close_twice_open_fd.c \ + test_archive_read_close_twice_open_filename.c \ + test_archive_read_multiple_data_objects.c \ + test_archive_read_next_header_empty.c \ + test_archive_read_next_header_raw.c \ + test_archive_read_open2.c \ + test_archive_read_set_filter_option.c \ + test_archive_read_set_format_option.c \ + test_archive_read_set_option.c \ + test_archive_read_set_options.c \ + test_archive_read_support.c \ + test_archive_set_error.c \ + test_archive_string.c \ + test_archive_string_conversion.c \ + test_archive_write_add_filter_by_name.c \ + test_archive_write_set_filter_option.c \ + test_archive_write_set_format_by_name.c \ + test_archive_write_set_format_option.c \ + test_archive_write_set_option.c \ + test_archive_write_set_options.c \ + test_bad_fd.c \ + test_compat_bzip2.c \ + test_compat_cpio.c \ + test_compat_gtar.c \ + test_compat_gzip.c \ + test_compat_lzip.c \ + test_compat_lzma.c \ + test_compat_lzop.c \ + test_compat_mac.c \ + test_compat_pax_libarchive_2x.c \ + test_compat_solaris_tar_acl.c \ + test_compat_solaris_pax_sparse.c \ + test_compat_tar_hardlink.c \ + test_compat_uudecode.c \ + test_compat_xz.c \ + test_compat_zip.c \ + test_empty_write.c \ + test_entry.c \ + test_entry_strmode.c \ + test_extattr_freebsd.c \ + test_filter_count.c \ + test_fuzz.c \ + test_gnutar_filename_encoding.c \ + test_link_resolver.c \ + test_open_fd.c \ + test_open_failure.c \ + test_open_file.c \ + test_open_filename.c \ + test_pax_filename_encoding.c \ + test_read_data_large.c \ + test_read_disk.c \ + test_read_disk_directory_traversals.c \ + test_read_disk_entry_from_file.c \ + test_read_extract.c \ + test_read_file_nonexistent.c \ + test_read_filter_grzip.c \ + test_read_filter_lrzip.c \ + test_read_filter_lzop.c \ + test_read_filter_lzop_multiple_parts.c \ + test_read_filter_program.c \ + test_read_filter_program_signature.c \ + test_read_filter_uudecode.c \ + test_read_format_7zip.c \ + test_read_format_ar.c \ + test_read_format_cab.c \ + test_read_format_cab_filename.c \ + test_read_format_cpio_afio.c \ + test_read_format_cpio_bin.c \ + test_read_format_cpio_bin_Z.c \ + test_read_format_cpio_bin_be.c \ + test_read_format_cpio_bin_bz2.c \ + test_read_format_cpio_bin_gz.c \ + test_read_format_cpio_bin_lzip.c \ + test_read_format_cpio_bin_lzma.c \ + test_read_format_cpio_bin_xz.c \ + test_read_format_cpio_filename.c \ + test_read_format_cpio_odc.c \ + test_read_format_cpio_svr4_gzip.c \ + test_read_format_cpio_svr4c_Z.c \ + test_read_format_cpio_svr4_bzip2_rpm.c \ + test_read_format_cpio_svr4_gzip_rpm.c \ + test_read_format_empty.c \ + test_read_format_gtar_filename.c \ + test_read_format_gtar_gz.c \ + test_read_format_gtar_lzma.c \ + test_read_format_gtar_sparse.c \ + 
test_read_format_gtar_sparse_skip_entry.c \ + test_read_format_iso_Z.c \ + test_read_format_iso_multi_extent.c \ + test_read_format_iso_xorriso.c \ + test_read_format_isorr_rr_moved.c \ + test_read_format_isojoliet_bz2.c \ + test_read_format_isojoliet_long.c \ + test_read_format_isojoliet_rr.c \ + test_read_format_isojoliet_versioned.c \ + test_read_format_isorr_bz2.c \ + test_read_format_isorr_ce.c \ + test_read_format_isorr_new_bz2.c \ + test_read_format_isozisofs_bz2.c \ + test_read_format_lha.c \ + test_read_format_lha_filename.c \ + test_read_format_mtree.c \ + test_read_format_pax_bz2.c \ + test_read_format_rar.c \ + test_read_format_raw.c \ + test_read_format_tar.c \ + test_read_format_tar_empty_filename.c \ + test_read_format_tar_filename.c \ + test_read_format_tbz.c \ + test_read_format_tgz.c \ + test_read_format_tlz.c \ + test_read_format_txz.c \ + test_read_format_tz.c \ + test_read_format_ustar_filename.c \ + test_read_format_xar.c \ + test_read_format_zip.c \ + test_read_format_zip_comment_stored.c \ + test_read_format_zip_filename.c \ + test_read_format_zip_mac_metadata.c \ + test_read_format_zip_sfx.c \ + test_read_large.c \ + test_read_pax_truncated.c \ + test_read_position.c \ + test_read_set_format.c \ + test_read_truncated.c \ + test_read_truncated_filter.c \ + test_sparse_basic.c \ + test_tar_filenames.c \ + test_tar_large.c \ + test_ustar_filenames.c \ + test_ustar_filename_encoding.c \ + test_write_disk.c \ + test_write_disk_appledouble.c \ + test_write_disk_failures.c \ + test_write_disk_hardlink.c \ + test_write_disk_hfs_compression.c \ + test_write_disk_lookup.c \ + test_write_disk_mac_metadata.c \ + test_write_disk_no_hfs_compression.c \ + test_write_disk_perms.c \ + test_write_disk_secure.c \ + test_write_disk_sparse.c \ + test_write_disk_symlink.c \ + test_write_disk_times.c \ + test_write_filter_b64encode.c \ + test_write_filter_bzip2.c \ + test_write_filter_compress.c \ + test_write_filter_gzip.c \ + test_write_filter_gzip_timestamp.c \ + test_write_filter_lrzip.c \ + test_write_filter_lzip.c \ + test_write_filter_lzma.c \ + test_write_filter_lzop.c \ + test_write_filter_program.c \ + test_write_filter_uuencode.c \ + test_write_filter_xz.c \ + test_write_format_7zip.c \ + test_write_format_7zip_empty.c \ + test_write_format_7zip_large.c \ + test_write_format_ar.c \ + test_write_format_cpio.c \ + test_write_format_cpio_empty.c \ + test_write_format_cpio_newc.c \ + test_write_format_cpio_odc.c \ + test_write_format_gnutar.c \ + test_write_format_iso9660.c \ + test_write_format_iso9660_boot.c \ + test_write_format_iso9660_empty.c \ + test_write_format_iso9660_filename.c \ + test_write_format_iso9660_zisofs.c \ + test_write_format_mtree.c \ + test_write_format_mtree_absolute_path.c \ + test_write_format_mtree_classic.c \ + test_write_format_mtree_classic_indent.c \ + test_write_format_mtree_fflags.c \ + test_write_format_mtree_no_separator.c \ + test_write_format_mtree_quoted_filename.c \ + test_write_format_pax.c \ + test_write_format_shar_empty.c \ + test_write_format_tar.c \ + test_write_format_tar_empty.c \ + test_write_format_tar_sparse.c \ + test_write_format_tar_ustar.c \ + test_write_format_tar_v7tar.c \ + test_write_format_xar.c \ + test_write_format_xar_empty.c \ + test_write_format_zip.c \ + test_write_format_zip_empty.c \ + test_write_format_zip_no_compression.c \ + test_write_zip_set_compression_store.c \ + test_write_open_memory.c \ + test_zip_filename_encoding.c + +# Build the test program. 
+SRCS= \ + ${TESTS} \ + list.h \ + main.c \ + read_open_memory.c + +.PATH: ${LIBARCHIVEDIR}/test_utils +SRCS+= test_utils.c + +# Build libarchive_test and run it. +check test: libarchive_test + ./libarchive_test -r ${LIBARCHIVEDIR}/libarchive/test + +# list.h is just a list of all tests, as indicated by DEFINE_TEST macro lines +list.h: ${TESTS} Makefile + (cd ${LIBARCHIVEDIR}/libarchive/test; cat test_*.c) | \ + grep DEFINE_TEST > ${.OBJDIR}/list.h + +CLEANFILES += *.out *.o *.core *~ list.h .dirstamp .depend +CLEANDIRS += .deps .libs + +cleantest: + -chmod -R +w /tmp/libarchive_test.* + rm -rf /tmp/libarchive_test.* + +.include Property changes on: user/ngie/more-tests2/lib/libarchive/tests/Makefile ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: user/ngie/more-tests2/lib/libarchive/tests =================================================================== --- user/ngie/more-tests2/lib/libarchive/tests (nonexistent) +++ user/ngie/more-tests2/lib/libarchive/tests (revision 288955) Property changes on: user/ngie/more-tests2/lib/libarchive/tests ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,3 ## +.depend +libarchive_test +list.h Index: user/ngie/more-tests2/lib/libc++/Makefile =================================================================== --- user/ngie/more-tests2/lib/libc++/Makefile (revision 288954) +++ user/ngie/more-tests2/lib/libc++/Makefile (revision 288955) @@ -1,220 +1,219 @@ # $FreeBSD$ .include LIBCXXRTDIR= ${.CURDIR}/../../contrib/libcxxrt HDRDIR= ${.CURDIR}/../../contrib/libc++/include SRCDIR= ${.CURDIR}/../../contrib/libc++/src CXXINCLUDEDIR= ${INCLUDEDIR}/c++/v${SHLIB_MAJOR} .PATH: ${SRCDIR} LIB= c++ SHLIB_MAJOR= 1 SHLIB_LDSCRIPT= libc++.ldscript SRCS+= algorithm.cpp\ bind.cpp\ chrono.cpp\ condition_variable.cpp\ debug.cpp\ exception.cpp\ future.cpp\ hash.cpp\ ios.cpp\ iostream.cpp\ locale.cpp\ memory.cpp\ mutex.cpp\ new.cpp\ optional.cpp\ random.cpp\ regex.cpp\ shared_mutex.cpp\ stdexcept.cpp\ string.cpp\ strstream.cpp\ system_error.cpp\ thread.cpp\ typeinfo.cpp\ utility.cpp\ valarray.cpp CXXRT_SRCS+= libelftc_dem_gnu3.c\ terminate.cc\ dynamic_cast.cc\ memory.cc\ auxhelper.cc\ exception.cc\ stdexcept.cc\ typeinfo.cc\ guard.cc .for _S in ${CXXRT_SRCS} STATICOBJS+= cxxrt_${_S:R}.o cxxrt_${_S}: ln -sf ${LIBCXXRTDIR}/${_S} ${.TARGET} .endfor WARNS= 0 CFLAGS+= -I${HDRDIR} -I${LIBCXXRTDIR} -nostdlib -DLIBCXXRT .if empty(CXXFLAGS:M-std=*) CXXFLAGS+= -std=c++11 .endif LIBADD+= cxxrt -LDFLAGS+= --verbose INCSGROUPS= STD EXP EXT STD_HEADERS= __bit_reference\ __config\ __debug\ __functional_03\ __functional_base\ __functional_base_03\ __hash_table\ __locale\ __mutex_base\ __refstring\ __split_buffer\ __sso_allocator\ __std_stream\ __tree\ __tuple\ __undef___deallocate\ __undef_min_max\ algorithm\ array\ atomic\ bitset\ cassert\ ccomplex\ cctype\ cerrno\ cfenv\ cfloat\ chrono\ cinttypes\ ciso646\ climits\ clocale\ cmath\ codecvt\ complex\ complex.h\ condition_variable\ csetjmp\ csignal\ cstdarg\ cstdbool\ cstddef\ cstdint\ cstdio\ cstdlib\ cstring\ ctgmath\ ctime\ cwchar\ cwctype\ deque\ exception\ forward_list\ fstream\ functional\ future\ initializer_list\ iomanip\ ios\ iosfwd\ iostream\ istream\ iterator\ limits\ list\ locale\ map\ memory\ mutex\ new\ numeric\ ostream\ queue\ random\ ratio\ regex\ scoped_allocator\ set\ shared_mutex\ sstream\ stack\ stdexcept\ streambuf\ string\ strstream\ system_error\ tgmath.h\ thread\ tuple\ 
type_traits\ typeindex\ typeinfo\ unordered_map\ unordered_set\ utility\ valarray\ vector RT_HEADERS= cxxabi.h\ unwind.h\ unwind-arm.h\ unwind-itanium.h .for hdr in ${STD_HEADERS} STD+= ${HDRDIR}/${hdr} INCSLINKS+= ${CXXINCLUDEDIR}/${hdr} ${CXXINCLUDEDIR}/tr1/${hdr} .endfor .for hdr in ${RT_HEADERS} STD+= ${LIBCXXRTDIR}/${hdr} .endfor STDDIR= ${CXXINCLUDEDIR} EXP_HEADERS= __config\ algorithm\ chrono\ dynarray\ optional\ ratio\ string_view\ system_error\ tuple\ type_traits\ utility .for hdr in ${EXP_HEADERS} EXP+= ${HDRDIR}/experimental/${hdr} .endfor EXPDIR= ${CXXINCLUDEDIR}/experimental EXT_HEADERS= __hash\ hash_map\ hash_set .for hdr in ${EXT_HEADERS} EXT+= ${HDRDIR}/ext/${hdr} .endfor EXTDIR= ${CXXINCLUDEDIR}/ext .if ${MK_GNUCXX} == "no" && ${COMPILER_TYPE} == "gcc" CLEANFILES+= libstdc++.so libstdc++.a afterinstall: ln -sf ${DESTDIR}${LIBDIR}/lib${LIB}.so \ ${.OBJDIR}/libstdc++.so ln -sf ${DESTDIR}${LIBDIR}/lib${LIB}.a \ ${.OBJDIR}/libstdc++.a .endif # avoid cyclic dependency CFLAGS+= -I${LIBCXXRTDIR} GENDIRDEPS_FILTER= N*/libcxxrt .include Index: user/ngie/more-tests2/lib/libprocstat/libprocstat.c =================================================================== --- user/ngie/more-tests2/lib/libprocstat/libprocstat.c (revision 288954) +++ user/ngie/more-tests2/lib/libprocstat/libprocstat.c (revision 288955) @@ -1,2533 +1,2537 @@ /*- * Copyright (c) 2009 Stanislav Sedov * Copyright (c) 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define _WANT_UCRED #include #undef _WANT_UCRED #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define _WANT_FILE #include #include #include #include #include #define _KERNEL #include #include #include #include #include #include #undef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libprocstat_internal.h" #include "common_kvm.h" #include "core.h" int statfs(const char *, struct statfs *); /* XXX */ #define PROCSTAT_KVM 1 #define PROCSTAT_SYSCTL 2 #define PROCSTAT_CORE 3 static char **getargv(struct procstat *procstat, struct kinfo_proc *kp, size_t nchr, int env); static char *getmnton(kvm_t *kd, struct mount *m); static struct kinfo_vmentry * kinfo_getvmmap_core(struct procstat_core *core, int *cntp); static Elf_Auxinfo *procstat_getauxv_core(struct procstat_core *core, unsigned int *cntp); static Elf_Auxinfo *procstat_getauxv_sysctl(pid_t pid, unsigned int *cntp); static struct filestat_list *procstat_getfiles_kvm( struct procstat *procstat, struct kinfo_proc *kp, int mmapped); static struct filestat_list *procstat_getfiles_sysctl( struct procstat *procstat, struct kinfo_proc *kp, int mmapped); static int procstat_get_pipe_info_sysctl(struct filestat *fst, struct pipestat *pipe, char *errbuf); static int procstat_get_pipe_info_kvm(kvm_t *kd, struct filestat *fst, struct pipestat *pipe, char *errbuf); static int procstat_get_pts_info_sysctl(struct filestat *fst, struct ptsstat *pts, char *errbuf); static int procstat_get_pts_info_kvm(kvm_t *kd, struct filestat *fst, struct ptsstat *pts, char *errbuf); static int procstat_get_sem_info_sysctl(struct filestat *fst, struct semstat *sem, char *errbuf); static int procstat_get_sem_info_kvm(kvm_t *kd, struct filestat *fst, struct semstat *sem, char *errbuf); static int procstat_get_shm_info_sysctl(struct filestat *fst, struct shmstat *shm, char *errbuf); static int procstat_get_shm_info_kvm(kvm_t *kd, struct filestat *fst, struct shmstat *shm, char *errbuf); static int procstat_get_socket_info_sysctl(struct filestat *fst, struct sockstat *sock, char *errbuf); static int procstat_get_socket_info_kvm(kvm_t *kd, struct filestat *fst, struct sockstat *sock, char *errbuf); static int to_filestat_flags(int flags); static int procstat_get_vnode_info_kvm(kvm_t *kd, struct filestat *fst, struct vnstat *vn, char *errbuf); static int procstat_get_vnode_info_sysctl(struct filestat *fst, struct vnstat *vn, char *errbuf); static gid_t *procstat_getgroups_core(struct procstat_core *core, unsigned int *count); static gid_t * procstat_getgroups_kvm(kvm_t *kd, struct kinfo_proc *kp, unsigned int *count); static gid_t *procstat_getgroups_sysctl(pid_t pid, unsigned int *count); static struct kinfo_kstack *procstat_getkstack_sysctl(pid_t pid, int *cntp); static int procstat_getosrel_core(struct procstat_core *core, int *osrelp); static int procstat_getosrel_kvm(kvm_t *kd, struct kinfo_proc *kp, int *osrelp); static int procstat_getosrel_sysctl(pid_t pid, int *osrelp); static int procstat_getpathname_core(struct procstat_core *core, char *pathname, size_t maxlen); static int procstat_getpathname_sysctl(pid_t pid, char *pathname, size_t maxlen); static int procstat_getrlimit_core(struct procstat_core *core, int 
which, struct rlimit* rlimit); static int procstat_getrlimit_kvm(kvm_t *kd, struct kinfo_proc *kp, int which, struct rlimit* rlimit); static int procstat_getrlimit_sysctl(pid_t pid, int which, struct rlimit* rlimit); static int procstat_getumask_core(struct procstat_core *core, unsigned short *maskp); static int procstat_getumask_kvm(kvm_t *kd, struct kinfo_proc *kp, unsigned short *maskp); static int procstat_getumask_sysctl(pid_t pid, unsigned short *maskp); static int vntype2psfsttype(int type); void procstat_close(struct procstat *procstat) { assert(procstat); if (procstat->type == PROCSTAT_KVM) kvm_close(procstat->kd); else if (procstat->type == PROCSTAT_CORE) procstat_core_close(procstat->core); procstat_freeargv(procstat); procstat_freeenvv(procstat); free(procstat); } struct procstat * procstat_open_sysctl(void) { struct procstat *procstat; procstat = calloc(1, sizeof(*procstat)); if (procstat == NULL) { warn("malloc()"); return (NULL); } procstat->type = PROCSTAT_SYSCTL; return (procstat); } struct procstat * procstat_open_kvm(const char *nlistf, const char *memf) { struct procstat *procstat; kvm_t *kd; char buf[_POSIX2_LINE_MAX]; procstat = calloc(1, sizeof(*procstat)); if (procstat == NULL) { warn("malloc()"); return (NULL); } kd = kvm_openfiles(nlistf, memf, NULL, O_RDONLY, buf); if (kd == NULL) { warnx("kvm_openfiles(): %s", buf); free(procstat); return (NULL); } procstat->type = PROCSTAT_KVM; procstat->kd = kd; return (procstat); } struct procstat * procstat_open_core(const char *filename) { struct procstat *procstat; struct procstat_core *core; procstat = calloc(1, sizeof(*procstat)); if (procstat == NULL) { warn("malloc()"); return (NULL); } core = procstat_core_open(filename); if (core == NULL) { free(procstat); return (NULL); } procstat->type = PROCSTAT_CORE; procstat->core = core; return (procstat); } struct kinfo_proc * procstat_getprocs(struct procstat *procstat, int what, int arg, unsigned int *count) { struct kinfo_proc *p0, *p; size_t len, olen; int name[4]; int cnt; int error; assert(procstat); assert(count); p = NULL; if (procstat->type == PROCSTAT_KVM) { *count = 0; p0 = kvm_getprocs(procstat->kd, what, arg, &cnt); if (p0 == NULL || cnt <= 0) return (NULL); *count = cnt; len = *count * sizeof(*p); p = malloc(len); if (p == NULL) { warnx("malloc(%zu)", len); goto fail; } bcopy(p0, p, len); return (p); } else if (procstat->type == PROCSTAT_SYSCTL) { len = 0; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = what; name[3] = arg; error = sysctl(name, 4, NULL, &len, NULL, 0); if (error < 0 && errno != EPERM) { warn("sysctl(kern.proc)"); goto fail; } if (len == 0) { warnx("no processes?"); goto fail; } do { len += len / 10; p = reallocf(p, len); if (p == NULL) { warnx("reallocf(%zu)", len); goto fail; } olen = len; error = sysctl(name, 4, p, &len, NULL, 0); } while (error < 0 && errno == ENOMEM && olen == len); if (error < 0 && errno != EPERM) { warn("sysctl(kern.proc)"); goto fail; } /* Perform simple consistency checks. 
*/ if ((len % sizeof(*p)) != 0 || p->ki_structsize != sizeof(*p)) { warnx("kinfo_proc structure size mismatch (len = %zu)", len); goto fail; } *count = len / sizeof(*p); return (p); } else if (procstat->type == PROCSTAT_CORE) { p = procstat_core_get(procstat->core, PSC_TYPE_PROC, NULL, &len); if ((len % sizeof(*p)) != 0 || p->ki_structsize != sizeof(*p)) { warnx("kinfo_proc structure size mismatch"); goto fail; } *count = len / sizeof(*p); return (p); } else { warnx("unknown access method: %d", procstat->type); return (NULL); } fail: if (p) free(p); return (NULL); } void procstat_freeprocs(struct procstat *procstat __unused, struct kinfo_proc *p) { if (p != NULL) free(p); p = NULL; } struct filestat_list * procstat_getfiles(struct procstat *procstat, struct kinfo_proc *kp, int mmapped) { switch(procstat->type) { case PROCSTAT_KVM: return (procstat_getfiles_kvm(procstat, kp, mmapped)); case PROCSTAT_SYSCTL: case PROCSTAT_CORE: return (procstat_getfiles_sysctl(procstat, kp, mmapped)); default: warnx("unknown access method: %d", procstat->type); return (NULL); } } void procstat_freefiles(struct procstat *procstat, struct filestat_list *head) { struct filestat *fst, *tmp; STAILQ_FOREACH_SAFE(fst, head, next, tmp) { if (fst->fs_path != NULL) free(fst->fs_path); free(fst); } free(head); if (procstat->vmentries != NULL) { free(procstat->vmentries); procstat->vmentries = NULL; } if (procstat->files != NULL) { free(procstat->files); procstat->files = NULL; } } static struct filestat * filestat_new_entry(void *typedep, int type, int fd, int fflags, int uflags, int refcount, off_t offset, char *path, cap_rights_t *cap_rightsp) { struct filestat *entry; entry = calloc(1, sizeof(*entry)); if (entry == NULL) { warn("malloc()"); return (NULL); } entry->fs_typedep = typedep; entry->fs_fflags = fflags; entry->fs_uflags = uflags; entry->fs_fd = fd; entry->fs_type = type; entry->fs_ref_count = refcount; entry->fs_offset = offset; entry->fs_path = path; if (cap_rightsp != NULL) entry->fs_cap_rights = *cap_rightsp; else cap_rights_init(&entry->fs_cap_rights); return (entry); } static struct vnode * getctty(kvm_t *kd, struct kinfo_proc *kp) { struct pgrp pgrp; struct proc proc; struct session sess; int error; assert(kp); error = kvm_read_all(kd, (unsigned long)kp->ki_paddr, &proc, sizeof(proc)); if (error == 0) { warnx("can't read proc struct at %p for pid %d", kp->ki_paddr, kp->ki_pid); return (NULL); } if (proc.p_pgrp == NULL) return (NULL); error = kvm_read_all(kd, (unsigned long)proc.p_pgrp, &pgrp, sizeof(pgrp)); if (error == 0) { warnx("can't read pgrp struct at %p for pid %d", proc.p_pgrp, kp->ki_pid); return (NULL); } error = kvm_read_all(kd, (unsigned long)pgrp.pg_session, &sess, sizeof(sess)); if (error == 0) { warnx("can't read session struct at %p for pid %d", pgrp.pg_session, kp->ki_pid); return (NULL); } return (sess.s_ttyvp); } static struct filestat_list * procstat_getfiles_kvm(struct procstat *procstat, struct kinfo_proc *kp, int mmapped) { struct file file; struct filedesc filed; struct vm_map_entry vmentry; struct vm_object object; struct vmspace vmspace; vm_map_entry_t entryp; vm_map_t map; vm_object_t objp; struct vnode *vp; struct file **ofiles; struct filestat *entry; struct filestat_list *head; kvm_t *kd; void *data; int i, fflags; int prot, type; unsigned int nfiles; assert(procstat); kd = procstat->kd; if (kd == NULL) return (NULL); if (kp->ki_fd == NULL) return (NULL); if (!kvm_read_all(kd, (unsigned long)kp->ki_fd, &filed, sizeof(filed))) { warnx("can't read filedesc at %p", (void 
*)kp->ki_fd); return (NULL); } /* * Allocate list head. */ head = malloc(sizeof(*head)); if (head == NULL) return (NULL); STAILQ_INIT(head); /* root directory vnode, if one. */ if (filed.fd_rdir) { entry = filestat_new_entry(filed.fd_rdir, PS_FST_TYPE_VNODE, -1, PS_FST_FFLAG_READ, PS_FST_UFLAG_RDIR, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } /* current working directory vnode. */ if (filed.fd_cdir) { entry = filestat_new_entry(filed.fd_cdir, PS_FST_TYPE_VNODE, -1, PS_FST_FFLAG_READ, PS_FST_UFLAG_CDIR, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } /* jail root, if any. */ if (filed.fd_jdir) { entry = filestat_new_entry(filed.fd_jdir, PS_FST_TYPE_VNODE, -1, PS_FST_FFLAG_READ, PS_FST_UFLAG_JAIL, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } /* ktrace vnode, if one */ if (kp->ki_tracep) { entry = filestat_new_entry(kp->ki_tracep, PS_FST_TYPE_VNODE, -1, PS_FST_FFLAG_READ | PS_FST_FFLAG_WRITE, PS_FST_UFLAG_TRACE, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } /* text vnode, if one */ if (kp->ki_textvp) { entry = filestat_new_entry(kp->ki_textvp, PS_FST_TYPE_VNODE, -1, PS_FST_FFLAG_READ, PS_FST_UFLAG_TEXT, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } /* Controlling terminal. */ if ((vp = getctty(kd, kp)) != NULL) { entry = filestat_new_entry(vp, PS_FST_TYPE_VNODE, -1, PS_FST_FFLAG_READ | PS_FST_FFLAG_WRITE, PS_FST_UFLAG_CTTY, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } nfiles = filed.fd_lastfile + 1; ofiles = malloc(nfiles * sizeof(struct file *)); if (ofiles == NULL) { warn("malloc(%zu)", nfiles * sizeof(struct file *)); goto do_mmapped; } if (!kvm_read_all(kd, (unsigned long)filed.fd_ofiles, ofiles, nfiles * sizeof(struct file *))) { warnx("cannot read file structures at %p", (void *)filed.fd_ofiles); free(ofiles); goto do_mmapped; } for (i = 0; i <= filed.fd_lastfile; i++) { if (ofiles[i] == NULL) continue; if (!kvm_read_all(kd, (unsigned long)ofiles[i], &file, sizeof(struct file))) { warnx("can't read file %d at %p", i, (void *)ofiles[i]); continue; } switch (file.f_type) { case DTYPE_VNODE: type = PS_FST_TYPE_VNODE; data = file.f_vnode; break; case DTYPE_SOCKET: type = PS_FST_TYPE_SOCKET; data = file.f_data; break; case DTYPE_PIPE: type = PS_FST_TYPE_PIPE; data = file.f_data; break; case DTYPE_FIFO: type = PS_FST_TYPE_FIFO; data = file.f_vnode; break; #ifdef DTYPE_PTS case DTYPE_PTS: type = PS_FST_TYPE_PTS; data = file.f_data; break; #endif case DTYPE_SEM: type = PS_FST_TYPE_SEM; data = file.f_data; break; case DTYPE_SHM: type = PS_FST_TYPE_SHM; data = file.f_data; break; default: continue; } /* XXXRW: No capability rights support for kvm yet. */ entry = filestat_new_entry(data, type, i, to_filestat_flags(file.f_flag), 0, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } free(ofiles); do_mmapped: /* * Process mmapped files if requested. 
*/ if (mmapped) { if (!kvm_read_all(kd, (unsigned long)kp->ki_vmspace, &vmspace, sizeof(vmspace))) { warnx("can't read vmspace at %p", (void *)kp->ki_vmspace); goto exit; } map = &vmspace.vm_map; for (entryp = map->header.next; entryp != &kp->ki_vmspace->vm_map.header; entryp = vmentry.next) { if (!kvm_read_all(kd, (unsigned long)entryp, &vmentry, sizeof(vmentry))) { warnx("can't read vm_map_entry at %p", (void *)entryp); continue; } if (vmentry.eflags & MAP_ENTRY_IS_SUB_MAP) continue; if ((objp = vmentry.object.vm_object) == NULL) continue; for (; objp; objp = object.backing_object) { if (!kvm_read_all(kd, (unsigned long)objp, &object, sizeof(object))) { warnx("can't read vm_object at %p", (void *)objp); break; } } /* We want only vnode objects. */ if (object.type != OBJT_VNODE) continue; prot = vmentry.protection; fflags = 0; if (prot & VM_PROT_READ) fflags = PS_FST_FFLAG_READ; if ((vmentry.eflags & MAP_ENTRY_COW) == 0 && prot & VM_PROT_WRITE) fflags |= PS_FST_FFLAG_WRITE; /* * Create filestat entry. */ entry = filestat_new_entry(object.handle, PS_FST_TYPE_VNODE, -1, fflags, PS_FST_UFLAG_MMAP, 0, 0, NULL, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } } exit: return (head); } /* * kinfo types to filestat translation. */ static int kinfo_type2fst(int kftype) { static struct { int kf_type; int fst_type; } kftypes2fst[] = { { KF_TYPE_CRYPTO, PS_FST_TYPE_CRYPTO }, { KF_TYPE_FIFO, PS_FST_TYPE_FIFO }, { KF_TYPE_KQUEUE, PS_FST_TYPE_KQUEUE }, { KF_TYPE_MQUEUE, PS_FST_TYPE_MQUEUE }, { KF_TYPE_NONE, PS_FST_TYPE_NONE }, { KF_TYPE_PIPE, PS_FST_TYPE_PIPE }, { KF_TYPE_PTS, PS_FST_TYPE_PTS }, { KF_TYPE_SEM, PS_FST_TYPE_SEM }, { KF_TYPE_SHM, PS_FST_TYPE_SHM }, { KF_TYPE_SOCKET, PS_FST_TYPE_SOCKET }, { KF_TYPE_VNODE, PS_FST_TYPE_VNODE }, { KF_TYPE_UNKNOWN, PS_FST_TYPE_UNKNOWN } }; #define NKFTYPES (sizeof(kftypes2fst) / sizeof(*kftypes2fst)) unsigned int i; for (i = 0; i < NKFTYPES; i++) if (kftypes2fst[i].kf_type == kftype) break; if (i == NKFTYPES) return (PS_FST_TYPE_UNKNOWN); return (kftypes2fst[i].fst_type); } /* * kinfo flags to filestat translation. 
*/ static int kinfo_fflags2fst(int kfflags) { static struct { int kf_flag; int fst_flag; } kfflags2fst[] = { { KF_FLAG_APPEND, PS_FST_FFLAG_APPEND }, { KF_FLAG_ASYNC, PS_FST_FFLAG_ASYNC }, { KF_FLAG_CREAT, PS_FST_FFLAG_CREAT }, { KF_FLAG_DIRECT, PS_FST_FFLAG_DIRECT }, { KF_FLAG_EXCL, PS_FST_FFLAG_EXCL }, { KF_FLAG_EXEC, PS_FST_FFLAG_EXEC }, { KF_FLAG_EXLOCK, PS_FST_FFLAG_EXLOCK }, { KF_FLAG_FSYNC, PS_FST_FFLAG_SYNC }, { KF_FLAG_HASLOCK, PS_FST_FFLAG_HASLOCK }, { KF_FLAG_NOFOLLOW, PS_FST_FFLAG_NOFOLLOW }, { KF_FLAG_NONBLOCK, PS_FST_FFLAG_NONBLOCK }, { KF_FLAG_READ, PS_FST_FFLAG_READ }, { KF_FLAG_SHLOCK, PS_FST_FFLAG_SHLOCK }, { KF_FLAG_TRUNC, PS_FST_FFLAG_TRUNC }, { KF_FLAG_WRITE, PS_FST_FFLAG_WRITE } }; #define NKFFLAGS (sizeof(kfflags2fst) / sizeof(*kfflags2fst)) unsigned int i; int flags; flags = 0; for (i = 0; i < NKFFLAGS; i++) if ((kfflags & kfflags2fst[i].kf_flag) != 0) flags |= kfflags2fst[i].fst_flag; return (flags); } static int kinfo_uflags2fst(int fd) { switch (fd) { case KF_FD_TYPE_CTTY: return (PS_FST_UFLAG_CTTY); case KF_FD_TYPE_CWD: return (PS_FST_UFLAG_CDIR); case KF_FD_TYPE_JAIL: return (PS_FST_UFLAG_JAIL); case KF_FD_TYPE_TEXT: return (PS_FST_UFLAG_TEXT); case KF_FD_TYPE_TRACE: return (PS_FST_UFLAG_TRACE); case KF_FD_TYPE_ROOT: return (PS_FST_UFLAG_RDIR); } return (0); } static struct kinfo_file * kinfo_getfile_core(struct procstat_core *core, int *cntp) { int cnt; size_t len; char *buf, *bp, *eb; struct kinfo_file *kif, *kp, *kf; buf = procstat_core_get(core, PSC_TYPE_FILES, NULL, &len); if (buf == NULL) return (NULL); /* * XXXMG: The code below is just copy&past from libutil. * The code duplication can be avoided if libutil * is extended to provide something like: * struct kinfo_file *kinfo_getfile_from_buf(const char *buf, * size_t len, int *cntp); */ /* Pass 1: count items */ cnt = 0; bp = buf; eb = buf + len; while (bp < eb) { kf = (struct kinfo_file *)(uintptr_t)bp; if (kf->kf_structsize == 0) break; bp += kf->kf_structsize; cnt++; } kif = calloc(cnt, sizeof(*kif)); if (kif == NULL) { free(buf); return (NULL); } bp = buf; eb = buf + len; kp = kif; /* Pass 2: unpack */ while (bp < eb) { kf = (struct kinfo_file *)(uintptr_t)bp; if (kf->kf_structsize == 0) break; /* Copy/expand into pre-zeroed buffer */ memcpy(kp, kf, kf->kf_structsize); /* Advance to next packed record */ bp += kf->kf_structsize; /* Set field size to fixed length, advance */ kp->kf_structsize = sizeof(*kp); kp++; } free(buf); *cntp = cnt; return (kif); /* Caller must free() return value */ } static struct filestat_list * procstat_getfiles_sysctl(struct procstat *procstat, struct kinfo_proc *kp, int mmapped) { struct kinfo_file *kif, *files; struct kinfo_vmentry *kve, *vmentries; struct filestat_list *head; struct filestat *entry; char *path; off_t offset; int cnt, fd, fflags; int i, type, uflags; int refcount; cap_rights_t cap_rights; assert(kp); if (kp->ki_fd == NULL) return (NULL); switch(procstat->type) { case PROCSTAT_SYSCTL: files = kinfo_getfile(kp->ki_pid, &cnt); break; case PROCSTAT_CORE: files = kinfo_getfile_core(procstat->core, &cnt); break; default: assert(!"invalid type"); } if (files == NULL && errno != EPERM) { warn("kinfo_getfile()"); return (NULL); } procstat->files = files; /* * Allocate list head. */ head = malloc(sizeof(*head)); if (head == NULL) return (NULL); STAILQ_INIT(head); for (i = 0; i < cnt; i++) { kif = &files[i]; type = kinfo_type2fst(kif->kf_type); fd = kif->kf_fd >= 0 ? 
kif->kf_fd : -1; fflags = kinfo_fflags2fst(kif->kf_flags); uflags = kinfo_uflags2fst(kif->kf_fd); refcount = kif->kf_ref_count; offset = kif->kf_offset; if (*kif->kf_path != '\0') path = strdup(kif->kf_path); else path = NULL; cap_rights = kif->kf_cap_rights; /* * Create filestat entry. */ entry = filestat_new_entry(kif, type, fd, fflags, uflags, refcount, offset, path, &cap_rights); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } if (mmapped != 0) { vmentries = procstat_getvmmap(procstat, kp, &cnt); procstat->vmentries = vmentries; if (vmentries == NULL || cnt == 0) goto fail; for (i = 0; i < cnt; i++) { kve = &vmentries[i]; if (kve->kve_type != KVME_TYPE_VNODE) continue; fflags = 0; if (kve->kve_protection & KVME_PROT_READ) fflags = PS_FST_FFLAG_READ; if ((kve->kve_flags & KVME_FLAG_COW) == 0 && kve->kve_protection & KVME_PROT_WRITE) fflags |= PS_FST_FFLAG_WRITE; offset = kve->kve_offset; refcount = kve->kve_ref_count; if (*kve->kve_path != '\0') path = strdup(kve->kve_path); else path = NULL; entry = filestat_new_entry(kve, PS_FST_TYPE_VNODE, -1, fflags, PS_FST_UFLAG_MMAP, refcount, offset, path, NULL); if (entry != NULL) STAILQ_INSERT_TAIL(head, entry, next); } } fail: return (head); } int procstat_get_pipe_info(struct procstat *procstat, struct filestat *fst, struct pipestat *ps, char *errbuf) { assert(ps); if (procstat->type == PROCSTAT_KVM) { return (procstat_get_pipe_info_kvm(procstat->kd, fst, ps, errbuf)); } else if (procstat->type == PROCSTAT_SYSCTL || procstat->type == PROCSTAT_CORE) { return (procstat_get_pipe_info_sysctl(fst, ps, errbuf)); } else { warnx("unknown access method: %d", procstat->type); if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } } static int procstat_get_pipe_info_kvm(kvm_t *kd, struct filestat *fst, struct pipestat *ps, char *errbuf) { struct pipe pi; void *pipep; assert(kd); assert(ps); assert(fst); bzero(ps, sizeof(*ps)); pipep = fst->fs_typedep; if (pipep == NULL) goto fail; if (!kvm_read_all(kd, (unsigned long)pipep, &pi, sizeof(struct pipe))) { warnx("can't read pipe at %p", (void *)pipep); goto fail; } ps->addr = (uintptr_t)pipep; ps->peer = (uintptr_t)pi.pipe_peer; ps->buffer_cnt = pi.pipe_buffer.cnt; return (0); fail: if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } static int procstat_get_pipe_info_sysctl(struct filestat *fst, struct pipestat *ps, char *errbuf __unused) { struct kinfo_file *kif; assert(ps); assert(fst); bzero(ps, sizeof(*ps)); kif = fst->fs_typedep; if (kif == NULL) return (1); ps->addr = kif->kf_un.kf_pipe.kf_pipe_addr; ps->peer = kif->kf_un.kf_pipe.kf_pipe_peer; ps->buffer_cnt = kif->kf_un.kf_pipe.kf_pipe_buffer_cnt; return (0); } int procstat_get_pts_info(struct procstat *procstat, struct filestat *fst, struct ptsstat *pts, char *errbuf) { assert(pts); if (procstat->type == PROCSTAT_KVM) { return (procstat_get_pts_info_kvm(procstat->kd, fst, pts, errbuf)); } else if (procstat->type == PROCSTAT_SYSCTL || procstat->type == PROCSTAT_CORE) { return (procstat_get_pts_info_sysctl(fst, pts, errbuf)); } else { warnx("unknown access method: %d", procstat->type); if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } } static int procstat_get_pts_info_kvm(kvm_t *kd, struct filestat *fst, struct ptsstat *pts, char *errbuf) { struct tty tty; void *ttyp; assert(kd); assert(pts); assert(fst); bzero(pts, sizeof(*pts)); ttyp = fst->fs_typedep; if (ttyp == NULL) goto fail; if (!kvm_read_all(kd, (unsigned long)ttyp, &tty, sizeof(struct tty))) { 
warnx("can't read tty at %p", (void *)ttyp); goto fail; } pts->dev = dev2udev(kd, tty.t_dev); (void)kdevtoname(kd, tty.t_dev, pts->devname); return (0); fail: if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } static int procstat_get_pts_info_sysctl(struct filestat *fst, struct ptsstat *pts, char *errbuf __unused) { struct kinfo_file *kif; assert(pts); assert(fst); bzero(pts, sizeof(*pts)); kif = fst->fs_typedep; if (kif == NULL) return (0); pts->dev = kif->kf_un.kf_pts.kf_pts_dev; strlcpy(pts->devname, kif->kf_path, sizeof(pts->devname)); return (0); } int procstat_get_sem_info(struct procstat *procstat, struct filestat *fst, struct semstat *sem, char *errbuf) { assert(sem); if (procstat->type == PROCSTAT_KVM) { return (procstat_get_sem_info_kvm(procstat->kd, fst, sem, errbuf)); } else if (procstat->type == PROCSTAT_SYSCTL || procstat->type == PROCSTAT_CORE) { return (procstat_get_sem_info_sysctl(fst, sem, errbuf)); } else { warnx("unknown access method: %d", procstat->type); if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } } static int procstat_get_sem_info_kvm(kvm_t *kd, struct filestat *fst, struct semstat *sem, char *errbuf) { struct ksem ksem; void *ksemp; char *path; int i; assert(kd); assert(sem); assert(fst); bzero(sem, sizeof(*sem)); ksemp = fst->fs_typedep; if (ksemp == NULL) goto fail; if (!kvm_read_all(kd, (unsigned long)ksemp, &ksem, sizeof(struct ksem))) { warnx("can't read ksem at %p", (void *)ksemp); goto fail; } sem->mode = S_IFREG | ksem.ks_mode; sem->value = ksem.ks_value; if (fst->fs_path == NULL && ksem.ks_path != NULL) { path = malloc(MAXPATHLEN); for (i = 0; i < MAXPATHLEN - 1; i++) { if (!kvm_read_all(kd, (unsigned long)ksem.ks_path + i, path + i, 1)) break; if (path[i] == '\0') break; } path[i] = '\0'; if (i == 0) free(path); else fst->fs_path = path; } return (0); fail: if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } static int procstat_get_sem_info_sysctl(struct filestat *fst, struct semstat *sem, char *errbuf __unused) { struct kinfo_file *kif; assert(sem); assert(fst); bzero(sem, sizeof(*sem)); kif = fst->fs_typedep; if (kif == NULL) return (0); sem->value = kif->kf_un.kf_sem.kf_sem_value; sem->mode = kif->kf_un.kf_sem.kf_sem_mode; return (0); } int procstat_get_shm_info(struct procstat *procstat, struct filestat *fst, struct shmstat *shm, char *errbuf) { assert(shm); if (procstat->type == PROCSTAT_KVM) { return (procstat_get_shm_info_kvm(procstat->kd, fst, shm, errbuf)); } else if (procstat->type == PROCSTAT_SYSCTL || procstat->type == PROCSTAT_CORE) { return (procstat_get_shm_info_sysctl(fst, shm, errbuf)); } else { warnx("unknown access method: %d", procstat->type); if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } } static int procstat_get_shm_info_kvm(kvm_t *kd, struct filestat *fst, struct shmstat *shm, char *errbuf) { struct shmfd shmfd; void *shmfdp; char *path; int i; assert(kd); assert(shm); assert(fst); bzero(shm, sizeof(*shm)); shmfdp = fst->fs_typedep; if (shmfdp == NULL) goto fail; if (!kvm_read_all(kd, (unsigned long)shmfdp, &shmfd, sizeof(struct shmfd))) { warnx("can't read shmfd at %p", (void *)shmfdp); goto fail; } shm->mode = S_IFREG | shmfd.shm_mode; shm->size = shmfd.shm_size; if (fst->fs_path == NULL && shmfd.shm_path != NULL) { path = malloc(MAXPATHLEN); for (i = 0; i < MAXPATHLEN - 1; i++) { if (!kvm_read_all(kd, (unsigned long)shmfd.shm_path + i, path + i, 1)) break; if (path[i] == '\0') break; } path[i] = '\0'; if (i 
== 0) free(path); else fst->fs_path = path; } return (0); fail: if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } static int procstat_get_shm_info_sysctl(struct filestat *fst, struct shmstat *shm, char *errbuf __unused) { struct kinfo_file *kif; assert(shm); assert(fst); bzero(shm, sizeof(*shm)); kif = fst->fs_typedep; if (kif == NULL) return (0); shm->size = kif->kf_un.kf_file.kf_file_size; shm->mode = kif->kf_un.kf_file.kf_file_mode; return (0); } int procstat_get_vnode_info(struct procstat *procstat, struct filestat *fst, struct vnstat *vn, char *errbuf) { assert(vn); if (procstat->type == PROCSTAT_KVM) { return (procstat_get_vnode_info_kvm(procstat->kd, fst, vn, errbuf)); } else if (procstat->type == PROCSTAT_SYSCTL || procstat->type == PROCSTAT_CORE) { return (procstat_get_vnode_info_sysctl(fst, vn, errbuf)); } else { warnx("unknown access method: %d", procstat->type); if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } } static int procstat_get_vnode_info_kvm(kvm_t *kd, struct filestat *fst, struct vnstat *vn, char *errbuf) { /* Filesystem specific handlers. */ #define FSTYPE(fst) {#fst, fst##_filestat} struct { const char *tag; int (*handler)(kvm_t *kd, struct vnode *vp, struct vnstat *vn); } fstypes[] = { FSTYPE(devfs), FSTYPE(isofs), FSTYPE(msdosfs), FSTYPE(nfs), FSTYPE(smbfs), FSTYPE(udf), FSTYPE(ufs), #ifdef LIBPROCSTAT_ZFS FSTYPE(zfs), #endif }; #define NTYPES (sizeof(fstypes) / sizeof(*fstypes)) struct vnode vnode; char tagstr[12]; void *vp; int error; unsigned int i; assert(kd); assert(vn); assert(fst); vp = fst->fs_typedep; if (vp == NULL) goto fail; error = kvm_read_all(kd, (unsigned long)vp, &vnode, sizeof(vnode)); if (error == 0) { warnx("can't read vnode at %p", (void *)vp); goto fail; } bzero(vn, sizeof(*vn)); vn->vn_type = vntype2psfsttype(vnode.v_type); if (vnode.v_type == VNON || vnode.v_type == VBAD) return (0); error = kvm_read_all(kd, (unsigned long)vnode.v_tag, tagstr, sizeof(tagstr)); if (error == 0) { warnx("can't read v_tag at %p", (void *)vp); goto fail; } tagstr[sizeof(tagstr) - 1] = '\0'; /* * Find appropriate handler. */ for (i = 0; i < NTYPES; i++) if (!strcmp(fstypes[i].tag, tagstr)) { if (fstypes[i].handler(kd, &vnode, vn) != 0) { goto fail; } break; } if (i == NTYPES) { if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "?(%s)", tagstr); return (1); } vn->vn_mntdir = getmnton(kd, vnode.v_mount); if ((vnode.v_type == VBLK || vnode.v_type == VCHR) && vnode.v_rdev != NULL){ vn->vn_dev = dev2udev(kd, vnode.v_rdev); (void)kdevtoname(kd, vnode.v_rdev, vn->vn_devname); } else { vn->vn_dev = -1; } return (0); fail: if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } /* * kinfo vnode type to filestat translation. 
*/ static int kinfo_vtype2fst(int kfvtype) { static struct { int kf_vtype; int fst_vtype; } kfvtypes2fst[] = { { KF_VTYPE_VBAD, PS_FST_VTYPE_VBAD }, { KF_VTYPE_VBLK, PS_FST_VTYPE_VBLK }, { KF_VTYPE_VCHR, PS_FST_VTYPE_VCHR }, { KF_VTYPE_VDIR, PS_FST_VTYPE_VDIR }, { KF_VTYPE_VFIFO, PS_FST_VTYPE_VFIFO }, { KF_VTYPE_VLNK, PS_FST_VTYPE_VLNK }, { KF_VTYPE_VNON, PS_FST_VTYPE_VNON }, { KF_VTYPE_VREG, PS_FST_VTYPE_VREG }, { KF_VTYPE_VSOCK, PS_FST_VTYPE_VSOCK } }; #define NKFVTYPES (sizeof(kfvtypes2fst) / sizeof(*kfvtypes2fst)) unsigned int i; for (i = 0; i < NKFVTYPES; i++) if (kfvtypes2fst[i].kf_vtype == kfvtype) break; if (i == NKFVTYPES) return (PS_FST_VTYPE_UNKNOWN); return (kfvtypes2fst[i].fst_vtype); } static int procstat_get_vnode_info_sysctl(struct filestat *fst, struct vnstat *vn, char *errbuf) { struct statfs stbuf; struct kinfo_file *kif; struct kinfo_vmentry *kve; uint64_t fileid; uint64_t size; char *name, *path; uint32_t fsid; uint16_t mode; uint32_t rdev; int vntype; int status; assert(fst); assert(vn); bzero(vn, sizeof(*vn)); if (fst->fs_typedep == NULL) return (1); if (fst->fs_uflags & PS_FST_UFLAG_MMAP) { kve = fst->fs_typedep; fileid = kve->kve_vn_fileid; fsid = kve->kve_vn_fsid; mode = kve->kve_vn_mode; path = kve->kve_path; rdev = kve->kve_vn_rdev; size = kve->kve_vn_size; vntype = kinfo_vtype2fst(kve->kve_vn_type); status = kve->kve_status; } else { kif = fst->fs_typedep; fileid = kif->kf_un.kf_file.kf_file_fileid; fsid = kif->kf_un.kf_file.kf_file_fsid; mode = kif->kf_un.kf_file.kf_file_mode; path = kif->kf_path; rdev = kif->kf_un.kf_file.kf_file_rdev; size = kif->kf_un.kf_file.kf_file_size; vntype = kinfo_vtype2fst(kif->kf_vnode_type); status = kif->kf_status; } vn->vn_type = vntype; if (vntype == PS_FST_VTYPE_VNON || vntype == PS_FST_VTYPE_VBAD) return (0); if ((status & KF_ATTR_VALID) == 0) { if (errbuf != NULL) { snprintf(errbuf, _POSIX2_LINE_MAX, "? 
(no info available)"); } return (1); } if (path && *path) { statfs(path, &stbuf); vn->vn_mntdir = strdup(stbuf.f_mntonname); } else vn->vn_mntdir = strdup("-"); vn->vn_dev = rdev; if (vntype == PS_FST_VTYPE_VBLK) { name = devname(rdev, S_IFBLK); if (name != NULL) strlcpy(vn->vn_devname, name, sizeof(vn->vn_devname)); } else if (vntype == PS_FST_VTYPE_VCHR) { name = devname(vn->vn_dev, S_IFCHR); if (name != NULL) strlcpy(vn->vn_devname, name, sizeof(vn->vn_devname)); } vn->vn_fsid = fsid; vn->vn_fileid = fileid; vn->vn_size = size; vn->vn_mode = mode; return (0); } int procstat_get_socket_info(struct procstat *procstat, struct filestat *fst, struct sockstat *sock, char *errbuf) { assert(sock); if (procstat->type == PROCSTAT_KVM) { return (procstat_get_socket_info_kvm(procstat->kd, fst, sock, errbuf)); } else if (procstat->type == PROCSTAT_SYSCTL || procstat->type == PROCSTAT_CORE) { return (procstat_get_socket_info_sysctl(fst, sock, errbuf)); } else { warnx("unknown access method: %d", procstat->type); if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } } static int procstat_get_socket_info_kvm(kvm_t *kd, struct filestat *fst, struct sockstat *sock, char *errbuf) { struct domain dom; struct inpcb inpcb; struct protosw proto; struct socket s; struct unpcb unpcb; ssize_t len; void *so; assert(kd); assert(sock); assert(fst); bzero(sock, sizeof(*sock)); so = fst->fs_typedep; if (so == NULL) goto fail; sock->so_addr = (uintptr_t)so; /* fill in socket */ if (!kvm_read_all(kd, (unsigned long)so, &s, sizeof(struct socket))) { warnx("can't read sock at %p", (void *)so); goto fail; } /* fill in protosw entry */ if (!kvm_read_all(kd, (unsigned long)s.so_proto, &proto, sizeof(struct protosw))) { warnx("can't read protosw at %p", (void *)s.so_proto); goto fail; } /* fill in domain */ if (!kvm_read_all(kd, (unsigned long)proto.pr_domain, &dom, sizeof(struct domain))) { warnx("can't read domain at %p", (void *)proto.pr_domain); goto fail; } if ((len = kvm_read(kd, (unsigned long)dom.dom_name, sock->dname, sizeof(sock->dname) - 1)) < 0) { warnx("can't read domain name at %p", (void *)dom.dom_name); sock->dname[0] = '\0'; } else sock->dname[len] = '\0'; /* * Fill in known data. */ sock->type = s.so_type; sock->proto = proto.pr_protocol; sock->dom_family = dom.dom_family; sock->so_pcb = (uintptr_t)s.so_pcb; /* * Protocol specific data. */ switch(dom.dom_family) { case AF_INET: case AF_INET6: if (proto.pr_protocol == IPPROTO_TCP) { if (s.so_pcb) { if (kvm_read(kd, (u_long)s.so_pcb, (char *)&inpcb, sizeof(struct inpcb)) != sizeof(struct inpcb)) { warnx("can't read inpcb at %p", (void *)s.so_pcb); } else sock->inp_ppcb = (uintptr_t)inpcb.inp_ppcb; } } break; case AF_UNIX: if (s.so_pcb) { if (kvm_read(kd, (u_long)s.so_pcb, (char *)&unpcb, sizeof(struct unpcb)) != sizeof(struct unpcb)){ warnx("can't read unpcb at %p", (void *)s.so_pcb); } else if (unpcb.unp_conn) { sock->so_rcv_sb_state = s.so_rcv.sb_state; sock->so_snd_sb_state = s.so_snd.sb_state; sock->unp_conn = (uintptr_t)unpcb.unp_conn; } } break; default: break; } return (0); fail: if (errbuf != NULL) snprintf(errbuf, _POSIX2_LINE_MAX, "error"); return (1); } static int procstat_get_socket_info_sysctl(struct filestat *fst, struct sockstat *sock, char *errbuf __unused) { struct kinfo_file *kif; assert(sock); assert(fst); bzero(sock, sizeof(*sock)); kif = fst->fs_typedep; if (kif == NULL) return (0); /* * Fill in known data. 
*/ sock->type = kif->kf_sock_type; sock->proto = kif->kf_sock_protocol; sock->dom_family = kif->kf_sock_domain; sock->so_pcb = kif->kf_un.kf_sock.kf_sock_pcb; strlcpy(sock->dname, kif->kf_path, sizeof(sock->dname)); bcopy(&kif->kf_sa_local, &sock->sa_local, kif->kf_sa_local.ss_len); bcopy(&kif->kf_sa_peer, &sock->sa_peer, kif->kf_sa_peer.ss_len); /* * Protocol specific data. */ switch(sock->dom_family) { case AF_INET: case AF_INET6: if (sock->proto == IPPROTO_TCP) sock->inp_ppcb = kif->kf_un.kf_sock.kf_sock_inpcb; break; case AF_UNIX: if (kif->kf_un.kf_sock.kf_sock_unpconn != 0) { sock->so_rcv_sb_state = kif->kf_un.kf_sock.kf_sock_rcv_sb_state; sock->so_snd_sb_state = kif->kf_un.kf_sock.kf_sock_snd_sb_state; sock->unp_conn = kif->kf_un.kf_sock.kf_sock_unpconn; } break; default: break; } return (0); } /* * Descriptor flags to filestat translation. */ static int to_filestat_flags(int flags) { static struct { int flag; int fst_flag; } fstflags[] = { { FREAD, PS_FST_FFLAG_READ }, { FWRITE, PS_FST_FFLAG_WRITE }, { O_APPEND, PS_FST_FFLAG_APPEND }, { O_ASYNC, PS_FST_FFLAG_ASYNC }, { O_CREAT, PS_FST_FFLAG_CREAT }, { O_DIRECT, PS_FST_FFLAG_DIRECT }, { O_EXCL, PS_FST_FFLAG_EXCL }, { O_EXEC, PS_FST_FFLAG_EXEC }, { O_EXLOCK, PS_FST_FFLAG_EXLOCK }, { O_NOFOLLOW, PS_FST_FFLAG_NOFOLLOW }, { O_NONBLOCK, PS_FST_FFLAG_NONBLOCK }, { O_SHLOCK, PS_FST_FFLAG_SHLOCK }, { O_SYNC, PS_FST_FFLAG_SYNC }, { O_TRUNC, PS_FST_FFLAG_TRUNC } }; #define NFSTFLAGS (sizeof(fstflags) / sizeof(*fstflags)) int fst_flags; unsigned int i; fst_flags = 0; for (i = 0; i < NFSTFLAGS; i++) if (flags & fstflags[i].flag) fst_flags |= fstflags[i].fst_flag; return (fst_flags); } /* * Vnode type to filestate translation. */ static int vntype2psfsttype(int type) { static struct { int vtype; int fst_vtype; } vt2fst[] = { { VBAD, PS_FST_VTYPE_VBAD }, { VBLK, PS_FST_VTYPE_VBLK }, { VCHR, PS_FST_VTYPE_VCHR }, { VDIR, PS_FST_VTYPE_VDIR }, { VFIFO, PS_FST_VTYPE_VFIFO }, { VLNK, PS_FST_VTYPE_VLNK }, { VNON, PS_FST_VTYPE_VNON }, { VREG, PS_FST_VTYPE_VREG }, { VSOCK, PS_FST_VTYPE_VSOCK } }; #define NVFTYPES (sizeof(vt2fst) / sizeof(*vt2fst)) unsigned int i, fst_type; fst_type = PS_FST_VTYPE_UNKNOWN; for (i = 0; i < NVFTYPES; i++) { if (type == vt2fst[i].vtype) { fst_type = vt2fst[i].fst_vtype; break; } } return (fst_type); } static char * getmnton(kvm_t *kd, struct mount *m) { struct mount mnt; static struct mtab { struct mtab *next; struct mount *m; char mntonname[MNAMELEN + 1]; } *mhead = NULL; struct mtab *mt; for (mt = mhead; mt != NULL; mt = mt->next) if (m == mt->m) return (mt->mntonname); if (!kvm_read_all(kd, (unsigned long)m, &mnt, sizeof(struct mount))) { warnx("can't read mount table at %p", (void *)m); return (NULL); } if ((mt = malloc(sizeof (struct mtab))) == NULL) err(1, NULL); mt->m = m; bcopy(&mnt.mnt_stat.f_mntonname[0], &mt->mntonname[0], MNAMELEN); mt->mntonname[MNAMELEN] = '\0'; mt->next = mhead; mhead = mt; return (mt->mntonname); } /* * Auxiliary structures and functions to get process environment or * command line arguments. 
*/ struct argvec { char *buf; size_t bufsize; char **argv; size_t argc; }; static struct argvec * argvec_alloc(size_t bufsize) { struct argvec *av; av = malloc(sizeof(*av)); if (av == NULL) return (NULL); av->bufsize = bufsize; av->buf = malloc(av->bufsize); if (av->buf == NULL) { free(av); return (NULL); } av->argc = 32; av->argv = malloc(sizeof(char *) * av->argc); if (av->argv == NULL) { free(av->buf); free(av); return (NULL); } return av; } static void argvec_free(struct argvec * av) { free(av->argv); free(av->buf); free(av); } static char ** getargv(struct procstat *procstat, struct kinfo_proc *kp, size_t nchr, int env) { int error, name[4], argc, i; struct argvec *av, **avp; enum psc_type type; size_t len; char *p, **argv; assert(procstat); assert(kp); if (procstat->type == PROCSTAT_KVM) { warnx("can't use kvm access method"); return (NULL); } if (procstat->type != PROCSTAT_SYSCTL && procstat->type != PROCSTAT_CORE) { warnx("unknown access method: %d", procstat->type); return (NULL); } if (nchr == 0 || nchr > ARG_MAX) nchr = ARG_MAX; avp = (struct argvec **)(env ? &procstat->argv : &procstat->envv); av = *avp; if (av == NULL) { av = argvec_alloc(nchr); if (av == NULL) { warn("malloc(%zu)", nchr); return (NULL); } *avp = av; } else if (av->bufsize < nchr) { av->buf = reallocf(av->buf, nchr); if (av->buf == NULL) { warn("malloc(%zu)", nchr); return (NULL); } } if (procstat->type == PROCSTAT_SYSCTL) { name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = env ? KERN_PROC_ENV : KERN_PROC_ARGS; name[3] = kp->ki_pid; len = nchr; error = sysctl(name, 4, av->buf, &len, NULL, 0); if (error != 0 && errno != ESRCH && errno != EPERM) warn("sysctl(kern.proc.%s)", env ? "env" : "args"); if (error != 0 || len == 0) return (NULL); } else /* procstat->type == PROCSTAT_CORE */ { type = env ? PSC_TYPE_ENVV : PSC_TYPE_ARGV; len = nchr; if (procstat_core_get(procstat->core, type, av->buf, &len) == NULL) { return (NULL); } } argv = av->argv; argc = av->argc; i = 0; for (p = av->buf; p < av->buf + len; p += strlen(p) + 1) { argv[i++] = p; if (i < argc) continue; /* Grow argv. */ argc += argc; argv = realloc(argv, sizeof(char *) * argc); if (argv == NULL) { warn("malloc(%zu)", sizeof(char *) * argc); return (NULL); } av->argv = argv; av->argc = argc; } argv[i] = NULL; return (argv); } /* * Return process command line arguments. */ char ** procstat_getargv(struct procstat *procstat, struct kinfo_proc *p, size_t nchr) { return (getargv(procstat, p, nchr, 0)); } /* * Free the buffer allocated by procstat_getargv(). */ void procstat_freeargv(struct procstat *procstat) { if (procstat->argv != NULL) { argvec_free(procstat->argv); procstat->argv = NULL; } } /* * Return process environment. */ char ** procstat_getenvv(struct procstat *procstat, struct kinfo_proc *p, size_t nchr) { return (getargv(procstat, p, nchr, 1)); } /* * Free the buffer allocated by procstat_getenvv(). */ void procstat_freeenvv(struct procstat *procstat) { if (procstat->envv != NULL) { argvec_free(procstat->envv); procstat->envv = NULL; } } static struct kinfo_vmentry * kinfo_getvmmap_core(struct procstat_core *core, int *cntp) { int cnt; size_t len; char *buf, *bp, *eb; struct kinfo_vmentry *kiv, *kp, *kv; buf = procstat_core_get(core, PSC_TYPE_VMMAP, NULL, &len); if (buf == NULL) return (NULL); /* * XXXMG: The code below is just copy&past from libutil. 
* The code duplication can be avoided if libutil * is extended to provide something like: * struct kinfo_vmentry *kinfo_getvmmap_from_buf(const char *buf, * size_t len, int *cntp); */ /* Pass 1: count items */ cnt = 0; bp = buf; eb = buf + len; while (bp < eb) { kv = (struct kinfo_vmentry *)(uintptr_t)bp; + if (kv->kve_structsize == 0) + break; bp += kv->kve_structsize; cnt++; } kiv = calloc(cnt, sizeof(*kiv)); if (kiv == NULL) { free(buf); return (NULL); } bp = buf; eb = buf + len; kp = kiv; /* Pass 2: unpack */ while (bp < eb) { kv = (struct kinfo_vmentry *)(uintptr_t)bp; + if (kv->kve_structsize == 0) + break; /* Copy/expand into pre-zeroed buffer */ memcpy(kp, kv, kv->kve_structsize); /* Advance to next packed record */ bp += kv->kve_structsize; /* Set field size to fixed length, advance */ kp->kve_structsize = sizeof(*kp); kp++; } free(buf); *cntp = cnt; return (kiv); /* Caller must free() return value */ } struct kinfo_vmentry * procstat_getvmmap(struct procstat *procstat, struct kinfo_proc *kp, unsigned int *cntp) { switch(procstat->type) { case PROCSTAT_KVM: warnx("kvm method is not supported"); return (NULL); case PROCSTAT_SYSCTL: return (kinfo_getvmmap(kp->ki_pid, cntp)); case PROCSTAT_CORE: return (kinfo_getvmmap_core(procstat->core, cntp)); default: warnx("unknown access method: %d", procstat->type); return (NULL); } } void procstat_freevmmap(struct procstat *procstat __unused, struct kinfo_vmentry *vmmap) { free(vmmap); } static gid_t * procstat_getgroups_kvm(kvm_t *kd, struct kinfo_proc *kp, unsigned int *cntp) { struct proc proc; struct ucred ucred; gid_t *groups; size_t len; assert(kd != NULL); assert(kp != NULL); if (!kvm_read_all(kd, (unsigned long)kp->ki_paddr, &proc, sizeof(proc))) { warnx("can't read proc struct at %p for pid %d", kp->ki_paddr, kp->ki_pid); return (NULL); } if (proc.p_ucred == NOCRED) return (NULL); if (!kvm_read_all(kd, (unsigned long)proc.p_ucred, &ucred, sizeof(ucred))) { warnx("can't read ucred struct at %p for pid %d", proc.p_ucred, kp->ki_pid); return (NULL); } len = ucred.cr_ngroups * sizeof(gid_t); groups = malloc(len); if (groups == NULL) { warn("malloc(%zu)", len); return (NULL); } if (!kvm_read_all(kd, (unsigned long)ucred.cr_groups, groups, len)) { warnx("can't read groups at %p for pid %d", ucred.cr_groups, kp->ki_pid); free(groups); return (NULL); } *cntp = ucred.cr_ngroups; return (groups); } static gid_t * procstat_getgroups_sysctl(pid_t pid, unsigned int *cntp) { int mib[4]; size_t len; gid_t *groups; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_GROUPS; mib[3] = pid; len = (sysconf(_SC_NGROUPS_MAX) + 1) * sizeof(gid_t); groups = malloc(len); if (groups == NULL) { warn("malloc(%zu)", len); return (NULL); } if (sysctl(mib, 4, groups, &len, NULL, 0) == -1) { warn("sysctl: kern.proc.groups: %d", pid); free(groups); return (NULL); } *cntp = len / sizeof(gid_t); return (groups); } static gid_t * procstat_getgroups_core(struct procstat_core *core, unsigned int *cntp) { size_t len; gid_t *groups; groups = procstat_core_get(core, PSC_TYPE_GROUPS, NULL, &len); if (groups == NULL) return (NULL); *cntp = len / sizeof(gid_t); return (groups); } gid_t * procstat_getgroups(struct procstat *procstat, struct kinfo_proc *kp, unsigned int *cntp) { switch(procstat->type) { case PROCSTAT_KVM: return (procstat_getgroups_kvm(procstat->kd, kp, cntp)); case PROCSTAT_SYSCTL: return (procstat_getgroups_sysctl(kp->ki_pid, cntp)); case PROCSTAT_CORE: return (procstat_getgroups_core(procstat->core, cntp)); default: warnx("unknown access method: %d", 
procstat->type); return (NULL); } } void procstat_freegroups(struct procstat *procstat __unused, gid_t *groups) { free(groups); } static int procstat_getumask_kvm(kvm_t *kd, struct kinfo_proc *kp, unsigned short *maskp) { struct filedesc fd; assert(kd != NULL); assert(kp != NULL); if (kp->ki_fd == NULL) return (-1); if (!kvm_read_all(kd, (unsigned long)kp->ki_fd, &fd, sizeof(fd))) { warnx("can't read filedesc at %p for pid %d", kp->ki_fd, kp->ki_pid); return (-1); } *maskp = fd.fd_cmask; return (0); } static int procstat_getumask_sysctl(pid_t pid, unsigned short *maskp) { int error; int mib[4]; size_t len; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_UMASK; mib[3] = pid; len = sizeof(*maskp); error = sysctl(mib, 4, maskp, &len, NULL, 0); if (error != 0 && errno != ESRCH && errno != EPERM) warn("sysctl: kern.proc.umask: %d", pid); return (error); } static int procstat_getumask_core(struct procstat_core *core, unsigned short *maskp) { size_t len; unsigned short *buf; buf = procstat_core_get(core, PSC_TYPE_UMASK, NULL, &len); if (buf == NULL) return (-1); if (len < sizeof(*maskp)) { free(buf); return (-1); } *maskp = *buf; free(buf); return (0); } int procstat_getumask(struct procstat *procstat, struct kinfo_proc *kp, unsigned short *maskp) { switch(procstat->type) { case PROCSTAT_KVM: return (procstat_getumask_kvm(procstat->kd, kp, maskp)); case PROCSTAT_SYSCTL: return (procstat_getumask_sysctl(kp->ki_pid, maskp)); case PROCSTAT_CORE: return (procstat_getumask_core(procstat->core, maskp)); default: warnx("unknown access method: %d", procstat->type); return (-1); } } static int procstat_getrlimit_kvm(kvm_t *kd, struct kinfo_proc *kp, int which, struct rlimit* rlimit) { struct proc proc; unsigned long offset; assert(kd != NULL); assert(kp != NULL); assert(which >= 0 && which < RLIM_NLIMITS); if (!kvm_read_all(kd, (unsigned long)kp->ki_paddr, &proc, sizeof(proc))) { warnx("can't read proc struct at %p for pid %d", kp->ki_paddr, kp->ki_pid); return (-1); } if (proc.p_limit == NULL) return (-1); offset = (unsigned long)proc.p_limit + sizeof(struct rlimit) * which; if (!kvm_read_all(kd, offset, rlimit, sizeof(*rlimit))) { warnx("can't read rlimit struct at %p for pid %d", (void *)offset, kp->ki_pid); return (-1); } return (0); } static int procstat_getrlimit_sysctl(pid_t pid, int which, struct rlimit* rlimit) { int error, name[5]; size_t len; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_RLIMIT; name[3] = pid; name[4] = which; len = sizeof(struct rlimit); error = sysctl(name, 5, rlimit, &len, NULL, 0); if (error < 0 && errno != ESRCH) { warn("sysctl: kern.proc.rlimit: %d", pid); return (-1); } if (error < 0 || len != sizeof(struct rlimit)) return (-1); return (0); } static int procstat_getrlimit_core(struct procstat_core *core, int which, struct rlimit* rlimit) { size_t len; struct rlimit* rlimits; if (which < 0 || which >= RLIM_NLIMITS) { errno = EINVAL; warn("getrlimit: which"); return (-1); } rlimits = procstat_core_get(core, PSC_TYPE_RLIMIT, NULL, &len); if (rlimits == NULL) return (-1); if (len < sizeof(struct rlimit) * RLIM_NLIMITS) { free(rlimits); return (-1); } *rlimit = rlimits[which]; return (0); } int procstat_getrlimit(struct procstat *procstat, struct kinfo_proc *kp, int which, struct rlimit* rlimit) { switch(procstat->type) { case PROCSTAT_KVM: return (procstat_getrlimit_kvm(procstat->kd, kp, which, rlimit)); case PROCSTAT_SYSCTL: return (procstat_getrlimit_sysctl(kp->ki_pid, which, rlimit)); case PROCSTAT_CORE: return 
(procstat_getrlimit_core(procstat->core, which, rlimit)); default: warnx("unknown access method: %d", procstat->type); return (-1); } } static int procstat_getpathname_sysctl(pid_t pid, char *pathname, size_t maxlen) { int error, name[4]; size_t len; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_PATHNAME; name[3] = pid; len = maxlen; error = sysctl(name, 4, pathname, &len, NULL, 0); if (error != 0 && errno != ESRCH) warn("sysctl: kern.proc.pathname: %d", pid); if (len == 0) pathname[0] = '\0'; return (error); } static int procstat_getpathname_core(struct procstat_core *core, char *pathname, size_t maxlen) { struct kinfo_file *files; int cnt, i, result; files = kinfo_getfile_core(core, &cnt); if (files == NULL) return (-1); result = -1; for (i = 0; i < cnt; i++) { if (files[i].kf_fd != KF_FD_TYPE_TEXT) continue; strncpy(pathname, files[i].kf_path, maxlen); result = 0; break; } free(files); return (result); } int procstat_getpathname(struct procstat *procstat, struct kinfo_proc *kp, char *pathname, size_t maxlen) { switch(procstat->type) { case PROCSTAT_KVM: /* XXX: Return empty string. */ if (maxlen > 0) pathname[0] = '\0'; return (0); case PROCSTAT_SYSCTL: return (procstat_getpathname_sysctl(kp->ki_pid, pathname, maxlen)); case PROCSTAT_CORE: return (procstat_getpathname_core(procstat->core, pathname, maxlen)); default: warnx("unknown access method: %d", procstat->type); return (-1); } } static int procstat_getosrel_kvm(kvm_t *kd, struct kinfo_proc *kp, int *osrelp) { struct proc proc; assert(kd != NULL); assert(kp != NULL); if (!kvm_read_all(kd, (unsigned long)kp->ki_paddr, &proc, sizeof(proc))) { warnx("can't read proc struct at %p for pid %d", kp->ki_paddr, kp->ki_pid); return (-1); } *osrelp = proc.p_osrel; return (0); } static int procstat_getosrel_sysctl(pid_t pid, int *osrelp) { int error, name[4]; size_t len; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_OSREL; name[3] = pid; len = sizeof(*osrelp); error = sysctl(name, 4, osrelp, &len, NULL, 0); if (error != 0 && errno != ESRCH) warn("sysctl: kern.proc.osrel: %d", pid); return (error); } static int procstat_getosrel_core(struct procstat_core *core, int *osrelp) { size_t len; int *buf; buf = procstat_core_get(core, PSC_TYPE_OSREL, NULL, &len); if (buf == NULL) return (-1); if (len < sizeof(*osrelp)) { free(buf); return (-1); } *osrelp = *buf; free(buf); return (0); } int procstat_getosrel(struct procstat *procstat, struct kinfo_proc *kp, int *osrelp) { switch(procstat->type) { case PROCSTAT_KVM: return (procstat_getosrel_kvm(procstat->kd, kp, osrelp)); case PROCSTAT_SYSCTL: return (procstat_getosrel_sysctl(kp->ki_pid, osrelp)); case PROCSTAT_CORE: return (procstat_getosrel_core(procstat->core, osrelp)); default: warnx("unknown access method: %d", procstat->type); return (-1); } } #define PROC_AUXV_MAX 256 #if __ELF_WORD_SIZE == 64 static const char *elf32_sv_names[] = { "Linux ELF32", "FreeBSD ELF32", }; static int is_elf32_sysctl(pid_t pid) { int error, name[4]; size_t len, i; static char sv_name[256]; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_SV_NAME; name[3] = pid; len = sizeof(sv_name); error = sysctl(name, 4, sv_name, &len, NULL, 0); if (error != 0 || len == 0) return (0); for (i = 0; i < sizeof(elf32_sv_names) / sizeof(*elf32_sv_names); i++) { if (strncmp(sv_name, elf32_sv_names[i], sizeof(sv_name)) == 0) return (1); } return (0); } static Elf_Auxinfo * procstat_getauxv32_sysctl(pid_t pid, unsigned int *cntp) { Elf_Auxinfo *auxv; Elf32_Auxinfo *auxv32; void *ptr; size_t len; 
unsigned int i, count; int name[4]; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_AUXV; name[3] = pid; len = PROC_AUXV_MAX * sizeof(Elf32_Auxinfo); auxv = NULL; auxv32 = malloc(len); if (auxv32 == NULL) { warn("malloc(%zu)", len); goto out; } if (sysctl(name, 4, auxv32, &len, NULL, 0) == -1) { if (errno != ESRCH && errno != EPERM) warn("sysctl: kern.proc.auxv: %d: %d", pid, errno); goto out; } count = len / sizeof(Elf_Auxinfo); auxv = malloc(count * sizeof(Elf_Auxinfo)); if (auxv == NULL) { warn("malloc(%zu)", count * sizeof(Elf_Auxinfo)); goto out; } for (i = 0; i < count; i++) { /* * XXX: We expect that values for a_type on a 32-bit platform * are directly mapped to values on 64-bit one, which is not * necessarily true. */ auxv[i].a_type = auxv32[i].a_type; ptr = &auxv32[i].a_un; auxv[i].a_un.a_val = *((uint32_t *)ptr); } *cntp = count; out: free(auxv32); return (auxv); } #endif /* __ELF_WORD_SIZE == 64 */ static Elf_Auxinfo * procstat_getauxv_sysctl(pid_t pid, unsigned int *cntp) { Elf_Auxinfo *auxv; int name[4]; size_t len; #if __ELF_WORD_SIZE == 64 if (is_elf32_sysctl(pid)) return (procstat_getauxv32_sysctl(pid, cntp)); #endif name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_AUXV; name[3] = pid; len = PROC_AUXV_MAX * sizeof(Elf_Auxinfo); auxv = malloc(len); if (auxv == NULL) { warn("malloc(%zu)", len); return (NULL); } if (sysctl(name, 4, auxv, &len, NULL, 0) == -1) { if (errno != ESRCH && errno != EPERM) warn("sysctl: kern.proc.auxv: %d: %d", pid, errno); free(auxv); return (NULL); } *cntp = len / sizeof(Elf_Auxinfo); return (auxv); } static Elf_Auxinfo * procstat_getauxv_core(struct procstat_core *core, unsigned int *cntp) { Elf_Auxinfo *auxv; size_t len; auxv = procstat_core_get(core, PSC_TYPE_AUXV, NULL, &len); if (auxv == NULL) return (NULL); *cntp = len / sizeof(Elf_Auxinfo); return (auxv); } Elf_Auxinfo * procstat_getauxv(struct procstat *procstat, struct kinfo_proc *kp, unsigned int *cntp) { switch(procstat->type) { case PROCSTAT_KVM: warnx("kvm method is not supported"); return (NULL); case PROCSTAT_SYSCTL: return (procstat_getauxv_sysctl(kp->ki_pid, cntp)); case PROCSTAT_CORE: return (procstat_getauxv_core(procstat->core, cntp)); default: warnx("unknown access method: %d", procstat->type); return (NULL); } } void procstat_freeauxv(struct procstat *procstat __unused, Elf_Auxinfo *auxv) { free(auxv); } static struct kinfo_kstack * procstat_getkstack_sysctl(pid_t pid, int *cntp) { struct kinfo_kstack *kkstp; int error, name[4]; size_t len; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_KSTACK; name[3] = pid; len = 0; error = sysctl(name, 4, NULL, &len, NULL, 0); if (error < 0 && errno != ESRCH && errno != EPERM && errno != ENOENT) { warn("sysctl: kern.proc.kstack: %d", pid); return (NULL); } if (error == -1 && errno == ENOENT) { warnx("sysctl: kern.proc.kstack unavailable" " (options DDB or options STACK required in kernel)"); return (NULL); } if (error == -1) return (NULL); kkstp = malloc(len); if (kkstp == NULL) { warn("malloc(%zu)", len); return (NULL); } if (sysctl(name, 4, kkstp, &len, NULL, 0) == -1) { warn("sysctl: kern.proc.pid: %d", pid); free(kkstp); return (NULL); } *cntp = len / sizeof(*kkstp); return (kkstp); } struct kinfo_kstack * procstat_getkstack(struct procstat *procstat, struct kinfo_proc *kp, unsigned int *cntp) { switch(procstat->type) { case PROCSTAT_KVM: warnx("kvm method is not supported"); return (NULL); case PROCSTAT_SYSCTL: return (procstat_getkstack_sysctl(kp->ki_pid, cntp)); case PROCSTAT_CORE: warnx("core 
method is not supported"); return (NULL); default: warnx("unknown access method: %d", procstat->type); return (NULL); } } void procstat_freekstack(struct procstat *procstat __unused, struct kinfo_kstack *kkstp) { free(kkstp); } Index: user/ngie/more-tests2/lib/libutil/kinfo_getvmmap.c =================================================================== --- user/ngie/more-tests2/lib/libutil/kinfo_getvmmap.c (revision 288954) +++ user/ngie/more-tests2/lib/libutil/kinfo_getvmmap.c (revision 288955) @@ -1,73 +1,77 @@ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include "libutil.h" struct kinfo_vmentry * kinfo_getvmmap(pid_t pid, int *cntp) { int mib[4]; int error; int cnt; size_t len; char *buf, *bp, *eb; struct kinfo_vmentry *kiv, *kp, *kv; *cntp = 0; len = 0; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_VMMAP; mib[3] = pid; error = sysctl(mib, 4, NULL, &len, NULL, 0); if (error) return (NULL); len = len * 4 / 3; buf = malloc(len); if (buf == NULL) return (NULL); error = sysctl(mib, 4, buf, &len, NULL, 0); if (error) { free(buf); return (NULL); } /* Pass 1: count items */ cnt = 0; bp = buf; eb = buf + len; while (bp < eb) { kv = (struct kinfo_vmentry *)(uintptr_t)bp; + if (kv->kve_structsize == 0) + break; bp += kv->kve_structsize; cnt++; } kiv = calloc(cnt, sizeof(*kiv)); if (kiv == NULL) { free(buf); return (NULL); } bp = buf; eb = buf + len; kp = kiv; /* Pass 2: unpack */ while (bp < eb) { kv = (struct kinfo_vmentry *)(uintptr_t)bp; + if (kv->kve_structsize == 0) + break; /* Copy/expand into pre-zeroed buffer */ memcpy(kp, kv, kv->kve_structsize); /* Advance to next packed record */ bp += kv->kve_structsize; /* Set field size to fixed length, advance */ kp->kve_structsize = sizeof(*kp); kp++; } free(buf); *cntp = cnt; return (kiv); /* Caller must free() return value */ } Index: user/ngie/more-tests2/lib/libutil =================================================================== --- user/ngie/more-tests2/lib/libutil (revision 288954) +++ user/ngie/more-tests2/lib/libutil (revision 288955) Property changes on: user/ngie/more-tests2/lib/libutil ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libutil:r288944-288954 Index: user/ngie/more-tests2/release/doc/en_US.ISO8859-1/relnotes/article.xml =================================================================== --- user/ngie/more-tests2/release/doc/en_US.ISO8859-1/relnotes/article.xml (revision 288954) +++ user/ngie/more-tests2/release/doc/en_US.ISO8859-1/relnotes/article.xml (revision 288955) @@ -1,1803 +1,1796 @@ %release; %sponsor; %vendor; ]>
&os; &release.current; Release Notes The &os; Project $FreeBSD$ - + 2015 The &os; Documentation Project &tm-attrib.freebsd; &tm-attrib.ibm; &tm-attrib.ieee; &tm-attrib.intel; &tm-attrib.sparc; &tm-attrib.general; The release notes for &os; &release.current; contain a summary of the changes made to the &os; base system on the &release.branch; development line. This document lists applicable security advisories that were issued since the last release, as well as significant changes to the &os; kernel and userland. Some brief remarks on upgrading are also presented. Introduction This document contains the release notes for &os; &release.current;. It describes recently added, changed, or deleted features of &os;. It also provides some notes on upgrading from previous versions of &os;. The &release.type; distribution to which these release notes apply represents the latest point along the &release.branch; development branch since &release.branch; was created. Information regarding pre-built, binary &release.type; distributions along this branch can be found at &release.url;. The &release.type; distribution to which these release notes apply represents a point along the &release.branch; development branch between &release.prev; and the future &release.next;. Information regarding pre-built, binary &release.type; distributions along this branch can be found at &release.url;. This distribution of &os; &release.current; is a &release.type; distribution. It can be found at &release.url; or any of its mirrors. More information on obtaining this (or other) &release.type; distributions of &os; can be found in the Obtaining &os; appendix to the &os; Handbook. All users are encouraged to consult the release errata before installing &os;. The errata document is updated with late-breaking information discovered late in the release cycle or after the release. Typically, it contains information on known bugs, security advisories, and corrections to documentation. An up-to-date copy of the errata for &os; &release.current; can be found on the &os; Web site. This document describes the most user-visible new or changed features in &os; since &release.prev;. In general, changes described here are unique to the &release.branch; branch unless specifically marked as &merged; features. Typical release note items document recent security advisories issued after &release.prev;, new drivers or hardware support, new commands or options, major bug fixes, or contributed software upgrades. They may also list changes to major ports/packages or release engineering practices. Clearly the release notes cannot list every single change made to &os; between releases; this document focuses primarily on security advisories, user-visible changes, and major architectural improvements. Upgrading from Previous Releases of &os; Binary upgrades between RELEASE versions (and snapshots of the various security branches) are supported using the &man.freebsd-update.8; utility. The binary upgrade procedure will update unmodified userland utilities, as well as unmodified GENERIC kernels distributed as a part of an official &os; release. The &man.freebsd-update.8; utility requires that the host being upgraded have Internet connectivity. Source-based upgrades (those based on recompiling the &os; base system from source code) from previous versions are supported, according to the instructions in /usr/src/UPDATING. Upgrading &os; should only be attempted after backing up all data and configuration files. 
Security and Errata This section lists the various Security Advisories and Errata Notices since &release.prev;. Security Advisories &security; Errata Notices &errata; Userland This section covers changes and additions to userland applications, contributed software, and system utilities. Userland Configuration Changes The default &man.newsyslog.conf.5; now includes files from the /etc/newsyslog.conf.d/ and /usr/local/etc/newsyslog.conf.d/ directories for &man.newsyslog.8;. The &man.mailwrapper.8; utility has been updated to use &man.mailer.conf.5; from the LOCALBASE environment variable, which defaults to /usr/local if unset. The MK_ARM_EABI &man.src.conf.5; option has been removed. The ntp suite has been updated to version 4.2.8p3. Userland Application Changes The &man.casperd.8; daemon has been added, which provides access to functionality that is not available in the capability mode sandbox. When unable to load a kernel module with &man.kldload.8;, a message advising to check the &man.dmesg.8; output is now printed, as opposed to the previous output, Exec format error. The &man.pciconf.8; utility can now identify PCI devices that are attached to a driver by their device name instead of just the selector. Additionally, an optional device argument can be passed to the -l flag to restrict the output to only listing details about a single device. A new flag, onifconsole, has been added to /etc/ttys. This allows the system to provide a login prompt via serial console if the device is an active kernel console; otherwise it is equivalent to off. Support for displaying VPD for PCI devices via &man.pciconf.8; has been added. The &man.ping.8; utility now uses the Capsicum framework to drop privileges, protecting against malicious network packets (see the brief capability-mode sketch below). The &man.ps.1; utility has been updated to include the -J flag, used to filter output by matching &man.jail.8; IDs and names. Additionally, argument 0 can be passed to -J to only list processes running on the host system. The &man.top.1; utility has been updated to filter by &man.jail.8; ID or name, as a followup to the &man.ps.1; change in r265229. The &man.pmcstat.8; utility has been updated to include a new flag, -l, which ends event collection after the specified number of seconds. The &man.ps.1; utility has been updated to include a new keyword, tracer, which displays the PID of the tracing process. Support for adding empty partitions has been added to the &man.mkimg.1; utility. The &man.primes.6; utility has been updated to correctly enumerate prime numbers between 4295098369 and 3825123056546413050; prior to this change, values in that range could be incorrectly identified as prime numbers. The &man.mkimg.1; utility has been updated to include three options used to print information about &man.mkimg.1; itself: --version prints the current version of the &man.mkimg.1; utility, --formats prints the disk image file formats supported by &man.mkimg.1;, and --schemes prints the partition schemes supported by &man.mkimg.1;. Userland &man.ctf.5; support in &man.dtrace.1; has been added. With this change, &man.dtrace.1; is able to resolve type information for function and USDT probe arguments, and function return values. The &man.elfdump.1; utility has been updated to support capability mode provided by &man.capsicum.4;. The &man.fstyp.8; utility has been added, which is used to determine the filesystem on a specified device. The libedit library has been updated to support UTF-8, which additionally provides Unicode support to &man.sh.1;.
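Several of the items above (&man.casperd.8;, &man.ping.8;, &man.elfdump.1;) refer to the &man.capsicum.4; capability mode sandbox. The following is a minimal sketch, not taken from any of those utilities, of how a program limits an already-open descriptor and enters capability mode; the input path and the read-only policy are illustrative assumptions.

/*
 * Minimal Capsicum sketch in the spirit of the ping(8) and elfdump(1)
 * items above.  NOT code from either utility; the file name and the
 * CAP_READ-only policy are placeholders.
 */
#include <sys/capsicum.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	cap_rights_t rights;
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/var/log/messages", O_RDONLY);	/* hypothetical input */
	if (fd < 0)
		err(1, "open");

	/* Limit the descriptor to reading only. */
	cap_rights_init(&rights, CAP_READ);
	if (cap_rights_limit(fd, &rights) < 0)
		err(1, "cap_rights_limit");

	/* Enter the sandbox: no new paths or sockets can be opened after this. */
	if (cap_enter() < 0)
		err(1, "cap_enter");

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		(void)write(STDOUT_FILENO, buf, (size_t)n);
	close(fd);
	return (0);
}

After cap_enter() succeeds, the process keeps using descriptors it already holds, subject to the rights set on them, but can no longer reach global namespaces such as the filesystem.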
The &man.mkimg.1; utility has been updated to support the MBR EFI partition type. The &man.ptrace.2; system call has been updated to include support for Altivec registers on &os;/&arch.powerpc;. A new device control utility, &man.devctl.8;, has been added, which allows making administrative changes to individual devices, such as attaching and detaching drivers, and enabling and disabling devices. The &man.devctl.8; utility uses the new &man.devctl.3; library. The &man.netstat.1; utility has been updated to link against the &man.libxo.3; shared library. A new flag, -c, has been added to the &man.mkimg.1; utility, which allows specifying the capacity of the target disk image. The &man.uefisign.8; utility has been added. The &man.freebsd-update.8; utility has been updated to prevent fetching updated binary patches when a previous upgrade has not been fully completed. A regression in the &man.libarchive.3; library that would prevent a directory from being included in the archive when --one-file-system is used has been fixed. The &man.ar.1; utility has been updated to set ARCHIVE_EXTRACT_SECURE_SYMLINKS and ARCHIVE_EXTRACT_SECURE_NODOTDOT to disallow directory traversal when extracting an archive, similar to &man.tar.1;. A race condition in &man.wc.1; that would cause final results to be sent to &man.stderr.4; when receiving the SIGINFO signal has been fixed. The &man.chflags.1;, &man.chgrp.1;, &man.chmod.1;, and &man.chown.8; utilities now affect symbolic links when the -R flag is specified, as documented in &man.symlink.7;. The &man.date.1; utility has been updated to print the modification time of the file passed as an argument to the -r flag, improving compatibility with the GNU &man.date.1; utility behavior. The &man.pw.8; utility has been updated with a new flag, -R, that sets the root directory within which the utility will operate. The &man.lockstat.1; utility has been updated with several improvements: spin locks are now reported as the amount of time spent spinning, instead of loop iterations; reader locks are now recognized as adaptive locks that can spin on &os;; lock acquisition events for successful reader try-lock events are now reported; and spin and block events are now reported before lock acquisition events. The &man.fstyp.8; utility has been updated to be able to detect &man.zfs.8; and &man.geli.8; filesystems. The &man.mkimg.1; utility has been updated to include support for NTFS filesystems in both MBR and GPT partitioning schemes. The &man.quota.1; utility has been updated to include support for IPv6. The &man.jail.8; utility has been updated to include a new flag, -l, which ensures a clean environment in the target jail when used. Additionally, &man.jail.8; will run a shell within the target jail when no commands are specified. The &man.w.1; utility has been updated to display the full IPv6 remote address of the host from which a user is connected. The &man.jail.8; framework has been updated to allow mounting &man.linprocfs.5; and &man.linsysfs.5; within a jail. The &man.patch.1; utility has been updated to include a new option to the -V flag, none, which disables backup file creation when applying a patch. The &man.ar.1; utility now enables deterministic mode (-D) by default. This behavior can be disabled by specifying the -U flag. The &man.xargs.1; utility has been updated to allow specifying 0 as an argument to the -P (parallel mode) flag, which allows creating as many concurrent processes as possible.
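As an example of the &man.xargs.1; -P 0 behavior noted above, the following sketch compresses files with as many concurrent processes as possible; the path and file pattern are illustrative only:

    # run as many gzip processes concurrently as possible
    find /var/log -name '*.old' -print0 | xargs -0 -P 0 gzip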
The &man.patch.1; utility has been updated to remove the automatic checkout feature. A new utility, &man.sesutil.8;, has been added, which is used to manage &man.ses.4; devices. The &man.pciconf.8; utility has been updated to use the PCI ID database from the misc/pciids package, if present, falling back to the PCI ID database in the &os; base system. The &man.ifconfig.8; utility has been updated to always exit with an error code if an important &man.ioctl.2; fails.

Contributed Software
- &man.lldb.1; has been updated to - upstream snapshot version r196259. - &man.byacc.1; has been updated to version 20140101. - libc++ has - been updated to version 3.4. - OpenSSH has been updated to 6.5p1. mdocml has been updated to version 1.12.3. - LLVM and - Clang have been updated to - version 3.4. - The binutils suite of utilities has been updated to include upstream patches that add new relocations for &arch.powerpc; support. The ELF Tool Chain has been updated to upstream revision r3136. The texinfo utility and info pages were removed from the base system. The print/texinfo port should be installed on systems where info pages are needed. The ELF object manipulation tools addr2line, elfcopy (strip), nm, readelf, size, and strings were switched to the versions from the ELF Tool Chain project. The libedit library has been updated to include UTF-8 support, adding UTF-8 support to the &man.sh.1; shell. The &man.xz.1; utility has been updated to support multi-threaded compression. The elftoolchain utilities have been updated to version 3179. The &man.xz.1; utility has been updated to version 5.2.1. The &man.nvi.1; utility has been updated to version 2.1.3. The &man.wpa.supplicant.8; and &man.hostapd.8; utilities have been updated to version 2.4. The &man.resolvconf.8; utility has been updated to version 3.7.0. bmake has been updated to version 20150606. sendmail has been updated to 8.15.2. Starting with &os; 11.0 and sendmail 8.15, sendmail uses uncompressed IPv6 addresses by default, i.e., they will not contain ::. For example, instead of ::1, it will be 0:0:0:0:0:0:0:1. This permits a zero subnet to have a more specific match, such as different map entries for IPv6:0:0 versus IPv6:0. This change requires that configuration data (including maps, files, classes, custom ruleset, etc.) use the same format, so make certain such configuration data is upgraded. As a very simple check, search for patterns like 'IPv6:[0-9a-fA-F:]*::' and 'IPv6::'. To return to the old behavior, set the m4 option confUSE_COMPRESSED_IPV6_ADDRESSES or the cf option UseCompressedIPv6Addresses. The &man.tcpdump.1; utility has been updated to version 4.7.4. OpenSSL has been updated to version 1.0.1p. The &man.ssh.1; utility has been updated to re-implement hostname canonicalization before locating the host in known_hosts. The &man.libarchive.3; library has been updated to properly skip a sparse file entry in a &man.tar.1; file, which would previously produce errors. The apr library used by &man.svnlite.1; has been updated to version 1.5.2. The serf library used by &man.svnlite.1; has been updated to version 1.3.8. The &man.svnlite.1; utility has been updated to version 1.8.14. The sqlite3 library used by &man.svnlite.1; and &man.kerberos.8; has been updated to version 3.8.11.1. Timezone data files have been updated to version 2015f. The &man.acpi.4; subsystem has been updated to version 20150818. The &man.unbound.8; utility has been updated to version 1.5.4. &man.jemalloc.3; has been updated to version 4.0.2.
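As a rough sketch of the sendmail configuration check suggested above, the patterns from the note can be searched for with grep; the configuration paths are illustrative and should be adjusted to the local setup:

    # look for compressed IPv6 address forms in local sendmail configuration
    grep -E 'IPv6:[0-9a-fA-F:]*::' /etc/mail/*.cf /etc/mail/*.mc
    grep 'IPv6::' /etc/mail/*.cf /etc/mail/*.mc
    # to restore the old behavior, the m4 option from the note can be set in the
    # .mc file before regenerating the .cf, for example:
    #   define(`confUSE_COMPRESSED_IPV6_ADDRESSES', `True')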
The &man.file.1; utility has been updated to version 5.25. The &man.nc.1; utility has been updated to the OpenBSD 5.8 version. + + LLVM and + Clang have been updated to + version 3.7.0.

Installation and Configuration Tools
The &man.bsdinstall.8; partition editor and &man.sade.8; utility have been updated to include native ZFS support. The &os; installation utility, &man.bsdinstall.8;, has been updated to set the canmount &man.zfs.8; property to off for the /var dataset, preventing the contents of directories within /var from conflicting when using multiple boot environments, such as that provided by sysutils/beadm. The &man.bsdconfig.8; utility has been updated to skip the initial &man.tzsetup.8; UTC versus wall-clock time prompt when run in a virtual machine, determined when the kern.vm_guest &man.sysctl.8; is set to 1. The &man.bsdinstall.8; utility has been updated to use the new &man.dpv.3; library to display progress when extracting the &os; distributions. Support for detecting and aligning partitions on 1MB boundaries has been added to &man.bsdinstall.8;. Support for detecting and implementing a workaround for various laptops and motherboards that do not boot properly from GPT-partitioned disks has been added to &man.bsdinstall.8;. Additionally, the active flag will be set on the partition when needed. Support for selecting the partitioning scheme when installing on the UFS filesystem has been added to &man.bsdinstall.8;.

<filename class="directory">/etc/rc.d</filename> Scripts
The &man.rc.8; subsystem has been updated to allow configuring services in ${LOCALBASE}/etc/rc.conf.d/. If LOCALBASE is unset, it defaults to /usr/local. A new &man.rc.8; script, growfs, has been added, which will resize the root filesystem on boot if /firstboot exists. The mrouted &man.rc.8; script has been removed from the base system. An equivalent script is available from the net/mrouted port. A new &man.rc.8; script, iovctl, has been added, which allows automatically starting the &man.iovctl.8; utility at boot. The &man.service.8; utility has been updated to honor entries within /etc/rc.conf.d/.

<filename class="directory">/etc/periodic</filename> Scripts
The daily &man.periodic.8; script 110.clean-tmps has been updated to avoid crossing filesystem mount boundaries when cleaning files in /tmp. A new &man.periodic.8; script, 510.status-world-kernel, has been added, which evaluates the running userland and kernel versions from the &man.uname.1; -U and -K arguments, and prints an error if the system userland and kernel are not in sync.

Runtime Libraries and API
The Blowfish &man.crypt.3; default format has been changed to $2b$. The &man.readline.3; library is now statically linked in software within the base system, and the shared library is no longer installed, allowing the Ports Collection to use a modern version of the library. The &man.strptime.3; function has been updated to add support for POSIX-2001 features %U and %W. The &man.dl.iterate.phdr.3; function has been changed to always return the path name of the ELF object in the dlpi_name structure member. The &man.libxo.3; library has been imported to the base system. A userland library for Chelsio Terminator 5 based iWARP cards has been added, allowing userland RDMA applications to work over compatible NICs. The &man.gpio.3; library has been added, providing a wrapper around the &man.gpio.4; kernel interface.
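As a minimal sketch of the ${LOCALBASE}/etc/rc.conf.d/ mechanism described above, a service could be configured from its own drop-in file; the service name and flags are hypothetical:

    # /usr/local/etc/rc.conf.d/myservice (hypothetical drop-in file)
    myservice_enable="YES"
    myservice_flags="-q"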
The &man.procctl.2; system call has been updated to include a facility for non-&man.init.8; processes to be declared as the reaper of child processes and their descendants. The futimens() and utimensat() system calls have been added. See &man.utimensat.2; for more information. The &man.elf.3; compile-time dependency has been removed from drti.o, which allows adding DTrace probes to userland applications and libraries without also linking against &man.elf.3;. The &man.setmode.3; function has been updated to consistently set errno on failure. The &man.qsort.3; functions have been updated to handle 32-bit aligned data on 64-bit platforms, also providing a significant performance improvement for 32-bit workloads. Several standard include headers have been updated to make use of gcc attributes, such as __result_use_check(), __alloc_size(), and __nonnull(). Support for file verification in MAC has been added. The libgomp library is now only built when building GCC from the base system. An up-to-date version is available in the Ports Collection as devel/libiomp5-devel. The stdlib.h and malloc.h headers have been updated to make use of the gcc alloc_align() attribute. The Blowfish &man.crypt.3; library has been updated to support $2y$ hashes. The &man.execl.3; and &man.execlp.3; library functions have been updated to use the __sentinel gcc attribute.

ABI Compatibility
The &linux; compatibility version has been updated to 2.6.18. The compat.linux.osrelease &man.sysctl.8; is evaluated when building the emulators/linux-c6 and related ports. The stack protector has been upgraded to the "strong" level, elevating the protection against buffer overflows. This significantly improves the security of the system; extensive testing was done to ensure there are no measurable side effects in performance or functionality.

Kernel
This section covers changes to kernel configurations, system tuning, and system control parameters that are not otherwise categorized.

Kernel Bug Fixes
A kernel bug that inhibited proper functionality of the dev.cpu.0.freq &man.sysctl.8; on &intel; processors with Turbo Boost™ enabled has been fixed. Support for &man.dtrace.1; stack tracing has been fixed for &os;/&arch.powerpc;, using the trapexit() and asttrapexit() functions instead of checking within addressed kernel space. A kernel panic triggered when destroying a &man.vnet.9; &man.jail.8; configured with &man.gif.4; has been fixed. A kernel panic triggered when destroying a &man.vnet.9; &man.jail.8; configured with &man.gre.4; has been fixed. A bug in &man.ipfw.4; that could potentially lead to a kernel panic when using &man.dummynet.4; at layer 2 has been fixed. The kernel RPC has been updated to include several enhancements: the 45 MiB limit on requests queued for &man.nfsd.8; threads has been removed; unnecessary throttling is avoided by not deferring accounting for completed requests; and an integer overflow and signedness bugs have been fixed. Support for &man.dtrace.1; has been added for Book-E™ processors. The &man.kqueue.2; system call has been updated to handle write events to files larger than 2 gigabytes.

Kernel Configuration
The IMAGACT_BINMISC kernel configuration option has been enabled by default, which enables application execution through emulators, such as QEMU. The VT kernel configuration file has been removed, and the &man.vt.4; driver is included in the GENERIC kernel. To enable &man.vt.4;, enter set kern.vty=vt at the &man.loader.8; prompt during boot, or add kern.vty=vt to &man.loader.conf.5; and reboot the system.
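The &man.vt.4; selection described above can be made persistent with a single &man.loader.conf.5; line, sketched here for reference:

    # /boot/loader.conf
    kern.vty=vt    # select vt(4); use kern.vty=sc to fall back to syscons(4)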
The &man.config.8; utility has been updated to allow using a non-standard src/ tree, specified as an argument to the -s flag. The &os;/&arch.powerpc64; kernel now builds as a position-independent executable, allowing the kernel to be loaded into and run from any physical or virtual address. This change requires an update to &man.loader.8;. The userland and kernel must be updated before rebooting the system. A new module for creating rpi.dtb has been added for the Raspberry Pi. The rpi.dtb module is now installed to /boot/dtb/ by default for the Raspberry Pi system. Kernel support for Vector-Scalar eXtension (VSX) found on POWER7 and POWER8 hardware has been added. The &man.pmap.9; implementation for 64-bit &powerpc; processors has been overhauled to improve concurrency. A new module for creating the dtb module for AM335x systems has been added. The PAE_TABLES kernel configuration option has been added for &os;/&arch.i386;, which instructs &man.pmap.9; to use PAE format for page tables while maintaining a 32-bit physical address size elsewhere in the kernel. The use of this option can enhance application-level security by enabling the creation of no-execute mappings on modern &arch.i386; processors. Unlike the PAE option, PAE_TABLES preserves kernel binary interface (KBI) compatibility with non-PAE kernels, allowing non-PAE kernel modules and drivers to work with a PAE_TABLES-enabled kernel. Additionally, system limits are tuned for 4GB maximum RAM, avoiding kernel virtual address space (KVA) exhaustion. The SIFTR kernel configuration option has been added, allowing building &man.siftr.4; statically into the kernel. The &arch.arm; boot loader, ubldr, is now relocatable. In addition, ubldr.bin is now created during build time, which is a stripped binary with an entry point of 0, providing the ability to specify the load address by running go ${loadaddr} in u-boot. The &man.nvd.4; and &man.nvme.4; drivers are now included in the GENERIC kernel configuration by default. A new kernel configuration option, EM_MULTIQUEUE, has been added, which enables multi-queue support in the &man.em.4; driver. Multi-queue support in the &man.em.4; driver is not officially supported by &intel;. The GENERIC kernel configuration has been updated to include the IPSEC option by default. Initial NUMA affinity and policy configuration has been added. See &man.numactl.1; and &man.numa.getaffinity.2; for usage details. The &man.pms.4; driver has been added to the GENERIC kernel configuration for supported architectures. The CUBIEBOARD2 kernel configuration has been renamed to A20. Kernel debugging symbols are now installed to /usr/lib/debug/boot/kernel/. To retain the previous behavior, add KERN_DEBUGDIR="" to &man.src.conf.5;.

System Tuning and Controls
The &man.hwpmc.4; default and maximum callchain depths have been increased. The default has been increased from 16 to 32, and the maximum increased from 32 to 128. The kern.osrelease and kern.osreldate are now configurable &man.jail.8; parameters. The &man.devfs.5; device filesystem has been changed to update timestamps for read/write operations using seconds precision. A new &man.sysctl.8;, vfs.devfs.dotimes, has been added, which, when set to a non-zero value, enables default-precision timestamps for these operations. A new &man.sysctl.8;, kern.racct.enable, has been added, which, when set to a non-zero value, allows using &man.rctl.8; with the GENERIC kernel. A new kernel configuration option, RACCT_DISABLED, has also been added.
The GENERIC kernel configuration now includes RACCT and RCTL by default. To enable RACCT and RCTL on a system using the GENERIC kernel configuration, add kern.racct.enable=1 to &man.loader.conf.5;, and reboot the system. A new &man.sysctl.8;, net.inet.tcp.hostcache.purgenow, has been added, which, when set to 1 at runtime, flushes all net.inet.tcp.hostcache entries. A new &man.sysctl.8;, hw.model, has been added, which displays CPU model information. The &man.uart.4; driver has been updated to allow tuning pulse-per-second (PPS) signal capture during runtime.

Devices and Drivers
This section covers changes and additions to devices and device drivers since &release.prev;.

Device Drivers
Support for GPS ports has been added to &man.uhso.4;. The &man.full.4; device has been added, and the lindev(4) device has been removed. Prior to this change, lindev(4) provided only the /dev/full character device, returning ENOSPC on write attempts. As this device is not specific to &linux;, a native &os; version has been added. Hardware context support has been added to the drm/i915 driver, adding support for Mesa 9.2 and later. The &man.vt.4; driver has been updated, replacing the bitmapped kern.vt.spclkeys &man.sysctl.8; with individual kern.vt.kbd_* variants. The &man.hpet.4; driver has been updated to create a /dev/hpetN device, providing access to HPET from userspace. The drm code has been updated to match &linux; version 3.8.13. The &man.psm.4; driver has been updated to include improved support for newer Synaptics® touchpads and the ClickPad® mouse on newer Lenovo™ laptops. Support for the Freescale PCI Root Complex device has been added. The &man.cyapa.4; driver has been added, supporting the Cypress APA I2C trackpad. The &man.isl.4; driver has been added, supporting the Intersil I2C ISL29018 digital ambient light sensor.

Storage Drivers
The &man.mpr.4; driver has been added, providing support for LSI Fusion-MPT 3 12Gb SCSI/SATA controllers. The &man.mrsas.4; driver has been added, providing support for LSI MegaRAID SAS controllers. The &man.mfi.4; driver will attach to the controller by default. To enable &man.mrsas.4;, add hw.mfi.mrsas_enable=1 to /boot/loader.conf, which turns off &man.mfi.4; device probing. At this time, the &man.mfiutil.8; utility and the &os; version of MegaCLI and StorCli do not work with &man.mrsas.4;. The &man.ctl.4; subsystem has been updated, increasing the ports limit from 128 to 256, and the LUN limit from 256 to 1024. The asr(4) driver has been removed, and is no longer supported. The &man.hptnr.4; driver has been updated to version 1.1.1. The &man.pms.4; driver has been added, providing support for the PMC Sierra line of SAS/SATA host bus adapters. The &man.ioat.4; driver has been added, providing support for the PSE (Platform Storage Extension). The CTL High Availability implementation has been rewritten. The &man.ctl.4; driver has been updated to support CD-ROM and removable devices.

Network Drivers
Support for Broadcom chipsets BCM57764, BCM57767, BCM57782, BCM57786, and BCM57787 has been added to &man.bge.4;. Support for the &intel; Centrino™ Wireless-N 135 chipset has been added. Firmware for &intel; Centrino™ Wireless-N 105 devices has been added to the base system. The deprecated nve(4) driver has been removed. Users of NVIDIA nForce MCP network adapters are advised to use the &man.nfe.4; driver instead, which has been the default driver for this hardware since &os; 7.0.
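Relating to the RACCT/RCTL change noted under System Tuning and Controls above, once kern.racct.enable=1 has been set in &man.loader.conf.5; a resource limit could be applied roughly as follows; the jail name and limit are hypothetical:

    # deny more than 1 GB of virtual memory to the hypothetical jail "webjail"
    rctl -a jail:webjail:vmemoryuse:deny=1g
    # list the currently defined rules
    rctl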
The if_nf10bmac(4) device has been added, providing support for the NetFPGA-10G Embedded CPU Ethernet Core. The if_nf10bmac(4) driver operates on the FPGA, and is not suited for the PCI host interface. The &man.ath.hal.4; driver has been updated to support the Atheros AR1111 chipset. Support for the &intel; Centrino™ Wireless-N 105 chipset has been added. Support for the &man.cxgbe.4; Terminator 5 (T5) 10G/40G cards has been added to &man.netmap.4;. The &man.alc.4; driver has been updated to support AR816x and AR817x Ethernet controllers. The &man.pf.4; packet filter default hash has been changed from Jenkins to Murmur3, providing a 3-percent performance increase in packets-per-second. The &man.vxlan.4; driver has been added, which creates a virtual Layer 2 (Ethernet) network overlaid in a Layer 3 (IP/UDP) network. The &man.vxlan.4; driver is analogous to &man.vlan.4;, but is designed to be better suited for large, multiple-tenant datacenter environments. The &man.gre.4; driver has been significantly overhauled, and has been split into two separate modules, &man.gre.4; and &man.me.4;. The &man.ral.4; driver has been updated to support the RT5390 and RT5392 chipsets. The &man.sfxge.4; driver has been updated to support Solarflare Flareon Ultra 7000-series chipsets. The &man.em.4; driver has been updated with improved transmission queue hang detection. The &man.cdce.4; driver has been updated to include support for the RTL8153 chipset. The &man.iwm.4; driver has been imported from OpenBSD, providing support for &intel; 3160/7260/7265 wireless chipsets. The &man.em.4; driver has been updated to allow disabling CRC stripping. The &man.pf.4; implementation has been updated to remove support for the scrub fragment crop|drop-ovl filtering rule. Systems with this rule in &man.pf.conf.5; will implicitly be converted to the scrub fragment reassemble filtering rule, without requiring manual intervention. The &man.em.4; driver has been updated to support the Skylake I219 chipset. The &man.lagg.4; driver has been updated to remove support for the fec protocol.

Hardware Support
This section covers general hardware support for physical machines, hypervisors, and virtualization environments, as well as hardware changes and updates that do not otherwise fit in other sections of this document.

Hardware Support
The &man.asmc.4; driver has been updated to support the &apple; MacMini 3,1. Support for &os;/ia64 has been dropped as of &os; 11. An issue that could cause a system to hang when entering ACPI S3 state (suspend to RAM) has been corrected in the &man.acpi.4; and &man.pci.4; drivers. The power management unit subsystem has been updated to support power button events on certain &arch.powerpc; hardware, such as the aluminum PowerBook®. The &man.hwpmc.4; driver has been updated to correct performance counter sampling on G4 (MPC74xxx) and G5 class processors. The OpenCrypto framework has been updated to include AES-ICM and AES-GCM modes, both of which have also been added to the &man.aesni.4; driver. The &man.hwpmc.4; driver has been updated to support the Freescale e500 core. The &man.ig4.4; driver has been added, providing support for the fourth generation &intel; I2C SMBus. The &man.uart.4; driver has been updated to support AMT devices on newer systems. Initial SMP support has been added to the &os;/&arch.arm64; port.

Virtualization Support
Support for the Virtual Interrupt Delivery feature of &intel; VT-x is enabled if supported by the CPU. This feature can be disabled by running sysctl hw.vmm.vmx.use_apic_vid=0.
Additionally, to persist this setting across reboots, add hw.vmm.vmx.use_apic_vid=0 to /etc/sysctl.conf. Support for Posted Interrupt Processing is enabled if supported by the CPU. This feature can be disabled by running sysctl hw.vmm.vmx.use_apic_pir=0. Additionally, to persist this setting across reboots, add hw.vmm.vmx.use_apic_pir=0 to /etc/sysctl.conf. Unmapped IO support has been added to &man.virtio_blk.4;. Unmapped IO support has been added to &man.virtio_scsi.4;. The &man.virtio_random.4; driver has been added to harvest entropy from the host system. &os;/&arch.i386; guests can be run under bhyve. Support for running a &os;/&arch.amd64; Xen guest instance as a PVH guest has been added. PVH mode, short for Para-Virtualized Hardware, uses para-virtualized drivers for boot and I/O, and uses hardware virtualization extensions for all other tasks, without the need for emulation. The &man.bhyve.8; hypervisor has been updated to support &amd; processors with SVM and AMD-V hardware extensions. The &man.virtio.console.4; driver has been added, which provides an interface to VirtIO console devices through a &man.tty.4; device. The &man.bhyve.8; hypervisor has been updated to support DSM TRIM commands for virtual AHCI disks. Support for the QEMU virt system has been added. The Hyper-V™ drivers have been updated with several enhancements: the &man.hv.vmbus.4; driver now has multi-channel support; the &man.hv.storvsc.4; driver now has scatter/gather support, in addition to performance improvements; and the &man.hv.kvp.4; driver has received several bug fixes. Support for &man.xen.4; para-virtualized domU kernels has been removed. The &man.hv.netvsc.4; driver has been updated to support checksum offloading and TSO. The &man.xen.4; driver has been updated to include support for blkif indirect segment I/O.

ARM Support
The &man.nand.4; device is enabled for ARM devices by default. Support for the Exynos 5420 Octa system has been added. The SMP option has been enabled for all Exynos 5 systems supported by &os;. Support for the Toradex Apalis i.MX6 development board has been added. An issue that could cause instability when detecting SD cards on the Raspberry Pi SoC has been fixed. The bcm2835_cpufreq driver has been added, which supports CPU frequency and voltage control on the Raspberry Pi SoC. Support for turning off the BeagleBone Black system with the &man.shutdown.8; -p flag or by invoking &man.poweroff.8; has been added. Audio transmission drivers have been added for the Digital Audio Multiplexer (AUDMUX), Smart Direct Memory Access Controller (SDMA), and Synchronous Serial Interface (SSI). Initial support for the ARM AArch64 architecture has been added. Kernel support for Thumb-2 userland has been added. Support for the hardware power button on the BeagleBone Black system has been added. Initial ACPI support has been added for &os;/&arch.arm64;. Support for 1-Wire devices has been added, providing support for 1-Wire hardware through &man.gpio.4;. See &man.ow.4;, &man.owc.4;, and &man.ow.temp.4; for more information. Support for the HiSilicon HI6220 SoC has been added.

Storage
This section covers changes and additions to file systems and other storage subsystems, both local and networked.

General Storage
The &man.ctl.4; LUN mapping has been rewritten, replacing iSCSI-specific mapping mechanisms with a new mechanism that works for any port. The &man.ctld.8; utility has been updated to allow controlling non-iSCSI &man.ctl.4; ports.
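A compact sketch of the /etc/sysctl.conf entries mentioned under Virtualization Support above, for hosts where these bhyve-related features need to be disabled persistently:

    # /etc/sysctl.conf
    hw.vmm.vmx.use_apic_vid=0    # disable Virtual Interrupt Delivery
    hw.vmm.vmx.use_apic_pir=0    # disable Posted Interrupt Processing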
The &man.autofs.5; subsystem has been updated to include a new &man.auto.master.5; map, -media, which allows automatically mounting removable media, such as CD drives or USB flash drives. The &man.autofs.5; subsystem has been updated to include a new &man.auto.master.5; map, -noauto, which handles &man.fstab.5; entries set to noauto. The GELI class has been updated to support the BIO_DELETE &man.g.bio.9; bio_cmd field, providing TRIM/UNMAP support on GELI-backed SSD storage providers.

Networked Storage
The new filesystem automount facility, &man.autofs.5;, has been added. The new &man.autofs.5; facility is similar to that found in other &unix;-like operating systems, such as OS X™ and Solaris™. The &man.autofs.5; facility uses a &sun;-compatible &man.auto.master.5; configuration file, and is administered with the &man.automount.8; userland utility, and the &man.automountd.8; and &man.autounmountd.8; daemons. Support for the timeo, actimeo, noac, and proto options has been added to &man.mount.nfs.8;.

ZFS
The arc_meta_limit statistics are now visible through the kstat &man.sysctl.8;. As a result of this change, the vfs.zfs.arc_meta_used &man.sysctl.8; has been removed, and replaced with the kstat.zfs.misc.arcstats.arc_meta_used &man.sysctl.8;. The &man.zfs.8; l2arc code has been updated to take ashift into account when gathering buffers to be written to the l2arc device.

&man.geom.4;
Support for the disklabel64 partitioning scheme has been added to &man.gpart.8;. Support for the apple-boot, apple-hfs, and apple-ufs MBR partitioning schemes has been added to &man.gpart.8;. The &man.gpart.8; utility has been updated to include a new attribute for GPT partitions, lenovofix, which, when set, works around BIOS compatibility issues reported on several Lenovo™ laptops.

Boot Loader Changes
This section covers the boot loader, boot menu, and other boot-related changes.

Boot Loader Changes
The memory test run at boot time on &os;/&arch.amd64; platforms has been disabled by default. A new &man.ttys.5; class, 3wire, has been added. This is similar to the existing terminal classes, but does not have a defined baud rate. The &man.vt.4; driver has been made the default system console driver. The &man.syscons.4; driver is still available, and can be enabled by adding kern.vty=sc to &man.loader.conf.5;. Alternatively, &man.syscons.4; can be enabled at boot time by entering set kern.vty=sc at the &man.loader.8; prompt. Support for bzipfs has been added to the EFI loader. The boot loader has been updated to support entering the GELI passphrase before loading the kernel. To enable this behavior, add geom_eli_passphrase_prompt="YES" to &man.loader.conf.5;. The &man.ttys.5; file for &os;/&arch.arm; has been updated to enable ttyu1, ttyu2, and ttyu3 by default, if the callin port is an active console port.

Boot Menu Changes

Networking
This section describes changes that affect networking in &os;.

Network Protocols
Support for the IPX network transport protocol has been removed, and will not be supported in &os; 11 and later releases. Support for PLPMTUD blackhole detection (RFC 4821) has been added to the &man.tcp.4; stack, disabled by default.
New control tunables have been added: net.inet.tcp.pmtud_blackhole_detection (enables or disables PLPMTUD blackhole detection), net.inet.tcp.pmtud_blackhole_mss (the MSS to try for IPv4), and net.inet.tcp.v6pmtud_blackhole_mss (the MSS to try for IPv6). New monitoring &man.sysctl.8;s have been added: net.inet.tcp.pmtud_blackhole_activated (the number of times the code was activated to attempt downshifting the MSS), net.inet.tcp.pmtud_blackhole_min_activated (the number of times the blackhole MSS was used in an attempt to downshift), and net.inet.tcp.pmtud_blackhole_failed (the number of times that the blackhole failed to connect after downshifting the MSS). Support for IP identification for atomic datagrams (RFC 6864) has been added. Support for this feature can be toggled with the net.inet.ip.rfc6864 &man.sysctl.8;, which is enabled by default. The IPSEC framework has been updated to include support for AES modes on both software-only and hardware-backed (&man.aesni.4;) systems. The network stack has been updated to fix handling of IPv6 On-Link redirects.

Ports Collection and Package Infrastructure
This section covers changes to the &os; Ports Collection, package infrastructure, and package maintenance and installation tools.

Infrastructure Changes

Packaging Changes

Documentation
This section covers changes to the &os; Documentation Project sources and toolchain.

Documentation Source Changes

Documentation Toolchain Changes

Release Engineering and Integration
This section covers changes that are specific to the &os; Release Engineering processes.

Integration Changes
The Release Engineering build tools have been updated to include support for producing virtual machine disk images for various cloud hosting providers. The Release Engineering build tools have been updated to use multi-threaded &man.xz.1;. By default, the number of &man.xz.1; threads is set to the number of cores available. The Release Engineering build tools have been updated to include support for building &os;/&arch.arm64; virtual machine and memory stick installation images. The Release Engineering build tools have been updated to support building &os;/&arch.arm; images without external utilities for supported boards where a corresponding u-boot port exists in the Ports Collection. The &os;/&arch.i386; memory stick installation images are now created using the &man.mkimg.1; utility, matching the way the &os;/&arch.amd64; images are created.
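As a small illustration of the multi-threaded &man.xz.1; support mentioned in the release-engineering notes above; the image file name is hypothetical, and -T 0 selects as many threads as there are cores:

    # compress an installation image using all available cores
    xz -T 0 -9 disk1.img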
Index: user/ngie/more-tests2/share/man/man5/core.5 =================================================================== --- user/ngie/more-tests2/share/man/man5/core.5 (revision 288954) +++ user/ngie/more-tests2/share/man/man5/core.5 (revision 288955) @@ -1,151 +1,164 @@ .\" Copyright (c) 1980, 1991, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)core.5 8.3 (Berkeley) 12/11/93 .\" $FreeBSD$ .\" -.Dd September 2, 2015 +.Dd October 5, 2015 .Dt CORE 5 .Os .Sh NAME .Nm core .Nd memory image file format .Sh SYNOPSIS .In sys/param.h .Sh DESCRIPTION A small number of signals which cause abnormal termination of a process also cause a record of the process's in-core state to be written to disk for later examination by one of the available debuggers. (See .Xr sigaction 2 . ) This memory image is written to a file named by default .Nm programname.core in the working directory; provided the terminated process had write permission in the directory, and provided the abnormality did not cause a system crash. (In this event, the decision to save the core file is arbitrary, see .Xr savecore 8 . ) .Pp The maximum size of a core file is limited by .Xr setrlimit 2 . Files which would be larger than the limit are not created. .Pp The name of the file is controlled via the .Xr sysctl 8 variable .Va kern.corefile . The contents of this variable describes a filename to store the core image to. This filename can be absolute, or relative (which will resolve to the current working directory of the program generating it). .Pp The following format specifiers may be used in the .Va kern.corefile sysctl to insert additional information into the resulting core file name: .Bl -tag -width "1234567890" -compact -offset "12345" .It Em \&%H Machine hostname. .It Em \&%I An index starting at zero until the sysctl .Em debug.ncores is reached. This can be useful for limiting the number of corefiles generated by a particular process. .It Em \&%N process name. .It Em \&%P processes PID. .It Em \&%U process UID. 
.El .Pp The name defaults to .Em \&%N.core , yielding the traditional .Fx behaviour. .Pp By default, a process that changes user or group credentials whether real or effective will not create a corefile. This behaviour can be changed to generate a core dump by setting the .Xr sysctl 8 variable .Va kern.sugid_coredump to 1. .Pp Corefiles can be compressed by the kernel if the following item is included in the kernel configuration file: .Bl -tag -width "1234567890" -compact -offset "12345" .It options GZIO .El .Pp When the GZIO option is included, the following sysctls control whether core files will be compressed: .Bl -tag -width "kern.compress_user_cores_gzlevel" -compact -offset "12345" .It Em kern.compress_user_cores_gzlevel Gzip compression level. Defaults to 6. .It Em kern.compress_user_cores Actually compress user cores. Compressed core files will have a suffix of .Ql .gz appended to them. .El .Sh NOTES Corefiles are written with open file descriptor information as an ELF note. By default, file paths are packed to only use as much space as needed. However, file paths can change at any time, including during core dump, and this can result in truncated file descriptor data. .Pp All file descriptor information can be preserved by disabling packing. This potentially wastes up to PATH_MAX bytes per open fd. Packing is disabled with .Dl sysctl kern.coredump_pack_fileinfo=0 . +.Pp +Similarly, corefiles are written with vmmap information as an ELF note, which +contains file paths. +By default, they are packed to only use as much space as +needed. +By the same mechanism as for the open files note, these paths can also +change at any time and result in a truncated note. +.Pp +All vmmap information can be preserved by disabling packing. +Like the file information, this potentially wastes up to PATH_MAX bytes per +mapped object. +Packing is disabled with +.Dl sysctl kern.coredump_pack_vmmapinfo=0 . .Sh EXAMPLES In order to store all core images in per-user private areas under .Pa /var/coredumps , the following .Xr sysctl 8 command can be used: .Pp .Dl sysctl kern.corefile=/var/coredumps/\&%U/\&%N.core .Sh SEE ALSO .Xr gdb 1 , .Xr kgdb 1 , .Xr setrlimit 2 , .Xr sigaction 2 , .Xr sysctl 8 .Sh HISTORY A .Nm file format appeared in .At v6 . Index: user/ngie/more-tests2/share =================================================================== --- user/ngie/more-tests2/share (revision 288954) +++ user/ngie/more-tests2/share (revision 288955) Property changes on: user/ngie/more-tests2/share ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/share:r288944-288954 Index: user/ngie/more-tests2/sys/dev/drm2/i915/i915_drv.c =================================================================== --- user/ngie/more-tests2/sys/dev/drm2/i915/i915_drv.c (revision 288954) +++ user/ngie/more-tests2/sys/dev/drm2/i915/i915_drv.c (revision 288955) @@ -1,1055 +1,1054 @@ /* i915_drv.c -- Intel i915 driver -*- linux-c -*- * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com */ /*- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include "fb_if.h" /* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */ static drm_pci_id_list_t i915_pciidlist[] = { i915_PCI_IDS }; #define INTEL_VGA_DEVICE(id, info_) { \ .device = id, \ .info = info_, \ } static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { .gen = 2, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i965g_info = { .gen = 4, .is_broadwater = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { .gen = 4, .is_crestline = 1, .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, .has_overlay = 1, .supports_tv = 1, }; static const struct intel_device_info intel_g33_info = { .gen = 3, .is_g33 = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_gm45_info = { .gen = 4, .is_g4x = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, .has_bsd_ring 
= 1, }; static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_pch_split = 1, }; static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* disabled due to buggy hardware */ .has_bsd_ring = 1, .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { .is_ivybridge = 1, .gen = 7, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { .is_ivybridge = 1, .gen = 7, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, }; static const struct intel_device_info intel_valleyview_m_info = { .gen = 7, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, .has_bsd_ring = 1, .has_blt_ring = 1, .is_valleyview = 1, .not_supported = 1, }; static const struct intel_device_info intel_valleyview_d_info = { .gen = 7, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, .has_bsd_ring = 1, .has_blt_ring = 1, .is_valleyview = 1, .not_supported = 1, }; static const struct intel_device_info intel_haswell_d_info = { .is_haswell = 1, .gen = 7, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, .not_supported = 1, }; static const struct intel_device_info intel_haswell_m_info = { .is_haswell = 1, .gen = 7, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, .not_supported = 1, }; static const struct intel_gfx_device_id { int device; const struct intel_device_info *info; } pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ 
INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), 
/* CRW GT1 server */ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), {0, 0} }; static int i915_enable_unsupported; static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; drm_kms_helper_poll_disable(dev); #if 0 pci_save_state(dev->pdev); #endif /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { int error = i915_gem_idle(dev); if (error) { device_printf(dev->dev, "GEM idle failed, resume might fail\n"); return error; } drm_irq_uninstall(dev); } i915_save_state(dev); intel_opregion_fini(dev); /* Modeset on resume, not lid events */ dev_priv->modeset_on_lid = 0; return 0; } static int i915_suspend(device_t kdev) { struct drm_device *dev; int error; dev = device_get_softc(kdev); if (dev == NULL || dev->dev_private == NULL) { DRM_ERROR("DRM not initialized, aborting suspend.\n"); return ENODEV; } DRM_DEBUG_KMS("starting suspend\n"); error = i915_drm_freeze(dev); if (error) return (-error); error = bus_generic_suspend(kdev); DRM_DEBUG_KMS("finished suspend %d\n", error); return (error); } static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; if (drm_core_check_feature(dev, DRIVER_MODESET)) { DRM_LOCK(dev); i915_gem_restore_gtt_mappings(dev); DRM_UNLOCK(dev); } i915_restore_state(dev); intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { if (HAS_PCH_SPLIT(dev)) ironlake_init_pch_refclk(dev); DRM_LOCK(dev); dev_priv->mm.suspended = 0; error = i915_gem_init_hw(dev); DRM_UNLOCK(dev); intel_modeset_init_hw(dev); sx_xlock(&dev->mode_config.mutex); drm_mode_config_reset(dev); sx_xunlock(&dev->mode_config.mutex); drm_irq_install(dev); sx_xlock(&dev->mode_config.mutex); /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); sx_xunlock(&dev->mode_config.mutex); } intel_opregion_init(dev); dev_priv->modeset_on_lid = 0; return error; } static int i915_resume(device_t kdev) { struct drm_device *dev; int ret; dev = device_get_softc(kdev); DRM_DEBUG_KMS("starting resume\n"); #if 0 if (pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); #endif ret = i915_drm_thaw(dev); if (ret != 0) return (-ret); drm_kms_helper_poll_enable(dev); ret = bus_generic_resume(kdev); DRM_DEBUG_KMS("finished resume %d\n", ret); return (ret); } static int i915_probe(device_t kdev) { const struct intel_device_info *info; int error; error = drm_probe_helper(kdev, i915_pciidlist); if (error != 0) return (-error); info = i915_get_device_id(pci_get_device(kdev)); if (info == NULL) return (ENXIO); return (0); } int i915_modeset; static int i915_attach(device_t kdev) { if (i915_modeset == 1) i915_driver_info.driver_features |= DRIVER_MODESET; return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info)); } static struct fb_info * i915_fb_helper_getinfo(device_t kdev) { struct intel_fbdev *ifbdev; drm_i915_private_t *dev_priv; struct drm_device *dev; struct fb_info *info; dev = device_get_softc(kdev); dev_priv = 
dev->dev_private; ifbdev = dev_priv->fbdev; if (ifbdev == NULL) return (NULL); info = ifbdev->helper.fbdev; return (info); } const struct intel_device_info * i915_get_device_id(int device) { const struct intel_gfx_device_id *did; for (did = &pciidlist[0]; did->device != 0; did++) { if (did->device != device) continue; if (did->info->not_supported && !i915_enable_unsupported) return (NULL); return (did->info); } return (NULL); } static device_method_t i915_methods[] = { /* Device interface */ DEVMETHOD(device_probe, i915_probe), DEVMETHOD(device_attach, i915_attach), DEVMETHOD(device_suspend, i915_suspend), DEVMETHOD(device_resume, i915_resume), DEVMETHOD(device_detach, drm_generic_detach), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, i915_fb_helper_getinfo), DEVMETHOD_END }; static driver_t i915_driver = { "drmn", i915_methods, sizeof(struct drm_device) }; extern devclass_t drm_devclass; DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0, SI_ORDER_ANY); MODULE_DEPEND(i915kms, drmn, 1, 1, 1); MODULE_DEPEND(i915kms, agp, 1, 1, 1); MODULE_DEPEND(i915kms, iicbus, 1, 1, 1); MODULE_DEPEND(i915kms, iic, 1, 1, 1); MODULE_DEPEND(i915kms, iicbb, 1, 1, 1); int intel_iommu_enabled = 0; TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled); int intel_iommu_gfx_mapped = 0; TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped); int i915_prefault_disable; TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable); int i915_semaphores = -1; TUNABLE_INT("drm.i915.semaphores", &i915_semaphores); static int i915_try_reset = 1; TUNABLE_INT("drm.i915.try_reset", &i915_try_reset); unsigned int i915_lvds_downclock = 0; TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock); int i915_vbt_sdvo_panel_type = -1; TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type); unsigned int i915_powersave = 1; TUNABLE_INT("drm.i915.powersave", &i915_powersave); int i915_enable_fbc = 0; TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc); int i915_enable_rc6 = 0; TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6); int i915_lvds_channel_mode; TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode); int i915_panel_use_ssc = -1; TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc); int i915_panel_ignore_lid = 0; TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid); int i915_panel_invert_brightness; TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness); int i915_modeset = 1; TUNABLE_INT("drm.i915.modeset", &i915_modeset); int i915_enable_ppgtt = -1; TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt); int i915_enable_hangcheck = 1; TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck); TUNABLE_INT("drm.i915.enable_unsupported", &i915_enable_unsupported); #define PCI_VENDOR_INTEL 0x8086 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv; device_t pch; uint32_t id; dev_priv = dev->dev_private; pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA); if (pch != NULL && pci_get_vendor(pch) == PCI_VENDOR_INTEL) { id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); } else if (id == 
INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; dev_priv->num_pch_pll = 0; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); } else DRM_DEBUG_KMS("No PCH detected\n"); KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS, ("num_pch_pll %d\n", dev_priv->num_pch_pll)); } else DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n"); } bool i915_semaphore_is_enabled(struct drm_device *dev) { if (INTEL_INFO(dev)->gen < 6) return 0; if (i915_semaphores >= 0) return i915_semaphores; /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) return false; return 1; } void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) DELAY(10); I915_WRITE_NOTRACE(FORCEWAKE, 1); POSTING_READ(FORCEWAKE); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) DELAY(10); } void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) DELAY(10); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); POSTING_READ(FORCEWAKE_MT); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) DELAY(10); } void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { mtx_lock(&dev_priv->gt_lock); if (dev_priv->forcewake_count++ == 0) dev_priv->display.force_wake_get(dev_priv); mtx_unlock(&dev_priv->gt_lock); } static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) { u32 gtfifodbg; gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); if ((gtfifodbg & GT_FIFO_CPU_ERROR_MASK) != 0) { printf("MMIO read or write has been dropped %x\n", gtfifodbg); I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); } } void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { mtx_lock(&dev_priv->gt_lock); if (--dev_priv->forcewake_count == 0) dev_priv->display.force_wake_put(dev_priv); mtx_unlock(&dev_priv->gt_lock); } int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) { int ret = 0; if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { int loop = 500; u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { DELAY(10); fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); } if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) { printf("%s loop\n", __func__); ++ret; } dev_priv->gt_fifo_count = fifo; } dev_priv->gt_fifo_count--; return (ret); } void vlv_force_wake_get(struct drm_i915_private *dev_priv) { int count; count = 0; /* Already awake? 
*/ if ((I915_READ(0x130094) & 0xa1) == 0xa1) return; I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); POSTING_READ(FORCEWAKE_VLV); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0) DELAY(10); } void vlv_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); /* FIXME: confirm VLV behavior with Punit folks */ POSTING_READ(FORCEWAKE_VLV); } static int i8xx_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int onems; if (IS_I85X(dev)) return -ENODEV; onems = hz / 1000; if (onems == 0) onems = 1; I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); POSTING_READ(D_STATE); if (IS_I830(dev) || IS_845G(dev)) { I915_WRITE(DEBUG_RESET_I830, DEBUG_RESET_DISPLAY | DEBUG_RESET_RENDER | DEBUG_RESET_FULL); POSTING_READ(DEBUG_RESET_I830); pause("i8xxrst1", onems); I915_WRITE(DEBUG_RESET_I830, 0); POSTING_READ(DEBUG_RESET_I830); } pause("i8xxrst2", onems); I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); POSTING_READ(D_STATE); return 0; } static int i965_reset_complete(struct drm_device *dev) { u8 gdrst; gdrst = pci_read_config(dev->dev, I965_GDRST, 1); return (gdrst & GRDOM_RESET_ENABLE) == 0; } static int i965_do_reset(struct drm_device *dev) { int ret; u8 gdrst; /* * Set the domains we want to reset (GRDOM/bits 2 and 3) as * well as the reset bit (GR/bit 0). Setting the GR bit * triggers the reset; when done, the hardware will clear it. */ gdrst = pci_read_config(dev->dev, I965_GDRST, 1); pci_write_config(dev->dev, I965_GDRST, gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1); ret = wait_for(i965_reset_complete(dev), 500); if (ret) return ret; /* We can't reset render&media without also resetting display ... */ gdrst = pci_read_config(dev->dev, I965_GDRST, 1); pci_write_config(dev->dev, I965_GDRST, gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1); return wait_for(i965_reset_complete(dev), 500); } static int ironlake_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 gdrst; int ret; gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); if (ret) return ret; /* We can't reset render&media without also resetting display ... 
*/ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); } static int gen6_do_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* Hold gt_lock across reset to prevent any register access * with forcewake not set correctly */ mtx_lock(&dev_priv->gt_lock); /* Reset the chip */ /* GEN6_GDRST is not in the gt power well, no need to check * for fifo space for the write or forcewake the chip for * the read */ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); /* Spin waiting for the device to ack the reset request */ ret = _intel_wait_for(dev, (I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500, 0, "915rst"); /* If reset with a user forcewake, try to restore, otherwise turn it off */ if (dev_priv->forcewake_count) dev_priv->display.force_wake_get(dev_priv); else dev_priv->display.force_wake_put(dev_priv); /* Restore fifo count */ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); mtx_unlock(&dev_priv->gt_lock); return (ret); } int intel_gpu_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret = -ENODEV; switch (INTEL_INFO(dev)->gen) { case 7: case 6: ret = gen6_do_reset(dev); break; case 5: ret = ironlake_do_reset(dev); break; case 4: ret = i965_do_reset(dev); break; - case 3: case 2: ret = i8xx_do_reset(dev); break; } /* Also reset the gpu hangman. */ if (dev_priv->stop_rings) { DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n"); dev_priv->stop_rings = 0; if (ret == -ENODEV) { DRM_ERROR("Reset not implemented, but ignoring " "error for simulated gpu hangs\n"); ret = 0; } } return ret; } /** * i915_reset - reset chip after a hang * @dev: drm device to reset * * Reset the chip. Useful if a hang is detected. Returns zero on successful * reset or otherwise an error code. * * Procedure is fairly simple: * - reset the chip using the reset reg * - re-init context state * - re-init hardware status page * - re-init ring buffer * - re-init interrupt state * - re-init display */ int i915_reset(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; if (!i915_try_reset) return 0; if (!sx_try_xlock(&dev->dev_struct_lock)) return (-EBUSY); dev_priv->stop_rings = 0; i915_gem_reset(dev); ret = -ENODEV; if (time_second - dev_priv->last_gpu_reset < 5) DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); else ret = intel_gpu_reset(dev); dev_priv->last_gpu_reset = time_second; if (ret) { DRM_ERROR("Failed to reset chip.\n"); DRM_UNLOCK(dev); return ret; } /* Ok, now get things going again... */ /* * Everything depends on having the GTT running, so we need to start * there. Fortunately we don't need to do this unless we reset the * chip at a PCI level. * * Next we need to restore the context, but we don't use those * yet either... * * Ring buffer needs to be re-initialized in the KMS case, or if X * was running at the time of the reset (i.e. we weren't VT * switched away). */ if (drm_core_check_feature(dev, DRIVER_MODESET) || !dev_priv->mm.suspended) { struct intel_ring_buffer *ring; int i; dev_priv->mm.suspended = 0; i915_gem_init_swizzling(dev); for_each_ring(ring, dev_priv, i) ring->init(ring); i915_gem_context_init(dev); i915_gem_init_ppgtt(dev); /* * It would make sense to re-init all the other hw state, at * least the rps/rc6/emon init done within modeset_init_hw. 
For * some unknown reason, this blows up my ilk, so don't. */ DRM_UNLOCK(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_modeset_init_hw(dev); drm_irq_uninstall(dev); drm_irq_install(dev); } else { DRM_UNLOCK(dev); } return 0; } /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(dev_priv, reg) \ (((dev_priv)->info->gen >= 6) && \ ((reg) < 0x40000) && \ ((reg) != FORCEWAKE)) && \ (!IS_VALLEYVIEW((dev_priv)->dev)) #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ mtx_lock(&dev_priv->gt_lock); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_get(dev_priv); \ val = DRM_READ##y(dev_priv->mmio_map, reg); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_put(dev_priv); \ mtx_unlock(&dev_priv->gt_lock); \ } else { \ val = DRM_READ##y(dev_priv->mmio_map, reg); \ } \ trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } __i915_read(8, 8) __i915_read(16, 16) __i915_read(32, 32) __i915_read(64, 64) #undef __i915_read #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ u32 __fifo_ret = 0; \ trace_i915_reg_rw(true, reg, val, sizeof(val)); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ DRM_WRITE##y(dev_priv->mmio_map, reg, val); \ if (__predict_false(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ } __i915_write(8, 8) __i915_write(16, 16) __i915_write(32, 32) __i915_write(64, 64) #undef __i915_write Index: user/ngie/more-tests2/sys/kern/imgact_elf.c =================================================================== --- user/ngie/more-tests2/sys/kern/imgact_elf.c (revision 288954) +++ user/ngie/more-tests2/sys/kern/imgact_elf.c (revision 288955) @@ -1,2228 +1,2234 @@ /*- * Copyright (c) 2000 David O'Brien * Copyright (c) 1995-1996 Søren Schmidt * Copyright (c) 1996 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include "opt_compat.h" #include "opt_gzio.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ELF_NOTE_ROUNDSIZE 4 #define OLD_EI_BRAND 8 static int __elfN(check_header)(const Elf_Ehdr *hdr); static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int interp_name_len, int32_t *osrel); static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry, size_t pagesize); static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot, size_t pagesize); static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel); static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel); static vm_prot_t __elfN(trans_prot)(Elf_Word); static Elf_Word __elfN(untrans_prot)(vm_prot_t); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, ""); #define CORE_BUF_SIZE (16 * 1024) int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); static int elf_legacy_coredump = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, &elf_legacy_coredump, 0, ""); int __elfN(nxstack) = #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ 1; #else 0; #endif SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, nxstack, CTLFLAG_RW, &__elfN(nxstack), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack"); #if __ELF_WORD_SIZE == 32 #if defined(__amd64__) int i386_read_exec = 0; SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, "enable execution from readable segments"); #endif #endif static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; #define trunc_page_ps(va, ps) ((va) & ~(ps - 1)) #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1)) #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a)) static const char FREEBSD_ABI_VENDOR[] = "FreeBSD"; Elf_Brandnote __elfN(freebsd_brandnote) = { .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), .hdr.n_descsz = sizeof(int32_t), .hdr.n_type = 1, .vendor = FREEBSD_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = __elfN(freebsd_trans_osrel) }; static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) { uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); *osrel = *(const int32_t *)(p); return (TRUE); } static const char GNU_ABI_VENDOR[] = "GNU"; static int GNU_KFREEBSD_ABI_DESC = 3; Elf_Brandnote __elfN(kfreebsd_brandnote) = { .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), .hdr.n_descsz = 16, /* XXX at least 16 */ .hdr.n_type = 1, .vendor = GNU_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = kfreebsd_trans_osrel }; static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel) { 
const Elf32_Word *desc; uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; if (desc[0] != GNU_KFREEBSD_ABI_DESC) return (FALSE); /* * Debian GNU/kFreeBSD embed the earliest compatible kernel version * (__FreeBSD_version: Rxx) in the LSB way. */ *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; return (TRUE); } int __elfN(insert_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == NULL) { elf_brand_list[i] = entry; break; } } if (i == MAX_BRANDS) { printf("WARNING: %s: could not insert brandinfo entry: %p\n", __func__, entry); return (-1); } return (0); } int __elfN(remove_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == entry) { elf_brand_list[i] = NULL; break; } } if (i == MAX_BRANDS) return (-1); return (0); } int __elfN(brand_inuse)(Elf_Brandinfo *entry) { struct proc *p; int rval = FALSE; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_sysent == entry->sysvec) { rval = TRUE; break; } } sx_sunlock(&allproc_lock); return (rval); } static Elf_Brandinfo * __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int interp_name_len, int32_t *osrel) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; Elf_Brandinfo *bi; boolean_t ret; int i; /* * We support four types of branding -- (1) the ELF EI_OSABI field * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string * branding w/in the ELF header, (3) path of the `interp_path' * field, and (4) the ".note.ABI-tag" ELF section. */ /* Look for an ".note.ABI-tag" ELF section */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL) continue; if (hdr->e_machine == bi->machine && (bi->flags & (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { ret = __elfN(check_note)(imgp, bi->brand_note, osrel); if (ret) return (bi); } } /* If the executable has a brand, search for it in the brand list. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) continue; if (hdr->e_machine == bi->machine && (hdr->e_ident[EI_OSABI] == bi->brand || strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND], bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) return (bi); } /* No known brand, see if the header is recognized by any brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY || bi->header_supported == NULL) continue; if (hdr->e_machine == bi->machine) { ret = bi->header_supported(imgp); if (ret) return (bi); } } /* Lacking a known brand, search for a recognized interpreter. 
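 * A worked illustration (the interpreter path is only an example): for a
 * native binary whose PT_INTERP segment holds "/libexec/ld-elf.so.1",
 * p_filesz counts the terminating NUL, so interp_name_len is 21 while
 * strlen(bi->interp_path) is 20; the "+ 1" in the comparison below accounts
 * for exactly that difference before strncmp() is consulted.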
*/ if (interp != NULL) { for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) continue; if (hdr->e_machine == bi->machine && /* ELF image p_filesz includes terminating zero */ strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0) return (bi); } } /* Lacking a recognized interpreter, try the default brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) continue; if (hdr->e_machine == bi->machine && __elfN(fallback_brand) == bi->brand) return (bi); } return (NULL); } static int __elfN(check_header)(const Elf_Ehdr *hdr) { Elf_Brandinfo *bi; int i; if (!IS_ELF(*hdr) || hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA || hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_phentsize != sizeof(Elf_Phdr) || hdr->e_version != ELF_TARG_VER) return (ENOEXEC); /* * Make sure we have at least one brand for this machine. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && bi->machine == hdr->e_machine) break; } if (i == MAX_BRANDS) return (ENOEXEC); return (0); } static int __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot) { struct sf_buf *sf; int error; vm_offset_t off; /* * Create the page if it doesn't exist yet. Ignore errors. */ vm_map_lock(map); vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), VM_PROT_ALL, VM_PROT_ALL, 0); vm_map_unlock(map); /* * Find the page from the underlying object. */ if (object) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, end - start); vm_imgact_unmap_page(sf); if (error) { return (KERN_FAILURE); } } return (KERN_SUCCESS); } static int __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) { struct sf_buf *sf; vm_offset_t off; vm_size_t sz; int error, rv; if (start != trunc_page(start)) { rv = __elfN(map_partial)(map, object, offset, start, round_page(start), prot); if (rv) return (rv); offset += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { rv = __elfN(map_partial)(map, object, offset + trunc_page(end) - start, trunc_page(end), end, prot); if (rv) return (rv); end = trunc_page(end); } if (end > start) { if (offset & PAGE_MASK) { /* * The mapping is not page aligned. This means we have * to copy the data. Sigh. 
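 * A small worked example, assuming 4 KiB pages: with offset = 0x1200,
 * (offset & PAGE_MASK) is 0x200, so the file pages cannot simply be mapped
 * in place; the loop below maps the page backing the current offset, copies
 * at most PAGE_SIZE - 0x200 = 0xe00 bytes into the anonymous region obtained
 * from vm_map_find(), then advances offset and start and repeats until the
 * whole [start, end) range has been filled.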
*/ rv = vm_map_find(map, NULL, 0, &start, end - start, 0, VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0); if (rv) return (rv); if (object == NULL) return (KERN_SUCCESS); for (; start < end; start += sz) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); sz = end - start; if (sz > PAGE_SIZE - off) sz = PAGE_SIZE - off; error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); vm_imgact_unmap_page(sf); if (error) { return (KERN_FAILURE); } offset += sz; } rv = KERN_SUCCESS; } else { vm_object_reference(object); vm_map_lock(map); rv = vm_map_insert(map, object, offset, start, end, prot, VM_PROT_ALL, cow); vm_map_unlock(map); if (rv != KERN_SUCCESS) vm_object_deallocate(object); } return (rv); } else { return (KERN_SUCCESS); } } static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot, size_t pagesize) { struct sf_buf *sf; size_t map_len; vm_map_t map; vm_object_t object; vm_offset_t map_addr; int error, rv, cow; size_t copy_len; vm_offset_t file_addr; /* * It's necessary to fail if the filsz + offset taken from the * header is greater than the actual file pager object's size. * If we were to allow this, then the vm_map_find() below would * walk right off the end of the file object and into the ether. * * While I'm here, might as well check for something else that * is invalid: filsz cannot be greater than memsz. */ if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) { uprintf("elf_load_section: truncated ELF file\n"); return (ENOEXEC); } object = imgp->object; map = &imgp->proc->p_vmspace->vm_map; map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize); file_addr = trunc_page_ps(offset, pagesize); /* * We have two choices. We can either clear the data in the last page * of an oversized mapping, or we can start the anon mapping a page * early and copy the initialized data into that first page. We * choose the second.. */ if (memsz > filsz) map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr; else map_len = round_page_ps(offset + filsz, pagesize) - file_addr; if (map_len != 0) { /* cow flags: don't dump readonly sections in core */ cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); rv = __elfN(map_insert)(map, object, file_addr, /* file offset */ map_addr, /* virtual start */ map_addr + map_len,/* virtual end */ prot, cow); if (rv != KERN_SUCCESS) return (EINVAL); /* we can stop now if we've covered it all */ if (memsz == filsz) { return (0); } } /* * We have to get the remaining bit of the file into the first part * of the oversized map segment. This is normally because the .data * segment in the file is extended to provide bss. It's a neat idea * to try and save a page, but it's a pain in the behind to implement. */ copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize); map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize); map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) - map_addr; /* This had damn well better be true! 
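 * The remark above holds whenever memsz exceeds filsz: map_addr is
 * trunc_page(vmaddr + filsz) and map_len = round_page(vmaddr + memsz) -
 * map_addr is then strictly positive, so the map_insert() below creates an
 * anonymous, zero-filled region for the bss, after which copy_len (the
 * sub-page tail of the file, i.e. offset + filsz modulo the page size) is
 * copied into the first page of that region.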
*/ if (map_len != 0) { rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr + map_len, VM_PROT_ALL, 0); if (rv != KERN_SUCCESS) { return (EINVAL); } } if (copy_len != 0) { vm_offset_t off; sf = vm_imgact_map_page(object, offset + filsz); if (sf == NULL) return (EIO); /* send the page fragment to user space */ off = trunc_page_ps(offset + filsz, pagesize) - trunc_page(offset + filsz); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)map_addr, copy_len); vm_imgact_unmap_page(sf); if (error) { return (error); } } /* * set it to the specified protection. * XXX had better undo the damage from pasting over the cracks here! */ vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + map_len), prot, FALSE); return (0); } /* * Load the file "file" into memory. It may be either a shared object * or an executable. * * The "addr" reference parameter is in/out. On entry, it specifies * the address where a shared object should be loaded. If the file is * an executable, this value is ignored. On exit, "addr" specifies * where the file was actually loaded. * * The "entry" reference parameter is out only. On exit, it specifies * the entry point for the loaded file. */ static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry, size_t pagesize) { struct { struct nameidata nd; struct vattr attr; struct image_params image_params; } *tempdata; const Elf_Ehdr *hdr = NULL; const Elf_Phdr *phdr = NULL; struct nameidata *nd; struct vattr *attr; struct image_params *imgp; vm_prot_t prot; u_long rbase; u_long base_addr = 0; int error, i, numsegs; #ifdef CAPABILITY_MODE /* * XXXJA: This check can go away once we are sufficiently confident * that the checks in namei() are correct. */ if (IN_CAPABILITY_MODE(curthread)) return (ECAPMODE); #endif tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; /* * Initialize part of the common data */ imgp->proc = p; imgp->attr = attr; imgp->firstpage = NULL; imgp->image_header = NULL; imgp->object = NULL; imgp->execlabel = NULL; NDINIT(nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_SYSSPACE, file, curthread); if ((error = namei(nd)) != 0) { nd->ni_vp = NULL; goto fail; } NDFREE(nd, NDF_ONLY_PNBUF); imgp->vp = nd->ni_vp; /* * Check permissions, modes, uid, etc on the file, and "open" it. */ error = exec_check_permissions(imgp); if (error) goto fail; error = exec_map_first_page(imgp); if (error) goto fail; /* * Also make certain that the interpreter stays the same, so set * its VV_TEXT flag, too. 
*/ VOP_SET_TEXT(nd->ni_vp); imgp->object = nd->ni_vp->v_object; hdr = (const Elf_Ehdr *)imgp->image_header; if ((error = __elfN(check_header)(hdr)) != 0) goto fail; if (hdr->e_type == ET_DYN) rbase = *addr; else if (hdr->e_type == ET_EXEC) rbase = 0; else { error = ENOEXEC; goto fail; } /* Only support headers that fit within first page for now */ if ((hdr->e_phoff > PAGE_SIZE) || (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) { error = ENOEXEC; goto fail; } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { error = ENOEXEC; goto fail; } for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) { /* Loadable segment */ prot = __elfN(trans_prot)(phdr[i].p_flags); error = __elfN(load_section)(imgp, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize); if (error != 0) goto fail; /* * Establish the base address if this is the * first segment. */ if (numsegs == 0) base_addr = trunc_page(phdr[i].p_vaddr + rbase); numsegs++; } } *addr = base_addr; *entry = (unsigned long)hdr->e_entry + rbase; fail: if (imgp->firstpage) exec_unmap_first_page(imgp); if (nd->ni_vp) vput(nd->ni_vp); free(tempdata, M_TEMP); return (error); } static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs; struct vmspace *vmspace; vm_prot_t prot; u_long text_size = 0, data_size = 0, total_size = 0; u_long text_addr = 0, data_addr = 0; u_long seg_size, seg_addr; u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0; int32_t osrel = 0; int error = 0, i, n, interp_name_len = 0; const char *err_str = NULL, *interp = NULL, *newinterp = NULL; Elf_Brandinfo *brand_info; char *path; struct sysentvec *sv; /* * Do we have a valid ELF header ? * * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later * if particular brand doesn't support it. */ if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) return (-1); /* * From here on down, we return an errno, not -1, as we've * detected an ELF file. 
*/ if ((hdr->e_phoff > PAGE_SIZE) || (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) { /* Only support headers in first page for now */ uprintf("Program headers not in the first page\n"); return (ENOEXEC); } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { uprintf("Unaligned program headers\n"); return (ENOEXEC); } n = 0; baddr = 0; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: if (n == 0) baddr = phdr[i].p_vaddr; n++; break; case PT_INTERP: /* Path to interpreter */ if (phdr[i].p_filesz > MAXPATHLEN || phdr[i].p_offset > PAGE_SIZE || phdr[i].p_filesz > PAGE_SIZE - phdr[i].p_offset) { uprintf("Invalid PT_INTERP\n"); return (ENOEXEC); } interp = imgp->image_header + phdr[i].p_offset; interp_name_len = phdr[i].p_filesz; break; case PT_GNU_STACK: if (__elfN(nxstack)) imgp->stack_prot = __elfN(trans_prot)(phdr[i].p_flags); imgp->stack_sz = phdr[i].p_memsz; break; } } brand_info = __elfN(get_brandinfo)(imgp, interp, interp_name_len, &osrel); if (brand_info == NULL) { uprintf("ELF binary type \"%u\" not known.\n", hdr->e_ident[EI_OSABI]); return (ENOEXEC); } if (hdr->e_type == ET_DYN) { if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { uprintf("Cannot execute shared object\n"); return (ENOEXEC); } /* * Honour the base load address from the dso if it is * non-zero for some reason. */ if (baddr == 0) et_dyn_addr = ET_DYN_LOAD_ADDR; else et_dyn_addr = 0; } else et_dyn_addr = 0; sv = brand_info->sysvec; if (interp != NULL && brand_info->interp_newpath != NULL) newinterp = brand_info->interp_newpath; /* * Avoid a possible deadlock if the current address space is destroyed * and that address space maps the locked vnode. In the common case, * the locked vnode's v_usecount is decremented but remains greater * than zero. Consequently, the vnode lock is not needed by vrele(). * However, in cases where the vnode lock is external, such as nullfs, * v_usecount may become zero. * * The VV_TEXT flag prevents modifications to the executable while * the vnode is unlocked. */ VOP_UNLOCK(imgp->vp, 0); error = exec_new_vmspace(imgp, sv); imgp->proc->p_sysent = sv; vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error) return (error); for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: /* Loadable segment */ if (phdr[i].p_memsz == 0) break; prot = __elfN(trans_prot)(phdr[i].p_flags); error = __elfN(load_section)(imgp, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr, phdr[i].p_memsz, phdr[i].p_filesz, prot, sv->sv_pagesize); if (error != 0) return (error); /* * If this segment contains the program headers, * remember their virtual address for the AT_PHDR * aux entry. Static binaries don't usually include * a PT_PHDR entry. */ if (phdr[i].p_offset == 0 && hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <= phdr[i].p_filesz) proghdr = phdr[i].p_vaddr + hdr->e_phoff + et_dyn_addr; seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); seg_size = round_page(phdr[i].p_memsz + phdr[i].p_vaddr + et_dyn_addr - seg_addr); /* * Make the largest executable segment the official * text segment and all others data. * * Note that obreak() assumes that data_addr + * data_size == end of data load area, and the ELF * file format expects segments to be sorted by * address. If multiple data segments exist, the * last one will be used. 
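 * A short numeric illustration (made-up addresses, 4 KiB pages, ET_EXEC so
 * et_dyn_addr == 0): a PT_LOAD segment with p_vaddr = 0x400123 and
 * p_memsz = 0x2f00 gives seg_addr = trunc_page(0x400123) = 0x400000 and
 * seg_size = round_page(0x2f00 + 0x123) = 0x4000, so partial pages at both
 * ends are charged to the segment before the text/data classification below
 * runs.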
*/ if (phdr[i].p_flags & PF_X && text_size < seg_size) { text_size = seg_size; text_addr = seg_addr; } else { data_size = seg_size; data_addr = seg_addr; } total_size += seg_size; break; case PT_PHDR: /* Program header table info */ proghdr = phdr[i].p_vaddr + et_dyn_addr; break; default: break; } } if (data_addr == 0 && data_size == 0) { data_addr = text_addr; data_size = text_size; } entry = (u_long)hdr->e_entry + et_dyn_addr; /* * Check limits. It should be safe to check the * limits after loading the segments since we do * not actually fault in all the segments pages. */ PROC_LOCK(imgp->proc); if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA)) err_str = "Data segment size exceeds process limit"; else if (text_size > maxtsiz) err_str = "Text segment size exceeds system limit"; else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM)) err_str = "Total segment size exceeds process limit"; else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0) err_str = "Data segment size exceeds resource limit"; else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) err_str = "Total segment size exceeds resource limit"; if (err_str != NULL) { PROC_UNLOCK(imgp->proc); uprintf("%s\n", err_str); return (ENOMEM); } vmspace = imgp->proc->p_vmspace; vmspace->vm_tsize = text_size >> PAGE_SHIFT; vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; vmspace->vm_dsize = data_size >> PAGE_SHIFT; vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; /* * We load the dynamic linker where a userland call * to mmap(0, ...) would put it. The rationale behind this * calculation is that it leaves room for the heap to grow to * its maximum allowed size. */ addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(curthread, RLIMIT_DATA)); PROC_UNLOCK(imgp->proc); imgp->entry_addr = entry; if (interp != NULL) { int have_interp = FALSE; VOP_UNLOCK(imgp->vp, 0); if (brand_info->emul_path != NULL && brand_info->emul_path[0] != '\0') { path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path, interp); error = __elfN(load_file)(imgp->proc, path, &addr, &imgp->entry_addr, sv->sv_pagesize); free(path, M_TEMP); if (error == 0) have_interp = TRUE; } if (!have_interp && newinterp != NULL) { error = __elfN(load_file)(imgp->proc, newinterp, &addr, &imgp->entry_addr, sv->sv_pagesize); if (error == 0) have_interp = TRUE; } if (!have_interp) { error = __elfN(load_file)(imgp->proc, interp, &addr, &imgp->entry_addr, sv->sv_pagesize); } vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error != 0) { uprintf("ELF interpreter %s not found\n", interp); return (error); } } else addr = et_dyn_addr; /* * Construct auxargs table (used by the fixup routine) */ elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); elf_auxargs->execfd = -1; elf_auxargs->phdr = proghdr; elf_auxargs->phent = hdr->e_phentsize; elf_auxargs->phnum = hdr->e_phnum; elf_auxargs->pagesz = PAGE_SIZE; elf_auxargs->base = addr; elf_auxargs->flags = 0; elf_auxargs->entry = entry; elf_auxargs->hdr_eflags = hdr->e_flags; imgp->auxargs = elf_auxargs; imgp->interpreted = 0; imgp->reloc_base = addr; imgp->proc->p_osrel = osrel; return (error); } #define suword __CONCAT(suword, __ELF_WORD_SIZE) int __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp) { Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; Elf_Addr *base; Elf_Addr *pos; base = (Elf_Addr *)*stack_base; pos = base + (imgp->args->argc + imgp->args->envc + 2); if (args->execfd != -1) AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); AUXARGS_ENTRY(pos, 
AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_BASE, args->base); #ifdef AT_EHDRFLAGS AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); #endif if (imgp->execpathp != 0) AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp); AUXARGS_ENTRY(pos, AT_OSRELDATE, imgp->proc->p_ucred->cr_prison->pr_osreldate); if (imgp->canary != 0) { AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary); AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); } AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); if (imgp->pagesizes != 0) { AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes); AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); } if (imgp->sysent->sv_timekeep_base != 0) { AUXARGS_ENTRY(pos, AT_TIMEKEEP, imgp->sysent->sv_timekeep_base); } AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot : imgp->sysent->sv_stackprot); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; base--; suword(base, (long)imgp->args->argc); *stack_base = (register_t *)base; return (0); } /* * Code for generating ELF core dumps. */ typedef void (*segment_callback)(vm_map_entry_t, void *); /* Closure for cb_put_phdr(). */ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; /* Closure for cb_size_segment(). */ struct sseg_closure { int count; /* Count of writable segments. */ size_t size; /* Total size of all writable segments. */ }; typedef void (*outfunc_t)(void *, struct sbuf *, size_t *); struct note_info { int type; /* Note type. */ outfunc_t outfunc; /* Output function. */ void *outarg; /* Argument for the output function. */ size_t outsize; /* Output size. */ TAILQ_ENTRY(note_info) link; /* Link to the next note info. */ }; TAILQ_HEAD(note_info_list, note_info); /* Coredump output parameters. 
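 * The structure below is filled in once by __elfN(coredump)() and then
 * threaded through the writers: offset tracks how far the sbuf-drained
 * header/note stream has gotten, the credentials and thread are handed to
 * vn_rdwr_inchunks() by core_write(), and a non-NULL gzs (set only when
 * compression was requested and the kernel has gzio support) makes
 * core_output() route segment data through the gzio stream instead of
 * writing it directly.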
*/ struct coredump_params { off_t offset; struct ucred *active_cred; struct ucred *file_cred; struct thread *td; struct vnode *vp; struct gzio_stream *gzs; }; static void cb_put_phdr(vm_map_entry_t, void *); static void cb_size_segment(vm_map_entry_t, void *); static int core_write(struct coredump_params *, void *, size_t, off_t, enum uio_seg); static void each_writable_segment(struct thread *, segment_callback, void *); static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, struct note_info_list *, size_t); static void __elfN(prepare_notes)(struct thread *, struct note_info_list *, size_t *); static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t); static void __elfN(putnote)(struct note_info *, struct sbuf *); static size_t register_note(struct note_info_list *, int, outfunc_t, void *); static int sbuf_drain_core_output(void *, const char *, int); static int sbuf_drain_count(void *arg, const char *data, int len); static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *); static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *); static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *); static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *); static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *); static void note_procstat_files(void *, struct sbuf *, size_t *); static void note_procstat_groups(void *, struct sbuf *, size_t *); static void note_procstat_osrel(void *, struct sbuf *, size_t *); static void note_procstat_rlimit(void *, struct sbuf *, size_t *); static void note_procstat_umask(void *, struct sbuf *, size_t *); static void note_procstat_vmmap(void *, struct sbuf *, size_t *); #ifdef GZIO extern int compress_user_cores_gzlevel; /* * Write out a core segment to the compression stream. */ static int compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len) { u_int chunk_len; int error; while (len > 0) { chunk_len = MIN(len, CORE_BUF_SIZE); copyin(base, buf, chunk_len); error = gzio_write(p->gzs, buf, chunk_len); if (error != 0) break; base += chunk_len; len -= chunk_len; } return (error); } static int core_gz_write(void *base, size_t len, off_t offset, void *arg) { return (core_write((struct coredump_params *)arg, base, len, offset, UIO_SYSSPACE)); } #endif /* GZIO */ static int core_write(struct coredump_params *p, void *base, size_t len, off_t offset, enum uio_seg seg) { return (vn_rdwr_inchunks(UIO_WRITE, p->vp, base, len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, p->active_cred, p->file_cred, NULL, p->td)); } static int core_output(void *base, size_t len, off_t offset, struct coredump_params *p, void *tmpbuf) { #ifdef GZIO if (p->gzs != NULL) return (compress_chunk(p, base, tmpbuf, len)); #endif return (core_write(p, base, len, offset, UIO_USERSPACE)); } /* * Drain into a core file. */ static int sbuf_drain_core_output(void *arg, const char *data, int len) { struct coredump_params *p; int error, locked; p = (struct coredump_params *)arg; /* * Some kern_proc out routines that print to this sbuf may * call us with the process lock held. Draining with the * non-sleepable lock held is unsafe. The lock is needed for * those routines when dumping a live process. In our case we * can safely release the lock before draining and acquire * again after. 
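 * As with any sbuf(9) drain callback, a negative return value (the
 * return (-error) below) aborts the sbuf, while a successful drain reports
 * how many bytes it consumed; sbuf_drain_count() further down relies on the
 * same convention and merely accumulates the length it is offered.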
*/ locked = PROC_LOCKED(p->td->td_proc); if (locked) PROC_UNLOCK(p->td->td_proc); #ifdef GZIO if (p->gzs != NULL) error = gzio_write(p->gzs, __DECONST(char *, data), len); else #endif error = core_write(p, __DECONST(void *, data), len, p->offset, UIO_SYSSPACE); if (locked) PROC_LOCK(p->td->td_proc); if (error != 0) return (-error); p->offset += len; return (len); } /* * Drain into a counter. */ static int sbuf_drain_count(void *arg, const char *data __unused, int len) { size_t *sizep; sizep = (size_t *)arg; *sizep += len; return (len); } int __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) { struct ucred *cred = td->td_ucred; int error = 0; struct sseg_closure seginfo; struct note_info_list notelst; struct coredump_params params; struct note_info *ninfo; void *hdr, *tmpbuf; size_t hdrsize, notesz, coresize; boolean_t compress; compress = (flags & IMGACT_CORE_COMPRESS) != 0; hdr = NULL; tmpbuf = NULL; TAILQ_INIT(¬elst); /* Size the program segments. */ seginfo.count = 0; seginfo.size = 0; each_writable_segment(td, cb_size_segment, &seginfo); /* * Collect info about the core file header area. */ hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); __elfN(prepare_notes)(td, ¬elst, ¬esz); coresize = round_page(hdrsize + notesz) + seginfo.size; /* Set up core dump parameters. */ params.offset = 0; params.active_cred = cred; params.file_cred = NOCRED; params.td = td; params.vp = vp; params.gzs = NULL; #ifdef RACCT if (racct_enable) { PROC_LOCK(td->td_proc); error = racct_add(td->td_proc, RACCT_CORE, coresize); PROC_UNLOCK(td->td_proc); if (error != 0) { error = EFAULT; goto done; } } #endif if (coresize >= limit) { error = EFAULT; goto done; } #ifdef GZIO /* Create a compression stream if necessary. */ if (compress) { params.gzs = gzio_init(core_gz_write, GZIO_DEFLATE, CORE_BUF_SIZE, compress_user_cores_gzlevel, ¶ms); if (params.gzs == NULL) { error = EFAULT; goto done; } tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); } #endif /* * Allocate memory for building the header, fill it up, * and write it out following the notes. */ hdr = malloc(hdrsize, M_TEMP, M_WAITOK); if (hdr == NULL) { error = EINVAL; goto done; } error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, notesz); /* Write the contents of all of the writable segments. */ if (error == 0) { Elf_Phdr *php; off_t offset; int i; php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; offset = round_page(hdrsize + notesz); for (i = 0; i < seginfo.count; i++) { error = core_output((caddr_t)(uintptr_t)php->p_vaddr, php->p_filesz, offset, ¶ms, tmpbuf); if (error != 0) break; offset += php->p_filesz; php++; } #ifdef GZIO if (error == 0 && compress) error = gzio_flush(params.gzs); #endif } if (error) { log(LOG_WARNING, "Failed to write core file for process %s (error %d)\n", curproc->p_comm, error); } done: #ifdef GZIO if (compress) { free(tmpbuf, M_TEMP); if (params.gzs != NULL) gzio_fini(params.gzs); } #endif while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { TAILQ_REMOVE(¬elst, ninfo, link); free(ninfo, M_TEMP); } if (hdr != NULL) free(hdr, M_TEMP); return (error); } /* * A callback for each_writable_segment() to write out the segment's * program header entry. 
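 * The phdr_closure carries the running file offset: __elfN(puthdr)() seeds
 * it with round_page(hdrsize + notesz) and each call below rounds it up and
 * advances it by p_filesz, so the writable segments end up packed one after
 * another in the file, in the same map order that __elfN(coredump)() later
 * uses when it streams the segment contents out.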
*/ static void cb_put_phdr(entry, closure) vm_map_entry_t entry; void *closure; { struct phdr_closure *phc = (struct phdr_closure *)closure; Elf_Phdr *phdr = phc->phdr; phc->offset = round_page(phc->offset); phdr->p_type = PT_LOAD; phdr->p_offset = phc->offset; phdr->p_vaddr = entry->start; phdr->p_paddr = 0; phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; phdr->p_align = PAGE_SIZE; phdr->p_flags = __elfN(untrans_prot)(entry->protection); phc->offset += phdr->p_filesz; phc->phdr++; } /* * A callback for each_writable_segment() to gather information about * the number of segments and their total size. */ static void cb_size_segment(entry, closure) vm_map_entry_t entry; void *closure; { struct sseg_closure *ssc = (struct sseg_closure *)closure; ssc->count++; ssc->size += entry->end - entry->start; } /* * For each writable segment in the process's memory map, call the given * function with a pointer to the map entry and some arbitrary * caller-supplied data. */ static void each_writable_segment(td, func, closure) struct thread *td; segment_callback func; void *closure; { struct proc *p = td->td_proc; vm_map_t map = &p->p_vmspace->vm_map; vm_map_entry_t entry; vm_object_t backing_object, object; boolean_t ignore_entry; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { /* * Don't dump inaccessible mappings, deal with legacy * coredump mode. * * Note that read-only segments related to the elf binary * are marked MAP_ENTRY_NOCOREDUMP now so we no longer * need to arbitrarily ignore such segments. */ if (elf_legacy_coredump) { if ((entry->protection & VM_PROT_RW) != VM_PROT_RW) continue; } else { if ((entry->protection & VM_PROT_ALL) == 0) continue; } /* * Dont include memory segment in the coredump if * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in * madvise(2). Do not dump submaps (i.e. parts of the * kernel map). */ if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP)) continue; if ((object = entry->object.vm_object) == NULL) continue; /* Ignore memory-mapped devices and such things. */ VM_OBJECT_RLOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_RLOCK(backing_object); VM_OBJECT_RUNLOCK(object); object = backing_object; } ignore_entry = object->type != OBJT_DEFAULT && object->type != OBJT_SWAP && object->type != OBJT_VNODE && object->type != OBJT_PHYS; VM_OBJECT_RUNLOCK(object); if (ignore_entry) continue; (*func)(entry, closure); } vm_map_unlock_read(map); } /* * Write the core file header to the file, including padding up to * the page boundary. */ static int __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst, size_t notesz) { struct note_info *ninfo; struct sbuf *sb; int error; /* Fill in the header. */ bzero(hdr, hdrsize); __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz); sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_core_output, p); sbuf_start_section(sb, NULL); sbuf_bcat(sb, hdr, hdrsize); TAILQ_FOREACH(ninfo, notelst, link) __elfN(putnote)(ninfo, sb); /* Align up to a page boundary for the program segments. 
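 * (sbuf_end_section() pads the drained header-plus-notes stream with zero
 * bytes up to the next PAGE_SIZE boundary, so the first PT_LOAD segment
 * really does start at the round_page(hdrsize + notesz) offset recorded in
 * its program header.)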
*/ sbuf_end_section(sb, -1, PAGE_SIZE, 0); error = sbuf_finish(sb); sbuf_delete(sb); return (error); } static void __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, size_t *sizep) { struct proc *p; struct thread *thr; size_t size; p = td->td_proc; size = 0; size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p); /* * To have the debugger select the right thread (LWP) as the initial * thread, we dump the state of the thread passed to us in td first. * This is the thread that causes the core dump and thus likely to * be the right thread one wants to have selected in the debugger. */ thr = td; while (thr != NULL) { size += register_note(list, NT_PRSTATUS, __elfN(note_prstatus), thr); size += register_note(list, NT_FPREGSET, __elfN(note_fpregset), thr); size += register_note(list, NT_THRMISC, __elfN(note_thrmisc), thr); size += register_note(list, -1, __elfN(note_threadmd), thr); thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) : TAILQ_NEXT(thr, td_plist); if (thr == td) thr = TAILQ_NEXT(thr, td_plist); } size += register_note(list, NT_PROCSTAT_PROC, __elfN(note_procstat_proc), p); size += register_note(list, NT_PROCSTAT_FILES, note_procstat_files, p); size += register_note(list, NT_PROCSTAT_VMMAP, note_procstat_vmmap, p); size += register_note(list, NT_PROCSTAT_GROUPS, note_procstat_groups, p); size += register_note(list, NT_PROCSTAT_UMASK, note_procstat_umask, p); size += register_note(list, NT_PROCSTAT_RLIMIT, note_procstat_rlimit, p); size += register_note(list, NT_PROCSTAT_OSREL, note_procstat_osrel, p); size += register_note(list, NT_PROCSTAT_PSSTRINGS, __elfN(note_procstat_psstrings), p); size += register_note(list, NT_PROCSTAT_AUXV, __elfN(note_procstat_auxv), p); *sizep = size; } static void __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs, size_t notesz) { Elf_Ehdr *ehdr; Elf_Phdr *phdr; struct phdr_closure phc; ehdr = (Elf_Ehdr *)hdr; phdr = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)); ehdr->e_ident[EI_MAG0] = ELFMAG0; ehdr->e_ident[EI_MAG1] = ELFMAG1; ehdr->e_ident[EI_MAG2] = ELFMAG2; ehdr->e_ident[EI_MAG3] = ELFMAG3; ehdr->e_ident[EI_CLASS] = ELF_CLASS; ehdr->e_ident[EI_DATA] = ELF_DATA; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; ehdr->e_ident[EI_ABIVERSION] = 0; ehdr->e_ident[EI_PAD] = 0; ehdr->e_type = ET_CORE; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ehdr->e_machine = ELF_ARCH32; #else ehdr->e_machine = ELF_ARCH; #endif ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf_Ehdr); ehdr->e_flags = 0; ehdr->e_ehsize = sizeof(Elf_Ehdr); ehdr->e_phentsize = sizeof(Elf_Phdr); ehdr->e_phnum = numsegs + 1; ehdr->e_shentsize = sizeof(Elf_Shdr); ehdr->e_shnum = 0; ehdr->e_shstrndx = SHN_UNDEF; /* * Fill in the program header entries. */ /* The note segement. */ phdr->p_type = PT_NOTE; phdr->p_offset = hdrsize; phdr->p_vaddr = 0; phdr->p_paddr = 0; phdr->p_filesz = notesz; phdr->p_memsz = 0; phdr->p_flags = PF_R; phdr->p_align = ELF_NOTE_ROUNDSIZE; phdr++; /* All the writable segments from the program. 
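 * For reference, the finished core file is laid out as (N = seginfo.count):
 *
 *     Elf_Ehdr | Elf_Phdr[0] (PT_NOTE) | Elf_Phdr[1..N] (PT_LOAD) |
 *     notes | zero padding to a page boundary |
 *     segment 1 data | ... | segment N data
 *
 * with the segment data starting at round_page(hdrsize + notesz), matching
 * the offsets cb_put_phdr() records.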
*/ phc.phdr = phdr; phc.offset = round_page(hdrsize + notesz); each_writable_segment(td, cb_put_phdr, &phc); } static size_t register_note(struct note_info_list *list, int type, outfunc_t out, void *arg) { struct note_info *ninfo; size_t size, notesize; size = 0; out(arg, NULL, &size); ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK); ninfo->type = type; ninfo->outfunc = out; ninfo->outarg = arg; ninfo->outsize = size; TAILQ_INSERT_TAIL(list, ninfo, link); if (type == -1) return (size); notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static size_t append_note_data(const void *src, void *dst, size_t len) { size_t padded_len; padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE); if (dst != NULL) { bcopy(src, dst, len); bzero((char *)dst + len, padded_len - len); } return (padded_len); } size_t __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp) { Elf_Note *note; char *buf; size_t notesize; buf = dst; if (buf != NULL) { note = (Elf_Note *)buf; note->n_namesz = sizeof(FREEBSD_ABI_VENDOR); note->n_descsz = size; note->n_type = type; buf += sizeof(*note); buf += append_note_data(FREEBSD_ABI_VENDOR, buf, sizeof(FREEBSD_ABI_VENDOR)); append_note_data(src, buf, size); if (descp != NULL) *descp = buf; } notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static void __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb) { Elf_Note note; ssize_t old_len, sect_len; size_t new_len, descsz, i; if (ninfo->type == -1) { ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); return; } note.n_namesz = sizeof(FREEBSD_ABI_VENDOR); note.n_descsz = ninfo->outsize; note.n_type = ninfo->type; sbuf_bcat(sb, ¬e, sizeof(note)); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR)); sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (note.n_descsz == 0) return; sbuf_start_section(sb, &old_len); ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (sect_len < 0) return; new_len = (size_t)sect_len; descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); if (new_len < descsz) { /* * It is expected that individual note emitters will correctly * predict their expected output size and fill up to that size * themselves, padding in a format-specific way if needed. * However, in case they don't, just do it here with zeros. */ for (i = 0; i < descsz - new_len; i++) sbuf_putc(sb, 0); } else if (new_len > descsz) { /* * We can't always truncate sb -- we may have drained some * of it already. */ KASSERT(new_len == descsz, ("%s: Note type %u changed as we " "read it (%zu > %zu). Since it is longer than " "expected, this coredump's notes are corrupt. THIS " "IS A BUG in the note_procstat routine for type %u.\n", __func__, (unsigned)note.n_type, new_len, descsz, (unsigned)note.n_type)); } } /* * Miscellaneous note out functions. 
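 * Every emitter below follows the same two-pass protocol expected by
 * register_note() and __elfN(putnote)(): called with sb == NULL it only
 * reports its payload size through *sizep, and on the second call it must
 * emit exactly that many bytes into the sbuf.  A minimal, purely
 * hypothetical emitter would look like:
 *
 *     static void
 *     note_example(void *arg, struct sbuf *sb, size_t *sizep)
 *     {
 *             int datum = 42;
 *
 *             if (sb != NULL) {
 *                     KASSERT(*sizep == sizeof(datum), ("invalid size"));
 *                     sbuf_bcat(sb, &datum, sizeof(datum));
 *             }
 *             *sizep = sizeof(datum);
 *     }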
*/ #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 #include typedef struct prstatus32 elf_prstatus_t; typedef struct prpsinfo32 elf_prpsinfo_t; typedef struct fpreg32 elf_prfpregset_t; typedef struct fpreg32 elf_fpregset_t; typedef struct reg32 elf_gregset_t; typedef struct thrmisc32 elf_thrmisc_t; #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 typedef struct kinfo_proc32 elf_kinfo_proc_t; typedef uint32_t elf_ps_strings_t; #else typedef prstatus_t elf_prstatus_t; typedef prpsinfo_t elf_prpsinfo_t; typedef prfpregset_t elf_prfpregset_t; typedef prfpregset_t elf_fpregset_t; typedef gregset_t elf_gregset_t; typedef thrmisc_t elf_thrmisc_t; #define ELF_KERN_PROC_MASK 0 typedef struct kinfo_proc elf_kinfo_proc_t; typedef vm_offset_t elf_ps_strings_t; #endif static void __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_prpsinfo_t *psinfo; p = (struct proc *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); /* * XXX - We don't fill in the command line arguments properly * yet. */ strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs)); sbuf_bcat(sb, psinfo, sizeof(*psinfo)); free(psinfo, M_TEMP); } *sizep = sizeof(*psinfo); } static void __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prstatus_t *status; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*status), ("invalid size")); status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK); status->pr_version = PRSTATUS_VERSION; status->pr_statussz = sizeof(elf_prstatus_t); status->pr_gregsetsz = sizeof(elf_gregset_t); status->pr_fpregsetsz = sizeof(elf_fpregset_t); status->pr_osreldate = osreldate; status->pr_cursig = td->td_proc->p_sig; status->pr_pid = td->td_tid; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_regs32(td, &status->pr_reg); #else fill_regs(td, &status->pr_reg); #endif sbuf_bcat(sb, status, sizeof(*status)); free(status, M_TEMP); } *sizep = sizeof(*status); } static void __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prfpregset_t *fpregset; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*fpregset), ("invalid size")); fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_fpregs32(td, fpregset); #else fill_fpregs(td, fpregset); #endif sbuf_bcat(sb, fpregset, sizeof(*fpregset)); free(fpregset, M_TEMP); } *sizep = sizeof(*fpregset); } static void __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_thrmisc_t thrmisc; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(thrmisc), ("invalid size")); bzero(&thrmisc._pad, sizeof(thrmisc._pad)); strcpy(thrmisc.pr_tname, td->td_name); sbuf_bcat(sb, &thrmisc, sizeof(thrmisc)); } *sizep = sizeof(thrmisc); } /* * Allow for MD specific notes, as well as any MD * specific preparations for writing MI notes. 
*/ static void __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; void *buf; size_t size; td = (struct thread *)arg; size = *sizep; if (size != 0 && sb != NULL) buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); else buf = NULL; size = 0; __elfN(dump_thread)(td, buf, &size); KASSERT(sb == NULL || *sizep == size, ("invalid size")); if (size != 0 && sb != NULL) sbuf_bcat(sb, buf, size); free(buf, M_TEMP); *sizep = size; } #ifdef KINFO_PROC_SIZE CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #endif static void __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + p->p_numthreads * sizeof(elf_kinfo_proc_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(elf_kinfo_proc_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sx_slock(&proctree_lock); PROC_LOCK(p); kern_proc_out(p, sb, ELF_KERN_PROC_MASK); sx_sunlock(&proctree_lock); } *sizep = size; } #ifdef KINFO_FILE_SIZE CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); #endif static void note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size, sect_sz, i; ssize_t start_len, sect_len; int structsize, filedesc_flags; if (coredump_pack_fileinfo) filedesc_flags = KERN_FILEDESC_PACK_KINFO; else filedesc_flags = 0; p = (struct proc *)arg; structsize = sizeof(struct kinfo_file); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, -1, filedesc_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_start_section(sb, &start_len); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), filedesc_flags); sect_len = sbuf_end_section(sb, start_len, 0, 0); if (sect_len < 0) return; sect_sz = sect_len; KASSERT(sect_sz <= *sizep, ("kern_proc_filedesc_out did not respect maxlen; " "requested %zu, got %zu", *sizep - sizeof(structsize), sect_sz - sizeof(structsize))); for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) sbuf_putc(sb, 0); } } #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; - int structsize; + int structsize, vmmap_flags; + if (coredump_pack_vmmapinfo) + vmmap_flags = KERN_VMMAP_PACK_KINFO; + else + vmmap_flags = 0; + p = (struct proc *)arg; + structsize = sizeof(struct kinfo_vmentry); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); - kern_proc_vmmap_out(p, sb); + kern_proc_vmmap_out(p, sb, -1, vmmap_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { - structsize = sizeof(struct kinfo_vmentry); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); - kern_proc_vmmap_out(p, sb); + kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), + vmmap_flags); } } static void note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(gid_t); sbuf_bcat(sb, &structsize, 
sizeof(structsize)); sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * sizeof(gid_t)); } *sizep = size; } static void note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(p->p_fd->fd_cmask); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_fd->fd_cmask); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_fd->fd_cmask, sizeof(p->p_fd->fd_cmask)); } *sizep = size; } static void note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; struct rlimit rlim[RLIM_NLIMITS]; size_t size; int structsize, i; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(rlim); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(rlim); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); for (i = 0; i < RLIM_NLIMITS; i++) lim_rlimit_proc(p, i, &rlim[i]); PROC_UNLOCK(p); sbuf_bcat(sb, rlim, sizeof(rlim)); } *sizep = size; } static void note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(p->p_osrel); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_osrel); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); } *sizep = size; } static void __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_ps_strings_t ps_strings; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(ps_strings); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(ps_strings); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ps_strings = PTROUT(p->p_sysent->sv_psstrings); #else ps_strings = p->p_sysent->sv_psstrings; #endif sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); } *sizep = size; } static void __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { structsize = sizeof(Elf_Auxinfo); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); } } static boolean_t __elfN(parse_notes)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel, const Elf_Phdr *pnote) { const Elf_Note *note, *note0, *note_end; const char *note_name; int i; if (pnote == NULL || pnote->p_offset > PAGE_SIZE || pnote->p_filesz > PAGE_SIZE - pnote->p_offset) return (FALSE); note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset); note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset + pnote->p_filesz); for (i = 0; i < 100 && note >= note0 && note < note_end; i++) { if (!aligned(note, Elf32_Addr) || (const char *)note_end - (const char *)note < sizeof(Elf_Note)) return (FALSE); if (note->n_namesz != checknote->hdr.n_namesz || note->n_descsz != checknote->hdr.n_descsz || note->n_type != checknote->hdr.n_type) goto nextnote; note_name = (const char *)(note + 1); if (note_name + checknote->hdr.n_namesz >= (const char *)note_end || 
strncmp(checknote->vendor, note_name, checknote->hdr.n_namesz) != 0) goto nextnote; /* * Fetch the osreldate for binary * from the ELF OSABI-note if necessary. */ if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 && checknote->trans_osrel != NULL) return (checknote->trans_osrel(note, osrel)); return (TRUE); nextnote: note = (const Elf_Note *)((const char *)(note + 1) + roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE)); } return (FALSE); } /* * Try to find the appropriate ABI-note section for checknote, * fetch the osreldate for binary from the ELF OSABI-note. Only the * first page of the image is searched, the same as for headers. */ static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel) { const Elf_Phdr *phdr; const Elf_Ehdr *hdr; int i; hdr = (const Elf_Ehdr *)imgp->image_header; phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, checknote, osrel, &phdr[i])) return (TRUE); } return (FALSE); } /* * Tell kern_execve.c about it, with a little help from the linker. */ static struct execsw __elfN(execsw) = { __CONCAT(exec_, __elfN(imgact)), __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); static vm_prot_t __elfN(trans_prot)(Elf_Word flags) { vm_prot_t prot; prot = 0; if (flags & PF_X) prot |= VM_PROT_EXECUTE; if (flags & PF_W) prot |= VM_PROT_WRITE; if (flags & PF_R) prot |= VM_PROT_READ; #if __ELF_WORD_SIZE == 32 #if defined(__amd64__) if (i386_read_exec && (flags & PF_R)) prot |= VM_PROT_EXECUTE; #endif #endif return (prot); } static Elf_Word __elfN(untrans_prot)(vm_prot_t prot) { Elf_Word flags; flags = 0; if (prot & VM_PROT_EXECUTE) flags |= PF_X; if (prot & VM_PROT_READ) flags |= PF_R; if (prot & VM_PROT_WRITE) flags |= PF_W; return (flags); } Index: user/ngie/more-tests2/sys/kern/kern_exec.c =================================================================== --- user/ngie/more-tests2/sys/kern/kern_exec.c (revision 288954) +++ user/ngie/more-tests2/sys/kern/kern_exec.c (revision 288955) @@ -1,1600 +1,1605 @@ /*- * Copyright (c) 1993, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include "opt_hwpmc_hooks.h" #include "opt_ktrace.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #ifdef KDTRACE_HOOKS #include dtrace_execexit_func_t dtrace_fasttrap_exec; #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *"); SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int"); SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *"); MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments"); int coredump_pack_fileinfo = 1; SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN, &coredump_pack_fileinfo, 0, "Enable file path packing in 'procstat -f' coredump notes"); +int coredump_pack_vmmapinfo = 1; +SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN, + &coredump_pack_vmmapinfo, 0, + "Enable file path packing in 'procstat -v' coredump notes"); + static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS); static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS); static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS); static int do_execve(struct thread *td, struct image_args *args, struct mac *mac_p); /* XXX This should be vm_size_t. */ SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD, NULL, 0, sysctl_kern_ps_strings, "LU", ""); /* XXX This should be vm_size_t. */ SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD| CTLFLAG_CAPRD, NULL, 0, sysctl_kern_usrstack, "LU", ""); SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD, NULL, 0, sysctl_kern_stackprot, "I", ""); u_long ps_arg_cache_limit = PAGE_SIZE / 16; SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW, &ps_arg_cache_limit, 0, ""); static int disallow_high_osrel; SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW, &disallow_high_osrel, 0, "Disallow execution of binaries built for higher version of the world"); static int map_at_zero = 0; SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0, "Permit processes to map an object at virtual address 0."); static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS) { struct proc *p; int error; p = curproc; #ifdef SCTL_MASK32 if (req->flags & SCTL_MASK32) { unsigned int val; val = (unsigned int)p->p_sysent->sv_psstrings; error = SYSCTL_OUT(req, &val, sizeof(val)); } else #endif error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings, sizeof(p->p_sysent->sv_psstrings)); return error; } static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS) { struct proc *p; int error; p = curproc; #ifdef SCTL_MASK32 if (req->flags & SCTL_MASK32) { unsigned int val; val = (unsigned int)p->p_sysent->sv_usrstack; error = SYSCTL_OUT(req, &val, sizeof(val)); } else #endif error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack, sizeof(p->p_sysent->sv_usrstack)); return error; } static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS) { struct proc *p; p = curproc; return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot, sizeof(p->p_sysent->sv_stackprot))); } /* * Each of the items is a pointer to a `const struct execsw', hence the * double pointer here. 
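/*
 * The kern.coredump_pack_vmmapinfo knob added above sits alongside the
 * existing kern.coredump_pack_fileinfo sysctl; a userland consumer can
 * inspect it like any other integer sysctl.  This is only an illustrative
 * reader with minimal error handling.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int packed;
	size_t len = sizeof(packed);

	if (sysctlbyname("kern.coredump_pack_vmmapinfo", &packed, &len,
	    NULL, 0) == 0)
		printf("vmmap notes packed: %d\n", packed);
	return (0);
}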
*/ static const struct execsw **execsw; #ifndef _SYS_SYSPROTO_H_ struct execve_args { char *fname; char **argv; char **envv; }; #endif int sys_execve(struct thread *td, struct execve_args *uap) { struct image_args args; struct vmspace *oldvmspace; int error; error = pre_execve(td, &oldvmspace); if (error != 0) return (error); error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &args, NULL); post_execve(td, error, oldvmspace); return (error); } #ifndef _SYS_SYSPROTO_H_ struct fexecve_args { int fd; char **argv; char **envv; } #endif int sys_fexecve(struct thread *td, struct fexecve_args *uap) { struct image_args args; struct vmspace *oldvmspace; int error; error = pre_execve(td, &oldvmspace); if (error != 0) return (error); error = exec_copyin_args(&args, NULL, UIO_SYSSPACE, uap->argv, uap->envv); if (error == 0) { args.fd = uap->fd; error = kern_execve(td, &args, NULL); } post_execve(td, error, oldvmspace); return (error); } #ifndef _SYS_SYSPROTO_H_ struct __mac_execve_args { char *fname; char **argv; char **envv; struct mac *mac_p; }; #endif int sys___mac_execve(struct thread *td, struct __mac_execve_args *uap) { #ifdef MAC struct image_args args; struct vmspace *oldvmspace; int error; error = pre_execve(td, &oldvmspace); if (error != 0) return (error); error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &args, uap->mac_p); post_execve(td, error, oldvmspace); return (error); #else return (ENOSYS); #endif } int pre_execve(struct thread *td, struct vmspace **oldvmspace) { struct proc *p; int error; KASSERT(td == curthread, ("non-current thread %p", td)); error = 0; p = td->td_proc; if ((p->p_flag & P_HADTHREADS) != 0) { PROC_LOCK(p); if (thread_single(p, SINGLE_BOUNDARY) != 0) error = ERESTART; PROC_UNLOCK(p); } KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0, ("nested execve")); *oldvmspace = p->p_vmspace; return (error); } void post_execve(struct thread *td, int error, struct vmspace *oldvmspace) { struct proc *p; KASSERT(td == curthread, ("non-current thread %p", td)); p = td->td_proc; if ((p->p_flag & P_HADTHREADS) != 0) { PROC_LOCK(p); /* * If success, we upgrade to SINGLE_EXIT state to * force other threads to suicide. */ if (error == 0) thread_single(p, SINGLE_EXIT); else thread_single_end(p, SINGLE_BOUNDARY); PROC_UNLOCK(p); } if ((td->td_pflags & TDP_EXECVMSPC) != 0) { KASSERT(p->p_vmspace != oldvmspace, ("oldvmspace still used")); vmspace_free(oldvmspace); td->td_pflags &= ~TDP_EXECVMSPC; } } /* * XXX: kern_execve has the astonishing property of not always returning to * the caller. If sufficiently bad things happen during the call to * do_execve(), it can end up calling exit1(); as a result, callers must * avoid doing anything which they might need to undo (e.g., allocating * memory). */ int kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p) { AUDIT_ARG_ARGV(args->begin_argv, args->argc, args->begin_envv - args->begin_argv); AUDIT_ARG_ENVV(args->begin_envv, args->envc, args->endp - args->begin_envv); return (do_execve(td, args, mac_p)); } /* * In-kernel implementation of execve(). All arguments are assumed to be * userspace pointers from the passed thread. 
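/*
 * sys_fexecve() above is the kernel side of fexecve(2): the image comes
 * from an already-open descriptor instead of a path name.  A minimal
 * userland sketch; /bin/ls is just an example target, and O_EXEC opens
 * the file for execution only, matching the descriptor restriction noted
 * in the kernel code.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

extern char **environ;

int
main(void)
{
	int fd;
	char *argv[] = { "ls", "-l", NULL };

	fd = open("/bin/ls", O_EXEC);
	if (fd == -1 || fexecve(fd, argv, environ) == -1)
		perror("fexecve");
	return (1);
}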
*/ static int do_execve(td, args, mac_p) struct thread *td; struct image_args *args; struct mac *mac_p; { struct proc *p = td->td_proc; struct nameidata nd; struct ucred *newcred = NULL, *oldcred; struct uidinfo *euip = NULL; register_t *stack_base; int error, i; struct image_params image_params, *imgp; struct vattr attr; int (*img_first)(struct image_params *); struct pargs *oldargs = NULL, *newargs = NULL; struct sigacts *oldsigacts, *newsigacts; #ifdef KTRACE struct vnode *tracevp = NULL; struct ucred *tracecred = NULL; #endif struct vnode *oldtextvp = NULL, *newtextvp; cap_rights_t rights; int credential_changing; int textset; #ifdef MAC struct label *interpvplabel = NULL; int will_transition; #endif #ifdef HWPMC_HOOKS struct pmckern_procexec pe; #endif static const char fexecv_proc_title[] = "(fexecv)"; imgp = &image_params; /* * Lock the process and set the P_INEXEC flag to indicate that * it should be left alone until we're done here. This is * necessary to avoid race conditions - e.g. in ptrace() - * that might allow a local user to illicitly obtain elevated * privileges. */ PROC_LOCK(p); KASSERT((p->p_flag & P_INEXEC) == 0, ("%s(): process already has P_INEXEC flag", __func__)); p->p_flag |= P_INEXEC; PROC_UNLOCK(p); /* * Initialize part of the common data */ bzero(imgp, sizeof(*imgp)); imgp->proc = p; imgp->attr = &attr; imgp->args = args; #ifdef MAC error = mac_execve_enter(imgp, mac_p); if (error) goto exec_fail; #endif /* * Translate the file name. namei() returns a vnode pointer * in ni_vp amoung other things. * * XXXAUDIT: It would be desirable to also audit the name of the * interpreter if this is an interpreted binary. */ if (args->fname != NULL) { NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME | AUDITVNODE1, UIO_SYSSPACE, args->fname, td); } SDT_PROBE1(proc, kernel, , exec, args->fname); interpret: if (args->fname != NULL) { #ifdef CAPABILITY_MODE /* * While capability mode can't reach this point via direct * path arguments to execve(), we also don't allow * interpreters to be used in capability mode (for now). * Catch indirect lookups and return a permissions error. */ if (IN_CAPABILITY_MODE(td)) { error = ECAPMODE; goto exec_fail; } #endif error = namei(&nd); if (error) goto exec_fail; newtextvp = nd.ni_vp; imgp->vp = newtextvp; } else { AUDIT_ARG_FD(args->fd); /* * Descriptors opened only with O_EXEC or O_RDONLY are allowed. */ error = fgetvp_exec(td, args->fd, cap_rights_init(&rights, CAP_FEXECVE), &newtextvp); if (error) goto exec_fail; vn_lock(newtextvp, LK_EXCLUSIVE | LK_RETRY); AUDIT_ARG_VNODE1(newtextvp); imgp->vp = newtextvp; } /* * Check file permissions (also 'opens' file) */ error = exec_check_permissions(imgp); if (error) goto exec_fail_dealloc; imgp->object = imgp->vp->v_object; if (imgp->object != NULL) vm_object_reference(imgp->object); /* * Set VV_TEXT now so no one can write to the executable while we're * activating it. * * Remember if this was set before and unset it in case this is not * actually an executable image. */ textset = VOP_IS_TEXT(imgp->vp); VOP_SET_TEXT(imgp->vp); error = exec_map_first_page(imgp); if (error) goto exec_fail_dealloc; imgp->proc->p_osrel = 0; /* * If the current process has a special image activator it * wants to try first, call it. For example, emulating shell * scripts differently. */ error = -1; if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL) error = img_first(imgp); /* * Loop through the list of image activators, calling each one. 
* An activator returns -1 if there is no match, 0 on success, * and an error otherwise. */ for (i = 0; error == -1 && execsw[i]; ++i) { if (execsw[i]->ex_imgact == NULL || execsw[i]->ex_imgact == img_first) { continue; } error = (*execsw[i]->ex_imgact)(imgp); } if (error) { if (error == -1) { if (textset == 0) VOP_UNSET_TEXT(imgp->vp); error = ENOEXEC; } goto exec_fail_dealloc; } /* * Special interpreter operation, cleanup and loop up to try to * activate the interpreter. */ if (imgp->interpreted) { exec_unmap_first_page(imgp); /* * VV_TEXT needs to be unset for scripts. There is a short * period before we determine that something is a script where * VV_TEXT will be set. The vnode lock is held over this * entire period so nothing should illegitimately be blocked. */ VOP_UNSET_TEXT(imgp->vp); /* free name buffer and old vnode */ if (args->fname != NULL) NDFREE(&nd, NDF_ONLY_PNBUF); #ifdef MAC mac_execve_interpreter_enter(newtextvp, &interpvplabel); #endif if (imgp->opened) { VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td); imgp->opened = 0; } vput(newtextvp); vm_object_deallocate(imgp->object); imgp->object = NULL; /* set new name to that of the interpreter */ NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME, UIO_SYSSPACE, imgp->interpreter_name, td); args->fname = imgp->interpreter_name; goto interpret; } /* * NB: We unlock the vnode here because it is believed that none * of the sv_copyout_strings/sv_fixup operations require the vnode. */ VOP_UNLOCK(imgp->vp, 0); /* * Do the best to calculate the full path to the image file. */ if (imgp->auxargs != NULL && ((args->fname != NULL && args->fname[0] == '/') || vn_fullpath(td, imgp->vp, &imgp->execpath, &imgp->freepath) != 0)) imgp->execpath = args->fname; if (disallow_high_osrel && P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) { error = ENOEXEC; uprintf("Osrel %d for image %s too high\n", p->p_osrel, imgp->execpath != NULL ? imgp->execpath : ""); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); goto exec_fail_dealloc; } /* ABI enforces the use of Capsicum. Switch into capabilities mode. */ if (SV_PROC_FLAG(p, SV_CAPSICUM)) sys_cap_enter(td, NULL); /* * Copy out strings (args and env) and initialize stack base */ if (p->p_sysent->sv_copyout_strings) stack_base = (*p->p_sysent->sv_copyout_strings)(imgp); else stack_base = exec_copyout_strings(imgp); /* * If custom stack fixup routine present for this process * let it do the stack setup. * Else stuff argument count as first item on stack */ if (p->p_sysent->sv_fixup != NULL) (*p->p_sysent->sv_fixup)(&stack_base, imgp); else suword(--stack_base, imgp->args->argc); if (args->fdp != NULL) { /* Install a brand new file descriptor table. */ fdinstall_remapped(td, args->fdp); args->fdp = NULL; } else { /* * Keep on using the existing file descriptor table. For * security and other reasons, the file descriptor table * cannot be shared after an exec. */ fdunshare(td); /* close files on exec */ fdcloseexec(td); } /* * Malloc things before we need locks. */ i = imgp->args->begin_envv - imgp->args->begin_argv; /* Cache arguments if they fit inside our allowance */ if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { newargs = pargs_alloc(i); bcopy(imgp->args->begin_argv, newargs->ar_args, i); } vn_lock(imgp->vp, LK_SHARED | LK_RETRY); /* * For security and other reasons, signal handlers cannot * be shared after an exec. The new process gets a copy of the old * handlers. In execsigs(), the new process will have its signals * reset. 
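/*
 * A hypothetical dispatcher mirroring the image-activator convention
 * described above: -1 means "not my format", 0 means success, and any
 * other value is an errno.  The activator type and table here are
 * illustrative, not the kernel's execsw machinery.
 */
#include <errno.h>
#include <stddef.h>

typedef int (*activator_t)(const unsigned char *image, size_t len);

static int
dispatch(const activator_t *tab, const unsigned char *image, size_t len)
{
	int error = -1;

	for (size_t i = 0; error == -1 && tab[i] != NULL; i++)
		error = tab[i](image, len);
	return (error == -1 ? ENOEXEC : error);	/* nothing claimed the image */
}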
*/ if (sigacts_shared(p->p_sigacts)) { oldsigacts = p->p_sigacts; newsigacts = sigacts_alloc(); sigacts_copy(newsigacts, oldsigacts); } else { oldsigacts = NULL; newsigacts = NULL; /* satisfy gcc */ } PROC_LOCK(p); if (oldsigacts) p->p_sigacts = newsigacts; oldcred = p->p_ucred; /* Stop profiling */ stopprofclock(p); /* reset caught signals */ execsigs(p); /* name this process - nameiexec(p, ndp) */ bzero(p->p_comm, sizeof(p->p_comm)); if (args->fname) bcopy(nd.ni_cnd.cn_nameptr, p->p_comm, min(nd.ni_cnd.cn_namelen, MAXCOMLEN)); else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0) bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title)); bcopy(p->p_comm, td->td_name, sizeof(td->td_name)); #ifdef KTR sched_clear_tdname(td); #endif /* * mark as execed, wakeup the process that vforked (if any) and tell * it that it now has its own resources back */ p->p_flag |= P_EXEC; if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0) p->p_flag2 &= ~P2_NOTRACE; if (p->p_flag & P_PPWAIT) { p->p_flag &= ~(P_PPWAIT | P_PPTRACE); cv_broadcast(&p->p_pwait); } /* * Implement image setuid/setgid. * * Don't honor setuid/setgid if the filesystem prohibits it or if * the process is being traced. * * We disable setuid/setgid/etc in compatibility mode on the basis * that most setugid applications are not written with that * environment in mind, and will therefore almost certainly operate * incorrectly. In principle there's no reason that setugid * applications might not be useful in capability mode, so we may want * to reconsider this conservative design choice in the future. * * XXXMAC: For the time being, use NOSUID to also prohibit * transitions on the file system. */ credential_changing = 0; credential_changing |= (attr.va_mode & S_ISUID) && oldcred->cr_uid != attr.va_uid; credential_changing |= (attr.va_mode & S_ISGID) && oldcred->cr_gid != attr.va_gid; #ifdef MAC will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp, interpvplabel, imgp); credential_changing |= will_transition; #endif if (credential_changing && #ifdef CAPABILITY_MODE ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) && #endif (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && (p->p_flag & P_TRACED) == 0) { /* * Turn off syscall tracing for set-id programs, except for * root. Record any set-id flags first to make sure that * we do not regain any tracing during a possible block. */ setsugid(p); #ifdef KTRACE if (p->p_tracecred != NULL && priv_check_cred(p->p_tracecred, PRIV_DEBUG_DIFFCRED, 0)) ktrprocexec(p, &tracecred, &tracevp); #endif /* * Close any file descriptors 0..2 that reference procfs, * then make sure file descriptors 0..2 are in use. * * Both fdsetugidsafety() and fdcheckstd() may call functions * taking sleepable locks, so temporarily drop our locks. */ PROC_UNLOCK(p); VOP_UNLOCK(imgp->vp, 0); fdsetugidsafety(td); error = fdcheckstd(td); if (error != 0) goto done1; newcred = crdup(oldcred); euip = uifind(attr.va_uid); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); PROC_LOCK(p); /* * Set the new credentials. */ if (attr.va_mode & S_ISUID) change_euid(newcred, euip); if (attr.va_mode & S_ISGID) change_egid(newcred, attr.va_gid); #ifdef MAC if (will_transition) { mac_vnode_execve_transition(oldcred, newcred, imgp->vp, interpvplabel, imgp); } #endif /* * Implement correct POSIX saved-id behavior. * * XXXMAC: Note that the current logic will save the * uid and gid if a MAC domain transition occurs, even * though maybe it shouldn't. 
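/*
 * Sketch of the credential_changing test above, in userspace terms: the
 * image's setuid/setgid bits only force a credential change when the
 * file's owner or group differs from the caller's current effective IDs.
 * (The kernel additionally folds in MAC transition decisions.)
 */
#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

static bool
would_change_creds(const struct stat *st)
{
	bool chg = false;

	chg |= (st->st_mode & S_ISUID) != 0 && st->st_uid != geteuid();
	chg |= (st->st_mode & S_ISGID) != 0 && st->st_gid != getegid();
	return (chg);
}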
*/ change_svuid(newcred, newcred->cr_uid); change_svgid(newcred, newcred->cr_gid); proc_set_cred(p, newcred); } else { if (oldcred->cr_uid == oldcred->cr_ruid && oldcred->cr_gid == oldcred->cr_rgid) p->p_flag &= ~P_SUGID; /* * Implement correct POSIX saved-id behavior. * * XXX: It's not clear that the existing behavior is * POSIX-compliant. A number of sources indicate that the * saved uid/gid should only be updated if the new ruid is * not equal to the old ruid, or the new euid is not equal * to the old euid and the new euid is not equal to the old * ruid. The FreeBSD code always updates the saved uid/gid. * Also, this code uses the new (replaced) euid and egid as * the source, which may or may not be the right ones to use. */ if (oldcred->cr_svuid != oldcred->cr_uid || oldcred->cr_svgid != oldcred->cr_gid) { PROC_UNLOCK(p); VOP_UNLOCK(imgp->vp, 0); newcred = crdup(oldcred); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); PROC_LOCK(p); change_svuid(newcred, newcred->cr_uid); change_svgid(newcred, newcred->cr_gid); proc_set_cred(p, newcred); } } /* * Store the vp for use in procfs. This vnode was referenced by namei * or fgetvp_exec. */ oldtextvp = p->p_textvp; p->p_textvp = newtextvp; #ifdef KDTRACE_HOOKS /* * Tell the DTrace fasttrap provider about the exec if it * has declared an interest. */ if (dtrace_fasttrap_exec) dtrace_fasttrap_exec(p); #endif /* * Notify others that we exec'd, and clear the P_INEXEC flag * as we're now a bona fide freshly-execed process. */ KNOTE_LOCKED(&p->p_klist, NOTE_EXEC); p->p_flag &= ~P_INEXEC; /* clear "fork but no exec" flag, as we _are_ execing */ p->p_acflag &= ~AFORK; /* * Free any previous argument cache and replace it with * the new argument cache, if any. */ oldargs = p->p_args; p->p_args = newargs; newargs = NULL; #ifdef HWPMC_HOOKS /* * Check if system-wide sampling is in effect or if the * current process is using PMCs. If so, do exec() time * processing. This processing needs to happen AFTER the * P_INEXEC flag is cleared. * * The proc lock needs to be released before taking the PMC * SX. */ if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) { PROC_UNLOCK(p); VOP_UNLOCK(imgp->vp, 0); pe.pm_credentialschanged = credential_changing; pe.pm_entryaddr = imgp->entry_addr; PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); } else PROC_UNLOCK(p); #else /* !HWPMC_HOOKS */ PROC_UNLOCK(p); #endif /* Set values passed into the program in registers. */ if (p->p_sysent->sv_setregs) (*p->p_sysent->sv_setregs)(td, imgp, (u_long)(uintptr_t)stack_base); else exec_setregs(td, imgp, (u_long)(uintptr_t)stack_base); vfs_mark_atime(imgp->vp, td->td_ucred); SDT_PROBE1(proc, kernel, , exec__success, args->fname); VOP_UNLOCK(imgp->vp, 0); done1: /* * Free any resources malloc'd earlier that we didn't use. */ if (euip != NULL) uifree(euip); if (newcred != NULL) crfree(oldcred); /* * Handle deferred decrement of ref counts. 
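/*
 * A stripped-down model of the deferred-release idiom used here: swap the
 * old reference out while the lock is held, but only drop it after the
 * lock is released, so the release path may sleep.  Assumes "current"
 * holds the only reference to the old buffer; names are illustrative.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *current;

static void
replace_current(char *newbuf)
{
	char *old;

	pthread_mutex_lock(&lock);
	old = current;		/* remember what we are replacing */
	current = newbuf;	/* install the replacement */
	pthread_mutex_unlock(&lock);

	free(old);		/* deferred drop, outside the lock */
}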
*/ if (oldtextvp != NULL) vrele(oldtextvp); #ifdef KTRACE if (tracevp != NULL) vrele(tracevp); if (tracecred != NULL) crfree(tracecred); #endif vn_lock(imgp->vp, LK_SHARED | LK_RETRY); pargs_drop(oldargs); pargs_drop(newargs); if (oldsigacts != NULL) sigacts_free(oldsigacts); exec_fail_dealloc: /* * free various allocated resources */ if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); if (imgp->vp != NULL) { if (args->fname) NDFREE(&nd, NDF_ONLY_PNBUF); if (imgp->opened) VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td); if (error != 0) vput(imgp->vp); else VOP_UNLOCK(imgp->vp, 0); } if (imgp->object != NULL) vm_object_deallocate(imgp->object); free(imgp->freepath, M_TEMP); if (error == 0) { PROC_LOCK(p); td->td_dbgflags |= TDB_EXEC; PROC_UNLOCK(p); /* * Stop the process here if its stop event mask has * the S_EXEC bit set. */ STOPEVENT(p, S_EXEC, 0); goto done2; } exec_fail: /* we're done here, clear P_INEXEC */ PROC_LOCK(p); p->p_flag &= ~P_INEXEC; PROC_UNLOCK(p); SDT_PROBE1(proc, kernel, , exec__failure, error); done2: #ifdef MAC mac_execve_exit(imgp); mac_execve_interpreter_exit(interpvplabel); #endif exec_free_args(args); if (error && imgp->vmspace_destroyed) { /* sorry, no more process anymore. exit gracefully */ exit1(td, 0, SIGABRT); /* NOT REACHED */ } #ifdef KTRACE if (error == 0) ktrprocctor(p); #endif return (error); } int exec_map_first_page(imgp) struct image_params *imgp; { int rv, i; int initial_pagein; vm_page_t ma[VM_INITIAL_PAGEIN]; vm_object_t object; if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); object = imgp->vp->v_object; if (object == NULL) return (EACCES); VM_OBJECT_WLOCK(object); #if VM_NRESERVLEVEL > 0 vm_object_color(object, 0); #endif ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL); if (ma[0]->valid != VM_PAGE_BITS_ALL) { initial_pagein = VM_INITIAL_PAGEIN; if (initial_pagein > object->size) initial_pagein = object->size; for (i = 1; i < initial_pagein; i++) { if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) { if (ma[i]->valid) break; if (vm_page_tryxbusy(ma[i])) break; } else { ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); if (ma[i] == NULL) break; } } initial_pagein = i; rv = vm_pager_get_pages(object, ma, initial_pagein, 0); if (rv != VM_PAGER_OK) { vm_page_lock(ma[0]); vm_page_free(ma[0]); vm_page_unlock(ma[0]); VM_OBJECT_WUNLOCK(object); return (EIO); } } vm_page_xunbusy(ma[0]); vm_page_lock(ma[0]); vm_page_hold(ma[0]); vm_page_activate(ma[0]); vm_page_unlock(ma[0]); VM_OBJECT_WUNLOCK(object); imgp->firstpage = sf_buf_alloc(ma[0], 0); imgp->image_header = (char *)sf_buf_kva(imgp->firstpage); return (0); } void exec_unmap_first_page(imgp) struct image_params *imgp; { vm_page_t m; if (imgp->firstpage != NULL) { m = sf_buf_page(imgp->firstpage); sf_buf_free(imgp->firstpage); imgp->firstpage = NULL; vm_page_lock(m); vm_page_unhold(m); vm_page_unlock(m); } } /* * Destroy old address space, and allocate a new stack * The new stack is only SGROWSIZ large because it is grown * automatically in trap.c. 
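/*
 * Sketch of the stack-size clamping that exec_new_vmspace() applies when
 * an image supplies its own stack size: never exceed the hard limit, and
 * raise the soft limit when the image asks for more than it.  This is a
 * userspace approximation using getrlimit()/setrlimit().
 */
#include <sys/resource.h>

static rlim_t
clamp_stack_size(rlim_t requested)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_STACK, &rl) != 0)
		return (requested);
	if (requested > rl.rlim_max)
		requested = rl.rlim_max;
	if (requested > rl.rlim_cur) {
		rl.rlim_cur = requested;
		(void)setrlimit(RLIMIT_STACK, &rl);	/* best effort */
	}
	return (requested);
}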
*/ int exec_new_vmspace(imgp, sv) struct image_params *imgp; struct sysentvec *sv; { int error; struct proc *p = imgp->proc; struct vmspace *vmspace = p->p_vmspace; vm_object_t obj; struct rlimit rlim_stack; vm_offset_t sv_minuser, stack_addr; vm_map_t map; u_long ssiz; imgp->vmspace_destroyed = 1; imgp->sysent = sv; /* May be called with Giant held */ EVENTHANDLER_INVOKE(process_exec, p, imgp); /* * Blow away entire process VM, if address space not shared, * otherwise, create a new VM space so that other threads are * not disrupted */ map = &vmspace->vm_map; if (map_at_zero) sv_minuser = sv->sv_minuser; else sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE); if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser && vm_map_max(map) == sv->sv_maxuser) { shmexit(vmspace); pmap_remove_pages(vmspace_pmap(vmspace)); vm_map_remove(map, vm_map_min(map), vm_map_max(map)); } else { error = vmspace_exec(p, sv_minuser, sv->sv_maxuser); if (error) return (error); vmspace = p->p_vmspace; map = &vmspace->vm_map; } /* Map a shared page */ obj = sv->sv_shared_page_obj; if (obj != NULL) { vm_object_reference(obj); error = vm_map_fixed(map, obj, 0, sv->sv_shared_page_base, sv->sv_shared_page_len, VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_READ | VM_PROT_EXECUTE, MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE); if (error) { vm_object_deallocate(obj); return (error); } } /* Allocate a new stack */ if (imgp->stack_sz != 0) { ssiz = trunc_page(imgp->stack_sz); PROC_LOCK(p); lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack); PROC_UNLOCK(p); if (ssiz > rlim_stack.rlim_max) ssiz = rlim_stack.rlim_max; if (ssiz > rlim_stack.rlim_cur) { rlim_stack.rlim_cur = ssiz; kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack); } } else if (sv->sv_maxssiz != NULL) { ssiz = *sv->sv_maxssiz; } else { ssiz = maxssiz; } stack_addr = sv->sv_usrstack - ssiz; error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz, obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot : sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN); if (error) return (error); /* * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they * are still used to enforce the stack rlimit on the process stack. */ vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; vmspace->vm_maxsaddr = (char *)stack_addr; return (0); } /* * Copy out argument and environment strings from the old process address * space into the temporary string buffer. */ int exec_copyin_args(struct image_args *args, char *fname, enum uio_seg segflg, char **argv, char **envv) { u_long argp, envp; int error; size_t length; bzero(args, sizeof(*args)); if (argv == NULL) return (EFAULT); /* * Allocate demand-paged memory for the file name, argument, and * environment strings. */ error = exec_alloc_args(args); if (error != 0) return (error); /* * Copy the file name. */ if (fname != NULL) { args->fname = args->buf; error = (segflg == UIO_SYSSPACE) ? 
copystr(fname, args->fname, PATH_MAX, &length) : copyinstr(fname, args->fname, PATH_MAX, &length); if (error != 0) goto err_exit; } else length = 0; args->begin_argv = args->buf + length; args->endp = args->begin_argv; args->stringspace = ARG_MAX; /* * extract arguments first */ for (;;) { error = fueword(argv++, &argp); if (error == -1) { error = EFAULT; goto err_exit; } if (argp == 0) break; error = copyinstr((void *)(uintptr_t)argp, args->endp, args->stringspace, &length); if (error != 0) { if (error == ENAMETOOLONG) error = E2BIG; goto err_exit; } args->stringspace -= length; args->endp += length; args->argc++; } args->begin_envv = args->endp; /* * extract environment strings */ if (envv) { for (;;) { error = fueword(envv++, &envp); if (error == -1) { error = EFAULT; goto err_exit; } if (envp == 0) break; error = copyinstr((void *)(uintptr_t)envp, args->endp, args->stringspace, &length); if (error != 0) { if (error == ENAMETOOLONG) error = E2BIG; goto err_exit; } args->stringspace -= length; args->endp += length; args->envc++; } } return (0); err_exit: exec_free_args(args); return (error); } int exec_copyin_data_fds(struct thread *td, struct image_args *args, const void *data, size_t datalen, const int *fds, size_t fdslen) { struct filedesc *ofdp; const char *p; int *kfds; int error; memset(args, '\0', sizeof(*args)); ofdp = td->td_proc->p_fd; if (datalen >= ARG_MAX || fdslen > ofdp->fd_lastfile + 1) return (E2BIG); error = exec_alloc_args(args); if (error != 0) return (error); args->begin_argv = args->buf; args->stringspace = ARG_MAX; if (datalen > 0) { /* * Argument buffer has been provided. Copy it into the * kernel as a single string and add a terminating null * byte. */ error = copyin(data, args->begin_argv, datalen); if (error != 0) goto err_exit; args->begin_argv[datalen] = '\0'; args->endp = args->begin_argv + datalen + 1; args->stringspace -= datalen + 1; /* * Traditional argument counting. Count the number of * null bytes. */ for (p = args->begin_argv; p < args->endp; ++p) if (*p == '\0') ++args->argc; } else { /* No argument buffer provided. */ args->endp = args->begin_argv; } /* There are no environment variables. */ args->begin_envv = args->endp; /* Create new file descriptor table. */ kfds = malloc(fdslen * sizeof(int), M_TEMP, M_WAITOK); error = copyin(fds, kfds, fdslen * sizeof(int)); if (error != 0) { free(kfds, M_TEMP); goto err_exit; } error = fdcopy_remapped(ofdp, kfds, fdslen, &args->fdp); free(kfds, M_TEMP); if (error != 0) goto err_exit; return (0); err_exit: exec_free_args(args); return (error); } /* * Allocate temporary demand-paged, zero-filled memory for the file name, * argument, and environment strings. Returns zero if the allocation succeeds * and ENOMEM otherwise. */ int exec_alloc_args(struct image_args *args) { args->buf = (char *)kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX); return (args->buf != NULL ? 0 : ENOMEM); } void exec_free_args(struct image_args *args) { if (args->buf != NULL) { kmap_free_wakeup(exec_map, (vm_offset_t)args->buf, PATH_MAX + ARG_MAX); args->buf = NULL; } if (args->fname_buf != NULL) { free(args->fname_buf, M_TEMP); args->fname_buf = NULL; } if (args->fdp != NULL) fdescfree_remapped(args->fdp); } /* * Copy strings out to the new process address space, constructing new arg * and env vector tables. Return a pointer to the base so that it can be used * as the initial stack pointer. 
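/*
 * Rough layout arithmetic behind the vector table that exec_copyout_strings()
 * builds: argc + envc pointers plus two NULL terminators, with extra slots
 * reserved for the runtime loader when ELF auxargs are present.  AUX_SLOTS
 * is an illustrative stand-in for AT_COUNT * 2.
 */
#include <stddef.h>

#define AUX_SLOTS	64	/* illustrative value only */

static size_t
vector_table_bytes(int argc, int envc, int have_auxargs)
{
	size_t slots;

	slots = (size_t)argc + (size_t)envc + 2;	/* vectors + 2 NULLs */
	if (have_auxargs)
		slots += AUX_SLOTS;
	return (slots * sizeof(char *));
}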
*/ register_t * exec_copyout_strings(imgp) struct image_params *imgp; { int argc, envc; char **vectp; char *stringp; uintptr_t destp; register_t *stack_base; struct ps_strings *arginfo; struct proc *p; size_t execpath_len; int szsigcode, szps; char canary[sizeof(long) * 8]; szps = sizeof(pagesizes[0]) * MAXPAGESIZES; /* * Calculate string base and vector table pointers. * Also deal with signal trampoline code for this exec type. */ if (imgp->execpath != NULL && imgp->auxargs != NULL) execpath_len = strlen(imgp->execpath) + 1; else execpath_len = 0; p = imgp->proc; szsigcode = 0; arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings; if (p->p_sysent->sv_sigcode_base == 0) { if (p->p_sysent->sv_szsigcode != NULL) szsigcode = *(p->p_sysent->sv_szsigcode); } destp = (uintptr_t)arginfo; /* * install sigcode */ if (szsigcode != 0) { destp -= szsigcode; destp = rounddown2(destp, sizeof(void *)); copyout(p->p_sysent->sv_sigcode, (void *)destp, szsigcode); } /* * Copy the image path for the rtld. */ if (execpath_len != 0) { destp -= execpath_len; imgp->execpathp = destp; copyout(imgp->execpath, (void *)destp, execpath_len); } /* * Prepare the canary for SSP. */ arc4rand(canary, sizeof(canary), 0); destp -= sizeof(canary); imgp->canary = destp; copyout(canary, (void *)destp, sizeof(canary)); imgp->canarylen = sizeof(canary); /* * Prepare the pagesizes array. */ destp -= szps; destp = rounddown2(destp, sizeof(void *)); imgp->pagesizes = destp; copyout(pagesizes, (void *)destp, szps); imgp->pagesizeslen = szps; destp -= ARG_MAX - imgp->args->stringspace; destp = rounddown2(destp, sizeof(void *)); /* * If we have a valid auxargs ptr, prepare some room * on the stack. */ if (imgp->auxargs) { /* * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for * lower compatibility. */ imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size : (AT_COUNT * 2); /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets,and imgp->auxarg_size is room * for argument of Runtime loader. */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2 + imgp->auxarg_size) * sizeof(char *)); } else { /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) * sizeof(char *)); } /* * vectp also becomes our initial stack base */ stack_base = (register_t *)vectp; stringp = imgp->args->begin_argv; argc = imgp->args->argc; envc = imgp->args->envc; /* * Copy out strings - arguments and environment. */ copyout(stringp, (void *)destp, ARG_MAX - imgp->args->stringspace); /* * Fill in "ps_strings" struct for ps, w, etc. */ suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); suword32(&arginfo->ps_nargvstr, argc); /* * Fill in argument portion of vector table. */ for (; argc > 0; --argc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* a null vector table pointer separates the argp's from the envp's */ suword(vectp++, 0); suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); suword32(&arginfo->ps_nenvstr, envc); /* * Fill in environment portion of vector table. */ for (; envc > 0; --envc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* end of vector table is a null pointer */ suword(vectp, 0); return (stack_base); } /* * Check permissions of file to execute. * Called with imgp->vp locked. * Return 0 for success or error code on failure. 
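/*
 * Userspace approximation of the checks in exec_check_permissions(): the
 * file must be a regular, non-empty file with at least one execute bit
 * set.  The kernel additionally checks MNT_NOEXEC, MAC policy, VOP_ACCESS
 * and the open-for-write count; this sketch only covers the stat-visible
 * parts.
 */
#include <errno.h>
#include <sys/stat.h>

static int
looks_executable(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return (errno);
	if (!S_ISREG(st.st_mode) ||
	    (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)
		return (EACCES);
	if (st.st_size == 0)
		return (ENOEXEC);
	return (0);
}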
*/ int exec_check_permissions(imgp) struct image_params *imgp; { struct vnode *vp = imgp->vp; struct vattr *attr = imgp->attr; struct thread *td; int error, writecount; td = curthread; /* Get file attributes */ error = VOP_GETATTR(vp, attr, td->td_ucred); if (error) return (error); #ifdef MAC error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp); if (error) return (error); #endif /* * 1) Check if file execution is disabled for the filesystem that * this file resides on. * 2) Ensure that at least one execute bit is on. Otherwise, a * privileged user will always succeed, and we don't want this * to happen unless the file really is executable. * 3) Ensure that the file is a regular file. */ if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 || (attr->va_type != VREG)) return (EACCES); /* * Zero length files can't be exec'd */ if (attr->va_size == 0) return (ENOEXEC); /* * Check for execute permission to file based on current credentials. */ error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); if (error) return (error); /* * Check number of open-for-writes on the file and deny execution * if there are any. */ error = VOP_GET_WRITECOUNT(vp, &writecount); if (error != 0) return (error); if (writecount != 0) return (ETXTBSY); /* * Call filesystem specific open routine (which does nothing in the * general case). */ error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); if (error == 0) imgp->opened = 1; return (error); } /* * Exec handler registration */ int exec_register(execsw_arg) const struct execsw *execsw_arg; { const struct execsw **es, **xs, **newexecsw; int count = 2; /* New slot and trailing NULL */ if (execsw) for (es = execsw; *es; es++) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); if (newexecsw == NULL) return (ENOMEM); xs = newexecsw; if (execsw) for (es = execsw; *es; es++) *xs++ = *es; *xs++ = execsw_arg; *xs = NULL; if (execsw) free(execsw, M_TEMP); execsw = newexecsw; return (0); } int exec_unregister(execsw_arg) const struct execsw *execsw_arg; { const struct execsw **es, **xs, **newexecsw; int count = 1; if (execsw == NULL) panic("unregister with no handlers left?\n"); for (es = execsw; *es; es++) { if (*es == execsw_arg) break; } if (*es == NULL) return (ENOENT); for (es = execsw; *es; es++) if (*es != execsw_arg) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); if (newexecsw == NULL) return (ENOMEM); xs = newexecsw; for (es = execsw; *es; es++) if (*es != execsw_arg) *xs++ = *es; *xs = NULL; if (execsw) free(execsw, M_TEMP); execsw = newexecsw; return (0); } Index: user/ngie/more-tests2/sys/kern/kern_fork.c =================================================================== --- user/ngie/more-tests2/sys/kern/kern_fork.c (revision 288954) +++ user/ngie/more-tests2/sys/kern/kern_fork.c (revision 288955) @@ -1,1072 +1,1085 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include dtrace_fork_func_t dtrace_fasttrap_fork; #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE3(proc, kernel, , create, "struct proc *", "struct proc *", "int"); #ifndef _SYS_SYSPROTO_H_ struct fork_args { int dummy; }; #endif /* ARGSUSED */ int sys_fork(struct thread *td, struct fork_args *uap) { int error; struct proc *p2; error = fork1(td, RFFDG | RFPROC, 0, &p2, NULL, 0, NULL); if (error == 0) { td->td_retval[0] = p2->p_pid; td->td_retval[1] = 0; } return (error); } /* ARGUSED */ int sys_pdfork(td, uap) struct thread *td; struct pdfork_args *uap; { int error, fd; struct proc *p2; /* * It is necessary to return fd by reference because 0 is a valid file * descriptor number, and the child needs to be able to distinguish * itself from the parent using the return value. */ error = fork1(td, RFFDG | RFPROC | RFPROCDESC, 0, &p2, &fd, uap->flags, NULL); if (error == 0) { td->td_retval[0] = p2->p_pid; td->td_retval[1] = 0; error = copyout(&fd, uap->fdp, sizeof(fd)); } return (error); } /* ARGSUSED */ int sys_vfork(struct thread *td, struct vfork_args *uap) { int error, flags; struct proc *p2; flags = RFFDG | RFPROC | RFPPWAIT | RFMEM; error = fork1(td, flags, 0, &p2, NULL, 0, NULL); if (error == 0) { td->td_retval[0] = p2->p_pid; td->td_retval[1] = 0; } return (error); } int sys_rfork(struct thread *td, struct rfork_args *uap) { struct proc *p2; int error; /* Don't allow kernel-only flags. */ if ((uap->flags & RFKERNELONLY) != 0) return (EINVAL); AUDIT_ARG_FFLAGS(uap->flags); error = fork1(td, uap->flags, 0, &p2, NULL, 0, NULL); if (error == 0) { td->td_retval[0] = p2 ? 
p2->p_pid : 0; td->td_retval[1] = 0; } return (error); } int nprocs = 1; /* process 0 */ int lastpid = 0; SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, "Last used PID"); /* * Random component to lastpid generation. We mix in a random factor to make * it a little harder to predict. We sanity check the modulus value to avoid * doing it in critical paths. Don't let it be too small or we pointlessly * waste randomness entropy, and don't let it be impossibly large. Using a * modulus that is too big causes a LOT more process table scans and slows * down fork processing as the pidchecked caching is defeated. */ static int randompid = 0; static int sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) { int error, pid; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error != 0) return(error); sx_xlock(&allproc_lock); pid = randompid; error = sysctl_handle_int(oidp, &pid, 0, req); if (error == 0 && req->newptr != NULL) { if (pid < 0 || pid > pid_max - 100) /* out of range */ pid = pid_max - 100; else if (pid < 2) /* NOP */ pid = 0; else if (pid < 100) /* Make it reasonable */ pid = 100; randompid = pid; } sx_xunlock(&allproc_lock); return (error); } SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_kern_randompid, "I", "Random PID modulus"); static int fork_findpid(int flags) { struct proc *p; int trypid; static int pidchecked = 0; /* * Requires allproc_lock in order to iterate over the list * of processes, and proctree_lock to access p_pgrp. */ sx_assert(&allproc_lock, SX_LOCKED); sx_assert(&proctree_lock, SX_LOCKED); /* * Find an unused process ID. We remember a range of unused IDs * ready to use (from lastpid+1 through pidchecked-1). * * If RFHIGHPID is set (used during system boot), do not allocate * low-numbered pids. */ trypid = lastpid + 1; if (flags & RFHIGHPID) { if (trypid < 10) trypid = 10; } else { if (randompid) trypid += arc4random() % randompid; } retry: /* * If the process ID prototype has wrapped around, * restart somewhat above 0, as the low-numbered procs * tend to include daemons that don't exit. */ if (trypid >= pid_max) { trypid = trypid % pid_max; if (trypid < 100) trypid += 100; pidchecked = 0; } if (trypid >= pidchecked) { int doingzomb = 0; pidchecked = PID_MAX; /* * Scan the active and zombie procs to check whether this pid * is in use. Remember the lowest pid that's greater * than trypid, so we can avoid checking for a while. * * Avoid reuse of the process group id, session id or * the reaper subtree id. Note that for process group * and sessions, the amount of reserved pids is * limited by process limit. For the subtree ids, the * id is kept reserved only while there is a * non-reaped process in the subtree, so amount of * reserved pids is limited by process limit times * two. */ p = LIST_FIRST(&allproc); again: for (; p != NULL; p = LIST_NEXT(p, p_list)) { while (p->p_pid == trypid || p->p_reapsubtree == trypid || (p->p_pgrp != NULL && (p->p_pgrp->pg_id == trypid || (p->p_session != NULL && p->p_session->s_sid == trypid)))) { trypid++; if (trypid >= pidchecked) goto retry; } if (p->p_pid > trypid && pidchecked > p->p_pid) pidchecked = p->p_pid; if (p->p_pgrp != NULL) { if (p->p_pgrp->pg_id > trypid && pidchecked > p->p_pgrp->pg_id) pidchecked = p->p_pgrp->pg_id; if (p->p_session != NULL && p->p_session->s_sid > trypid && pidchecked > p->p_session->s_sid) pidchecked = p->p_session->s_sid; } } if (!doingzomb) { doingzomb = 1; p = LIST_FIRST(&zombproc); goto again; } } /* * RFHIGHPID does not mess with the lastpid counter during boot. 
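/*
 * Sketch of the wraparound step in fork_findpid() above: once the
 * candidate PID passes pid_max, wrap it but keep it above the low range
 * that tends to belong to long-lived daemons.  PID_MAX_GUESS is an
 * illustrative stand-in for the kernel's pid_max.
 */
#define PID_MAX_GUESS	99999

static int
wrap_pid(int trypid)
{
	if (trypid >= PID_MAX_GUESS) {
		trypid = trypid % PID_MAX_GUESS;
		if (trypid < 100)
			trypid += 100;
	}
	return (trypid);
}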
*/ if (flags & RFHIGHPID) pidchecked = 0; else lastpid = trypid; return (trypid); } static int fork_norfproc(struct thread *td, int flags) { int error; struct proc *p1; KASSERT((flags & RFPROC) == 0, ("fork_norfproc called with RFPROC set")); p1 = td->td_proc; if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && (flags & (RFCFDG | RFFDG))) { PROC_LOCK(p1); if (thread_single(p1, SINGLE_BOUNDARY)) { PROC_UNLOCK(p1); return (ERESTART); } PROC_UNLOCK(p1); } error = vm_forkproc(td, NULL, NULL, NULL, flags); if (error) goto fail; /* * Close all file descriptors. */ if (flags & RFCFDG) { struct filedesc *fdtmp; fdtmp = fdinit(td->td_proc->p_fd, false); fdescfree(td); p1->p_fd = fdtmp; } /* * Unshare file descriptors (from parent). */ if (flags & RFFDG) fdunshare(td); fail: if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && (flags & (RFCFDG | RFFDG))) { PROC_LOCK(p1); thread_single_end(p1, SINGLE_BOUNDARY); PROC_UNLOCK(p1); } return (error); } static void do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2, struct vmspace *vm2, int pdflags) { struct proc *p1, *pptr; int p2_held, trypid; struct filedesc *fd; struct filedesc_to_leader *fdtol; struct sigacts *newsigacts; sx_assert(&proctree_lock, SX_SLOCKED); sx_assert(&allproc_lock, SX_XLOCKED); p2_held = 0; p1 = td->td_proc; /* * Increment the nprocs resource before blocking can occur. There * are hard-limits as to the number of processes that can run. */ nprocs++; trypid = fork_findpid(flags); sx_sunlock(&proctree_lock); p2->p_state = PRS_NEW; /* protect against others */ p2->p_pid = trypid; AUDIT_ARG_PID(p2->p_pid); LIST_INSERT_HEAD(&allproc, p2, p_list); allproc_gen++; LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); tidhash_add(td2); PROC_LOCK(p2); PROC_LOCK(p1); sx_xunlock(&allproc_lock); bcopy(&p1->p_startcopy, &p2->p_startcopy, __rangeof(struct proc, p_startcopy, p_endcopy)); pargs_hold(p2->p_args); PROC_UNLOCK(p1); bzero(&p2->p_startzero, __rangeof(struct proc, p_startzero, p_endzero)); /* Tell the prison that we exist. */ prison_proc_hold(p2->p_ucred->cr_prison); PROC_UNLOCK(p2); /* * Malloc things while we don't hold any locks. */ if (flags & RFSIGSHARE) newsigacts = NULL; else newsigacts = sigacts_alloc(); /* * Copy filedesc. */ if (flags & RFCFDG) { fd = fdinit(p1->p_fd, false); fdtol = NULL; } else if (flags & RFFDG) { fd = fdcopy(p1->p_fd); fdtol = NULL; } else { fd = fdshare(p1->p_fd); if (p1->p_fdtol == NULL) p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL, p1->p_leader); if ((flags & RFTHREAD) != 0) { /* * Shared file descriptor table, and shared * process leaders. */ fdtol = p1->p_fdtol; FILEDESC_XLOCK(p1->p_fd); fdtol->fdl_refcount++; FILEDESC_XUNLOCK(p1->p_fd); } else { /* * Shared file descriptor table, and different * process leaders. */ fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p1->p_fd, p2); } } /* * Make a proc table entry for the new process. * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ PROC_LOCK(p2); PROC_LOCK(p1); bzero(&td2->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td2->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name)); td2->td_sigstk = td->td_sigstk; td2->td_flags = TDF_INMEM; td2->td_lend_user_pri = PRI_MAX; #ifdef VIMAGE td2->td_vnet = NULL; td2->td_vnet_lpush = NULL; #endif /* * Allow the scheduler to initialize the child. 
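/*
 * The descriptor-table decision in do_fork() above, reduced to its three
 * cases.  The flag macros here are placeholders only; the real rfork(2)
 * flag values are not reproduced.
 */
#define MY_RFCFDG	0x1	/* placeholder: start with a clean table */
#define MY_RFFDG	0x2	/* placeholder: copy the parent's table */

static const char *
fd_table_choice(int flags)
{
	if (flags & MY_RFCFDG)
		return ("new, empty table");
	else if (flags & MY_RFFDG)
		return ("private copy of the parent's table");
	else
		return ("shared with the parent");
}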
*/ thread_lock(td); sched_fork(td, td2); thread_unlock(td); /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. */ p2->p_flag = P_INMEM; p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC); p2->p_swtick = ticks; if (p1->p_flag & P_PROFIL) startprofclock(p2); /* * Whilst the proc lock is held, copy the VM domain data out * using the VM domain method. */ vm_domain_policy_init(&p2->p_vm_dom_policy); vm_domain_policy_localcopy(&p2->p_vm_dom_policy, &p1->p_vm_dom_policy); if (flags & RFSIGSHARE) { p2->p_sigacts = sigacts_hold(p1->p_sigacts); } else { sigacts_copy(newsigacts, p1->p_sigacts); p2->p_sigacts = newsigacts; } if (flags & RFTSIGZMB) p2->p_sigparent = RFTSIGNUM(flags); else if (flags & RFLINUXTHPN) p2->p_sigparent = SIGUSR1; else p2->p_sigparent = SIGCHLD; p2->p_textvp = p1->p_textvp; p2->p_fd = fd; p2->p_fdtol = fdtol; if (p1->p_flag2 & P2_INHERIT_PROTECTED) { p2->p_flag |= P_PROTECTED; p2->p_flag2 |= P2_INHERIT_PROTECTED; } /* * p_limit is copy-on-write. Bump its refcount. */ lim_fork(p1, p2); thread_cow_get_proc(td2, p2); pstats_fork(p1->p_stats, p2->p_stats); PROC_UNLOCK(p1); PROC_UNLOCK(p2); /* Bump references to the text vnode (for procfs). */ if (p2->p_textvp) vref(p2->p_textvp); /* * Set up linkage for kernel based threading. */ if ((flags & RFTHREAD) != 0) { mtx_lock(&ppeers_lock); p2->p_peers = p1->p_peers; p1->p_peers = p2; p2->p_leader = p1->p_leader; mtx_unlock(&ppeers_lock); PROC_LOCK(p1->p_leader); if ((p1->p_leader->p_flag & P_WEXIT) != 0) { PROC_UNLOCK(p1->p_leader); /* * The task leader is exiting, so process p1 is * going to be killed shortly. Since p1 obviously * isn't dead yet, we know that the leader is either * sending SIGKILL's to all the processes in this * task or is sleeping waiting for all the peers to * exit. We let p1 complete the fork, but we need * to go ahead and kill the new process p2 since * the task leader may not get a chance to send * SIGKILL to it. We leave it on the list so that * the task leader will wait for this new process * to commit suicide. */ PROC_LOCK(p2); kern_psignal(p2, SIGKILL); PROC_UNLOCK(p2); } else PROC_UNLOCK(p1->p_leader); } else { p2->p_peers = NULL; p2->p_leader = p2; } sx_xlock(&proctree_lock); PGRP_LOCK(p1->p_pgrp); PROC_LOCK(p2); PROC_LOCK(p1); /* * Preserve some more flags in subprocess. P_PROFIL has already * been preserved. */ p2->p_flag |= p1->p_flag & P_SUGID; td2->td_pflags |= td->td_pflags & TDP_ALTSTACK; SESS_LOCK(p1->p_session); if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) p2->p_flag |= P_CONTROLT; SESS_UNLOCK(p1->p_session); if (flags & RFPPWAIT) p2->p_flag |= P_PPWAIT; p2->p_pgrp = p1->p_pgrp; LIST_INSERT_AFTER(p1, p2, p_pglist); PGRP_UNLOCK(p1->p_pgrp); LIST_INIT(&p2->p_children); LIST_INIT(&p2->p_orphans); callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0); /* * If PF_FORK is set, the child process inherits the * procfs ioctl flags from its parent. */ if (p1->p_pfsflags & PF_FORK) { p2->p_stops = p1->p_stops; p2->p_pfsflags = p1->p_pfsflags; } /* * This begins the section where we must prevent the parent * from being swapped. */ _PHOLD(p1); PROC_UNLOCK(p1); /* * Attach the new process to its parent. * * If RFNOWAIT is set, the newly created process becomes a child * of init. This effectively disassociates the child from the * parent. */ if ((flags & RFNOWAIT) != 0) { pptr = p1->p_reaper; p2->p_reaper = pptr; } else { p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ? 
p1 : p1->p_reaper; pptr = p1; } p2->p_pptr = pptr; LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); LIST_INIT(&p2->p_reaplist); LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling); if (p2->p_reaper == p1) p2->p_reapsubtree = p2->p_pid; sx_xunlock(&proctree_lock); /* Inform accounting that we have forked. */ p2->p_acflag = AFORK; PROC_UNLOCK(p2); #ifdef KTRACE ktrprocfork(p1, p2); #endif /* * Finish creating the child process. It will return via a different * execution path later. (ie: directly into user mode) */ vm_forkproc(td, p2, td2, vm2, flags); if (flags == (RFFDG | RFPROC)) { PCPU_INC(cnt.v_forks); PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { PCPU_INC(cnt.v_vforks); PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (p1 == &proc0) { PCPU_INC(cnt.v_kthreads); PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else { PCPU_INC(cnt.v_rforks); PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } /* * Associate the process descriptor with the process before anything * can happen that might cause that process to need the descriptor. * However, don't do this until after fork(2) can no longer fail. */ if (flags & RFPROCDESC) procdesc_new(p2, pdflags); /* * Both processes are set up, now check if any loadable modules want * to adjust anything. */ EVENTHANDLER_INVOKE(process_fork, p1, p2, flags); /* * Set the child start time and mark the process as being complete. */ PROC_LOCK(p2); PROC_LOCK(p1); microuptime(&p2->p_stats->p_start); PROC_SLOCK(p2); p2->p_state = PRS_NORMAL; PROC_SUNLOCK(p2); #ifdef KDTRACE_HOOKS /* * Tell the DTrace fasttrap provider about the new process so that any * tracepoints inherited from the parent can be removed. We have to do * this only after p_state is PRS_NORMAL since the fasttrap module will * use pfind() later on. */ if ((flags & RFMEM) == 0 && dtrace_fasttrap_fork) dtrace_fasttrap_fork(p1, p2); #endif if ((p1->p_flag & (P_TRACED | P_FOLLOWFORK)) == (P_TRACED | P_FOLLOWFORK)) { /* * Arrange for debugger to receive the fork event. * * We can report PL_FLAG_FORKED regardless of * P_FOLLOWFORK settings, but it does not make a sense * for runaway child. */ td->td_dbgflags |= TDB_FORK; td->td_dbg_forked = p2->p_pid; td2->td_dbgflags |= TDB_STOPATFORK; _PHOLD(p2); p2_held = 1; } if (flags & RFPPWAIT) { td->td_pflags |= TDP_RFPPWAIT; td->td_rfppwait_p = p2; } PROC_UNLOCK(p2); if ((flags & RFSTOPPED) == 0) { /* * If RFSTOPPED not requested, make child runnable and * add to run queue. */ thread_lock(td2); TD_SET_CAN_RUN(td2); sched_add(td2, SRQ_BORING); thread_unlock(td2); } /* * Now can be swapped. */ _PRELE(p1); PROC_UNLOCK(p1); /* * Tell any interested parties about the new process. */ knote_fork(&p1->p_klist, p2->p_pid); SDT_PROBE3(proc, kernel, , create, p2, p1, flags); /* * Wait until debugger is attached to child. */ PROC_LOCK(p2); while ((td2->td_dbgflags & TDB_STOPATFORK) != 0) cv_wait(&p2->p_dbgwait, &p2->p_mtx); if (p2_held) _PRELE(p2); PROC_UNLOCK(p2); } int fork1(struct thread *td, int flags, int pages, struct proc **procp, int *procdescp, int pdflags, struct filecaps *fcaps) { struct proc *p1; struct proc *newproc; int ok; struct thread *td2; struct vmspace *vm2; vm_ooffset_t mem_charged; int error; static int curfail; static struct timeval lastfail; struct file *fp_procdesc = NULL; /* Check for the undefined or unimplemented flags. 
*/ if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0) return (EINVAL); /* Signal value requires RFTSIGZMB. */ if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0) return (EINVAL); /* Can't copy and clear. */ if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) return (EINVAL); /* Check the validity of the signal number. */ if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG) return (EINVAL); if ((flags & RFPROCDESC) != 0) { /* Can't not create a process yet get a process descriptor. */ if ((flags & RFPROC) == 0) return (EINVAL); /* Must provide a place to put a procdesc if creating one. */ if (procdescp == NULL) return (EINVAL); } p1 = td->td_proc; /* * Here we don't create a new process, but we divorce * certain parts of a process from itself. */ if ((flags & RFPROC) == 0) { *procp = NULL; return (fork_norfproc(td, flags)); } /* * If required, create a process descriptor in the parent first; we * will abandon it if something goes wrong. We don't finit() until * later. */ if (flags & RFPROCDESC) { error = falloc_caps(td, &fp_procdesc, procdescp, 0, fcaps); if (error != 0) return (error); } mem_charged = 0; vm2 = NULL; if (pages == 0) pages = kstack_pages; /* Allocate new proc. */ newproc = uma_zalloc(proc_zone, M_WAITOK); td2 = FIRST_THREAD_IN_PROC(newproc); if (td2 == NULL) { td2 = thread_alloc(pages); if (td2 == NULL) { error = ENOMEM; goto fail2; } proc_linkup(newproc, td2); } else { if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) { if (td2->td_kstack != 0) vm_thread_dispose(td2); if (!thread_alloc_stack(td2, pages)) { error = ENOMEM; goto fail2; } } } if ((flags & RFMEM) == 0) { vm2 = vmspace_fork(p1->p_vmspace, &mem_charged); if (vm2 == NULL) { error = ENOMEM; goto fail2; } if (!swap_reserve(mem_charged)) { /* * The swap reservation failed. The accounting * from the entries of the copied vm2 will be * substracted in vmspace_free(), so force the * reservation there. */ swap_reserve_force(mem_charged); error = ENOMEM; goto fail2; } } else vm2 = NULL; /* * XXX: This is ugly; when we copy resource usage, we need to bump * per-cred resource counters. */ proc_set_cred_init(newproc, crhold(td->td_ucred)); /* * Initialize resource accounting for the child process. */ error = racct_proc_fork(p1, newproc); if (error != 0) { error = EAGAIN; goto fail1; } #ifdef MAC mac_proc_init(newproc); #endif knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx); STAILQ_INIT(&newproc->p_ktr); /* We have to lock the process tree while we look for a pid. */ sx_slock(&proctree_lock); /* * Although process entries are dynamically created, we still keep * a global limit on the maximum number we will create. Don't allow * a nonprivileged user to use the last ten processes; don't let root * exceed the limit. The variable nprocs is the current number of * processes, maxproc is the limit. */ sx_xlock(&allproc_lock); if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred, PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) { error = EAGAIN; goto fail; } /* * Increment the count of procs running with this uid. Don't allow * a nonprivileged user to exceed their current limit. * * XXXRW: Can we avoid privilege here if it's not needed? */ error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0); if (error == 0) ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0); else { ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPROC)); } if (ok) { do_fork(td, flags, newproc, td2, vm2, pdflags); /* * Return child proc pointer to parent. 
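[Editorial illustration, not part of this change: the RFTSIGZMB validation in fork1() above lets a caller pick which signal announces the child's exit instead of SIGCHLD. A hedged userland sketch of that usage, assuming RFTSIGZMB and RFTSIGFLAGS() are exposed through <unistd.h> as on FreeBSD.]

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t exit_signal_seen;

static void
on_usr1(int sig)
{
	(void)sig;
	exit_signal_seen = 1;
}

int
main(void)
{
	pid_t pid;

	signal(SIGUSR1, on_usr1);
	/* Ask for SIGUSR1 (rather than SIGCHLD) when the child exits. */
	pid = rfork(RFPROC | RFFDG | RFTSIGZMB | RFTSIGFLAGS(SIGUSR1));
	if (pid == -1) {
		perror("rfork");
		return (1);
	}
	if (pid == 0)
		_exit(0);		/* child exits right away */
	waitpid(pid, NULL, 0);		/* reaping still works as usual */
	printf("child %d reaped, SIGUSR1 seen: %d\n", (int)pid,
	    (int)exit_signal_seen);
	return (0);
}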
*/ *procp = newproc; if (flags & RFPROCDESC) { procdesc_finit(newproc->p_procdesc, fp_procdesc); fdrop(fp_procdesc, td); } racct_proc_fork_done(newproc); return (0); } error = EAGAIN; fail: sx_sunlock(&proctree_lock); if (ppsratecheck(&lastfail, &curfail, 1)) printf("maxproc limit exceeded by uid %u (pid %d); see tuning(7) and login.conf(5)\n", td->td_ucred->cr_ruid, p1->p_pid); sx_xunlock(&allproc_lock); #ifdef MAC mac_proc_destroy(newproc); #endif racct_proc_exit(newproc); fail1: crfree(newproc->p_ucred); newproc->p_ucred = NULL; fail2: if (vm2 != NULL) vmspace_free(vm2); uma_zfree(proc_zone, newproc); if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) { fdclose(td, fp_procdesc, *procdescp); fdrop(fp_procdesc, td); } pause("fork", hz / 2); return (error); } /* * Handle the return of a child process from fork1(). This function * is called from the MD fork_trampoline() entry point. */ void fork_exit(void (*callout)(void *, struct trapframe *), void *arg, struct trapframe *frame) { struct proc *p; struct thread *td; struct thread *dtd; td = curthread; p = td->td_proc; KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", td, td->td_sched, p->p_pid, td->td_name); sched_fork_exit(td); /* * Processes normally resume in mi_switch() after being * cpu_switch()'ed to, but when children start up they arrive here * instead, so we must do much the same things as mi_switch() would. */ if ((dtd = PCPU_GET(deadthread))) { PCPU_SET(deadthread, NULL); thread_stash(dtd); } thread_unlock(td); /* * cpu_set_fork_handler intercepts this function call to * have this call a non-return function to stay in kernel mode. * initproc has its own fork handler, but it does return. */ KASSERT(callout != NULL, ("NULL callout in fork_exit")); callout(arg, frame); /* * Check if a kernel thread misbehaved and returned from its main * function. */ if (p->p_flag & P_KTHREAD) { printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", td->td_name, p->p_pid); kproc_exit(0); } mtx_assert(&Giant, MA_NOTOWNED); if (p->p_sysent->sv_schedtail != NULL) (p->p_sysent->sv_schedtail)(td); } /* * Simplified back end of syscall(), used when returning from fork() * directly into user mode. Giant is not held on entry, and must not * be held on return. This function is passed in to fork_exit() as the * first parameter and is called when returning to a new userland process. */ void fork_return(struct thread *td, struct trapframe *frame) { struct proc *p, *dbg; + p = td->td_proc; if (td->td_dbgflags & TDB_STOPATFORK) { - p = td->td_proc; sx_xlock(&proctree_lock); PROC_LOCK(p); if ((p->p_pptr->p_flag & (P_TRACED | P_FOLLOWFORK)) == (P_TRACED | P_FOLLOWFORK)) { /* * If debugger still wants auto-attach for the * parent's children, do it now. */ dbg = p->p_pptr->p_pptr; p->p_flag |= P_TRACED; p->p_oppid = p->p_pptr->p_pid; CTR2(KTR_PTRACE, "fork_return: attaching to new child pid %d: oppid %d", p->p_pid, p->p_oppid); proc_reparent(p, dbg); sx_xunlock(&proctree_lock); - td->td_dbgflags |= TDB_CHILD; + td->td_dbgflags |= TDB_CHILD | TDB_SCX; ptracestop(td, SIGSTOP); - td->td_dbgflags &= ~TDB_CHILD; + td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX); } else { /* * ... otherwise clear the request. */ sx_xunlock(&proctree_lock); td->td_dbgflags &= ~TDB_STOPATFORK; cv_broadcast(&p->p_dbgwait); } + PROC_UNLOCK(p); + } else if (p->p_flag & P_TRACED) { + /* + * This is the start of a new thread in a traced + * process. Report a system call exit event. 
+ */ + PROC_LOCK(p); + td->td_dbgflags |= TDB_SCX; + _STOPEVENT(p, S_SCX, td->td_dbg_sc_code); + if ((p->p_stops & S_PT_SCX) != 0) + ptracestop(td, SIGTRAP); + td->td_dbgflags &= ~TDB_SCX; PROC_UNLOCK(p); } userret(td, frame); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) ktrsysret(SYS_fork, 0, 0); #endif } Index: user/ngie/more-tests2/sys/kern/kern_proc.c =================================================================== --- user/ngie/more-tests2/sys/kern/kern_proc.c (revision 288954) +++ user/ngie/more-tests2/sys/kern/kern_proc.c (revision 288955) @@ -1,3061 +1,3074 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95 */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ddb.h" #include "opt_ktrace.h" #include "opt_kstack_pages.h" #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include #include #include #include #include #include #include #ifdef COMPAT_FREEBSD32 #include #include #endif SDT_PROVIDER_DEFINE(proc); SDT_PROBE_DEFINE4(proc, kernel, ctor, entry, "struct proc *", "int", "void *", "int"); SDT_PROBE_DEFINE4(proc, kernel, ctor, return, "struct proc *", "int", "void *", "int"); SDT_PROBE_DEFINE4(proc, kernel, dtor, entry, "struct proc *", "int", "void *", "struct thread *"); SDT_PROBE_DEFINE3(proc, kernel, dtor, return, "struct proc *", "int", "void *"); SDT_PROBE_DEFINE3(proc, kernel, init, entry, "struct proc *", "int", "int"); SDT_PROBE_DEFINE3(proc, kernel, init, return, "struct proc *", "int", "int"); MALLOC_DEFINE(M_PGRP, "pgrp", "process group header"); MALLOC_DEFINE(M_SESSION, "session", "session header"); static MALLOC_DEFINE(M_PROC, "proc", "Proc structures"); MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures"); static void doenterpgrp(struct proc *, struct pgrp *); static void orphanpg(struct pgrp *pg); static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp); static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp); static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread); static void pgadjustjobc(struct pgrp *pgrp, int entering); static void pgdelete(struct pgrp *); static int proc_ctor(void *mem, int size, void *arg, int flags); static void proc_dtor(void *mem, int size, void *arg); static int proc_init(void *mem, int size, int flags); static void proc_fini(void *mem, int size); static void pargs_free(struct pargs *pa); static struct proc *zpfind_locked(pid_t pid); /* * Other process lists */ struct pidhashhead *pidhashtbl; u_long pidhash; struct pgrphashhead *pgrphashtbl; u_long pgrphash; struct proclist allproc; struct proclist zombproc; struct sx allproc_lock; struct sx proctree_lock; struct mtx ppeers_lock; uma_zone_t proc_zone; int kstack_pages = KSTACK_PAGES; SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "Kernel stack size in pages"); static int vmmap_skip_res_cnt = 0; SYSCTL_INT(_kern, OID_AUTO, proc_vmmap_skip_resident_count, CTLFLAG_RW, &vmmap_skip_res_cnt, 0, "Skip calculation of the pages resident count in kern.proc.vmmap"); CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #ifdef COMPAT_FREEBSD32 CTASSERT(sizeof(struct kinfo_proc32) == KINFO_PROC32_SIZE); #endif /* * Initialize global process hashing structures. */ void procinit() { sx_init(&allproc_lock, "allproc"); sx_init(&proctree_lock, "proctree"); mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF); LIST_INIT(&allproc); LIST_INIT(&zombproc); pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash); pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash); proc_zone = uma_zcreate("PROC", sched_sizeof_proc(), proc_ctor, proc_dtor, proc_init, proc_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uihashinit(); } /* * Prepare a proc for use. 
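[Editorial illustration, not part of this change: procinit() above sizes pidhashtbl and pgrphashtbl with hashinit() and later lookups use PIDHASH()/PGRPHASH(), i.e. a power-of-two bucket count with the mask kept alongside the table. A hedged, self-contained userland re-implementation of that scheme; mask_for() is a hypothetical stand-in for hashinit(9).]

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct pent {
	LIST_ENTRY(pent) link;
	int pid;
};
LIST_HEAD(penthead, pent);

/*
 * Largest power of two not greater than 'elements', the rounding applied
 * before the bucket mask is returned (illustration only).
 */
static unsigned long
mask_for(int elements)
{
	unsigned long hashsize;

	for (hashsize = 1; hashsize <= (unsigned long)elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	return (hashsize - 1);
}

int
main(void)
{
	struct penthead *tbl;
	struct pent e = { .pid = 1234 }, *p;
	unsigned long mask = mask_for(1000);	/* stands in for maxproc / 4 */
	unsigned long i;

	tbl = calloc(mask + 1, sizeof(*tbl));
	if (tbl == NULL)
		return (1);
	for (i = 0; i <= mask; i++)
		LIST_INIT(&tbl[i]);
	/* The kernel's PIDHASH(pid) is "&pidhashtbl[pid & pidhash]". */
	LIST_INSERT_HEAD(&tbl[e.pid & mask], &e, link);
	LIST_FOREACH(p, &tbl[1234 & mask], link)
		if (p->pid == 1234)
			printf("found pid %d in bucket %lu\n", p->pid,
			    1234 & mask);
	free(tbl);
	return (0);
}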
*/ static int proc_ctor(void *mem, int size, void *arg, int flags) { struct proc *p; p = (struct proc *)mem; SDT_PROBE4(proc, kernel, ctor , entry, p, size, arg, flags); EVENTHANDLER_INVOKE(process_ctor, p); SDT_PROBE4(proc, kernel, ctor , return, p, size, arg, flags); return (0); } /* * Reclaim a proc after use. */ static void proc_dtor(void *mem, int size, void *arg) { struct proc *p; struct thread *td; /* INVARIANTS checks go here */ p = (struct proc *)mem; td = FIRST_THREAD_IN_PROC(p); SDT_PROBE4(proc, kernel, dtor, entry, p, size, arg, td); if (td != NULL) { #ifdef INVARIANTS KASSERT((p->p_numthreads == 1), ("bad number of threads in exiting process")); KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr")); #endif /* Free all OSD associated to this thread. */ osd_thread_exit(td); } EVENTHANDLER_INVOKE(process_dtor, p); if (p->p_ksi != NULL) KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue")); SDT_PROBE3(proc, kernel, dtor, return, p, size, arg); } /* * Initialize type-stable parts of a proc (when newly created). */ static int proc_init(void *mem, int size, int flags) { struct proc *p; p = (struct proc *)mem; SDT_PROBE3(proc, kernel, init, entry, p, size, flags); p->p_sched = (struct p_sched *)&p[1]; mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK | MTX_NEW); mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_NEW); mtx_init(&p->p_statmtx, "pstatl", NULL, MTX_SPIN | MTX_NEW); mtx_init(&p->p_itimmtx, "pitiml", NULL, MTX_SPIN | MTX_NEW); mtx_init(&p->p_profmtx, "pprofl", NULL, MTX_SPIN | MTX_NEW); cv_init(&p->p_pwait, "ppwait"); cv_init(&p->p_dbgwait, "dbgwait"); TAILQ_INIT(&p->p_threads); /* all threads in proc */ EVENTHANDLER_INVOKE(process_init, p); p->p_stats = pstats_alloc(); SDT_PROBE3(proc, kernel, init, return, p, size, flags); return (0); } /* * UMA should ensure that this function is never called. * Freeing a proc structure would violate type stability. */ static void proc_fini(void *mem, int size) { #ifdef notnow struct proc *p; p = (struct proc *)mem; EVENTHANDLER_INVOKE(process_fini, p); pstats_free(p->p_stats); thread_free(FIRST_THREAD_IN_PROC(p)); mtx_destroy(&p->p_mtx); if (p->p_ksi != NULL) ksiginfo_free(p->p_ksi); #else panic("proc reclaimed"); #endif } /* * Is p an inferior of the current process? */ int inferior(struct proc *p) { sx_assert(&proctree_lock, SX_LOCKED); PROC_LOCK_ASSERT(p, MA_OWNED); for (; p != curproc; p = proc_realparent(p)) { if (p->p_pid == 0) return (0); } return (1); } struct proc * pfind_locked(pid_t pid) { struct proc *p; sx_assert(&allproc_lock, SX_LOCKED); LIST_FOREACH(p, PIDHASH(pid), p_hash) { if (p->p_pid == pid) { PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); p = NULL; } break; } } return (p); } /* * Locate a process by number; return only "live" processes -- i.e., neither * zombies nor newly born but incompletely initialized processes. By not * returning processes in the PRS_NEW state, we allow callers to avoid * testing for that condition to avoid dereferencing p_ucred, et al. */ struct proc * pfind(pid_t pid) { struct proc *p; sx_slock(&allproc_lock); p = pfind_locked(pid); sx_sunlock(&allproc_lock); return (p); } static struct proc * pfind_tid_locked(pid_t tid) { struct proc *p; struct thread *td; sx_assert(&allproc_lock, SX_LOCKED); FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); continue; } FOREACH_THREAD_IN_PROC(p, td) { if (td->td_tid == tid) goto found; } PROC_UNLOCK(p); } found: return (p); } /* * Locate a process group by number. 
* The caller must hold proctree_lock. */ struct pgrp * pgfind(pgid) register pid_t pgid; { register struct pgrp *pgrp; sx_assert(&proctree_lock, SX_LOCKED); LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) { if (pgrp->pg_id == pgid) { PGRP_LOCK(pgrp); return (pgrp); } } return (NULL); } /* * Locate process and do additional manipulations, depending on flags. */ int pget(pid_t pid, int flags, struct proc **pp) { struct proc *p; int error; sx_slock(&allproc_lock); if (pid <= PID_MAX) { p = pfind_locked(pid); if (p == NULL && (flags & PGET_NOTWEXIT) == 0) p = zpfind_locked(pid); } else if ((flags & PGET_NOTID) == 0) { p = pfind_tid_locked(pid); } else { p = NULL; } sx_sunlock(&allproc_lock); if (p == NULL) return (ESRCH); if ((flags & PGET_CANSEE) != 0) { error = p_cansee(curthread, p); if (error != 0) goto errout; } if ((flags & PGET_CANDEBUG) != 0) { error = p_candebug(curthread, p); if (error != 0) goto errout; } if ((flags & PGET_ISCURRENT) != 0 && curproc != p) { error = EPERM; goto errout; } if ((flags & PGET_NOTWEXIT) != 0 && (p->p_flag & P_WEXIT) != 0) { error = ESRCH; goto errout; } if ((flags & PGET_NOTINEXEC) != 0 && (p->p_flag & P_INEXEC) != 0) { /* * XXXRW: Not clear ESRCH is the right error during proc * execve(). */ error = ESRCH; goto errout; } if ((flags & PGET_HOLD) != 0) { _PHOLD(p); PROC_UNLOCK(p); } *pp = p; return (0); errout: PROC_UNLOCK(p); return (error); } /* * Create a new process group. * pgid must be equal to the pid of p. * Begin a new session if required. */ int enterpgrp(p, pgid, pgrp, sess) register struct proc *p; pid_t pgid; struct pgrp *pgrp; struct session *sess; { sx_assert(&proctree_lock, SX_XLOCKED); KASSERT(pgrp != NULL, ("enterpgrp: pgrp == NULL")); KASSERT(p->p_pid == pgid, ("enterpgrp: new pgrp and pid != pgid")); KASSERT(pgfind(pgid) == NULL, ("enterpgrp: pgrp with pgid exists")); KASSERT(!SESS_LEADER(p), ("enterpgrp: session leader attempted setpgrp")); mtx_init(&pgrp->pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK); if (sess != NULL) { /* * new session */ mtx_init(&sess->s_mtx, "session", NULL, MTX_DEF); PROC_LOCK(p); p->p_flag &= ~P_CONTROLT; PROC_UNLOCK(p); PGRP_LOCK(pgrp); sess->s_leader = p; sess->s_sid = p->p_pid; refcount_init(&sess->s_count, 1); sess->s_ttyvp = NULL; sess->s_ttydp = NULL; sess->s_ttyp = NULL; bcopy(p->p_session->s_login, sess->s_login, sizeof(sess->s_login)); pgrp->pg_session = sess; KASSERT(p == curproc, ("enterpgrp: mksession and p != curproc")); } else { pgrp->pg_session = p->p_session; sess_hold(pgrp->pg_session); PGRP_LOCK(pgrp); } pgrp->pg_id = pgid; LIST_INIT(&pgrp->pg_members); /* * As we have an exclusive lock of proctree_lock, * this should not deadlock. 
*/ LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash); pgrp->pg_jobc = 0; SLIST_INIT(&pgrp->pg_sigiolst); PGRP_UNLOCK(pgrp); doenterpgrp(p, pgrp); return (0); } /* * Move p to an existing process group */ int enterthispgrp(p, pgrp) register struct proc *p; struct pgrp *pgrp; { sx_assert(&proctree_lock, SX_XLOCKED); PROC_LOCK_ASSERT(p, MA_NOTOWNED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED); KASSERT(pgrp->pg_session == p->p_session, ("%s: pgrp's session %p, p->p_session %p.\n", __func__, pgrp->pg_session, p->p_session)); KASSERT(pgrp != p->p_pgrp, ("%s: p belongs to pgrp.", __func__)); doenterpgrp(p, pgrp); return (0); } /* * Move p to a process group */ static void doenterpgrp(p, pgrp) struct proc *p; struct pgrp *pgrp; { struct pgrp *savepgrp; sx_assert(&proctree_lock, SX_XLOCKED); PROC_LOCK_ASSERT(p, MA_NOTOWNED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); PGRP_LOCK_ASSERT(p->p_pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(p->p_session, MA_NOTOWNED); savepgrp = p->p_pgrp; /* * Adjust eligibility of affected pgrps to participate in job control. * Increment eligibility counts before decrementing, otherwise we * could reach 0 spuriously during the first call. */ fixjobc(p, pgrp, 1); fixjobc(p, p->p_pgrp, 0); PGRP_LOCK(pgrp); PGRP_LOCK(savepgrp); PROC_LOCK(p); LIST_REMOVE(p, p_pglist); p->p_pgrp = pgrp; PROC_UNLOCK(p); LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist); PGRP_UNLOCK(savepgrp); PGRP_UNLOCK(pgrp); if (LIST_EMPTY(&savepgrp->pg_members)) pgdelete(savepgrp); } /* * remove process from process group */ int leavepgrp(p) register struct proc *p; { struct pgrp *savepgrp; sx_assert(&proctree_lock, SX_XLOCKED); savepgrp = p->p_pgrp; PGRP_LOCK(savepgrp); PROC_LOCK(p); LIST_REMOVE(p, p_pglist); p->p_pgrp = NULL; PROC_UNLOCK(p); PGRP_UNLOCK(savepgrp); if (LIST_EMPTY(&savepgrp->pg_members)) pgdelete(savepgrp); return (0); } /* * delete a process group */ static void pgdelete(pgrp) register struct pgrp *pgrp; { struct session *savesess; struct tty *tp; sx_assert(&proctree_lock, SX_XLOCKED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED); /* * Reset any sigio structures pointing to us as a result of * F_SETOWN with our pgid. */ funsetownlst(&pgrp->pg_sigiolst); PGRP_LOCK(pgrp); tp = pgrp->pg_session->s_ttyp; LIST_REMOVE(pgrp, pg_hash); savesess = pgrp->pg_session; PGRP_UNLOCK(pgrp); /* Remove the reference to the pgrp before deallocating it. */ if (tp != NULL) { tty_lock(tp); tty_rel_pgrp(tp, pgrp); } mtx_destroy(&pgrp->pg_mtx); free(pgrp, M_PGRP); sess_release(savesess); } static void pgadjustjobc(pgrp, entering) struct pgrp *pgrp; int entering; { PGRP_LOCK(pgrp); if (entering) pgrp->pg_jobc++; else { --pgrp->pg_jobc; if (pgrp->pg_jobc == 0) orphanpg(pgrp); } PGRP_UNLOCK(pgrp); } /* * Adjust pgrp jobc counters when specified process changes process group. * We count the number of processes in each process group that "qualify" * the group for terminal job control (those with a parent in a different * process group of the same session). If that count reaches zero, the * process group becomes orphaned. Check both the specified process' * process group and that of its children. * entering == 0 => p is leaving specified group. * entering == 1 => p is entering specified group. 
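[Editorial illustration, not part of this change: enterpgrp() and fixjobc() above maintain the per-pgrp job-control count that decides when a group becomes orphaned. From userland those transitions are driven by setsid(2) and setpgid(2); a hedged minimal sketch of both calls.]

#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = fork();

	if (pid == -1) {
		perror("fork");
		return (1);
	}
	if (pid == 0) {
		/*
		 * Child: become a session leader.  In the kernel this goes
		 * through enterpgrp() with a new session, which also clears
		 * the controlling-terminal flag (P_CONTROLT).
		 */
		if (setsid() == -1) {
			perror("setsid");
			_exit(1);
		}
		printf("child %d: new session, process group %d\n",
		    (int)getpid(), (int)getpgrp());
		_exit(0);
	}
	/* Parent: move itself into its own process group. */
	if (setpgid(0, 0) == -1)
		perror("setpgid");
	printf("parent %d: now in process group %d\n", (int)getpid(),
	    (int)getpgrp());
	return (0);
}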
*/ void fixjobc(p, pgrp, entering) register struct proc *p; register struct pgrp *pgrp; int entering; { register struct pgrp *hispgrp; register struct session *mysession; sx_assert(&proctree_lock, SX_LOCKED); PROC_LOCK_ASSERT(p, MA_NOTOWNED); PGRP_LOCK_ASSERT(pgrp, MA_NOTOWNED); SESS_LOCK_ASSERT(pgrp->pg_session, MA_NOTOWNED); /* * Check p's parent to see whether p qualifies its own process * group; if so, adjust count for p's process group. */ mysession = pgrp->pg_session; if ((hispgrp = p->p_pptr->p_pgrp) != pgrp && hispgrp->pg_session == mysession) pgadjustjobc(pgrp, entering); /* * Check this process' children to see whether they qualify * their process groups; if so, adjust counts for children's * process groups. */ LIST_FOREACH(p, &p->p_children, p_sibling) { hispgrp = p->p_pgrp; if (hispgrp == pgrp || hispgrp->pg_session != mysession) continue; PROC_LOCK(p); if (p->p_state == PRS_ZOMBIE) { PROC_UNLOCK(p); continue; } PROC_UNLOCK(p); pgadjustjobc(hispgrp, entering); } } /* * A process group has become orphaned; * if there are any stopped processes in the group, * hang-up all process in that group. */ static void orphanpg(pg) struct pgrp *pg; { register struct proc *p; PGRP_LOCK_ASSERT(pg, MA_OWNED); LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); if (P_SHOULDSTOP(p) == P_STOPPED_SIG) { PROC_UNLOCK(p); LIST_FOREACH(p, &pg->pg_members, p_pglist) { PROC_LOCK(p); kern_psignal(p, SIGHUP); kern_psignal(p, SIGCONT); PROC_UNLOCK(p); } return; } PROC_UNLOCK(p); } } void sess_hold(struct session *s) { refcount_acquire(&s->s_count); } void sess_release(struct session *s) { if (refcount_release(&s->s_count)) { if (s->s_ttyp != NULL) { tty_lock(s->s_ttyp); tty_rel_sess(s->s_ttyp, s); } mtx_destroy(&s->s_mtx); free(s, M_SESSION); } } #ifdef DDB DB_SHOW_COMMAND(pgrpdump, pgrpdump) { register struct pgrp *pgrp; register struct proc *p; register int i; for (i = 0; i <= pgrphash; i++) { if (!LIST_EMPTY(&pgrphashtbl[i])) { printf("\tindx %d\n", i); LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) { printf( "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n", (void *)pgrp, (long)pgrp->pg_id, (void *)pgrp->pg_session, pgrp->pg_session->s_count, (void *)LIST_FIRST(&pgrp->pg_members)); LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { printf("\t\tpid %ld addr %p pgrp %p\n", (long)p->p_pid, (void *)p, (void *)p->p_pgrp); } } } } } #endif /* DDB */ /* * Calculate the kinfo_proc members which contain process-wide * informations. * Must be called with the target process locked. */ static void fill_kinfo_aggregate(struct proc *p, struct kinfo_proc *kp) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); kp->ki_estcpu = 0; kp->ki_pctcpu = 0; FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); kp->ki_pctcpu += sched_pctcpu(td); kp->ki_estcpu += td->td_estcpu; thread_unlock(td); } } /* * Clear kinfo_proc and fill in any information that is common * to all threads in the process. * Must be called with the target process locked. */ static void fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp) { struct thread *td0; struct tty *tp; struct session *sp; struct ucred *cred; struct sigacts *ps; /* For proc_realparent. 
*/ sx_assert(&proctree_lock, SX_LOCKED); PROC_LOCK_ASSERT(p, MA_OWNED); bzero(kp, sizeof(*kp)); kp->ki_structsize = sizeof(*kp); kp->ki_paddr = p; kp->ki_addr =/* p->p_addr; */0; /* XXX */ kp->ki_args = p->p_args; kp->ki_textvp = p->p_textvp; #ifdef KTRACE kp->ki_tracep = p->p_tracevp; kp->ki_traceflag = p->p_traceflag; #endif kp->ki_fd = p->p_fd; kp->ki_vmspace = p->p_vmspace; kp->ki_flag = p->p_flag; kp->ki_flag2 = p->p_flag2; cred = p->p_ucred; if (cred) { kp->ki_uid = cred->cr_uid; kp->ki_ruid = cred->cr_ruid; kp->ki_svuid = cred->cr_svuid; kp->ki_cr_flags = 0; if (cred->cr_flags & CRED_FLAG_CAPMODE) kp->ki_cr_flags |= KI_CRF_CAPABILITY_MODE; /* XXX bde doesn't like KI_NGROUPS */ if (cred->cr_ngroups > KI_NGROUPS) { kp->ki_ngroups = KI_NGROUPS; kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW; } else kp->ki_ngroups = cred->cr_ngroups; bcopy(cred->cr_groups, kp->ki_groups, kp->ki_ngroups * sizeof(gid_t)); kp->ki_rgid = cred->cr_rgid; kp->ki_svgid = cred->cr_svgid; /* If jailed(cred), emulate the old P_JAILED flag. */ if (jailed(cred)) { kp->ki_flag |= P_JAILED; /* If inside the jail, use 0 as a jail ID. */ if (cred->cr_prison != curthread->td_ucred->cr_prison) kp->ki_jid = cred->cr_prison->pr_id; } strlcpy(kp->ki_loginclass, cred->cr_loginclass->lc_name, sizeof(kp->ki_loginclass)); } ps = p->p_sigacts; if (ps) { mtx_lock(&ps->ps_mtx); kp->ki_sigignore = ps->ps_sigignore; kp->ki_sigcatch = ps->ps_sigcatch; mtx_unlock(&ps->ps_mtx); } if (p->p_state != PRS_NEW && p->p_state != PRS_ZOMBIE && p->p_vmspace != NULL) { struct vmspace *vm = p->p_vmspace; kp->ki_size = vm->vm_map.size; kp->ki_rssize = vmspace_resident_count(vm); /*XXX*/ FOREACH_THREAD_IN_PROC(p, td0) { if (!TD_IS_SWAPPED(td0)) kp->ki_rssize += td0->td_kstack_pages; } kp->ki_swrss = vm->vm_swrss; kp->ki_tsize = vm->vm_tsize; kp->ki_dsize = vm->vm_dsize; kp->ki_ssize = vm->vm_ssize; } else if (p->p_state == PRS_ZOMBIE) kp->ki_stat = SZOMB; if (kp->ki_flag & P_INMEM) kp->ki_sflag = PS_INMEM; else kp->ki_sflag = 0; /* Calculate legacy swtime as seconds since 'swtick'. */ kp->ki_swtime = (ticks - p->p_swtick) / hz; kp->ki_pid = p->p_pid; kp->ki_nice = p->p_nice; kp->ki_fibnum = p->p_fibnum; kp->ki_start = p->p_stats->p_start; timevaladd(&kp->ki_start, &boottime); PROC_STATLOCK(p); rufetch(p, &kp->ki_rusage); kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime); calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime); PROC_STATUNLOCK(p); calccru(p, &kp->ki_childutime, &kp->ki_childstime); /* Some callers want child times in a single value. */ kp->ki_childtime = kp->ki_childstime; timevaladd(&kp->ki_childtime, &kp->ki_childutime); FOREACH_THREAD_IN_PROC(p, td0) kp->ki_cow += td0->td_cow; tp = NULL; if (p->p_pgrp) { kp->ki_pgid = p->p_pgrp->pg_id; kp->ki_jobc = p->p_pgrp->pg_jobc; sp = p->p_pgrp->pg_session; if (sp != NULL) { kp->ki_sid = sp->s_sid; SESS_LOCK(sp); strlcpy(kp->ki_login, sp->s_login, sizeof(kp->ki_login)); if (sp->s_ttyvp) kp->ki_kiflag |= KI_CTTY; if (SESS_LEADER(p)) kp->ki_kiflag |= KI_SLEADER; /* XXX proctree_lock */ tp = sp->s_ttyp; SESS_UNLOCK(sp); } } if ((p->p_flag & P_CONTROLT) && tp != NULL) { kp->ki_tdev = tty_udev(tp); kp->ki_tpgid = tp->t_pgrp ? 
tp->t_pgrp->pg_id : NO_PID; if (tp->t_session) kp->ki_tsid = tp->t_session->s_sid; } else kp->ki_tdev = NODEV; if (p->p_comm[0] != '\0') strlcpy(kp->ki_comm, p->p_comm, sizeof(kp->ki_comm)); if (p->p_sysent && p->p_sysent->sv_name != NULL && p->p_sysent->sv_name[0] != '\0') strlcpy(kp->ki_emul, p->p_sysent->sv_name, sizeof(kp->ki_emul)); kp->ki_siglist = p->p_siglist; kp->ki_xstat = KW_EXITCODE(p->p_xexit, p->p_xsig); kp->ki_acflag = p->p_acflag; kp->ki_lock = p->p_lock; if (p->p_pptr) { kp->ki_ppid = proc_realparent(p)->p_pid; if (p->p_flag & P_TRACED) kp->ki_tracer = p->p_pptr->p_pid; } } /* * Fill in information that is thread specific. Must be called with * target process locked. If 'preferthread' is set, overwrite certain * process-related fields that are maintained for both threads and * processes. */ static void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread) { struct proc *p; p = td->td_proc; kp->ki_tdaddr = td; PROC_LOCK_ASSERT(p, MA_OWNED); if (preferthread) PROC_STATLOCK(p); thread_lock(td); if (td->td_wmesg != NULL) strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg)); else bzero(kp->ki_wmesg, sizeof(kp->ki_wmesg)); strlcpy(kp->ki_tdname, td->td_name, sizeof(kp->ki_tdname)); if (TD_ON_LOCK(td)) { kp->ki_kiflag |= KI_LOCKBLOCK; strlcpy(kp->ki_lockname, td->td_lockname, sizeof(kp->ki_lockname)); } else { kp->ki_kiflag &= ~KI_LOCKBLOCK; bzero(kp->ki_lockname, sizeof(kp->ki_lockname)); } if (p->p_state == PRS_NORMAL) { /* approximate. */ if (TD_ON_RUNQ(td) || TD_CAN_RUN(td) || TD_IS_RUNNING(td)) { kp->ki_stat = SRUN; } else if (P_SHOULDSTOP(p)) { kp->ki_stat = SSTOP; } else if (TD_IS_SLEEPING(td)) { kp->ki_stat = SSLEEP; } else if (TD_ON_LOCK(td)) { kp->ki_stat = SLOCK; } else { kp->ki_stat = SWAIT; } } else if (p->p_state == PRS_ZOMBIE) { kp->ki_stat = SZOMB; } else { kp->ki_stat = SIDL; } /* Things in the thread */ kp->ki_wchan = td->td_wchan; kp->ki_pri.pri_level = td->td_priority; kp->ki_pri.pri_native = td->td_base_pri; /* * Note: legacy fields; clamp at the old NOCPU value and/or * the maximum u_char CPU value. */ if (td->td_lastcpu == NOCPU) kp->ki_lastcpu_old = NOCPU_OLD; else if (td->td_lastcpu > MAXCPU_OLD) kp->ki_lastcpu_old = MAXCPU_OLD; else kp->ki_lastcpu_old = td->td_lastcpu; if (td->td_oncpu == NOCPU) kp->ki_oncpu_old = NOCPU_OLD; else if (td->td_oncpu > MAXCPU_OLD) kp->ki_oncpu_old = MAXCPU_OLD; else kp->ki_oncpu_old = td->td_oncpu; kp->ki_lastcpu = td->td_lastcpu; kp->ki_oncpu = td->td_oncpu; kp->ki_tdflags = td->td_flags; kp->ki_tid = td->td_tid; kp->ki_numthreads = p->p_numthreads; kp->ki_pcb = td->td_pcb; kp->ki_kstack = (void *)td->td_kstack; kp->ki_slptime = (ticks - td->td_slptick) / hz; kp->ki_pri.pri_class = td->td_pri_class; kp->ki_pri.pri_user = td->td_user_pri; if (preferthread) { rufetchtd(td, &kp->ki_rusage); kp->ki_runtime = cputick2usec(td->td_rux.rux_runtime); kp->ki_pctcpu = sched_pctcpu(td); kp->ki_estcpu = td->td_estcpu; kp->ki_cow = td->td_cow; } /* We can't get this anymore but ps etc never used it anyway. */ kp->ki_rqindex = 0; if (preferthread) kp->ki_siglist = td->td_siglist; kp->ki_sigmask = td->td_sigmask; thread_unlock(td); if (preferthread) PROC_STATUNLOCK(p); } /* * Fill in a kinfo_proc structure for the specified process. * Must be called with the target process locked. 
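[Editorial illustration, not part of this change: the fill_kinfo_* routines above populate the structure served by the kern.proc sysctl tree, which is what ps(1) and procstat(1) consume. A hedged userland sketch that fetches its own kinfo_proc through the KERN_PROC_PID node and prints a few of the fields filled in above.]

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, (int)getpid() };

	if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1) {
		perror("sysctl(kern.proc.pid)");
		return (1);
	}
	/* ki_comm, ki_ppid, ki_pgid and ki_sid are set by the code above. */
	printf("comm=%s ppid=%d pgid=%d sid=%d\n", kp.ki_comm,
	    (int)kp.ki_ppid, (int)kp.ki_pgid, (int)kp.ki_sid);
	return (0);
}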
*/ void fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp) { MPASS(FIRST_THREAD_IN_PROC(p) != NULL); fill_kinfo_proc_only(p, kp); fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0); fill_kinfo_aggregate(p, kp); } struct pstats * pstats_alloc(void) { return (malloc(sizeof(struct pstats), M_SUBPROC, M_ZERO|M_WAITOK)); } /* * Copy parts of p_stats; zero the rest of p_stats (statistics). */ void pstats_fork(struct pstats *src, struct pstats *dst) { bzero(&dst->pstat_startzero, __rangeof(struct pstats, pstat_startzero, pstat_endzero)); bcopy(&src->pstat_startcopy, &dst->pstat_startcopy, __rangeof(struct pstats, pstat_startcopy, pstat_endcopy)); } void pstats_free(struct pstats *ps) { free(ps, M_SUBPROC); } static struct proc * zpfind_locked(pid_t pid) { struct proc *p; sx_assert(&allproc_lock, SX_LOCKED); LIST_FOREACH(p, &zombproc, p_list) { if (p->p_pid == pid) { PROC_LOCK(p); break; } } return (p); } /* * Locate a zombie process by number */ struct proc * zpfind(pid_t pid) { struct proc *p; sx_slock(&allproc_lock); p = zpfind_locked(pid); sx_sunlock(&allproc_lock); return (p); } #ifdef COMPAT_FREEBSD32 /* * This function is typically used to copy out the kernel address, so * it can be replaced by assignment of zero. */ static inline uint32_t ptr32_trim(void *ptr) { uintptr_t uptr; uptr = (uintptr_t)ptr; return ((uptr > UINT_MAX) ? 0 : uptr); } #define PTRTRIM_CP(src,dst,fld) \ do { (dst).fld = ptr32_trim((src).fld); } while (0) static void freebsd32_kinfo_proc_out(const struct kinfo_proc *ki, struct kinfo_proc32 *ki32) { int i; bzero(ki32, sizeof(struct kinfo_proc32)); ki32->ki_structsize = sizeof(struct kinfo_proc32); CP(*ki, *ki32, ki_layout); PTRTRIM_CP(*ki, *ki32, ki_args); PTRTRIM_CP(*ki, *ki32, ki_paddr); PTRTRIM_CP(*ki, *ki32, ki_addr); PTRTRIM_CP(*ki, *ki32, ki_tracep); PTRTRIM_CP(*ki, *ki32, ki_textvp); PTRTRIM_CP(*ki, *ki32, ki_fd); PTRTRIM_CP(*ki, *ki32, ki_vmspace); PTRTRIM_CP(*ki, *ki32, ki_wchan); CP(*ki, *ki32, ki_pid); CP(*ki, *ki32, ki_ppid); CP(*ki, *ki32, ki_pgid); CP(*ki, *ki32, ki_tpgid); CP(*ki, *ki32, ki_sid); CP(*ki, *ki32, ki_tsid); CP(*ki, *ki32, ki_jobc); CP(*ki, *ki32, ki_tdev); CP(*ki, *ki32, ki_siglist); CP(*ki, *ki32, ki_sigmask); CP(*ki, *ki32, ki_sigignore); CP(*ki, *ki32, ki_sigcatch); CP(*ki, *ki32, ki_uid); CP(*ki, *ki32, ki_ruid); CP(*ki, *ki32, ki_svuid); CP(*ki, *ki32, ki_rgid); CP(*ki, *ki32, ki_svgid); CP(*ki, *ki32, ki_ngroups); for (i = 0; i < KI_NGROUPS; i++) CP(*ki, *ki32, ki_groups[i]); CP(*ki, *ki32, ki_size); CP(*ki, *ki32, ki_rssize); CP(*ki, *ki32, ki_swrss); CP(*ki, *ki32, ki_tsize); CP(*ki, *ki32, ki_dsize); CP(*ki, *ki32, ki_ssize); CP(*ki, *ki32, ki_xstat); CP(*ki, *ki32, ki_acflag); CP(*ki, *ki32, ki_pctcpu); CP(*ki, *ki32, ki_estcpu); CP(*ki, *ki32, ki_slptime); CP(*ki, *ki32, ki_swtime); CP(*ki, *ki32, ki_cow); CP(*ki, *ki32, ki_runtime); TV_CP(*ki, *ki32, ki_start); TV_CP(*ki, *ki32, ki_childtime); CP(*ki, *ki32, ki_flag); CP(*ki, *ki32, ki_kiflag); CP(*ki, *ki32, ki_traceflag); CP(*ki, *ki32, ki_stat); CP(*ki, *ki32, ki_nice); CP(*ki, *ki32, ki_lock); CP(*ki, *ki32, ki_rqindex); CP(*ki, *ki32, ki_oncpu); CP(*ki, *ki32, ki_lastcpu); /* XXX TODO: wrap cpu value as appropriate */ CP(*ki, *ki32, ki_oncpu_old); CP(*ki, *ki32, ki_lastcpu_old); bcopy(ki->ki_tdname, ki32->ki_tdname, TDNAMLEN + 1); bcopy(ki->ki_wmesg, ki32->ki_wmesg, WMESGLEN + 1); bcopy(ki->ki_login, ki32->ki_login, LOGNAMELEN + 1); bcopy(ki->ki_lockname, ki32->ki_lockname, LOCKNAMELEN + 1); bcopy(ki->ki_comm, ki32->ki_comm, COMMLEN + 1); bcopy(ki->ki_emul, ki32->ki_emul, 
KI_EMULNAMELEN + 1); bcopy(ki->ki_loginclass, ki32->ki_loginclass, LOGINCLASSLEN + 1); CP(*ki, *ki32, ki_tracer); CP(*ki, *ki32, ki_flag2); CP(*ki, *ki32, ki_fibnum); CP(*ki, *ki32, ki_cr_flags); CP(*ki, *ki32, ki_jid); CP(*ki, *ki32, ki_numthreads); CP(*ki, *ki32, ki_tid); CP(*ki, *ki32, ki_pri); freebsd32_rusage_out(&ki->ki_rusage, &ki32->ki_rusage); freebsd32_rusage_out(&ki->ki_rusage_ch, &ki32->ki_rusage_ch); PTRTRIM_CP(*ki, *ki32, ki_pcb); PTRTRIM_CP(*ki, *ki32, ki_kstack); PTRTRIM_CP(*ki, *ki32, ki_udata); CP(*ki, *ki32, ki_sflag); CP(*ki, *ki32, ki_tdflags); } #endif int kern_proc_out(struct proc *p, struct sbuf *sb, int flags) { struct thread *td; struct kinfo_proc ki; #ifdef COMPAT_FREEBSD32 struct kinfo_proc32 ki32; #endif int error; PROC_LOCK_ASSERT(p, MA_OWNED); MPASS(FIRST_THREAD_IN_PROC(p) != NULL); error = 0; fill_kinfo_proc(p, &ki); if ((flags & KERN_PROC_NOTHREADS) != 0) { #ifdef COMPAT_FREEBSD32 if ((flags & KERN_PROC_MASK32) != 0) { freebsd32_kinfo_proc_out(&ki, &ki32); if (sbuf_bcat(sb, &ki32, sizeof(ki32)) != 0) error = ENOMEM; } else #endif if (sbuf_bcat(sb, &ki, sizeof(ki)) != 0) error = ENOMEM; } else { FOREACH_THREAD_IN_PROC(p, td) { fill_kinfo_thread(td, &ki, 1); #ifdef COMPAT_FREEBSD32 if ((flags & KERN_PROC_MASK32) != 0) { freebsd32_kinfo_proc_out(&ki, &ki32); if (sbuf_bcat(sb, &ki32, sizeof(ki32)) != 0) error = ENOMEM; } else #endif if (sbuf_bcat(sb, &ki, sizeof(ki)) != 0) error = ENOMEM; if (error != 0) break; } } PROC_UNLOCK(p); return (error); } static int sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags, int doingzomb) { struct sbuf sb; struct kinfo_proc ki; struct proc *np; int error, error2; pid_t pid; pid = p->p_pid; sbuf_new_for_sysctl(&sb, (char *)&ki, sizeof(ki), req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = kern_proc_out(p, &sb, flags); error2 = sbuf_finish(&sb); sbuf_delete(&sb); if (error != 0) return (error); else if (error2 != 0) return (error2); if (doingzomb) np = zpfind(pid); else { if (pid == 0) return (0); np = pfind(pid); } if (np == NULL) return (ESRCH); if (np != p) { PROC_UNLOCK(np); return (ESRCH); } PROC_UNLOCK(np); return (0); } static int sysctl_kern_proc(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; int flags, doingzomb, oid_number; int error = 0; oid_number = oidp->oid_number; if (oid_number != KERN_PROC_ALL && (oid_number & KERN_PROC_INC_THREAD) == 0) flags = KERN_PROC_NOTHREADS; else { flags = 0; oid_number &= ~KERN_PROC_INC_THREAD; } #ifdef COMPAT_FREEBSD32 if (req->flags & SCTL_MASK32) flags |= KERN_PROC_MASK32; #endif if (oid_number == KERN_PROC_PID) { if (namelen != 1) return (EINVAL); error = sysctl_wire_old_buffer(req, 0); if (error) return (error); sx_slock(&proctree_lock); error = pget((pid_t)name[0], PGET_CANSEE, &p); if (error == 0) error = sysctl_out_proc(p, req, flags, 0); sx_sunlock(&proctree_lock); return (error); } switch (oid_number) { case KERN_PROC_ALL: if (namelen != 0) return (EINVAL); break; case KERN_PROC_PROC: if (namelen != 0 && namelen != 1) return (EINVAL); break; default: if (namelen != 1) return (EINVAL); break; } if (!req->oldptr) { /* overestimate by 5 procs */ error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5); if (error) return (error); } error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sx_slock(&proctree_lock); sx_slock(&allproc_lock); for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) { if (!doingzomb) p = LIST_FIRST(&allproc); else p = LIST_FIRST(&zombproc); for (; p != 0; p = LIST_NEXT(p, p_list)) { /* 
* Skip embryonic processes. */ PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); continue; } KASSERT(p->p_ucred != NULL, ("process credential is NULL for non-NEW proc")); /* * Show a user only appropriate processes. */ if (p_cansee(curthread, p)) { PROC_UNLOCK(p); continue; } /* * TODO - make more efficient (see notes below). * do by session. */ switch (oid_number) { case KERN_PROC_GID: if (p->p_ucred->cr_gid != (gid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_PGRP: /* could do this by traversing pgrp */ if (p->p_pgrp == NULL || p->p_pgrp->pg_id != (pid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_RGID: if (p->p_ucred->cr_rgid != (gid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_SESSION: if (p->p_session == NULL || p->p_session->s_sid != (pid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_TTY: if ((p->p_flag & P_CONTROLT) == 0 || p->p_session == NULL) { PROC_UNLOCK(p); continue; } /* XXX proctree_lock */ SESS_LOCK(p->p_session); if (p->p_session->s_ttyp == NULL || tty_udev(p->p_session->s_ttyp) != (dev_t)name[0]) { SESS_UNLOCK(p->p_session); PROC_UNLOCK(p); continue; } SESS_UNLOCK(p->p_session); break; case KERN_PROC_UID: if (p->p_ucred->cr_uid != (uid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_RUID: if (p->p_ucred->cr_ruid != (uid_t)name[0]) { PROC_UNLOCK(p); continue; } break; case KERN_PROC_PROC: break; default: break; } error = sysctl_out_proc(p, req, flags, doingzomb); if (error) { sx_sunlock(&allproc_lock); sx_sunlock(&proctree_lock); return (error); } } } sx_sunlock(&allproc_lock); sx_sunlock(&proctree_lock); return (0); } struct pargs * pargs_alloc(int len) { struct pargs *pa; pa = malloc(sizeof(struct pargs) + len, M_PARGS, M_WAITOK); refcount_init(&pa->ar_ref, 1); pa->ar_length = len; return (pa); } static void pargs_free(struct pargs *pa) { free(pa, M_PARGS); } void pargs_hold(struct pargs *pa) { if (pa == NULL) return; refcount_acquire(&pa->ar_ref); } void pargs_drop(struct pargs *pa) { if (pa == NULL) return; if (refcount_release(&pa->ar_ref)) pargs_free(pa); } static int proc_read_mem(struct thread *td, struct proc *p, vm_offset_t offset, void* buf, size_t len) { struct iovec iov; struct uio uio; iov.iov_base = (caddr_t)buf; iov.iov_len = len; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = offset; uio.uio_resid = (ssize_t)len; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_READ; uio.uio_td = td; return (proc_rwmem(p, &uio)); } static int proc_read_string(struct thread *td, struct proc *p, const char *sptr, char *buf, size_t len) { size_t i; int error; error = proc_read_mem(td, p, (vm_offset_t)sptr, buf, len); /* * Reading the chunk may validly return EFAULT if the string is shorter * than the chunk and is aligned at the end of the page, assuming the * next page is not mapped. So if EFAULT is returned do a fallback to * one byte read loop. */ if (error == EFAULT) { for (i = 0; i < len; i++, buf++, sptr++) { error = proc_read_mem(td, p, (vm_offset_t)sptr, buf, 1); if (error != 0) return (error); if (*buf == '\0') break; } error = 0; } return (error); } #define PROC_AUXV_MAX 256 /* Safety limit on auxv size. 
*/ enum proc_vector_type { PROC_ARG, PROC_ENV, PROC_AUX, }; #ifdef COMPAT_FREEBSD32 static int get_proc_vector32(struct thread *td, struct proc *p, char ***proc_vectorp, size_t *vsizep, enum proc_vector_type type) { struct freebsd32_ps_strings pss; Elf32_Auxinfo aux; vm_offset_t vptr, ptr; uint32_t *proc_vector32; char **proc_vector; size_t vsize, size; int i, error; error = proc_read_mem(td, p, (vm_offset_t)(p->p_sysent->sv_psstrings), &pss, sizeof(pss)); if (error != 0) return (error); switch (type) { case PROC_ARG: vptr = (vm_offset_t)PTRIN(pss.ps_argvstr); vsize = pss.ps_nargvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(int32_t); break; case PROC_ENV: vptr = (vm_offset_t)PTRIN(pss.ps_envstr); vsize = pss.ps_nenvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(int32_t); break; case PROC_AUX: vptr = (vm_offset_t)PTRIN(pss.ps_envstr) + (pss.ps_nenvstr + 1) * sizeof(int32_t); if (vptr % 4 != 0) return (ENOEXEC); for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) { error = proc_read_mem(td, p, ptr, &aux, sizeof(aux)); if (error != 0) return (error); if (aux.a_type == AT_NULL) break; ptr += sizeof(aux); } if (aux.a_type != AT_NULL) return (ENOEXEC); vsize = i + 1; size = vsize * sizeof(aux); break; default: KASSERT(0, ("Wrong proc vector type: %d", type)); return (EINVAL); } proc_vector32 = malloc(size, M_TEMP, M_WAITOK); error = proc_read_mem(td, p, vptr, proc_vector32, size); if (error != 0) goto done; if (type == PROC_AUX) { *proc_vectorp = (char **)proc_vector32; *vsizep = vsize; return (0); } proc_vector = malloc(vsize * sizeof(char *), M_TEMP, M_WAITOK); for (i = 0; i < (int)vsize; i++) proc_vector[i] = PTRIN(proc_vector32[i]); *proc_vectorp = proc_vector; *vsizep = vsize; done: free(proc_vector32, M_TEMP); return (error); } #endif static int get_proc_vector(struct thread *td, struct proc *p, char ***proc_vectorp, size_t *vsizep, enum proc_vector_type type) { struct ps_strings pss; Elf_Auxinfo aux; vm_offset_t vptr, ptr; char **proc_vector; size_t vsize, size; int error, i; #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32) != 0) return (get_proc_vector32(td, p, proc_vectorp, vsizep, type)); #endif error = proc_read_mem(td, p, (vm_offset_t)(p->p_sysent->sv_psstrings), &pss, sizeof(pss)); if (error != 0) return (error); switch (type) { case PROC_ARG: vptr = (vm_offset_t)pss.ps_argvstr; vsize = pss.ps_nargvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(char *); break; case PROC_ENV: vptr = (vm_offset_t)pss.ps_envstr; vsize = pss.ps_nenvstr; if (vsize > ARG_MAX) return (ENOEXEC); size = vsize * sizeof(char *); break; case PROC_AUX: /* * The aux array is just above env array on the stack. Check * that the address is naturally aligned. */ vptr = (vm_offset_t)pss.ps_envstr + (pss.ps_nenvstr + 1) * sizeof(char *); #if __ELF_WORD_SIZE == 64 if (vptr % sizeof(uint64_t) != 0) #else if (vptr % sizeof(uint32_t) != 0) #endif return (ENOEXEC); /* * We count the array size reading the aux vectors from the * stack until AT_NULL vector is returned. So (to keep the code * simple) we read the process stack twice: the first time here * to find the size and the second time when copying the vectors * to the allocated proc_vector. 
*/ for (ptr = vptr, i = 0; i < PROC_AUXV_MAX; i++) { error = proc_read_mem(td, p, ptr, &aux, sizeof(aux)); if (error != 0) return (error); if (aux.a_type == AT_NULL) break; ptr += sizeof(aux); } /* * If the PROC_AUXV_MAX entries are iterated over, and we have * not reached AT_NULL, it is most likely we are reading wrong * data: either the process doesn't have auxv array or data has * been modified. Return the error in this case. */ if (aux.a_type != AT_NULL) return (ENOEXEC); vsize = i + 1; size = vsize * sizeof(aux); break; default: KASSERT(0, ("Wrong proc vector type: %d", type)); return (EINVAL); /* In case we are built without INVARIANTS. */ } proc_vector = malloc(size, M_TEMP, M_WAITOK); if (proc_vector == NULL) return (ENOMEM); error = proc_read_mem(td, p, vptr, proc_vector, size); if (error != 0) { free(proc_vector, M_TEMP); return (error); } *proc_vectorp = proc_vector; *vsizep = vsize; return (0); } #define GET_PS_STRINGS_CHUNK_SZ 256 /* Chunk size (bytes) for ps_strings operations. */ static int get_ps_strings(struct thread *td, struct proc *p, struct sbuf *sb, enum proc_vector_type type) { size_t done, len, nchr, vsize; int error, i; char **proc_vector, *sptr; char pss_string[GET_PS_STRINGS_CHUNK_SZ]; PROC_ASSERT_HELD(p); /* * We are not going to read more than 2 * (PATH_MAX + ARG_MAX) bytes. */ nchr = 2 * (PATH_MAX + ARG_MAX); error = get_proc_vector(td, p, &proc_vector, &vsize, type); if (error != 0) return (error); for (done = 0, i = 0; i < (int)vsize && done < nchr; i++) { /* * The program may have scribbled into its argv array, e.g. to * remove some arguments. If that has happened, break out * before trying to read from NULL. */ if (proc_vector[i] == NULL) break; for (sptr = proc_vector[i]; ; sptr += GET_PS_STRINGS_CHUNK_SZ) { error = proc_read_string(td, p, sptr, pss_string, sizeof(pss_string)); if (error != 0) goto done; len = strnlen(pss_string, GET_PS_STRINGS_CHUNK_SZ); if (done + len >= nchr) len = nchr - done - 1; sbuf_bcat(sb, pss_string, len); if (len != GET_PS_STRINGS_CHUNK_SZ) break; done += GET_PS_STRINGS_CHUNK_SZ; } sbuf_bcat(sb, "", 1); done += len + 1; } done: free(proc_vector, M_TEMP); return (error); } int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb) { return (get_ps_strings(curthread, p, sb, PROC_ARG)); } int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb) { return (get_ps_strings(curthread, p, sb, PROC_ENV)); } int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb) { size_t vsize, size; char **auxv; int error; error = get_proc_vector(td, p, &auxv, &vsize, PROC_AUX); if (error == 0) { #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32) != 0) size = vsize * sizeof(Elf32_Auxinfo); else #endif size = vsize * sizeof(Elf_Auxinfo); if (sbuf_bcat(sb, auxv, size) != 0) error = ENOMEM; free(auxv, M_TEMP); } return (error); } /* * This sysctl allows a process to retrieve the argument list or process * title for another process without groping around in the address space * of the other process. It also allow a process to set its own "process * title to a string of its own choice. 
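[Editorial illustration, not part of this change: a hedged userland counterpart of the handler that follows (sysctl_kern_proc_args), fetching a process's argument vector through the kern.proc.args node; the argument strings come back separated by NUL bytes.]

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	char buf[4096];
	size_t len = sizeof(buf), off;
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS,
	    argc > 1 ? atoi(argv[1]) : (int)getpid() };

	if (sysctl(mib, 4, buf, &len, NULL, 0) == -1) {
		perror("sysctl(kern.proc.args)");
		return (1);
	}
	/* The handler returns the argv[] strings NUL-separated. */
	for (off = 0; off < len; off += strlen(buf + off) + 1)
		printf("arg: %s\n", buf + off);
	return (0);
}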
*/ static int sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct pargs *newpa, *pa; struct proc *p; struct sbuf sb; int flags, error = 0, error2; if (namelen != 1) return (EINVAL); flags = PGET_CANSEE; if (req->newptr != NULL) flags |= PGET_ISCURRENT; error = pget((pid_t)name[0], flags, &p); if (error) return (error); pa = p->p_args; if (pa != NULL) { pargs_hold(pa); PROC_UNLOCK(p); error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length); pargs_drop(pa); } else if ((p->p_flag & (P_WEXIT | P_SYSTEM)) == 0) { _PHOLD(p); PROC_UNLOCK(p); sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = proc_getargv(curthread, p, &sb); error2 = sbuf_finish(&sb); PRELE(p); sbuf_delete(&sb); if (error == 0 && error2 != 0) error = error2; } else { PROC_UNLOCK(p); } if (error != 0 || req->newptr == NULL) return (error); if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) return (ENOMEM); newpa = pargs_alloc(req->newlen); error = SYSCTL_IN(req, newpa->ar_args, req->newlen); if (error != 0) { pargs_free(newpa); return (error); } PROC_LOCK(p); pa = p->p_args; p->p_args = newpa; PROC_UNLOCK(p); pargs_drop(pa); return (0); } /* * This sysctl allows a process to retrieve environment of another process. */ static int sysctl_kern_proc_env(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; struct sbuf sb; int error, error2; if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); if ((p->p_flag & P_SYSTEM) != 0) { PRELE(p); return (0); } sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = proc_getenvv(curthread, p, &sb); error2 = sbuf_finish(&sb); PRELE(p); sbuf_delete(&sb); return (error != 0 ? error : error2); } /* * This sysctl allows a process to retrieve ELF auxiliary vector of * another process. */ static int sysctl_kern_proc_auxv(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; struct sbuf sb; int error, error2; if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); if ((p->p_flag & P_SYSTEM) != 0) { PRELE(p); return (0); } sbuf_new_for_sysctl(&sb, NULL, GET_PS_STRINGS_CHUNK_SZ, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = proc_getauxv(curthread, p, &sb); error2 = sbuf_finish(&sb); PRELE(p); sbuf_delete(&sb); return (error != 0 ? error : error2); } /* * This sysctl allows a process to retrieve the path of the executable for * itself or another process. 
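[Editorial illustration, not part of this change: a hedged sketch of a caller of the pathname handler defined next; passing -1 as the fourth MIB element means "this process", exactly the case sysctl_kern_proc_pathname() checks below.]

#include <sys/types.h>
#include <sys/sysctl.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char path[PATH_MAX];
	size_t len = sizeof(path);
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };

	if (sysctl(mib, 4, path, &len, NULL, 0) == -1) {
		perror("sysctl(kern.proc.pathname)");
		return (1);
	}
	printf("executable: %s\n", path);
	return (0);
}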
*/ static int sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS) { pid_t *pidp = (pid_t *)arg1; unsigned int arglen = arg2; struct proc *p; struct vnode *vp; char *retbuf, *freebuf; int error; if (arglen != 1) return (EINVAL); if (*pidp == -1) { /* -1 means this process */ p = req->td->td_proc; } else { error = pget(*pidp, PGET_CANSEE, &p); if (error != 0) return (error); } vp = p->p_textvp; if (vp == NULL) { if (*pidp != -1) PROC_UNLOCK(p); return (0); } vref(vp); if (*pidp != -1) PROC_UNLOCK(p); error = vn_fullpath(req->td, vp, &retbuf, &freebuf); vrele(vp); if (error) return (error); error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1); free(freebuf, M_TEMP); return (error); } static int sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS) { struct proc *p; char *sv_name; int *name; int namelen; int error; namelen = arg2; if (namelen != 1) return (EINVAL); name = (int *)arg1; error = pget((pid_t)name[0], PGET_CANSEE, &p); if (error != 0) return (error); sv_name = p->p_sysent->sv_name; PROC_UNLOCK(p); return (sysctl_handle_string(oidp, sv_name, 0, req)); } #ifdef KINFO_OVMENTRY_SIZE CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE); #endif #ifdef COMPAT_FREEBSD7 static int sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS) { vm_map_entry_t entry, tmp_entry; unsigned int last_timestamp; char *fullpath, *freepath; struct kinfo_ovmentry *kve; struct vattr va; struct ucred *cred; int error, *name; struct vnode *vp; struct proc *p; vm_map_t map; struct vmspace *vm; name = (int *)arg1; error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); vm = vmspace_acquire_ref(p); if (vm == NULL) { PRELE(p); return (ESRCH); } kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK); map = &vm->vm_map; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { vm_object_t obj, tobj, lobj; vm_offset_t addr; if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) continue; bzero(kve, sizeof(*kve)); kve->kve_structsize = sizeof(*kve); kve->kve_private_resident = 0; obj = entry->object.vm_object; if (obj != NULL) { VM_OBJECT_RLOCK(obj); if (obj->shadow_count == 1) kve->kve_private_resident = obj->resident_page_count; } kve->kve_resident = 0; addr = entry->start; while (addr < entry->end) { if (pmap_extract(map->pmap, addr)) kve->kve_resident++; addr += PAGE_SIZE; } for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) { if (tobj != obj) VM_OBJECT_RLOCK(tobj); if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); lobj = tobj; } kve->kve_start = (void*)entry->start; kve->kve_end = (void*)entry->end; kve->kve_offset = (off_t)entry->offset; if (entry->protection & VM_PROT_READ) kve->kve_protection |= KVME_PROT_READ; if (entry->protection & VM_PROT_WRITE) kve->kve_protection |= KVME_PROT_WRITE; if (entry->protection & VM_PROT_EXECUTE) kve->kve_protection |= KVME_PROT_EXEC; if (entry->eflags & MAP_ENTRY_COW) kve->kve_flags |= KVME_FLAG_COW; if (entry->eflags & MAP_ENTRY_NEEDS_COPY) kve->kve_flags |= KVME_FLAG_NEEDS_COPY; if (entry->eflags & MAP_ENTRY_NOCOREDUMP) kve->kve_flags |= KVME_FLAG_NOCOREDUMP; last_timestamp = map->timestamp; vm_map_unlock_read(map); kve->kve_fileid = 0; kve->kve_fsid = 0; freepath = NULL; fullpath = ""; if (lobj) { vp = NULL; switch (lobj->type) { case OBJT_DEFAULT: kve->kve_type = KVME_TYPE_DEFAULT; break; case OBJT_VNODE: kve->kve_type = KVME_TYPE_VNODE; vp = lobj->handle; vref(vp); break; case OBJT_SWAP: if ((lobj->flags & OBJ_TMPFS_NODE) != 0) { kve->kve_type = KVME_TYPE_VNODE; if ((lobj->flags & OBJ_TMPFS) != 0) { vp = lobj->un_pager.swp.swp_tmpfs; vref(vp); 
} } else { kve->kve_type = KVME_TYPE_SWAP; } break; case OBJT_DEVICE: kve->kve_type = KVME_TYPE_DEVICE; break; case OBJT_PHYS: kve->kve_type = KVME_TYPE_PHYS; break; case OBJT_DEAD: kve->kve_type = KVME_TYPE_DEAD; break; case OBJT_SG: kve->kve_type = KVME_TYPE_SG; break; default: kve->kve_type = KVME_TYPE_UNKNOWN; break; } if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); kve->kve_ref_count = obj->ref_count; kve->kve_shadow_count = obj->shadow_count; VM_OBJECT_RUNLOCK(obj); if (vp != NULL) { vn_fullpath(curthread, vp, &fullpath, &freepath); cred = curthread->td_ucred; vn_lock(vp, LK_SHARED | LK_RETRY); if (VOP_GETATTR(vp, &va, cred) == 0) { kve->kve_fileid = va.va_fileid; kve->kve_fsid = va.va_fsid; } vput(vp); } } else { kve->kve_type = KVME_TYPE_NONE; kve->kve_ref_count = 0; kve->kve_shadow_count = 0; } strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path)); if (freepath != NULL) free(freepath, M_TEMP); error = SYSCTL_OUT(req, kve, sizeof(*kve)); vm_map_lock_read(map); if (error) break; if (last_timestamp != map->timestamp) { vm_map_lookup_entry(map, addr - 1, &tmp_entry); entry = tmp_entry; } } vm_map_unlock_read(map); vmspace_free(vm); PRELE(p); free(kve, M_TEMP); return (error); } #endif /* COMPAT_FREEBSD7 */ #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void kern_proc_vmmap_resident(vm_map_t map, vm_map_entry_t entry, struct kinfo_vmentry *kve) { vm_object_t obj, tobj; vm_page_t m, m_adv; vm_offset_t addr; vm_paddr_t locked_pa; vm_pindex_t pi, pi_adv, pindex; locked_pa = 0; obj = entry->object.vm_object; addr = entry->start; m_adv = NULL; pi = OFF_TO_IDX(entry->offset); for (; addr < entry->end; addr += IDX_TO_OFF(pi_adv), pi += pi_adv) { if (m_adv != NULL) { m = m_adv; } else { pi_adv = OFF_TO_IDX(entry->end - addr); pindex = pi; for (tobj = obj;; tobj = tobj->backing_object) { m = vm_page_find_least(tobj, pindex); if (m != NULL) { if (m->pindex == pindex) break; if (pi_adv > m->pindex - pindex) { pi_adv = m->pindex - pindex; m_adv = m; } } if (tobj->backing_object == NULL) goto next; pindex += OFF_TO_IDX(tobj-> backing_object_offset); } } m_adv = NULL; if (m->psind != 0 && addr + pagesizes[1] <= entry->end && (addr & (pagesizes[1] - 1)) == 0 && (pmap_mincore(map->pmap, addr, &locked_pa) & MINCORE_SUPER) != 0) { kve->kve_flags |= KVME_FLAG_SUPER; pi_adv = OFF_TO_IDX(pagesizes[1]); } else { /* * We do not test the found page on validity. * Either the page is busy and being paged in, * or it was invalidated. The first case * should be counted as resident, the second * is not so clear; we do account both. */ pi_adv = 1; } kve->kve_resident += pi_adv; next:; } PA_UNLOCK_COND(locked_pa); } /* * Must be called with the process locked and will return unlocked. 
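 *
 * Parameter contract added by this revision (summarized from the code
 * below): a maxlen of -1 means no output limit, otherwise filling stops
 * without error before maxlen would be exceeded; KERN_VMMAP_PACK_KINFO
 * packs each record down to the end of kve_path instead of emitting
 * full-sized struct kinfo_vmentry records.  Hypothetical caller sketch
 * (the size-limit name is illustrative only):
 *
 *	error = kern_proc_vmmap_out(p, sb, vmmap_note_limit,
 *	    KERN_VMMAP_PACK_KINFO);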
*/ int -kern_proc_vmmap_out(struct proc *p, struct sbuf *sb) +kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags) { vm_map_entry_t entry, tmp_entry; struct vattr va; vm_map_t map; vm_object_t obj, tobj, lobj; char *fullpath, *freepath; struct kinfo_vmentry *kve; struct ucred *cred; struct vnode *vp; struct vmspace *vm; vm_offset_t addr; unsigned int last_timestamp; int error; PROC_LOCK_ASSERT(p, MA_OWNED); _PHOLD(p); PROC_UNLOCK(p); vm = vmspace_acquire_ref(p); if (vm == NULL) { PRELE(p); return (ESRCH); } - kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK); + kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK | M_ZERO); error = 0; map = &vm->vm_map; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) continue; addr = entry->end; bzero(kve, sizeof(*kve)); obj = entry->object.vm_object; if (obj != NULL) { for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { VM_OBJECT_RLOCK(tobj); lobj = tobj; } if (obj->backing_object == NULL) kve->kve_private_resident = obj->resident_page_count; if (!vmmap_skip_res_cnt) kern_proc_vmmap_resident(map, entry, kve); for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) { if (tobj != obj && tobj != lobj) VM_OBJECT_RUNLOCK(tobj); } } else { lobj = NULL; } kve->kve_start = entry->start; kve->kve_end = entry->end; kve->kve_offset = entry->offset; if (entry->protection & VM_PROT_READ) kve->kve_protection |= KVME_PROT_READ; if (entry->protection & VM_PROT_WRITE) kve->kve_protection |= KVME_PROT_WRITE; if (entry->protection & VM_PROT_EXECUTE) kve->kve_protection |= KVME_PROT_EXEC; if (entry->eflags & MAP_ENTRY_COW) kve->kve_flags |= KVME_FLAG_COW; if (entry->eflags & MAP_ENTRY_NEEDS_COPY) kve->kve_flags |= KVME_FLAG_NEEDS_COPY; if (entry->eflags & MAP_ENTRY_NOCOREDUMP) kve->kve_flags |= KVME_FLAG_NOCOREDUMP; if (entry->eflags & MAP_ENTRY_GROWS_UP) kve->kve_flags |= KVME_FLAG_GROWS_UP; if (entry->eflags & MAP_ENTRY_GROWS_DOWN) kve->kve_flags |= KVME_FLAG_GROWS_DOWN; last_timestamp = map->timestamp; vm_map_unlock_read(map); freepath = NULL; fullpath = ""; if (lobj != NULL) { vp = NULL; switch (lobj->type) { case OBJT_DEFAULT: kve->kve_type = KVME_TYPE_DEFAULT; break; case OBJT_VNODE: kve->kve_type = KVME_TYPE_VNODE; vp = lobj->handle; vref(vp); break; case OBJT_SWAP: if ((lobj->flags & OBJ_TMPFS_NODE) != 0) { kve->kve_type = KVME_TYPE_VNODE; if ((lobj->flags & OBJ_TMPFS) != 0) { vp = lobj->un_pager.swp.swp_tmpfs; vref(vp); } } else { kve->kve_type = KVME_TYPE_SWAP; } break; case OBJT_DEVICE: kve->kve_type = KVME_TYPE_DEVICE; break; case OBJT_PHYS: kve->kve_type = KVME_TYPE_PHYS; break; case OBJT_DEAD: kve->kve_type = KVME_TYPE_DEAD; break; case OBJT_SG: kve->kve_type = KVME_TYPE_SG; break; case OBJT_MGTDEVICE: kve->kve_type = KVME_TYPE_MGTDEVICE; break; default: kve->kve_type = KVME_TYPE_UNKNOWN; break; } if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); kve->kve_ref_count = obj->ref_count; kve->kve_shadow_count = obj->shadow_count; VM_OBJECT_RUNLOCK(obj); if (vp != NULL) { vn_fullpath(curthread, vp, &fullpath, &freepath); kve->kve_vn_type = vntype_to_kinfo(vp->v_type); cred = curthread->td_ucred; vn_lock(vp, LK_SHARED | LK_RETRY); if (VOP_GETATTR(vp, &va, cred) == 0) { kve->kve_vn_fileid = va.va_fileid; kve->kve_vn_fsid = va.va_fsid; kve->kve_vn_mode = MAKEIMODE(va.va_type, va.va_mode); kve->kve_vn_size = va.va_size; kve->kve_vn_rdev = va.va_rdev; kve->kve_status = KF_ATTR_VALID; } vput(vp); } } else { kve->kve_type = KVME_TYPE_NONE; kve->kve_ref_count = 0; 
kve->kve_shadow_count = 0; } strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path)); if (freepath != NULL) free(freepath, M_TEMP); /* Pack record size down */ - kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) + - strlen(kve->kve_path) + 1; + if ((flags & KERN_VMMAP_PACK_KINFO) != 0) + kve->kve_structsize = + offsetof(struct kinfo_vmentry, kve_path) + + strlen(kve->kve_path) + 1; + else + kve->kve_structsize = sizeof(*kve); kve->kve_structsize = roundup(kve->kve_structsize, sizeof(uint64_t)); + + /* Halt filling and truncate rather than exceeding maxlen */ + if (maxlen != -1 && maxlen < kve->kve_structsize) { + error = 0; + vm_map_lock_read(map); + break; + } else if (maxlen != -1) + maxlen -= kve->kve_structsize; + if (sbuf_bcat(sb, kve, kve->kve_structsize) != 0) error = ENOMEM; vm_map_lock_read(map); if (error != 0) break; if (last_timestamp != map->timestamp) { vm_map_lookup_entry(map, addr - 1, &tmp_entry); entry = tmp_entry; } } vm_map_unlock_read(map); vmspace_free(vm); PRELE(p); free(kve, M_TEMP); return (error); } static int sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS) { struct proc *p; struct sbuf sb; int error, error2, *name; name = (int *)arg1; sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_vmentry), req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p); if (error != 0) { sbuf_delete(&sb); return (error); } - error = kern_proc_vmmap_out(p, &sb); + error = kern_proc_vmmap_out(p, &sb, -1, KERN_VMMAP_PACK_KINFO); error2 = sbuf_finish(&sb); sbuf_delete(&sb); return (error != 0 ? error : error2); } #if defined(STACK) || defined(DDB) static int sysctl_kern_proc_kstack(SYSCTL_HANDLER_ARGS) { struct kinfo_kstack *kkstp; int error, i, *name, numthreads; lwpid_t *lwpidarray; struct thread *td; struct stack *st; struct sbuf sb; struct proc *p; name = (int *)arg1; error = pget((pid_t)name[0], PGET_NOTINEXEC | PGET_WANTREAD, &p); if (error != 0) return (error); kkstp = malloc(sizeof(*kkstp), M_TEMP, M_WAITOK); st = stack_create(); lwpidarray = NULL; numthreads = 0; PROC_LOCK(p); repeat: if (numthreads < p->p_numthreads) { if (lwpidarray != NULL) { free(lwpidarray, M_TEMP); lwpidarray = NULL; } numthreads = p->p_numthreads; PROC_UNLOCK(p); lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP, M_WAITOK | M_ZERO); PROC_LOCK(p); goto repeat; } i = 0; /* * XXXRW: During the below loop, execve(2) and countless other sorts * of changes could have taken place. Should we check to see if the * vmspace has been replaced, or the like, in order to prevent * giving a snapshot that spans, say, execve(2), with some threads * before and some after? Among other things, the credentials could * have changed, in which case the right to extract debug info might * no longer be assured. 
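 *
 * As the code stands no such check is made, so consumers get a
 * best-effort snapshot.  Illustrative userland sketch (not part of this
 * change) of requesting it:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_KSTACK, pid };
 *	(the reply is a sequence of struct kinfo_kstack records, one per
 *	 thread, each carrying kkst_tid and a textual trace in kkst_trace)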
*/ FOREACH_THREAD_IN_PROC(p, td) { KASSERT(i < numthreads, ("sysctl_kern_proc_kstack: numthreads")); lwpidarray[i] = td->td_tid; i++; } numthreads = i; for (i = 0; i < numthreads; i++) { td = thread_find(p, lwpidarray[i]); if (td == NULL) { continue; } bzero(kkstp, sizeof(*kkstp)); (void)sbuf_new(&sb, kkstp->kkst_trace, sizeof(kkstp->kkst_trace), SBUF_FIXEDLEN); thread_lock(td); kkstp->kkst_tid = td->td_tid; if (TD_IS_SWAPPED(td)) { kkstp->kkst_state = KKST_STATE_SWAPPED; } else if (TD_IS_RUNNING(td)) { if (stack_save_td_running(st, td) == 0) kkstp->kkst_state = KKST_STATE_STACKOK; else kkstp->kkst_state = KKST_STATE_RUNNING; } else { kkstp->kkst_state = KKST_STATE_STACKOK; stack_save_td(st, td); } thread_unlock(td); PROC_UNLOCK(p); stack_sbuf_print(&sb, st); sbuf_finish(&sb); sbuf_delete(&sb); error = SYSCTL_OUT(req, kkstp, sizeof(*kkstp)); PROC_LOCK(p); if (error) break; } _PRELE(p); PROC_UNLOCK(p); if (lwpidarray != NULL) free(lwpidarray, M_TEMP); stack_destroy(st); free(kkstp, M_TEMP); return (error); } #endif /* * This sysctl allows a process to retrieve the full list of groups from * itself or another process. */ static int sysctl_kern_proc_groups(SYSCTL_HANDLER_ARGS) { pid_t *pidp = (pid_t *)arg1; unsigned int arglen = arg2; struct proc *p; struct ucred *cred; int error; if (arglen != 1) return (EINVAL); if (*pidp == -1) { /* -1 means this process */ p = req->td->td_proc; PROC_LOCK(p); } else { error = pget(*pidp, PGET_CANSEE, &p); if (error != 0) return (error); } cred = crhold(p->p_ucred); PROC_UNLOCK(p); error = SYSCTL_OUT(req, cred->cr_groups, cred->cr_ngroups * sizeof(gid_t)); crfree(cred); return (error); } /* * This sysctl allows a process to retrieve or/and set the resource limit for * another process. */ static int sysctl_kern_proc_rlimit(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct rlimit rlim; struct proc *p; u_int which; int flags, error; if (namelen != 2) return (EINVAL); which = (u_int)name[1]; if (which >= RLIM_NLIMITS) return (EINVAL); if (req->newptr != NULL && req->newlen != sizeof(rlim)) return (EINVAL); flags = PGET_HOLD | PGET_NOTWEXIT; if (req->newptr != NULL) flags |= PGET_CANDEBUG; else flags |= PGET_CANSEE; error = pget((pid_t)name[0], flags, &p); if (error != 0) return (error); /* * Retrieve limit. */ if (req->oldptr != NULL) { PROC_LOCK(p); lim_rlimit_proc(p, which, &rlim); PROC_UNLOCK(p); } error = SYSCTL_OUT(req, &rlim, sizeof(rlim)); if (error != 0) goto errout; /* * Set limit. */ if (req->newptr != NULL) { error = SYSCTL_IN(req, &rlim, sizeof(rlim)); if (error == 0) error = kern_proc_setrlimit(curthread, p, which, &rlim); } errout: PRELE(p); return (error); } /* * This sysctl allows a process to retrieve ps_strings structure location of * another process. */ static int sysctl_kern_proc_ps_strings(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; vm_offset_t ps_strings; int error; #ifdef COMPAT_FREEBSD32 uint32_t ps_strings32; #endif if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_CANDEBUG, &p); if (error != 0) return (error); #ifdef COMPAT_FREEBSD32 if ((req->flags & SCTL_MASK32) != 0) { /* * We return 0 if the 32 bit emulation request is for a 64 bit * process. */ ps_strings32 = SV_PROC_FLAG(p, SV_ILP32) != 0 ? 
PTROUT(p->p_sysent->sv_psstrings) : 0; PROC_UNLOCK(p); error = SYSCTL_OUT(req, &ps_strings32, sizeof(ps_strings32)); return (error); } #endif ps_strings = p->p_sysent->sv_psstrings; PROC_UNLOCK(p); error = SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)); return (error); } /* * This sysctl allows a process to retrieve umask of another process. */ static int sysctl_kern_proc_umask(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; int error; u_short fd_cmask; if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_WANTREAD, &p); if (error != 0) return (error); FILEDESC_SLOCK(p->p_fd); fd_cmask = p->p_fd->fd_cmask; FILEDESC_SUNLOCK(p->p_fd); PRELE(p); error = SYSCTL_OUT(req, &fd_cmask, sizeof(fd_cmask)); return (error); } /* * This sysctl allows a process to set and retrieve binary osreldate of * another process. */ static int sysctl_kern_proc_osrel(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; int flags, error, osrel; if (namelen != 1) return (EINVAL); if (req->newptr != NULL && req->newlen != sizeof(osrel)) return (EINVAL); flags = PGET_HOLD | PGET_NOTWEXIT; if (req->newptr != NULL) flags |= PGET_CANDEBUG; else flags |= PGET_CANSEE; error = pget((pid_t)name[0], flags, &p); if (error != 0) return (error); error = SYSCTL_OUT(req, &p->p_osrel, sizeof(p->p_osrel)); if (error != 0) goto errout; if (req->newptr != NULL) { error = SYSCTL_IN(req, &osrel, sizeof(osrel)); if (error != 0) goto errout; if (osrel < 0) { error = EINVAL; goto errout; } p->p_osrel = osrel; } errout: PRELE(p); return (error); } static int sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct proc *p; struct kinfo_sigtramp kst; const struct sysentvec *sv; int error; #ifdef COMPAT_FREEBSD32 struct kinfo_sigtramp32 kst32; #endif if (namelen != 1) return (EINVAL); error = pget((pid_t)name[0], PGET_CANDEBUG, &p); if (error != 0) return (error); sv = p->p_sysent; #ifdef COMPAT_FREEBSD32 if ((req->flags & SCTL_MASK32) != 0) { bzero(&kst32, sizeof(kst32)); if (SV_PROC_FLAG(p, SV_ILP32)) { if (sv->sv_sigcode_base != 0) { kst32.ksigtramp_start = sv->sv_sigcode_base; kst32.ksigtramp_end = sv->sv_sigcode_base + *sv->sv_szsigcode; } else { kst32.ksigtramp_start = sv->sv_psstrings - *sv->sv_szsigcode; kst32.ksigtramp_end = sv->sv_psstrings; } } PROC_UNLOCK(p); error = SYSCTL_OUT(req, &kst32, sizeof(kst32)); return (error); } #endif bzero(&kst, sizeof(kst)); if (sv->sv_sigcode_base != 0) { kst.ksigtramp_start = (char *)sv->sv_sigcode_base; kst.ksigtramp_end = (char *)sv->sv_sigcode_base + *sv->sv_szsigcode; } else { kst.ksigtramp_start = (char *)sv->sv_psstrings - *sv->sv_szsigcode; kst.ksigtramp_end = (char *)sv->sv_psstrings; } PROC_UNLOCK(p); error = SYSCTL_OUT(req, &kst, sizeof(kst)); return (error); } SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table"); SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT| CTLFLAG_MPSAFE, 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_GID, gid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_RGID, rgid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_SESSION, sid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static 
SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PROC, proc, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Return process table, no threads"); static SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_CAPWR | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_args, "Process argument list"); static SYSCTL_NODE(_kern_proc, KERN_PROC_ENV, env, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_env, "Process environment"); static SYSCTL_NODE(_kern_proc, KERN_PROC_AUXV, auxv, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_auxv, "Process ELF auxiliary vector"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_pathname, "Process executable path"); static SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_GID | KERN_PROC_INC_THREAD), gid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_INC_THREAD), pgrp_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_RGID | KERN_PROC_INC_THREAD), rgid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_SESSION | KERN_PROC_INC_THREAD), sid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_INC_THREAD), tty_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_INC_THREAD), uid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_INC_THREAD), ruid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Process table"); static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc, "Return process table, no threads"); #ifdef COMPAT_FREEBSD7 static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_ovmmap, "Old Process vm map entries"); #endif static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_vmmap, "Process vm map entries"); #if defined(STACK) || defined(DDB) static SYSCTL_NODE(_kern_proc, KERN_PROC_KSTACK, kstack, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_kstack, "Process kernel stacks"); #endif static SYSCTL_NODE(_kern_proc, KERN_PROC_GROUPS, groups, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_groups, "Process groups"); static SYSCTL_NODE(_kern_proc, KERN_PROC_RLIMIT, rlimit, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_rlimit, "Process resource limits"); static SYSCTL_NODE(_kern_proc, KERN_PROC_PS_STRINGS, ps_strings, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_ps_strings, 
"Process ps_strings location"); static SYSCTL_NODE(_kern_proc, KERN_PROC_UMASK, umask, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_umask, "Process umask"); static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, sysctl_kern_proc_osrel, "Process binary osreldate"); static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_kern_proc_sigtramp, "Process signal trampoline location"); int allproc_gen; void stop_all_proc(void) { struct proc *cp, *p; int r, gen; bool restart, seen_stopped, seen_exiting, stopped_some; cp = curproc; /* * stop_all_proc() assumes that all process which have * usermode must be stopped, except current process, for * obvious reasons. Since other threads in the process * establishing global stop could unstop something, disable * calls from multithreaded processes as precaution. The * service must not be user-callable anyway. */ KASSERT((cp->p_flag & P_HADTHREADS) == 0 || (cp->p_flag & P_KTHREAD) != 0, ("mt stop_all_proc")); allproc_loop: sx_xlock(&allproc_lock); gen = allproc_gen; seen_exiting = seen_stopped = stopped_some = restart = false; LIST_REMOVE(cp, p_list); LIST_INSERT_HEAD(&allproc, cp, p_list); for (;;) { p = LIST_NEXT(cp, p_list); if (p == NULL) break; LIST_REMOVE(cp, p_list); LIST_INSERT_AFTER(p, cp, p_list); PROC_LOCK(p); if ((p->p_flag & (P_KTHREAD | P_SYSTEM | P_TOTAL_STOP)) != 0) { PROC_UNLOCK(p); continue; } if ((p->p_flag & P_WEXIT) != 0) { seen_exiting = true; PROC_UNLOCK(p); continue; } if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { /* * Stopped processes are tolerated when there * are no other processes which might continue * them. P_STOPPED_SINGLE but not * P_TOTAL_STOP process still has at least one * thread running. */ seen_stopped = true; PROC_UNLOCK(p); continue; } _PHOLD(p); sx_xunlock(&allproc_lock); r = thread_single(p, SINGLE_ALLPROC); if (r != 0) restart = true; else stopped_some = true; _PRELE(p); PROC_UNLOCK(p); sx_xlock(&allproc_lock); } /* Catch forked children we did not see in iteration. 
*/ if (gen != allproc_gen) restart = true; sx_xunlock(&allproc_lock); if (restart || stopped_some || seen_exiting || seen_stopped) { kern_yield(PRI_USER); goto allproc_loop; } } void resume_all_proc(void) { struct proc *cp, *p; cp = curproc; sx_xlock(&allproc_lock); LIST_REMOVE(cp, p_list); LIST_INSERT_HEAD(&allproc, cp, p_list); for (;;) { p = LIST_NEXT(cp, p_list); if (p == NULL) break; LIST_REMOVE(cp, p_list); LIST_INSERT_AFTER(p, cp, p_list); PROC_LOCK(p); if ((p->p_flag & P_TOTAL_STOP) != 0) { sx_xunlock(&allproc_lock); _PHOLD(p); thread_single_end(p, SINGLE_ALLPROC); _PRELE(p); PROC_UNLOCK(p); sx_xlock(&allproc_lock); } else { PROC_UNLOCK(p); } } sx_xunlock(&allproc_lock); } #define TOTAL_STOP_DEBUG 1 #ifdef TOTAL_STOP_DEBUG volatile static int ap_resume; #include static int sysctl_debug_stop_all_proc(SYSCTL_HANDLER_ARGS) { int error, val; val = 0; ap_resume = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val != 0) { stop_all_proc(); syncer_suspend(); while (ap_resume == 0) ; syncer_resume(); resume_all_proc(); } return (0); } SYSCTL_PROC(_debug, OID_AUTO, stop_all_proc, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, __DEVOLATILE(int *, &ap_resume), 0, sysctl_debug_stop_all_proc, "I", ""); #endif Index: user/ngie/more-tests2/sys/kern/subr_syscall.c =================================================================== --- user/ngie/more-tests2/sys/kern/subr_syscall.c (revision 288954) +++ user/ngie/more-tests2/sys/kern/subr_syscall.c (revision 288955) @@ -1,251 +1,252 @@ /*- * Copyright (C) 1994, David Greenman * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * Copyright (C) 2010 Konstantin Belousov * * This code is derived from software contributed to Berkeley by * the University of Utah, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 */ #include "opt_capsicum.h" #include "opt_ktrace.h" __FBSDID("$FreeBSD$"); #include #include #ifdef KTRACE #include #include #endif #include static inline int syscallenter(struct thread *td, struct syscall_args *sa) { struct proc *p; int error, traced; PCPU_INC(cnt.v_syscall); p = td->td_proc; td->td_pticks = 0; if (td->td_cowgen != p->p_cowgen) thread_cow_update(td); traced = (p->p_flag & P_TRACED) != 0; if (traced || td->td_dbgflags & TDB_USERWR) { PROC_LOCK(p); td->td_dbgflags &= ~TDB_USERWR; if (traced) td->td_dbgflags |= TDB_SCE; PROC_UNLOCK(p); } error = (p->p_sysent->sv_fetch_syscall_args)(td, sa); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSCALL)) ktrsyscall(sa->code, sa->narg, sa->args); #endif KTR_START4(KTR_SYSC, "syscall", syscallname(p, sa->code), (uintptr_t)td, "pid:%d", td->td_proc->p_pid, "arg0:%p", sa->args[0], "arg1:%p", sa->args[1], "arg2:%p", sa->args[2]); if (error == 0) { STOPEVENT(p, S_SCE, sa->narg); - if (p->p_flag & P_TRACED && p->p_stops & S_PT_SCE) { + if (p->p_flag & P_TRACED) { PROC_LOCK(p); td->td_dbg_sc_code = sa->code; td->td_dbg_sc_narg = sa->narg; - ptracestop((td), SIGTRAP); + if (p->p_stops & S_PT_SCE) + ptracestop((td), SIGTRAP); PROC_UNLOCK(p); } if (td->td_dbgflags & TDB_USERWR) { /* * Reread syscall number and arguments if * debugger modified registers or memory. */ error = (p->p_sysent->sv_fetch_syscall_args)(td, sa); PROC_LOCK(p); td->td_dbg_sc_code = sa->code; td->td_dbg_sc_narg = sa->narg; PROC_UNLOCK(p); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSCALL)) ktrsyscall(sa->code, sa->narg, sa->args); #endif if (error != 0) goto retval; } #ifdef CAPABILITY_MODE /* * In capability mode, we only allow access to system calls * flagged with SYF_CAPENABLED. */ if (IN_CAPABILITY_MODE(td) && !(sa->callp->sy_flags & SYF_CAPENABLED)) { error = ECAPMODE; goto retval; } #endif error = syscall_thread_enter(td, sa->callp); if (error != 0) goto retval; #ifdef KDTRACE_HOOKS /* * If the systrace module has registered it's probe * callback and if there is a probe active for the * syscall 'entry', process the probe. */ if (systrace_probe_func != NULL && sa->callp->sy_entry != 0) (*systrace_probe_func)(sa->callp->sy_entry, sa->code, sa->callp, sa->args, 0); #endif AUDIT_SYSCALL_ENTER(sa->code, td); error = (sa->callp->sy_call)(td, sa->args); AUDIT_SYSCALL_EXIT(error, td); /* Save the latest error return value. */ if ((td->td_pflags & TDP_NERRNO) == 0) td->td_errno = error; #ifdef KDTRACE_HOOKS /* * If the systrace module has registered it's probe * callback and if there is a probe active for the * syscall 'return', process the probe. */ if (systrace_probe_func != NULL && sa->callp->sy_return != 0) (*systrace_probe_func)(sa->callp->sy_return, sa->code, sa->callp, NULL, (error) ? -1 : td->td_retval[0]); #endif syscall_thread_exit(td, sa->callp); } retval: KTR_STOP4(KTR_SYSC, "syscall", syscallname(p, sa->code), (uintptr_t)td, "pid:%d", td->td_proc->p_pid, "error:%d", error, "retval0:%#lx", td->td_retval[0], "retval1:%#lx", td->td_retval[1]); if (traced) { PROC_LOCK(p); td->td_dbgflags &= ~TDB_SCE; PROC_UNLOCK(p); } (p->p_sysent->sv_set_syscall_retval)(td, error); return (error); } static inline void syscallret(struct thread *td, int error, struct syscall_args *sa) { struct proc *p, *p2; int traced; p = td->td_proc; /* * Handle reschedule and other end-of-syscall issues */ userret(td, td->td_frame); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) { ktrsysret(sa->code, (td->td_pflags & TDP_NERRNO) == 0 ? 
error : td->td_errno, td->td_retval[0]); } #endif td->td_pflags &= ~TDP_NERRNO; if (p->p_flag & P_TRACED) { traced = 1; PROC_LOCK(p); td->td_dbgflags |= TDB_SCX; PROC_UNLOCK(p); } else traced = 0; /* * This works because errno is findable through the * register set. If we ever support an emulation where this * is not the case, this code will need to be revisited. */ STOPEVENT(p, S_SCX, sa->code); if (traced || (td->td_dbgflags & (TDB_EXEC | TDB_FORK)) != 0) { PROC_LOCK(p); /* * If tracing the execed process, trap to the debugger * so that breakpoints can be set before the program * executes. If debugger requested tracing of syscall * returns, do it now too. */ if (traced && ((td->td_dbgflags & (TDB_FORK | TDB_EXEC)) != 0 || (p->p_stops & S_PT_SCX) != 0)) ptracestop(td, SIGTRAP); td->td_dbgflags &= ~(TDB_SCX | TDB_EXEC | TDB_FORK); PROC_UNLOCK(p); } if (td->td_pflags & TDP_RFPPWAIT) { /* * Preserve synchronization semantics of vfork. If * waiting for child to exec or exit, fork set * P_PPWAIT on child, and there we sleep on our proc * (in case of exit). * * Do it after the ptracestop() above is finished, to * not block our debugger until child execs or exits * to finish vfork wait. */ td->td_pflags &= ~TDP_RFPPWAIT; p2 = td->td_rfppwait_p; again: PROC_LOCK(p2); while (p2->p_flag & P_PPWAIT) { PROC_LOCK(p); if (thread_suspend_check_needed()) { PROC_UNLOCK(p2); thread_suspend_check(0); PROC_UNLOCK(p); goto again; } else { PROC_UNLOCK(p); } cv_timedwait(&p2->p_pwait, &p2->p_mtx, hz); } PROC_UNLOCK(p2); } } Index: user/ngie/more-tests2/sys/sys/exec.h =================================================================== --- user/ngie/more-tests2/sys/sys/exec.h (revision 288954) +++ user/ngie/more-tests2/sys/sys/exec.h (revision 288955) @@ -1,130 +1,131 @@ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)exec.h 8.3 (Berkeley) 1/21/94 * $FreeBSD$ */ #ifndef _SYS_EXEC_H_ #define _SYS_EXEC_H_ /* * Before ps_args existed, the following structure, found at the top of * the user stack of each user process, was used by ps(1) to locate * environment and argv strings. Normally ps_argvstr points to the * argv vector, and ps_nargvstr is the same as the program's argc. The * fields ps_envstr and ps_nenvstr are the equivalent for the environment. * * Programs should now use setproctitle(3) to change ps output. * setproctitle() always informs the kernel with sysctl and sets the * pointers in ps_strings. The kern.proc.args sysctl first tries p_args. * If p_args is NULL, it then falls back to reading ps_strings and following * the pointers. */ struct ps_strings { char **ps_argvstr; /* first of 0 or more argument strings */ unsigned int ps_nargvstr; /* the number of argument strings */ char **ps_envstr; /* first of 0 or more environment strings */ unsigned int ps_nenvstr; /* the number of environment strings */ }; /* * Address of ps_strings structure (in user space). * Prefer the kern.ps_strings or kern.proc.ps_strings sysctls to this constant. */ #define PS_STRINGS (USRSTACK - sizeof(struct ps_strings)) #define SPARE_USRSPACE 4096 struct image_params; struct execsw { int (*ex_imgact)(struct image_params *); const char *ex_name; }; #include #ifdef _KERNEL #include int exec_map_first_page(struct image_params *); void exec_unmap_first_page(struct image_params *); int exec_register(const struct execsw *); int exec_unregister(const struct execsw *); extern int coredump_pack_fileinfo; +extern int coredump_pack_vmmapinfo; /* * note: name##_mod cannot be const storage because the * linker_file_sysinit() function modifies _file in the * moduledata_t. */ #include #define EXEC_SET(name, execsw_arg) \ static int __CONCAT(name,_modevent)(module_t mod, int type, \ void *data) \ { \ struct execsw *exec = (struct execsw *)data; \ int error = 0; \ switch (type) { \ case MOD_LOAD: \ /* printf(#name " module loaded\n"); */ \ error = exec_register(exec); \ if (error) \ printf(__XSTRING(name) "register failed\n"); \ break; \ case MOD_UNLOAD: \ /* printf(#name " module unloaded\n"); */ \ error = exec_unregister(exec); \ if (error) \ printf(__XSTRING(name) " unregister failed\n");\ break; \ default: \ error = EOPNOTSUPP; \ break; \ } \ return error; \ } \ static moduledata_t __CONCAT(name,_mod) = { \ __XSTRING(name), \ __CONCAT(name,_modevent), \ (void *)& execsw_arg \ }; \ DECLARE_MODULE_TIED(name, __CONCAT(name,_mod), SI_SUB_EXEC, \ SI_ORDER_ANY) #endif #endif Index: user/ngie/more-tests2/sys/sys/proc.h =================================================================== --- user/ngie/more-tests2/sys/sys/proc.h (revision 288954) +++ user/ngie/more-tests2/sys/sys/proc.h (revision 288955) @@ -1,1051 +1,1051 @@ /*- * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. 
* All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD$ */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #endif #include #include #include #include /* Machine-dependent proc substruct. */ /* * One structure allocated per session. * * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. */ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ int pg_jobc; /* (m) Job control process count. */ struct mtx pg_mtx; /* Mutex to protect members */ }; /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. 
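 *   (Variable-size tail: pargs_alloc() sizes the structure so that
 *    ar_args can hold ar_length bytes of NUL-separated argument
 *    strings, which the kern.proc.args handler returns verbatim.)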
*/ }; /*- * Description of a process. * * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. * * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * l - the attaching proc or attaching proc parent * m - Giant * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * t - thread lock * u - process stat lock * w - process timer lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * z - zombie threads lock * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. */ struct cpuset; struct filecaps; struct kaioinfo; struct kaudit_record; struct kdtrace_proc; struct kdtrace_thread; struct mqueue_notifier; struct nlminfo; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct syscall_args; struct td_sched; struct thread; struct trapframe; struct turnstile; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. */ struct rusage_ext { uint64_t rux_runtime; /* (cu) Real time. */ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. */ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. 
*/ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ struct vm_domain_policy td_vm_dom_policy; /* (c) current numa domain policy */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. */ /* Cleared during fork1() */ #define td_startzero td_flags int td_flags; /* (t) TDF_* flags. */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ - int td_lastcpu; /* (t) Last cpu we were on. */ - int td_oncpu; /* (t) Which cpu we are on. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ short td_locks; /* (k) Debug: count of non-spin locks */ short td_rw_rlocks; /* (k) Count of rwlock read locks. */ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */ short td_stopsched; /* (k) Scheduler stopped. */ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */ const char *td_lockname; /* (t) Name of lock blocked on. */ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */ int td_intr_nesting_level; /* (k) Interrupt recursion. */ int td_pinned; /* (k) Temporary cpu pin count. */ struct ucred *td_ucred; /* (k) Reference to credentials. */ struct plimit *td_limit; /* (k) Resource limits. */ u_int td_estcpu; /* (t) estimated cpu utilization */ int td_slptick; /* (t) Time at sleep. */ int td_blktick; /* (t) Time spent blocked. */ int td_swvoltick; /* (t) Time at last SW_VOL switch. */ u_int td_cow; /* (*) Number of copy-on-write faults */ struct rusage td_ru; /* (t) rusage information. */ struct rusage_ext td_rux; /* (t) Internal rusage information. */ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */ u_int td_pticks; /* (t) Statclock hits for profiling */ u_int td_sticks; /* (t) Statclock hits in system mode. */ u_int td_iticks; /* (t) Statclock hits in intr mode. */ u_int td_uticks; /* (t) Statclock hits in user mode. */ int td_intrval; /* (t) Return value for sleepq. */ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */ volatile u_int td_generation; /* (k) For detection of preemption */ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */ int td_xsig; /* (c) Signal for ptrace */ u_long td_profil_addr; /* (k) Temporary addr until AST. */ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */ struct file *td_fpop; /* (k) file referencing cdev under op */ int td_dbgflags; /* (c) Userland debugger flags */ struct ksiginfo td_dbgksi; /* (c) ksi reflected to debugger. */ int td_ng_outbound; /* (k) Thread entered ng from above. */ struct osd td_osd; /* (k) Object specific data. 
*/ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ pid_t td_dbg_forked; /* (c) Child pid for debugger. */ u_int td_vp_reserv; /* (k) Count of reserved vnodes. */ int td_no_sleeping; /* (k) Sleeping disabled count. */ int td_dom_rr_idx; /* (k) RR Numa domain selection. */ void *td_su; /* (k) FFS SU private */ - u_int td_dbg_sc_code; /* (c) Syscall code to debugger. */ - u_int td_dbg_sc_narg; /* (c) Syscall arg count to debugger.*/ #define td_endzero td_sigmask /* Copied during fork1() or create_thread(). */ #define td_startcopy td_endzero sigset_t td_sigmask; /* (c) Current signal mask. */ u_char td_rqindex; /* (t) Run queue index. */ u_char td_base_pri; /* (t) Thread base kernel priority. */ u_char td_priority; /* (t) Thread active priority. */ u_char td_pri_class; /* (t) Scheduling class. */ u_char td_user_pri; /* (t) User pri from estcpu and nice. */ u_char td_base_user_pri; /* (t) Base user pri */ + u_int td_dbg_sc_code; /* (c) Syscall code to debugger. */ + u_int td_dbg_sc_narg; /* (c) Syscall arg count to debugger.*/ #define td_endcopy td_pcb /* * Fields that must be manually set in fork1() or create_thread() * or already have been set in the allocator, constructor, etc. */ struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */ enum { TDS_INACTIVE = 0x0, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING } td_state; /* (t) thread state */ union { register_t tdu_retval[2]; off_t tdu_off; } td_uretoff; /* (k) Syscall aux returns. */ #define td_retval td_uretoff.tdu_retval u_int td_cowgen; /* (k) Generation of COW pointers. */ struct callout td_slpcallout; /* (h) Callout for sleep. */ struct trapframe *td_frame; /* (k) */ struct vm_object *td_kstack_obj;/* (a) Kstack object. */ vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */ int td_kstack_pages; /* (a) Size of the kstack. */ volatile u_int td_critnest; /* (k*) Critical section nest level. */ struct mdthread td_md; /* (k) Any machine-dependent fields. */ struct td_sched *td_sched; /* (*) Scheduler-specific data. */ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */ struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */ int td_errno; /* Error returned by last syscall. */ struct vnet *td_vnet; /* (k) Effective vnet. */ const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */ struct trapframe *td_intr_frame;/* (k) Frame of the current irq */ struct proc *td_rfppwait_p; /* (k) The vforked child */ struct vm_page **td_ma; /* (k) uio pages held */ int td_ma_cnt; /* (k) size of *td_ma */ void *td_emuldata; /* Emulator state data */ + int td_lastcpu; /* (t) Last cpu we were on. */ + int td_oncpu; /* (t) Which cpu we are on. */ }; struct mtx *thread_lock_block(struct thread *); void thread_lock_unblock(struct thread *, struct mtx *); void thread_lock_set(struct thread *, struct mtx *); #define THREAD_LOCK_ASSERT(td, type) \ do { \ struct mtx *__m = (td)->td_lock; \ if (__m != &blocked_lock) \ mtx_assert(__m, (type)); \ } while (0) #ifdef INVARIANTS #define THREAD_LOCKPTR_ASSERT(td, lock) \ do { \ struct mtx *__m = (td)->td_lock; \ KASSERT((__m == &blocked_lock || __m == (lock)), \ ("Thread %p lock %p does not match %p", td, __m, (lock))); \ } while (0) #define TD_LOCKS_INC(td) ((td)->td_locks++) #define TD_LOCKS_DEC(td) ((td)->td_locks--) #else #define THREAD_LOCKPTR_ASSERT(td, lock) #define TD_LOCKS_INC(td) #define TD_LOCKS_DEC(td) #endif /* * Flags kept in td_flags: * To change these you MUST have the scheduler lock. 
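 *
 * Typical update pattern (illustrative only); the per-thread scheduler
 * lock is taken through thread_lock():
 *
 *	thread_lock(td);
 *	td->td_flags |= TDF_NEEDRESCHED;
 *	thread_unlock(td);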
*/ #define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */ #define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */ #define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */ #define TDF_SINTR 0x00000008 /* Sleep is interruptible. */ #define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */ #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */ #define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */ #define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */ #define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */ #define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */ #define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */ #define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */ #define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */ #define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */ #define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */ #define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */ #define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */ #define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */ #define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */ #define TDF_UNUSED19 0x00080000 /* --available-- */ #define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */ #define TDF_UNUSED21 0x00200000 /* --available-- */ #define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */ #define TDF_UNUSED23 0x00800000 /* --available-- */ #define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */ #define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */ #define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */ #define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */ #define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */ #define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */ #define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */ /* Userland debug flags */ #define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */ #define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */ #define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */ #define TDB_SCE 0x00000008 /* Thread performs syscall enter */ #define TDB_SCX 0x00000010 /* Thread performs syscall exit */ #define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */ #define TDB_FORK 0x00000040 /* TDB_SCX from fork(2) that created new process */ #define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child only) */ #define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */ /* * "Private" flags kept in td_pflags: * These are only written by curthread and thus need no locking. */ #define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */ #define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */ #define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */ #define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */ #define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */ #define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */ #define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */ #define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. 
*/ #define TDP_UNUSED9 0x00000100 /* --available-- */ #define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */ #define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */ #define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. */ #define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */ #define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */ #define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */ #define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */ #define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */ #define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */ #define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */ #define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */ #define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */ #define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */ #define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */ #define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */ #define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */ #define TDP_RFPPWAIT 0x02000000 /* Handle RFPPWAIT on syscall exit */ #define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */ #define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */ #define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */ #define TDP_UNUSED29 0x20000000 /* --available-- */ #define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */ /* * Reasons that the current thread can not be run yet. * More than one may apply. */ #define TDI_SUSPENDED 0x0001 /* On suspension queue. */ #define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */ #define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. */ #define TDI_LOCK 0x0008 /* Stopped on a lock. */ #define TDI_IWAIT 0x0010 /* Awaiting interrupt. 
*/ #define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING) #define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL) #define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED) #define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED) #define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK) #define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT) #define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING) #define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ) #define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN) #define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED) #define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED) #define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD) #define TD_SET_INHIB(td, inhib) do { \ (td)->td_state = TDS_INHIBITED; \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ (td)->td_state = TDS_CAN_RUN; \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING #define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ #define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN /* * Process structure. */ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Resource limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ struct proc *p_reaper; /* (e) My reaper. */ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct mtx p_statmtx; /* Lock for the stats */ struct mtx p_itimmtx; /* Lock for the virt/prof timers */ struct mtx p_profmtx; /* Lock for the profiling */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. 
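The inhibitor macros above compose as follows: TD_SET_INHIB() marks the thread TDS_INHIBITED and records why, and TD_CLR_INHIB() returns it to TDS_CAN_RUN only once the last inhibitor is gone. A sketch loosely modeled on the sleep/wakeup path, assuming the thread lock is held and using setrunnable() (declared further down in this header):

	thread_lock(td);
	TD_SET_SLEEPING(td);		/* td_state -> TDS_INHIBITED, TDI_SLEEPING recorded */
	/* ... later, on wakeup ... */
	TD_CLR_SLEEPING(td);		/* last inhibitor cleared -> TDS_CAN_RUN */
	if (TD_CAN_RUN(td))
		setrunnable(td);	/* hand the thread back to the scheduler */
	thread_unlock(td);
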
*/ #define p_siglist p_sigqueue.sq_signals /* The following fields are all zeroed upon creation in fork. */ #define p_startzero p_oppid pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */ struct vmspace *p_vmspace; /* (b) Address space. */ u_int p_swtick; /* (c) Tick when swapped in or out. */ u_int p_cowgen; /* (c) Generation of COW pointers. */ struct itimerval p_realtimer; /* (c) Alarm timer. */ struct rusage p_ru; /* (a) Exit information. */ struct rusage_ext p_rux; /* (cu) Internal resource usage. */ struct rusage_ext p_crux; /* (c) Internal child resource usage. */ int p_profthreads; /* (c) Num threads in addupc_task. */ volatile int p_exitthreads; /* (j) Number of threads exiting */ int p_traceflag; /* (o) Kernel trace points. */ struct vnode *p_tracevp; /* (c + o) Trace to vnode. */ struct ucred *p_tracecred; /* (o) Credentials to trace with. */ struct vnode *p_textvp; /* (b) Vnode of executable. */ u_int p_lock; /* (c) Proclock (prevent swap) count. */ struct sigiolst p_sigiolst; /* (c) List of sigio sources. */ int p_sigparent; /* (c) Signal to parent on exit. */ int p_sig; /* (n) For core dump/debugger XXX. */ u_long p_code; /* (n) For core dump/debugger XXX. */ u_int p_stops; /* (c) Stop event bitmask. */ u_int p_stype; /* (c) Stop event type. */ char p_step; /* (c) Process is stopped. */ u_char p_pfsflags; /* (c) Procfs flags. */ struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */ struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */ struct thread *p_singlethread;/* (c + j) If single threading this is it */ int p_suspcount; /* (j) Num threads in suspended mode. */ struct thread *p_xthread; /* (c) Trap thread */ int p_boundary_count;/* (j) Num threads at user boundary */ int p_pendingcnt; /* how many signals are pending */ struct itimers *p_itimers; /* (c) POSIX interval timers. */ struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */ u_int p_treeflag; /* (e) P_TREE flags */ /* End area that is zeroed on creation. */ #define p_endzero p_magic /* The following fields are all copied upon creation in fork. */ #define p_startcopy p_endzero u_int p_magic; /* (b) Magic number. */ int p_osrel; /* (x) osreldate for the binary (from ELF note, if any) */ char p_comm[MAXCOMLEN + 1]; /* (b) Process name. */ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */ struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */ struct pargs *p_args; /* (c) Process arguments. */ rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */ signed char p_nice; /* (c) Process "nice" value. */ int p_fibnum; /* in this routing domain XXX MRT */ pid_t p_reapsubtree; /* (e) Pid of the direct child of the reaper which spawned our subtree. */ u_int p_xexit; /* (c) Exit code. */ u_int p_xsig; /* (c) Stop/kill sig. */ /* End area that is copied on creation. */ #define p_endcopy p_xsig struct knlist p_klist; /* (c) Knotes attached to this proc. */ int p_numthreads; /* (c) Number of threads. */ struct mdproc p_md; /* Any machine-dependent fields. */ struct callout p_itcallout; /* (h + c) Interval timer callout. */ u_short p_acflag; /* (c) Accounting flags. */ struct proc *p_peers; /* (r) */ struct proc *p_leader; /* (b) */ void *p_emuldata; /* (c) Emulator state data. */ struct label *p_label; /* (*) Proc (not subject) MAC label. */ struct p_sched *p_sched; /* (*) Scheduler-specific data. */ STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */ LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/ struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. 
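The p_startzero/p_endzero and p_startcopy/p_endcopy markers in the structure above delimit the regions that fork re-initializes wholesale; the conventional pattern in kern_fork.c looks roughly like this (shown for illustration only, with p1 the parent and p2 the new child):

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
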
*/ struct cv p_pwait; /* (*) wait cv for exit/exec. */ struct cv p_dbgwait; /* (*) wait cv for debugger attach after fork. */ uint64_t p_prev_runtime; /* (c) Resource usage accounting. */ struct racct *p_racct; /* (b) Resource accounting. */ u_char p_throttled; /* (c) Flag for racct pcpu throttling */ struct vm_domain_policy p_vm_dom_policy; /* (c) process default VM domain, or -1 */ /* * An orphan is the child that has beed re-parented to the * debugger as a result of attaching to it. Need to keep * track of them for parent to be able to collect the exit * status of what used to be children. */ LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */ LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */ }; #define p_session p_pgrp->pg_session #define p_pgid p_pgrp->pg_id #define NOCPU (-1) /* For when we aren't on a CPU. */ #define NOCPU_OLD (255) #define MAXCPU_OLD (254) #define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx) #define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx) #define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type)) #define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx) #define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx) #define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type)) #define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx) #define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx) #define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type)) /* These flags are kept in p_flag. */ #define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */ #define P_CONTROLT 0x00002 /* Has a controlling terminal. */ #define P_KTHREAD 0x00004 /* Kernel thread (*). */ #define P_FOLLOWFORK 0x00008 /* Attach parent debugger to children. */ #define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */ #define P_PROFIL 0x00020 /* Has started profiling. */ #define P_STOPPROF 0x00040 /* Has thread requesting to stop profiling. */ #define P_HADTHREADS 0x00080 /* Has had threads (no cleanup shortcuts) */ #define P_SUGID 0x00100 /* Had set id privileges since last exec. */ #define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */ #define P_SINGLE_EXIT 0x00400 /* Threads suspending should exit, not wait. */ #define P_TRACED 0x00800 /* Debugged process being traced. */ #define P_WAITED 0x01000 /* Someone is waiting for us. */ #define P_WEXIT 0x02000 /* Working on exiting. */ #define P_EXEC 0x04000 /* Process called exec. */ #define P_WKILLED 0x08000 /* Killed, go to kernel/user boundary ASAP. */ #define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */ #define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP. */ #define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing. */ #define P_STOPPED_SINGLE 0x80000 /* Only 1 thread can continue (not to user). */ #define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */ #define P_SIGEVENT 0x200000 /* Process pending signals changed. */ #define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */ #define P_HWPMC 0x800000 /* Process is using HWPMCs */ #define P_JAILED 0x1000000 /* Process is in jail. */ #define P_TOTAL_STOP 0x2000000 /* Stopped in stop_all_proc. */ #define P_INEXEC 0x4000000 /* Process is in execve(). */ #define P_STATCHILD 0x8000000 /* Child process stopped or exited. 
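Each of the fine-grained per-process spin locks above follows the same lock/assert/unlock discipline. An illustrative sketch using the statistics lock; the helper is hypothetical and stands in for any code that wants a consistent view of the runtime accounting fields:

	static void
	examine_runtime(struct proc *p)
	{

		PROC_STATLOCK_ASSERT(p, MA_OWNED);
		/* Runtime/statistics fields may be read consistently here. */
	}

	/* Caller: */
	PROC_STATLOCK(p);
	examine_runtime(p);
	PROC_STATUNLOCK(p);
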
*/ #define P_INMEM 0x10000000 /* Loaded into memory. */ #define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */ #define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */ #define P_PPTRACE 0x80000000 /* PT_TRACEME by vforked child. */ #define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE) #define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED) #define P_KILLED(p) ((p)->p_flag & P_WKILLED) /* These flags are kept in p_flag2. */ #define P2_INHERIT_PROTECTED 0x00000001 /* New children get P_PROTECTED. */ #define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or coredumps. */ #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ /* Flags protected by proctree_lock, kept in p_treeflags. */ #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ #define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan list */ #define P_TREE_REAPER 0x00000004 /* Reaper of subtree */ /* * These were process status values (p_stat), now they are only used in * legacy conversion code. */ #define SIDL 1 /* Process being created by fork. */ #define SRUN 2 /* Currently runnable. */ #define SSLEEP 3 /* Sleeping on an address. */ #define SSTOP 4 /* Process debugging or suspension. */ #define SZOMB 5 /* Awaiting collection by parent. */ #define SWAIT 6 /* Waiting for interrupt. */ #define SLOCK 7 /* Blocked on a lock. */ #define P_MAGIC 0xbeefface #ifdef _KERNEL /* Types and flags for mi_switch(). */ #define SW_TYPE_MASK 0xff /* First 8 bits are switch type */ #define SWT_NONE 0 /* Unspecified switch. */ #define SWT_PREEMPT 1 /* Switching due to preemption. */ #define SWT_OWEPREEMPT 2 /* Switching due to opepreempt. */ #define SWT_TURNSTILE 3 /* Turnstile contention. */ #define SWT_SLEEPQ 4 /* Sleepq wait. */ #define SWT_SLEEPQTIMO 5 /* Sleepq timeout wait. */ #define SWT_RELINQUISH 6 /* yield call. */ #define SWT_NEEDRESCHED 7 /* NEEDRESCHED was set. */ #define SWT_IDLE 8 /* Switching from the idle thread. */ #define SWT_IWAIT 9 /* Waiting for interrupts. */ #define SWT_SUSPEND 10 /* Thread suspended. */ #define SWT_REMOTEPREEMPT 11 /* Remote processor preempted. */ #define SWT_REMOTEWAKEIDLE 12 /* Remote processor preempted idle. */ #define SWT_COUNT 13 /* Number of switch types. */ /* Flags */ #define SW_VOL 0x0100 /* Voluntary switch. */ #define SW_INVOL 0x0200 /* Involuntary switch. */ #define SW_PREEMPT 0x0400 /* The invol switch is a preemption */ /* How values for thread_single(). */ #define SINGLE_NO_EXIT 0 #define SINGLE_EXIT 1 #define SINGLE_BOUNDARY 2 #define SINGLE_ALLPROC 3 #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_PARGS); MALLOC_DECLARE(M_PGRP); MALLOC_DECLARE(M_SESSION); MALLOC_DECLARE(M_SUBPROC); #endif #define FOREACH_PROC_IN_SYSTEM(p) \ LIST_FOREACH((p), &allproc, p_list) #define FOREACH_THREAD_IN_PROC(p, td) \ TAILQ_FOREACH((td), &(p)->p_threads, td_plist) #define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads) /* * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit * in a pid_t, as it is used to represent "no process group". 
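The FOREACH_* iterators above are normally nested inside the allproc sx lock and each process's own mutex (both declared later in this header). A minimal sketch, assuming that usual locking discipline, which counts threads system-wide:

	struct proc *p;
	struct thread *td;
	int nthreads = 0;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td)
			nthreads++;
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
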
*/ #define PID_MAX 99999 #define NO_PID 100000 extern pid_t pid_max; #define SESS_LEADER(p) ((p)->p_session->s_leader == (p)) #define STOPEVENT(p, e, v) do { \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) { \ PROC_LOCK(p); \ stopevent((p), (e), (v)); \ PROC_UNLOCK(p); \ } \ } while (0) #define _STOPEVENT(p, e, v) do { \ PROC_LOCK_ASSERT(p, MA_OWNED); \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) \ stopevent((p), (e), (v)); \ } while (0) /* Lock and unlock a process. */ #define PROC_LOCK(p) mtx_lock(&(p)->p_mtx) #define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx) #define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx) #define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx) #define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type)) /* Lock and unlock a process group. */ #define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx) #define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx) #define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx) #define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type)) #define PGRP_LOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_LOCK(pg); \ } while (0) #define PGRP_UNLOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_UNLOCK(pg); \ } while (0) /* Lock and unlock a session. */ #define SESS_LOCK(s) mtx_lock(&(s)->s_mtx) #define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx) #define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx) #define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type)) /* Hold process U-area in memory, normally for ptrace/procfs work. */ #define PHOLD(p) do { \ PROC_LOCK(p); \ _PHOLD(p); \ PROC_UNLOCK(p); \ } while (0) #define _PHOLD(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process")); \ (p)->p_lock++; \ if (((p)->p_flag & P_INMEM) == 0) \ faultin((p)); \ } while (0) #define PROC_ASSERT_HELD(p) do { \ KASSERT((p)->p_lock > 0, ("process not held")); \ } while (0) #define PRELE(p) do { \ PROC_LOCK((p)); \ _PRELE((p)); \ PROC_UNLOCK((p)); \ } while (0) #define _PRELE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ PROC_ASSERT_HELD(p); \ (--(p)->p_lock); \ if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \ wakeup(&(p)->p_lock); \ } while (0) #define PROC_ASSERT_NOT_HELD(p) do { \ KASSERT((p)->p_lock == 0, ("process held")); \ } while (0) #define PROC_UPDATE_COW(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ (p)->p_cowgen++; \ } while (0) /* Check whether a thread is safe to be swapped out. */ #define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP) /* Control whether or not it is safe for curthread to sleep. */ #define THREAD_NO_SLEEPING() ((curthread)->td_no_sleeping++) #define THREAD_SLEEPING_OK() ((curthread)->td_no_sleeping--) #define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0) #define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; extern u_long pidhash; #define TIDHASH(tid) (&tidhashtbl[(tid) & tidhash]) extern LIST_HEAD(tidhashhead, thread) *tidhashtbl; extern u_long tidhash; extern struct rwlock tidhash_lock; #define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; extern u_long pgrphash; extern struct sx allproc_lock; extern int allproc_gen; extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct proc proc0; /* Process slot for swapper. */ extern struct thread thread0; /* Primary thread in proc0. 
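PHOLD()/PRELE() bracket work on another process that may sleep: the hold keeps the target resident (faulting it back in if necessary, per _PHOLD above) and keeps the final stages of exit from completing until the hold is dropped. A minimal sketch of the usual pairing:

	PROC_LOCK(p);
	_PHOLD(p);		/* bumps p_lock; faults the process in if it is swapped out */
	PROC_UNLOCK(p);

	/* ... inspect or operate on p here; sleeping is allowed while held ... */

	PRELE(p);		/* drops the hold; wakes a waiter if p is exiting */
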
*/ extern struct vmspace vmspace0; /* VM space for proc0. */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int lastpid; extern int nprocs, maxproc; /* Current and max number of procs. */ extern int maxprocperuid; /* Max procs per uid. */ extern u_long ps_arg_cache_limit; LIST_HEAD(proclist, proc); TAILQ_HEAD(procqueue, proc); TAILQ_HEAD(threadqueue, thread); extern struct proclist allproc; /* List of all processes. */ extern struct proclist zombproc; /* List of zombie processes. */ extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */ extern struct uma_zone *proc_zone; struct proc *pfind(pid_t); /* Find process by id. */ struct proc *pfind_locked(pid_t pid); struct pgrp *pgfind(pid_t); /* Find process group by id. */ struct proc *zpfind(pid_t); /* Find zombie process by id. */ /* * pget() flags. */ #define PGET_HOLD 0x00001 /* Hold the process. */ #define PGET_CANSEE 0x00002 /* Check against p_cansee(). */ #define PGET_CANDEBUG 0x00004 /* Check against p_candebug(). */ #define PGET_ISCURRENT 0x00008 /* Check that the found process is current. */ #define PGET_NOTWEXIT 0x00010 /* Check that the process is not in P_WEXIT. */ #define PGET_NOTINEXEC 0x00020 /* Check that the process is not in P_INEXEC. */ #define PGET_NOTID 0x00040 /* Do not assume tid if pid > PID_MAX. */ #define PGET_WANTREAD (PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT) int pget(pid_t pid, int flags, struct proc **pp); void ast(struct trapframe *framep); struct thread *choosethread(void); int cr_cansignal(struct ucred *cred, struct proc *proc, int signum); int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess); int enterthispgrp(struct proc *p, struct pgrp *pgrp); void faultin(struct proc *p); void fixjobc(struct proc *p, struct pgrp *pgrp, int entering); int fork1(struct thread *, int, int, struct proc **, int *, int, struct filecaps *); void fork_exit(void (*)(void *, struct trapframe *), void *, struct trapframe *); void fork_return(struct thread *, struct trapframe *); int inferior(struct proc *p); void kern_yield(int); void kick_proc0(void); int leavepgrp(struct proc *p); int maybe_preempt(struct thread *td); void maybe_yield(void); void mi_switch(int flags, struct thread *newtd); int p_candebug(struct thread *td, struct proc *p); int p_cansee(struct thread *td, struct proc *p); int p_cansched(struct thread *td, struct proc *p); int p_cansignal(struct thread *td, struct proc *p, int signum); int p_canwait(struct thread *td, struct proc *p); struct pargs *pargs_alloc(int len); void pargs_drop(struct pargs *pa); void pargs_hold(struct pargs *pa); int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb); void procinit(void); void proc_linkup0(struct proc *p, struct thread *td); void proc_linkup(struct proc *p, struct thread *td); struct proc *proc_realparent(struct proc *child); void proc_reap(struct thread *td, struct proc *p, int *status, int options); void proc_reparent(struct proc *child, struct proc *newparent); struct pstats *pstats_alloc(void); void pstats_fork(struct pstats *src, struct pstats *dst); void pstats_free(struct pstats *ps); void reaper_abandon_children(struct proc *p, bool exiting); int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void sess_hold(struct session *); void sess_release(struct session *); int setrunnable(struct thread *); void 
setsugid(struct proc *p); int should_yield(void); int sigonstack(size_t sp); void stopevent(struct proc *, u_int, u_int); struct thread *tdfind(lwpid_t, pid_t); void threadinit(void); void tidhash_add(struct thread *); void tidhash_remove(struct thread *); void cpu_idle(int); int cpu_idle_wakeup(int); extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */ void cpu_switch(struct thread *, struct thread *, struct mtx *); void cpu_throw(struct thread *, struct thread *) __dead2; void unsleep(struct thread *); void userret(struct thread *, struct trapframe *); void cpu_exit(struct thread *); void exit1(struct thread *, int, int) __dead2; int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa); void cpu_fork(struct thread *, struct proc *, struct thread *, int); void cpu_set_fork_handler(struct thread *, void (*)(void *), void *); void cpu_set_syscall_retval(struct thread *, int); void cpu_set_upcall(struct thread *td, struct thread *td0); void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *, stack_t *); int cpu_set_user_tls(struct thread *, void *tls_base); void cpu_thread_alloc(struct thread *); void cpu_thread_clean(struct thread *); void cpu_thread_exit(struct thread *); void cpu_thread_free(struct thread *); void cpu_thread_swapin(struct thread *); void cpu_thread_swapout(struct thread *); struct thread *thread_alloc(int pages); int thread_alloc_stack(struct thread *, int pages); void thread_cow_get_proc(struct thread *newtd, struct proc *p); void thread_cow_get(struct thread *newtd, struct thread *td); void thread_cow_free(struct thread *td); void thread_cow_update(struct thread *td); int thread_create(struct thread *td, struct rtprio *rtp, int (*initialize_thread)(struct thread *, void *), void *thunk); void thread_exit(void) __dead2; void thread_free(struct thread *td); void thread_link(struct thread *td, struct proc *p); void thread_reap(void); int thread_single(struct proc *p, int how); void thread_single_end(struct proc *p, int how); void thread_stash(struct thread *td); void thread_stopped(struct proc *p); void childproc_stopped(struct proc *child, int reason); void childproc_continued(struct proc *child); void childproc_exited(struct proc *child); int thread_suspend_check(int how); bool thread_suspend_check_needed(void); void thread_suspend_switch(struct thread *, struct proc *p); void thread_suspend_one(struct thread *td); void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); void thread_wait(struct proc *p); struct thread *thread_find(struct proc *p, lwpid_t tid); void stop_all_proc(void); void resume_all_proc(void); static __inline int curthread_pflags_set(int flags) { struct thread *td; int save; td = curthread; save = ~flags | (td->td_pflags & flags); td->td_pflags |= flags; return (save); } static __inline void curthread_pflags_restore(int save) { curthread->td_pflags &= save; } #endif /* _KERNEL */ #endif /* !_SYS_PROC_H_ */ Index: user/ngie/more-tests2/sys/sys/user.h =================================================================== --- user/ngie/more-tests2/sys/sys/user.h (revision 288954) +++ user/ngie/more-tests2/sys/sys/user.h (revision 288955) @@ -1,564 +1,568 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. * Copyright (c) 2007 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)user.h 8.2 (Berkeley) 9/23/93 * $FreeBSD$ */ #ifndef _SYS_USER_H_ #define _SYS_USER_H_ #include #ifndef _KERNEL /* stuff that *used* to be included by user.h, or is now needed */ #include #include #include #include #include #include #include #include #include #include /* XXX */ #include /* XXX */ #include /* XXX */ #include /* XXX */ #endif /* !_KERNEL */ #ifndef _SYS_RESOURCEVAR_H_ #include #endif #ifndef _SYS_SIGNALVAR_H_ #include #endif #ifndef _SYS_SOCKET_VAR_H_ #include #endif #include /* * KERN_PROC subtype ops return arrays of selected proc structure entries: * * This struct includes several arrays of spare space, with different arrays * for different standard C-types. When adding new variables to this struct, * the space for byte-aligned data should be taken from the ki_sparestring, * pointers from ki_spareptrs, word-aligned data from ki_spareints, and * doubleword-aligned data from ki_sparelongs. Make sure the space for new * variables come from the array which matches the size and alignment of * those variables on ALL hardware platforms, and then adjust the appropriate * KI_NSPARE_* value(s) to match. * * Always verify that sizeof(struct kinfo_proc) == KINFO_PROC_SIZE on all * platforms after you have added new variables. Note that if you change * the value of KINFO_PROC_SIZE, then many userland programs will stop * working until they are recompiled! * * Once you have added the new field, you will need to add code to initialize * it in two places: function fill_kinfo_proc in sys/kern/kern_proc.c and * function kvm_proclist in lib/libkvm/kvm_proc.c . 
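The sizeof(struct kinfo_proc) == KINFO_PROC_SIZE invariant described above is enforced at compile time; kern_proc.c carries an assertion along these lines:

	CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
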
*/ #define KI_NSPARE_INT 4 #define KI_NSPARE_LONG 12 #define KI_NSPARE_PTR 6 #ifndef _KERNEL #ifndef KINFO_PROC_SIZE #error "Unknown architecture" #endif #endif /* !_KERNEL */ #define WMESGLEN 8 /* size of returned wchan message */ #define LOCKNAMELEN 8 /* size of returned lock name */ #define TDNAMLEN 16 /* size of returned thread name */ #define COMMLEN 19 /* size of returned ki_comm name */ #define KI_EMULNAMELEN 16 /* size of returned ki_emul */ #define KI_NGROUPS 16 /* number of groups in ki_groups */ #define LOGNAMELEN 17 /* size of returned ki_login */ #define LOGINCLASSLEN 17 /* size of returned ki_loginclass */ #ifndef BURN_BRIDGES #define OCOMMLEN TDNAMLEN #define ki_ocomm ki_tdname #endif /* Flags for the process credential. */ #define KI_CRF_CAPABILITY_MODE 0x00000001 /* * Steal a bit from ki_cr_flags to indicate that the cred had more than * KI_NGROUPS groups. */ #define KI_CRF_GRP_OVERFLOW 0x80000000 struct kinfo_proc { int ki_structsize; /* size of this structure */ int ki_layout; /* reserved: layout identifier */ struct pargs *ki_args; /* address of command arguments */ struct proc *ki_paddr; /* address of proc */ struct user *ki_addr; /* kernel virtual addr of u-area */ struct vnode *ki_tracep; /* pointer to trace file */ struct vnode *ki_textvp; /* pointer to executable file */ struct filedesc *ki_fd; /* pointer to open file info */ struct vmspace *ki_vmspace; /* pointer to kernel vmspace struct */ void *ki_wchan; /* sleep address */ pid_t ki_pid; /* Process identifier */ pid_t ki_ppid; /* parent process id */ pid_t ki_pgid; /* process group id */ pid_t ki_tpgid; /* tty process group id */ pid_t ki_sid; /* Process session ID */ pid_t ki_tsid; /* Terminal session ID */ short ki_jobc; /* job control counter */ short ki_spare_short1; /* unused (just here for alignment) */ dev_t ki_tdev; /* controlling tty dev */ sigset_t ki_siglist; /* Signals arrived but not delivered */ sigset_t ki_sigmask; /* Current signal mask */ sigset_t ki_sigignore; /* Signals being ignored */ sigset_t ki_sigcatch; /* Signals being caught by user */ uid_t ki_uid; /* effective user id */ uid_t ki_ruid; /* Real user id */ uid_t ki_svuid; /* Saved effective user id */ gid_t ki_rgid; /* Real group id */ gid_t ki_svgid; /* Saved effective group id */ short ki_ngroups; /* number of groups */ short ki_spare_short2; /* unused (just here for alignment) */ gid_t ki_groups[KI_NGROUPS]; /* groups */ vm_size_t ki_size; /* virtual size */ segsz_t ki_rssize; /* current resident set size in pages */ segsz_t ki_swrss; /* resident set size before last swap */ segsz_t ki_tsize; /* text size (pages) XXX */ segsz_t ki_dsize; /* data size (pages) XXX */ segsz_t ki_ssize; /* stack size (pages) */ u_short ki_xstat; /* Exit status for wait & stop signal */ u_short ki_acflag; /* Accounting flags */ fixpt_t ki_pctcpu; /* %cpu for process during ki_swtime */ u_int ki_estcpu; /* Time averaged value of ki_cpticks */ u_int ki_slptime; /* Time since last blocked */ u_int ki_swtime; /* Time swapped in or out */ u_int ki_cow; /* number of copy-on-write faults */ u_int64_t ki_runtime; /* Real time in microsec */ struct timeval ki_start; /* starting time */ struct timeval ki_childtime; /* time used by process children */ long ki_flag; /* P_* flags */ long ki_kiflag; /* KI_* flags (below) */ int ki_traceflag; /* Kernel trace points */ char ki_stat; /* S* process status */ signed char ki_nice; /* Process "nice" value */ char ki_lock; /* Process lock (prevent swap) count */ char ki_rqindex; /* Run queue index */ u_char ki_oncpu_old; /* 
Which cpu we are on (legacy) */ u_char ki_lastcpu_old; /* Last cpu we were on (legacy) */ char ki_tdname[TDNAMLEN+1]; /* thread name */ char ki_wmesg[WMESGLEN+1]; /* wchan message */ char ki_login[LOGNAMELEN+1]; /* setlogin name */ char ki_lockname[LOCKNAMELEN+1]; /* lock name */ char ki_comm[COMMLEN+1]; /* command name */ char ki_emul[KI_EMULNAMELEN+1]; /* emulation name */ char ki_loginclass[LOGINCLASSLEN+1]; /* login class */ /* * When adding new variables, take space for char-strings from the * front of ki_sparestrings, and ints from the end of ki_spareints. * That way the spare room from both arrays will remain contiguous. */ char ki_sparestrings[50]; /* spare string space */ int ki_spareints[KI_NSPARE_INT]; /* spare room for growth */ int ki_oncpu; /* Which cpu we are on */ int ki_lastcpu; /* Last cpu we were on */ int ki_tracer; /* Pid of tracing process */ int ki_flag2; /* P2_* flags */ int ki_fibnum; /* Default FIB number */ u_int ki_cr_flags; /* Credential flags */ int ki_jid; /* Process jail ID */ int ki_numthreads; /* XXXKSE number of threads in total */ lwpid_t ki_tid; /* XXXKSE thread id */ struct priority ki_pri; /* process priority */ struct rusage ki_rusage; /* process rusage statistics */ /* XXX - most fields in ki_rusage_ch are not (yet) filled in */ struct rusage ki_rusage_ch; /* rusage of children processes */ struct pcb *ki_pcb; /* kernel virtual addr of pcb */ void *ki_kstack; /* kernel virtual addr of stack */ void *ki_udata; /* User convenience pointer */ struct thread *ki_tdaddr; /* address of thread */ /* * When adding new variables, take space for pointers from the * front of ki_spareptrs, and longs from the end of ki_sparelongs. * That way the spare room from both arrays will remain contiguous. */ void *ki_spareptrs[KI_NSPARE_PTR]; /* spare room for growth */ long ki_sparelongs[KI_NSPARE_LONG]; /* spare room for growth */ long ki_sflag; /* PS_* flags */ long ki_tdflags; /* XXXKSE kthread flag */ }; void fill_kinfo_proc(struct proc *, struct kinfo_proc *); /* XXX - the following two defines are temporary */ #define ki_childstime ki_rusage_ch.ru_stime #define ki_childutime ki_rusage_ch.ru_utime /* * Legacy PS_ flag. This moved to p_flag but is maintained for * compatibility. */ #define PS_INMEM 0x00001 /* Loaded into memory. */ /* ki_sessflag values */ #define KI_CTTY 0x00000001 /* controlling tty vnode active */ #define KI_SLEADER 0x00000002 /* session leader */ #define KI_LOCKBLOCK 0x00000004 /* proc blocked on lock ki_lockname */ /* * This used to be the per-process structure containing data that * isn't needed in core when the process is swapped out, but now it * remains only for the benefit of a.out core dumps. */ struct user { struct pstats u_stats; /* *p_stats */ struct kinfo_proc u_kproc; /* eproc */ }; /* * The KERN_PROC_FILE sysctl allows a process to dump the file descriptor * array of another process. 
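For reference, a small self-contained userland consumer of the structure above; it performs the same KERN_PROC_PID query that the wait_for_zombie() helper in the ptrace test further down uses (the fields printed here are chosen purely for illustration):

	#include <sys/param.h>
	#include <sys/sysctl.h>
	#include <sys/user.h>

	#include <err.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1)
			err(1, "sysctl");
		printf("%s: pid %d, %d thread(s)\n", kp.ki_comm, (int)kp.ki_pid,
		    kp.ki_numthreads);
		return (0);
	}
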
*/ #define KF_ATTR_VALID 0x0001 #define KF_TYPE_NONE 0 #define KF_TYPE_VNODE 1 #define KF_TYPE_SOCKET 2 #define KF_TYPE_PIPE 3 #define KF_TYPE_FIFO 4 #define KF_TYPE_KQUEUE 5 #define KF_TYPE_CRYPTO 6 #define KF_TYPE_MQUEUE 7 #define KF_TYPE_SHM 8 #define KF_TYPE_SEM 9 #define KF_TYPE_PTS 10 #define KF_TYPE_PROCDESC 11 #define KF_TYPE_UNKNOWN 255 #define KF_VTYPE_VNON 0 #define KF_VTYPE_VREG 1 #define KF_VTYPE_VDIR 2 #define KF_VTYPE_VBLK 3 #define KF_VTYPE_VCHR 4 #define KF_VTYPE_VLNK 5 #define KF_VTYPE_VSOCK 6 #define KF_VTYPE_VFIFO 7 #define KF_VTYPE_VBAD 8 #define KF_VTYPE_UNKNOWN 255 #define KF_FD_TYPE_CWD -1 /* Current working directory */ #define KF_FD_TYPE_ROOT -2 /* Root directory */ #define KF_FD_TYPE_JAIL -3 /* Jail directory */ #define KF_FD_TYPE_TRACE -4 /* Ktrace vnode */ #define KF_FD_TYPE_TEXT -5 /* Text vnode */ #define KF_FD_TYPE_CTTY -6 /* Controlling terminal */ #define KF_FLAG_READ 0x00000001 #define KF_FLAG_WRITE 0x00000002 #define KF_FLAG_APPEND 0x00000004 #define KF_FLAG_ASYNC 0x00000008 #define KF_FLAG_FSYNC 0x00000010 #define KF_FLAG_NONBLOCK 0x00000020 #define KF_FLAG_DIRECT 0x00000040 #define KF_FLAG_HASLOCK 0x00000080 #define KF_FLAG_SHLOCK 0x00000100 #define KF_FLAG_EXLOCK 0x00000200 #define KF_FLAG_NOFOLLOW 0x00000400 #define KF_FLAG_CREAT 0x00000800 #define KF_FLAG_TRUNC 0x00001000 #define KF_FLAG_EXCL 0x00002000 #define KF_FLAG_EXEC 0x00004000 /* * Old format. Has variable hidden padding due to alignment. * This is a compatability hack for pre-build 7.1 packages. */ #if defined(__amd64__) #define KINFO_OFILE_SIZE 1328 #endif #if defined(__i386__) #define KINFO_OFILE_SIZE 1324 #endif struct kinfo_ofile { int kf_structsize; /* Size of kinfo_file. */ int kf_type; /* Descriptor type. */ int kf_fd; /* Array index. */ int kf_ref_count; /* Reference count. */ int kf_flags; /* Flags. */ /* XXX Hidden alignment padding here on amd64 */ off_t kf_offset; /* Seek location. */ int kf_vnode_type; /* Vnode type. */ int kf_sock_domain; /* Socket domain. */ int kf_sock_type; /* Socket type. */ int kf_sock_protocol; /* Socket protocol. */ char kf_path[PATH_MAX]; /* Path to file, if any. */ struct sockaddr_storage kf_sa_local; /* Socket address. */ struct sockaddr_storage kf_sa_peer; /* Peer address. */ }; #if defined(__amd64__) || defined(__i386__) /* * This size should never be changed. If you really need to, you must provide * backward ABI compatibility by allocating a new sysctl MIB that will return * the new structure. The current structure has to be returned by the current * sysctl MIB. See how it is done for the kinfo_ofile structure. */ #define KINFO_FILE_SIZE 1392 #endif struct kinfo_file { int kf_structsize; /* Variable size of record. */ int kf_type; /* Descriptor type. */ int kf_fd; /* Array index. */ int kf_ref_count; /* Reference count. */ int kf_flags; /* Flags. */ int kf_pad0; /* Round to 64 bit alignment. */ int64_t kf_offset; /* Seek location. */ int kf_vnode_type; /* Vnode type. */ int kf_sock_domain; /* Socket domain. */ int kf_sock_type; /* Socket type. */ int kf_sock_protocol; /* Socket protocol. */ struct sockaddr_storage kf_sa_local; /* Socket address. */ struct sockaddr_storage kf_sa_peer; /* Peer address. */ union { struct { /* Address of so_pcb. */ uint64_t kf_sock_pcb; /* Address of inp_ppcb. */ uint64_t kf_sock_inpcb; /* Address of unp_conn. */ uint64_t kf_sock_unpconn; /* Send buffer state. */ uint16_t kf_sock_snd_sb_state; /* Receive buffer state. */ uint16_t kf_sock_rcv_sb_state; /* Round to 64 bit alignment. 
*/ uint32_t kf_sock_pad0; } kf_sock; struct { /* Global file id. */ uint64_t kf_file_fileid; /* File size. */ uint64_t kf_file_size; /* Vnode filesystem id. */ uint32_t kf_file_fsid; /* File device. */ uint32_t kf_file_rdev; /* File mode. */ uint16_t kf_file_mode; /* Round to 64 bit alignment. */ uint16_t kf_file_pad0; uint32_t kf_file_pad1; } kf_file; struct { uint32_t kf_sem_value; uint16_t kf_sem_mode; } kf_sem; struct { uint64_t kf_pipe_addr; uint64_t kf_pipe_peer; uint32_t kf_pipe_buffer_cnt; /* Round to 64 bit alignment. */ uint32_t kf_pipe_pad0[3]; } kf_pipe; struct { uint32_t kf_pts_dev; /* Round to 64 bit alignment. */ uint32_t kf_pts_pad0[7]; } kf_pts; struct { pid_t kf_pid; } kf_proc; } kf_un; uint16_t kf_status; /* Status flags. */ uint16_t kf_pad1; /* Round to 32 bit alignment. */ int _kf_ispare0; /* Space for more stuff. */ cap_rights_t kf_cap_rights; /* Capability rights. */ uint64_t _kf_cap_spare; /* Space for future cap_rights_t. */ /* Truncated before copyout in sysctl */ char kf_path[PATH_MAX]; /* Path to file, if any. */ }; /* * The KERN_PROC_VMMAP sysctl allows a process to dump the VM layout of * another process as a series of entries. */ #define KVME_TYPE_NONE 0 #define KVME_TYPE_DEFAULT 1 #define KVME_TYPE_VNODE 2 #define KVME_TYPE_SWAP 3 #define KVME_TYPE_DEVICE 4 #define KVME_TYPE_PHYS 5 #define KVME_TYPE_DEAD 6 #define KVME_TYPE_SG 7 #define KVME_TYPE_MGTDEVICE 8 #define KVME_TYPE_UNKNOWN 255 #define KVME_PROT_READ 0x00000001 #define KVME_PROT_WRITE 0x00000002 #define KVME_PROT_EXEC 0x00000004 #define KVME_FLAG_COW 0x00000001 #define KVME_FLAG_NEEDS_COPY 0x00000002 #define KVME_FLAG_NOCOREDUMP 0x00000004 #define KVME_FLAG_SUPER 0x00000008 #define KVME_FLAG_GROWS_UP 0x00000010 #define KVME_FLAG_GROWS_DOWN 0x00000020 #if defined(__amd64__) #define KINFO_OVMENTRY_SIZE 1168 #endif #if defined(__i386__) #define KINFO_OVMENTRY_SIZE 1128 #endif struct kinfo_ovmentry { int kve_structsize; /* Size of kinfo_vmmapentry. */ int kve_type; /* Type of map entry. */ void *kve_start; /* Starting address. */ void *kve_end; /* Finishing address. */ int kve_flags; /* Flags on map entry. */ int kve_resident; /* Number of resident pages. */ int kve_private_resident; /* Number of private pages. */ int kve_protection; /* Protection bitmask. */ int kve_ref_count; /* VM obj ref count. */ int kve_shadow_count; /* VM obj shadow count. */ char kve_path[PATH_MAX]; /* Path to VM obj, if any. */ void *_kve_pspare[8]; /* Space for more stuff. */ off_t kve_offset; /* Mapping offset in object */ uint64_t kve_fileid; /* inode number if vnode */ dev_t kve_fsid; /* dev_t of vnode location */ int _kve_ispare[3]; /* Space for more stuff. */ }; #if defined(__amd64__) || defined(__i386__) #define KINFO_VMENTRY_SIZE 1160 #endif struct kinfo_vmentry { int kve_structsize; /* Variable size of record. */ int kve_type; /* Type of map entry. */ uint64_t kve_start; /* Starting address. */ uint64_t kve_end; /* Finishing address. */ uint64_t kve_offset; /* Mapping offset in object */ uint64_t kve_vn_fileid; /* inode number if vnode */ uint32_t kve_vn_fsid; /* dev_t of vnode location */ int kve_flags; /* Flags on map entry. */ int kve_resident; /* Number of resident pages. */ int kve_private_resident; /* Number of private pages. */ int kve_protection; /* Protection bitmask. */ int kve_ref_count; /* VM obj ref count. */ int kve_shadow_count; /* VM obj shadow count. */ int kve_vn_type; /* Vnode type. */ uint64_t kve_vn_size; /* File size. */ uint32_t kve_vn_rdev; /* Device id if device. 
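Because kf_structsize makes every kinfo_file record variable-length, consumers of the KERN_PROC_FILEDESC sysctl walk the returned buffer record by record rather than indexing an array. A self-contained sketch of that walk (buffer sizing and error handling kept minimal):

	#include <sys/param.h>
	#include <sys/sysctl.h>
	#include <sys/user.h>

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int
	main(void)
	{
		int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, getpid() };
		struct kinfo_file *kf;
		char *buf, *bp;
		size_t len = 0;

		if (sysctl(mib, 4, NULL, &len, NULL, 0) == -1)
			return (1);
		len = len * 4 / 3;	/* headroom in case the table grows */
		if ((buf = malloc(len)) == NULL)
			return (1);
		if (sysctl(mib, 4, buf, &len, NULL, 0) == -1)
			return (1);
		for (bp = buf; bp < buf + len; bp += kf->kf_structsize) {
			kf = (struct kinfo_file *)(void *)bp;
			if (kf->kf_structsize == 0)
				break;
			printf("fd %d type %d %s\n", kf->kf_fd, kf->kf_type,
			    kf->kf_path);
		}
		free(buf);
		return (0);
	}
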
*/ uint16_t kve_vn_mode; /* File mode. */ uint16_t kve_status; /* Status flags. */ int _kve_ispare[12]; /* Space for more stuff. */ /* Truncated before copyout in sysctl */ char kve_path[PATH_MAX]; /* Path to VM obj, if any. */ }; /* * The "vm.objects" sysctl provides a list of all VM objects in the system * via an array of these entries. */ struct kinfo_vmobject { int kvo_structsize; /* Variable size of record. */ int kvo_type; /* Object type: KVME_TYPE_*. */ uint64_t kvo_size; /* Object size in pages. */ uint64_t kvo_vn_fileid; /* inode number if vnode. */ uint32_t kvo_vn_fsid; /* dev_t of vnode location. */ int kvo_ref_count; /* Reference count. */ int kvo_shadow_count; /* Shadow count. */ int kvo_memattr; /* Memory attribute. */ uint64_t kvo_resident; /* Number of resident pages. */ uint64_t kvo_active; /* Number of active pages. */ uint64_t kvo_inactive; /* Number of inactive pages. */ uint64_t _kvo_qspare[8]; uint32_t _kvo_ispare[8]; char kvo_path[PATH_MAX]; /* Pathname, if any. */ }; /* * The KERN_PROC_KSTACK sysctl allows a process to dump the kernel stacks of * another process as a series of entries. Each stack is represented by a * series of symbol names and offsets as generated by stack_sbuf_print(9). */ #define KKST_MAXLEN 1024 #define KKST_STATE_STACKOK 0 /* Stack is valid. */ #define KKST_STATE_SWAPPED 1 /* Stack swapped out. */ #define KKST_STATE_RUNNING 2 /* Stack ephemeral. */ #if defined(__amd64__) || defined(__i386__) #define KINFO_KSTACK_SIZE 1096 #endif struct kinfo_kstack { lwpid_t kkst_tid; /* ID of thread. */ int kkst_state; /* Validity of stack. */ char kkst_trace[KKST_MAXLEN]; /* String representing stack. */ int _kkst_ispare[16]; /* Space for more stuff. */ }; struct kinfo_sigtramp { void *ksigtramp_start; void *ksigtramp_end; void *ksigtramp_spare[4]; }; #ifdef _KERNEL /* Flags for kern_proc_out function. */ #define KERN_PROC_NOTHREADS 0x1 #define KERN_PROC_MASK32 0x2 /* Flags for kern_proc_filedesc_out. */ #define KERN_FILEDESC_PACK_KINFO 0x00000001U + +/* Flags for kern_proc_vmmap_out. */ +#define KERN_VMMAP_PACK_KINFO 0x00000001U struct sbuf; /* * The kern_proc out functions are helper functions to dump process * miscellaneous kinfo structures to sbuf. The main consumers are KERN_PROC * sysctls but they may also be used by other kernel subsystems. * * The functions manipulate the process locking state and expect the process * to be locked on enter. On return the process is unlocked. 
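The prototypes for these helpers follow immediately below; this change gives kern_proc_vmmap_out() the same maxlen/flags convention already used by kern_proc_filedesc_out(). A sketch of a kernel-side caller, assuming (as with the filedesc variant) that a negative maxlen means "no limit", that error is the handler's local, and that sb was already set up by the surrounding sysctl handler (e.g. with sbuf_new_for_sysctl()):

	PROC_LOCK(p);		/* the helper expects the process locked on entry... */
	error = kern_proc_vmmap_out(p, sb, -1, KERN_VMMAP_PACK_KINFO);
	/* ...and p has been unlocked again by the time it returns. */
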
*/ int kern_proc_filedesc_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags); int kern_proc_cwd_out(struct proc *p, struct sbuf *sb, ssize_t maxlen); int kern_proc_out(struct proc *p, struct sbuf *sb, int flags); -int kern_proc_vmmap_out(struct proc *p, struct sbuf *sb); +int kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, + int flags); int vntype_to_kinfo(int vtype); #endif /* !_KERNEL */ #endif Index: user/ngie/more-tests2/sys =================================================================== --- user/ngie/more-tests2/sys (revision 288954) +++ user/ngie/more-tests2/sys (revision 288955) Property changes on: user/ngie/more-tests2/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r288944-288954 Index: user/ngie/more-tests2/tests/sys/kern/Makefile =================================================================== --- user/ngie/more-tests2/tests/sys/kern/Makefile (revision 288954) +++ user/ngie/more-tests2/tests/sys/kern/Makefile (revision 288955) @@ -1,16 +1,17 @@ # $FreeBSD$ TESTSDIR= ${TESTSBASE}/sys/kern ATF_TESTS_C+= kern_descrip_test ATF_TESTS_C+= ptrace_test ATF_TESTS_C+= unix_seqpacket_test TEST_METADATA.unix_seqpacket_test+= timeout="15" +LDADD.ptrace_test+= -lpthread LDADD.unix_seqpacket_test+= -lpthread WARNS?= 5 TESTS_SUBDIRS+= execve .include Index: user/ngie/more-tests2/tests/sys/kern/ptrace_test.c =================================================================== --- user/ngie/more-tests2/tests/sys/kern/ptrace_test.c (revision 288954) +++ user/ngie/more-tests2/tests/sys/kern/ptrace_test.c (revision 288955) @@ -1,973 +1,1202 @@ /*- * Copyright (c) 2015 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include +#include #include #include #include #include #include /* * A variant of ATF_REQUIRE that is suitable for use in child * processes. This only works if the parent process is tripped up by * the early exit and fails some requirement itself. 
*/ #define CHILD_REQUIRE(exp) do { \ if (!(exp)) \ child_fail_require(__FILE__, __LINE__, \ #exp " not met"); \ } while (0) static __dead2 void child_fail_require(const char *file, int line, const char *str) { char buf[128]; snprintf(buf, sizeof(buf), "%s:%d: %s\n", file, line, str); write(2, buf, strlen(buf)); _exit(32); } static void trace_me(void) { /* Attach the parent process as a tracer of this process. */ CHILD_REQUIRE(ptrace(PT_TRACE_ME, 0, NULL, 0) != -1); /* Trigger a stop. */ raise(SIGSTOP); } static void attach_child(pid_t pid) { pid_t wpid; int status; ATF_REQUIRE(ptrace(PT_ATTACH, pid, NULL, 0) == 0); wpid = waitpid(pid, &status, 0); ATF_REQUIRE(wpid == pid); ATF_REQUIRE(WIFSTOPPED(status)); ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); } static void wait_for_zombie(pid_t pid) { /* * Wait for a process to exit. This is kind of gross, but * there is not a better way. */ for (;;) { struct kinfo_proc kp; size_t len; int mib[4]; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PID; mib[3] = pid; len = sizeof(kp); if (sysctl(mib, nitems(mib), &kp, &len, NULL, 0) == -1) { /* The KERN_PROC_PID sysctl fails for zombies. */ ATF_REQUIRE(errno == ESRCH); break; } usleep(5000); } } /* * Verify that a parent debugger process "sees" the exit of a debugged * process exactly once when attached via PT_TRACE_ME. */ ATF_TC_WITHOUT_HEAD(ptrace__parent_wait_after_trace_me); ATF_TC_BODY(ptrace__parent_wait_after_trace_me, tc) { pid_t child, wpid; int status; ATF_REQUIRE((child = fork()) != -1); if (child == 0) { /* Child process. */ trace_me(); _exit(1); } /* Parent process. */ /* The first wait() should report the stop from SIGSTOP. */ wpid = waitpid(child, &status, 0); ATF_REQUIRE(wpid == child); ATF_REQUIRE(WIFSTOPPED(status)); ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); /* Continue the child ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, child, (caddr_t)1, 0) != -1); /* The second wait() should report the exit status. */ wpid = waitpid(child, &status, 0); ATF_REQUIRE(wpid == child); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); /* The child should no longer exist. */ wpid = waitpid(child, &status, 0); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a parent debugger process "sees" the exit of a debugged * process exactly once when attached via PT_ATTACH. */ ATF_TC_WITHOUT_HEAD(ptrace__parent_wait_after_attach); ATF_TC_BODY(ptrace__parent_wait_after_attach, tc) { pid_t child, wpid; int cpipe[2], status; char c; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((child = fork()) != -1); if (child == 0) { /* Child process. */ close(cpipe[0]); /* Wait for the parent to attach. */ CHILD_REQUIRE(read(cpipe[1], &c, sizeof(c)) == 0); _exit(1); } close(cpipe[1]); /* Parent process. */ /* Attach to the child process. */ attach_child(child); /* Continue the child ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, child, (caddr_t)1, 0) != -1); /* Signal the child to exit. */ close(cpipe[0]); /* The second wait() should report the exit status. */ wpid = waitpid(child, &status, 0); ATF_REQUIRE(wpid == child); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); /* The child should no longer exist. */ wpid = waitpid(child, &status, 0); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a parent process "sees" the exit of a debugged process only * after the debugger has seen it. 
*/ ATF_TC_WITHOUT_HEAD(ptrace__parent_sees_exit_after_child_debugger); ATF_TC_BODY(ptrace__parent_sees_exit_after_child_debugger, tc) { pid_t child, debugger, wpid; int cpipe[2], dpipe[2], status; char c; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((child = fork()) != -1); if (child == 0) { /* Child process. */ close(cpipe[0]); /* Wait for parent to be ready. */ CHILD_REQUIRE(read(cpipe[1], &c, sizeof(c)) == sizeof(c)); _exit(1); } close(cpipe[1]); ATF_REQUIRE(pipe(dpipe) == 0); ATF_REQUIRE((debugger = fork()) != -1); if (debugger == 0) { /* Debugger process. */ close(dpipe[0]); CHILD_REQUIRE(ptrace(PT_ATTACH, child, NULL, 0) != -1); wpid = waitpid(child, &status, 0); CHILD_REQUIRE(wpid == child); CHILD_REQUIRE(WIFSTOPPED(status)); CHILD_REQUIRE(WSTOPSIG(status) == SIGSTOP); CHILD_REQUIRE(ptrace(PT_CONTINUE, child, (caddr_t)1, 0) != -1); /* Signal parent that debugger is attached. */ CHILD_REQUIRE(write(dpipe[1], &c, sizeof(c)) == sizeof(c)); /* Wait for parent's failed wait. */ CHILD_REQUIRE(read(dpipe[1], &c, sizeof(c)) == 0); wpid = waitpid(child, &status, 0); CHILD_REQUIRE(wpid == child); CHILD_REQUIRE(WIFEXITED(status)); CHILD_REQUIRE(WEXITSTATUS(status) == 1); _exit(0); } close(dpipe[1]); /* Parent process. */ /* Wait for the debugger to attach to the child. */ ATF_REQUIRE(read(dpipe[0], &c, sizeof(c)) == sizeof(c)); /* Release the child. */ ATF_REQUIRE(write(cpipe[0], &c, sizeof(c)) == sizeof(c)); ATF_REQUIRE(read(cpipe[0], &c, sizeof(c)) == 0); close(cpipe[0]); wait_for_zombie(child); /* * This wait should return a pid of 0 to indicate no status to * report. The parent should see the child as non-exited * until the debugger sees the exit. */ wpid = waitpid(child, &status, WNOHANG); ATF_REQUIRE(wpid == 0); /* Signal the debugger to wait for the child. */ close(dpipe[0]); /* Wait for the debugger. */ wpid = waitpid(debugger, &status, 0); ATF_REQUIRE(wpid == debugger); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 0); /* The child process should now be ready. */ wpid = waitpid(child, &status, WNOHANG); ATF_REQUIRE(wpid == child); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); } /* * Verify that a parent process "sees" the exit of a debugged process * only after a non-direct-child debugger has seen it. In particular, * various wait() calls in the parent must avoid failing with ESRCH by * checking the parent's orphan list for the debugee. */ ATF_TC_WITHOUT_HEAD(ptrace__parent_sees_exit_after_unrelated_debugger); ATF_TC_BODY(ptrace__parent_sees_exit_after_unrelated_debugger, tc) { pid_t child, debugger, fpid, wpid; int cpipe[2], dpipe[2], status; char c; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((child = fork()) != -1); if (child == 0) { /* Child process. */ close(cpipe[0]); /* Wait for parent to be ready. */ CHILD_REQUIRE(read(cpipe[1], &c, sizeof(c)) == sizeof(c)); _exit(1); } close(cpipe[1]); ATF_REQUIRE(pipe(dpipe) == 0); ATF_REQUIRE((debugger = fork()) != -1); if (debugger == 0) { /* Debugger parent. */ /* * Fork again and drop the debugger parent so that the * debugger is not a child of the main parent. */ CHILD_REQUIRE((fpid = fork()) != -1); if (fpid != 0) _exit(2); /* Debugger process. */ close(dpipe[0]); CHILD_REQUIRE(ptrace(PT_ATTACH, child, NULL, 0) != -1); wpid = waitpid(child, &status, 0); CHILD_REQUIRE(wpid == child); CHILD_REQUIRE(WIFSTOPPED(status)); CHILD_REQUIRE(WSTOPSIG(status) == SIGSTOP); CHILD_REQUIRE(ptrace(PT_CONTINUE, child, (caddr_t)1, 0) != -1); /* Signal parent that debugger is attached. 
*/ CHILD_REQUIRE(write(dpipe[1], &c, sizeof(c)) == sizeof(c)); /* Wait for parent's failed wait. */ CHILD_REQUIRE(read(dpipe[1], &c, sizeof(c)) == sizeof(c)); wpid = waitpid(child, &status, 0); CHILD_REQUIRE(wpid == child); CHILD_REQUIRE(WIFEXITED(status)); CHILD_REQUIRE(WEXITSTATUS(status) == 1); _exit(0); } close(dpipe[1]); /* Parent process. */ /* Wait for the debugger parent process to exit. */ wpid = waitpid(debugger, &status, 0); ATF_REQUIRE(wpid == debugger); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 2); /* A WNOHANG wait here should see the non-exited child. */ wpid = waitpid(child, &status, WNOHANG); ATF_REQUIRE(wpid == 0); /* Wait for the debugger to attach to the child. */ ATF_REQUIRE(read(dpipe[0], &c, sizeof(c)) == sizeof(c)); /* Release the child. */ ATF_REQUIRE(write(cpipe[0], &c, sizeof(c)) == sizeof(c)); ATF_REQUIRE(read(cpipe[0], &c, sizeof(c)) == 0); close(cpipe[0]); wait_for_zombie(child); /* * This wait should return a pid of 0 to indicate no status to * report. The parent should see the child as non-exited * until the debugger sees the exit. */ wpid = waitpid(child, &status, WNOHANG); ATF_REQUIRE(wpid == 0); /* Signal the debugger to wait for the child. */ ATF_REQUIRE(write(dpipe[0], &c, sizeof(c)) == sizeof(c)); /* Wait for the debugger. */ ATF_REQUIRE(read(dpipe[0], &c, sizeof(c)) == 0); close(dpipe[0]); /* The child process should now be ready. */ wpid = waitpid(child, &status, WNOHANG); ATF_REQUIRE(wpid == child); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); } /* * The parent process should always act the same regardless of how the * debugger is attached to it. */ static __dead2 void -follow_fork_parent(void) +follow_fork_parent(bool use_vfork) { pid_t fpid, wpid; int status; - CHILD_REQUIRE((fpid = fork()) != -1); + if (use_vfork) + CHILD_REQUIRE((fpid = vfork()) != -1); + else + CHILD_REQUIRE((fpid = fork()) != -1); if (fpid == 0) /* Child */ _exit(2); wpid = waitpid(fpid, &status, 0); CHILD_REQUIRE(wpid == fpid); CHILD_REQUIRE(WIFEXITED(status)); CHILD_REQUIRE(WEXITSTATUS(status) == 2); _exit(1); } /* * Helper routine for follow fork tests. This waits for two stops * that report both "sides" of a fork. It returns the pid of the new * child process. */ static pid_t -handle_fork_events(pid_t parent) +handle_fork_events(pid_t parent, struct ptrace_lwpinfo *ppl) { struct ptrace_lwpinfo pl; bool fork_reported[2]; pid_t child, wpid; int i, status; fork_reported[0] = false; fork_reported[1] = false; child = -1; /* * Each process should report a fork event. The parent should * report a PL_FLAG_FORKED event, and the child should report * a PL_FLAG_CHILD event. 
*/ for (i = 0; i < 2; i++) { wpid = wait(&status); ATF_REQUIRE(wpid > 0); ATF_REQUIRE(WIFSTOPPED(status)); ATF_REQUIRE(ptrace(PT_LWPINFO, wpid, (caddr_t)&pl, sizeof(pl)) != -1); ATF_REQUIRE((pl.pl_flags & (PL_FLAG_FORKED | PL_FLAG_CHILD)) != 0); ATF_REQUIRE((pl.pl_flags & (PL_FLAG_FORKED | PL_FLAG_CHILD)) != (PL_FLAG_FORKED | PL_FLAG_CHILD)); if (pl.pl_flags & PL_FLAG_CHILD) { ATF_REQUIRE(wpid != parent); ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); ATF_REQUIRE(!fork_reported[1]); if (child == -1) child = wpid; else ATF_REQUIRE(child == wpid); + if (ppl != NULL) + ppl[1] = pl; fork_reported[1] = true; } else { ATF_REQUIRE(wpid == parent); ATF_REQUIRE(WSTOPSIG(status) == SIGTRAP); ATF_REQUIRE(!fork_reported[0]); if (child == -1) child = pl.pl_child_pid; else ATF_REQUIRE(child == pl.pl_child_pid); + if (ppl != NULL) + ppl[0] = pl; fork_reported[0] = true; } } return (child); } /* * Verify that a new child process is stopped after a followed fork and * that the traced parent sees the exit of the child after the debugger * when both processes remain attached to the debugger. */ ATF_TC_WITHOUT_HEAD(ptrace__follow_fork_both_attached); ATF_TC_BODY(ptrace__follow_fork_both_attached, tc) { pid_t children[2], fpid, wpid; int status; ATF_REQUIRE((fpid = fork()) != -1); if (fpid == 0) { trace_me(); - follow_fork_parent(); + follow_fork_parent(false); } /* Parent process. */ children[0] = fpid; /* The first wait() should report the stop from SIGSTOP. */ wpid = waitpid(children[0], &status, 0); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFSTOPPED(status)); ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); /* Continue the child ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); - children[1] = handle_fork_events(children[0]); + children[1] = handle_fork_events(children[0], NULL); ATF_REQUIRE(children[1] > 0); ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); ATF_REQUIRE(ptrace(PT_CONTINUE, children[1], (caddr_t)1, 0) != -1); /* * The child can't exit until the grandchild reports status, so the * grandchild should report its exit first to the debugger. */ wpid = wait(&status); ATF_REQUIRE(wpid == children[1]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 2); wpid = wait(&status); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); wpid = wait(&status); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a new child process is stopped after a followed fork * and that the traced parent sees the exit of the child when the new * child process is detached after it reports its fork. */ ATF_TC_WITHOUT_HEAD(ptrace__follow_fork_child_detached); ATF_TC_BODY(ptrace__follow_fork_child_detached, tc) { pid_t children[2], fpid, wpid; int status; ATF_REQUIRE((fpid = fork()) != -1); if (fpid == 0) { trace_me(); - follow_fork_parent(); + follow_fork_parent(false); } /* Parent process. */ children[0] = fpid; /* The first wait() should report the stop from SIGSTOP. */ wpid = waitpid(children[0], &status, 0); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFSTOPPED(status)); ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); /* Continue the child ignoring the SIGSTOP. 
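The new ppl argument to handle_fork_events() lets a caller capture both ptrace_lwpinfo records: index 0 holds the forking parent's PL_FLAG_FORKED event and index 1 the new child's PL_FLAG_CHILD event, matching the assignments above. A minimal sketch of a hypothetical caller (not one of the tests in this diff):

	struct ptrace_lwpinfo pl[2];
	pid_t child;

	child = handle_fork_events(children[0], pl);
	ATF_REQUIRE(child > 0);
	ATF_REQUIRE((pl[0].pl_flags & PL_FLAG_FORKED) != 0);
	ATF_REQUIRE((pl[1].pl_flags & PL_FLAG_CHILD) != 0);
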
*/ ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); - children[1] = handle_fork_events(children[0]); + children[1] = handle_fork_events(children[0], NULL); ATF_REQUIRE(children[1] > 0); ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); ATF_REQUIRE(ptrace(PT_DETACH, children[1], (caddr_t)1, 0) != -1); /* * Should not see any status from the grandchild now, only the * child. */ wpid = wait(&status); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); wpid = wait(&status); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a new child process is stopped after a followed fork * and that the traced parent sees the exit of the child when the * traced parent is detached after the fork. */ ATF_TC_WITHOUT_HEAD(ptrace__follow_fork_parent_detached); ATF_TC_BODY(ptrace__follow_fork_parent_detached, tc) { pid_t children[2], fpid, wpid; int status; ATF_REQUIRE((fpid = fork()) != -1); if (fpid == 0) { trace_me(); - follow_fork_parent(); + follow_fork_parent(false); } /* Parent process. */ children[0] = fpid; /* The first wait() should report the stop from SIGSTOP. */ wpid = waitpid(children[0], &status, 0); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFSTOPPED(status)); ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); /* Continue the child ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); - children[1] = handle_fork_events(children[0]); + children[1] = handle_fork_events(children[0], NULL); ATF_REQUIRE(children[1] > 0); ATF_REQUIRE(ptrace(PT_DETACH, children[0], (caddr_t)1, 0) != -1); ATF_REQUIRE(ptrace(PT_CONTINUE, children[1], (caddr_t)1, 0) != -1); /* * The child can't exit until the grandchild reports status, so the * grandchild should report its exit first to the debugger. * * Even though the child process is detached, it is still a * child of the debugger, so it will still report its exit * after the grandchild. */ wpid = wait(&status); ATF_REQUIRE(wpid == children[1]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 2); wpid = wait(&status); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); wpid = wait(&status); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } static void attach_fork_parent(int cpipe[2]) { pid_t fpid; close(cpipe[0]); /* Double-fork to disassociate from the debugger. */ CHILD_REQUIRE((fpid = fork()) != -1); if (fpid != 0) _exit(3); /* Send the pid of the disassociated child to the debugger. */ fpid = getpid(); CHILD_REQUIRE(write(cpipe[1], &fpid, sizeof(fpid)) == sizeof(fpid)); /* Wait for the debugger to attach. */ CHILD_REQUIRE(read(cpipe[1], &fpid, sizeof(fpid)) == 0); } /* * Verify that a new child process is stopped after a followed fork and * that the traced parent sees the exit of the child after the debugger * when both processes remain attached to the debugger. In this test * the parent that forks is not a direct child of the debugger. */ ATF_TC_WITHOUT_HEAD(ptrace__follow_fork_both_attached_unrelated_debugger); ATF_TC_BODY(ptrace__follow_fork_both_attached_unrelated_debugger, tc) { pid_t children[2], fpid, wpid; int cpipe[2], status; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((fpid = fork()) != -1); if (fpid == 0) { attach_fork_parent(cpipe); - follow_fork_parent(); + follow_fork_parent(false); } /* Parent process. */ close(cpipe[1]); /* Wait for the direct child to exit.
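/*
 * A sketch of the double-fork idiom attach_fork_parent() uses: the
 * intermediate process exits right away, so the grandchild stops being a
 * descendant of the test and can later be attached to by an "unrelated"
 * debugger.  Because the caller never learns the grandchild's pid
 * directly, it has to be sent back over a pipe, exactly as the helper
 * above does.  fork_detached() is a hypothetical name.
 */
static void
fork_detached(void (*work)(void))
{
	pid_t mid;

	if ((mid = fork()) == 0) {
		if (fork() != 0)
			_exit(0);	/* intermediate process exits immediately */
		work();			/* grandchild, reparented away from the caller */
		_exit(0);
	}
	waitpid(mid, NULL, 0);		/* reap the intermediate process */
}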
*/ wpid = waitpid(fpid, &status, 0); ATF_REQUIRE(wpid == fpid); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 3); /* Read the pid of the fork parent. */ ATF_REQUIRE(read(cpipe[0], &children[0], sizeof(children[0])) == sizeof(children[0])); /* Attach to the fork parent. */ attach_child(children[0]); ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); /* Continue the fork parent ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); /* Signal the fork parent to continue. */ close(cpipe[0]); - children[1] = handle_fork_events(children[0]); + children[1] = handle_fork_events(children[0], NULL); ATF_REQUIRE(children[1] > 0); ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); ATF_REQUIRE(ptrace(PT_CONTINUE, children[1], (caddr_t)1, 0) != -1); /* * The fork parent can't exit until the child reports status, * so the child should report its exit first to the debugger. */ wpid = wait(&status); ATF_REQUIRE(wpid == children[1]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 2); wpid = wait(&status); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); wpid = wait(&status); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a new child process is stopped after a followed fork * and that the traced parent sees the exit of the child when the new * child process is detached after it reports its fork. In this test * the parent that forks is not a direct child of the debugger. */ ATF_TC_WITHOUT_HEAD(ptrace__follow_fork_child_detached_unrelated_debugger); ATF_TC_BODY(ptrace__follow_fork_child_detached_unrelated_debugger, tc) { pid_t children[2], fpid, wpid; int cpipe[2], status; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((fpid = fork()) != -1); if (fpid == 0) { attach_fork_parent(cpipe); - follow_fork_parent(); + follow_fork_parent(false); } /* Parent process. */ close(cpipe[1]); /* Wait for the direct child to exit. */ wpid = waitpid(fpid, &status, 0); ATF_REQUIRE(wpid == fpid); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 3); /* Read the pid of the fork parent. */ ATF_REQUIRE(read(cpipe[0], &children[0], sizeof(children[0])) == sizeof(children[0])); /* Attach to the fork parent. */ attach_child(children[0]); ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); /* Continue the fork parent ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); /* Signal the fork parent to continue. */ close(cpipe[0]); - children[1] = handle_fork_events(children[0]); + children[1] = handle_fork_events(children[0], NULL); ATF_REQUIRE(children[1] > 0); ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); ATF_REQUIRE(ptrace(PT_DETACH, children[1], (caddr_t)1, 0) != -1); /* * Should not see any status from the child now, only the fork * parent. */ wpid = wait(&status); ATF_REQUIRE(wpid == children[0]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); wpid = wait(&status); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a new child process is stopped after a followed fork * and that the traced parent sees the exit of the child when the * traced parent is detached after the fork. In this test the parent * that forks is not a direct child of the debugger. 
*/ ATF_TC_WITHOUT_HEAD(ptrace__follow_fork_parent_detached_unrelated_debugger); ATF_TC_BODY(ptrace__follow_fork_parent_detached_unrelated_debugger, tc) { pid_t children[2], fpid, wpid; int cpipe[2], status; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((fpid = fork()) != -1); if (fpid == 0) { attach_fork_parent(cpipe); - follow_fork_parent(); + follow_fork_parent(false); } /* Parent process. */ close(cpipe[1]); /* Wait for the direct child to exit. */ wpid = waitpid(fpid, &status, 0); ATF_REQUIRE(wpid == fpid); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 3); /* Read the pid of the fork parent. */ ATF_REQUIRE(read(cpipe[0], &children[0], sizeof(children[0])) == sizeof(children[0])); /* Attach to the fork parent. */ attach_child(children[0]); ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); /* Continue the fork parent ignoring the SIGSTOP. */ ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); /* Signal the fork parent to continue. */ close(cpipe[0]); - children[1] = handle_fork_events(children[0]); + children[1] = handle_fork_events(children[0], NULL); ATF_REQUIRE(children[1] > 0); ATF_REQUIRE(ptrace(PT_DETACH, children[0], (caddr_t)1, 0) != -1); ATF_REQUIRE(ptrace(PT_CONTINUE, children[1], (caddr_t)1, 0) != -1); /* * Should not see any status from the fork parent now, only * the child. */ wpid = wait(&status); ATF_REQUIRE(wpid == children[1]); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 2); wpid = wait(&status); ATF_REQUIRE(wpid == -1); ATF_REQUIRE(errno == ECHILD); } /* * Verify that a child process does not see an unrelated debugger as its * parent but sees its original parent process. */ ATF_TC_WITHOUT_HEAD(ptrace__getppid); ATF_TC_BODY(ptrace__getppid, tc) { pid_t child, debugger, ppid, wpid; int cpipe[2], dpipe[2], status; char c; ATF_REQUIRE(pipe(cpipe) == 0); ATF_REQUIRE((child = fork()) != -1); if (child == 0) { /* Child process. */ close(cpipe[0]); /* Wait for parent to be ready. */ CHILD_REQUIRE(read(cpipe[1], &c, sizeof(c)) == sizeof(c)); /* Report the parent PID to the parent. */ ppid = getppid(); CHILD_REQUIRE(write(cpipe[1], &ppid, sizeof(ppid)) == sizeof(ppid)); _exit(1); } close(cpipe[1]); ATF_REQUIRE(pipe(dpipe) == 0); ATF_REQUIRE((debugger = fork()) != -1); if (debugger == 0) { /* Debugger process. */ close(dpipe[0]); CHILD_REQUIRE(ptrace(PT_ATTACH, child, NULL, 0) != -1); wpid = waitpid(child, &status, 0); CHILD_REQUIRE(wpid == child); CHILD_REQUIRE(WIFSTOPPED(status)); CHILD_REQUIRE(WSTOPSIG(status) == SIGSTOP); CHILD_REQUIRE(ptrace(PT_CONTINUE, child, (caddr_t)1, 0) != -1); /* Signal parent that debugger is attached. */ CHILD_REQUIRE(write(dpipe[1], &c, sizeof(c)) == sizeof(c)); /* Wait for traced child to exit. */ wpid = waitpid(child, &status, 0); CHILD_REQUIRE(wpid == child); CHILD_REQUIRE(WIFEXITED(status)); CHILD_REQUIRE(WEXITSTATUS(status) == 1); _exit(0); } close(dpipe[1]); /* Parent process. */ /* Wait for the debugger to attach to the child. */ ATF_REQUIRE(read(dpipe[0], &c, sizeof(c)) == sizeof(c)); /* Release the child. */ ATF_REQUIRE(write(cpipe[0], &c, sizeof(c)) == sizeof(c)); /* Read the parent PID from the child. */ ATF_REQUIRE(read(cpipe[0], &ppid, sizeof(ppid)) == sizeof(ppid)); close(cpipe[0]); ATF_REQUIRE(ppid == getpid()); /* Wait for the debugger. */ wpid = waitpid(debugger, &status, 0); ATF_REQUIRE(wpid == debugger); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 0); /* The child process should now be ready. 
*/ wpid = waitpid(child, &status, WNOHANG); ATF_REQUIRE(wpid == child); ATF_REQUIRE(WIFEXITED(status)); ATF_REQUIRE(WEXITSTATUS(status) == 1); } +/* + * Verify that pl_syscall_code in struct ptrace_lwpinfo for a new + * child process created via fork() reports the correct value. + */ +ATF_TC_WITHOUT_HEAD(ptrace__new_child_pl_syscall_code_fork); +ATF_TC_BODY(ptrace__new_child_pl_syscall_code_fork, tc) +{ + struct ptrace_lwpinfo pl[2]; + pid_t children[2], fpid, wpid; + int status; + + ATF_REQUIRE((fpid = fork()) != -1); + if (fpid == 0) { + trace_me(); + follow_fork_parent(false); + } + + /* Parent process. */ + children[0] = fpid; + + /* The first wait() should report the stop from SIGSTOP. */ + wpid = waitpid(children[0], &status, 0); + ATF_REQUIRE(wpid == children[0]); + ATF_REQUIRE(WIFSTOPPED(status)); + ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); + + ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); + + /* Continue the child ignoring the SIGSTOP. */ + ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); + + /* Wait for both halves of the fork event to get reported. */ + children[1] = handle_fork_events(children[0], pl); + ATF_REQUIRE(children[1] > 0); + + ATF_REQUIRE((pl[0].pl_flags & PL_FLAG_SCX) != 0); + ATF_REQUIRE((pl[1].pl_flags & PL_FLAG_SCX) != 0); + ATF_REQUIRE(pl[0].pl_syscall_code == SYS_fork); + ATF_REQUIRE(pl[0].pl_syscall_code == pl[1].pl_syscall_code); + ATF_REQUIRE(pl[0].pl_syscall_narg == pl[1].pl_syscall_narg); + + ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); + ATF_REQUIRE(ptrace(PT_CONTINUE, children[1], (caddr_t)1, 0) != -1); + + /* + * The child can't exit until the grandchild reports status, so the + * grandchild should report its exit first to the debugger. + */ + wpid = wait(&status); + ATF_REQUIRE(wpid == children[1]); + ATF_REQUIRE(WIFEXITED(status)); + ATF_REQUIRE(WEXITSTATUS(status) == 2); + + wpid = wait(&status); + ATF_REQUIRE(wpid == children[0]); + ATF_REQUIRE(WIFEXITED(status)); + ATF_REQUIRE(WEXITSTATUS(status) == 1); + + wpid = wait(&status); + ATF_REQUIRE(wpid == -1); + ATF_REQUIRE(errno == ECHILD); +} + +/* + * Verify that pl_syscall_code in struct ptrace_lwpinfo for a new + * child process created via vfork() reports the correct value. + */ +ATF_TC_WITHOUT_HEAD(ptrace__new_child_pl_syscall_code_vfork); +ATF_TC_BODY(ptrace__new_child_pl_syscall_code_vfork, tc) +{ + struct ptrace_lwpinfo pl[2]; + pid_t children[2], fpid, wpid; + int status; + + ATF_REQUIRE((fpid = fork()) != -1); + if (fpid == 0) { + trace_me(); + follow_fork_parent(true); + } + + /* Parent process. */ + children[0] = fpid; + + /* The first wait() should report the stop from SIGSTOP. */ + wpid = waitpid(children[0], &status, 0); + ATF_REQUIRE(wpid == children[0]); + ATF_REQUIRE(WIFSTOPPED(status)); + ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); + + ATF_REQUIRE(ptrace(PT_FOLLOW_FORK, children[0], NULL, 1) != -1); + + /* Continue the child ignoring the SIGSTOP. */ + ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); + + /* Wait for both halves of the fork event to get reported. 
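/*
 * A sketch of how a debugger can use the information the new tests below
 * exercise: at a PL_FLAG_FORKED/PL_FLAG_CHILD stop, pl_syscall_code names
 * the system call that created the process.  describe_fork() is a
 * hypothetical helper; SYS_fork, SYS_vfork and SYS_rfork come from
 * <sys/syscall.h>.
 */
static const char *
describe_fork(const struct ptrace_lwpinfo *pl)
{

	if ((pl->pl_flags & PL_FLAG_SCX) == 0)
		return ("not stopped at a syscall exit");
	switch (pl->pl_syscall_code) {
	case SYS_fork:
		return ("fork");
	case SYS_vfork:
		return ("vfork");
	case SYS_rfork:
		return ("rfork");
	default:
		return ("other");
	}
}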
*/ + children[1] = handle_fork_events(children[0], pl); + ATF_REQUIRE(children[1] > 0); + + ATF_REQUIRE((pl[0].pl_flags & PL_FLAG_SCX) != 0); + ATF_REQUIRE((pl[1].pl_flags & PL_FLAG_SCX) != 0); + ATF_REQUIRE(pl[0].pl_syscall_code == SYS_vfork); + ATF_REQUIRE(pl[0].pl_syscall_code == pl[1].pl_syscall_code); + ATF_REQUIRE(pl[0].pl_syscall_narg == pl[1].pl_syscall_narg); + + ATF_REQUIRE(ptrace(PT_CONTINUE, children[0], (caddr_t)1, 0) != -1); + ATF_REQUIRE(ptrace(PT_CONTINUE, children[1], (caddr_t)1, 0) != -1); + + /* + * The child can't exit until the grandchild reports status, so the + * grandchild should report its exit first to the debugger. + */ + wpid = wait(&status); + ATF_REQUIRE(wpid == children[1]); + ATF_REQUIRE(WIFEXITED(status)); + ATF_REQUIRE(WEXITSTATUS(status) == 2); + + wpid = wait(&status); + ATF_REQUIRE(wpid == children[0]); + ATF_REQUIRE(WIFEXITED(status)); + ATF_REQUIRE(WEXITSTATUS(status) == 1); + + wpid = wait(&status); + ATF_REQUIRE(wpid == -1); + ATF_REQUIRE(errno == ECHILD); +} + +static void * +simple_thread(void *arg __unused) +{ + + pthread_exit(NULL); +} + +/* + * Verify that pl_syscall_code in struct ptrace_lwpinfo for a new + * thread reports the correct value. + */ +ATF_TC_WITHOUT_HEAD(ptrace__new_child_pl_syscall_code_thread); +ATF_TC_BODY(ptrace__new_child_pl_syscall_code_thread, tc) +{ + struct ptrace_lwpinfo pl; + pid_t fpid, wpid; + lwpid_t main; + int status; + + ATF_REQUIRE((fpid = fork()) != -1); + if (fpid == 0) { + pthread_t thread; + + trace_me(); + + CHILD_REQUIRE(pthread_create(&thread, NULL, simple_thread, + NULL) == 0); + CHILD_REQUIRE(pthread_join(thread, NULL) == 0); + exit(1); + } + + /* The first wait() should report the stop from SIGSTOP. */ + wpid = waitpid(fpid, &status, 0); + ATF_REQUIRE(wpid == fpid); + ATF_REQUIRE(WIFSTOPPED(status)); + ATF_REQUIRE(WSTOPSIG(status) == SIGSTOP); + + ATF_REQUIRE(ptrace(PT_LWPINFO, wpid, (caddr_t)&pl, + sizeof(pl)) != -1); + main = pl.pl_lwpid; + + /* + * Continue the child ignoring the SIGSTOP and tracing all + * system call exits. + */ + ATF_REQUIRE(ptrace(PT_TO_SCX, fpid, (caddr_t)1, 0) != -1); + + /* + * Wait for the new thread to arrive. pthread_create() might + * invoke any number of system calls. For now we just wait + * for the new thread to arrive and make sure it reports a + * valid system call code. If ptrace grows thread event + * reporting then this test can be made more precise. + */ + for (;;) { + wpid = waitpid(fpid, &status, 0); + ATF_REQUIRE(wpid == fpid); + ATF_REQUIRE(WIFSTOPPED(status)); + ATF_REQUIRE(WSTOPSIG(status) == SIGTRAP); + + ATF_REQUIRE(ptrace(PT_LWPINFO, wpid, (caddr_t)&pl, + sizeof(pl)) != -1); + ATF_REQUIRE((pl.pl_flags & PL_FLAG_SCX) != 0); + ATF_REQUIRE(pl.pl_syscall_code != 0); + if (pl.pl_lwpid != main) + /* New thread seen. */ + break; + + ATF_REQUIRE(ptrace(PT_CONTINUE, fpid, (caddr_t)1, 0) == 0); + } + + /* Wait for the child to exit. 
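/*
 * A sketch of the syscall-exit polling used above, written as a standalone
 * helper that re-arms PT_TO_SCX on each resume (the in-test loop resumes
 * with PT_CONTINUE and additionally checks the reported syscall code).
 * wait_for_new_lwp() is a hypothetical name.
 */
static lwpid_t
wait_for_new_lwp(pid_t pid, lwpid_t main_lwp)
{
	struct ptrace_lwpinfo pl;
	int status;

	for (;;) {
		ATF_REQUIRE(waitpid(pid, &status, 0) == pid);
		ATF_REQUIRE(WIFSTOPPED(status));
		ATF_REQUIRE(ptrace(PT_LWPINFO, pid, (caddr_t)&pl,
		    sizeof(pl)) != -1);
		if (pl.pl_lwpid != main_lwp)
			return (pl.pl_lwpid);	/* a new thread reported a stop */
		ATF_REQUIRE(ptrace(PT_TO_SCX, pid, (caddr_t)1, 0) != -1);
	}
}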
*/ + ATF_REQUIRE(ptrace(PT_CONTINUE, fpid, (caddr_t)1, 0) == 0); + for (;;) { + wpid = waitpid(fpid, &status, 0); + ATF_REQUIRE(wpid == fpid); + if (WIFEXITED(status)) + break; + + ATF_REQUIRE(WIFSTOPPED(status)); + ATF_REQUIRE(WSTOPSIG(status) == SIGTRAP); + ATF_REQUIRE(ptrace(PT_CONTINUE, fpid, (caddr_t)1, 0) == 0); + } + + ATF_REQUIRE(WEXITSTATUS(status) == 1); + + wpid = wait(&status); + ATF_REQUIRE(wpid == -1); + ATF_REQUIRE(errno == ECHILD); +} + ATF_TP_ADD_TCS(tp) { ATF_TP_ADD_TC(tp, ptrace__parent_wait_after_trace_me); ATF_TP_ADD_TC(tp, ptrace__parent_wait_after_attach); ATF_TP_ADD_TC(tp, ptrace__parent_sees_exit_after_child_debugger); ATF_TP_ADD_TC(tp, ptrace__parent_sees_exit_after_unrelated_debugger); ATF_TP_ADD_TC(tp, ptrace__follow_fork_both_attached); ATF_TP_ADD_TC(tp, ptrace__follow_fork_child_detached); ATF_TP_ADD_TC(tp, ptrace__follow_fork_parent_detached); ATF_TP_ADD_TC(tp, ptrace__follow_fork_both_attached_unrelated_debugger); ATF_TP_ADD_TC(tp, ptrace__follow_fork_child_detached_unrelated_debugger); ATF_TP_ADD_TC(tp, ptrace__follow_fork_parent_detached_unrelated_debugger); ATF_TP_ADD_TC(tp, ptrace__getppid); + ATF_TP_ADD_TC(tp, ptrace__new_child_pl_syscall_code_fork); + ATF_TP_ADD_TC(tp, ptrace__new_child_pl_syscall_code_vfork); + ATF_TP_ADD_TC(tp, ptrace__new_child_pl_syscall_code_thread); return (atf_no_error()); } Index: user/ngie/more-tests2/usr.bin/truss/syscalls.c =================================================================== --- user/ngie/more-tests2/usr.bin/truss/syscalls.c (revision 288954) +++ user/ngie/more-tests2/usr.bin/truss/syscalls.c (revision 288955) @@ -1,1719 +1,1726 @@ /* * Copyright 1997 Sean Eric Fagan * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Sean Eric Fagan * 4. Neither the name of the author may be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This file has routines used to print out system calls and their * arguments. 
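/*
 * A sketch of how one entry in the decoded_syscalls[] table below is read,
 * using the existing "open" entry as the example (annotation only, not a
 * new entry): ret_type distinguishes ordinary int returns (1) from 64-bit
 * returns such as lseek's (2), nargs is the number of argument slots, and
 * each .args element pairs a decoder with the slot it consumes, with IN
 * meaning the pointer is dereferenced on syscall entry and OUT on exit.
 *
 *	{ .name = "open", .ret_type = 1, .nargs = 3,
 *	  .args = { { Name | IN, 0 },	path: NUL-terminated string, read on entry
 *		    { Open, 1 },	flags: decoded via the open_flags[] table
 *		    { Octal, 2 } } },	mode: printed in octal
 */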
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "truss.h" #include "extern.h" #include "syscall.h" /* 64-bit alignment on 32-bit platforms. */ #if !defined(__LP64__) && defined(__powerpc__) #define QUAD_ALIGN 1 #else #define QUAD_ALIGN 0 #endif /* Number of slots needed for a 64-bit argument. */ #ifdef __LP64__ #define QUAD_SLOTS 1 #else #define QUAD_SLOTS 2 #endif /* * This should probably be in its own file, sorted alphabetically. */ static struct syscall decoded_syscalls[] = { - { .name = "fcntl", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { Fcntl, 1 }, { Fcntlflag, 2 } } }, - { .name = "rfork", .ret_type = 1, .nargs = 1, - .args = { { Rforkflags, 0 } } }, - { .name = "linux_readlink", .ret_type = 1, .nargs = 3, - .args = { { Name, 0 }, { Name | OUT, 1 }, { Int, 2 } } }, - { .name = "linux_socketcall", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { LinuxSockArgs, 1 } } }, - { .name = "getpgid", .ret_type = 1, .nargs = 1, - .args = { { Int, 0 } } }, - { .name = "getsid", .ret_type = 1, .nargs = 1, - .args = { { Int, 0 } } }, - { .name = "readlink", .ret_type = 1, .nargs = 3, - .args = { { Name, 0 }, { Readlinkres | OUT, 1 }, { Int, 2 } } }, - { .name = "readlinkat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Name, 1 }, { Readlinkres | OUT, 2 }, + /* Native ABI */ + { .name = "__getcwd", .ret_type = 1, .nargs = 2, + .args = { { Name | OUT, 0 }, { Int, 1 } } }, + { .name = "_umtx_op", .ret_type = 1, .nargs = 5, + .args = { { Ptr, 0 }, { Umtxop, 1 }, { LongHex, 2 }, { Ptr, 3 }, + { Ptr, 4 } } }, + { .name = "accept", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, + { .name = "access", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Accessmode, 1 } } }, + { .name = "bind", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { Sockaddr | IN, 1 }, { Int, 2 } } }, + { .name = "bindat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Int, 1 }, { Sockaddr | IN, 2 }, { Int, 3 } } }, - { .name = "lseek", .ret_type = 2, .nargs = 3, - .args = { { Int, 0 }, { QuadHex, 1 + QUAD_ALIGN }, - { Whence, 1 + QUAD_SLOTS + QUAD_ALIGN } } }, - { .name = "linux_lseek", .ret_type = 2, .nargs = 3, - .args = { { Int, 0 }, { Int, 1 }, { Whence, 2 } } }, - { .name = "mmap", .ret_type = 1, .nargs = 6, - .args = { { Ptr, 0 }, { Int, 1 }, { Mprot, 2 }, { Mmapflags, 3 }, - { Int, 4 }, { QuadHex, 5 + QUAD_ALIGN } } }, - { .name = "linux_mkdir", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Int, 1 } } }, - { .name = "mprotect", .ret_type = 1, .nargs = 3, - .args = { { Ptr, 0 }, { Int, 1 }, { Mprot, 2 } } }, - { .name = "open", .ret_type = 1, .nargs = 3, - .args = { { Name | IN, 0 }, { Open, 1 }, { Octal, 2 } } }, - { .name = "openat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Name | IN, 1 }, { Open, 2 }, - { Octal, 3 } } }, - { .name = "mkdir", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Octal, 1 } } }, - { .name = "mkdirat", .ret_type = 1, .nargs = 3, - .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 } } }, - { .name = "linux_open", .ret_type = 1, .nargs = 3, - .args = { { Name, 0 }, { Hex, 1 }, { Octal, 2 } } }, - { .name = "close", .ret_type = 1, .nargs = 1, - .args = { { Int, 0 } } }, - { .name = "link", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Name, 1 } } }, - { 
.name = "linkat", .ret_type = 1, .nargs = 5, - .args = { { Atfd, 0 }, { Name, 1 }, { Atfd, 2 }, { Name, 3 }, - { Atflags, 4 } } }, - { .name = "unlink", .ret_type = 1, .nargs = 1, - .args = { { Name, 0 } } }, - { .name = "unlinkat", .ret_type = 1, .nargs = 3, - .args = { { Atfd, 0 }, { Name, 1 }, { Atflags, 2 } } }, + { .name = "break", .ret_type = 1, .nargs = 1, + .args = { { Ptr, 0 } } }, { .name = "chdir", .ret_type = 1, .nargs = 1, .args = { { Name, 0 } } }, - { .name = "chroot", .ret_type = 1, .nargs = 1, - .args = { { Name, 0 } } }, - { .name = "mkfifo", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Octal, 1 } } }, - { .name = "mkfifoat", .ret_type = 1, .nargs = 3, - .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 } } }, - { .name = "mknod", .ret_type = 1, .nargs = 3, - .args = { { Name, 0 }, { Octal, 1 }, { Int, 2 } } }, - { .name = "mknodat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 }, { Int, 3 } } }, + { .name = "chflags", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Hex, 1 } } }, { .name = "chmod", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Octal, 1 } } }, + { .name = "chown", .ret_type = 1, .nargs = 3, + .args = { { Name, 0 }, { Int, 1 }, { Int, 2 } } }, + { .name = "chroot", .ret_type = 1, .nargs = 1, + .args = { { Name, 0 } } }, + { .name = "clock_gettime", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Timespec | OUT, 1 } } }, + { .name = "close", .ret_type = 1, .nargs = 1, + .args = { { Int, 0 } } }, + { .name = "connect", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { Sockaddr | IN, 1 }, { Int, 2 } } }, + { .name = "connectat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Int, 1 }, { Sockaddr | IN, 2 }, + { Int, 3 } } }, + { .name = "eaccess", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Accessmode, 1 } } }, + { .name = "execve", .ret_type = 1, .nargs = 3, + .args = { { Name | IN, 0 }, { ExecArgs | IN, 1 }, + { ExecEnv | IN, 2 } } }, + { .name = "exit", .ret_type = 0, .nargs = 1, + .args = { { Hex, 0 } } }, + { .name = "faccessat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Name | IN, 1 }, { Accessmode, 2 }, + { Atflags, 3 } } }, { .name = "fchmod", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Octal, 1 } } }, - { .name = "lchmod", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Octal, 1 } } }, { .name = "fchmodat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 }, { Atflags, 3 } } }, - { .name = "chown", .ret_type = 1, .nargs = 3, - .args = { { Name, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "fchown", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Int, 1 }, { Int, 2 } } }, - { .name = "lchown", .ret_type = 1, .nargs = 3, - .args = { { Name, 0 }, { Int, 1 }, { Int, 2 } } }, { .name = "fchownat", .ret_type = 1, .nargs = 5, .args = { { Atfd, 0 }, { Name, 1 }, { Int, 2 }, { Int, 3 }, { Atflags, 4 } } }, - { .name = "linux_stat64", .ret_type = 1, .nargs = 3, - .args = { { Name | IN, 0 }, { Ptr | OUT, 1 }, { Ptr | IN, 1 } } }, - { .name = "mount", .ret_type = 1, .nargs = 4, - .args = { { Name, 0 }, { Name, 1 }, { Int, 2 }, { Ptr, 3 } } }, - { .name = "umount", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Int, 2 } } }, + { .name = "fcntl", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { Fcntl, 1 }, { Fcntlflag, 2 } } }, { .name = "fstat", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Stat | OUT, 1 } } }, { .name = "fstatat", .ret_type = 1, .nargs = 4, .args = { { Atfd, 0 }, { Name | IN, 1 }, { Stat | OUT, 2 }, 
{ Atflags, 3 } } }, - { .name = "stat", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Stat | OUT, 1 } } }, - { .name = "statfs", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { StatFs | OUT, 1 } } }, { .name = "fstatfs", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { StatFs | OUT, 1 } } }, - { .name = "lstat", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Stat | OUT, 1 } } }, - { .name = "linux_newstat", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Ptr | OUT, 1 } } }, - { .name = "linux_access", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Accessmode, 1 } } }, - { .name = "linux_newfstat", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Ptr | OUT, 1 } } }, - { .name = "write", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { BinString | IN, 1 }, { Int, 2 } } }, - { .name = "ioctl", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { Ioctl, 1 }, { Hex, 2 } } }, - { .name = "break", .ret_type = 1, .nargs = 1, - .args = { { Ptr, 0 } } }, - { .name = "exit", .ret_type = 0, .nargs = 1, - .args = { { Hex, 0 } } }, - { .name = "access", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Accessmode, 1 } } }, - { .name = "eaccess", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Accessmode, 1 } } }, - { .name = "faccessat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Name | IN, 1 }, { Accessmode, 2 }, - { Atflags, 3 } } }, - { .name = "sigaction", .ret_type = 1, .nargs = 3, - .args = { { Signal, 0 }, { Sigaction | IN, 1 }, - { Sigaction | OUT, 2 } } }, - { .name = "accept", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, - { .name = "bind", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { Sockaddr | IN, 1 }, { Int, 2 } } }, - { .name = "bindat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Int, 1 }, { Sockaddr | IN, 2 }, - { Int, 3 } } }, - { .name = "connect", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { Sockaddr | IN, 1 }, { Int, 2 } } }, - { .name = "connectat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Int, 1 }, { Sockaddr | IN, 2 }, - { Int, 3 } } }, + { .name = "ftruncate", .ret_type = 1, .nargs = 2, + .args = { { Int | IN, 0 }, { QuadHex | IN, 1 + QUAD_ALIGN } } }, + { .name = "futimens", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Timespec2 | IN, 1 } } }, + { .name = "futimes", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Timeval2 | IN, 1 } } }, + { .name = "futimesat", .ret_type = 1, .nargs = 3, + .args = { { Atfd, 0 }, { Name | IN, 1 }, { Timeval2 | IN, 2 } } }, + { .name = "getitimer", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Itimerval | OUT, 2 } } }, { .name = "getpeername", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, + { .name = "getpgid", .ret_type = 1, .nargs = 1, + .args = { { Int, 0 } } }, + { .name = "getrlimit", .ret_type = 1, .nargs = 2, + .args = { { Resource, 0 }, { Rlimit | OUT, 1 } } }, + { .name = "getrusage", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Rusage | OUT, 1 } } }, + { .name = "getsid", .ret_type = 1, .nargs = 1, + .args = { { Int, 0 } } }, { .name = "getsockname", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Sockaddr | OUT, 1 }, { Ptr | OUT, 2 } } }, - { .name = "recvfrom", .ret_type = 1, .nargs = 6, - .args = { { Int, 0 }, { BinString | OUT, 1 }, { Int, 2 }, { Hex, 3 }, - { Sockaddr | OUT, 4 }, { Ptr | OUT, 5 } } }, - { .name = "sendto", .ret_type = 1, .nargs = 6, - .args = { { Int, 0 }, 
{ BinString | IN, 1 }, { Int, 2 }, { Hex, 3 }, - { Sockaddr | IN, 4 }, { Ptr | IN, 5 } } }, - { .name = "execve", .ret_type = 1, .nargs = 3, - .args = { { Name | IN, 0 }, { ExecArgs | IN, 1 }, - { ExecEnv | IN, 2 } } }, - { .name = "linux_execve", .ret_type = 1, .nargs = 3, - .args = { { Name | IN, 0 }, { ExecArgs | IN, 1 }, - { ExecEnv | IN, 2 } } }, - { .name = "kldload", .ret_type = 1, .nargs = 1, + { .name = "gettimeofday", .ret_type = 1, .nargs = 2, + .args = { { Timeval | OUT, 0 }, { Ptr, 1 } } }, + { .name = "ioctl", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { Ioctl, 1 }, { Hex, 2 } } }, + { .name = "kevent", .ret_type = 1, .nargs = 6, + .args = { { Int, 0 }, { Kevent, 1 }, { Int, 2 }, { Kevent | OUT, 3 }, + { Int, 4 }, { Timespec, 5 } } }, + { .name = "kill", .ret_type = 1, .nargs = 2, + .args = { { Int | IN, 0 }, { Signal | IN, 1 } } }, + { .name = "kldfind", .ret_type = 1, .nargs = 1, .args = { { Name | IN, 0 } } }, - { .name = "kldunload", .ret_type = 1, .nargs = 1, + { .name = "kldfirstmod", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, - { .name = "kldfind", .ret_type = 1, .nargs = 1, + { .name = "kldload", .ret_type = 1, .nargs = 1, .args = { { Name | IN, 0 } } }, { .name = "kldnext", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, { .name = "kldstat", .ret_type = 1, .nargs = 2, .args = { { Int, 0 }, { Ptr, 1 } } }, - { .name = "kldfirstmod", .ret_type = 1, .nargs = 1, + { .name = "kldunload", .ret_type = 1, .nargs = 1, .args = { { Int, 0 } } }, + { .name = "kse_release", .ret_type = 0, .nargs = 1, + .args = { { Timespec, 0 } } }, + { .name = "lchflags", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Hex, 1 } } }, + { .name = "lchmod", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Octal, 1 } } }, + { .name = "lchown", .ret_type = 1, .nargs = 3, + .args = { { Name, 0 }, { Int, 1 }, { Int, 2 } } }, + { .name = "link", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Name, 1 } } }, + { .name = "linkat", .ret_type = 1, .nargs = 5, + .args = { { Atfd, 0 }, { Name, 1 }, { Atfd, 2 }, { Name, 3 }, + { Atflags, 4 } } }, + { .name = "lseek", .ret_type = 2, .nargs = 3, + .args = { { Int, 0 }, { QuadHex, 1 + QUAD_ALIGN }, + { Whence, 1 + QUAD_SLOTS + QUAD_ALIGN } } }, + { .name = "lstat", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Stat | OUT, 1 } } }, + { .name = "lutimes", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Timeval2 | IN, 1 } } }, + { .name = "mkdir", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Octal, 1 } } }, + { .name = "mkdirat", .ret_type = 1, .nargs = 3, + .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 } } }, + { .name = "mkfifo", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Octal, 1 } } }, + { .name = "mkfifoat", .ret_type = 1, .nargs = 3, + .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 } } }, + { .name = "mknod", .ret_type = 1, .nargs = 3, + .args = { { Name, 0 }, { Octal, 1 }, { Int, 2 } } }, + { .name = "mknodat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Name, 1 }, { Octal, 2 }, { Int, 3 } } }, + { .name = "mmap", .ret_type = 1, .nargs = 6, + .args = { { Ptr, 0 }, { Int, 1 }, { Mprot, 2 }, { Mmapflags, 3 }, + { Int, 4 }, { QuadHex, 5 + QUAD_ALIGN } } }, { .name = "modfind", .ret_type = 1, .nargs = 1, .args = { { Name | IN, 0 } } }, + { .name = "mount", .ret_type = 1, .nargs = 4, + .args = { { Name, 0 }, { Name, 1 }, { Int, 2 }, { Ptr, 3 } } }, + { .name = "mprotect", .ret_type = 1, .nargs = 3, + .args = { { Ptr, 0 }, { Int, 1 }, { Mprot, 2 } } }, + { .name = 
"munmap", .ret_type = 1, .nargs = 2, + .args = { { Ptr, 0 }, { Int, 1 } } }, { .name = "nanosleep", .ret_type = 1, .nargs = 1, .args = { { Timespec, 0 } } }, + { .name = "open", .ret_type = 1, .nargs = 3, + .args = { { Name | IN, 0 }, { Open, 1 }, { Octal, 2 } } }, + { .name = "openat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Name | IN, 1 }, { Open, 2 }, + { Octal, 3 } } }, + { .name = "pathconf", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Pathconf, 1 } } }, + { .name = "pipe", .ret_type = 1, .nargs = 1, + .args = { { PipeFds | OUT, 0 } } }, + { .name = "pipe2", .ret_type = 1, .nargs = 2, + .args = { { Ptr, 0 }, { Open, 1 } } }, + { .name = "poll", .ret_type = 1, .nargs = 3, + .args = { { Pollfd, 0 }, { Int, 1 }, { Int, 2 } } }, + { .name = "posix_openpt", .ret_type = 1, .nargs = 1, + .args = { { Open, 0 } } }, + { .name = "procctl", .ret_type = 1, .nargs = 4, + .args = { { Idtype, 0 }, { Quad, 1 + QUAD_ALIGN }, + { Procctl, 1 + QUAD_ALIGN + QUAD_SLOTS }, + { Ptr, 2 + QUAD_ALIGN + QUAD_SLOTS } } }, + { .name = "read", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { BinString | OUT, 1 }, { Int, 2 } } }, + { .name = "readlink", .ret_type = 1, .nargs = 3, + .args = { { Name, 0 }, { Readlinkres | OUT, 1 }, { Int, 2 } } }, + { .name = "readlinkat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Name, 1 }, { Readlinkres | OUT, 2 }, + { Int, 3 } } }, + { .name = "recvfrom", .ret_type = 1, .nargs = 6, + .args = { { Int, 0 }, { BinString | OUT, 1 }, { Int, 2 }, { Hex, 3 }, + { Sockaddr | OUT, 4 }, { Ptr | OUT, 5 } } }, + { .name = "rename", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Name, 1 } } }, + { .name = "renameat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Name, 1 }, { Atfd, 2 }, { Name, 3 } } }, + { .name = "rfork", .ret_type = 1, .nargs = 1, + .args = { { Rforkflags, 0 } } }, { .name = "select", .ret_type = 1, .nargs = 5, .args = { { Int, 0 }, { Fd_set, 1 }, { Fd_set, 2 }, { Fd_set, 3 }, { Timeval, 4 } } }, - { .name = "poll", .ret_type = 1, .nargs = 3, - .args = { { Pollfd, 0 }, { Int, 1 }, { Int, 2 } } }, - { .name = "gettimeofday", .ret_type = 1, .nargs = 2, - .args = { { Timeval | OUT, 0 }, { Ptr, 1 } } }, - { .name = "clock_gettime", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Timespec | OUT, 1 } } }, - { .name = "getitimer", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Itimerval | OUT, 2 } } }, + { .name = "sendto", .ret_type = 1, .nargs = 6, + .args = { { Int, 0 }, { BinString | IN, 1 }, { Int, 2 }, { Hex, 3 }, + { Sockaddr | IN, 4 }, { Ptr | IN, 5 } } }, { .name = "setitimer", .ret_type = 1, .nargs = 3, .args = { { Int, 0 }, { Itimerval, 1 }, { Itimerval | OUT, 2 } } }, - { .name = "kse_release", .ret_type = 0, .nargs = 1, - .args = { { Timespec, 0 } } }, - { .name = "kevent", .ret_type = 1, .nargs = 6, - .args = { { Int, 0 }, { Kevent, 1 }, { Int, 2 }, { Kevent | OUT, 3 }, - { Int, 4 }, { Timespec, 5 } } }, + { .name = "setrlimit", .ret_type = 1, .nargs = 2, + .args = { { Resource, 0 }, { Rlimit | IN, 1 } } }, + { .name = "shutdown", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Shutdown, 1 } } }, + { .name = "sigaction", .ret_type = 1, .nargs = 3, + .args = { { Signal, 0 }, { Sigaction | IN, 1 }, + { Sigaction | OUT, 2 } } }, { .name = "sigpending", .ret_type = 1, .nargs = 1, .args = { { Sigset | OUT, 0 } } }, { .name = "sigprocmask", .ret_type = 1, .nargs = 3, .args = { { Sigprocmask, 0 }, { Sigset, 1 }, { Sigset | OUT, 2 } } }, { .name = "sigqueue", .ret_type = 1, .nargs = 3, .args = { { 
Int, 0 }, { Signal, 1 }, { LongHex, 2 } } }, { .name = "sigreturn", .ret_type = 1, .nargs = 1, .args = { { Ptr, 0 } } }, { .name = "sigsuspend", .ret_type = 1, .nargs = 1, .args = { { Sigset | IN, 0 } } }, { .name = "sigtimedwait", .ret_type = 1, .nargs = 3, .args = { { Sigset | IN, 0 }, { Ptr, 1 }, { Timespec | IN, 2 } } }, { .name = "sigwait", .ret_type = 1, .nargs = 2, .args = { { Sigset | IN, 0 }, { Ptr, 1 } } }, { .name = "sigwaitinfo", .ret_type = 1, .nargs = 2, .args = { { Sigset | IN, 0 }, { Ptr, 1 } } }, - { .name = "unmount", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Int, 1 } } }, { .name = "socket", .ret_type = 1, .nargs = 3, .args = { { Sockdomain, 0 }, { Socktype, 1 }, { Int, 2 } } }, - { .name = "getrusage", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Rusage | OUT, 1 } } }, - { .name = "__getcwd", .ret_type = 1, .nargs = 2, - .args = { { Name | OUT, 0 }, { Int, 1 } } }, - { .name = "shutdown", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Shutdown, 1 } } }, - { .name = "getrlimit", .ret_type = 1, .nargs = 2, - .args = { { Resource, 0 }, { Rlimit | OUT, 1 } } }, - { .name = "setrlimit", .ret_type = 1, .nargs = 2, - .args = { { Resource, 0 }, { Rlimit | IN, 1 } } }, - { .name = "utimes", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Timeval2 | IN, 1 } } }, - { .name = "lutimes", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Timeval2 | IN, 1 } } }, - { .name = "futimes", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Timeval2 | IN, 1 } } }, - { .name = "futimesat", .ret_type = 1, .nargs = 3, - .args = { { Atfd, 0 }, { Name | IN, 1 }, { Timeval2 | IN, 2 } } }, - { .name = "futimens", .ret_type = 1, .nargs = 2, - .args = { { Int, 0 }, { Timespec2 | IN, 1 } } }, - { .name = "utimensat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Name | IN, 1 }, { Timespec2 | IN, 2 }, - { Atflags, 3 } } }, - { .name = "chflags", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Hex, 1 } } }, - { .name = "lchflags", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Hex, 1 } } }, - { .name = "pathconf", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { Pathconf, 1 } } }, - { .name = "pipe", .ret_type = 1, .nargs = 1, - .args = { { PipeFds | OUT, 0 } } }, - { .name = "pipe2", .ret_type = 1, .nargs = 2, - .args = { { Ptr, 0 }, { Open, 1 } } }, - { .name = "truncate", .ret_type = 1, .nargs = 2, - .args = { { Name | IN, 0 }, { QuadHex | IN, 1 + QUAD_ALIGN } } }, - { .name = "ftruncate", .ret_type = 1, .nargs = 2, - .args = { { Int | IN, 0 }, { QuadHex | IN, 1 + QUAD_ALIGN } } }, - { .name = "kill", .ret_type = 1, .nargs = 2, - .args = { { Int | IN, 0 }, { Signal | IN, 1 } } }, - { .name = "munmap", .ret_type = 1, .nargs = 2, - .args = { { Ptr, 0 }, { Int, 1 } } }, - { .name = "read", .ret_type = 1, .nargs = 3, - .args = { { Int, 0 }, { BinString | OUT, 1 }, { Int, 2 } } }, - { .name = "rename", .ret_type = 1, .nargs = 2, - .args = { { Name, 0 }, { Name, 1 } } }, - { .name = "renameat", .ret_type = 1, .nargs = 4, - .args = { { Atfd, 0 }, { Name, 1 }, { Atfd, 2 }, { Name, 3 } } }, + { .name = "stat", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Stat | OUT, 1 } } }, + { .name = "statfs", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { StatFs | OUT, 1 } } }, { .name = "symlink", .ret_type = 1, .nargs = 2, .args = { { Name, 0 }, { Name, 1 } } }, { .name = "symlinkat", .ret_type = 1, .nargs = 3, .args = { { Name, 0 }, { Atfd, 1 }, { Name, 2 } } }, - { .name = "posix_openpt", .ret_type = 1, 
.nargs = 1, - .args = { { Open, 0 } } }, + { .name = "sysarch", .ret_type = 1, .nargs = 2, + .args = { { Sysarch, 0 }, { Ptr, 1 } } }, + { .name = "thr_kill", .ret_type = 1, .nargs = 2, + .args = { { Long, 0 }, { Signal, 1 } } }, + { .name = "thr_self", .ret_type = 1, .nargs = 1, + .args = { { Ptr, 0 } } }, + { .name = "truncate", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { QuadHex | IN, 1 + QUAD_ALIGN } } }, +#if 0 + /* Does not exist */ + { .name = "umount", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Int, 2 } } }, +#endif + { .name = "unlink", .ret_type = 1, .nargs = 1, + .args = { { Name, 0 } } }, + { .name = "unlinkat", .ret_type = 1, .nargs = 3, + .args = { { Atfd, 0 }, { Name, 1 }, { Atflags, 2 } } }, + { .name = "unmount", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Int, 1 } } }, + { .name = "utimensat", .ret_type = 1, .nargs = 4, + .args = { { Atfd, 0 }, { Name | IN, 1 }, { Timespec2 | IN, 2 }, + { Atflags, 3 } } }, + { .name = "utimes", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Timeval2 | IN, 1 } } }, { .name = "wait4", .ret_type = 1, .nargs = 4, .args = { { Int, 0 }, { ExitStatus | OUT, 1 }, { Waitoptions, 2 }, { Rusage | OUT, 3 } } }, { .name = "wait6", .ret_type = 1, .nargs = 6, .args = { { Idtype, 0 }, { Quad, 1 + QUAD_ALIGN }, { ExitStatus | OUT, 1 + QUAD_ALIGN + QUAD_SLOTS }, { Waitoptions, 2 + QUAD_ALIGN + QUAD_SLOTS }, { Rusage | OUT, 3 + QUAD_ALIGN + QUAD_SLOTS }, { Ptr, 4 + QUAD_ALIGN + QUAD_SLOTS } } }, - { .name = "procctl", .ret_type = 1, .nargs = 4, - .args = { { Idtype, 0 }, { Quad, 1 + QUAD_ALIGN }, - { Procctl, 1 + QUAD_ALIGN + QUAD_SLOTS }, - { Ptr, 2 + QUAD_ALIGN + QUAD_SLOTS } } }, - { .name = "sysarch", .ret_type = 1, .nargs = 2, - .args = { { Sysarch, 0 }, { Ptr, 1 } } }, - { .name = "_umtx_op", .ret_type = 1, .nargs = 5, - .args = { { Ptr, 0 }, { Umtxop, 1 }, { LongHex, 2 }, { Ptr, 3 }, - { Ptr, 4 } } }, - { .name = "thr_kill", .ret_type = 1, .nargs = 2, - .args = { { Long, 0 }, { Signal, 1 } } }, - { .name = "thr_self", .ret_type = 1, .nargs = 1, - .args = { { Ptr, 0 } } }, + { .name = "write", .ret_type = 1, .nargs = 3, + .args = { { Int, 0 }, { BinString | IN, 1 }, { Int, 2 } } }, + + /* Linux ABI */ + { .name = "linux_access", .ret_type = 1, .nargs = 2, + .args = { { Name, 0 }, { Accessmode, 1 } } }, + { .name = "linux_execve", .ret_type = 1, .nargs = 3, + .args = { { Name | IN, 0 }, { ExecArgs | IN, 1 }, + { ExecEnv | IN, 2 } } }, + { .name = "linux_lseek", .ret_type = 2, .nargs = 3, + .args = { { Int, 0 }, { Int, 1 }, { Whence, 2 } } }, + { .name = "linux_mkdir", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Int, 1 } } }, + { .name = "linux_newfstat", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { Ptr | OUT, 1 } } }, + { .name = "linux_newstat", .ret_type = 1, .nargs = 2, + .args = { { Name | IN, 0 }, { Ptr | OUT, 1 } } }, + { .name = "linux_open", .ret_type = 1, .nargs = 3, + .args = { { Name, 0 }, { Hex, 1 }, { Octal, 2 } } }, + { .name = "linux_readlink", .ret_type = 1, .nargs = 3, + .args = { { Name, 0 }, { Name | OUT, 1 }, { Int, 2 } } }, + { .name = "linux_socketcall", .ret_type = 1, .nargs = 2, + .args = { { Int, 0 }, { LinuxSockArgs, 1 } } }, + { .name = "linux_stat64", .ret_type = 1, .nargs = 3, + .args = { { Name | IN, 0 }, { Ptr | OUT, 1 }, { Ptr | IN, 1 } } }, + { .name = 0 }, }; static STAILQ_HEAD(, syscall) syscalls; /* Xlat idea taken from strace */ struct xlat { int val; const char *str; }; #define X(a) { a, #a }, #define XEND { 0, NULL } static struct xlat 
kevent_filters[] = { X(EVFILT_READ) X(EVFILT_WRITE) X(EVFILT_AIO) X(EVFILT_VNODE) X(EVFILT_PROC) X(EVFILT_SIGNAL) X(EVFILT_TIMER) X(EVFILT_PROCDESC) X(EVFILT_FS) X(EVFILT_LIO) X(EVFILT_USER) X(EVFILT_SENDFILE) XEND }; static struct xlat kevent_flags[] = { X(EV_ADD) X(EV_DELETE) X(EV_ENABLE) X(EV_DISABLE) X(EV_ONESHOT) X(EV_CLEAR) X(EV_RECEIPT) X(EV_DISPATCH) X(EV_FORCEONESHOT) X(EV_DROP) X(EV_FLAG1) X(EV_ERROR) X(EV_EOF) XEND }; static struct xlat kevent_user_ffctrl[] = { X(NOTE_FFNOP) X(NOTE_FFAND) X(NOTE_FFOR) X(NOTE_FFCOPY) XEND }; static struct xlat kevent_rdwr_fflags[] = { X(NOTE_LOWAT) X(NOTE_FILE_POLL) XEND }; static struct xlat kevent_vnode_fflags[] = { X(NOTE_DELETE) X(NOTE_WRITE) X(NOTE_EXTEND) X(NOTE_ATTRIB) X(NOTE_LINK) X(NOTE_RENAME) X(NOTE_REVOKE) XEND }; static struct xlat kevent_proc_fflags[] = { X(NOTE_EXIT) X(NOTE_FORK) X(NOTE_EXEC) X(NOTE_TRACK) X(NOTE_TRACKERR) X(NOTE_CHILD) XEND }; static struct xlat kevent_timer_fflags[] = { X(NOTE_SECONDS) X(NOTE_MSECONDS) X(NOTE_USECONDS) X(NOTE_NSECONDS) XEND }; static struct xlat poll_flags[] = { X(POLLSTANDARD) X(POLLIN) X(POLLPRI) X(POLLOUT) X(POLLERR) X(POLLHUP) X(POLLNVAL) X(POLLRDNORM) X(POLLRDBAND) X(POLLWRBAND) X(POLLINIGNEOF) XEND }; static struct xlat mmap_flags[] = { X(MAP_SHARED) X(MAP_PRIVATE) X(MAP_FIXED) X(MAP_RESERVED0020) X(MAP_RESERVED0040) X(MAP_RESERVED0080) X(MAP_RESERVED0100) X(MAP_HASSEMAPHORE) X(MAP_STACK) X(MAP_NOSYNC) X(MAP_ANON) X(MAP_EXCL) X(MAP_NOCORE) X(MAP_PREFAULT_READ) #ifdef MAP_32BIT X(MAP_32BIT) #endif XEND }; static struct xlat mprot_flags[] = { X(PROT_NONE) X(PROT_READ) X(PROT_WRITE) X(PROT_EXEC) XEND }; static struct xlat whence_arg[] = { X(SEEK_SET) X(SEEK_CUR) X(SEEK_END) X(SEEK_DATA) X(SEEK_HOLE) XEND }; static struct xlat sigaction_flags[] = { X(SA_ONSTACK) X(SA_RESTART) X(SA_RESETHAND) X(SA_NOCLDSTOP) X(SA_NODEFER) X(SA_NOCLDWAIT) X(SA_SIGINFO) XEND }; static struct xlat fcntl_arg[] = { X(F_DUPFD) X(F_GETFD) X(F_SETFD) X(F_GETFL) X(F_SETFL) X(F_GETOWN) X(F_SETOWN) X(F_OGETLK) X(F_OSETLK) X(F_OSETLKW) X(F_DUP2FD) X(F_GETLK) X(F_SETLK) X(F_SETLKW) X(F_SETLK_REMOTE) X(F_READAHEAD) X(F_RDAHEAD) X(F_DUPFD_CLOEXEC) X(F_DUP2FD_CLOEXEC) XEND }; static struct xlat fcntlfd_arg[] = { X(FD_CLOEXEC) XEND }; static struct xlat fcntlfl_arg[] = { X(O_APPEND) X(O_ASYNC) X(O_FSYNC) X(O_NONBLOCK) X(O_NOFOLLOW) X(FRDAHEAD) X(O_DIRECT) XEND }; static struct xlat sockdomain_arg[] = { X(PF_UNSPEC) X(PF_LOCAL) X(PF_UNIX) X(PF_INET) X(PF_IMPLINK) X(PF_PUP) X(PF_CHAOS) X(PF_NETBIOS) X(PF_ISO) X(PF_OSI) X(PF_ECMA) X(PF_DATAKIT) X(PF_CCITT) X(PF_SNA) X(PF_DECnet) X(PF_DLI) X(PF_LAT) X(PF_HYLINK) X(PF_APPLETALK) X(PF_ROUTE) X(PF_LINK) X(PF_XTP) X(PF_COIP) X(PF_CNT) X(PF_SIP) X(PF_IPX) X(PF_RTIP) X(PF_PIP) X(PF_ISDN) X(PF_KEY) X(PF_INET6) X(PF_NATM) X(PF_ATM) X(PF_NETGRAPH) X(PF_SLOW) X(PF_SCLUSTER) X(PF_ARP) X(PF_BLUETOOTH) X(PF_IEEE80211) X(PF_INET_SDP) X(PF_INET6_SDP) XEND }; static struct xlat socktype_arg[] = { X(SOCK_STREAM) X(SOCK_DGRAM) X(SOCK_RAW) X(SOCK_RDM) X(SOCK_SEQPACKET) XEND }; static struct xlat open_flags[] = { X(O_RDONLY) X(O_WRONLY) X(O_RDWR) X(O_ACCMODE) X(O_NONBLOCK) X(O_APPEND) X(O_SHLOCK) X(O_EXLOCK) X(O_ASYNC) X(O_FSYNC) X(O_NOFOLLOW) X(O_CREAT) X(O_TRUNC) X(O_EXCL) X(O_NOCTTY) X(O_DIRECT) X(O_DIRECTORY) X(O_EXEC) X(O_TTY_INIT) X(O_CLOEXEC) X(O_VERIFY) XEND }; static struct xlat shutdown_arg[] = { X(SHUT_RD) X(SHUT_WR) X(SHUT_RDWR) XEND }; static struct xlat resource_arg[] = { X(RLIMIT_CPU) X(RLIMIT_FSIZE) X(RLIMIT_DATA) X(RLIMIT_STACK) X(RLIMIT_CORE) X(RLIMIT_RSS) X(RLIMIT_MEMLOCK) 
X(RLIMIT_NPROC) X(RLIMIT_NOFILE) X(RLIMIT_SBSIZE) X(RLIMIT_VMEM) X(RLIMIT_NPTS) X(RLIMIT_SWAP) X(RLIMIT_KQUEUES) XEND }; static struct xlat pathconf_arg[] = { X(_PC_LINK_MAX) X(_PC_MAX_CANON) X(_PC_MAX_INPUT) X(_PC_NAME_MAX) X(_PC_PATH_MAX) X(_PC_PIPE_BUF) X(_PC_CHOWN_RESTRICTED) X(_PC_NO_TRUNC) X(_PC_VDISABLE) X(_PC_ASYNC_IO) X(_PC_PRIO_IO) X(_PC_SYNC_IO) X(_PC_ALLOC_SIZE_MIN) X(_PC_FILESIZEBITS) X(_PC_REC_INCR_XFER_SIZE) X(_PC_REC_MAX_XFER_SIZE) X(_PC_REC_MIN_XFER_SIZE) X(_PC_REC_XFER_ALIGN) X(_PC_SYMLINK_MAX) X(_PC_ACL_EXTENDED) X(_PC_ACL_PATH_MAX) X(_PC_CAP_PRESENT) X(_PC_INF_PRESENT) X(_PC_MAC_PRESENT) X(_PC_ACL_NFS4) X(_PC_MIN_HOLE_SIZE) XEND }; static struct xlat rfork_flags[] = { X(RFFDG) X(RFPROC) X(RFMEM) X(RFNOWAIT) X(RFCFDG) X(RFTHREAD) X(RFSIGSHARE) X(RFLINUXTHPN) X(RFTSIGZMB) X(RFPPWAIT) XEND }; static struct xlat wait_options[] = { X(WNOHANG) X(WUNTRACED) X(WCONTINUED) X(WNOWAIT) X(WEXITED) X(WTRAPPED) XEND }; static struct xlat idtype_arg[] = { X(P_PID) X(P_PPID) X(P_PGID) X(P_SID) X(P_CID) X(P_UID) X(P_GID) X(P_ALL) X(P_LWPID) X(P_TASKID) X(P_PROJID) X(P_POOLID) X(P_JAILID) X(P_CTID) X(P_CPUID) X(P_PSETID) XEND }; static struct xlat procctl_arg[] = { X(PROC_SPROTECT) X(PROC_REAP_ACQUIRE) X(PROC_REAP_RELEASE) X(PROC_REAP_STATUS) X(PROC_REAP_GETPIDS) X(PROC_REAP_KILL) X(PROC_TRACE_CTL) X(PROC_TRACE_STATUS) XEND }; static struct xlat umtx_ops[] = { X(UMTX_OP_RESERVED0) X(UMTX_OP_RESERVED1) X(UMTX_OP_WAIT) X(UMTX_OP_WAKE) X(UMTX_OP_MUTEX_TRYLOCK) X(UMTX_OP_MUTEX_LOCK) X(UMTX_OP_MUTEX_UNLOCK) X(UMTX_OP_SET_CEILING) X(UMTX_OP_CV_WAIT) X(UMTX_OP_CV_SIGNAL) X(UMTX_OP_CV_BROADCAST) X(UMTX_OP_WAIT_UINT) X(UMTX_OP_RW_RDLOCK) X(UMTX_OP_RW_WRLOCK) X(UMTX_OP_RW_UNLOCK) X(UMTX_OP_WAIT_UINT_PRIVATE) X(UMTX_OP_WAKE_PRIVATE) X(UMTX_OP_MUTEX_WAIT) X(UMTX_OP_MUTEX_WAKE) X(UMTX_OP_SEM_WAIT) X(UMTX_OP_SEM_WAKE) X(UMTX_OP_NWAKE_PRIVATE) X(UMTX_OP_MUTEX_WAKE2) X(UMTX_OP_SEM2_WAIT) X(UMTX_OP_SEM2_WAKE) XEND }; static struct xlat at_flags[] = { X(AT_EACCESS) X(AT_SYMLINK_NOFOLLOW) X(AT_SYMLINK_FOLLOW) X(AT_REMOVEDIR) XEND }; static struct xlat access_modes[] = { X(R_OK) X(W_OK) X(X_OK) XEND }; static struct xlat sysarch_ops[] = { #if defined(__i386__) || defined(__amd64__) X(I386_GET_LDT) X(I386_SET_LDT) X(I386_GET_IOPERM) X(I386_SET_IOPERM) X(I386_VM86) X(I386_GET_FSBASE) X(I386_SET_FSBASE) X(I386_GET_GSBASE) X(I386_SET_GSBASE) X(I386_GET_XFPUSTATE) X(AMD64_GET_FSBASE) X(AMD64_SET_FSBASE) X(AMD64_GET_GSBASE) X(AMD64_SET_GSBASE) X(AMD64_GET_XFPUSTATE) #endif XEND }; static struct xlat linux_socketcall_ops[] = { X(LINUX_SOCKET) X(LINUX_BIND) X(LINUX_CONNECT) X(LINUX_LISTEN) X(LINUX_ACCEPT) X(LINUX_GETSOCKNAME) X(LINUX_GETPEERNAME) X(LINUX_SOCKETPAIR) X(LINUX_SEND) X(LINUX_RECV) X(LINUX_SENDTO) X(LINUX_RECVFROM) X(LINUX_SHUTDOWN) X(LINUX_SETSOCKOPT) X(LINUX_GETSOCKOPT) X(LINUX_SENDMSG) X(LINUX_RECVMSG) XEND }; static struct xlat sigprocmask_ops[] = { X(SIG_BLOCK) X(SIG_UNBLOCK) X(SIG_SETMASK) XEND }; #undef X #undef XEND /* * Searches an xlat array for a value, and returns it if found. Otherwise * return a string representation. 
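/*
 * A sketch of how the X()/XEND tables above get used: lookup() walks the
 * name/value pairs and falls back to printing the raw number in the given
 * base when nothing matches, and xlookup_bits() decomposes a flag word.
 * demo_whence[] is an illustrative, hand-written table; the real ones are
 * generated with the X() macro.
 */
static struct xlat demo_whence[] __unused = {
	{ SEEK_SET, "SEEK_SET" }, { SEEK_CUR, "SEEK_CUR" },
	{ SEEK_END, "SEEK_END" }, { 0, NULL }
};
/*
 * lookup(demo_whence, SEEK_CUR, 10)             -> "SEEK_CUR"
 * lookup(demo_whence, 1000, 10)                 -> "1000" (unknown value)
 * xlookup_bits(open_flags, O_RDWR | 0x40000000) -> "O_RDWR|0x40000000"
 */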
*/ static const char * lookup(struct xlat *xlat, int val, int base) { static char tmp[16]; for (; xlat->str != NULL; xlat++) if (xlat->val == val) return (xlat->str); switch (base) { case 8: sprintf(tmp, "0%o", val); break; case 16: sprintf(tmp, "0x%x", val); break; case 10: sprintf(tmp, "%u", val); break; default: errx(1,"Unknown lookup base"); break; } return (tmp); } static const char * xlookup(struct xlat *xlat, int val) { return (lookup(xlat, val, 16)); } /* * Searches an xlat array containing bitfield values. Remaining bits * set after removing the known ones are printed at the end: * IN|0x400. */ static char * xlookup_bits(struct xlat *xlat, int val) { int len, rem; static char str[512]; len = 0; rem = val; for (; xlat->str != NULL; xlat++) { if ((xlat->val & rem) == xlat->val) { /* * Don't print the "all-bits-zero" string unless all * bits are really zero. */ if (xlat->val == 0 && val != 0) continue; len += sprintf(str + len, "%s|", xlat->str); rem &= ~(xlat->val); } } /* * If we have leftover bits or didn't match anything, print * the remainder. */ if (rem || len == 0) len += sprintf(str + len, "0x%x", rem); if (len && str[len - 1] == '|') len--; str[len] = 0; return (str); } void init_syscalls(void) { struct syscall *sc; STAILQ_INIT(&syscalls); for (sc = decoded_syscalls; sc->name != NULL; sc++) STAILQ_INSERT_HEAD(&syscalls, sc, entries); } /* * If/when the list gets big, it might be desirable to do it * as a hash table or binary search. */ struct syscall * get_syscall(const char *name, int nargs) { struct syscall *sc; int i; if (name == NULL) return (NULL); STAILQ_FOREACH(sc, &syscalls, entries) if (strcmp(name, sc->name) == 0) return (sc); /* It is unknown. Add it into the list. */ #if DEBUG fprintf(stderr, "unknown syscall %s -- setting args to %d\n", name, nargs); #endif sc = calloc(1, sizeof(struct syscall)); sc->name = strdup(name); sc->ret_type = 1; sc->nargs = nargs; for (i = 0; i < nargs; i++) { sc->args[i].offset = i; /* Treat all unknown arguments as LongHex. */ sc->args[i].type = LongHex; } STAILQ_INSERT_HEAD(&syscalls, sc, entries); return (sc); } /* * Copy a fixed amount of bytes from the process. */ static int get_struct(pid_t pid, void *offset, void *buf, int len) { struct ptrace_io_desc iorequest; iorequest.piod_op = PIOD_READ_D; iorequest.piod_offs = offset; iorequest.piod_addr = buf; iorequest.piod_len = len; if (ptrace(PT_IO, pid, (caddr_t)&iorequest, 0) < 0) return (-1); return (0); } #define MAXSIZE 4096 /* * Copy a string from the process. Note that it is * expected to be a C string, but if max is set, it will * only get that much. */ static char * get_string(pid_t pid, void *addr, int max) { struct ptrace_io_desc iorequest; char *buf, *nbuf; size_t offset, size, totalsize; offset = 0; if (max) size = max + 1; else { /* Read up to the end of the current page. 
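/*
 * A sketch of the PT_IO transfer that get_struct() and get_string() are
 * built on: a ptrace_io_desc describes a copy from the traced process's
 * address space into a local buffer.  read_remote() is a hypothetical
 * name; the real helpers add retry and string-termination logic.
 */
static int
read_remote(pid_t pid, void *remote, void *local, size_t len)
{
	struct ptrace_io_desc piod;

	piod.piod_op = PIOD_READ_D;	/* read from the tracee's data space */
	piod.piod_offs = remote;	/* address inside the traced process */
	piod.piod_addr = local;		/* buffer owned by truss */
	piod.piod_len = len;
	return (ptrace(PT_IO, pid, (caddr_t)&piod, 0));
}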
*/ size = PAGE_SIZE - ((uintptr_t)addr % PAGE_SIZE); if (size > MAXSIZE) size = MAXSIZE; } totalsize = size; buf = malloc(totalsize); if (buf == NULL) return (NULL); for (;;) { iorequest.piod_op = PIOD_READ_D; iorequest.piod_offs = (char *)addr + offset; iorequest.piod_addr = buf + offset; iorequest.piod_len = size; if (ptrace(PT_IO, pid, (caddr_t)&iorequest, 0) < 0) { free(buf); return (NULL); } if (memchr(buf + offset, '\0', size) != NULL) return (buf); offset += size; if (totalsize < MAXSIZE && max == 0) { size = MAXSIZE - totalsize; if (size > PAGE_SIZE) size = PAGE_SIZE; nbuf = realloc(buf, totalsize + size); if (nbuf == NULL) { buf[totalsize - 1] = '\0'; return (buf); } buf = nbuf; totalsize += size; } else { buf[totalsize - 1] = '\0'; return (buf); } } } static char * strsig2(int sig) { static char tmp[sizeof(int) * 3 + 1]; char *ret; ret = strsig(sig); if (ret == NULL) { snprintf(tmp, sizeof(tmp), "%d", sig); ret = tmp; } return (ret); } static void print_kevent(FILE *fp, struct kevent *ke, int input) { switch (ke->filter) { case EVFILT_READ: case EVFILT_WRITE: case EVFILT_VNODE: case EVFILT_PROC: case EVFILT_TIMER: case EVFILT_PROCDESC: fprintf(fp, "%ju", (uintmax_t)ke->ident); break; case EVFILT_SIGNAL: fputs(strsig2(ke->ident), fp); break; default: fprintf(fp, "%p", (void *)ke->ident); } fprintf(fp, ",%s,%s,", xlookup(kevent_filters, ke->filter), xlookup_bits(kevent_flags, ke->flags)); switch (ke->filter) { case EVFILT_READ: case EVFILT_WRITE: fputs(xlookup_bits(kevent_rdwr_fflags, ke->fflags), fp); break; case EVFILT_VNODE: fputs(xlookup_bits(kevent_vnode_fflags, ke->fflags), fp); break; case EVFILT_PROC: case EVFILT_PROCDESC: fputs(xlookup_bits(kevent_proc_fflags, ke->fflags), fp); break; case EVFILT_TIMER: fputs(xlookup_bits(kevent_timer_fflags, ke->fflags), fp); break; case EVFILT_USER: { int ctrl, data; ctrl = ke->fflags & NOTE_FFCTRLMASK; data = ke->fflags & NOTE_FFLAGSMASK; if (input) { fputs(xlookup(kevent_user_ffctrl, ctrl), fp); if (ke->fflags & NOTE_TRIGGER) fputs("|NOTE_TRIGGER", fp); if (data != 0) fprintf(fp, "|%#x", data); } else { fprintf(fp, "%#x", data); } break; } default: fprintf(fp, "%#x", ke->fflags); } fprintf(fp, ",%p,%p", (void *)ke->data, (void *)ke->udata); } /* * Converts a syscall argument into a string. Said string is * allocated via malloc(), so needs to be free()'d. sc is * a pointer to the syscall description (see above); args is * an array of all of the system call arguments. */ char * print_arg(struct syscall_args *sc, unsigned long *args, long *retval, struct trussinfo *trussinfo) { FILE *fp; char *tmp; size_t tmplen; pid_t pid; fp = open_memstream(&tmp, &tmplen); pid = trussinfo->curthread->proc->pid; switch (sc->type & ARG_MASK) { case Hex: fprintf(fp, "0x%x", (int)args[sc->offset]); break; case Octal: fprintf(fp, "0%o", (int)args[sc->offset]); break; case Int: fprintf(fp, "%d", (int)args[sc->offset]); break; case LongHex: fprintf(fp, "0x%lx", args[sc->offset]); break; case Long: fprintf(fp, "%ld", args[sc->offset]); break; case Name: { /* NULL-terminated string. */ char *tmp2; tmp2 = get_string(pid, (void*)args[sc->offset], 0); fprintf(fp, "\"%s\"", tmp2); free(tmp2); break; } case BinString: { /* * Binary block of data that might have printable characters. * XXX If type|OUT, assume that the length is the syscall's * return value. Otherwise, assume that the length of the block * is in the next syscall argument. 
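/*
 * A sketch of the open_memstream() pattern print_arg() is built around:
 * each formatter writes into an in-memory FILE and the malloc()ed buffer
 * is handed back to the caller, which must free() it.  format_int_arg()
 * is a hypothetical, simplified example.
 */
static char *
format_int_arg(long value)
{
	FILE *fp;
	char *buf;
	size_t buflen;

	fp = open_memstream(&buf, &buflen);
	if (fp == NULL)
		return (NULL);
	fprintf(fp, "%ld", value);
	fclose(fp);		/* flushes and finalizes buf/buflen */
	return (buf);		/* caller free()s the result */
}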

/*
 * Converts a syscall argument into a string.  Said string is
 * allocated via malloc(), so needs to be free()'d.  sc is
 * a pointer to the syscall description (see above); args is
 * an array of all of the system call arguments.
 */
char *
print_arg(struct syscall_args *sc, unsigned long *args, long *retval,
    struct trussinfo *trussinfo)
{
	FILE *fp;
	char *tmp;
	size_t tmplen;
	pid_t pid;

	fp = open_memstream(&tmp, &tmplen);
	pid = trussinfo->curthread->proc->pid;
	switch (sc->type & ARG_MASK) {
	case Hex:
		fprintf(fp, "0x%x", (int)args[sc->offset]);
		break;
	case Octal:
		fprintf(fp, "0%o", (int)args[sc->offset]);
		break;
	case Int:
		fprintf(fp, "%d", (int)args[sc->offset]);
		break;
	case LongHex:
		fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	case Long:
		fprintf(fp, "%ld", args[sc->offset]);
		break;
	case Name: {
		/* NULL-terminated string. */
		char *tmp2;

		tmp2 = get_string(pid, (void*)args[sc->offset], 0);
		fprintf(fp, "\"%s\"", tmp2);
		free(tmp2);
		break;
	}
	case BinString: {
		/*
		 * Binary block of data that might have printable characters.
		 * XXX If type|OUT, assume that the length is the syscall's
		 * return value.  Otherwise, assume that the length of the
		 * block is in the next syscall argument.
		 */
		int max_string = trussinfo->strsize;
		char tmp2[max_string + 1], *tmp3;
		int len;
		int truncated = 0;

		if (sc->type & OUT)
			len = retval[0];
		else
			len = args[sc->offset + 1];

		/*
		 * Don't print more than max_string characters, to avoid word
		 * wrap.  If we have to truncate put some ... after the
		 * string.
		 */
		if (len > max_string) {
			len = max_string;
			truncated = 1;
		}
		if (len && get_struct(pid, (void*)args[sc->offset], &tmp2,
		    len) != -1) {
			tmp3 = malloc(len * 4 + 1);
			while (len) {
				if (strvisx(tmp3, tmp2, len,
				    VIS_CSTYLE|VIS_TAB|VIS_NL) <= max_string)
					break;
				len--;
				truncated = 1;
			};
			fprintf(fp, "\"%s\"%s", tmp3, truncated ?
			    "..." : "");
			free(tmp3);
		} else {
			fprintf(fp, "0x%lx", args[sc->offset]);
		}
		break;
	}
	case ExecArgs:
	case ExecEnv:
	case StringArray: {
		uintptr_t addr;
		union {
			char *strarray[0];
			char buf[PAGE_SIZE];
		} u;
		char *string;
		size_t len;
		u_int first, i;

		/*
		 * Only parse argv[] and environment arrays from exec calls
		 * if requested.
		 */
		if (((sc->type & ARG_MASK) == ExecArgs &&
		    (trussinfo->flags & EXECVEARGS) == 0) ||
		    ((sc->type & ARG_MASK) == ExecEnv &&
		    (trussinfo->flags & EXECVEENVS) == 0)) {
			fprintf(fp, "0x%lx", args[sc->offset]);
			break;
		}

		/*
		 * Read a page of pointers at a time.  Punt if the top-level
		 * pointer is not aligned.  Note that the first read is of
		 * a partial page.
		 */
		addr = args[sc->offset];
		if (addr % sizeof(char *) != 0) {
			fprintf(fp, "0x%lx", args[sc->offset]);
			break;
		}

		len = PAGE_SIZE - (addr & PAGE_MASK);
		if (get_struct(pid, (void *)addr, u.buf, len) == -1) {
			fprintf(fp, "0x%lx", args[sc->offset]);
			break;
		}

		fputc('[', fp);
		first = 1;
		i = 0;
		while (u.strarray[i] != NULL) {
			string = get_string(pid, u.strarray[i], 0);
			fprintf(fp, "%s \"%s\"", first ? "" : ",", string);
			free(string);
			first = 0;

			i++;
			if (i == len / sizeof(char *)) {
				addr += len;
				len = PAGE_SIZE;
				if (get_struct(pid, (void *)addr, u.buf,
				    len) == -1) {
					fprintf(fp, ", ");
					break;
				}
				i = 0;
			}
		}
		fputs(" ]", fp);
		break;
	}
#ifdef __LP64__
	case Quad:
		fprintf(fp, "%ld", args[sc->offset]);
		break;
	case QuadHex:
		fprintf(fp, "0x%lx", args[sc->offset]);
		break;
#else
	case Quad:
	case QuadHex: {
		unsigned long long ll;

#if _BYTE_ORDER == _LITTLE_ENDIAN
		ll = (unsigned long long)args[sc->offset + 1] << 32 |
		    args[sc->offset];
#else
		ll = (unsigned long long)args[sc->offset] << 32 |
		    args[sc->offset + 1];
#endif
		if ((sc->type & ARG_MASK) == Quad)
			fprintf(fp, "%lld", ll);
		else
			fprintf(fp, "0x%llx", ll);
		break;
	}
#endif
	case Ptr:
		fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	case Readlinkres: {
		char *tmp2;

		if (retval[0] == -1)
			break;
		tmp2 = get_string(pid, (void*)args[sc->offset], retval[0]);
		fprintf(fp, "\"%s\"", tmp2);
		free(tmp2);
		break;
	}
	case Ioctl: {
		const char *temp;
		unsigned long cmd;

		cmd = args[sc->offset];
		temp = ioctlname(cmd);
		if (temp)
			fputs(temp, fp);
		else {
			fprintf(fp, "0x%lx { IO%s%s 0x%lx('%c'), %lu, %lu }",
			    cmd, cmd & IOC_OUT ? "R" : "",
			    cmd & IOC_IN ? "W" : "", IOCGROUP(cmd),
			    isprint(IOCGROUP(cmd)) ?
			    (char)IOCGROUP(cmd) : '?', cmd & 0xFF,
			    IOCPARM_LEN(cmd));
		}
		break;
	}
	case Timespec: {
		struct timespec ts;

		if (get_struct(pid, (void *)args[sc->offset], &ts,
		    sizeof(ts)) != -1)
			fprintf(fp, "{ %jd.%09ld }", (intmax_t)ts.tv_sec,
			    ts.tv_nsec);
		else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Timespec2: {
		struct timespec ts[2];
		const char *sep;
		unsigned int i;

		if (get_struct(pid, (void *)args[sc->offset], &ts,
		    sizeof(ts)) != -1) {
			fputs("{ ", fp);
			sep = "";
			for (i = 0; i < nitems(ts); i++) {
				fputs(sep, fp);
				sep = ", ";
				switch (ts[i].tv_nsec) {
				case UTIME_NOW:
					fprintf(fp, "UTIME_NOW");
					break;
				case UTIME_OMIT:
					fprintf(fp, "UTIME_OMIT");
					break;
				default:
					fprintf(fp, "%jd.%09ld",
					    (intmax_t)ts[i].tv_sec,
					    ts[i].tv_nsec);
					break;
				}
			}
			fputs(" }", fp);
		} else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Timeval: {
		struct timeval tv;

		if (get_struct(pid, (void *)args[sc->offset], &tv,
		    sizeof(tv)) != -1)
			fprintf(fp, "{ %jd.%06ld }", (intmax_t)tv.tv_sec,
			    tv.tv_usec);
		else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Timeval2: {
		struct timeval tv[2];

		if (get_struct(pid, (void *)args[sc->offset], &tv,
		    sizeof(tv)) != -1)
			fprintf(fp, "{ %jd.%06ld, %jd.%06ld }",
			    (intmax_t)tv[0].tv_sec, tv[0].tv_usec,
			    (intmax_t)tv[1].tv_sec, tv[1].tv_usec);
		else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Itimerval: {
		struct itimerval itv;

		if (get_struct(pid, (void *)args[sc->offset], &itv,
		    sizeof(itv)) != -1)
			fprintf(fp, "{ %jd.%06ld, %jd.%06ld }",
			    (intmax_t)itv.it_interval.tv_sec,
			    itv.it_interval.tv_usec,
			    (intmax_t)itv.it_value.tv_sec,
			    itv.it_value.tv_usec);
		else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case LinuxSockArgs: {
		struct linux_socketcall_args largs;

		if (get_struct(pid, (void *)args[sc->offset], (void *)&largs,
		    sizeof(largs)) != -1)
			fprintf(fp, "{ %s, 0x%lx }",
			    lookup(linux_socketcall_ops, largs.what, 10),
			    (long unsigned int)largs.args);
		else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Pollfd: {
		/*
		 * XXX: A Pollfd argument expects the /next/ syscall argument
		 * to be the number of fds in the array.  This matches the
		 * poll syscall.
		 */
		struct pollfd *pfd;
		int numfds = args[sc->offset + 1];
		size_t bytes = sizeof(struct pollfd) * numfds;
		int i;

		if ((pfd = malloc(bytes)) == NULL)
			err(1, "Cannot malloc %zu bytes for pollfd array",
			    bytes);
		if (get_struct(pid, (void *)args[sc->offset], pfd, bytes)
		    != -1) {
			fputs("{", fp);
			for (i = 0; i < numfds; i++) {
				fprintf(fp, " %d/%s", pfd[i].fd,
				    xlookup_bits(poll_flags, pfd[i].events));
			}
			fputs(" }", fp);
		} else {
			fprintf(fp, "0x%lx", args[sc->offset]);
		}
		free(pfd);
		break;
	}
	case Fd_set: {
		/*
		 * XXX: A Fd_set argument expects the /first/ syscall argument
		 * to be the number of fds in the array.  This matches the
		 * select syscall.
		 */
		fd_set *fds;
		int numfds = args[0];
		size_t bytes = _howmany(numfds, _NFDBITS) * _NFDBITS;
		int i;

		if ((fds = malloc(bytes)) == NULL)
			err(1, "Cannot malloc %zu bytes for fd_set array",
			    bytes);
		if (get_struct(pid, (void *)args[sc->offset], fds, bytes)
		    != -1) {
			fputs("{", fp);
			for (i = 0; i < numfds; i++) {
				if (FD_ISSET(i, fds))
					fprintf(fp, " %d", i);
			}
			fputs(" }", fp);
		} else
			fprintf(fp, "0x%lx", args[sc->offset]);
		free(fds);
		break;
	}
	case Signal:
		fputs(strsig2(args[sc->offset]), fp);
		break;
	case Sigset: {
		long sig;
		sigset_t ss;
		int i, first;

		sig = args[sc->offset];
		if (get_struct(pid, (void *)args[sc->offset], (void *)&ss,
		    sizeof(ss)) == -1) {
			fprintf(fp, "0x%lx", args[sc->offset]);
			break;
		}
		fputs("{ ", fp);
		first = 1;
		for (i = 1; i < sys_nsig; i++) {
			if (sigismember(&ss, i)) {
				fprintf(fp, "%s%s", !first ?
"|" : "", strsig(i)); first = 0; } } if (!first) fputc(' ', fp); fputc('}', fp); break; } case Sigprocmask: { fputs(xlookup(sigprocmask_ops, args[sc->offset]), fp); break; } case Fcntlflag: { /* XXX: Output depends on the value of the previous argument. */ switch (args[sc->offset - 1]) { case F_SETFD: fputs(xlookup_bits(fcntlfd_arg, args[sc->offset]), fp); break; case F_SETFL: fputs(xlookup_bits(fcntlfl_arg, args[sc->offset]), fp); break; case F_GETFD: case F_GETFL: case F_GETOWN: break; default: fprintf(fp, "0x%lx", args[sc->offset]); break; } break; } case Open: fputs(xlookup_bits(open_flags, args[sc->offset]), fp); break; case Fcntl: fputs(xlookup(fcntl_arg, args[sc->offset]), fp); break; case Mprot: fputs(xlookup_bits(mprot_flags, args[sc->offset]), fp); break; case Mmapflags: { int align, flags; /* * MAP_ALIGNED can't be handled by xlookup_bits(), so * generate that string manually and prepend it to the * string from xlookup_bits(). Have to be careful to * avoid outputting MAP_ALIGNED|0 if MAP_ALIGNED is * the only flag. */ flags = args[sc->offset] & ~MAP_ALIGNMENT_MASK; align = args[sc->offset] & MAP_ALIGNMENT_MASK; if (align != 0) { if (align == MAP_ALIGNED_SUPER) fputs("MAP_ALIGNED_SUPER", fp); else fprintf(fp, "MAP_ALIGNED(%d)", align >> MAP_ALIGNMENT_SHIFT); if (flags == 0) break; fputc('|', fp); } fputs(xlookup_bits(mmap_flags, flags), fp); break; } case Whence: fputs(xlookup(whence_arg, args[sc->offset]), fp); break; case Sockdomain: fputs(xlookup(sockdomain_arg, args[sc->offset]), fp); break; case Socktype: { int type, flags; flags = args[sc->offset] & (SOCK_CLOEXEC | SOCK_NONBLOCK); type = args[sc->offset] & ~flags; fputs(xlookup(socktype_arg, type), fp); if (flags & SOCK_CLOEXEC) fprintf(fp, "|SOCK_CLOEXEC"); if (flags & SOCK_NONBLOCK) fprintf(fp, "|SOCK_NONBLOCK"); break; } case Shutdown: fputs(xlookup(shutdown_arg, args[sc->offset]), fp); break; case Resource: fputs(xlookup(resource_arg, args[sc->offset]), fp); break; case Pathconf: fputs(xlookup(pathconf_arg, args[sc->offset]), fp); break; case Rforkflags: fputs(xlookup_bits(rfork_flags, args[sc->offset]), fp); break; case Sockaddr: { char addr[64]; struct sockaddr_in *lsin; struct sockaddr_in6 *lsin6; struct sockaddr_un *sun; struct sockaddr *sa; socklen_t len; u_char *q; if (args[sc->offset] == 0) { fputs("NULL", fp); break; } /* * Extract the address length from the next argument. If * this is an output sockaddr (OUT is set), then the * next argument is a pointer to a socklen_t. Otherwise * the next argument contains a socklen_t by value. */ if (sc->type & OUT) { if (get_struct(pid, (void *)args[sc->offset + 1], &len, sizeof(len)) == -1) { fprintf(fp, "0x%lx", args[sc->offset]); break; } } else len = args[sc->offset + 1]; /* If the length is too small, just bail. 
*/ if (len < sizeof(*sa)) { fprintf(fp, "0x%lx", args[sc->offset]); break; } sa = calloc(1, len); if (get_struct(pid, (void *)args[sc->offset], sa, len) == -1) { free(sa); fprintf(fp, "0x%lx", args[sc->offset]); break; } switch (sa->sa_family) { case AF_INET: if (len < sizeof(*lsin)) goto sockaddr_short; lsin = (struct sockaddr_in *)(void *)sa; inet_ntop(AF_INET, &lsin->sin_addr, addr, sizeof(addr)); fprintf(fp, "{ AF_INET %s:%d }", addr, htons(lsin->sin_port)); break; case AF_INET6: if (len < sizeof(*lsin6)) goto sockaddr_short; lsin6 = (struct sockaddr_in6 *)(void *)sa; inet_ntop(AF_INET6, &lsin6->sin6_addr, addr, sizeof(addr)); fprintf(fp, "{ AF_INET6 [%s]:%d }", addr, htons(lsin6->sin6_port)); break; case AF_UNIX: sun = (struct sockaddr_un *)sa; fprintf(fp, "{ AF_UNIX \"%.*s\" }", (int)(len - offsetof(struct sockaddr_un, sun_path)), sun->sun_path); break; default: sockaddr_short: fprintf(fp, "{ sa_len = %d, sa_family = %d, sa_data = {", (int)sa->sa_len, (int)sa->sa_family); for (q = (u_char *)sa->sa_data; q < (u_char *)sa + len; q++) fprintf(fp, "%s 0x%02x", q == (u_char *)sa->sa_data ? "" : ",", *q); fputs(" } }", fp); } free(sa); break; } case Sigaction: { struct sigaction sa; if (get_struct(pid, (void *)args[sc->offset], &sa, sizeof(sa)) != -1) { fputs("{ ", fp); if (sa.sa_handler == SIG_DFL) fputs("SIG_DFL", fp); else if (sa.sa_handler == SIG_IGN) fputs("SIG_IGN", fp); else fprintf(fp, "%p", sa.sa_handler); fprintf(fp, " %s ss_t }", xlookup_bits(sigaction_flags, sa.sa_flags)); } else fprintf(fp, "0x%lx", args[sc->offset]); break; } case Kevent: { /* * XXX XXX: The size of the array is determined by either the * next syscall argument, or by the syscall return value, * depending on which argument number we are. This matches the * kevent syscall, but luckily that's the only syscall that uses * them. 
		 */
		struct kevent *ke;
		int numevents = -1;
		size_t bytes;
		int i;

		if (sc->offset == 1)
			numevents = args[sc->offset+1];
		else if (sc->offset == 3 && retval[0] != -1)
			numevents = retval[0];

		if (numevents >= 0) {
			bytes = sizeof(struct kevent) * numevents;
			if ((ke = malloc(bytes)) == NULL)
				err(1,
				    "Cannot malloc %zu bytes for kevent array",
				    bytes);
		} else
			ke = NULL;
		if (numevents >= 0 && get_struct(pid,
		    (void *)args[sc->offset], ke, bytes) != -1) {
			fputc('{', fp);
			for (i = 0; i < numevents; i++) {
				fputc(' ', fp);
				print_kevent(fp, &ke[i], sc->offset == 1);
			}
			fputs(" }", fp);
		} else {
			fprintf(fp, "0x%lx", args[sc->offset]);
		}
		free(ke);
		break;
	}
	case Stat: {
		struct stat st;

		if (get_struct(pid, (void *)args[sc->offset], &st,
		    sizeof(st)) != -1) {
			char mode[12];

			strmode(st.st_mode, mode);
			fprintf(fp,
			    "{ mode=%s,inode=%ju,size=%jd,blksize=%ld }", mode,
			    (uintmax_t)st.st_ino, (intmax_t)st.st_size,
			    (long)st.st_blksize);
		} else {
			fprintf(fp, "0x%lx", args[sc->offset]);
		}
		break;
	}
	case StatFs: {
		unsigned int i;
		struct statfs buf;

		if (get_struct(pid, (void *)args[sc->offset], &buf,
		    sizeof(buf)) != -1) {
			char fsid[17];

			bzero(fsid, sizeof(fsid));
			if (buf.f_fsid.val[0] != 0 || buf.f_fsid.val[1] != 0) {
				for (i = 0; i < sizeof(buf.f_fsid); i++)
					snprintf(&fsid[i*2],
					    sizeof(fsid) - (i*2), "%02x",
					    ((u_char *)&buf.f_fsid)[i]);
			}
			fprintf(fp,
			    "{ fstypename=%s,mntonname=%s,mntfromname=%s,"
			    "fsid=%s }", buf.f_fstypename, buf.f_mntonname,
			    buf.f_mntfromname, fsid);
		} else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Rusage: {
		struct rusage ru;

		if (get_struct(pid, (void *)args[sc->offset], &ru,
		    sizeof(ru)) != -1) {
			fprintf(fp,
			    "{ u=%jd.%06ld,s=%jd.%06ld,in=%ld,out=%ld }",
			    (intmax_t)ru.ru_utime.tv_sec, ru.ru_utime.tv_usec,
			    (intmax_t)ru.ru_stime.tv_sec, ru.ru_stime.tv_usec,
			    ru.ru_inblock, ru.ru_oublock);
		} else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Rlimit: {
		struct rlimit rl;

		if (get_struct(pid, (void *)args[sc->offset], &rl,
		    sizeof(rl)) != -1) {
			fprintf(fp, "{ cur=%ju,max=%ju }",
			    rl.rlim_cur, rl.rlim_max);
		} else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case ExitStatus: {
		int status;

		if (get_struct(pid, (void *)args[sc->offset], &status,
		    sizeof(status)) != -1) {
			fputs("{ ", fp);
			if (WIFCONTINUED(status))
				fputs("CONTINUED", fp);
			else if (WIFEXITED(status))
				fprintf(fp, "EXITED,val=%d",
				    WEXITSTATUS(status));
			else if (WIFSIGNALED(status))
				fprintf(fp, "SIGNALED,sig=%s%s",
				    strsig2(WTERMSIG(status)),
				    WCOREDUMP(status) ? ",cored" : "");
			else
				fprintf(fp, "STOPPED,sig=%s",
				    strsig2(WTERMSIG(status)));
			fputs(" }", fp);
		} else
			fprintf(fp, "0x%lx", args[sc->offset]);
		break;
	}
	case Waitoptions:
		fputs(xlookup_bits(wait_options, args[sc->offset]), fp);
		break;
	case Idtype:
		fputs(xlookup(idtype_arg, args[sc->offset]), fp);
		break;
	case Procctl:
		fputs(xlookup(procctl_arg, args[sc->offset]), fp);
		break;
	case Umtxop:
		fputs(xlookup(umtx_ops, args[sc->offset]), fp);
		break;
	case Atfd:
		if ((int)args[sc->offset] == AT_FDCWD)
			fputs("AT_FDCWD", fp);
		else
			fprintf(fp, "%d", (int)args[sc->offset]);
		break;
	case Atflags:
		fputs(xlookup_bits(at_flags, args[sc->offset]), fp);
		break;
	case Accessmode:
		if (args[sc->offset] == F_OK)
			fputs("F_OK", fp);
		else
			fputs(xlookup_bits(access_modes, args[sc->offset]),
			    fp);
		break;
	case Sysarch:
		fputs(xlookup(sysarch_ops, args[sc->offset]), fp);
		break;
	case PipeFds:
		/*
		 * The pipe() system call in the kernel returns its
		 * two file descriptors via return values.  However,
		 * the interface exposed by libc is that pipe()
		 * accepts a pointer to an array of descriptors.
		 * Format the output to match the libc API by printing
		 * the returned file descriptors as a fake argument.
		 *
		 * Overwrite the first retval to signal a successful
		 * return as well.
		 */
		fprintf(fp, "{ %ld, %ld }", retval[0], retval[1]);
		retval[0] = 0;
		break;
	default:
		errx(1, "Invalid argument type %d\n", sc->type & ARG_MASK);
	}
	fclose(fp);
	return (tmp);
}
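
/*
 * Editor's illustrative sketch, not part of truss: the technique
 * print_arg() above uses for Quad/QuadHex on 32-bit ABIs, where a
 * 64-bit syscall argument occupies two consecutive register-sized
 * slots and the slot order depends on endianness.  The helper name is
 * hypothetical.  Kept under #if 0 so it is never compiled.
 */
#if 0	/* example only */
static unsigned long long
example_join_quad(const unsigned long *args, int offset)
{
#if _BYTE_ORDER == _LITTLE_ENDIAN
	/* Low half first, high half in the next slot. */
	return ((unsigned long long)args[offset + 1] << 32 | args[offset]);
#else
	/* High half first, low half in the next slot. */
	return ((unsigned long long)args[offset] << 32 | args[offset + 1]);
#endif
}
#endif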

/*
 * Print (to outfile) the system call and its arguments.  Note that
 * nargs is the number of arguments (not the number of words; this is
 * potentially confusing, I know).
 */
void
print_syscall(struct trussinfo *trussinfo, const char *name, int nargs,
    char **s_args)
{
	struct timespec timediff;
	int i, len;

	len = 0;
	if (trussinfo->flags & FOLLOWFORKS)
		len += fprintf(trussinfo->outfile, "%5d: ",
		    trussinfo->curthread->proc->pid);

	if (name != NULL && (strcmp(name, "execve") == 0 ||
	    strcmp(name, "exit") == 0)) {
		clock_gettime(CLOCK_REALTIME, &trussinfo->curthread->after);
	}

	if (trussinfo->flags & ABSOLUTETIMESTAMPS) {
		timespecsubt(&trussinfo->curthread->after,
		    &trussinfo->start_time, &timediff);
		len += fprintf(trussinfo->outfile, "%jd.%09ld ",
		    (intmax_t)timediff.tv_sec, timediff.tv_nsec);
	}

	if (trussinfo->flags & RELATIVETIMESTAMPS) {
		timespecsubt(&trussinfo->curthread->after,
		    &trussinfo->curthread->before, &timediff);
		len += fprintf(trussinfo->outfile, "%jd.%09ld ",
		    (intmax_t)timediff.tv_sec, timediff.tv_nsec);
	}

	len += fprintf(trussinfo->outfile, "%s(", name);

	for (i = 0; i < nargs; i++) {
		if (s_args[i])
			len += fprintf(trussinfo->outfile, "%s", s_args[i]);
		else
			len += fprintf(trussinfo->outfile, "");
		len += fprintf(trussinfo->outfile, "%s", i < (nargs - 1) ?
		    "," : "");
	}
	len += fprintf(trussinfo->outfile, ")");

	for (i = 0; i < 6 - (len / 8); i++)
		fprintf(trussinfo->outfile, "\t");
}

void
print_syscall_ret(struct trussinfo *trussinfo, const char *name, int nargs,
    char **s_args, int errorp, long *retval, struct syscall *sc)
{
	struct timespec timediff;

	if (trussinfo->flags & COUNTONLY) {
		clock_gettime(CLOCK_REALTIME, &trussinfo->curthread->after);
		timespecsubt(&trussinfo->curthread->after,
		    &trussinfo->curthread->before, &timediff);
		timespecadd(&sc->time, &timediff, &sc->time);
		sc->ncalls++;
		if (errorp)
			sc->nerror++;
		return;
	}

	print_syscall(trussinfo, name, nargs, s_args);
	fflush(trussinfo->outfile);
	if (errorp)
		fprintf(trussinfo->outfile, " ERR#%ld '%s'\n", retval[0],
		    strerror(retval[0]));
#ifndef __LP64__
	else if (sc->ret_type == 2) {
		off_t off;

#if _BYTE_ORDER == _LITTLE_ENDIAN
		off = (off_t)retval[1] << 32 | retval[0];
#else
		off = (off_t)retval[0] << 32 | retval[1];
#endif
		fprintf(trussinfo->outfile, " = %jd (0x%jx)\n", (intmax_t)off,
		    (intmax_t)off);
	}
#endif
	else
		fprintf(trussinfo->outfile, " = %ld (0x%lx)\n", retval[0],
		    retval[0]);
}

void
print_summary(struct trussinfo *trussinfo)
{
	struct timespec total = {0, 0};
	struct syscall *sc;
	int ncall, nerror;

	fprintf(trussinfo->outfile, "%-20s%15s%8s%8s\n",
	    "syscall", "seconds", "calls", "errors");
	ncall = nerror = 0;
	STAILQ_FOREACH(sc, &syscalls, entries)
		if (sc->ncalls) {
			fprintf(trussinfo->outfile, "%-20s%5jd.%09ld%8d%8d\n",
			    sc->name, (intmax_t)sc->time.tv_sec,
			    sc->time.tv_nsec, sc->ncalls, sc->nerror);
			timespecadd(&total, &sc->time, &total);
			ncall += sc->ncalls;
			nerror += sc->nerror;
		}
	fprintf(trussinfo->outfile, "%20s%15s%8s%8s\n",
	    "", "-------------", "-------", "-------");
	fprintf(trussinfo->outfile, "%-20s%5jd.%09ld%8d%8d\n",
	    "", (intmax_t)total.tv_sec, total.tv_nsec, ncall, nerror);
}
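
/*
 * Editor's illustrative sketch, not part of truss: how the lookup() and
 * xlookup_bits() helpers near the top of this file behave.  The xlat
 * table here is hypothetical; truss builds its real tables from the
 * actual flag definitions.  Kept under #if 0 so it is never compiled.
 */
#if 0	/* example only */
static struct xlat example_flags[] = {
	{ .val = 0x01, .str = "IN" },
	{ .val = 0x02, .str = "OUT" },
	{ .val = 0, .str = NULL },
};

static void
example_decode(void)
{
	/* Exact match: prints "IN". */
	printf("%s\n", lookup(example_flags, 0x01, 16));
	/* No match: falls back to the numeric form, here "0x7". */
	printf("%s\n", lookup(example_flags, 0x7, 16));
	/* Bitfield: known bits named, leftovers appended, i.e. "IN|0x400". */
	printf("%s\n", xlookup_bits(example_flags, 0x401));
}
#endif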
Index: user/ngie/more-tests2
===================================================================
--- user/ngie/more-tests2	(revision 288954)
+++ user/ngie/more-tests2	(revision 288955)

Property changes on: user/ngie/more-tests2
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r288944-288954