Index: head/ObsoleteFiles.inc =================================================================== --- head/ObsoleteFiles.inc (revision 339926) +++ head/ObsoleteFiles.inc (revision 339927) @@ -1,10617 +1,10619 @@ # # $FreeBSD$ # # This file lists old files (OLD_FILES), libraries (OLD_LIBS) and # directories (OLD_DIRS) which should get removed at an update. Recently # removed entries first (with the date as a comment). Dynamic libraries are # special cased (OLD_LIBS). Static libraries or the generic links to # the dynamic libraries (lib*.so) should (if you don't know why to make an # exception, make this a "must") be viewed as normal files (OLD_FILES). # # In case of a complete directory hierarchy the sorting is in depth first # order. # # The file is partitioned: OLD_FILES first, then OLD_LIBS and OLD_DIRS last. # # Before you commit changes to this file please check if any entries in # tools/build/mk/OptionalObsoleteFiles.inc can be removed. The following # command tells which files are listed more than once regardless of some # architecture specific conditionals, so you can not blindly trust the # output: # ( grep '+=' /usr/src/ObsoleteFiles.inc | sort -u ; \ # grep '+=' /usr/src/tools/build/mk/OptionalObsoleteFiles.inc | sort -u) | \ # sort | uniq -d # # To find regular duplicates not dependent on optional components, you can # also use something that will not give you false positives, e.g.: # for t in `make -V TARGETS universe`; do # __MAKE_CONF=/dev/null make -f Makefile.inc1 TARGET=$t \ # -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \ # xargs -n1 | sort | uniq -d; # done # # For optional components, you can use the following to see if some entries # in OptionalObsoleteFiles.inc have been obsoleted by ObsoleteFiles.inc # for o in tools/build/options/WITH*; do # __MAKE_CONF=/dev/null make -f Makefile.inc1 -D${o##*/} \ # -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \ # xargs -n1 | sort | uniq -d; # done +# 20181030: malloc_domain(9) KPI change +OLD_FILES+=share/man/man9/malloc_domain.9.gz # 20181026: joy(4) removal OLD_FILES+=usr/share/man/man4/joy.4.gz # 20181025: OpenSSL libraries version bump to avoid conflict with ports OLD_LIBS+=lib/libcrypto.so.9 OLD_LIBS+=usr/lib/libssl.so.9 OLD_LIBS+=usr/lib32/libcrypto.so.9 OLD_LIBS+=usr/lib32/libssl.so.9 # 20181021: mse(4) removal OLD_FILES+=usr/share/man/man4/mse.4.gz # 20181015: Stale libcasper(3) files following r329452 OLD_LIBS+=lib/casper/libcap_sysctl.so.0 OLD_LIBS+=lib/casper/libcap_grp.so.0 OLD_LIBS+=lib/casper/libcap_pwd.so.0 OLD_LIBS+=lib/casper/libcap_random.so.0 OLD_LIBS+=lib/casper/libcap_dns.so.0 OLD_LIBS+=lib/casper/libcap_syslog.so.0 OLD_LIBS+=usr/lib32/libcap_sysctl.so.0 OLD_LIBS+=usr/lib32/libcap_grp.so.0 OLD_LIBS+=usr/lib32/libcap_pwd.so.0 OLD_LIBS+=usr/lib32/libcap_random.so.0 OLD_LIBS+=usr/lib32/libcap_dns.so.0 OLD_LIBS+=usr/lib32/libcap_syslog.so.0 # 20181009: OpenSSL 1.1.1 OLD_FILES+=usr/include/openssl/des_old.h OLD_FILES+=usr/include/openssl/dso.h OLD_FILES+=usr/include/openssl/krb5_asn.h OLD_FILES+=usr/include/openssl/kssl.h OLD_FILES+=usr/include/openssl/pqueue.h OLD_FILES+=usr/include/openssl/ssl23.h OLD_FILES+=usr/include/openssl/ui_compat.h OLD_FILES+=usr/share/openssl/man/man1/dss1.1.gz OLD_FILES+=usr/share/openssl/man/man1/md2.1.gz OLD_FILES+=usr/share/openssl/man/man1/md4.1.gz OLD_FILES+=usr/share/openssl/man/man1/md5.1.gz OLD_FILES+=usr/share/openssl/man/man1/mdc2.1.gz OLD_FILES+=usr/share/openssl/man/man1/ripemd160.1.gz OLD_FILES+=usr/share/openssl/man/man1/sha.1.gz 
OLD_FILES+=usr/share/openssl/man/man1/sha1.1.gz OLD_FILES+=usr/share/openssl/man/man1/sha224.1.gz OLD_FILES+=usr/share/openssl/man/man1/sha256.1.gz OLD_FILES+=usr/share/openssl/man/man1/sha384.1.gz OLD_FILES+=usr/share/openssl/man/man1/sha512.1.gz OLD_FILES+=usr/share/openssl/man/man1/x509v3_config.1.gz OLD_FILES+=usr/share/openssl/man/man3/ASN1_STRING_length_set.3.gz OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_int_port.3.gz OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_ip.3.gz OLD_FILES+=usr/share/openssl/man/man3/BIO_set.3.gz OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_int_port.3.gz OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_ip.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_get_thread_id.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_set_thread_id.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_thread_id.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_CTX_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_MONT_CTX_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_RECP_CTX_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/BN_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/BUF_memdup.3.gz OLD_FILES+=usr/share/openssl/man/man3/BUF_memdup.3.gz OLD_FILES+=usr/share/openssl/man/man3/BUF_strdup.3.gz OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcat.3.gz OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcpy.3.gz OLD_FILES+=usr/share/openssl/man/man3/BUF_strndup.3.gz OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_cert.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cmp.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cpy.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_current.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_get_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_hash.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_set_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_destroy_dynlockid.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_get_new_dynlockid.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_lock.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_num_locks.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_create_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_destroy_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_lock_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_locking_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/DES_ede3_cbcm_encrypt.3.gz OLD_FILES+=usr/share/openssl/man/man3/DES_enc_read.3.gz OLD_FILES+=usr/share/openssl/man/man3/DES_enc_write.3.gz OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_get_key_method_data.3.gz OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_insert_key_method_data.3.gz OLD_FILES+=usr/share/openssl/man/man3/EC_POINT_set_Jprojective_coordinates.3.gz OLD_FILES+=usr/share/openssl/man/man3/ERR_load_UI_strings.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_cleanup.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_MAX_MD_SIZE.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_cleanup.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_create.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_destroy.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEVP_PKEY_CTX_set_app_data.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_CTX_set_rsa_rsa_keygen_bits.3.gz 
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_get_default_digest.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_dss.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_dss1.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_sha.3.gz OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_cleanup.3.gz OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_init.3.gz OLD_FILES+=usr/share/openssl/man/man3/HMAC_cleanup.3.gz OLD_FILES+=usr/share/openssl/man/man3/OPENSSL_ia32cap_loc.3.gz OLD_FILES+=usr/share/openssl/man/man3/PEM.3.gz OLD_FILES+=usr/share/openssl/man/man3/RAND_SSLeay.3.gz OLD_FILES+=usr/share/openssl/man/man3/RSA_PKCS1_SSLeay.3.gz OLD_FILES+=usr/share/openssl/man/man3/RSA_null_method.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_get_ex_new_index.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_need_tmp_rsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_custom_cli_ext.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_default_read_ahead.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_ecdh_auto.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_SESSION_get_ex_new_index.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_add_session.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_flush_sessions.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_get_accept_state.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_get_ex_new_index.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_get_msg_callback_arg.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_need_tmp_rsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_remove_session.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_set_ecdh_auto.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa_callback.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSLeay.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSLeay_add_ssl_algorithms.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSLeay_version.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSLv2_client_method.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSLv2_method.3.gz OLD_FILES+=usr/share/openssl/man/man3/SSLv2_server_method.3.gz OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_set_chain.3.gz OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_trusted_stack.3.gz OLD_FILES+=usr/share/openssl/man/man3/bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/blowfish.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_add_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_check_top.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_cmp_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_div_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_dump.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_expand.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_expand2.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_fix_top.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_internal.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_add_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba4.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba8.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_high.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_normal.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_recursive.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_normal.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_part_recursive.3.gz 
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_recursive.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_mul_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_print.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_set_high.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_set_low.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_set_max.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba4.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba8.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_normal.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_recursive.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_sub_words.3.gz OLD_FILES+=usr/share/openssl/man/man3/bn_wexpand.3.gz OLD_FILES+=usr/share/openssl/man/man3/buffer.3.gz OLD_FILES+=usr/share/openssl/man/man3/crypto.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_fp.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPrivate_key.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_Netscape_RSA.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_PKCS8PrivateKey.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_Private_key.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_X509_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_X509_fp.3.gz OLD_FILES+=usr/share/openssl/man/man3/des.3.gz OLD_FILES+=usr/share/openssl/man/man3/des_read_2passwords.3.gz OLD_FILES+=usr/share/openssl/man/man3/des_read_password.3.gz OLD_FILES+=usr/share/openssl/man/man3/des_read_pw.3.gz OLD_FILES+=usr/share/openssl/man/man3/des_read_pw_string.3.gz OLD_FILES+=usr/share/openssl/man/man3/dh.3.gz OLD_FILES+=usr/share/openssl/man/man3/dsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/ec.3.gz OLD_FILES+=usr/share/openssl/man/man3/ecdsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/engine.3.gz OLD_FILES+=usr/share/openssl/man/man3/err.3.gz OLD_FILES+=usr/share/openssl/man/man3/evp.3.gz OLD_FILES+=usr/share/openssl/man/man3/hmac.3.gz OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_fp.3.gz OLD_FILES+=usr/share/openssl/man/man3/i2d_Netscape_RSA.3.gz OLD_FILES+=usr/share/openssl/man/man3/i2d_X509_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/i2d_X509_fp.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_delete.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_doall.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_doall_arg.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_error.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_free.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_insert.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_new.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_retrieve.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_stats.3.gz OLD_FILES+=usr/share/openssl/man/man3/lh_stats_bio.3.gz OLD_FILES+=usr/share/openssl/man/man3/lhash.3.gz OLD_FILES+=usr/share/openssl/man/man3/md5.3.gz OLD_FILES+=usr/share/openssl/man/man3/mdc2.3.gz OLD_FILES+=usr/share/openssl/man/man3/pem.3.gz OLD_FILES+=usr/share/openssl/man/man3/rand.3.gz OLD_FILES+=usr/share/openssl/man/man3/rc4.3.gz OLD_FILES+=usr/share/openssl/man/man3/ripemd.3.gz OLD_FILES+=usr/share/openssl/man/man3/rsa.3.gz OLD_FILES+=usr/share/openssl/man/man3/sha.3.gz OLD_FILES+=usr/share/openssl/man/man3/ssl.3.gz 
OLD_FILES+=usr/share/openssl/man/man3/threads.3.gz OLD_FILES+=usr/share/openssl/man/man3/ui.3.gz OLD_FILES+=usr/share/openssl/man/man3/ui_compat.3.gz OLD_FILES+=usr/share/openssl/man/man3/x509.3.gz OLD_LIBS+=lib/libcrypto.so.8 OLD_LIBS+=usr/lib/engines/lib4758cca.so OLD_LIBS+=usr/lib/engines/libaep.so OLD_LIBS+=usr/lib/engines/libatalla.so OLD_LIBS+=usr/lib/engines/libcapi.so OLD_LIBS+=usr/lib/engines/libchil.so OLD_LIBS+=usr/lib/engines/libcswift.so OLD_LIBS+=usr/lib/engines/libgost.so OLD_LIBS+=usr/lib/engines/libnuron.so OLD_LIBS+=usr/lib/engines/libsureware.so OLD_LIBS+=usr/lib/engines/libubsec.so OLD_LIBS+=usr/lib/libssl.so.8 OLD_LIBS+=usr/lib32/libcrypto.so.8 OLD_LIBS+=usr/lib32/lib4758cca.so OLD_LIBS+=usr/lib32/libaep.so OLD_LIBS+=usr/lib32/libatalla.so OLD_LIBS+=usr/lib32/libcapi.so OLD_LIBS+=usr/lib32/libchil.so OLD_LIBS+=usr/lib32/libcswift.so OLD_LIBS+=usr/lib32/libgost.so OLD_LIBS+=usr/lib32/libnuron.so OLD_LIBS+=usr/lib32/libsureware.so OLD_LIBS+=usr/lib32/libubsec.so OLD_LIBS+=usr/lib32/libssl.so.8 # 20180824: libbe(3) SHLIBDIR fixed to reflect correct location OLD_LIBS+=usr/lib/libbe.so.1 # 20180819: Remove deprecated arc4random(3) stir/addrandom interfaces OLD_FILES+=usr/share/man/man3/arc4random_addrandom.3.gz OLD_FILES+=usr/share/man/man3/arc4random_stir.3.gz # 20180819: send-pr(1) placeholder removal OLD_FILES+=usr/bin/send-pr # 20180725: Cleanup old libcasper.so.0 OLD_LIBS+=lib/libcasper.so.0 # 20180722: indent(1) option renamed, test files follow OLD_FILES+=usr/bin/indent/tests/nsac.0 OLD_FILES+=usr/bin/indent/tests/nsac.0.pro OLD_FILES+=usr/bin/indent/tests/nsac.0.stdout OLD_FILES+=usr/bin/indent/tests/sac.0 OLD_FILES+=usr/bin/indent/tests/sac.0.pro OLD_FILES+=usr/bin/indent/tests/sac.0.stdout # 20180721: move of libmlx5.so.1 and libibverbs.so.1 OLD_LIBS+=usr/lib/libmlx5.so.1 OLD_LIBS+=usr/lib/libibverbs.so.1 # 20180710: old numa cleanup OLD_FILES+=usr/include/sys/numa.h OLD_FILES+=usr/share/man/man2/numa_getaffinity.2.gz OLD_FILES+=usr/share/man/man2/numa_setaffinity.2.gz OLD_FILES+=usr/share/man/man1/numactl.1.gz OLD_FILES+=usr/bin/numactl # 20180630: new clang import which bumps version from 6.0.0 to 6.0.1. 
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/hwasan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/scudo_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface.h OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/6.0.0/include/sanitizer OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_complex_builtins.h OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/6.0.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/6.0.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/altivec.h OLD_FILES+=usr/lib/clang/6.0.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/arm64intr.h OLD_FILES+=usr/lib/clang/6.0.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/6.0.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/6.0.0/include/armintr.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bitalgintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmi2intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbitalgintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvbmi2intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvnniintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vnniintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqvlintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/cetintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/clwbintrin.h 
OLD_FILES+=usr/lib/clang/6.0.0/include/clzerointrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/cpuid.h OLD_FILES+=usr/lib/clang/6.0.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/gfniintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/immintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/lwpintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/6.0.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/6.0.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/module.modulemap OLD_FILES+=usr/lib/clang/6.0.0/include/msa.h OLD_FILES+=usr/lib/clang/6.0.0/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/opencl-c.h OLD_FILES+=usr/lib/clang/6.0.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/vadefs.h OLD_FILES+=usr/lib/clang/6.0.0/include/vaesintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/vpclmulqdqintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/6.0.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/6.0.0/include OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-armhf.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-i386.a 
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/6.0.0/lib/freebsd OLD_DIRS+=usr/lib/clang/6.0.0/lib OLD_DIRS+=usr/lib/clang/6.0.0 # 20180615: asf(8) removed OLD_FILES+=usr/sbin/asf OLD_FILES+=usr/share/man/man8/asf.8.gz # 20180609: obsolete libc++ files missed from the 5.0.0 import OLD_FILES+=usr/include/c++/v1/__refstring OLD_FILES+=usr/include/c++/v1/__undef_min_max OLD_FILES+=usr/include/c++/v1/tr1/__refstring OLD_FILES+=usr/include/c++/v1/tr1/__undef_min_max # 20180607: remove nls support from grep OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/grep.cat OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/grep.cat OLD_FILES+=usr/share/nls/ja_JP.SJIS/grep.cat OLD_FILES+=usr/share/nls/ja_JP.eucJP/grep.cat OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/grep.cat OLD_FILES+=usr/share/nls/zh_CN.UTF-8/grep.cat OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/grep.cat OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/grep.cat OLD_FILES+=usr/share/nls/uk_UA.UTF-8/grep.cat OLD_FILES+=usr/share/nls/ja_JP.UTF-8/grep.cat # 20180517: retire vxge OLD_FILES+=usr/share/man/man4/if_vxge.4.gz OLD_FILES+=usr/share/man/man4/vxge.4.gz # 20180512: Rename Unbound tools OLD_FILES+=usr/sbin/unbound OLD_FILES+=usr/sbin/unbound-anchor OLD_FILES+=usr/sbin/unbound-checkconf OLD_FILES+=usr/sbin/unbound-control OLD_FILES+=usr/share/man/man5/unbound.conf.5.gz OLD_FILES+=usr/share/man/man8/unbound-anchor.8.gz OLD_FILES+=usr/share/man/man8/unbound-checkconf.8.gz OLD_FILES+=usr/share/man/man8/unbound-control.8.gz OLD_FILES+=usr/share/man/man8/unbound.8.gz # 20180508: retire nxge OLD_FILES+=usr/share/man/man4/if_nxge.4.gz OLD_FILES+=usr/share/man/man4/nxge.4.gz # 20180505: rhosts OLD_FILES+=usr/share/skel/dot.rhosts # 20180502: retire ixgb OLD_FILES+=usr/share/man/man4/if_ixgb.4.gz OLD_FILES+=usr/share/man/man4/ixgb.4.gz # 20180501: retire lmc OLD_FILES+=usr/include/dev/lmc/if_lmc.h OLD_DIRS+=usr/include/dev/lmc OLD_FILES+=usr/sbin/lmcconfig OLD_FILES+=usr/share/man/man4/lmc.4.gz OLD_FILES+=usr/share/man/man4/if_lmc.4.gz OLD_FILES+=usr/share/man/man8/lmcconfig.8.gz # 20180417: remove fuswintr and suswintr OLD_FILES+=usr/share/man/man9/fuswintr.9.gz OLD_FILES+=usr/share/man/man9/suswintr.9.gz # 20180413: remove Arcnet support OLD_FILES+=usr/include/net/if_arc.h OLD_FILES+=usr/share/man/man4/cm.4.gz # 20180409: remove FDDI support OLD_FILES+=usr/include/net/fddi.h OLD_FILES+=usr/share/man/man4/fpa.4.gz # 20180319: remove /boot/overlays, replaced by /boot/dtb/overlays OLD_DIRS+=boot/overlays # 20180311: remove sys/sys/i386/include/pcaudioio.h .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/pcaudioio.h .endif # 20180310: remove sys/sys/dataacq.h OLD_FILES+=usr/include/sys/dataacq.h # 20180306: remove DTrace scripts made obsolete by dwatch(1) 
OLD_FILES+=usr/share/dtrace/watch_execve OLD_FILES+=usr/share/dtrace/watch_kill OLD_FILES+=usr/share/dtrace/watch_vop_remove # 20180212: move devmatch OLD_FILES+=usr/sbin/devmatch # 20180211: remove usb.conf OLD_FILES+=etc/devd/usb.conf # 20180208: remove c_rehash(1) OLD_FILES+=usr/share/openssl/man/man1/c_rehash.1.gz # 20180206: remove gdbtui OLD_FILES+=usr/bin/gdbtui # 20180201: Obsolete forth files OLD_FILES+=boot/pcibios.4th # 20180114: new clang import which bumps version from 5.0.1 to 6.0.0. OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface.h OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/5.0.1/include/sanitizer OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_complex_builtins.h OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/5.0.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/5.0.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/altivec.h OLD_FILES+=usr/lib/clang/5.0.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/5.0.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/5.0.1/include/armintr.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vpopcntdqintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/clzerointrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/cpuid.h OLD_FILES+=usr/lib/clang/5.0.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/f16cintrin.h 
OLD_FILES+=usr/lib/clang/5.0.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/htmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/immintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/lwpintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/5.0.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/5.0.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/module.modulemap OLD_FILES+=usr/lib/clang/5.0.1/include/msa.h OLD_FILES+=usr/lib/clang/5.0.1/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/opencl-c.h OLD_FILES+=usr/lib/clang/5.0.1/include/pkuintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/pmmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/popcntintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/rtmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/s390intrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/shaintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/smmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/tbmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/tmmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/vadefs.h OLD_FILES+=usr/lib/clang/5.0.1/include/vecintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/wmmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/x86intrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xmmintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xopintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/5.0.1/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/5.0.1/include OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-armhf.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a 
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/5.0.1/lib/freebsd OLD_DIRS+=usr/lib/clang/5.0.1/lib OLD_DIRS+=usr/lib/clang/5.0.1 # 20180109: Remove vestiges of digi(4) driver OLD_FILES+=usr/include/sys/digiio.h OLD_FILES+=usr/sbin/digictl OLD_FILES+=usr/share/man/man8/digictl.8.gz # 20180107: Convert remaining geli(8) tests to ATF OLD_FILES+=tests/sys/geom/class/eli/nokey_test.sh OLD_FILES+=tests/sys/geom/class/eli/readonly_test.sh # 20180106: Convert most geli(8) tests to ATF OLD_FILES+=tests/sys/geom/class/eli/attach_d_test.sh OLD_FILES+=tests/sys/geom/class/eli/configure_b_B_test.sh OLD_FILES+=tests/sys/geom/class/eli/detach_l_test.sh OLD_FILES+=tests/sys/geom/class/eli/init_B_test.sh OLD_FILES+=tests/sys/geom/class/eli/init_J_test.sh OLD_FILES+=tests/sys/geom/class/eli/init_a_test.sh OLD_FILES+=tests/sys/geom/class/eli/init_alias_test.sh OLD_FILES+=tests/sys/geom/class/eli/init_i_P_test.sh OLD_FILES+=tests/sys/geom/class/eli/integrity_copy_test.sh OLD_FILES+=tests/sys/geom/class/eli/integrity_data_test.sh OLD_FILES+=tests/sys/geom/class/eli/integrity_hmac_test.sh OLD_FILES+=tests/sys/geom/class/eli/onetime_a_test.sh OLD_FILES+=tests/sys/geom/class/eli/onetime_d_test.sh # 20171230: Remove /etc/skel from mtree OLD_DIRS+=/etc/skel # 20171208: Remove basename_r(3) OLD_FILES+=usr/share/man/man3/basename_r.3.gz # 20171204: Move fdformat man page from volume 1 to volume 8. OLD_FILES+=usr/share/man/man1/fdformat.1.gz # 20171203: libproc version bump OLD_LIBS+=usr/lib/libproc.so.4 OLD_LIBS+=usr/lib32/libproc.so.4 # 20171203: new clang import which bumps version from 5.0.0 to 5.0.1. OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface.h OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/5.0.0/include/sanitizer OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_complex_builtins.h OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/5.0.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/5.0.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/altivec.h OLD_FILES+=usr/lib/clang/5.0.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/5.0.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/5.0.0/include/armintr.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512bwintrin.h 
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vpopcntdqintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/clzerointrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/cpuid.h OLD_FILES+=usr/lib/clang/5.0.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/immintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/lwpintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/5.0.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/5.0.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/module.modulemap OLD_FILES+=usr/lib/clang/5.0.0/include/msa.h OLD_FILES+=usr/lib/clang/5.0.0/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/opencl-c.h OLD_FILES+=usr/lib/clang/5.0.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/vadefs.h OLD_FILES+=usr/lib/clang/5.0.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/5.0.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/5.0.0/include OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a 
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-armhf.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/5.0.0/lib/freebsd OLD_DIRS+=usr/lib/clang/5.0.0/lib OLD_DIRS+=usr/lib/clang/5.0.0 # 20171118: Remove old etc casper files OLD_FILES+=etc/casper/system.dns OLD_FILES+=etc/casper/system.grp OLD_FILES+=etc/casper/system.pwd OLD_FILES+=etc/casper/system.random OLD_FILES+=etc/casper/system.sysctl OLD_DIRS+=etc/casper # 20171116: lint(1) removal OLD_FILES+=usr/bin/lint OLD_FILES+=usr/libexec/lint1 OLD_FILES+=usr/libexec/lint2 OLD_FILES+=usr/libdata/lint/llib-lposix.ln OLD_FILES+=usr/libdata/lint/llib-lstdc.ln OLD_FILES+=usr/share/man/man1/lint.1.gz OLD_FILES+=usr/share/man/man7/lint.7.gz OLD_DIRS+=usr/libdata/lint # 20171114: Removal of all fortune datfiles other than freebsd-tips OLD_FILES+=usr/share/games/fortune/fortunes OLD_FILES+=usr/share/games/fortune/fortunes.dat OLD_FILES+=usr/share/games/fortune/gerrold.limerick OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat OLD_FILES+=usr/share/games/fortune/limerick OLD_FILES+=usr/share/games/fortune/limerick.dat OLD_FILES+=usr/share/games/fortune/murphy OLD_FILES+=usr/share/games/fortune/murphy-o OLD_FILES+=usr/share/games/fortune/murphy-o.dat OLD_FILES+=usr/share/games/fortune/murphy.dat OLD_FILES+=usr/share/games/fortune/startrek OLD_FILES+=usr/share/games/fortune/startrek.dat OLD_FILES+=usr/share/games/fortune/zippy OLD_FILES+=usr/share/games/fortune/zippy.dat # 20171112: Removal of eqnchar definition OLD_FILES+=usr/share/misc/eqnchar # 20171110: Removal of mailaddr man page OLD_FILES+=usr/share/man/man7/mailaddr.7.gz # 20171108: badsect(8) removal OLD_FILES+=sbin/badsect OLD_FILES+=rescue/badsect OLD_FILES+=usr/share/man/man8/badsect.8.gz # 20171105: fixing lib/libclang_rt CRTARCH for arm:armv[67]. 
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \ (!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "") OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.a OLD_LIBS+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.so OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a .endif # 20171104: libcap_random should be in /lib not in /usr/lib OLD_LIBS+=usr/lib/libcap_random.so.0 # 20171104: Casper can work only as shared library OLD_FILES+=usr/lib/libcap_dns.a OLD_FILES+=usr/lib/libcap_dns_p.a OLD_FILES+=usr/lib/libcap_grp.a OLD_FILES+=usr/lib/libcap_grp_p.a OLD_FILES+=usr/lib/libcap_pwd.a OLD_FILES+=usr/lib/libcap_pwd_p.a OLD_FILES+=usr/lib/libcap_random.a OLD_FILES+=usr/lib/libcap_random_p.a OLD_FILES+=usr/lib/libcap_sysctl.a OLD_FILES+=usr/lib/libcap_sysctl_p.a OLD_FILES+=usr/lib/libcasper.a OLD_FILES+=usr/lib/libcasper_p.a OLD_FILES+=usr/lib32/libcap_dns.a OLD_FILES+=usr/lib32/libcap_dns_p.a OLD_FILES+=usr/lib32/libcap_grp.a OLD_FILES+=usr/lib32/libcap_grp_p.a OLD_FILES+=usr/lib32/libcap_pwd.a OLD_FILES+=usr/lib32/libcap_pwd_p.a OLD_FILES+=usr/lib32/libcap_random.a OLD_FILES+=usr/lib32/libcap_random_p.a OLD_FILES+=usr/lib32/libcap_sysctl.a OLD_FILES+=usr/lib32/libcap_sysctl_p.a OLD_FILES+=usr/lib32/libcasper.a OLD_FILES+=usr/lib32/libcasper_p.a # 20171031: Removal of adding_user man page OLD_FILES+=usr/share/man/man7/adding_user.7.gz # 20171031: Disconnected libpathconv tests OLD_DIRS+=usr/tests/lib/libpathconv # 20171017: Removal of mbpool(9) OLD_FILES+=usr/include/sys/mbpool.h OLD_FILES+=usr/share/man/man9/mbpool.9.gz OLD_FILES+=usr/share/man/man9/mbp_destroy.9.gz OLD_FILES+=usr/share/man/man9/mbp_alloc.9.gz OLD_FILES+=usr/share/man/man9/mbp_ext_free.9.gz OLD_FILES+=usr/share/man/man9/mbp_count.9.gz OLD_FILES+=usr/share/man/man9/mbp_card_free.9.gz OLD_FILES+=usr/share/man/man9/mbp_get_keep.9.gz OLD_FILES+=usr/share/man/man9/mbp_free.9.gz OLD_FILES+=usr/share/man/man9/mbp_get.9.gz OLD_FILES+=usr/share/man/man9/mbp_create.9.gz OLD_FILES+=usr/share/man/man9/mbp_sync.9.gz # 20171010: Remove libstand OLD_FILES+=usr/lib/libstand.a OLD_FILES+=usr/lib/libstand_p.a OLD_FILES+=usr/lib32/libstand.a OLD_FILES+=usr/lib32/libstand_p.a OLD_FILES+=usr/include/stand.h OLD_FILES+=usr/share/man/man3/libstand.3.gz # 20171003: remove RCMDS OLD_FILES+=bin/rcp OLD_FILES+=rescue/rcp OLD_FILES+=usr/bin/rlogin OLD_FILES+=usr/bin/rsh OLD_FILES+=usr/libexec/rlogind OLD_FILES+=usr/libexec/rshd OLD_FILES+=usr/share/man/man1/rcp.1.gz OLD_FILES+=usr/share/man/man1/rlogin.1.gz OLD_FILES+=usr/share/man/man1/rsh.1.gz OLD_FILES+=usr/share/man/man8/rlogind.8.gz OLD_FILES+=usr/share/man/man8/rshd.8.gz # 20170927: crshared OLD_FILES+=usr/share/man/man9/crshared.9.gz # 20170927: procctl OLD_FILES+=usr/share/man/man8/procctl.8.gz OLD_FILES+=usr/sbin/procctl # 20170926: remove unneeded man aliases and locales directory OLD_FILES+=usr/share/man/en.ISO8859-1/man1 OLD_FILES+=usr/share/man/en.ISO8859-1/man2 OLD_FILES+=usr/share/man/en.ISO8859-1/man3 OLD_FILES+=usr/share/man/en.ISO8859-1/man4 
OLD_FILES+=usr/share/man/en.ISO8859-1/man5 OLD_FILES+=usr/share/man/en.ISO8859-1/man6 OLD_FILES+=usr/share/man/en.ISO8859-1/man7 OLD_FILES+=usr/share/man/en.ISO8859-1/man8 OLD_FILES+=usr/share/man/en.ISO8859-1/man9 OLD_DIRS+=usr/share/man/en.ISO8859-1 OLD_FILES+=usr/share/man/en.ISO8859-1/mandoc.db OLD_FILES+=usr/share/man/en.UTF-8/man1 OLD_FILES+=usr/share/man/en.UTF-8/man2 OLD_FILES+=usr/share/man/en.UTF-8/man3 OLD_FILES+=usr/share/man/en.UTF-8/man4 OLD_FILES+=usr/share/man/en.UTF-8/man5 OLD_FILES+=usr/share/man/en.UTF-8/man6 OLD_FILES+=usr/share/man/en.UTF-8/man7 OLD_FILES+=usr/share/man/en.UTF-8/man8 OLD_FILES+=usr/share/man/en.UTF-8/man9 OLD_FILES+=usr/share/man/en.UTF-8/mandoc.db OLD_DIRS+=usr/share/man/en.UTF-8 OLD_FILES+=usr/share/man/en.ISO8859-15 OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man1 OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man3 OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/mandoc.db OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1 OLD_FILES+=usr/share/openssl/man/en.ISO8859-15 OLD_DIRS+=usr/share/man/ja/man1 OLD_DIRS+=usr/share/man/ja/man2 OLD_DIRS+=usr/share/man/ja/man3 OLD_DIRS+=usr/share/man/ja/man4 OLD_DIRS+=usr/share/man/ja/man5 OLD_DIRS+=usr/share/man/ja/man6 OLD_DIRS+=usr/share/man/ja/man7 OLD_DIRS+=usr/share/man/ja/man8 OLD_DIRS+=usr/share/man/ja/man9 OLD_DIRS+=usr/share/man/ja # 20170913: remove unneeded catman utility OLD_FILES+=etc/periodic/weekly/330.catman OLD_FILES+=usr/bin/catman OLD_FILES+=usr/libexec/catman.local OLD_FILES+=usr/share/man/man1/catman.1.gz OLD_FILES+=usr/share/man/man8/catman.local.8.gz OLD_DIRS+=usr/share/man/cat1 OLD_DIRS+=usr/share/man/cat2 OLD_DIRS+=usr/share/man/cat3 OLD_DIRS+=usr/share/man/cat4/amd64 OLD_DIRS+=usr/share/man/cat4/arm OLD_DIRS+=usr/share/man/cat4/i386 OLD_DIRS+=usr/share/man/cat4/powerpc OLD_DIRS+=usr/share/man/cat4/sparc64 OLD_DIRS+=usr/share/man/cat4 OLD_DIRS+=usr/share/man/cat5 OLD_DIRS+=usr/share/man/cat6 OLD_DIRS+=usr/share/man/cat7 OLD_DIRS+=usr/share/man/cat8/amd64 OLD_DIRS+=usr/share/man/cat8/arm OLD_DIRS+=usr/share/man/cat8/i386 OLD_DIRS+=usr/share/man/cat8/powerpc OLD_DIRS+=usr/share/man/cat8/sparc64 OLD_DIRS+=usr/share/man/cat8 OLD_DIRS+=usr/share/man/cat9 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat2 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat3 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/amd64 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/arm OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/i386 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/powerpc OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/sparc64 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat5 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat6 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat7 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/amd64 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/arm OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/i386 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/powerpc OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/sparc64 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8 OLD_DIRS+=usr/share/man/en.ISO8859-1/cat9 OLD_DIRS+=usr/share/man/en.UTF-8/cat1 OLD_DIRS+=usr/share/man/en.UTF-8/cat2 OLD_DIRS+=usr/share/man/en.UTF-8/cat3 OLD_DIRS+=usr/share/man/en.UTF-8/cat4/amd64 OLD_DIRS+=usr/share/man/en.UTF-8/cat4/arm OLD_DIRS+=usr/share/man/en.UTF-8/cat4/i386 OLD_DIRS+=usr/share/man/en.UTF-8/cat4/powerpc OLD_DIRS+=usr/share/man/en.UTF-8/cat4/sparc64 OLD_DIRS+=usr/share/man/en.UTF-8/cat4 OLD_DIRS+=usr/share/man/en.UTF-8/cat5 OLD_DIRS+=usr/share/man/en.UTF-8/cat6 OLD_DIRS+=usr/share/man/en.UTF-8/cat7 
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/amd64 OLD_DIRS+=usr/share/man/en.UTF-8/cat8/arm OLD_DIRS+=usr/share/man/en.UTF-8/cat8/i386 OLD_DIRS+=usr/share/man/en.UTF-8/cat8/powerpc OLD_DIRS+=usr/share/man/en.UTF-8/cat8/sparc64 OLD_DIRS+=usr/share/man/en.UTF-8/cat8 OLD_DIRS+=usr/share/man/en.UTF-8/cat9 OLD_DIRS+=usr/share/man/ja/cat1 OLD_DIRS+=usr/share/man/ja/cat2 OLD_DIRS+=usr/share/man/ja/cat3 OLD_DIRS+=usr/share/man/ja/cat4/amd64 OLD_DIRS+=usr/share/man/ja/cat4/arm OLD_DIRS+=usr/share/man/ja/cat4/i386 OLD_DIRS+=usr/share/man/ja/cat4/powerpc OLD_DIRS+=usr/share/man/ja/cat4/sparc64 OLD_DIRS+=usr/share/man/ja/cat4 OLD_DIRS+=usr/share/man/ja/cat5 OLD_DIRS+=usr/share/man/ja/cat6 OLD_DIRS+=usr/share/man/ja/cat7 OLD_DIRS+=usr/share/man/ja/cat8/amd64 OLD_DIRS+=usr/share/man/ja/cat8/arm OLD_DIRS+=usr/share/man/ja/cat8/powerpc OLD_DIRS+=usr/share/man/ja/cat8/sparc64 OLD_DIRS+=usr/share/man/ja/cat8 OLD_DIRS+=usr/share/man/ja/cat9 OLD_DIRS+=usr/share/openssl/man/cat1 OLD_DIRS+=usr/share/openssl/man/cat3 OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat1 OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat3 # 20170802: ksyms(4) ioctl interface was removed OLD_FILES+=usr/include/sys/ksyms.h # 20170722: new clang import which bumps version from 4.0.0 to 5.0.0. OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/4.0.0/include/sanitizer OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_complex_builtins.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/4.0.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/4.0.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/altivec.h OLD_FILES+=usr/lib/clang/4.0.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/4.0.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/4.0.0/include/armintr.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmivlintrin.h 
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/cpuid.h OLD_FILES+=usr/lib/clang/4.0.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/immintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/4.0.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/4.0.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/module.modulemap OLD_FILES+=usr/lib/clang/4.0.0/include/msa.h OLD_FILES+=usr/lib/clang/4.0.0/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/opencl-c.h OLD_FILES+=usr/lib/clang/4.0.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/vadefs.h OLD_FILES+=usr/lib/clang/4.0.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/4.0.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/4.0.0/include OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a 
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/4.0.0/lib/freebsd OLD_DIRS+=usr/lib/clang/4.0.0/lib OLD_DIRS+=usr/lib/clang/4.0.0 OLD_FILES+=usr/bin/llvm-pdbdump # 20170610: chown-f_test replaced by chown_test OLD_FILES+=usr/tests/usr.sbin/chown/chown-f_test # 20170609: drop obsolete manpage link (if_rtwn.ko -> rtwn.ko) OLD_FILES+=usr/share/man/man4/if_rtwn.4.gz # 20170531: removal of groff OLD_FILES+=usr/bin/addftinfo OLD_FILES+=usr/bin/afmtodit OLD_FILES+=usr/bin/checknr OLD_FILES+=usr/bin/colcrt OLD_FILES+=usr/bin/eqn OLD_FILES+=usr/bin/grn OLD_FILES+=usr/bin/grodvi OLD_FILES+=usr/bin/groff OLD_FILES+=usr/bin/grog OLD_FILES+=usr/bin/grolbp OLD_FILES+=usr/bin/grolj4 OLD_FILES+=usr/bin/grops OLD_FILES+=usr/bin/grotty OLD_FILES+=usr/bin/hpftodit OLD_FILES+=usr/bin/indxbib OLD_FILES+=usr/bin/lkbib OLD_FILES+=usr/bin/lookbib OLD_FILES+=usr/bin/mmroff OLD_FILES+=usr/bin/neqn OLD_FILES+=usr/bin/nroff OLD_FILES+=usr/bin/pfbtops OLD_FILES+=usr/bin/pic OLD_FILES+=usr/bin/post-grohtml OLD_FILES+=usr/bin/pre-grohtml OLD_FILES+=usr/bin/psroff OLD_FILES+=usr/bin/refer OLD_FILES+=usr/bin/tbl OLD_FILES+=usr/bin/tfmtodit OLD_FILES+=usr/bin/troff OLD_FILES+=usr/bin/vgrind OLD_FILES+=usr/libexec/vfontedpr OLD_FILES+=usr/share/dict/eign OLD_FILES+=usr/share/groff_font/devX100-12/CB OLD_FILES+=usr/share/groff_font/devX100-12/CBI OLD_FILES+=usr/share/groff_font/devX100-12/CI OLD_FILES+=usr/share/groff_font/devX100-12/CR OLD_FILES+=usr/share/groff_font/devX100-12/DESC OLD_FILES+=usr/share/groff_font/devX100-12/HB OLD_FILES+=usr/share/groff_font/devX100-12/HBI OLD_FILES+=usr/share/groff_font/devX100-12/HI OLD_FILES+=usr/share/groff_font/devX100-12/HR OLD_FILES+=usr/share/groff_font/devX100-12/NB OLD_FILES+=usr/share/groff_font/devX100-12/NBI OLD_FILES+=usr/share/groff_font/devX100-12/NI OLD_FILES+=usr/share/groff_font/devX100-12/NR OLD_FILES+=usr/share/groff_font/devX100-12/S OLD_FILES+=usr/share/groff_font/devX100-12/TB OLD_FILES+=usr/share/groff_font/devX100-12/TBI OLD_FILES+=usr/share/groff_font/devX100-12/TI OLD_FILES+=usr/share/groff_font/devX100-12/TR OLD_DIRS+=usr/share/groff_font/devX100-12 OLD_FILES+=usr/share/groff_font/devX100/CB OLD_FILES+=usr/share/groff_font/devX100/CBI OLD_FILES+=usr/share/groff_font/devX100/CI OLD_FILES+=usr/share/groff_font/devX100/CR OLD_FILES+=usr/share/groff_font/devX100/DESC OLD_FILES+=usr/share/groff_font/devX100/HB OLD_FILES+=usr/share/groff_font/devX100/HBI OLD_FILES+=usr/share/groff_font/devX100/HI OLD_FILES+=usr/share/groff_font/devX100/HR OLD_FILES+=usr/share/groff_font/devX100/NB OLD_FILES+=usr/share/groff_font/devX100/NBI OLD_FILES+=usr/share/groff_font/devX100/NI OLD_FILES+=usr/share/groff_font/devX100/NR OLD_FILES+=usr/share/groff_font/devX100/S OLD_FILES+=usr/share/groff_font/devX100/TB OLD_FILES+=usr/share/groff_font/devX100/TBI OLD_FILES+=usr/share/groff_font/devX100/TI OLD_FILES+=usr/share/groff_font/devX100/TR OLD_DIRS+=usr/share/groff_font/devX100 
OLD_FILES+=usr/share/groff_font/devX75-12/CB OLD_FILES+=usr/share/groff_font/devX75-12/CBI OLD_FILES+=usr/share/groff_font/devX75-12/CI OLD_FILES+=usr/share/groff_font/devX75-12/CR OLD_FILES+=usr/share/groff_font/devX75-12/DESC OLD_FILES+=usr/share/groff_font/devX75-12/HB OLD_FILES+=usr/share/groff_font/devX75-12/HBI OLD_FILES+=usr/share/groff_font/devX75-12/HI OLD_FILES+=usr/share/groff_font/devX75-12/HR OLD_FILES+=usr/share/groff_font/devX75-12/NB OLD_FILES+=usr/share/groff_font/devX75-12/NBI OLD_FILES+=usr/share/groff_font/devX75-12/NI OLD_FILES+=usr/share/groff_font/devX75-12/NR OLD_FILES+=usr/share/groff_font/devX75-12/S OLD_FILES+=usr/share/groff_font/devX75-12/TB OLD_FILES+=usr/share/groff_font/devX75-12/TBI OLD_FILES+=usr/share/groff_font/devX75-12/TI OLD_FILES+=usr/share/groff_font/devX75-12/TR OLD_DIRS+=usr/share/groff_font/devX75-12 OLD_FILES+=usr/share/groff_font/devX75/CB OLD_FILES+=usr/share/groff_font/devX75/CBI OLD_FILES+=usr/share/groff_font/devX75/CI OLD_FILES+=usr/share/groff_font/devX75/CR OLD_FILES+=usr/share/groff_font/devX75/DESC OLD_FILES+=usr/share/groff_font/devX75/HB OLD_FILES+=usr/share/groff_font/devX75/HBI OLD_FILES+=usr/share/groff_font/devX75/HI OLD_FILES+=usr/share/groff_font/devX75/HR OLD_FILES+=usr/share/groff_font/devX75/NB OLD_FILES+=usr/share/groff_font/devX75/NBI OLD_FILES+=usr/share/groff_font/devX75/NI OLD_FILES+=usr/share/groff_font/devX75/NR OLD_FILES+=usr/share/groff_font/devX75/S OLD_FILES+=usr/share/groff_font/devX75/TB OLD_FILES+=usr/share/groff_font/devX75/TBI OLD_FILES+=usr/share/groff_font/devX75/TI OLD_FILES+=usr/share/groff_font/devX75/TR OLD_DIRS+=usr/share/groff_font/devX75 OLD_FILES+=usr/share/groff_font/devascii/B OLD_FILES+=usr/share/groff_font/devascii/BI OLD_FILES+=usr/share/groff_font/devascii/CW OLD_FILES+=usr/share/groff_font/devascii/DESC OLD_FILES+=usr/share/groff_font/devascii/I OLD_FILES+=usr/share/groff_font/devascii/L OLD_FILES+=usr/share/groff_font/devascii/R OLD_FILES+=usr/share/groff_font/devascii/S OLD_DIRS+=usr/share/groff_font/devascii OLD_FILES+=usr/share/groff_font/devcp1047/B OLD_FILES+=usr/share/groff_font/devcp1047/BI OLD_FILES+=usr/share/groff_font/devcp1047/CW OLD_FILES+=usr/share/groff_font/devcp1047/DESC OLD_FILES+=usr/share/groff_font/devcp1047/I OLD_FILES+=usr/share/groff_font/devcp1047/L OLD_FILES+=usr/share/groff_font/devcp1047/R OLD_FILES+=usr/share/groff_font/devcp1047/S OLD_DIRS+=usr/share/groff_font/devcp1047 OLD_FILES+=usr/share/groff_font/devdvi/CW OLD_FILES+=usr/share/groff_font/devdvi/CWEC OLD_FILES+=usr/share/groff_font/devdvi/CWI OLD_FILES+=usr/share/groff_font/devdvi/CWIEC OLD_FILES+=usr/share/groff_font/devdvi/CWITC OLD_FILES+=usr/share/groff_font/devdvi/CWTC OLD_FILES+=usr/share/groff_font/devdvi/CompileFonts OLD_FILES+=usr/share/groff_font/devdvi/DESC OLD_FILES+=usr/share/groff_font/devdvi/EX OLD_FILES+=usr/share/groff_font/devdvi/HB OLD_FILES+=usr/share/groff_font/devdvi/HBEC OLD_FILES+=usr/share/groff_font/devdvi/HBI OLD_FILES+=usr/share/groff_font/devdvi/HBIEC OLD_FILES+=usr/share/groff_font/devdvi/HBITC OLD_FILES+=usr/share/groff_font/devdvi/HBTC OLD_FILES+=usr/share/groff_font/devdvi/HI OLD_FILES+=usr/share/groff_font/devdvi/HIEC OLD_FILES+=usr/share/groff_font/devdvi/HITC OLD_FILES+=usr/share/groff_font/devdvi/HR OLD_FILES+=usr/share/groff_font/devdvi/HREC OLD_FILES+=usr/share/groff_font/devdvi/HRTC OLD_FILES+=usr/share/groff_font/devdvi/MI OLD_FILES+=usr/share/groff_font/devdvi/Makefile OLD_FILES+=usr/share/groff_font/devdvi/S OLD_FILES+=usr/share/groff_font/devdvi/SA 
OLD_FILES+=usr/share/groff_font/devdvi/SB OLD_FILES+=usr/share/groff_font/devdvi/SC OLD_FILES+=usr/share/groff_font/devdvi/TB OLD_FILES+=usr/share/groff_font/devdvi/TBEC OLD_FILES+=usr/share/groff_font/devdvi/TBI OLD_FILES+=usr/share/groff_font/devdvi/TBIEC OLD_FILES+=usr/share/groff_font/devdvi/TBITC OLD_FILES+=usr/share/groff_font/devdvi/TBTC OLD_FILES+=usr/share/groff_font/devdvi/TI OLD_FILES+=usr/share/groff_font/devdvi/TIEC OLD_FILES+=usr/share/groff_font/devdvi/TITC OLD_FILES+=usr/share/groff_font/devdvi/TR OLD_FILES+=usr/share/groff_font/devdvi/TREC OLD_FILES+=usr/share/groff_font/devdvi/TRTC OLD_FILES+=usr/share/groff_font/devdvi/ec.map OLD_FILES+=usr/share/groff_font/devdvi/msam.map OLD_FILES+=usr/share/groff_font/devdvi/msbm.map OLD_FILES+=usr/share/groff_font/devdvi/tc.map OLD_FILES+=usr/share/groff_font/devdvi/texb.map OLD_FILES+=usr/share/groff_font/devdvi/texex.map OLD_FILES+=usr/share/groff_font/devdvi/texi.map OLD_FILES+=usr/share/groff_font/devdvi/texmi.map OLD_FILES+=usr/share/groff_font/devdvi/texr.map OLD_FILES+=usr/share/groff_font/devdvi/texsy.map OLD_FILES+=usr/share/groff_font/devdvi/textex.map OLD_FILES+=usr/share/groff_font/devdvi/textt.map OLD_DIRS+=usr/share/groff_font/devdvi OLD_FILES+=usr/share/groff_font/devhtml/B OLD_FILES+=usr/share/groff_font/devhtml/BI OLD_FILES+=usr/share/groff_font/devhtml/CB OLD_FILES+=usr/share/groff_font/devhtml/CBI OLD_FILES+=usr/share/groff_font/devhtml/CI OLD_FILES+=usr/share/groff_font/devhtml/CR OLD_FILES+=usr/share/groff_font/devhtml/DESC OLD_FILES+=usr/share/groff_font/devhtml/I OLD_FILES+=usr/share/groff_font/devhtml/R OLD_FILES+=usr/share/groff_font/devhtml/S OLD_DIRS+=usr/share/groff_font/devhtml OLD_FILES+=usr/share/groff_font/devkoi8-r/B OLD_FILES+=usr/share/groff_font/devkoi8-r/BI OLD_FILES+=usr/share/groff_font/devkoi8-r/CW OLD_FILES+=usr/share/groff_font/devkoi8-r/DESC OLD_FILES+=usr/share/groff_font/devkoi8-r/I OLD_FILES+=usr/share/groff_font/devkoi8-r/L OLD_FILES+=usr/share/groff_font/devkoi8-r/R OLD_FILES+=usr/share/groff_font/devkoi8-r/S OLD_DIRS+=usr/share/groff_font/devkoi8-r OLD_FILES+=usr/share/groff_font/devlatin1/B OLD_FILES+=usr/share/groff_font/devlatin1/BI OLD_FILES+=usr/share/groff_font/devlatin1/CW OLD_FILES+=usr/share/groff_font/devlatin1/DESC OLD_FILES+=usr/share/groff_font/devlatin1/I OLD_FILES+=usr/share/groff_font/devlatin1/L OLD_FILES+=usr/share/groff_font/devlatin1/R OLD_FILES+=usr/share/groff_font/devlatin1/S OLD_DIRS+=usr/share/groff_font/devlatin1 OLD_FILES+=usr/share/groff_font/devlbp/CB OLD_FILES+=usr/share/groff_font/devlbp/CI OLD_FILES+=usr/share/groff_font/devlbp/CR OLD_FILES+=usr/share/groff_font/devlbp/DESC OLD_FILES+=usr/share/groff_font/devlbp/EB OLD_FILES+=usr/share/groff_font/devlbp/EI OLD_FILES+=usr/share/groff_font/devlbp/ER OLD_FILES+=usr/share/groff_font/devlbp/HB OLD_FILES+=usr/share/groff_font/devlbp/HBI OLD_FILES+=usr/share/groff_font/devlbp/HI OLD_FILES+=usr/share/groff_font/devlbp/HNB OLD_FILES+=usr/share/groff_font/devlbp/HNBI OLD_FILES+=usr/share/groff_font/devlbp/HNI OLD_FILES+=usr/share/groff_font/devlbp/HNR OLD_FILES+=usr/share/groff_font/devlbp/HR OLD_FILES+=usr/share/groff_font/devlbp/TB OLD_FILES+=usr/share/groff_font/devlbp/TBI OLD_FILES+=usr/share/groff_font/devlbp/TI OLD_FILES+=usr/share/groff_font/devlbp/TR OLD_DIRS+=usr/share/groff_font/devlbp OLD_FILES+=usr/share/groff_font/devlj4/AB OLD_FILES+=usr/share/groff_font/devlj4/ABI OLD_FILES+=usr/share/groff_font/devlj4/AI OLD_FILES+=usr/share/groff_font/devlj4/ALBB OLD_FILES+=usr/share/groff_font/devlj4/ALBR 
OLD_FILES+=usr/share/groff_font/devlj4/AOB OLD_FILES+=usr/share/groff_font/devlj4/AOI OLD_FILES+=usr/share/groff_font/devlj4/AOR OLD_FILES+=usr/share/groff_font/devlj4/AR OLD_FILES+=usr/share/groff_font/devlj4/CB OLD_FILES+=usr/share/groff_font/devlj4/CBI OLD_FILES+=usr/share/groff_font/devlj4/CI OLD_FILES+=usr/share/groff_font/devlj4/CLARENDON OLD_FILES+=usr/share/groff_font/devlj4/CORONET OLD_FILES+=usr/share/groff_font/devlj4/CR OLD_FILES+=usr/share/groff_font/devlj4/DESC OLD_FILES+=usr/share/groff_font/devlj4/GB OLD_FILES+=usr/share/groff_font/devlj4/GBI OLD_FILES+=usr/share/groff_font/devlj4/GI OLD_FILES+=usr/share/groff_font/devlj4/GR OLD_FILES+=usr/share/groff_font/devlj4/LGB OLD_FILES+=usr/share/groff_font/devlj4/LGI OLD_FILES+=usr/share/groff_font/devlj4/LGR OLD_FILES+=usr/share/groff_font/devlj4/MARIGOLD OLD_FILES+=usr/share/groff_font/devlj4/OB OLD_FILES+=usr/share/groff_font/devlj4/OBI OLD_FILES+=usr/share/groff_font/devlj4/OI OLD_FILES+=usr/share/groff_font/devlj4/OR OLD_FILES+=usr/share/groff_font/devlj4/S OLD_FILES+=usr/share/groff_font/devlj4/SYMBOL OLD_FILES+=usr/share/groff_font/devlj4/TB OLD_FILES+=usr/share/groff_font/devlj4/TBI OLD_FILES+=usr/share/groff_font/devlj4/TI OLD_FILES+=usr/share/groff_font/devlj4/TNRB OLD_FILES+=usr/share/groff_font/devlj4/TNRBI OLD_FILES+=usr/share/groff_font/devlj4/TNRI OLD_FILES+=usr/share/groff_font/devlj4/TNRR OLD_FILES+=usr/share/groff_font/devlj4/TR OLD_FILES+=usr/share/groff_font/devlj4/UB OLD_FILES+=usr/share/groff_font/devlj4/UBI OLD_FILES+=usr/share/groff_font/devlj4/UCB OLD_FILES+=usr/share/groff_font/devlj4/UCBI OLD_FILES+=usr/share/groff_font/devlj4/UCI OLD_FILES+=usr/share/groff_font/devlj4/UCR OLD_FILES+=usr/share/groff_font/devlj4/UI OLD_FILES+=usr/share/groff_font/devlj4/UR OLD_FILES+=usr/share/groff_font/devlj4/WINGDINGS OLD_DIRS+=usr/share/groff_font/devlj4 OLD_FILES+=usr/share/groff_font/devps/AB OLD_FILES+=usr/share/groff_font/devps/ABI OLD_FILES+=usr/share/groff_font/devps/AI OLD_FILES+=usr/share/groff_font/devps/AR OLD_FILES+=usr/share/groff_font/devps/BMB OLD_FILES+=usr/share/groff_font/devps/BMBI OLD_FILES+=usr/share/groff_font/devps/BMI OLD_FILES+=usr/share/groff_font/devps/BMR OLD_FILES+=usr/share/groff_font/devps/CB OLD_FILES+=usr/share/groff_font/devps/CBI OLD_FILES+=usr/share/groff_font/devps/CI OLD_FILES+=usr/share/groff_font/devps/CR OLD_FILES+=usr/share/groff_font/devps/DESC OLD_FILES+=usr/share/groff_font/devps/EURO OLD_FILES+=usr/share/groff_font/devps/HB OLD_FILES+=usr/share/groff_font/devps/HBI OLD_FILES+=usr/share/groff_font/devps/HI OLD_FILES+=usr/share/groff_font/devps/HNB OLD_FILES+=usr/share/groff_font/devps/HNBI OLD_FILES+=usr/share/groff_font/devps/HNI OLD_FILES+=usr/share/groff_font/devps/HNR OLD_FILES+=usr/share/groff_font/devps/HR OLD_FILES+=usr/share/groff_font/devps/Makefile OLD_FILES+=usr/share/groff_font/devps/NB OLD_FILES+=usr/share/groff_font/devps/NBI OLD_FILES+=usr/share/groff_font/devps/NI OLD_FILES+=usr/share/groff_font/devps/NR OLD_FILES+=usr/share/groff_font/devps/PB OLD_FILES+=usr/share/groff_font/devps/PBI OLD_FILES+=usr/share/groff_font/devps/PI OLD_FILES+=usr/share/groff_font/devps/PR OLD_FILES+=usr/share/groff_font/devps/S OLD_FILES+=usr/share/groff_font/devps/SS OLD_FILES+=usr/share/groff_font/devps/TB OLD_FILES+=usr/share/groff_font/devps/TBI OLD_FILES+=usr/share/groff_font/devps/TI OLD_FILES+=usr/share/groff_font/devps/TR OLD_FILES+=usr/share/groff_font/devps/ZCMI OLD_FILES+=usr/share/groff_font/devps/ZD OLD_FILES+=usr/share/groff_font/devps/ZDR 
OLD_FILES+=usr/share/groff_font/devps/afmname OLD_FILES+=usr/share/groff_font/devps/dingbats.map OLD_FILES+=usr/share/groff_font/devps/dingbats.rmap OLD_FILES+=usr/share/groff_font/devps/download OLD_FILES+=usr/share/groff_font/devps/freeeuro.pfa OLD_FILES+=usr/share/groff_font/devps/lgreekmap OLD_FILES+=usr/share/groff_font/devps/prologue OLD_FILES+=usr/share/groff_font/devps/symbol.sed OLD_FILES+=usr/share/groff_font/devps/symbolchars OLD_FILES+=usr/share/groff_font/devps/symbolsl.afm OLD_FILES+=usr/share/groff_font/devps/symbolsl.pfa OLD_FILES+=usr/share/groff_font/devps/text.enc OLD_FILES+=usr/share/groff_font/devps/textmap OLD_FILES+=usr/share/groff_font/devps/zapfdr.pfa OLD_DIRS+=usr/share/groff_font/devps OLD_FILES+=usr/share/groff_font/devutf8/B OLD_FILES+=usr/share/groff_font/devutf8/BI OLD_FILES+=usr/share/groff_font/devutf8/CW OLD_FILES+=usr/share/groff_font/devutf8/DESC OLD_FILES+=usr/share/groff_font/devutf8/I OLD_FILES+=usr/share/groff_font/devutf8/L OLD_FILES+=usr/share/groff_font/devutf8/R OLD_FILES+=usr/share/groff_font/devutf8/S OLD_DIRS+=usr/share/groff_font/devutf8 OLD_DIRS+=usr/share/groff_font OLD_FILES+=usr/share/man/man1/addftinfo.1.gz OLD_FILES+=usr/share/man/man1/afmtodit.1.gz OLD_FILES+=usr/share/man/man1/checknr.1.gz OLD_FILES+=usr/share/man/man1/colcrt.1.gz OLD_FILES+=usr/share/man/man1/eqn.1.gz OLD_FILES+=usr/share/man/man1/grn.1.gz OLD_FILES+=usr/share/man/man1/grodvi.1.gz OLD_FILES+=usr/share/man/man1/groff.1.gz OLD_FILES+=usr/share/man/man1/grog.1.gz OLD_FILES+=usr/share/man/man1/grolbp.1.gz OLD_FILES+=usr/share/man/man1/grolj4.1.gz OLD_FILES+=usr/share/man/man1/grops.1.gz OLD_FILES+=usr/share/man/man1/grotty.1.gz OLD_FILES+=usr/share/man/man1/hpftodit.1.gz OLD_FILES+=usr/share/man/man1/indxbib.1.gz OLD_FILES+=usr/share/man/man1/lkbib.1.gz OLD_FILES+=usr/share/man/man1/lookbib.1.gz OLD_FILES+=usr/share/man/man1/mmroff.1.gz OLD_FILES+=usr/share/man/man1/neqn.1.gz OLD_FILES+=usr/share/man/man1/nroff.1.gz OLD_FILES+=usr/share/man/man1/pfbtops.1.gz OLD_FILES+=usr/share/man/man1/pic.1.gz OLD_FILES+=usr/share/man/man1/psroff.1.gz OLD_FILES+=usr/share/man/man1/refer.1.gz OLD_FILES+=usr/share/man/man1/tbl.1.gz OLD_FILES+=usr/share/man/man1/tfmtodit.1.gz OLD_FILES+=usr/share/man/man1/troff.1.gz OLD_FILES+=usr/share/man/man1/vgrind.1.gz OLD_FILES+=usr/share/man/man5/groff_font.5.gz OLD_FILES+=usr/share/man/man5/groff_out.5.gz OLD_FILES+=usr/share/man/man5/groff_tmac.5.gz OLD_FILES+=usr/share/man/man5/lj4_font.5.gz OLD_FILES+=usr/share/man/man5/tmac.5.gz OLD_FILES+=usr/share/man/man5/vgrindefs.5.gz OLD_FILES+=usr/share/man/man7/ditroff.7.gz OLD_FILES+=usr/share/man/man7/groff.7.gz OLD_FILES+=usr/share/man/man7/groff_char.7.gz OLD_FILES+=usr/share/man/man7/groff_diff.7.gz OLD_FILES+=usr/share/man/man7/groff_man.7.gz OLD_FILES+=usr/share/man/man7/groff_mdoc.7.gz OLD_FILES+=usr/share/man/man7/groff_me.7.gz OLD_FILES+=usr/share/man/man7/groff_mm.7.gz OLD_FILES+=usr/share/man/man7/groff_mmse.7.gz OLD_FILES+=usr/share/man/man7/groff_ms.7.gz OLD_FILES+=usr/share/man/man7/groff_trace.7.gz OLD_FILES+=usr/share/man/man7/groff_www.7.gz OLD_FILES+=usr/share/man/man7/mdoc.samples.7.gz OLD_FILES+=usr/share/man/man7/me.7.gz OLD_FILES+=usr/share/man/man7/mm.7.gz OLD_FILES+=usr/share/man/man7/mmse.7.gz OLD_FILES+=usr/share/man/man7/ms.7.gz OLD_FILES+=usr/share/man/man7/orig_me.7.gz OLD_FILES+=usr/share/me/acm.me OLD_FILES+=usr/share/me/chars.me OLD_FILES+=usr/share/me/deltext.me OLD_FILES+=usr/share/me/eqn.me OLD_FILES+=usr/share/me/float.me OLD_FILES+=usr/share/me/footnote.me 
OLD_FILES+=usr/share/me/index.me OLD_FILES+=usr/share/me/letterhead.me OLD_FILES+=usr/share/me/local.me OLD_FILES+=usr/share/me/null.me OLD_FILES+=usr/share/me/refer.me OLD_FILES+=usr/share/me/revisions OLD_FILES+=usr/share/me/sh.me OLD_FILES+=usr/share/me/tbl.me OLD_FILES+=usr/share/me/thesis.me OLD_DIRS+=usr/share/me OLD_FILES+=usr/share/misc/vgrindefs OLD_FILES+=usr/share/misc/vgrindefs.db OLD_FILES+=usr/share/tmac/X.tmac OLD_FILES+=usr/share/tmac/Xps.tmac OLD_FILES+=usr/share/tmac/a4.tmac OLD_FILES+=usr/share/tmac/an-old.tmac OLD_FILES+=usr/share/tmac/an.tmac OLD_FILES+=usr/share/tmac/andoc.tmac OLD_FILES+=usr/share/tmac/composite.tmac OLD_FILES+=usr/share/tmac/cp1047.tmac OLD_FILES+=usr/share/tmac/devtag.tmac OLD_FILES+=usr/share/tmac/doc.tmac OLD_FILES+=usr/share/tmac/dvi.tmac OLD_FILES+=usr/share/tmac/e.tmac OLD_FILES+=usr/share/tmac/ec.tmac OLD_FILES+=usr/share/tmac/eqnrc OLD_FILES+=usr/share/tmac/europs.tmac OLD_FILES+=usr/share/tmac/html-end.tmac OLD_FILES+=usr/share/tmac/html.tmac OLD_FILES+=usr/share/tmac/hyphen.ru OLD_FILES+=usr/share/tmac/hyphen.us OLD_FILES+=usr/share/tmac/hyphenex.us OLD_FILES+=usr/share/tmac/koi8-r.tmac OLD_FILES+=usr/share/tmac/latin1.tmac OLD_FILES+=usr/share/tmac/latin2.tmac OLD_FILES+=usr/share/tmac/latin9.tmac OLD_FILES+=usr/share/tmac/lbp.tmac OLD_FILES+=usr/share/tmac/lj4.tmac OLD_FILES+=usr/share/tmac/m.tmac OLD_FILES+=usr/share/tmac/man.local OLD_FILES+=usr/share/tmac/man.tmac OLD_FILES+=usr/share/tmac/mandoc.tmac OLD_FILES+=usr/share/tmac/mdoc.local OLD_FILES+=usr/share/tmac/mdoc.tmac OLD_FILES+=usr/share/tmac/mdoc/doc-common OLD_FILES+=usr/share/tmac/mdoc/doc-ditroff OLD_FILES+=usr/share/tmac/mdoc/doc-nroff OLD_FILES+=usr/share/tmac/mdoc/doc-syms OLD_FILES+=usr/share/tmac/mdoc/fr.ISO8859-1 OLD_FILES+=usr/share/tmac/mdoc/ru.KOI8-R OLD_DIRS+=usr/share/tmac/mdoc OLD_FILES+=usr/share/tmac/me.tmac OLD_FILES+=usr/share/tmac/mm/0.MT OLD_FILES+=usr/share/tmac/mm/4.MT OLD_FILES+=usr/share/tmac/mm/5.MT OLD_FILES+=usr/share/tmac/mm/locale OLD_FILES+=usr/share/tmac/mm/mm.tmac OLD_FILES+=usr/share/tmac/mm/mmse.tmac OLD_FILES+=usr/share/tmac/mm/ms.cov OLD_FILES+=usr/share/tmac/mm/se_locale OLD_FILES+=usr/share/tmac/mm/se_ms.cov OLD_DIRS+=usr/share/tmac/mm OLD_FILES+=usr/share/tmac/ms.tmac OLD_FILES+=usr/share/tmac/mse.tmac OLD_FILES+=usr/share/tmac/papersize.tmac OLD_FILES+=usr/share/tmac/pic.tmac OLD_FILES+=usr/share/tmac/ps.tmac OLD_FILES+=usr/share/tmac/psatk.tmac OLD_FILES+=usr/share/tmac/psold.tmac OLD_FILES+=usr/share/tmac/pspic.tmac OLD_FILES+=usr/share/tmac/s.tmac OLD_FILES+=usr/share/tmac/safer.tmac OLD_FILES+=usr/share/tmac/tmac.orig_me OLD_FILES+=usr/share/tmac/tmac.vgrind OLD_FILES+=usr/share/tmac/trace.tmac OLD_FILES+=usr/share/tmac/troffrc OLD_FILES+=usr/share/tmac/troffrc-end OLD_FILES+=usr/share/tmac/tty-char.tmac OLD_FILES+=usr/share/tmac/tty.tmac OLD_FILES+=usr/share/tmac/unicode.tmac OLD_FILES+=usr/share/tmac/www.tmac OLD_DIRS+=usr/share/tmac # 20170607: remove incorrect atf_check(1) manpage link OLD_FILES+=usr/share/man/man1/atf_check.1.gz # 20170601: remove stale manpage OLD_FILES+=usr/share/man/man2/cap_rights_get.2.gz # 20170601: old libifconfig and libifc OLD_FILES+=usr/lib/libifc.a OLD_FILES+=usr/lib/libifc_p.a OLD_FILES+=usr/lib/libifconfig.a OLD_FILES+=usr/lib/libifconfig_p.a OLD_FILES+=usr/lib32/libifc.a OLD_FILES+=usr/lib32/libifc_p.a OLD_FILES+=usr/lib32/libifconfig.a OLD_FILES+=usr/lib32/libifconfig_p.a # 20170529: mount.conf(8) -> mount.conf(5) OLD_FILES+=usr/share/man/man8/mount.conf.8.gz # 20170525: remove misleading template 
OLD_FILES+=usr/share/misc/man.template # 20170525: disconnect the roff docs from the build OLD_FILES+=usr/share/doc/papers/beyond43.ascii.gz OLD_FILES+=usr/share/doc/papers/bio.ascii.gz OLD_FILES+=usr/share/doc/papers/contents.ascii.gz OLD_FILES+=usr/share/doc/papers/devfs.ascii.gz OLD_FILES+=usr/share/doc/papers/diskperf.ascii.gz OLD_FILES+=usr/share/doc/papers/fsinterface.ascii.gz OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz OLD_FILES+=usr/share/doc/papers/jail.ascii.gz OLD_FILES+=usr/share/doc/papers/kernmalloc.ascii.gz OLD_FILES+=usr/share/doc/papers/kerntune.ascii.gz OLD_FILES+=usr/share/doc/papers/malloc.ascii.gz OLD_FILES+=usr/share/doc/papers/newvm.ascii.gz OLD_FILES+=usr/share/doc/papers/releng.ascii.gz OLD_FILES+=usr/share/doc/papers/sysperf.ascii.gz OLD_FILES+=usr/share/doc/papers/timecounter.ascii.gz OLD_DIRS+=usr/share/doc/papers OLD_FILES+=usr/share/doc/psd/01.cacm/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/01.cacm OLD_FILES+=usr/share/doc/psd/02.implement/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/02.implement OLD_FILES+=usr/share/doc/psd/03.iosys/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/03.iosys OLD_FILES+=usr/share/doc/psd/04.uprog/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/04.uprog OLD_FILES+=usr/share/doc/psd/05.sysman/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/05.sysman OLD_FILES+=usr/share/doc/psd/06.Clang/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/06.Clang OLD_FILES+=usr/share/doc/psd/12.make/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/12.make OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/13.rcs OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz OLD_DIRS+=usr/share/doc/psd/13.rcs OLD_FILES+=usr/share/doc/psd/15.yacc/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/15.yacc OLD_FILES+=usr/share/doc/psd/16.lex/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/16.lex OLD_FILES+=usr/share/doc/psd/17.m4/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/17.m4 OLD_FILES+=usr/share/doc/psd/18.gprof/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/18.gprof OLD_FILES+=usr/share/doc/psd/20.ipctut/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/20.ipctut OLD_FILES+=usr/share/doc/psd/21.ipc/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/21.ipc OLD_FILES+=usr/share/doc/psd/22.rpcgen/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/22.rpcgen OLD_FILES+=usr/share/doc/psd/23.rpc/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/23.rpc OLD_FILES+=usr/share/doc/psd/24.xdr/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/24.xdr OLD_FILES+=usr/share/doc/psd/25.xdrrfc/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/25.xdrrfc OLD_FILES+=usr/share/doc/psd/26.rpcrfc/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/26.rpcrfc OLD_FILES+=usr/share/doc/psd/27.nfsrfc/paper.ascii.gz OLD_DIRS+=usr/share/doc/psd/27.nfsrfc OLD_FILES+=usr/share/doc/psd/Title.ascii.gz OLD_FILES+=usr/share/doc/psd/contents.ascii.gz OLD_DIRS+=usr/share/doc/psd/ OLD_FILES+=usr/share/doc/smm/01.setup/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/01.setup OLD_FILES+=usr/share/doc/smm/02.config/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/02.config OLD_FILES+=usr/share/doc/smm/03.fsck/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/03.fsck OLD_FILES+=usr/share/doc/smm/04.quotas/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/04.quotas OLD_FILES+=usr/share/doc/smm/05.fastfs/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/05.fastfs OLD_FILES+=usr/share/doc/smm/06.nfs/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/06.nfs OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/07.lpd OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz 
OLD_DIRS+=usr/share/doc/smm/08.sendmailop OLD_FILES+=usr/share/doc/smm/11.timedop/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/11.timedop OLD_FILES+=usr/share/doc/smm/12.timed/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/12.timed OLD_FILES+=usr/share/doc/smm/18.net/paper.ascii.gz OLD_DIRS+=usr/share/doc/smm/18.net OLD_FILES+=usr/share/doc/smm/Title.ascii.gz OLD_FILES+=usr/share/doc/smm/contents.ascii.gz OLD_DIRS+=usr/share/doc/smm OLD_FILES+=usr/share/doc/usd/04.csh/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/04.csh OLD_FILES+=usr/share/doc/usd/05.dc/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/05.dc OLD_FILES+=usr/share/doc/usd/06.bc/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/06.bc OLD_FILES+=usr/share/doc/usd/07.mail/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/07.mail OLD_FILES+=usr/share/doc/usd/10.exref/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/10.exref/summary.ascii.gz OLD_DIRS+=usr/share/doc/usd/10.exref OLD_FILES+=usr/share/doc/usd/11.edit/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/11.edit OLD_FILES+=usr/share/doc/usd/12.vi/paper.ascii.gz OLD_FILES+=usr/share/doc/usd/12.vi/summary.ascii.gz OLD_FILES+=usr/share/doc/usd/12.vi/viapwh.ascii.gz OLD_DIRS+=usr/share/doc/usd/12.vi OLD_FILES+=usr/share/doc/usd/13.viref/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/13.viref OLD_FILES+=usr/share/doc/usd/18.msdiffs/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/18.msdiffs OLD_FILES+=usr/share/doc/usd/19.memacros/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/19.memacros OLD_FILES+=usr/share/doc/usd/20.meref/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/20.meref OLD_FILES+=usr/share/doc/usd/21.troff/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/21.troff OLD_FILES+=usr/share/doc/usd/22.trofftut/paper.ascii.gz OLD_DIRS+=usr/share/doc/usd/22.trofftut OLD_FILES+=usr/share/doc/usd/Title.ascii.gz OLD_FILES+=usr/share/doc/usd/contents.ascii.gz OLD_DIRS+=usr/share/doc/usd # 20170523: 64-bit inode support, library version bumps OLD_LIBS+=lib/libzfs.so.2 OLD_LIBS+=usr/lib/libarchive.so.6 OLD_LIBS+=usr/lib/libmilter.so.5 OLD_LIBS+=usr/lib32/libzfs.so.2 OLD_LIBS+=usr/lib32/libarchive.so.6 OLD_LIBS+=usr/lib32/libmilter.so.5 # 20170427: NATM configuration support removed OLD_FILES+=etc/rc.d/atm1 OLD_FILES+=etc/rc.d/atm2 OLD_FILES+=etc/rc.d/atm3 # 20170424: NATM support removed OLD_FILES+=rescue/atmconfig OLD_FILES+=sbin/atmconfig OLD_FILES+=usr/include/bsnmp/snmp_atm.h OLD_FILES+=usr/include/dev/utopia/idtphy.h OLD_FILES+=usr/include/dev/utopia/suni.h OLD_FILES+=usr/include/dev/utopia/utopia.h OLD_FILES+=usr/include/dev/utopia/utopia_priv.h OLD_DIRS+=usr/include/dev/utopia OLD_FILES+=usr/include/net/if_atm.h OLD_FILES+=usr/include/netgraph/atm/ng_atm.h OLD_FILES+=usr/include/netinet/if_atm.h OLD_FILES+=usr/include/netnatm/natm.h OLD_FILES+=usr/lib/debug/sbin/atmconfig.debug OLD_FILES+=usr/lib/debug/usr/lib/snmp_atm.so.6.debug OLD_FILES+=usr/lib/snmp_atm.so OLD_FILES+=usr/lib/snmp_atm.so.6 OLD_FILES+=usr/share/doc/atm/atmconfig.help OLD_FILES+=usr/share/doc/atm/atmconfig_device.help OLD_DIRS+=usr/share/doc/atm OLD_FILES+=usr/share/man/man3/snmp_atm.3.gz OLD_FILES+=usr/share/man/man4/en.4.gz OLD_FILES+=usr/share/man/man4/fatm.4.gz OLD_FILES+=usr/share/man/man4/hatm.4.gz OLD_FILES+=usr/share/man/man4/if_en.4.gz OLD_FILES+=usr/share/man/man4/if_fatm.4.gz OLD_FILES+=usr/share/man/man4/if_hatm.4.gz OLD_FILES+=usr/share/man/man4/if_patm.4.gz OLD_FILES+=usr/share/man/man4/natm.4.gz OLD_FILES+=usr/share/man/man4/natmip.4.gz OLD_FILES+=usr/share/man/man4/ng_atm.4.gz OLD_FILES+=usr/share/man/man4/patm.4.gz 
OLD_FILES+=usr/share/man/man4/utopia.4.gz OLD_FILES+=usr/share/man/man8/atmconfig.8.gz OLD_FILES+=usr/share/man/man9/utopia.9.gz OLD_FILES+=usr/share/snmp/defs/atm_freebsd.def OLD_FILES+=usr/share/snmp/defs/atm_tree.def OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt # 20170420: remove GNU diff OLD_FILES+=usr/share/man/man7/diff.7.gz # 20170322: rename to _test to match the FreeBSD test suite name scheme OLD_FILES+=usr/tests/usr.bin/col/col OLD_FILES+=usr/tests/usr.bin/diff/diff OLD_FILES+=usr/tests/usr.bin/ident/ident OLD_FILES+=usr/tests/usr.bin/mkimg/mkimg OLD_FILES+=usr/tests/usr.bin/sdiff/sdiff OLD_FILES+=usr/tests/usr.bin/soelim/soelim OLD_FILES+=usr/tests/usr.sbin/pw/pw_config OLD_FILES+=usr/tests/usr.sbin/pw/pw_etcdir OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupadd OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupdel OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupmod OLD_FILES+=usr/tests/usr.sbin/pw/pw_lock OLD_FILES+=usr/tests/usr.sbin/pw/pw_useradd OLD_FILES+=usr/tests/usr.sbin/pw/pw_userdel OLD_FILES+=usr/tests/usr.sbin/pw/pw_usermod OLD_FILES+=usr/tests/usr.sbin/pw/pw_usernext # 20170322: garbage collect old references to igb(4) OLD_FILES+=usr/share/man/man4/if_igb.4.gz OLD_FILES+=usr/share/man/man4/igb.4.gz # 20170319: io_test requires zh_TW.Big5 locale. OLD_FILES+=usr/tests/lib/libc/locale/io_test # 20170319: remove nls for non supported Big5* locales OLD_DIRS+=usr/share/nls/zh_HK.Big5HKSCS OLD_DIRS+=usr/share/nls/zh_TW.Big5 # 20170313: move .../sys/geom/eli/... to .../sys/geom/class/eli/... OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/pbkdf2 OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/Kyuafile OLD_FILES+=usr/tests/sys/geom/eli/Kyuafile OLD_DIRS+=usr/tests/sys/geom/eli/pbkdf2 OLD_DIRS+=usr/tests/sys/geom/eli # 20170313: sbin/ipftest and ipresend temporarily disconnected. OLD_FILES+=sbin/ipftest OLD_FILES+=sbin/ipresend # 20170311: Remove WITHOUT_MANDOCDB option OLD_FILES+=usr/share/man/man1/makewhatis.1.gz # 20170308: rename some tests OLD_FILES+=usr/tests/bin/pwait/pwait OLD_FILES+=usr/tests/usr.bin/timeout/timeout # 20170307: remove pcap-int.h OLD_FILES+=usr/include/pcap-int.h # 20170302: new libc++ import which bumps version from 3.9.1 to 4.0.0. OLD_FILES+=usr/include/c++/v1/__undef___deallocate OLD_FILES+=usr/include/c++/v1/tr1/__undef___deallocate # 20170302: new clang import which bumps version from 3.9.1 to 4.0.0. 
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/esan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.9.1/include/sanitizer OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_cmath.h OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_intrinsics.h OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_math_forward_declares.h OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/3.9.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.9.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/altivec.h OLD_FILES+=usr/lib/clang/3.9.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.9.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmaintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmavlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512pfintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmiintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/cpuid.h OLD_FILES+=usr/lib/clang/3.9.1/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.9.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/immintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.9.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.9.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/module.modulemap OLD_FILES+=usr/lib/clang/3.9.1/include/msa.h OLD_FILES+=usr/lib/clang/3.9.1/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.9.1/include/opencl-c.h 
OLD_FILES+=usr/lib/clang/3.9.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.1/include
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.1/lib
OLD_DIRS+=usr/lib/clang/3.9.1
# 20170226: SVR4 compatibility removed
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/streams.4
OLD_FILES+=usr/share/man/man4/svr4.4
.endif
# 20170219: OpenPAM RADULA upgrade removed the libpam tests
OLD_FILES+=usr/tests/lib/libpam/Kyuafile
OLD_FILES+=usr/tests/lib/libpam/t_openpam_ctype
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readlinev
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readword
OLD_DIRS+=usr/tests/lib/libpam
# 20170206: remove bdes(1)
OLD_FILES+=usr/bin/bdes
OLD_FILES+=usr/lib/debug/usr/bin/bdes.debug
OLD_FILES+=usr/share/man/man1/bdes.1.gz
# 20170206: merged projects/ipsec
OLD_FILES+=usr/include/netinet/ip_ipsec.h
OLD_FILES+=usr/include/netinet6/ip6_ipsec.h
# 20170128: remove pc98 support
OLD_FILES+=usr/include/dev/ic/i8251.h
OLD_FILES+=usr/include/dev/ic/i8255.h
OLD_FILES+=usr/include/dev/ic/rsa.h
OLD_FILES+=usr/include/dev/ic/wd33c93reg.h
OLD_FILES+=usr/include/sys/disk/pc98.h
OLD_FILES+=usr/include/sys/diskpc98.h
OLD_FILES+=usr/share/man/man4/i386/ct.4.gz
OLD_FILES+=usr/share/man/man4/i386/snc.4.gz
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.kbd
# 20170110: Four files from ggate tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/gate/1_test
OLD_FILES+=usr/tests/sys/geom/class/gate/2_test
OLD_FILES+=usr/tests/sys/geom/class/gate/3_test
OLD_FILES+=usr/tests/sys/geom/class/gate/conf.sh
# 20170103: libbsnmptools.so made into an INTERNALLIB
OLD_FILES+=usr/lib/libbsnmptools.a
OLD_FILES+=usr/lib/libbsnmptools_p.a
OLD_LIBS+=usr/lib/libbsnmptools.so.0
OLD_LIBS+=usr/lib/libbsnmptools.so
# 20170102: sysdecode_getfsstat_flags() renamed to sysdecode_getfsstat_mode()
OLD_FILES+=usr/share/man/man3/sysdecode_getfsstat_flags.3.gz
# 20161230: libarchive ACL pax test renamed to test_acl_pax_posix1e.tar.uu
OLD_FILES+=usr/tests/lib/libarchive/test_acl_pax.tar.uu
# 20161229: Three files from gnop tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/nop/1_test
OLD_FILES+=usr/tests/sys/geom/class/nop/2_test
OLD_FILES+=usr/tests/sys/geom/class/nop/conf.sh
# 20161217: new clang import which bumps version from 3.9.0 to 3.9.1.
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmivlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlcdintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/clflushoptintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.9.0/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.9.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.9.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.9.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.9.0/include/mwaitxintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/opencl-c.h OLD_FILES+=usr/lib/clang/3.9.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/vadefs.h OLD_FILES+=usr/lib/clang/3.9.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/3.9.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.9.0/include OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-i386.a 
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.9.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.9.0/lib OLD_DIRS+=usr/lib/clang/3.9.0 # 20161205: libproc version bump OLD_LIBS+=usr/lib/libproc.so.3 OLD_LIBS+=usr/lib32/libproc.so.3 # 20161127: Remove vm_page_cache(9) OLD_FILES+=usr/share/man/man9/vm_page_cache.9.gz # 20161124: new clang import which bumps version from 3.8.0 to 3.9.0. OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.8.0/include/sanitizer OLD_FILES+=usr/lib/clang/3.8.0/include/__clang_cuda_runtime_wrapper.h OLD_FILES+=usr/lib/clang/3.8.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.8.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.8.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.8.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.8.0/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.8.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/mm3dnow.h 
OLD_FILES+=usr/lib/clang/3.8.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.8.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.8.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/pkuintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/vadefs.h OLD_FILES+=usr/lib/clang/3.8.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsavecintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveoptintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xsavesintrin.h OLD_FILES+=usr/lib/clang/3.8.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.8.0/include OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.so OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.so OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.8.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.8.0/lib OLD_DIRS+=usr/lib/clang/3.8.0 # 20161121: Hyper-V manuals only apply to amd64 and i386. 
.if ${TARGET_ARCH} != "amd64" && ${TARGET_ARCH} != "i386" OLD_FILES+=usr/share/man/man4/hv_kvp.4.gz OLD_FILES+=usr/share/man/man4/hv_netvsc.4.gz OLD_FILES+=usr/share/man/man4/hv_storvsc.4.gz OLD_FILES+=usr/share/man/man4/hv_utils.4.gz OLD_FILES+=usr/share/man/man4/hv_vmbus.4.gz OLD_FILES+=usr/share/man/man4/hv_vss.4.gz .endif # 20161118: Remove hv_ata_pci_disengage(4) OLD_FILES+=usr/share/man/man4/hv_ata_pci_disengage.4.gz # 20161017: urtwn(4) was merged into rtwn(4) OLD_FILES+=usr/share/man/man4/urtwn.4.gz OLD_FILES+=usr/share/man/man4/urtwnfw.4.gz # 20161015: Remove GNU rcs OLD_FILES+=usr/bin/ci OLD_FILES+=usr/bin/co OLD_FILES+=usr/bin/merge OLD_FILES+=usr/bin/rcs OLD_FILES+=usr/bin/rcsclean OLD_FILES+=usr/bin/rcsdiff OLD_FILES+=usr/bin/rcsfreeze OLD_FILES+=usr/bin/rcsmerge OLD_FILES+=usr/bin/rlog OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz OLD_DIRS+=usr/share/doc/psd/13.rcs OLD_FILES+=usr/share/man/man1/ci.1.gz OLD_FILES+=usr/share/man/man1/co.1.gz OLD_FILES+=usr/share/man/man1/merge.1.gz OLD_FILES+=usr/share/man/man1/rcs.1.gz OLD_FILES+=usr/share/man/man1/rcsclean.1.gz OLD_FILES+=usr/share/man/man1/rcsdiff.1.gz OLD_FILES+=usr/share/man/man1/rcsfreeze.1.gz OLD_FILES+=usr/share/man/man1/rcsintro.1.gz OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz OLD_FILES+=usr/share/man/man1/rlog.1.gz OLD_FILES+=usr/share/man/man5/rcsfile.5.gz # 20161010: remove link to removed m_getclr(9) macro OLD_FILES+=usr/share/man/man9/m_getclr.9.gz # 20161003: MK_ELFCOPY_AS_OBJCOPY option retired OLD_FILES+=usr/bin/elfcopy OLD_FILES+=usr/share/man/man1/elfcopy.1.gz # 20160906: libkqueue tests moved to /usr/tests/sys/kqueue/libkqueue OLD_FILES+=usr/tests/sys/kqueue/kqtest OLD_FILES+=usr/tests/sys/kqueue/kqueue_test # 20160903: idle page zeroing support removed OLD_FILES+=usr/share/man/man9/pmap_zero_idle.9.gz # 20160901: Remove digi(4) OLD_FILES+=usr/share/man/man4/digi.4.gz # 20160819: Remove ie(4) OLD_FILES+=usr/share/man/man4/i386/ie.4.gz # 20160819: Remove spic(4) OLD_FILES+=usr/share/man/man4/spic.4.gz # 20160819: Remove wl(4) and wlconfig(8) OLD_FILES+=usr/share/man/man4/i386/wl.4.gz OLD_FILES+=usr/sbin/wlconfig OLD_FILES+=usr/share/man/man8/i386/wlconfig.8.gz # 20160819: Remove si(4) and sicontrol(8) OLD_FILES+=usr/share/man/man4/si.4.gz OLD_FILES+=usr/sbin/sicontrol OLD_FILES+=usr/share/man/man8/sicontrol.8.gz # 20160819: Remove scd(4) OLD_FILES+=usr/share/man/man4/scd.4.gz # 20160815: Remove mcd(4) OLD_FILES+=usr/share/man/man4/mcd.4.gz # 20160805: lockmgr_waiters(9) removed OLD_FILES+=usr/share/man/man9/lockmgr_waiters.9.gz # 20160703: POSIXify locales with variants OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_TW.UTF-8 OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_TW.Big5 OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_CTYPE 
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_HK.UTF-8 OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.eucCN OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.UTF-8 OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.GBK OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB2312 OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB18030 OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/sr_Latn_RS.UTF-8 OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_TIME OLD_DIRS+=usr/share/locale/sr_Latn_RS.ISO8859-2 OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.UTF-8 OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_COLLATE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_CTYPE OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MESSAGES 
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MONETARY OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_TIME OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.ISO8859-5 OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/mn_Cyrl_MN.UTF-8 OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/kk_Cyrl_KZ.UTF-8 # 20160608: removed pam_verbose_error OLD_LIBS+=usr/lib/libpam.so.5 OLD_LIBS+=usr/lib/pam_chroot.so.5 OLD_LIBS+=usr/lib/pam_deny.so.5 OLD_LIBS+=usr/lib/pam_echo.so.5 OLD_LIBS+=usr/lib/pam_exec.so.5 OLD_LIBS+=usr/lib/pam_ftpusers.so.5 OLD_LIBS+=usr/lib/pam_group.so.5 OLD_LIBS+=usr/lib/pam_guest.so.5 OLD_LIBS+=usr/lib/pam_krb5.so.5 OLD_LIBS+=usr/lib/pam_ksu.so.5 OLD_LIBS+=usr/lib/pam_lastlog.so.5 OLD_LIBS+=usr/lib/pam_login_access.so.5 OLD_LIBS+=usr/lib/pam_nologin.so.5 OLD_LIBS+=usr/lib/pam_opie.so.5 OLD_LIBS+=usr/lib/pam_opieaccess.so.5 OLD_LIBS+=usr/lib/pam_passwdqc.so.5 OLD_LIBS+=usr/lib/pam_permit.so.5 OLD_LIBS+=usr/lib/pam_radius.so.5 OLD_LIBS+=usr/lib/pam_rhosts.so.5 OLD_LIBS+=usr/lib/pam_rootok.so.5 OLD_LIBS+=usr/lib/pam_securetty.so.5 OLD_LIBS+=usr/lib/pam_self.so.5 OLD_LIBS+=usr/lib/pam_ssh.so.5 OLD_LIBS+=usr/lib/pam_tacplus.so.5 OLD_LIBS+=usr/lib/pam_unix.so.5 OLD_LIBS+=usr/lib32/libpam.so.5 OLD_LIBS+=usr/lib32/pam_chroot.so.5 OLD_LIBS+=usr/lib32/pam_deny.so.5 OLD_LIBS+=usr/lib32/pam_echo.so.5 OLD_LIBS+=usr/lib32/pam_exec.so.5 OLD_LIBS+=usr/lib32/pam_ftpusers.so.5 OLD_LIBS+=usr/lib32/pam_group.so.5 OLD_LIBS+=usr/lib32/pam_guest.so.5 OLD_LIBS+=usr/lib32/pam_krb5.so.5 OLD_LIBS+=usr/lib32/pam_ksu.so.5 OLD_LIBS+=usr/lib32/pam_lastlog.so.5 OLD_LIBS+=usr/lib32/pam_login_access.so.5 OLD_LIBS+=usr/lib32/pam_nologin.so.5 OLD_LIBS+=usr/lib32/pam_opie.so.5 OLD_LIBS+=usr/lib32/pam_opieaccess.so.5 OLD_LIBS+=usr/lib32/pam_passwdqc.so.5 OLD_LIBS+=usr/lib32/pam_permit.so.5 OLD_LIBS+=usr/lib32/pam_radius.so.5 OLD_LIBS+=usr/lib32/pam_rhosts.so.5 OLD_LIBS+=usr/lib32/pam_rootok.so.5 OLD_LIBS+=usr/lib32/pam_securetty.so.5 OLD_LIBS+=usr/lib32/pam_self.so.5 OLD_LIBS+=usr/lib32/pam_ssh.so.5 OLD_LIBS+=usr/lib32/pam_tacplus.so.5 OLD_LIBS+=usr/lib32/pam_unix.so.5 # 20160523: remove extraneous ALTQ files OLD_FILES+=usr/include/altq/altq_codel.h OLD_FILES+=usr/include/altq/altq_fairq.h # 20160519: remove DTrace Toolkit from base OLD_FILES+=usr/sbin/dtruss OLD_FILES+=usr/share/dtrace/toolkit/execsnoop OLD_FILES+=usr/share/dtrace/toolkit/hotkernel OLD_FILES+=usr/share/dtrace/toolkit/hotuser OLD_FILES+=usr/share/dtrace/toolkit/opensnoop OLD_FILES+=usr/share/dtrace/toolkit/procsystime OLD_DIRS+=usr/share/dtrace/toolkit OLD_FILES+=usr/share/man/man1/dtruss.1.gz # 20160519: stale MLINK removed OLD_FILES+=usr/share/man/man9/rman_await_resource.9.gz # 20160517: ReiserFS removed OLD_FILES+=usr/share/man/man5/reiserfs.5.gz # 20160504: tests rework OLD_FILES+=usr/tests/lib/libc/regex/data/README # 20160430: kvm_getfiles(3) removed from kvm(3) 
OLD_LIBS+=lib/libkvm.so.6 OLD_LIBS+=usr/lib32/libkvm.so.6 OLD_FILES+=usr/share/man/man3/kvm_getfiles.3.gz # 20160423: remove mroute6d OLD_FILES+=etc/rc.d/mroute6d # 20160419: rename units.lib -> definitions.units OLD_FILES+=usr/share/misc/units.lib # 20160419: remove Big5HKSCS locales OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_COLLATE OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_CTYPE OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MONETARY OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_TIME OLD_DIRS+=usr/share/locale/zh_HK.Big5HKSCS OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_COLLATE OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_CTYPE OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MESSAGES OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MONETARY OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_NUMERIC OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_TIME OLD_DIRS+=usr/share/locale/zh_Hant_HK.Big5HKSCS # 20160317: rman_res_t size bump to uintmax_t OLD_LIBS+=usr/lib/libdevinfo.so.5 OLD_LIBS+=usr/lib32/libdevinfo.so.5 # 20160305: new clang import which bumps version from 3.7.1 to 3.8.0. OLD_FILES+=usr/bin/macho-dump OLD_FILES+=usr/bin/tblgen OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.7.1/include/sanitizer OLD_FILES+=usr/lib/clang/3.7.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.7.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/altivec.h OLD_FILES+=usr/lib/clang/3.7.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.7.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/cpuid.h OLD_FILES+=usr/lib/clang/3.7.1/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.7.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/htmxlintrin.h 
OLD_FILES+=usr/lib/clang/3.7.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/immintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.7.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.7.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/module.modulemap OLD_FILES+=usr/lib/clang/3.7.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/vadefs.h OLD_FILES+=usr/lib/clang/3.7.1/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.7.1/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.7.1/include OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.7.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.7.1/lib OLD_DIRS+=usr/lib/clang/3.7.1 # 20160301: Remove taskqueue_enqueue_fast OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz # 20160225: Remove casperd and libcapsicum. 
OLD_FILES+=sbin/casperd OLD_FILES+=etc/rc.d/casperd OLD_FILES+=usr/share/man/man8/casperd.8.gz OLD_FILES+=usr/include/libcapsicum.h OLD_FILES+=usr/include/libcapsicum_service.h OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz OLD_FILES+=usr/include/libcapsicum_dns.h OLD_FILES+=usr/include/libcapsicum_grp.h OLD_FILES+=usr/include/libcapsicum_impl.h OLD_FILES+=usr/include/libcapsicum_pwd.h OLD_FILES+=usr/include/libcapsicum_random.h OLD_FILES+=usr/include/libcapsicum_sysctl.h OLD_FILES+=libexec/casper/dns OLD_FILES+=libexec/casper/grp OLD_FILES+=libexec/casper/pwd OLD_FILES+=libexec/casper/random OLD_FILES+=libexec/casper/sysctl OLD_FILES+=libexec/casper/.debug/random.debug OLD_FILES+=libexec/casper/.debug/dns.debug OLD_FILES+=libexec/casper/.debug/sysctl.debug OLD_FILES+=libexec/casper/.debug/pwd.debug OLD_FILES+=libexec/casper/.debug/grp.debug OLD_DIRS+=libexec/casper/.debug OLD_DIRS+=libexec/casper OLD_FILES+=usr/lib/libcapsicum.a OLD_FILES+=usr/lib/libcapsicum.so OLD_LIBS+=lib/libcapsicum.so.0 OLD_FILES+=usr/lib/libcapsicum_p.a OLD_FILES+=usr/lib32/libcapsicum.a OLD_FILES+=usr/lib32/libcapsicum.so OLD_LIBS+=usr/lib32/libcapsicum.so.0 OLD_FILES+=usr/lib32/libcapsicum_p.a # 20160223: functionality from mkulzma(1) merged into mkuzip(1) OLD_FILES+=usr/bin/mkulzma OLD_FILES+=usr/share/man/man4/geom_uncompress.4.gz OLD_FILES+=usr/share/man/man8/mkulzma.8.gz # 20160211: Remove obsolete unbound-control-setup OLD_FILES+=usr/sbin/unbound-control-setup # 20160121: cc.h moved OLD_FILES+=usr/include/netinet/cc.h # 20160116: Update mandoc to cvs snapshot 20160116 OLD_FILES+=usr/share/mdocml/example.style.css OLD_FILES+=usr/share/mdocml/style.css OLD_DIRS+=usr/share/mdocml # 20160114: SA-16:06.snmpd OLD_FILES+=usr/share/examples/etc/snmpd.config # 20160107: GNU ld installed as ld.bfd and linked as ld OLD_FILES+=usr/lib/debug/usr/bin/ld.debug # 20151225: new clang import which bumps version from 3.7.0 to 3.7.1. 
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/allocator_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/asan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/common_interface_defs.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/coverage_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/dfsan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/linux_syscall_hooks.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/lsan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/msan_interface.h OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/tsan_interface_atomic.h OLD_DIRS+=usr/lib/clang/3.7.0/include/sanitizer OLD_FILES+=usr/lib/clang/3.7.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.7.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.7.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.7.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512cdintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512dqintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vldqintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.7.0/include/cuda_builtin_vars.h OLD_FILES+=usr/lib/clang/3.7.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/fxsrintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/htmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/htmxlintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.7.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.7.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.7.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/s390intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/vadefs.h OLD_FILES+=usr/lib/clang/3.7.0/include/vecintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/xopintrin.h OLD_FILES+=usr/lib/clang/3.7.0/include/xtestintrin.h OLD_DIRS+=usr/lib/clang/3.7.0/include 
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.7.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.7.0/lib OLD_DIRS+=usr/lib/clang/3.7.0 # 20151130: libelf moved from /usr/lib to /lib (libkvm dependency in r291406) OLD_LIBS+=usr/lib/libelf.so.2 # 20151115: Fix bad upgrade scheme OLD_FILES+=usr/share/locale/zh_CN.GB18030/zh_Hans_CN.GB18030 OLD_FILES+=usr/share/locale/zh_CN.GB2312/zh_Hans_CN.GB2312 OLD_FILES+=usr/share/locale/zh_CN.GBK/zh_Hans_CN.GBK OLD_FILES+=usr/share/locale/zh_CN.UTF-8/zh_Hans_CN.UTF-8 OLD_FILES+=usr/share/locale/zh_CN.eucCN/zh_Hans_CN.eucCN OLD_FILES+=usr/share/locale/zh_TW.Big5/zh_Hant_TW.Big5 OLD_FILES+=usr/share/locale/zh_TW.UTF-8/zh_Hant_TW.UTF-8 # 20151107: String collation improvements OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE OLD_DIRS+=usr/share/locale/UTF-8 OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME OLD_DIRS+=usr/share/locale/kk_KZ.PT154/ OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-1 OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE OLD_DIRS+=usr/share/locale/la_LN.ISO8859-13 OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-15 OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-2 OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.ISO8859-4 OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME OLD_DIRS+=usr/share/locale/la_LN.US-ASCII OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MESSAGES OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_TIME 
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_COLLATE OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MONETARY OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_CTYPE OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_NUMERIC OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-4 OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME OLD_DIRS+=usr/share/locale/no_NO.ISO8859-1 OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME OLD_DIRS+=usr/share/locale/no_NO.ISO8859-15 OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME OLD_DIRS+=usr/share/locale/no_NO.UTF-8 OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_COLLATE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_TIME OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_CTYPE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MESSAGES OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MONETARY OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-2 OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_COLLATE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MONETARY OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_CTYPE OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_TIME OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MESSAGES OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-5 OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_COLLATE OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MONETARY OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_CTYPE OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_TIME OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_NUMERIC OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MESSAGES OLD_DIRS+=usr/share/locale/sr_YU.UTF-8 # 20151101: added missing _test suffix on multiple tests in lib/libc OLD_FILES+=usr/tests/lib/libc/c063/faccessat OLD_FILES+=usr/tests/lib/libc/c063/fchmodat OLD_FILES+=usr/tests/lib/libc/c063/fchownat OLD_FILES+=usr/tests/lib/libc/c063/fexecve OLD_FILES+=usr/tests/lib/libc/c063/fstatat OLD_FILES+=usr/tests/lib/libc/c063/linkat OLD_FILES+=usr/tests/lib/libc/c063/mkdirat OLD_FILES+=usr/tests/lib/libc/c063/mkfifoat OLD_FILES+=usr/tests/lib/libc/c063/mknodat OLD_FILES+=usr/tests/lib/libc/c063/openat OLD_FILES+=usr/tests/lib/libc/c063/readlinkat OLD_FILES+=usr/tests/lib/libc/c063/renameat OLD_FILES+=usr/tests/lib/libc/c063/symlinkat OLD_FILES+=usr/tests/lib/libc/c063/unlinkat OLD_FILES+=usr/tests/lib/libc/c063/utimensat OLD_FILES+=usr/tests/lib/libc/string/memchr OLD_FILES+=usr/tests/lib/libc/string/memcpy OLD_FILES+=usr/tests/lib/libc/string/memmem OLD_FILES+=usr/tests/lib/libc/string/memset OLD_FILES+=usr/tests/lib/libc/string/strcat OLD_FILES+=usr/tests/lib/libc/string/strchr OLD_FILES+=usr/tests/lib/libc/string/strcmp OLD_FILES+=usr/tests/lib/libc/string/strcpy 
OLD_FILES+=usr/tests/lib/libc/string/strcspn OLD_FILES+=usr/tests/lib/libc/string/strerror OLD_FILES+=usr/tests/lib/libc/string/strlen OLD_FILES+=usr/tests/lib/libc/string/strpbrk OLD_FILES+=usr/tests/lib/libc/string/strrchr OLD_FILES+=usr/tests/lib/libc/string/strspn OLD_FILES+=usr/tests/lib/libc/string/swab # 20151101: 430.status-rwho was renamed to 430.status-uptime OLD_FILES+=etc/periodic/daily/430.status-rwho # 20151030: OpenSSL 1.0.2d import OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_certs.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl_str.3.gz OLD_FILES+=usr/share/openssl/man/man3/d2i_509_CRL_fp.3.gz OLD_LIBS+=lib/libcrypto.so.7 OLD_LIBS+=usr/lib/libssl.so.7 OLD_LIBS+=usr/lib32/libcrypto.so.7 OLD_LIBS+=usr/lib32/libssl.so.7 # 20151029: LinuxKPI moved to sys/compat/linuxkpi OLD_FILES+=usr/include/dev/usb/usb_compat_linux.h # 20151015: test symbols moved to /usr/lib/debug OLD_DIRS+=usr/tests/lib/atf/libatf-c++/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/atf_c++_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/build_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/check_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/config_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/macros_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/tests_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/utils_test.debug OLD_DIRS+=usr/tests/lib/atf/libatf-c++/detail/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/application_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/env_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/exceptions_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/fs_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/process_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/sanity_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/text_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/version_helper.debug OLD_DIRS+=usr/tests/lib/atf/libatf-c/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/atf_c_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/build_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/check_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/config_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/error_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/macros_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tc_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tp_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/utils_test.debug OLD_DIRS+=usr/tests/lib/atf/libatf-c/detail/.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/dynstr_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/env_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/fs_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/list_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/map_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_helpers.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/sanity_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/text_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/user_test.debug OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/version_helper.debug 
OLD_DIRS+=usr/tests/lib/atf/test-programs/.debug OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/c_helpers.debug OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/cpp_helpers.debug OLD_DIRS+=usr/tests/lib/libc/c063/.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/faccessat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchmodat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchownat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fexecve.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/fstatat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/linkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkdirat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkfifoat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/mknodat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/openat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/readlinkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/renameat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/symlinkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/unlinkat.debug OLD_FILES+=usr/tests/lib/libc/c063/.debug/utimensat.debug OLD_DIRS+=usr/tests/lib/libc/db/.debug OLD_FILES+=usr/tests/lib/libc/db/.debug/h_db.debug OLD_DIRS+=usr/tests/lib/libc/gen/.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/alarm_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/arc4random_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/assert_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/basedirname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/dir_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/floatunditf_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fnmatch_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify2_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetmask_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetround_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/ftok_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/getcwd_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/getgrent_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/glob_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/humanize_number_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/isnan_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/nice_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/pause_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/raise_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/realpath_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/setdomainname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/sethostname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/sleep_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/syslog_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/time_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/ttyname_test.debug OLD_FILES+=usr/tests/lib/libc/gen/.debug/vis_test.debug OLD_DIRS+=usr/tests/lib/libc/gen/execve/.debug OLD_FILES+=usr/tests/lib/libc/gen/execve/.debug/execve_test.debug OLD_DIRS+=usr/tests/lib/libc/gen/posix_spawn/.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/fileactions_test.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_fileactions.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawn.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawnattr.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawn_test.debug OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawnattr_test.debug OLD_DIRS+=usr/tests/lib/libc/hash/.debug 
OLD_FILES+=usr/tests/lib/libc/hash/.debug/h_hash.debug OLD_FILES+=usr/tests/lib/libc/hash/.debug/sha2_test.debug OLD_DIRS+=usr/tests/lib/libc/inet/.debug OLD_FILES+=usr/tests/lib/libc/inet/.debug/inet_network_test.debug OLD_DIRS+=usr/tests/lib/libc/locale/.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/io_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbrtowc_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbsnrtowcs_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbstowcs_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbtowc_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcscspn_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcspbrk_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcsspn_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcstod_test.debug OLD_FILES+=usr/tests/lib/libc/locale/.debug/wctomb_test.debug OLD_DIRS+=usr/tests/lib/libc/net/.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/ether_aton_test.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/getprotoent_test.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_dns_server.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_nsd_recurse.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_protoent.debug OLD_FILES+=usr/tests/lib/libc/net/.debug/h_servent.debug OLD_DIRS+=usr/tests/lib/libc/regex/.debug OLD_FILES+=usr/tests/lib/libc/regex/.debug/exhaust_test.debug OLD_FILES+=usr/tests/lib/libc/regex/.debug/h_regex.debug OLD_FILES+=usr/tests/lib/libc/regex/.debug/regex_att_test.debug OLD_DIRS+=usr/tests/lib/libc/ssp/.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_fgets.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_getcwd.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_gets.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memcpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memmove.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memset.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_raw.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_read.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_readlink.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_snprintf.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_sprintf.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpcpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpncpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcat.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncat.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncpy.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsnprintf.debug OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsprintf.debug OLD_DIRS+=usr/tests/lib/libc/stdio/.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/clearerr_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fflush_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen2_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fopen_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fputc_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/mktemp_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/popen_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/printf_test.debug OLD_FILES+=usr/tests/lib/libc/stdio/.debug/scanf_test.debug OLD_DIRS+=usr/tests/lib/libc/stdlib/.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/abs_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/atoi_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/div_test.debug 
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/exit_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/getenv_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt_long.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/hsearch_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/posix_memalign_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/random_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtod_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtol_test.debug OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/system_test.debug OLD_DIRS+=usr/tests/lib/libc/string/.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memchr.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memcpy.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memmem.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/memset.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcat.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strchr.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcmp.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcpy.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strcspn.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strerror.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strlen.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strpbrk.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strrchr.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/strspn.debug OLD_FILES+=usr/tests/lib/libc/string/.debug/swab.debug OLD_DIRS+=usr/tests/lib/libc/sys/.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/access_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/chroot_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/clock_gettime_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/connect_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/dup_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/fsync_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getcontext_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getgroups_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getitimer_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getlogin_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getpid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getrusage_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/getsid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/gettimeofday_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/issetugid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/kevent_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/kill_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/link_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/listen_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mincore_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkdir_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkfifo_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mknod_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mlock_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mmap_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/mprotect_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgctl_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgget_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgrcv_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgsnd_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/msync_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/nanosleep_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe2_test.debug 
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/poll_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/revoke_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/select_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/setrlimit_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/setuid_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigaction_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigqueue_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigtimedwait_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/socketpair_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/stat_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/timer_create_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/truncate_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/ucontext_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/umask_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/unlink_test.debug OLD_FILES+=usr/tests/lib/libc/sys/.debug/write_test.debug OLD_DIRS+=usr/tests/lib/libc/termios/.debug OLD_FILES+=usr/tests/lib/libc/termios/.debug/tcsetpgrp_test.debug OLD_DIRS+=usr/tests/lib/libc/tls/.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/h_tls_dlopen.so.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/libh_tls_dynamic.so.1.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dlopen_test.debug OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dynamic_test.debug OLD_DIRS+=usr/tests/lib/libc/ttyio/.debug OLD_FILES+=usr/tests/lib/libc/ttyio/.debug/ttyio_test.debug OLD_DIRS+=usr/tests/lib/libcrypt/.debug OLD_FILES+=usr/tests/lib/libcrypt/.debug/crypt_tests.debug OLD_DIRS+=usr/tests/lib/libmp/.debug OLD_FILES+=usr/tests/lib/libmp/.debug/legacy_test.debug OLD_DIRS+=usr/tests/lib/libnv/.debug OLD_FILES+=usr/tests/lib/libnv/.debug/dnv_tests.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nv_array_tests.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nv_tests.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_add_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_exists_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_free_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_get_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_move_test.debug OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_send_recv_test.debug OLD_DIRS+=usr/tests/lib/libpam/.debug OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_ctype.debug OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readlinev.debug OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readword.debug OLD_DIRS+=usr/tests/lib/libproc/.debug OLD_FILES+=usr/tests/lib/libproc/.debug/proc_test.debug OLD_FILES+=usr/tests/lib/libproc/.debug/target_prog.debug OLD_DIRS+=usr/tests/lib/librt/.debug OLD_FILES+=usr/tests/lib/librt/.debug/sched_test.debug OLD_FILES+=usr/tests/lib/librt/.debug/sem_test.debug OLD_DIRS+=usr/tests/lib/libthr/.debug OLD_FILES+=usr/tests/lib/libthr/.debug/barrier_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/cond_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/condwait_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/detach_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/equal_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/fork_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/fpu_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_atexit.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_cancel.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_exit.debug OLD_FILES+=usr/tests/lib/libthr/.debug/h_resolv.debug OLD_FILES+=usr/tests/lib/libthr/.debug/join_test.debug 
OLD_FILES+=usr/tests/lib/libthr/.debug/kill_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/mutex_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/once_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/preempt_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/rwlock_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sem_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/siglongjmp_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sigmask_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sigsuspend_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/sleep_test.debug OLD_FILES+=usr/tests/lib/libthr/.debug/swapcontext_test.debug OLD_DIRS+=usr/tests/lib/libthr/dlopen/.debug OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/dlopen_test.debug OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/h_pthread_dlopen.so.1.debug OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/main_pthread_create_test.debug OLD_DIRS+=usr/tests/lib/libutil/.debug OLD_FILES+=usr/tests/lib/libutil/.debug/flopen_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/grp_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/humanize_number_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/pidfile_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain-nodomain_test.debug OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain_test.debug OLD_DIRS+=usr/tests/lib/libxo/.debug OLD_FILES+=usr/tests/lib/libxo/.debug/libenc_test.so.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_01.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_02.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_03.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_04.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_05.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_06.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_07.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_08.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_09.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_10.debug OLD_FILES+=usr/tests/lib/libxo/.debug/test_11.debug OLD_DIRS+=usr/tests/lib/msun/.debug OLD_FILES+=usr/tests/lib/msun/.debug/acos_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/asin_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/atan_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/cbrt_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/ceil_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/cos_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/cosh_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/erf_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/exp_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/fmod_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/infinity_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/ldexp_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/log_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/pow_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/precision_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/round_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/scalbn_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/sin_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/sinh_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/sqrt_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/tan_test.debug OLD_FILES+=usr/tests/lib/msun/.debug/tanh_test.debug OLD_DIRS+=usr/tests/libexec/rtld-elf/.debug OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/ld_library_pathfds.debug OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/libpythagoras.so.0.debug OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/target.debug OLD_DIRS+=usr/tests/sbin/devd/.debug OLD_FILES+=usr/tests/sbin/devd/.debug/client_test.debug 
OLD_DIRS+=usr/tests/sbin/dhclient/.debug OLD_FILES+=usr/tests/sbin/dhclient/.debug/option-domain-search_test.debug OLD_DIRS+=usr/tests/share/examples/tests/atf/.debug OLD_FILES+=usr/tests/share/examples/tests/atf/.debug/printf_test.debug OLD_DIRS+=usr/tests/share/examples/tests/plain/.debug OLD_FILES+=usr/tests/share/examples/tests/plain/.debug/printf_test.debug OLD_DIRS+=usr/tests/sys/aio/.debug OLD_FILES+=usr/tests/sys/aio/.debug/aio_kqueue_test.debug OLD_FILES+=usr/tests/sys/aio/.debug/aio_test.debug OLD_FILES+=usr/tests/sys/aio/.debug/lio_kqueue_test.debug OLD_DIRS+=usr/tests/sys/fifo/.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_create.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_io.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_misc.debug OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_open.debug OLD_DIRS+=usr/tests/sys/file/.debug OLD_FILES+=usr/tests/sys/file/.debug/closefrom_test.debug OLD_FILES+=usr/tests/sys/file/.debug/dup_test.debug OLD_FILES+=usr/tests/sys/file/.debug/fcntlflags_test.debug OLD_FILES+=usr/tests/sys/file/.debug/flock_helper.debug OLD_FILES+=usr/tests/sys/file/.debug/ftruncate_test.debug OLD_FILES+=usr/tests/sys/file/.debug/newfileops_on_fork_test.debug OLD_DIRS+=usr/tests/sys/kern/.debug OLD_FILES+=usr/tests/sys/kern/.debug/kern_descrip_test.debug OLD_FILES+=usr/tests/sys/kern/.debug/ptrace_test.debug OLD_FILES+=usr/tests/sys/kern/.debug/unix_seqpacket_test.debug OLD_DIRS+=usr/tests/sys/kern/execve/.debug OLD_FILES+=usr/tests/sys/kern/execve/.debug/execve_helper.debug OLD_FILES+=usr/tests/sys/kern/execve/.debug/good_aout.debug OLD_DIRS+=usr/tests/sys/kqueue/.debug OLD_FILES+=usr/tests/sys/kqueue/.debug/kqtest.debug OLD_DIRS+=usr/tests/sys/mqueue/.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest1.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest2.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest3.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest4.debug OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest5.debug OLD_DIRS+=usr/tests/sys/netinet/.debug OLD_FILES+=usr/tests/sys/netinet/.debug/udp_dontroute.debug OLD_DIRS+=usr/tests/sys/pjdfstest/.debug OLD_FILES+=usr/tests/sys/pjdfstest/.debug/pjdfstest.debug OLD_DIRS+=usr/tests/sys/vm/.debug OLD_FILES+=usr/tests/sys/vm/.debug/mmap_test.debug # 20151015: Rename files due to file-installed-as-dir bug OLD_FILES+=usr/share/doc/legal/realtek OLD_FILES+=usr/share/doc/legal/realtek/LICENSE OLD_DIRS+=usr/share/doc/legal/realtek OLD_DIRS+=usr/share/doc/legal/intel_ipw OLD_FILES+=usr/share/doc/legal/intel_ipw/LICENSE OLD_FILES+=usr/share/doc/legal/intel_iwn OLD_FILES+=usr/share/doc/legal/intel_iwn/LICENSE OLD_DIRS+=usr/share/doc/legal/intel_iwn OLD_DIRS+=usr/share/doc/legal/intel_iwi OLD_FILES+=usr/share/doc/legal/intel_iwi/LICENSE OLD_DIRS+=usr/share/doc/legal/intel_wpi OLD_FILES+=usr/share/doc/legal/intel_wpi/LICENSE # 20151006: new libc++ import OLD_FILES+=usr/include/c++/__tuple_03 OLD_FILES+=usr/include/c++/v1/__tuple_03 OLD_FILES+=usr/include/c++/v1/tr1/__tuple_03 # 20151006: new clang import which bumps version from 3.6.1 to 3.7.0. 
OLD_FILES+=usr/lib/clang/3.6.1/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.6.1/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/altivec.h OLD_FILES+=usr/lib/clang/3.6.1/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.6.1/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/cpuid.h OLD_FILES+=usr/lib/clang/3.6.1/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/immintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.6.1/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.6.1/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/module.modulemap OLD_FILES+=usr/lib/clang/3.6.1/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/xmmintrin.h OLD_FILES+=usr/lib/clang/3.6.1/include/xopintrin.h OLD_DIRS+=usr/lib/clang/3.6.1/include OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.6.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.6.1/lib OLD_DIRS+=usr/lib/clang/3.6.1 # 20150928: unused sgsmsg utility is removed OLD_FILES+=usr/bin/sgsmsg # 20150926: remove links to removed/unimplemented mbuf(9) macros OLD_FILES+=usr/share/man/man9/MEXT_ADD_REF.9.gz OLD_FILES+=usr/share/man/man9/MEXTFREE.9.gz 
OLD_FILES+=usr/share/man/man9/MEXT_IS_REF.9.gz OLD_FILES+=usr/share/man/man9/MEXT_REM_REF.9.gz OLD_FILES+=usr/share/man/man9/MFREE.9.gz # 20150818: *allocm() are gone in jemalloc 4.0.0 OLD_FILES+=usr/share/man/man3/allocm.3.gz OLD_FILES+=usr/share/man/man3/dallocm.3.gz OLD_FILES+=usr/share/man/man3/nallocm.3.gz OLD_FILES+=usr/share/man/man3/rallocm.3.gz OLD_FILES+=usr/share/man/man3/sallocm.3.gz # 20150802: Remove netbsd's test on pw(8) OLD_FILES+=usr/tests/usr.sbin/pw/pw_test # 20150719: Remove libarchive.pc OLD_FILES+=usr/libdata/pkgconfig/libarchive.pc # 20150705: Rename DTrace provider man pages. OLD_FILES+=usr/share/man/man4/dtrace-io.4.gz OLD_FILES+=usr/share/man/man4/dtrace-ip.4.gz OLD_FILES+=usr/share/man/man4/dtrace-proc.4.gz OLD_FILES+=usr/share/man/man4/dtrace-sched.4.gz OLD_FILES+=usr/share/man/man4/dtrace-tcp.4.gz OLD_FILES+=usr/share/man/man4/dtrace-udp.4.gz # 20150704: nvlist private headers no longer installed OLD_FILES+=usr/include/sys/nv_impl.h OLD_FILES+=usr/include/sys/nvlist_impl.h OLD_FILES+=usr/include/sys/nvpair_impl.h # 20150624 OLD_LIBS+=usr/lib/libugidfw.so.4 OLD_LIBS+=usr/lib32/libugidfw.so.4 # 20150604: Move nvlist man pages to section 9. OLD_FILES+=usr/share/man/man3/libnv.3.gz OLD_FILES+=usr/share/man/man3/nv.3.gz OLD_FILES+=usr/share/man/man3/nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_stringf.3.gz OLD_FILES+=usr/share/man/man3/nvlist_add_stringv.3.gz OLD_FILES+=usr/share/man/man3/nvlist_clone.3.gz OLD_FILES+=usr/share/man/man3/nvlist_create.3.gz OLD_FILES+=usr/share/man/man3/nvlist_destroy.3.gz OLD_FILES+=usr/share/man/man3/nvlist_dump.3.gz OLD_FILES+=usr/share/man/man3/nvlist_empty.3.gz OLD_FILES+=usr/share/man/man3/nvlist_error.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_exists_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_fdump.3.gz OLD_FILES+=usr/share/man/man3/nvlist_flags.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_free_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_parent.3.gz OLD_FILES+=usr/share/man/man3/nvlist_get_string.3.gz 
OLD_FILES+=usr/share/man/man3/nvlist_move_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_move_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_move_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_move_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_next.3.gz OLD_FILES+=usr/share/man/man3/nvlist_pack.3.gz OLD_FILES+=usr/share/man/man3/nvlist_recv.3.gz OLD_FILES+=usr/share/man/man3/nvlist_send.3.gz OLD_FILES+=usr/share/man/man3/nvlist_set_error.3.gz OLD_FILES+=usr/share/man/man3/nvlist_size.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_take_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_unpack.3.gz OLD_FILES+=usr/share/man/man3/nvlist_xfer.3.gz # 20150702: Remove duplicated nvlist includes. OLD_FILES+=usr/include/dnv.h OLD_FILES+=usr/include/nv.h # 20150528: PCI IOV device driver methods moved to a separate kobj interface. OLD_FILES+=usr/share/man/man9/PCI_ADD_VF.9.gz OLD_FILES+=usr/share/man/man9/PCI_INIT_IOV.9.gz OLD_FILES+=usr/share/man/man9/PCI_UNINIT_IOV.9.gz # 20150525: new clang import which bumps version from 3.6.0 to 3.6.1. OLD_FILES+=usr/lib/clang/3.6.0/include/__stddef_max_align_t.h OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_aes.h OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_pclmul.h OLD_FILES+=usr/lib/clang/3.6.0/include/adxintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/altivec.h OLD_FILES+=usr/lib/clang/3.6.0/include/ammintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/arm_acle.h OLD_FILES+=usr/lib/clang/3.6.0/include/arm_neon.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx2intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512bwintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512erintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512fintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlbwintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/avxintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/bmi2intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/bmiintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/cpuid.h OLD_FILES+=usr/lib/clang/3.6.0/include/emmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/f16cintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/fma4intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/fmaintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/ia32intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/immintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/lzcntintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/mm3dnow.h OLD_FILES+=usr/lib/clang/3.6.0/include/mm_malloc.h OLD_FILES+=usr/lib/clang/3.6.0/include/mmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/module.modulemap OLD_FILES+=usr/lib/clang/3.6.0/include/nmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/pmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/popcntintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/prfchwintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/rdseedintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/rtmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/shaintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/smmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/tbmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/tmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/wmmintrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/x86intrin.h OLD_FILES+=usr/lib/clang/3.6.0/include/xmmintrin.h 
OLD_FILES+=usr/lib/clang/3.6.0/include/xopintrin.h OLD_DIRS+=usr/lib/clang/3.6.0/include OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.6.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.6.0/lib OLD_DIRS+=usr/lib/clang/3.6.0 # 20150521 OLD_FILES+=usr/bin/demandoc OLD_FILES+=usr/share/man/man1/demandoc.1.gz OLD_FILES+=usr/share/man/man3/mandoc.3.gz OLD_FILES+=usr/share/man/man3/mandoc_headers.3.gz # 20150520 OLD_FILES+=usr/lib/libheimsqlite.a OLD_FILES+=usr/lib/libheimsqlite.so OLD_LIBS+=usr/lib/libheimsqlite.so.11 OLD_FILES+=usr/lib/libheimsqlite_p.a OLD_FILES+=usr/lib32/libheimsqlite.a OLD_FILES+=usr/lib32/libheimsqlite.so OLD_LIBS+=usr/lib32/libheimsqlite.so.11 OLD_FILES+=usr/lib32/libheimsqlite_p.a # 20150518: tzdata2015c update OLD_FILES+=usr/share/zoneinfo/America/Montreal # 20150506 OLD_FILES+=usr/share/man/man9/NDHASGIANT.9.gz # 20150504 OLD_FILES+=usr/share/examples/etc/libmap32.conf OLD_FILES+=usr/include/bsdstat.h OLD_LIBS+=usr/lib32/private/libatf-c++.so.2 OLD_LIBS+=usr/lib32/private/libbsdstat.so.1 OLD_LIBS+=usr/lib32/private/libheimipcs.so.11 OLD_LIBS+=usr/lib32/private/libsqlite3.so.0 OLD_LIBS+=usr/lib32/private/libunbound.so.5 OLD_LIBS+=usr/lib32/private/libatf-c.so.1 OLD_LIBS+=usr/lib32/private/libheimipcc.so.11 OLD_LIBS+=usr/lib32/private/libldns.so.5 OLD_LIBS+=usr/lib32/private/libssh.so.5 OLD_LIBS+=usr/lib32/private/libucl.so.1 OLD_DIRS+=usr/lib32/private OLD_LIBS+=usr/lib/private/libatf-c++.so.2 OLD_LIBS+=usr/lib/private/libbsdstat.so.1 OLD_LIBS+=usr/lib/private/libheimipcs.so.11 OLD_LIBS+=usr/lib/private/libsqlite3.so.0 OLD_LIBS+=usr/lib/private/libunbound.so.5 OLD_LIBS+=usr/lib/private/libatf-c.so.1 OLD_LIBS+=usr/lib/private/libheimipcc.so.11 OLD_LIBS+=usr/lib/private/libldns.so.5 OLD_LIBS+=usr/lib/private/libssh.so.5 OLD_LIBS+=usr/lib/private/libucl.so.1 OLD_DIRS+=usr/lib/private # 20150501 OLD_FILES+=usr/bin/soeliminate OLD_FILES+=usr/share/man/man1/soeliminate.1.gz # 20150501: Remove the nvlist_.*[vf] functions manpages. 
OLD_FILES+=usr/share/man/man3/nvlist_addf_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addf_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_addv_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsf_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_existsv_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freef_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_null.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_freev_type.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getf_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_getv_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movef_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movef_descriptor.3.gz 
OLD_FILES+=usr/share/man/man3/nvlist_movef_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movef_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_movev_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takef_string.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_binary.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_bool.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_descriptor.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_number.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_nvlist.3.gz OLD_FILES+=usr/share/man/man3/nvlist_takev_string.3.gz # 20150429: remove never written documentation OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz # 20150427: test/sys/kern/mmap_test moved to test/sys/vm/mmap_test OLD_FILES+=usr/tests/sys/kern/mmap_test # 20150422: zlib.c moved from net to libkern OLD_FILES+=usr/include/net/zlib.h OLD_FILES+=usr/include/net/zutil.h # 20150418 OLD_FILES+=sbin/mount_oldnfs OLD_FILES+=usr/share/man/man8/mount_oldnfs.8.gz # 20150416: ALTQ moved to net/altq OLD_FILES+=usr/include/altq/altq_rmclass_debug.h OLD_FILES+=usr/include/altq/altq.h OLD_FILES+=usr/include/altq/altq_cdnr.h OLD_FILES+=usr/include/altq/altq_hfsc.h OLD_FILES+=usr/include/altq/altq_priq.h OLD_FILES+=usr/include/altq/altqconf.h OLD_FILES+=usr/include/altq/altq_classq.h OLD_FILES+=usr/include/altq/altq_red.h OLD_FILES+=usr/include/altq/if_altq.h OLD_FILES+=usr/include/altq/altq_var.h OLD_FILES+=usr/include/altq/altq_rmclass.h OLD_FILES+=usr/include/altq/altq_cbq.h OLD_FILES+=usr/include/altq/altq_rio.h OLD_DIRS+=usr/include/altq # 20150330: ntp 4.2.8p1 OLD_FILES+=usr/share/doc/ntp/driver1.html OLD_FILES+=usr/share/doc/ntp/driver10.html OLD_FILES+=usr/share/doc/ntp/driver11.html OLD_FILES+=usr/share/doc/ntp/driver12.html OLD_FILES+=usr/share/doc/ntp/driver16.html OLD_FILES+=usr/share/doc/ntp/driver18.html OLD_FILES+=usr/share/doc/ntp/driver19.html OLD_FILES+=usr/share/doc/ntp/driver2.html OLD_FILES+=usr/share/doc/ntp/driver20.html OLD_FILES+=usr/share/doc/ntp/driver22.html OLD_FILES+=usr/share/doc/ntp/driver26.html OLD_FILES+=usr/share/doc/ntp/driver27.html OLD_FILES+=usr/share/doc/ntp/driver28.html OLD_FILES+=usr/share/doc/ntp/driver29.html OLD_FILES+=usr/share/doc/ntp/driver3.html OLD_FILES+=usr/share/doc/ntp/driver30.html OLD_FILES+=usr/share/doc/ntp/driver32.html OLD_FILES+=usr/share/doc/ntp/driver33.html OLD_FILES+=usr/share/doc/ntp/driver34.html OLD_FILES+=usr/share/doc/ntp/driver35.html OLD_FILES+=usr/share/doc/ntp/driver36.html OLD_FILES+=usr/share/doc/ntp/driver37.html OLD_FILES+=usr/share/doc/ntp/driver4.html OLD_FILES+=usr/share/doc/ntp/driver5.html OLD_FILES+=usr/share/doc/ntp/driver6.html OLD_FILES+=usr/share/doc/ntp/driver7.html OLD_FILES+=usr/share/doc/ntp/driver8.html OLD_FILES+=usr/share/doc/ntp/driver9.html OLD_FILES+=usr/share/doc/ntp/ldisc.html OLD_FILES+=usr/share/doc/ntp/measure.html OLD_FILES+=usr/share/doc/ntp/mx4200data.html OLD_FILES+=usr/share/doc/ntp/notes.html OLD_FILES+=usr/share/doc/ntp/patches.html OLD_FILES+=usr/share/doc/ntp/porting.html OLD_FILES+=usr/share/man/man1/sntp.1.gz # 20150329 .if ${TARGET_ARCH} == "arm" 
OLD_FILES+=usr/include/bootconfig.h .endif # 20150326 OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz # 20150315: new clang import which bumps version from 3.5.1 to 3.6.0. OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.5.1/altivec.h OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h OLD_FILES+=usr/include/clang/3.5.1/cpuid.h OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h OLD_FILES+=usr/include/clang/3.5.1/immintrin.h OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h OLD_FILES+=usr/include/clang/3.5.1/module.modulemap OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h OLD_DIRS+=usr/include/clang/3.5.1 OLD_DIRS+=usr/include/clang OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd OLD_DIRS+=usr/lib/clang/3.5.1/lib OLD_DIRS+=usr/lib/clang/3.5.1 # 20150302: binutils documentation distributed as a manpage OLD_FILES+=usr/share/doc/binutils/as.txt OLD_FILES+=usr/share/doc/binutils/ld.txt OLD_DIRS+=usr/share/doc/binutils # 20150222: Removed bcd(6) and ppt(6) OLD_FILES+=usr/bin/bcd OLD_FILES+=usr/bin/ppt OLD_FILES+=usr/share/man/man6/bcd.6.gz OLD_FILES+=usr/share/man/man6/ppt.6.gz # 20150217: Removed remnants of ar(4) driver OLD_FILES+=usr/include/dev/ic/hd64570.h # 20150212: /usr/games moving into /usr/bin OLD_FILES+=usr/games/bcd OLD_FILES+=usr/games/caesar OLD_FILES+=usr/games/factor OLD_FILES+=usr/games/fortune 
OLD_FILES+=usr/games/grdc OLD_FILES+=usr/games/morse OLD_FILES+=usr/games/number OLD_FILES+=usr/games/pom OLD_FILES+=usr/games/ppt OLD_FILES+=usr/games/primes OLD_FILES+=usr/games/random OLD_FILES+=usr/games/rot13 OLD_FILES+=usr/games/strfile OLD_FILES+=usr/games/unstr OLD_DIRS+=usr/games # 20150209: liblzma header OLD_FILES+=usr/include/lzma/lzma.h # 20150124: spl.9 and friends OLD_FILES+=usr/share/man/man9/spl.9.gz OLD_FILES+=usr/share/man/man9/spl0.9.gz OLD_FILES+=usr/share/man/man9/splbio.9.gz OLD_FILES+=usr/share/man/man9/splclock.9.gz OLD_FILES+=usr/share/man/man9/splhigh.9.gz OLD_FILES+=usr/share/man/man9/splimp.9.gz OLD_FILES+=usr/share/man/man9/splnet.9.gz OLD_FILES+=usr/share/man/man9/splsoftclock.9.gz OLD_FILES+=usr/share/man/man9/splsofttty.9.gz OLD_FILES+=usr/share/man/man9/splstatclock.9.gz OLD_FILES+=usr/share/man/man9/spltty.9.gz OLD_FILES+=usr/share/man/man9/splvm.9.gz OLD_FILES+=usr/share/man/man9/splx.9.gz # 20150118: toeplitz.c moved from netinet to net OLD_FILES+=usr/include/netinet/toeplitz.h # 20150118: new clang import which bumps version from 3.5.0 to 3.5.1. OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.5.0/altivec.h OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h OLD_FILES+=usr/include/clang/3.5.0/cpuid.h OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h OLD_FILES+=usr/include/clang/3.5.0/immintrin.h OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h OLD_FILES+=usr/include/clang/3.5.0/module.modulemap OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h OLD_DIRS+=usr/include/clang/3.5.0 OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a 
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd OLD_DIRS+=usr/lib/clang/3.5.0/lib OLD_DIRS+=usr/lib/clang/3.5.0 # 20150102: removal of asr(4) OLD_FILES+=usr/share/man/man4/asr.4.gz # 20150102: removal of texinfo OLD_FILES+=usr/bin/info OLD_FILES+=usr/bin/infokey OLD_FILES+=usr/bin/install-info OLD_FILES+=usr/bin/makeinfo OLD_FILES+=usr/bin/texindex OLD_FILES+=usr/share/info/am-utils.info.gz OLD_FILES+=usr/share/info/as.info.gz OLD_FILES+=usr/share/info/binutils.info.gz OLD_FILES+=usr/share/info/com_err.info.gz OLD_FILES+=usr/share/info/cpp.info.gz OLD_FILES+=usr/share/info/cppinternals.info.gz OLD_FILES+=usr/share/info/diff.info.gz OLD_FILES+=usr/share/info/dir OLD_FILES+=usr/share/info/gcc.info.gz OLD_FILES+=usr/share/info/gccint.info.gz OLD_FILES+=usr/share/info/gdb.info.gz OLD_FILES+=usr/share/info/gdbint.info.gz OLD_FILES+=usr/share/info/gperf.info.gz OLD_FILES+=usr/share/info/grep.info.gz OLD_FILES+=usr/share/info/groff.info.gz OLD_FILES+=usr/share/info/heimdal.info.gz OLD_FILES+=usr/share/info/history.info.gz OLD_FILES+=usr/share/info/info-stnd.info.gz OLD_FILES+=usr/share/info/info.info.gz OLD_FILES+=usr/share/info/ld.info.gz OLD_FILES+=usr/share/info/regex.info.gz OLD_FILES+=usr/share/info/rluserman.info.gz OLD_FILES+=usr/share/info/stabs.info.gz OLD_FILES+=usr/share/info/texinfo.info.gz OLD_FILES+=usr/share/man/man1/info.1.gz OLD_FILES+=usr/share/man/man1/infokey.1.gz OLD_FILES+=usr/share/man/man1/install-info.1.gz OLD_FILES+=usr/share/man/man1/makeinfo.1.gz OLD_FILES+=usr/share/man/man1/texindex.1.gz OLD_FILES+=usr/share/man/man5/info.5.gz OLD_FILES+=usr/share/man/man5/texinfo.5.gz OLD_DIRS+=usr/share/info # 20141231: new clang import which bumps version from 3.4.1 to 3.5.0. 
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.4.1/altivec.h OLD_FILES+=usr/include/clang/3.4.1/ammintrin.h OLD_FILES+=usr/include/clang/3.4.1/arm_neon.h OLD_FILES+=usr/include/clang/3.4.1/avx2intrin.h OLD_FILES+=usr/include/clang/3.4.1/avxintrin.h OLD_FILES+=usr/include/clang/3.4.1/bmi2intrin.h OLD_FILES+=usr/include/clang/3.4.1/bmiintrin.h OLD_FILES+=usr/include/clang/3.4.1/cpuid.h OLD_FILES+=usr/include/clang/3.4.1/emmintrin.h OLD_FILES+=usr/include/clang/3.4.1/f16cintrin.h OLD_FILES+=usr/include/clang/3.4.1/fma4intrin.h OLD_FILES+=usr/include/clang/3.4.1/fmaintrin.h OLD_FILES+=usr/include/clang/3.4.1/immintrin.h OLD_FILES+=usr/include/clang/3.4.1/lzcntintrin.h OLD_FILES+=usr/include/clang/3.4.1/mm3dnow.h OLD_FILES+=usr/include/clang/3.4.1/mm_malloc.h OLD_FILES+=usr/include/clang/3.4.1/mmintrin.h OLD_FILES+=usr/include/clang/3.4.1/module.map OLD_FILES+=usr/include/clang/3.4.1/nmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/pmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/popcntintrin.h OLD_FILES+=usr/include/clang/3.4.1/prfchwintrin.h OLD_FILES+=usr/include/clang/3.4.1/rdseedintrin.h OLD_FILES+=usr/include/clang/3.4.1/rtmintrin.h OLD_FILES+=usr/include/clang/3.4.1/shaintrin.h OLD_FILES+=usr/include/clang/3.4.1/smmintrin.h OLD_FILES+=usr/include/clang/3.4.1/tbmintrin.h OLD_FILES+=usr/include/clang/3.4.1/tmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/wmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h OLD_DIRS+=usr/include/clang/3.4.1 # 20141225: Remove gpib/ieee488 OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h OLD_FILES+=usr/include/dev/ieee488/tnt4882.h OLD_FILES+=usr/include/dev/ieee488/ugpib.h OLD_FILES+=usr/include/dev/ieee488/upd7210.h OLD_DIRS+=usr/include/dev/ieee488 OLD_FILES+=usr/include/gpib/gpib.h OLD_DIRS+=usr/include/gpib OLD_FILES+=usr/lib/libgpib.a OLD_FILES+=usr/lib/libgpib_p.a OLD_FILES+=usr/lib/libgpib.so OLD_LIBS+=usr/lib/libgpib.so.3 OLD_FILES+=usr/lib32/libgpib.a OLD_FILES+=usr/lib32/libgpib_p.a OLD_FILES+=usr/lib32/libgpib.so OLD_LIBS+=usr/lib32/libgpib.so.3 OLD_FILES+=usr/share/man/man3/gpib.3.gz OLD_FILES+=usr/share/man/man3/ibclr.3.gz OLD_FILES+=usr/share/man/man3/ibdev.3.gz OLD_FILES+=usr/share/man/man3/ibdma.3.gz OLD_FILES+=usr/share/man/man3/ibeos.3.gz OLD_FILES+=usr/share/man/man3/ibeot.3.gz OLD_FILES+=usr/share/man/man3/ibloc.3.gz OLD_FILES+=usr/share/man/man3/ibonl.3.gz OLD_FILES+=usr/share/man/man3/ibpad.3.gz OLD_FILES+=usr/share/man/man3/ibrd.3.gz OLD_FILES+=usr/share/man/man3/ibsad.3.gz OLD_FILES+=usr/share/man/man3/ibsic.3.gz OLD_FILES+=usr/share/man/man3/ibtmo.3.gz OLD_FILES+=usr/share/man/man3/ibtrg.3.gz OLD_FILES+=usr/share/man/man3/ibwrt.3.gz OLD_FILES+=usr/share/man/man4/gpib.4.gz OLD_FILES+=usr/share/man/man4/pcii.4.gz OLD_FILES+=usr/share/man/man4/tnt4882.4.gz # 20141224: libxo moved to /lib OLD_LIBS+=usr/lib/libxo.so.0 # 20141223: remove in6_gif.h, in_gif.h and if_stf.h OLD_FILES+=usr/include/net/if_stf.h OLD_FILES+=usr/include/netinet/in_gif.h OLD_FILES+=usr/include/netinet6/in6_gif.h # 20141209: pw tests broken into a file per command OLD_FILES+=usr/tests/usr.sbin/pw/pw_delete OLD_FILES+=usr/tests/usr.sbin/pw/pw_modify # 20141202: update to mandoc CVS 20141201 OLD_FILES+=usr.bin/preconv OLD_FILES+=share/man/man1/preconv.1.gz # 20141129: mrouted rc.d scripts removed from base OLD_FILES+=etc/rc.d/mrouted # 20141126: convert
sbin/mdconfig/tests to ATF format tests OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test OLD_FILES+=usr/tests/sbin/mdconfig/run.pl # 20141126: remove xform_ipip decapsulation fallback OLD_FILES+=usr/include/netipsec/ipip_var.h # 20141122: mandoc updated to 1.13.1 OLD_FILES+=usr/share/mdocml/external.png # 20141111: SF_KQUEUE code removed OLD_FILES+=usr/include/sys/sf_base.h OLD_FILES+=usr/include/sys/sf_sync.h # 20141109: faith/faithd removal OLD_FILES+=etc/rc.d/faith OLD_FILES+=usr/share/man/man4/faith.4.gz OLD_FILES+=usr/share/man/man4/if_faith.4.gz OLD_FILES+=usr/sbin/faithd OLD_FILES+=usr/share/man/man8/faithd.8.gz # 20141107: overhaul if_gre(4) OLD_FILES+=usr/include/netinet/ip_gre.h # 20141102: postrandom obsoleted by new /dev/random code OLD_FILES+=etc/rc.d/postrandom # 20141031: initrandom obsoleted by new /dev/random code OLD_FILES+=etc/rc.d/initrandom # 20141030: atf 0.21 import OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz # 20141028: debug files accidentally installed as directory name OLD_FILES+=usr/lib/debug/usr/lib/i18n OLD_FILES+=usr/lib/debug/usr/lib/private OLD_FILES+=usr/lib/debug/usr/lib32/i18n OLD_FILES+=usr/lib/debug/usr/lib32/private # 20141015: OpenSSL 1.0.1j import OLD_FILES+=usr/share/openssl/man/man3/CMS_sign_add1_signer.3.gz # 20141003: libproc version bump OLD_LIBS+=usr/lib/libproc.so.2 OLD_LIBS+=usr/lib32/libproc.so.2 # 20140922: sleepq_calc_signal_retval.9 and sleepq_catch_signals.9 removed OLD_FILES+=usr/share/man/man9/sleepq_calc_signal_retval.9.gz OLD_FILES+=usr/share/man/man9/sleepq_catch_signals.9.gz # 20140917: hv_kvpd rc.d script removed in favor of devd configuration OLD_FILES+=etc/rc.d/hv_kvpd # 20140917: libnv was accidentally being installed to /usr/lib instead of /lib OLD_LIBS+=usr/lib/libnv.so.0 # 20140829: rc.d/kerberos removed OLD_FILES+=etc/rc.d/kerberos # 20140827: tzdata2014f import OLD_FILES+=usr/share/zoneinfo/Asia/Chongqing OLD_FILES+=usr/share/zoneinfo/Asia/Harbin OLD_FILES+=usr/share/zoneinfo/Asia/Kashgar # 20140814: libopie version bump OLD_LIBS+=usr/lib/libopie.so.7 OLD_LIBS+=usr/lib32/libopie.so.7 # 20140811: otp-sha renamed to otp-sha1 OLD_FILES+=usr/bin/otp-sha OLD_FILES+=usr/share/man/man1/otp-sha.1.gz # 20140807: Remove private lib files that should not be installed. 
OLD_FILES+=usr/lib32/private/libatf-c.a OLD_FILES+=usr/lib32/private/libatf-c.so OLD_FILES+=usr/lib32/private/libatf-c_p.a OLD_FILES+=usr/lib32/private/libatf-c++.a OLD_FILES+=usr/lib32/private/libatf-c++.so OLD_FILES+=usr/lib32/private/libatf-c++_p.a OLD_FILES+=usr/lib32/private/libbsdstat.a OLD_FILES+=usr/lib32/private/libbsdstat.so OLD_FILES+=usr/lib32/private/libbsdstat_p.a OLD_FILES+=usr/lib32/private/libheimipcc.a OLD_FILES+=usr/lib32/private/libheimipcc.so OLD_FILES+=usr/lib32/private/libheimipcc_p.a OLD_FILES+=usr/lib32/private/libheimipcs.a OLD_FILES+=usr/lib32/private/libheimipcs.so OLD_FILES+=usr/lib32/private/libheimipcs_p.a OLD_FILES+=usr/lib32/private/libldns.a OLD_FILES+=usr/lib32/private/libldns.so OLD_FILES+=usr/lib32/private/libldns_p.a OLD_FILES+=usr/lib32/private/libssh.a OLD_FILES+=usr/lib32/private/libssh.so OLD_FILES+=usr/lib32/private/libssh_p.a OLD_FILES+=usr/lib32/private/libunbound.a OLD_FILES+=usr/lib32/private/libunbound.so OLD_FILES+=usr/lib32/private/libunbound_p.a OLD_FILES+=usr/lib32/private/libucl.a OLD_FILES+=usr/lib32/private/libucl.so OLD_FILES+=usr/lib32/private/libucl_p.a OLD_FILES+=usr/lib/private/libatf-c.a OLD_FILES+=usr/lib/private/libatf-c.so OLD_FILES+=usr/lib/private/libatf-c_p.a OLD_FILES+=usr/lib/private/libatf-c++.a OLD_FILES+=usr/lib/private/libatf-c++.so OLD_FILES+=usr/lib/private/libatf-c++_p.a OLD_FILES+=usr/lib/private/libbsdstat.a OLD_FILES+=usr/lib/private/libbsdstat.so OLD_FILES+=usr/lib/private/libbsdstat_p.a OLD_FILES+=usr/lib/private/libheimipcc.a OLD_FILES+=usr/lib/private/libheimipcc.so OLD_FILES+=usr/lib/private/libheimipcc_p.a OLD_FILES+=usr/lib/private/libheimipcs.a OLD_FILES+=usr/lib/private/libheimipcs.so OLD_FILES+=usr/lib/private/libheimipcs_p.a OLD_FILES+=usr/lib/private/libldns.a OLD_FILES+=usr/lib/private/libldns.so OLD_FILES+=usr/lib/private/libldns_p.a OLD_FILES+=usr/lib/private/libssh.a OLD_FILES+=usr/lib/private/libssh.so OLD_FILES+=usr/lib/private/libssh_p.a OLD_FILES+=usr/lib/private/libunbound.a OLD_FILES+=usr/lib/private/libunbound.so OLD_FILES+=usr/lib/private/libunbound_p.a OLD_FILES+=usr/lib/private/libucl.a OLD_FILES+=usr/lib/private/libucl.so OLD_FILES+=usr/lib/private/libucl_p.a # 20140803: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz # 20140731 OLD_FILES+=usr/share/man/man9/SYSCTL_ADD_OID.9.gz # 20140728: libsbuf restored to old version. 
OLD_LIBS+=lib/libsbuf.so.7 OLD_LIBS+=usr/lib32/libsbuf.so.7 # 20140728: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz OLD_FILES+=usr/share/man/man9/VOP_DESTROYVOBJECT.9.gz # 20140723: renamed to PCBGROUP.9 OLD_FILES+=usr/share/man/man9/PCBGROUPS.9.gz # 20140722: browse_packages_ftp.sh removed OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_ftp.sh # 20140718: Remove obsolete man pages OLD_FILES+=usr/share/man/man9/zero_copy.9.gz OLD_FILES+=usr/share/man/man9/zero_copy_sockets.9.gz # 20140718: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/pmap_page_protect.9.gz # 20140717: Remove an obsolete man page OLD_FILES+=usr/share/man/man9/pmap_clear_reference.9.gz # 20140716: Remove an incorrectly named man page OLD_FILES+=usr/share/man/man9/pmap_ts_modified.9.gz # 20140712: Removal of bsd.dtrace.mk OLD_FILES+=usr/share/mk/bsd.dtrace.mk # 20140705: turn libreadline into an internal lib OLD_LIBS+=lib/libreadline.so.8 OLD_FILES+=usr/lib/libreadline.a OLD_FILES+=usr/lib/libreadline_p.a OLD_FILES+=usr/lib/libreadline.so OLD_FILES+=usr/lib/libhistory.a OLD_FILES+=usr/lib/libhistory_p.a OLD_FILES+=usr/lib/libhistory.so OLD_LIBS+=usr/lib/libhistory.so.8 OLD_FILES+=usr/lib32/libhistory.a OLD_FILES+=usr/lib32/libhistory.so OLD_LIBS+=usr/lib32/libhistory.so.8 OLD_FILES+=usr/lib32/libhistory_p.a OLD_FILES+=usr/lib32/libreadline.a OLD_FILES+=usr/lib32/libreadline.so OLD_LIBS+=usr/lib32/libreadline.so.8 OLD_FILES+=usr/lib32/libreadline_p.a OLD_FILES+=usr/include/readline/chardefs.h OLD_FILES+=usr/include/readline/history.h OLD_FILES+=usr/include/readline/keymaps.h OLD_FILES+=usr/include/readline/readline.h OLD_FILES+=usr/include/readline/tilde.h OLD_FILES+=usr/include/readline/rlconf.h OLD_FILES+=usr/include/readline/rlstdc.h OLD_FILES+=usr/include/readline/rltypedefs.h OLD_DIRS+=usr/include/readline OLD_FILES+=usr/share/info/readline.info.gz OLD_FILES+=usr/share/man/man3/readline.3.gz OLD_FILES+=usr/share/man/man3/rlhistory.3.gz # 20140625: csup removal OLD_FILES+=usr/bin/csup OLD_FILES+=usr/bin/cpasswd OLD_FILES+=usr/share/man/man1/csup.1.gz OLD_FILES+=usr/share/man/man1/cpasswd.1.gz OLD_FILES+=usr/share/examples/cvsup/README OLD_FILES+=usr/share/examples/cvsup/cvs-supfile OLD_FILES+=usr/share/examples/cvsup/stable-supfile OLD_FILES+=usr/share/examples/cvsup/standard-supfile OLD_DIRS+=usr/share/examples/cvsup # 20140614: send-pr removal OLD_FILES+=usr/bin/sendbug OLD_FILES+=usr/share/info/send-pr.info.gz OLD_FILES+=usr/share/man/man1/send-pr.1.gz OLD_FILES+=usr/share/man/man1/sendbug.1.gz OLD_FILES+=etc/gnats/freefall OLD_DIRS+=etc/gnats # 20140512: new clang import which bumps version from 3.4 to 3.4.1.
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.4/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.4/altivec.h OLD_FILES+=usr/include/clang/3.4/ammintrin.h OLD_FILES+=usr/include/clang/3.4/avx2intrin.h OLD_FILES+=usr/include/clang/3.4/avxintrin.h OLD_FILES+=usr/include/clang/3.4/bmi2intrin.h OLD_FILES+=usr/include/clang/3.4/bmiintrin.h OLD_FILES+=usr/include/clang/3.4/cpuid.h OLD_FILES+=usr/include/clang/3.4/emmintrin.h OLD_FILES+=usr/include/clang/3.4/f16cintrin.h OLD_FILES+=usr/include/clang/3.4/fma4intrin.h OLD_FILES+=usr/include/clang/3.4/fmaintrin.h OLD_FILES+=usr/include/clang/3.4/immintrin.h OLD_FILES+=usr/include/clang/3.4/lzcntintrin.h OLD_FILES+=usr/include/clang/3.4/mm3dnow.h OLD_FILES+=usr/include/clang/3.4/mm_malloc.h OLD_FILES+=usr/include/clang/3.4/mmintrin.h OLD_FILES+=usr/include/clang/3.4/module.map OLD_FILES+=usr/include/clang/3.4/nmmintrin.h OLD_FILES+=usr/include/clang/3.4/pmmintrin.h OLD_FILES+=usr/include/clang/3.4/popcntintrin.h OLD_FILES+=usr/include/clang/3.4/prfchwintrin.h OLD_FILES+=usr/include/clang/3.4/rdseedintrin.h OLD_FILES+=usr/include/clang/3.4/rtmintrin.h OLD_FILES+=usr/include/clang/3.4/shaintrin.h OLD_FILES+=usr/include/clang/3.4/smmintrin.h OLD_FILES+=usr/include/clang/3.4/tbmintrin.h OLD_FILES+=usr/include/clang/3.4/tmmintrin.h OLD_FILES+=usr/include/clang/3.4/wmmintrin.h OLD_FILES+=usr/include/clang/3.4/x86intrin.h OLD_FILES+=usr/include/clang/3.4/xmmintrin.h OLD_FILES+=usr/include/clang/3.4/xopintrin.h OLD_FILES+=usr/include/clang/3.4/arm_neon.h OLD_DIRS+=usr/include/clang/3.4 # 20140505: Bogusly installing src.opts.mk OLD_FILES+=usr/share/mk/src.opts.mk # 20140505: Reject PR kern/187551 OLD_FILES+=usr/tests/sbin/ifconfig/fibs_test # 20140502: Removal of lindev(4) OLD_FILES+=usr/share/man/man4/lindev.4.gz # 20140425 OLD_FILES+=usr/lib/libssp_p.a OLD_FILES+=usr/lib/libstand_p.a OLD_FILES+=usr/lib32/libssp_p.a OLD_FILES+=usr/lib32/libstand_p.a # 20140314: AppleTalk OLD_DIRS+=usr/include/netatalk OLD_FILES+=usr/include/netatalk/aarp.h OLD_FILES+=usr/include/netatalk/at.h OLD_FILES+=usr/include/netatalk/at_extern.h OLD_FILES+=usr/include/netatalk/at_var.h OLD_FILES+=usr/include/netatalk/ddp.h OLD_FILES+=usr/include/netatalk/ddp_pcb.h OLD_FILES+=usr/include/netatalk/ddp_var.h OLD_FILES+=usr/include/netatalk/endian.h OLD_FILES+=usr/include/netatalk/phase2.h # 20140314: Remove IPX/SPX OLD_LIBS+=lib/libipx.so.5 OLD_FILES+=usr/include/netipx/ipx.h OLD_FILES+=usr/include/netipx/ipx_if.h OLD_FILES+=usr/include/netipx/ipx_pcb.h OLD_FILES+=usr/include/netipx/ipx_var.h OLD_FILES+=usr/include/netipx/spx.h OLD_FILES+=usr/include/netipx/spx_debug.h OLD_FILES+=usr/include/netipx/spx_timer.h OLD_FILES+=usr/include/netipx/spx_var.h OLD_DIRS+=usr/include/netipx OLD_FILES+=usr/lib/libipx.a OLD_FILES+=usr/lib/libipx.so OLD_FILES+=usr/lib/libipx_p.a OLD_FILES+=usr/lib32/libipx.a OLD_FILES+=usr/lib32/libipx.so OLD_LIBS+=usr/lib32/libipx.so.5 OLD_FILES+=usr/lib32/libipx_p.a OLD_FILES+=usr/sbin/IPXrouted OLD_FILES+=usr/share/man/man3/ipx.3.gz OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz OLD_FILES+=usr/share/man/man4/ef.4.gz OLD_FILES+=usr/share/man/man4/if_ef.4.gz OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz # 20140314: bsdconfig usermgmt rewrite OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/userinput # 20140307: bsdconfig groupmgmt rewrite OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/groupinput # 20140223: Remove libyaml
OLD_FILES+=usr/lib/private/libyaml.a OLD_FILES+=usr/lib/private/libyaml.so OLD_LIBS+=usr/lib/private/libyaml.so.1 OLD_FILES+=usr/lib/private/libyaml_p.a OLD_FILES+=usr/lib32/private/libyaml.a OLD_FILES+=usr/lib32/private/libyaml.so OLD_LIBS+=usr/lib32/private/libyaml.so.1 OLD_FILES+=usr/lib32/private/libyaml_p.a # 20140216: new clang import which bumps version from 3.3 to 3.4. OLD_FILES+=usr/bin/llvm-prof OLD_FILES+=usr/include/clang/3.3/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.3/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.3/altivec.h OLD_FILES+=usr/include/clang/3.3/ammintrin.h OLD_FILES+=usr/include/clang/3.3/avx2intrin.h OLD_FILES+=usr/include/clang/3.3/avxintrin.h OLD_FILES+=usr/include/clang/3.3/bmi2intrin.h OLD_FILES+=usr/include/clang/3.3/bmiintrin.h OLD_FILES+=usr/include/clang/3.3/cpuid.h OLD_FILES+=usr/include/clang/3.3/emmintrin.h OLD_FILES+=usr/include/clang/3.3/f16cintrin.h OLD_FILES+=usr/include/clang/3.3/fma4intrin.h OLD_FILES+=usr/include/clang/3.3/fmaintrin.h OLD_FILES+=usr/include/clang/3.3/immintrin.h OLD_FILES+=usr/include/clang/3.3/lzcntintrin.h OLD_FILES+=usr/include/clang/3.3/mm3dnow.h OLD_FILES+=usr/include/clang/3.3/mm_malloc.h OLD_FILES+=usr/include/clang/3.3/mmintrin.h OLD_FILES+=usr/include/clang/3.3/module.map OLD_FILES+=usr/include/clang/3.3/nmmintrin.h OLD_FILES+=usr/include/clang/3.3/pmmintrin.h OLD_FILES+=usr/include/clang/3.3/popcntintrin.h OLD_FILES+=usr/include/clang/3.3/prfchwintrin.h OLD_FILES+=usr/include/clang/3.3/rdseedintrin.h OLD_FILES+=usr/include/clang/3.3/rtmintrin.h OLD_FILES+=usr/include/clang/3.3/smmintrin.h OLD_FILES+=usr/include/clang/3.3/tmmintrin.h OLD_FILES+=usr/include/clang/3.3/wmmintrin.h OLD_FILES+=usr/include/clang/3.3/x86intrin.h OLD_FILES+=usr/include/clang/3.3/xmmintrin.h OLD_FILES+=usr/include/clang/3.3/xopintrin.h OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz OLD_FILES+=usr/share/man/man1/llvm-ranlib.1.gz OLD_DIRS+=usr/include/clang/3.3 # 20140216: nve(4) removed OLD_FILES+=usr/share/man/man4/if_nve.4.gz OLD_FILES+=usr/share/man/man4/nve.4.gz # 20140205: Open Firmware device moved OLD_FILES+=usr/include/dev/ofw/ofw_nexus.h # 20140128: libelf and libdwarf import OLD_LIBS+=usr/lib/libelf.so.1 OLD_LIBS+=usr/lib32/libelf.so.1 OLD_LIBS+=usr/lib/libdwarf.so.3 OLD_LIBS+=usr/lib32/libdwarf.so.3 # 20140123: apicvar header moved to x86 OLD_FILES+=usr/include/machine/apicvar.h # 20131215: libcam version bumped OLD_LIBS+=lib/libcam.so.6 usr/lib32/libcam.so.6 # 20131202: libcapsicum and libcasper moved to /lib/ OLD_LIBS+=usr/lib/libcapsicum.so.0 OLD_LIBS+=usr/lib/libcasper.so.0 # 20131109: extattr(2) mlinks fixed OLD_FILES+=usr/share/man/man2/extattr_delete_list.2.gz OLD_FILES+=usr/share/man/man2/extattr_get_list.2.gz # 20131107: example files removed OLD_FILES+=usr/share/examples/libusb20/aux.c OLD_FILES+=usr/share/examples/libusb20/aux.h # 20131105: tzdata 2013h import OLD_FILES+=usr/share/zoneinfo/America/Shiprock OLD_FILES+=usr/share/zoneinfo/Antarctica/South_Pole # 20131103: WITH_LIBICONV_COMPAT removal OLD_FILES+=usr/include/_libiconv_compat.h OLD_FILES+=usr/lib/libiconv.a OLD_FILES+=usr/lib/libiconv.so OLD_FILES+=usr/lib/libiconv.so.3 OLD_FILES+=usr/lib/libiconv_p.a OLD_FILES+=usr/lib32/libiconv.a OLD_FILES+=usr/lib32/libiconv.so OLD_FILES+=usr/lib32/libiconv.so.3 OLD_FILES+=usr/lib32/libiconv_p.a # 20131103: removal of utxrm(8), use 'utx rm' instead. 
OLD_FILES+=usr/sbin/utxrm OLD_FILES+=usr/share/man/man8/utxrm.8.gz # 20131031: pkg_install has been removed OLD_FILES+=etc/periodic/daily/220.backup-pkgdb OLD_FILES+=etc/periodic/daily/490.status-pkg-changes OLD_FILES+=etc/periodic/security/460.chkportsum OLD_FILES+=etc/periodic/weekly/400.status-pkg OLD_FILES+=usr/sbin/pkg_add OLD_FILES+=usr/sbin/pkg_create OLD_FILES+=usr/sbin/pkg_delete OLD_FILES+=usr/sbin/pkg_info OLD_FILES+=usr/sbin/pkg_updating OLD_FILES+=usr/sbin/pkg_version OLD_FILES+=usr/share/man/man1/pkg_add.1.gz OLD_FILES+=usr/share/man/man1/pkg_create.1.gz OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz OLD_FILES+=usr/share/man/man1/pkg_info.1.gz OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz OLD_FILES+=usr/share/man/man1/pkg_version.1.gz # 20131030: /etc/keys moved to /usr/share/keys OLD_DIRS+=etc/keys OLD_DIRS+=etc/keys/pkg OLD_DIRS+=etc/keys/pkg/revoked OLD_DIRS+=etc/keys/pkg/trusted OLD_FILES+=etc/keys/pkg/trusted/pkg.freebsd.org.2013102301 # 20131028: ng_fec(4) removed OLD_FILES+=usr/include/netgraph/ng_fec.h OLD_FILES+=usr/share/man/man4/ng_fec.4.gz # 20131027: header moved OLD_FILES+=usr/include/net/pf_mtag.h # 20131023: remove never used iscsi directory OLD_DIRS+=usr/share/examples/iscsi # 20131021: isf(4) removed OLD_FILES+=usr/sbin/isfctl OLD_FILES+=usr/share/man/man4/isf.4.gz OLD_FILES+=usr/share/man/man8/isfctl.8.gz # 20131014: libbsdyml becomes private OLD_FILES+=usr/lib/libbsdyml.a OLD_FILES+=usr/lib/libbsdyml.so OLD_LIBS+=usr/lib/libbsdyml.so.0 OLD_FILES+=usr/lib/libbsdyml_p.a OLD_FILES+=usr/lib32/libbsdyml.a OLD_FILES+=usr/lib32/libbsdyml.so OLD_LIBS+=usr/lib32/libbsdyml.so.0 OLD_FILES+=usr/lib32/libbsdyml_p.a OLD_FILES+=usr/share/man/man3/libbsdyml.3.gz OLD_FILES+=usr/include/bsdyml.h # 20131013: Removal of the ATF tools OLD_FILES+=etc/atf/FreeBSD.conf OLD_FILES+=etc/atf/atf-run.hooks OLD_FILES+=etc/atf/common.conf OLD_FILES+=usr/bin/atf-config OLD_FILES+=usr/bin/atf-report OLD_FILES+=usr/bin/atf-run OLD_FILES+=usr/bin/atf-version OLD_FILES+=usr/share/atf/atf-run.hooks OLD_FILES+=usr/share/examples/atf/atf-run.hooks OLD_FILES+=usr/share/examples/atf/tests-results.css OLD_FILES+=usr/share/man/man1/atf-config.1.gz OLD_FILES+=usr/share/man/man1/atf-report.1.gz OLD_FILES+=usr/share/man/man1/atf-run.1.gz OLD_FILES+=usr/share/man/man1/atf-version.1.gz OLD_FILES+=usr/share/man/man5/atf-formats.5.gz OLD_FILES+=usr/share/xml/atf/tests-results.dtd OLD_FILES+=usr/share/xsl/atf/tests-results.xsl # 20131009: freebsd-version moved from /libexec to /bin OLD_FILES+=libexec/freebsd-version # 20131001: ar and ranlib from binutils not used OLD_FILES+=usr/bin/gnu-ar OLD_FILES+=usr/bin/gnu-ranlib OLD_FILES+=usr/share/man/man1/gnu-ar.1.gz OLD_FILES+=usr/share/man/man1/gnu-ranlib.1.gz # 20130930: BIND removed from base OLD_FILES+=etc/mtree/BIND.chroot.dist OLD_FILES+=etc/namedb OLD_FILES+=etc/periodic/daily/470.status-named OLD_FILES+=usr/bin/dig OLD_FILES+=usr/bin/nslookup OLD_FILES+=usr/bin/nsupdate OLD_DIRS+=usr/include/lwres OLD_FILES+=usr/include/lwres/context.h OLD_FILES+=usr/include/lwres/int.h OLD_FILES+=usr/include/lwres/ipv6.h OLD_FILES+=usr/include/lwres/lang.h OLD_FILES+=usr/include/lwres/list.h OLD_FILES+=usr/include/lwres/lwbuffer.h OLD_FILES+=usr/include/lwres/lwpacket.h OLD_FILES+=usr/include/lwres/lwres.h OLD_FILES+=usr/include/lwres/net.h OLD_FILES+=usr/include/lwres/netdb.h OLD_FILES+=usr/include/lwres/platform.h OLD_FILES+=usr/include/lwres/result.h OLD_FILES+=usr/include/lwres/string.h OLD_FILES+=usr/include/lwres/version.h OLD_FILES+=usr/lib/liblwres.a 
OLD_FILES+=usr/lib/liblwres.so OLD_LIBS+=usr/lib/liblwres.so.90 OLD_FILES+=usr/lib/liblwres_p.a OLD_FILES+=usr/sbin/arpaname OLD_FILES+=usr/sbin/ddns-confgen OLD_FILES+=usr/sbin/dnssec-dsfromkey OLD_FILES+=usr/sbin/dnssec-keyfromlabel OLD_FILES+=usr/sbin/dnssec-keygen OLD_FILES+=usr/sbin/dnssec-revoke OLD_FILES+=usr/sbin/dnssec-settime OLD_FILES+=usr/sbin/dnssec-signzone OLD_FILES+=usr/sbin/dnssec-verify OLD_FILES+=usr/sbin/genrandom OLD_FILES+=usr/sbin/isc-hmac-fixup OLD_FILES+=usr/sbin/lwresd OLD_FILES+=usr/sbin/named OLD_FILES+=usr/sbin/named-checkconf OLD_FILES+=usr/sbin/named-checkzone OLD_FILES+=usr/sbin/named-compilezone OLD_FILES+=usr/sbin/named-journalprint OLD_FILES+=usr/sbin/named.reconfig OLD_FILES+=usr/sbin/named.reload OLD_FILES+=usr/sbin/nsec3hash OLD_FILES+=usr/sbin/rndc OLD_FILES+=usr/sbin/rndc-confgen OLD_DIRS+=usr/share/doc/bind9 OLD_FILES+=usr/share/doc/bind9/CHANGES OLD_FILES+=usr/share/doc/bind9/COPYRIGHT OLD_FILES+=usr/share/doc/bind9/FAQ OLD_FILES+=usr/share/doc/bind9/HISTORY OLD_FILES+=usr/share/doc/bind9/README OLD_DIRS+=usr/share/doc/bind9/arm OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch10.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.pdf OLD_FILES+=usr/share/doc/bind9/arm/man.arpaname.html OLD_FILES+=usr/share/doc/bind9/arm/man.ddns-confgen.html OLD_FILES+=usr/share/doc/bind9/arm/man.dig.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-dsfromkey.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keyfromlabel.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keygen.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-revoke.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-settime.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-signzone.html OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-verify.html OLD_FILES+=usr/share/doc/bind9/arm/man.genrandom.html OLD_FILES+=usr/share/doc/bind9/arm/man.host.html OLD_FILES+=usr/share/doc/bind9/arm/man.isc-hmac-fixup.html OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkconf.html OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkzone.html OLD_FILES+=usr/share/doc/bind9/arm/man.named-journalprint.html OLD_FILES+=usr/share/doc/bind9/arm/man.named.html OLD_FILES+=usr/share/doc/bind9/arm/man.nsec3hash.html OLD_FILES+=usr/share/doc/bind9/arm/man.nsupdate.html OLD_FILES+=usr/share/doc/bind9/arm/man.rndc-confgen.html OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.conf.html OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.html OLD_DIRS+=usr/share/doc/bind9/misc OLD_FILES+=usr/share/doc/bind9/misc/dnssec OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl OLD_FILES+=usr/share/doc/bind9/misc/ipv6 OLD_FILES+=usr/share/doc/bind9/misc/migration OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9 OLD_FILES+=usr/share/doc/bind9/misc/options OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance OLD_FILES+=usr/share/doc/bind9/misc/roadmap OLD_FILES+=usr/share/doc/bind9/misc/sdb OLD_FILES+=usr/share/doc/bind9/misc/sort-options.pl OLD_FILES+=usr/share/man/man1/arpaname.1.gz OLD_FILES+=usr/share/man/man1/dig.1.gz 
OLD_FILES+=usr/share/man/man1/nslookup.1.gz OLD_FILES+=usr/share/man/man1/nsupdate.1.gz OLD_FILES+=usr/share/man/man3/lwres.3.gz OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz OLD_FILES+=usr/share/man/man3/lwres_config.3.gz OLD_FILES+=usr/share/man/man3/lwres_context.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz 
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz OLD_FILES+=usr/share/man/man5/named.conf.5.gz OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz OLD_FILES+=usr/share/man/man8/ddns-confgen.8.gz OLD_FILES+=usr/share/man/man8/dnssec-dsfromkey.8.gz OLD_FILES+=usr/share/man/man8/dnssec-keyfromlabel.8.gz OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz OLD_FILES+=usr/share/man/man8/dnssec-revoke.8.gz OLD_FILES+=usr/share/man/man8/dnssec-settime.8.gz OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz OLD_FILES+=usr/share/man/man8/dnssec-verify.8.gz OLD_FILES+=usr/share/man/man8/genrandom.8.gz OLD_FILES+=usr/share/man/man8/isc-hmac-fixup.8.gz OLD_FILES+=usr/share/man/man8/lwresd.8.gz OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz OLD_FILES+=usr/share/man/man8/named-compilezone.8.gz OLD_FILES+=usr/share/man/man8/named-journalprint.8.gz OLD_FILES+=usr/share/man/man8/named.8.gz OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz OLD_FILES+=usr/share/man/man8/named.reload.8.gz OLD_FILES+=usr/share/man/man8/nsec3hash.8.gz OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz OLD_FILES+=usr/share/man/man8/rndc.8.gz OLD_DIRS+=var/named/dev OLD_DIRS+=var/named/etc OLD_DIRS+=var/named/etc/namedb OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev OLD_DIRS+=var/named/etc/namedb/dynamic OLD_FILES+=var/named/etc/namedb/make-localhost OLD_DIRS+=var/named/etc/namedb/master OLD_FILES+=var/named/etc/namedb/master/empty.db OLD_FILES+=var/named/etc/namedb/master/localhost-forward.db OLD_FILES+=var/named/etc/namedb/master/localhost-reverse.db #OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out OLD_FILES+=var/named/etc/namedb/named.root OLD_DIRS+=var/named/etc/namedb/working OLD_DIRS+=var/named/etc/namedb/slave OLD_DIRS+=var/named/var OLD_DIRS+=var/named/var/dump OLD_DIRS+=var/named/var/log OLD_DIRS+=var/named/var/run OLD_DIRS+=var/named/var/run/named OLD_DIRS+=var/named/var/stats OLD_DIRS+=var/run/named # 20130923: example moved OLD_FILES+=usr/share/examples/bsdconfig/browse_packages.sh # 20130908: libssh becomes private OLD_FILES+=usr/lib/libssh.a OLD_FILES+=usr/lib/libssh.so OLD_LIBS+=usr/lib/libssh.so.5 OLD_FILES+=usr/lib/libssh_p.a OLD_FILES+=usr/lib32/libssh.a OLD_FILES+=usr/lib32/libssh.so OLD_LIBS+=usr/lib32/libssh.so.5 
OLD_FILES+=usr/lib32/libssh_p.a # 20130903: gnupatch is no more OLD_FILES+=usr/bin/gnupatch OLD_FILES+=usr/share/man/man1/gnupatch.1.gz # 20130829: bsdpatch is patch unconditionally OLD_FILES+=usr/bin/bsdpatch OLD_FILES+=usr/share/man/man1/bsdpatch.1.gz # 20130822: bind 9.9.3-P2 import OLD_LIBS+=usr/lib/liblwres.so.80 # 20130814: vm_page_busy(9) OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz # 20130710: libkvm version bump OLD_LIBS+=lib/libkvm.so.5 OLD_LIBS+=usr/lib32/libkvm.so.5 # 20130623: dialog update from 1.1 to 1.2 OLD_LIBS+=usr/lib/libdialog.so.7 OLD_LIBS+=usr/lib32/libdialog.so.7 # 20130616: vfs_mount.9 removed OLD_FILES+=usr/share/man/man9/vfs_mount.9.gz # 20130614: remove CVS from base OLD_FILES+=usr/bin/cvs OLD_FILES+=usr/bin/cvsbug OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ascii.gz OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ps.gz OLD_DIRS+=usr/share/doc/psd/28.cvs OLD_FILES+=usr/share/examples/cvs/contrib/README OLD_FILES+=usr/share/examples/cvs/contrib/clmerge OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man OLD_FILES+=usr/share/examples/cvs/contrib/descend.man OLD_FILES+=usr/share/examples/cvs/contrib/easy-import OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc OLD_FILES+=usr/share/examples/cvs/contrib/log OLD_FILES+=usr/share/examples/cvs/contrib/log_accum OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log OLD_FILES+=usr/share/examples/cvs/contrib/rcslock OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs OLD_DIRS+=usr/share/examples/cvs/contrib OLD_DIRS+=usr/share/examples/cvs OLD_FILES+=usr/share/info/cvs.info.gz OLD_FILES+=usr/share/info/cvsclient.info.gz OLD_FILES+=usr/share/man/man1/cvs.1.gz OLD_FILES+=usr/share/man/man5/cvs.5.gz OLD_FILES+=usr/share/man/man8/cvsbug.8.gz # 20130607: WITH_DEBUG_FILES added OLD_FILES+=lib/libufs.so.6.symbols OLD_FILES+=usr/lib32/libufs.so.6.symbols # 20130417: nfs fha moved from nfsserver to nfs OLD_FILES+=usr/include/nfsserver/nfs_fha.h # 20130411: new clang import which bumps version from 3.2 to 3.3. 
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h OLD_FILES+=usr/include/clang/3.2/altivec.h OLD_FILES+=usr/include/clang/3.2/ammintrin.h OLD_FILES+=usr/include/clang/3.2/avx2intrin.h OLD_FILES+=usr/include/clang/3.2/avxintrin.h OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h OLD_FILES+=usr/include/clang/3.2/bmiintrin.h OLD_FILES+=usr/include/clang/3.2/cpuid.h OLD_FILES+=usr/include/clang/3.2/emmintrin.h OLD_FILES+=usr/include/clang/3.2/f16cintrin.h OLD_FILES+=usr/include/clang/3.2/fma4intrin.h OLD_FILES+=usr/include/clang/3.2/fmaintrin.h OLD_FILES+=usr/include/clang/3.2/immintrin.h OLD_FILES+=usr/include/clang/3.2/lzcntintrin.h OLD_FILES+=usr/include/clang/3.2/mm3dnow.h OLD_FILES+=usr/include/clang/3.2/mm_malloc.h OLD_FILES+=usr/include/clang/3.2/mmintrin.h OLD_FILES+=usr/include/clang/3.2/module.map OLD_FILES+=usr/include/clang/3.2/nmmintrin.h OLD_FILES+=usr/include/clang/3.2/pmmintrin.h OLD_FILES+=usr/include/clang/3.2/popcntintrin.h OLD_FILES+=usr/include/clang/3.2/rtmintrin.h OLD_FILES+=usr/include/clang/3.2/smmintrin.h OLD_FILES+=usr/include/clang/3.2/tmmintrin.h OLD_FILES+=usr/include/clang/3.2/wmmintrin.h OLD_FILES+=usr/include/clang/3.2/x86intrin.h OLD_FILES+=usr/include/clang/3.2/xmmintrin.h OLD_FILES+=usr/include/clang/3.2/xopintrin.h OLD_DIRS+=usr/include/clang/3.2 # 20130404: legacy ATA stack removed OLD_FILES+=etc/periodic/daily/405.status-ata-raid OLD_FILES+=rescue/atacontrol OLD_FILES+=sbin/atacontrol OLD_FILES+=usr/share/man/man8/atacontrol.8.gz OLD_FILES+=usr/share/man/man4/atapicam.4.gz OLD_FILES+=usr/share/man/man4/ataraid.4.gz OLD_FILES+=usr/sbin/burncd OLD_FILES+=usr/share/man/man8/burncd.8.gz # 20130316: vinum.4 removed OLD_FILES+=usr/share/man/man4/vinum.4.gz # 20130312: fortunes-o removed OLD_FILES+=usr/share/games/fortune/fortunes-o OLD_FILES+=usr/share/games/fortune/fortunes-o.dat # 20130311: Ports are no more available via cvsup OLD_FILES+=usr/share/examples/cvsup/ports-supfile OLD_FILES+=usr/share/examples/cvsup/refuse OLD_FILES+=usr/share/examples/cvsup/refuse.README # 20130309: NWFS and NCP supports removed OLD_FILES+=usr/bin/ncplist OLD_FILES+=usr/bin/ncplogin OLD_FILES+=usr/bin/ncplogout OLD_FILES+=usr/include/fs/nwfs/nwfs.h OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h OLD_DIRS+=usr/include/fs/nwfs OLD_FILES+=usr/include/netncp/ncp.h OLD_FILES+=usr/include/netncp/ncp_cfg.h OLD_FILES+=usr/include/netncp/ncp_conn.h OLD_FILES+=usr/include/netncp/ncp_file.h OLD_FILES+=usr/include/netncp/ncp_lib.h OLD_FILES+=usr/include/netncp/ncp_ncp.h OLD_FILES+=usr/include/netncp/ncp_nls.h OLD_FILES+=usr/include/netncp/ncp_rcfile.h OLD_FILES+=usr/include/netncp/ncp_rq.h OLD_FILES+=usr/include/netncp/ncp_sock.h OLD_FILES+=usr/include/netncp/ncp_subr.h OLD_FILES+=usr/include/netncp/ncp_user.h OLD_FILES+=usr/include/netncp/ncpio.h OLD_FILES+=usr/include/netncp/nwerror.h OLD_DIRS+=usr/include/netncp OLD_FILES+=usr/lib/libncp.a OLD_FILES+=usr/lib/libncp.so OLD_LIBS+=usr/lib/libncp.so.4 OLD_FILES+=usr/lib/libncp_p.a OLD_FILES+=usr/lib32/libncp.a OLD_FILES+=usr/lib32/libncp.so OLD_LIBS+=usr/lib32/libncp.so.4 OLD_FILES+=usr/lib32/libncp_p.a OLD_FILES+=usr/sbin/mount_nwfs OLD_FILES+=usr/share/examples/nwclient/dot.nwfsrc OLD_FILES+=usr/share/examples/nwclient/nwfs.sh.sample OLD_DIRS+=usr/share/examples/nwclient OLD_FILES+=usr/share/man/man1/ncplist.1.gz OLD_FILES+=usr/share/man/man1/ncplogin.1.gz OLD_FILES+=usr/share/man/man1/ncplogout.1.gz 
OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz # 20130302: NTFS support removed OLD_FILES+=rescue/mount_ntfs OLD_FILES+=sbin/mount_ntfs OLD_FILES+=usr/include/fs/ntfs/ntfs.h OLD_FILES+=usr/include/fs/ntfs/ntfs_compr.h OLD_FILES+=usr/include/fs/ntfs/ntfs_ihash.h OLD_FILES+=usr/include/fs/ntfs/ntfs_inode.h OLD_FILES+=usr/include/fs/ntfs/ntfs_subr.h OLD_FILES+=usr/include/fs/ntfs/ntfs_vfsops.h OLD_FILES+=usr/include/fs/ntfs/ntfsmount.h OLD_DIRS+=usr/include/fs/ntfs OLD_FILES+=usr/share/man/man8/mount_ntfs.8.gz # 20130302: PORTALFS support removed OLD_FILES+=usr/include/fs/portalfs/portal.h OLD_DIRS+=usr/include/fs/portalfs OLD_FILES+=usr/sbin/mount_portalfs OLD_FILES+=usr/share/examples/portal/README OLD_FILES+=usr/share/examples/portal/portal.conf OLD_DIRS+=usr/share/examples/portal OLD_FILES+=usr/share/man/man8/mount_portalfs.8.gz # 20130302: CODAFS support removed OLD_FILES+=usr/share/man/man4/coda.4.gz # 20130302: XFS support removed OLD_FILES+=usr/share/man/man5/xfs.5.gz # 20130302: Capsicum overhaul OLD_FILES+=usr/share/man/man2/cap_getrights.2.gz OLD_FILES+=usr/share/man/man2/cap_new.2.gz # 20130213: OpenSSL 1.0.1e import OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover.3.gz OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover_init.3.gz # 20130116: removed long unused directories for .1aout section manpages OLD_FILES+=usr/share/man/en.ISO8859-1/man1aout OLD_FILES+=usr/share/man/en.UTF-8/man1aout OLD_DIRS+=usr/share/man/man1aout OLD_DIRS+=usr/share/man/cat1aout OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1aout OLD_DIRS+=usr/share/man/en.UTF-8/cat1aout # 20130110: bsd.compat.mk removed OLD_FILES+=usr/share/mk/bsd.compat.mk # 20130103: gnats-supfile removed OLD_FILES+=usr/share/examples/cvsup/gnats-supfile # 20121230: libdisk removed OLD_FILES+=usr/share/man/man3/libdisk.3.gz usr/include/libdisk.h OLD_FILES+=usr/lib/libdisk.a usr/lib32/libdisk.a # 20121230: remove wrongly created directories for auditdistd OLD_DIRS+=var/dist OLD_DIRS+=var/remote # 20121114: zpool-features manual page moved from section 5 to 7 OLD_FILES+=usr/share/man/man5/zpool-features.5.gz # 20121022: remove harp, hfa and idt man page OLD_FILES+=usr/share/man/man4/harp.4.gz OLD_FILES+=usr/share/man/man4/hfa.4.gz OLD_FILES+=usr/share/man/man4/idt.4.gz OLD_FILES+=usr/share/man/man4/if_idt.4.gz # 20121022: VFS_LOCK_GIANT elimination OLD_FILES+=usr/share/man/man9/VFS_LOCK_GIANT.9.gz OLD_FILES+=usr/share/man/man9/VFS_UNLOCK_GIANT.9.gz # 20121004: remove incomplete unwind.h OLD_FILES+=usr/include/clang/3.2/unwind.h # 20120910: NetBSD compat shims removed OLD_FILES+=usr/include/cam/scsi/scsi_low_pisa.h OLD_FILES+=usr/include/sys/device_port.h # 20120909: doc and www supfiles removed OLD_FILES+=usr/share/examples/cvsup/doc-supfile OLD_FILES+=usr/share/examples/cvsup/www-supfile # 20120908: pf cleanup OLD_FILES+=usr/include/net/if_pflow.h # 20120816: new clang import which bumps version from 3.1 to 3.2 OLD_FILES+=usr/bin/llvm-ld OLD_FILES+=usr/bin/llvm-stub OLD_FILES+=usr/include/clang/3.1/altivec.h OLD_FILES+=usr/include/clang/3.1/avx2intrin.h OLD_FILES+=usr/include/clang/3.1/avxintrin.h OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h OLD_FILES+=usr/include/clang/3.1/bmiintrin.h OLD_FILES+=usr/include/clang/3.1/cpuid.h OLD_FILES+=usr/include/clang/3.1/emmintrin.h OLD_FILES+=usr/include/clang/3.1/fma4intrin.h OLD_FILES+=usr/include/clang/3.1/immintrin.h OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h OLD_FILES+=usr/include/clang/3.1/mm3dnow.h OLD_FILES+=usr/include/clang/3.1/mm_malloc.h 
OLD_FILES+=usr/include/clang/3.1/mmintrin.h OLD_FILES+=usr/include/clang/3.1/module.map OLD_FILES+=usr/include/clang/3.1/nmmintrin.h OLD_FILES+=usr/include/clang/3.1/pmmintrin.h OLD_FILES+=usr/include/clang/3.1/popcntintrin.h OLD_FILES+=usr/include/clang/3.1/smmintrin.h OLD_FILES+=usr/include/clang/3.1/tmmintrin.h OLD_FILES+=usr/include/clang/3.1/unwind.h OLD_FILES+=usr/include/clang/3.1/wmmintrin.h OLD_FILES+=usr/include/clang/3.1/x86intrin.h OLD_FILES+=usr/include/clang/3.1/xmmintrin.h OLD_DIRS+=usr/include/clang/3.1 OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz # 20120712: OpenSSL 1.0.1c import OLD_LIBS+=lib/libcrypto.so.6 OLD_LIBS+=usr/lib/libssl.so.6 OLD_LIBS+=usr/lib32/libcrypto.so.6 OLD_LIBS+=usr/lib32/libssl.so.6 OLD_FILES+=usr/include/openssl/aes_locl.h OLD_FILES+=usr/include/openssl/bio_lcl.h OLD_FILES+=usr/include/openssl/e_os.h OLD_FILES+=usr/include/openssl/fips.h OLD_FILES+=usr/include/openssl/fips_rand.h OLD_FILES+=usr/include/openssl/pq_compat.h OLD_FILES+=usr/include/openssl/tmdiff.h OLD_FILES+=usr/include/openssl/ui_locl.h OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_id_callback.3.gz # 20120621: remove old man page OLD_FILES+=usr/share/man/man8/vnconfig.8.gz # 20120619: TOE support updated OLD_FILES+=usr/include/netinet/toedev.h # 20120613: auth.conf removed OLD_FILES+=etc/auth.conf OLD_FILES+=usr/share/examples/etc/auth.conf OLD_FILES+=usr/share/man/man3/auth.3.gz OLD_FILES+=usr/share/man/man3/auth_getval.3.gz OLD_FILES+=usr/share/man/man5/auth.conf.5.gz # 20120530: kde pam lives now in ports OLD_FILES+=etc/pam.d/kde # 20120521: byacc import OLD_FILES+=usr/bin/yyfix OLD_FILES+=usr/share/man/man1/yyfix.1.gz # 20120505: new clang import installed a redundant internal header OLD_FILES+=usr/include/clang/3.1/stdalign.h # 20120428: MD2 removed from libmd OLD_LIBS+=lib/libmd.so.5 OLD_FILES+=usr/include/md2.h OLD_LIBS+=usr/lib32/libmd.so.5 OLD_FILES+=usr/share/man/man3/MD2Data.3.gz OLD_FILES+=usr/share/man/man3/MD2End.3.gz OLD_FILES+=usr/share/man/man3/MD2File.3.gz OLD_FILES+=usr/share/man/man3/MD2FileChunk.3.gz OLD_FILES+=usr/share/man/man3/MD2Final.3.gz OLD_FILES+=usr/share/man/man3/MD2Init.3.gz OLD_FILES+=usr/share/man/man3/MD2Update.3.gz OLD_FILES+=usr/share/man/man3/md2.3.gz # 20120425: libusb version bump (r234684) OLD_LIBS+=usr/lib/libusb.so.2 OLD_LIBS+=usr/lib32/libusb.so.2 OLD_FILES+=usr/share/man/man3/libsub_get_active_config_descriptor.3.gz # 20120415: new clang import which bumps version from 3.0 to 3.1 OLD_FILES+=usr/include/clang/3.0/altivec.h OLD_FILES+=usr/include/clang/3.0/avxintrin.h OLD_FILES+=usr/include/clang/3.0/emmintrin.h OLD_FILES+=usr/include/clang/3.0/immintrin.h OLD_FILES+=usr/include/clang/3.0/mm3dnow.h OLD_FILES+=usr/include/clang/3.0/mm_malloc.h OLD_FILES+=usr/include/clang/3.0/mmintrin.h OLD_FILES+=usr/include/clang/3.0/nmmintrin.h OLD_FILES+=usr/include/clang/3.0/pmmintrin.h OLD_FILES+=usr/include/clang/3.0/smmintrin.h OLD_FILES+=usr/include/clang/3.0/tmmintrin.h OLD_FILES+=usr/include/clang/3.0/wmmintrin.h OLD_FILES+=usr/include/clang/3.0/x86intrin.h OLD_FILES+=usr/include/clang/3.0/xmmintrin.h OLD_DIRS+=usr/include/clang/3.0 # 20120412: BIND 9.8.1 release notes removed OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.pdf OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.txt OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.html OLD_FILES+=usr/share/doc/bind9/release-notes.css # 20120330: legacy(4) moved to x86 OLD_FILES+=usr/include/machine/legacyvar.h # 20120324: MPI headers updated 
OLD_FILES+=usr/include/dev/mpt/mpilib/mpi_inb.h # 20120322: hwpmc_mips24k.h removed OLD_FILES+=usr/include/dev/hwpmc/hwpmc_mips24k.h # 20120322: Update heimdal to 1.5.1. OLD_FILES+=usr/include/krb5-v4compat.h \ usr/include/krb_err.h \ usr/include/hdb-private.h \ usr/share/man/man3/krb5_addresses.3.gz \ usr/share/man/man3/krb5_cc_cursor.3.gz \ usr/share/man/man3/krb5_cc_ops.3.gz \ usr/share/man/man3/krb5_config.3.gz \ usr/share/man/man3/krb5_config_get_int_default.3.gz \ usr/share/man/man3/krb5_context.3.gz \ usr/share/man/man3/krb5_data.3.gz \ usr/share/man/man3/krb5_err.3.gz \ usr/share/man/man3/krb5_errx.3.gz \ usr/share/man/man3/krb5_keyblock.3.gz \ usr/share/man/man3/krb5_keytab_entry.3.gz \ usr/share/man/man3/krb5_kt_cursor.3.gz \ usr/share/man/man3/krb5_kt_ops.3.gz \ usr/share/man/man3/krb5_set_warn_dest.3.gz \ usr/share/man/man3/krb5_verr.3.gz \ usr/share/man/man3/krb5_verrx.3.gz \ usr/share/man/man3/krb5_vwarnx.3.gz \ usr/share/man/man3/krb5_warn.3.gz \ usr/share/man/man3/krb5_warnx.3.gz OLD_LIBS+=usr/lib/libasn1.so.10 \ usr/lib/libhdb.so.10 \ usr/lib/libheimntlm.so.10 \ usr/lib/libhx509.so.10 \ usr/lib/libkadm5clnt.so.10 \ usr/lib/libkadm5srv.so.10 \ usr/lib/libkafs5.so.10 \ usr/lib/libkrb5.so.10 \ usr/lib/libroken.so.10 \ usr/lib32/libasn1.so.10 \ usr/lib32/libhdb.so.10 \ usr/lib32/libheimntlm.so.10 \ usr/lib32/libhx509.so.10 \ usr/lib32/libkadm5clnt.so.10 \ usr/lib32/libkadm5srv.so.10 \ usr/lib32/libkafs5.so.10 \ usr/lib32/libkrb5.so.10 \ usr/lib32/libroken.so.10 # 20120309: Remove fifofs header files. OLD_FILES+=usr/include/fs/fifofs/fifo.h OLD_DIRS+=usr/include/fs/fifofs # 20120304: xlocale cleanup OLD_FILES+=usr/include/_xlocale_ctype.h # 20120225: libarchive 3.0.3 OLD_FILES+=usr/share/man/man3/archive_read_data_into_buffer.3.gz \ usr/share/man/man3/archive_read_support_compression_all.3.gz \ usr/share/man/man3/archive_read_support_compression_bzip2.3.gz \ usr/share/man/man3/archive_read_support_compression_compress.3.gz \ usr/share/man/man3/archive_read_support_compression_gzip.3.gz \ usr/share/man/man3/archive_read_support_compression_lzma.3.gz \ usr/share/man/man3/archive_read_support_compression_none.3.gz \ usr/share/man/man3/archive_read_support_compression_program.3.gz \ usr/share/man/man3/archive_read_support_compression_program_signature.3.gz \ usr/share/man/man3/archive_read_support_compression_xz.3.gz \ usr/share/man/man3/archive_write_set_callbacks.3.gz \ usr/share/man/man3/archive_write_set_compression_bzip2.3.gz \ usr/share/man/man3/archive_write_set_compression_compress.3.gz \ usr/share/man/man3/archive_write_set_compression_gzip.3.gz \ usr/share/man/man3/archive_write_set_compression_none.3.gz \ usr/share/man/man3/archive_write_set_compression_program.3.gz OLD_LIBS+=usr/lib/libarchive.so.5 OLD_LIBS+=usr/lib32/libarchive.so.5 # 20120113: removal of wtmpcvt(1) OLD_FILES+=usr/bin/wtmpcvt OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz # 20111214: eventtimers(7) moved to eventtimers(4) OLD_FILES+=usr/share/man/man7/eventtimers.7.gz # 20111125: amd(4) removed OLD_FILES+=usr/share/man/man4/amd.4.gz # 20111125: libodialog removed OLD_FILES+=usr/lib/libodialog.a OLD_FILES+=usr/lib/libodialog.so OLD_LIBS+=usr/lib/libodialog.so.7 OLD_FILES+=usr/lib/libodialog_p.a OLD_FILES+=usr/lib32/libodialog.a OLD_FILES+=usr/lib32/libodialog.so OLD_LIBS+=usr/lib32/libodialog.so.7 OLD_FILES+=usr/lib32/libodialog_p.a # 20110930: sysinstall removed OLD_FILES+=usr/sbin/sysinstall OLD_FILES+=usr/share/man/man8/sysinstall.8.gz OLD_FILES+=usr/lib/libftpio.a OLD_FILES+=usr/lib/libftpio.so 
OLD_LIBS+=usr/lib/libftpio.so.8 OLD_FILES+=usr/lib/libftpio_p.a OLD_FILES+=usr/lib32/libftpio.a OLD_FILES+=usr/lib32/libftpio.so OLD_LIBS+=usr/lib32/libftpio.so.8 OLD_FILES+=usr/lib32/libftpio_p.a OLD_FILES+=usr/include/ftpio.h OLD_FILES+=usr/share/man/man3/ftpio.3.gz # 20110915: rename congestion control manpages OLD_FILES+=usr/share/man/man9/cc.9.gz # 20110831: atomic page flags operations OLD_FILES+=usr/share/man/man9/vm_page_flag.9.gz OLD_FILES+=usr/share/man/man9/vm_page_flag_clear.9.gz OLD_FILES+=usr/share/man/man9/vm_page_flag_set.9.gz # 20110828: library version bump for 9.0 OLD_LIBS+=lib/libcam.so.5 OLD_LIBS+=lib/libpcap.so.7 OLD_LIBS+=lib/libufs.so.5 OLD_LIBS+=usr/lib/libbsnmp.so.5 OLD_LIBS+=usr/lib/libdwarf.so.2 OLD_LIBS+=usr/lib/libopie.so.6 OLD_LIBS+=usr/lib/librtld_db.so.1 OLD_LIBS+=usr/lib/libtacplus.so.4 OLD_LIBS+=usr/lib32/libcam.so.5 OLD_LIBS+=usr/lib32/libpcap.so.7 OLD_LIBS+=usr/lib32/libufs.so.5 OLD_LIBS+=usr/lib32/libbsnmp.so.5 OLD_LIBS+=usr/lib32/libdwarf.so.2 OLD_LIBS+=usr/lib32/libopie.so.6 OLD_LIBS+=usr/lib32/librtld_db.so.1 OLD_LIBS+=usr/lib32/libtacplus.so.4 # 20110817: no more acd.4, ad.4, afd.4 and ast.4 OLD_FILES+=usr/share/man/man4/acd.4.gz OLD_FILES+=usr/share/man/man4/ad.4.gz OLD_FILES+=usr/share/man/man4/afd.4.gz OLD_FILES+=usr/share/man/man4/ast.4.gz # 20110718: no longer useful in the age of rc.d OLD_FILES+=usr/sbin/named.reconfig OLD_FILES+=usr/sbin/named.reload OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz OLD_FILES+=usr/share/man/man8/named.reload.8.gz # 20110716: bind 9.8.0 import OLD_LIBS+=usr/lib/liblwres.so.50 OLD_FILES+=usr/share/doc/bind9/KNOWN-DEFECTS OLD_FILES+=usr/share/doc/bind9/NSEC3-NOTES OLD_FILES+=usr/share/doc/bind9/README.idnkit OLD_FILES+=usr/share/doc/bind9/README.pkcs11 # 20110709: vm_map_clean.9 -> vm_map_sync.9 OLD_FILES+=usr/share/man/man9/vm_map_clean.9.gz # 20110709: Catch up with removal of these functions. 
OLD_FILES+=usr/share/man/man9/vm_page_copy.9.gz OLD_FILES+=usr/share/man/man9/vm_page_protect.9.gz OLD_FILES+=usr/share/man/man9/vm_page_zero_fill.9.gz # 20110707: script no longer needed by /etc/rc.d/nfsd OLD_FILES+=etc/rc.d/nfsserver # 20110705: files moved so both NFS clients can share them OLD_FILES+=usr/include/nfsclient/krpc.h OLD_FILES+=usr/include/nfsclient/nfsdiskless.h # 20110705: the switch of default NFS client to the new one OLD_FILES+=sbin/mount_newnfs OLD_FILES+=usr/share/man/man8/mount_newnfs.8.gz OLD_FILES+=usr/include/nfsclient/nfs_kdtrace.h # 20110628: calendar.msk removed OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk # 20110517: libpkg removed OLD_FILES+=usr/include/pkg.h OLD_FILES+=usr/lib/libpkg.a OLD_FILES+=usr/lib/libpkg.so OLD_LIBS+=usr/lib/libpkg.so.0 OLD_FILES+=usr/lib/libpkg_p.a OLD_FILES+=usr/lib32/libpkg.a OLD_FILES+=usr/lib32/libpkg.so OLD_LIBS+=usr/lib32/libpkg.so.0 OLD_FILES+=usr/lib32/libpkg_p.a # 20110517: libsbuf version bump OLD_LIBS+=lib/libsbuf.so.5 OLD_LIBS+=usr/lib32/libsbuf.so.5 # 20110502: new clang import which bumps version from 2.9 to 3.0 OLD_FILES+=usr/include/clang/2.9/emmintrin.h OLD_FILES+=usr/include/clang/2.9/mm_malloc.h OLD_FILES+=usr/include/clang/2.9/mmintrin.h OLD_FILES+=usr/include/clang/2.9/pmmintrin.h OLD_FILES+=usr/include/clang/2.9/tmmintrin.h OLD_FILES+=usr/include/clang/2.9/xmmintrin.h OLD_DIRS+=usr/include/clang/2.9 # 20110417: removal of Objective-C support OLD_FILES+=usr/include/objc/encoding.h OLD_FILES+=usr/include/objc/hash.h OLD_FILES+=usr/include/objc/NXConstStr.h OLD_FILES+=usr/include/objc/objc-api.h OLD_FILES+=usr/include/objc/objc-decls.h OLD_FILES+=usr/include/objc/objc-list.h OLD_FILES+=usr/include/objc/objc.h OLD_FILES+=usr/include/objc/Object.h OLD_FILES+=usr/include/objc/Protocol.h OLD_FILES+=usr/include/objc/runtime.h OLD_FILES+=usr/include/objc/sarray.h OLD_FILES+=usr/include/objc/thr.h OLD_FILES+=usr/include/objc/typedstream.h OLD_FILES+=usr/lib/libobjc.a OLD_FILES+=usr/lib/libobjc.so OLD_FILES+=usr/lib/libobjc_p.a OLD_FILES+=usr/libexec/cc1obj OLD_LIBS+=usr/lib/libobjc.so.4 OLD_DIRS+=usr/include/objc OLD_FILES+=usr/lib32/libobjc.a OLD_FILES+=usr/lib32/libobjc.so OLD_FILES+=usr/lib32/libobjc_p.a OLD_LIBS+=usr/lib32/libobjc.so.4 # 20110331: firmware.img created at build time OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img # 20110224: sticky.8 -> sticky.7 OLD_FILES+=usr/share/man/man8/sticky.8.gz # 20110220: new clang import which bumps version from 2.8 to 2.9 OLD_FILES+=usr/include/clang/2.8/emmintrin.h OLD_FILES+=usr/include/clang/2.8/mm_malloc.h OLD_FILES+=usr/include/clang/2.8/mmintrin.h OLD_FILES+=usr/include/clang/2.8/pmmintrin.h OLD_FILES+=usr/include/clang/2.8/tmmintrin.h OLD_FILES+=usr/include/clang/2.8/xmmintrin.h OLD_DIRS+=usr/include/clang/2.8 # 20110119: netinet/sctp_cc_functions.h removed OLD_FILES+=usr/include/netinet/sctp_cc_functions.h # 20110119: Remove SYSCTL_*X* sysctl additions. OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \ usr/share/man/man9/SYSCTL_XLONG.9.gz # 20110112: Update dialog to new version, rename old libdialog to libodialog, # removing associated man pages and header files. 
OLD_FILES+=usr/share/man/man3/draw_shadow.3.gz \ usr/share/man/man3/draw_box.3.gz usr/share/man/man3/line_edit.3.gz \ usr/share/man/man3/strheight.3.gz usr/share/man/man3/strwidth.3.gz \ usr/share/man/man3/dialog_create_rc.3.gz \ usr/share/man/man3/dialog_yesno.3.gz usr/share/man/man3/dialog_noyes.3.gz \ usr/share/man/man3/dialog_prgbox.3.gz \ usr/share/man/man3/dialog_textbox.3.gz usr/share/man/man3/dialog_menu.3.gz \ usr/share/man/man3/dialog_checklist.3.gz \ usr/share/man/man3/dialog_radiolist.3.gz \ usr/share/man/man3/dialog_inputbox.3.gz \ usr/share/man/man3/dialog_clear_norefresh.3.gz \ usr/share/man/man3/dialog_clear.3.gz usr/share/man/man3/dialog_update.3.gz \ usr/share/man/man3/dialog_fselect.3.gz \ usr/share/man/man3/dialog_notify.3.gz \ usr/share/man/man3/dialog_mesgbox.3.gz \ usr/share/man/man3/dialog_gauge.3.gz usr/share/man/man3/init_dialog.3.gz \ usr/share/man/man3/end_dialog.3.gz usr/share/man/man3/use_helpfile.3.gz \ usr/share/man/man3/use_helpline.3.gz usr/share/man/man3/get_helpline.3.gz \ usr/share/man/man3/restore_helpline.3.gz \ usr/share/man/man3/dialog_msgbox.3.gz \ usr/share/man/man3/dialog_ftree.3.gz usr/share/man/man3/dialog_tree.3.gz \ usr/share/examples/dialog/README usr/share/examples/dialog/checklist \ usr/share/examples/dialog/ftreebox usr/share/examples/dialog/infobox \ usr/share/examples/dialog/inputbox usr/share/examples/dialog/menubox \ usr/share/examples/dialog/msgbox usr/share/examples/dialog/prgbox \ usr/share/examples/dialog/radiolist usr/share/examples/dialog/textbox \ usr/share/examples/dialog/treebox usr/share/examples/dialog/yesno \ usr/share/examples/libdialog/Makefile usr/share/examples/libdialog/check1.c\ usr/share/examples/libdialog/check2.c usr/share/examples/libdialog/check3.c\ usr/share/examples/libdialog/dselect.c \ usr/share/examples/libdialog/fselect.c \ usr/share/examples/libdialog/ftree1.c \ usr/share/examples/libdialog/ftree1.test \ usr/share/examples/libdialog/ftree2.c \ usr/share/examples/libdialog/ftree2.test \ usr/share/examples/libdialog/gauge.c usr/share/examples/libdialog/input1.c \ usr/share/examples/libdialog/input2.c usr/share/examples/libdialog/menu1.c \ usr/share/examples/libdialog/menu2.c usr/share/examples/libdialog/menu3.c \ usr/share/examples/libdialog/msg.c usr/share/examples/libdialog/prgbox.c \ usr/share/examples/libdialog/radio1.c usr/share/examples/libdialog/radio2.c\ usr/share/examples/libdialog/radio3.c usr/share/examples/libdialog/text.c \ usr/share/examples/libdialog/tree.c usr/share/examples/libdialog/yesno.c OLD_DIRS+=usr/share/examples/libdialog usr/share/examples/dialog # 20101114: Remove long-obsolete MAKEDEV.8 OLD_FILES+=usr/share/man/man8/MAKEDEV.8.gz # 20101112: vgonel(9) has gone to private API a while ago OLD_FILES+=usr/share/man/man9/vgonel.9.gz # 20101112: removed gasp.info OLD_FILES+=usr/share/info/gasp.info.gz # 20101109: machine/mutex.h removed OLD_FILES+=usr/include/machine/mutex.h # 20101109: headers moved from machine/ to x86/ .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/mptable.h .endif # 20101101: headers moved from machine/ to x86/ .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/apicreg.h OLD_FILES+=usr/include/machine/mca.h .endif # 20101020: catch up with vm_page_sleep_if_busy rename OLD_FILES+=usr/share/man/man9/vm_page_sleep_busy.9.gz # 20101018: taskqueue(9) updates OLD_FILES+=usr/share/man/man9/taskqueue_find.9.gz # 20101011: removed subblock.h from liblzma OLD_FILES+=usr/include/lzma/subblock.h # 
20101002: removed manpath.config OLD_FILES+=etc/manpath.config OLD_FILES+=usr/share/examples/etc/manpath.config # 20100910: renamed sbuf_overflowed to sbuf_error OLD_FILES+=usr/share/man/man9/sbuf_overflowed.9.gz # 20100815: retired last traces of chooseproc(9) OLD_FILES+=usr/share/man/man9/chooseproc.9.gz # 20100806: removal of unused libcompat routines OLD_FILES+=usr/share/man/man3/ascftime.3.gz OLD_FILES+=usr/share/man/man3/cfree.3.gz OLD_FILES+=usr/share/man/man3/cftime.3.gz OLD_FILES+=usr/share/man/man3/getpw.3.gz # 20100801: tzdata2010k import OLD_FILES+=usr/share/zoneinfo/Pacific/Ponape OLD_FILES+=usr/share/zoneinfo/Pacific/Truk # 20100725: acpi_aiboost(4) removal. OLD_FILES+=usr/share/man/man4/acpi_aiboost.4.gz # 20100724: nfsclient/nfs_lock.h moved to nfs/nfs_lock.h OLD_FILES+=usr/include/nfsclient/nfs_lock.h # 20100720: new clang import which bumps version from 2.0 to 2.8 OLD_FILES+=usr/include/clang/2.0/emmintrin.h OLD_FILES+=usr/include/clang/2.0/mm_malloc.h OLD_FILES+=usr/include/clang/2.0/mmintrin.h OLD_FILES+=usr/include/clang/2.0/pmmintrin.h OLD_FILES+=usr/include/clang/2.0/tmmintrin.h OLD_FILES+=usr/include/clang/2.0/xmmintrin.h OLD_DIRS+=usr/include/clang/2.0 # 20100706: removed pc-sysinstall's detect-vmware.sh OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh # 20100701: [powerpc] removed .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/machine/intr.h .endif # 20100514: library version bump for versioned symbols for liblzma OLD_LIBS+=usr/lib/liblzma.so.0 OLD_LIBS+=usr/lib32/liblzma.so.0 # 20100511: move GCC-specific headers to /usr/include/gcc .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/emmintrin.h OLD_FILES+=usr/include/mm_malloc.h OLD_FILES+=usr/include/pmmintrin.h OLD_FILES+=usr/include/xmmintrin.h .endif .if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" || ${TARGET_ARCH} == "arm" OLD_FILES+=usr/include/mmintrin.h .endif .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/altivec.h OLD_FILES+=usr/include/ppc-asm.h OLD_FILES+=usr/include/spe.h .endif # 20100416: [mips] removed .if ${TARGET_ARCH} == "mips" OLD_FILES+=usr/include/machine/psl.h .endif # 20100415: [mips] removed unused headers .if ${TARGET_ARCH} == "mips" OLD_FILES+=usr/include/machine/archtype.h OLD_FILES+=usr/include/machine/segments.h OLD_FILES+=usr/include/machine/rm7000.h OLD_FILES+=usr/include/machine/defs.h OLD_FILES+=usr/include/machine/queue.h .endif # 20100326: gcpio removal OLD_FILES+=usr/bin/gcpio OLD_FILES+=usr/share/info/cpio.info.gz OLD_FILES+=usr/share/man/man1/gcpio.1.gz # 20100322: libz update OLD_LIBS+=lib/libz.so.5 OLD_LIBS+=usr/lib32/libz.so.5 # 20100314: removal of regexp.h OLD_FILES+=usr/include/regexp.h OLD_FILES+=usr/share/man/man3/regexp.3.gz OLD_FILES+=usr/share/man/man3/regsub.3.gz # 20100303: actual removal of utmp.h OLD_FILES+=usr/include/utmp.h # 20100208: man pages moved .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/alpm.4.gz OLD_FILES+=usr/share/man/man4/i386/amdpm.4.gz OLD_FILES+=usr/share/man/man4/i386/mcd.4.gz OLD_FILES+=usr/share/man/man4/i386/padlock.4.gz OLD_FILES+=usr/share/man/man4/i386/pcf.4.gz OLD_FILES+=usr/share/man/man4/i386/scd.4.gz OLD_FILES+=usr/share/man/man4/i386/viapm.4.gz .endif # 20100122: move BSDL bc/dc USD documents to /usr/share/doc/usd OLD_FILES+=usr/share/doc/papers/bc.ascii.gz OLD_FILES+=usr/share/doc/papers/dc.ascii.gz # 20100120: replacing GNU bc/dc with BSDL versions OLD_FILES+=usr/share/examples/bc/ckbook.b OLD_FILES+=usr/share/examples/bc/pi.b 
OLD_FILES+=usr/share/examples/bc/primes.b OLD_FILES+=usr/share/examples/bc/twins.b OLD_FILES+=usr/share/info/dc.info.gz OLD_DIRS+=usr/share/examples/bc # 20100114: removal of ttyslot(3) OLD_FILES+=usr/share/man/man3/ttyslot.3.gz # 20100113: remove utmp.h, replace it by utmpx.h OLD_FILES+=usr/share/man/man3/login.3.gz OLD_FILES+=usr/share/man/man3/logout.3.gz OLD_FILES+=usr/share/man/man3/logwtmp.3.gz OLD_FILES+=usr/share/man/man3/ulog_endutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxline.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxuser.3.gz OLD_FILES+=usr/share/man/man3/ulog_pututxline.3.gz OLD_FILES+=usr/share/man/man3/ulog_setutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_setutxfile.3.gz OLD_FILES+=usr/share/man/man5/lastlog.5.gz OLD_FILES+=usr/share/man/man5/utmp.5.gz OLD_FILES+=usr/share/man/man5/wtmp.5.gz OLD_LIBS+=lib/libutil.so.8 OLD_LIBS+=usr/lib32/libutil.so.8 # 20100105: new userland semaphore implementation OLD_FILES+=usr/include/sys/semaphore.h # 20100103: ntptrace(8) removed OLD_FILES+=usr/sbin/ntptrace OLD_FILES+=usr/share/man/man8/ntptrace.8.gz # 20091229: remove no longer relevant examples OLD_FILES+=usr/share/examples/pppd/auth-down.sample OLD_FILES+=usr/share/examples/pppd/auth-up.sample OLD_FILES+=usr/share/examples/pppd/chap-secrets.sample OLD_FILES+=usr/share/examples/pppd/chat.sh.sample OLD_FILES+=usr/share/examples/pppd/ip-down.sample OLD_FILES+=usr/share/examples/pppd/ip-up.sample OLD_FILES+=usr/share/examples/pppd/options.sample OLD_FILES+=usr/share/examples/pppd/pap-secrets.sample OLD_FILES+=usr/share/examples/pppd/ppp.deny.sample OLD_FILES+=usr/share/examples/pppd/ppp.shells.sample OLD_DIRS+=usr/share/examples/pppd OLD_FILES+=usr/share/examples/slattach/unit-command.sh OLD_DIRS+=usr/share/examples/slattach OLD_FILES+=usr/share/examples/sliplogin/slip.hosts OLD_FILES+=usr/share/examples/sliplogin/slip.login OLD_FILES+=usr/share/examples/sliplogin/slip.logout OLD_FILES+=usr/share/examples/sliplogin/slip.slparms OLD_DIRS+=usr/share/examples/sliplogin OLD_FILES+=usr/share/examples/startslip/sldown.sh OLD_FILES+=usr/share/examples/startslip/slip.sh OLD_FILES+=usr/share/examples/startslip/slup.sh OLD_DIRS+=usr/share/examples/startslip # 20091202: unify rc.firewall and rc.firewall6. 
OLD_FILES+=etc/rc.d/ip6fw OLD_FILES+=etc/rc.firewall6 OLD_FILES+=usr/share/examples/etc/rc.firewall6 # 20091117: removal of rc.early(8) link OLD_FILES+=usr/share/man/man8/rc.early.8.gz # 20091117: usr/share/zoneinfo/GMT link removed OLD_FILES+=usr/share/zoneinfo/GMT # 20091027: pselect.3 implemented as syscall OLD_FILES+=usr/share/man/man3/pselect.3.gz # 20091005: fusword.9 and susword.9 removed OLD_FILES+=usr/share/man/man9/fusword.9.gz OLD_FILES+=usr/share/man/man9/susword.9.gz # 20090909: vesa and dpms promoted to be i386/amd64 common OLD_FILES+=usr/include/machine/pc/vesa.h OLD_FILES+=usr/share/man/man4/i386/dpms.4.gz # 20090904: remove lukemftpd OLD_FILES+=usr/libexec/lukemftpd OLD_FILES+=usr/share/man/man5/ftpd.conf.5.gz OLD_FILES+=usr/share/man/man5/ftpusers.5.gz OLD_FILES+=usr/share/man/man8/lukemftpd.8.gz # 20090902: BSD.{x11,x11-4}.dist are dead and BSD.local.dist lives in ports/ OLD_FILES+=etc/mtree/BSD.local.dist OLD_FILES+=etc/mtree/BSD.x11.dist OLD_FILES+=etc/mtree/BSD.x11-4.dist # 20090812: net80211 documentation overhaul OLD_FILES+=usr/share/man/man9/ieee80211_add_rates.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_add_xrates.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_alloc_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_begin_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_cfgget.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_cfgset.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_chan2ieee.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_chan2mode.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_create_ibss.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_crypto_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_crypto_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_decap.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_dump_pkt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_dup_bss.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_encap.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_end_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_find_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_fix_rate.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_free_allnodes.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_ieee2mhz.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_ioctl.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_lookup_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media2rate.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_change.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_init.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_status.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_mhz2ieee.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_next_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_lateattach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_print_essid.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_proto_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_proto_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_rate2media.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_recv_mgmt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_send_mgmt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_setmode.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_timeout_nodes.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_watchdog.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_wep_crypt.9.gz # 20090801: vimage.h removed in favour of vnet.h OLD_FILES+=usr/include/sys/vimage.h # 20101208: libbsnmp was moved to usr/lib OLD_LIBS+=lib/libbsnmp.so.5 # 20090719: 
library version bump for 8.0 OLD_LIBS+=lib/libalias.so.6 OLD_LIBS+=lib/libavl.so.1 OLD_LIBS+=lib/libbegemot.so.3 OLD_LIBS+=lib/libbsdxml.so.3 OLD_LIBS+=lib/libbsnmp.so.4 OLD_LIBS+=lib/libcam.so.4 OLD_LIBS+=lib/libcrypt.so.4 OLD_LIBS+=lib/libcrypto.so.5 OLD_LIBS+=lib/libctf.so.1 OLD_LIBS+=lib/libdevstat.so.6 OLD_LIBS+=lib/libdtrace.so.1 OLD_LIBS+=lib/libedit.so.6 OLD_LIBS+=lib/libgeom.so.4 OLD_LIBS+=lib/libipsec.so.3 OLD_LIBS+=lib/libipx.so.4 OLD_LIBS+=lib/libkiconv.so.3 OLD_LIBS+=lib/libkvm.so.4 OLD_LIBS+=lib/libmd.so.4 OLD_LIBS+=lib/libncurses.so.7 OLD_LIBS+=lib/libncursesw.so.7 OLD_LIBS+=lib/libnvpair.so.1 OLD_LIBS+=lib/libpcap.so.6 OLD_LIBS+=lib/libreadline.so.7 OLD_LIBS+=lib/libsbuf.so.4 OLD_LIBS+=lib/libufs.so.4 OLD_LIBS+=lib/libumem.so.1 OLD_LIBS+=lib/libutil.so.7 OLD_LIBS+=lib/libuutil.so.1 OLD_LIBS+=lib/libz.so.4 OLD_LIBS+=lib/libzfs.so.1 OLD_LIBS+=lib/libzpool.so.1 OLD_LIBS+=usr/lib/libarchive.so.4 OLD_LIBS+=usr/lib/libauditd.so.4 OLD_LIBS+=usr/lib/libbluetooth.so.3 OLD_LIBS+=usr/lib/libbsm.so.2 OLD_LIBS+=usr/lib/libbz2.so.3 OLD_LIBS+=usr/lib/libcalendar.so.4 OLD_LIBS+=usr/lib/libcom_err.so.4 OLD_LIBS+=usr/lib/libdevinfo.so.4 OLD_LIBS+=usr/lib/libdialog.so.6 OLD_LIBS+=usr/lib/libdwarf.so.1 OLD_LIBS+=usr/lib/libfetch.so.5 OLD_LIBS+=usr/lib/libform.so.4 OLD_LIBS+=usr/lib/libformw.so.4 OLD_LIBS+=usr/lib/libftpio.so.7 OLD_LIBS+=usr/lib/libgnuregex.so.4 OLD_LIBS+=usr/lib/libgpib.so.2 OLD_LIBS+=usr/lib/libhistory.so.7 OLD_LIBS+=usr/lib/libmagic.so.3 OLD_LIBS+=usr/lib/libmemstat.so.2 OLD_LIBS+=usr/lib/libmenu.so.4 OLD_LIBS+=usr/lib/libmenuw.so.4 OLD_LIBS+=usr/lib/libmilter.so.4 OLD_LIBS+=usr/lib/libncp.so.3 OLD_LIBS+=usr/lib/libnetgraph.so.3 OLD_LIBS+=usr/lib/libngatm.so.3 OLD_LIBS+=usr/lib/libobjc.so.3 OLD_LIBS+=usr/lib/libopie.so.5 OLD_LIBS+=usr/lib/libpam.so.4 OLD_LIBS+=usr/lib/libpanel.so.4 OLD_LIBS+=usr/lib/libpanelw.so.4 OLD_LIBS+=usr/lib/libpmc.so.4 OLD_LIBS+=usr/lib/libproc.so.1 OLD_LIBS+=usr/lib/libradius.so.3 OLD_LIBS+=usr/lib/librpcsvc.so.4 OLD_LIBS+=usr/lib/libsdp.so.3 OLD_LIBS+=usr/lib/libsmb.so.3 OLD_LIBS+=usr/lib/libssh.so.4 OLD_LIBS+=usr/lib/libssl.so.5 OLD_LIBS+=usr/lib/libtacplus.so.3 OLD_LIBS+=usr/lib/libugidfw.so.3 OLD_LIBS+=usr/lib/libusb.so.1 OLD_LIBS+=usr/lib/libusbhid.so.3 OLD_LIBS+=usr/lib/libvgl.so.5 OLD_LIBS+=usr/lib/libwrap.so.5 OLD_LIBS+=usr/lib/libypclnt.so.3 OLD_LIBS+=usr/lib/pam_chroot.so.4 OLD_LIBS+=usr/lib/pam_deny.so.4 OLD_LIBS+=usr/lib/pam_echo.so.4 OLD_LIBS+=usr/lib/pam_exec.so.4 OLD_LIBS+=usr/lib/pam_ftpusers.so.4 OLD_LIBS+=usr/lib/pam_group.so.4 OLD_LIBS+=usr/lib/pam_guest.so.4 OLD_LIBS+=usr/lib/pam_krb5.so.4 OLD_LIBS+=usr/lib/pam_ksu.so.4 OLD_LIBS+=usr/lib/pam_lastlog.so.4 OLD_LIBS+=usr/lib/pam_login_access.so.4 OLD_LIBS+=usr/lib/pam_nologin.so.4 OLD_LIBS+=usr/lib/pam_opie.so.4 OLD_LIBS+=usr/lib/pam_opieaccess.so.4 OLD_LIBS+=usr/lib/pam_passwdqc.so.4 OLD_LIBS+=usr/lib/pam_permit.so.4 OLD_LIBS+=usr/lib/pam_radius.so.4 OLD_LIBS+=usr/lib/pam_rhosts.so.4 OLD_LIBS+=usr/lib/pam_rootok.so.4 OLD_LIBS+=usr/lib/pam_securetty.so.4 OLD_LIBS+=usr/lib/pam_self.so.4 OLD_LIBS+=usr/lib/pam_ssh.so.4 OLD_LIBS+=usr/lib/pam_tacplus.so.4 OLD_LIBS+=usr/lib/pam_unix.so.4 OLD_LIBS+=usr/lib/snmp_atm.so.5 OLD_LIBS+=usr/lib/snmp_bridge.so.5 OLD_LIBS+=usr/lib/snmp_hostres.so.5 OLD_LIBS+=usr/lib/snmp_mibII.so.5 OLD_LIBS+=usr/lib/snmp_netgraph.so.5 OLD_LIBS+=usr/lib/snmp_pf.so.5 OLD_LIBS+=usr/lib32/libalias.so.6 OLD_LIBS+=usr/lib32/libarchive.so.4 OLD_LIBS+=usr/lib32/libauditd.so.4 OLD_LIBS+=usr/lib32/libavl.so.1 OLD_LIBS+=usr/lib32/libbegemot.so.3 
OLD_LIBS+=usr/lib32/libbluetooth.so.3 OLD_LIBS+=usr/lib32/libbsdxml.so.3 OLD_LIBS+=usr/lib32/libbsm.so.2 OLD_LIBS+=usr/lib32/libbsnmp.so.4 OLD_LIBS+=usr/lib32/libbz2.so.3 OLD_LIBS+=usr/lib32/libcalendar.so.4 OLD_LIBS+=usr/lib32/libcam.so.4 OLD_LIBS+=usr/lib32/libcom_err.so.4 OLD_LIBS+=usr/lib32/libcrypt.so.4 OLD_LIBS+=usr/lib32/libcrypto.so.5 OLD_LIBS+=usr/lib32/libctf.so.1 OLD_LIBS+=usr/lib32/libdevinfo.so.4 OLD_LIBS+=usr/lib32/libdevstat.so.6 OLD_LIBS+=usr/lib32/libdialog.so.6 OLD_LIBS+=usr/lib32/libdtrace.so.1 OLD_LIBS+=usr/lib32/libdwarf.so.1 OLD_LIBS+=usr/lib32/libedit.so.6 OLD_LIBS+=usr/lib32/libfetch.so.5 OLD_LIBS+=usr/lib32/libform.so.4 OLD_LIBS+=usr/lib32/libformw.so.4 OLD_LIBS+=usr/lib32/libftpio.so.7 OLD_LIBS+=usr/lib32/libgeom.so.4 OLD_LIBS+=usr/lib32/libgnuregex.so.4 OLD_LIBS+=usr/lib32/libgpib.so.2 OLD_LIBS+=usr/lib32/libhistory.so.7 OLD_LIBS+=usr/lib32/libipsec.so.3 OLD_LIBS+=usr/lib32/libipx.so.4 OLD_LIBS+=usr/lib32/libkiconv.so.3 OLD_LIBS+=usr/lib32/libkvm.so.4 OLD_LIBS+=usr/lib32/libmagic.so.3 OLD_LIBS+=usr/lib32/libmd.so.4 OLD_LIBS+=usr/lib32/libmemstat.so.2 OLD_LIBS+=usr/lib32/libmenu.so.4 OLD_LIBS+=usr/lib32/libmenuw.so.4 OLD_LIBS+=usr/lib32/libmilter.so.4 OLD_LIBS+=usr/lib32/libncp.so.3 OLD_LIBS+=usr/lib32/libncurses.so.7 OLD_LIBS+=usr/lib32/libncursesw.so.7 OLD_LIBS+=usr/lib32/libnetgraph.so.3 OLD_LIBS+=usr/lib32/libngatm.so.3 OLD_LIBS+=usr/lib32/libnvpair.so.1 OLD_LIBS+=usr/lib32/libobjc.so.3 OLD_LIBS+=usr/lib32/libopie.so.5 OLD_LIBS+=usr/lib32/libpam.so.4 OLD_LIBS+=usr/lib32/libpanel.so.4 OLD_LIBS+=usr/lib32/libpanelw.so.4 OLD_LIBS+=usr/lib32/libpcap.so.6 OLD_LIBS+=usr/lib32/libpmc.so.4 OLD_LIBS+=usr/lib32/libproc.so.1 OLD_LIBS+=usr/lib32/libradius.so.3 OLD_LIBS+=usr/lib32/libreadline.so.7 OLD_LIBS+=usr/lib32/librpcsvc.so.4 OLD_LIBS+=usr/lib32/libsbuf.so.4 OLD_LIBS+=usr/lib32/libsdp.so.3 OLD_LIBS+=usr/lib32/libsmb.so.3 OLD_LIBS+=usr/lib32/libssh.so.4 OLD_LIBS+=usr/lib32/libssl.so.5 OLD_LIBS+=usr/lib32/libtacplus.so.3 OLD_LIBS+=usr/lib32/libufs.so.4 OLD_LIBS+=usr/lib32/libugidfw.so.3 OLD_LIBS+=usr/lib32/libumem.so.1 OLD_LIBS+=usr/lib32/libusb.so.1 OLD_LIBS+=usr/lib32/libusbhid.so.3 OLD_LIBS+=usr/lib32/libutil.so.7 OLD_LIBS+=usr/lib32/libuutil.so.1 OLD_LIBS+=usr/lib32/libvgl.so.5 OLD_LIBS+=usr/lib32/libwrap.so.5 OLD_LIBS+=usr/lib32/libypclnt.so.3 OLD_LIBS+=usr/lib32/libz.so.4 OLD_LIBS+=usr/lib32/libzfs.so.1 OLD_LIBS+=usr/lib32/libzpool.so.1 OLD_LIBS+=usr/lib32/pam_chroot.so.4 OLD_LIBS+=usr/lib32/pam_deny.so.4 OLD_LIBS+=usr/lib32/pam_echo.so.4 OLD_LIBS+=usr/lib32/pam_exec.so.4 OLD_LIBS+=usr/lib32/pam_ftpusers.so.4 OLD_LIBS+=usr/lib32/pam_group.so.4 OLD_LIBS+=usr/lib32/pam_guest.so.4 OLD_LIBS+=usr/lib32/pam_krb5.so.4 OLD_LIBS+=usr/lib32/pam_ksu.so.4 OLD_LIBS+=usr/lib32/pam_lastlog.so.4 OLD_LIBS+=usr/lib32/pam_login_access.so.4 OLD_LIBS+=usr/lib32/pam_nologin.so.4 OLD_LIBS+=usr/lib32/pam_opie.so.4 OLD_LIBS+=usr/lib32/pam_opieaccess.so.4 OLD_LIBS+=usr/lib32/pam_passwdqc.so.4 OLD_LIBS+=usr/lib32/pam_permit.so.4 OLD_LIBS+=usr/lib32/pam_radius.so.4 OLD_LIBS+=usr/lib32/pam_rhosts.so.4 OLD_LIBS+=usr/lib32/pam_rootok.so.4 OLD_LIBS+=usr/lib32/pam_securetty.so.4 OLD_LIBS+=usr/lib32/pam_self.so.4 OLD_LIBS+=usr/lib32/pam_ssh.so.4 OLD_LIBS+=usr/lib32/pam_tacplus.so.4 OLD_LIBS+=usr/lib32/pam_unix.so.4 # 20090718: the gdm pam.d file is no longer required. OLD_FILES+=etc/pam.d/gdm # 20090714: net_add_domain(9) renamed to domain_add(9) OLD_FILES+=usr/share/man/man9/net_add_domain.9.gz # 20090713: vimage container structs removed. 
OLD_FILES+=usr/include/netinet/vinet.h OLD_FILES+=usr/include/netinet6/vinet6.h OLD_FILES+=usr/include/netipsec/vipsec.h # 20090712: ieee80211.4 -> net80211.4 OLD_FILES+=usr/share/man/man4/ieee80211.4.gz # 20090711: typo fixed, kproc_resume,.9 -> kproc_resume.9 OLD_FILES+=usr/share/man/man9/kproc_resume,.9.gz # 20090709: msgctl.3 msgget.3 msgrcv.3 msgsnd.3 manual pages moved OLD_FILES+=usr/share/man/man3/msgctl.3.gz OLD_FILES+=usr/share/man/man3/msgget.3.gz OLD_FILES+=usr/share/man/man3/msgrcv.3.gz OLD_FILES+=usr/share/man/man3/msgsnd.3.gz # 20090630: old kernel RPC implementation removal OLD_FILES+=usr/include/nfs/rpcv2.h # 20090624: update usbdi(9) OLD_FILES+=usr/share/man/man9/usbd_abort_default_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_abort_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_alloc_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_alloc_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall_async.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_toggle.9.gz OLD_FILES+=usr/share/man/man9/usbd_close_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_device2interface_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_do_request_async.9.gz OLD_FILES+=usr/share/man/man9/usbd_do_request_flags_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_endpoint_count.9.gz OLD_FILES+=usr/share/man/man9/usbd_find_edesc.9.gz OLD_FILES+=usr/share/man/man9/usbd_find_idesc.9.gz OLD_FILES+=usr/share/man/man9/usbd_free_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_free_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_desc.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_desc_full.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_device_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_endpoint_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_interface_altindex.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_interface_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_no_alts.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_quirks.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_speed.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_string.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_string_desc.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_xfer_status.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface2device_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface2endpoint_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface_count.9.gz OLD_FILES+=usr/share/man/man9/usbd_open_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_open_pipe_intr.9.gz OLD_FILES+=usr/share/man/man9/usbd_pipe2device_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_config_index.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_config_no.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_interface.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_default_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_isoc_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_sync_transfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_transfer.9.gz OLD_FILES+=usr/share/man/man9/usb_find_desc.9.gz # 20090623: number of headers needed for a usb driver reduced OLD_FILES+=usr/include/dev/usb/usb_defs.h OLD_FILES+=usr/include/dev/usb/usb_error.h OLD_FILES+=usr/include/dev/usb/usb_handle_request.h OLD_FILES+=usr/include/dev/usb/usb_hid.h OLD_FILES+=usr/include/dev/usb/usb_lookup.h 
OLD_FILES+=usr/include/dev/usb/usb_mfunc.h OLD_FILES+=usr/include/dev/usb/usb_parse.h OLD_FILES+=usr/include/dev/usb/usb_revision.h # 20090609: devclass_add_driver is no longer public OLD_FILES+=usr/share/man/man9/devclass_add_driver.9.gz OLD_FILES+=usr/share/man/man9/devclass_delete_driver.9.gz OLD_FILES+=usr/share/man/man9/devclass_find_driver.9.gz # 20090605: removal of clists OLD_FILES+=usr/include/sys/clist.h # 20090602: removal of window(1) OLD_FILES+=usr/bin/window OLD_FILES+=usr/share/man/man1/window.1.gz # 20090531: bind 9.6.1rc1 import OLD_LIBS+=usr/lib/liblwres.so.30 # 20090530: removal of early.sh OLD_FILES+=etc/rc.d/early.sh # 20090527: renaming of S{LIST,TAILQ}_REMOVE_NEXT() to _REMOVE_AFTER() OLD_FILES+=usr/share/man/man3/SLIST_REMOVE_NEXT.3.gz OLD_FILES+=usr/share/man/man3/STAILQ_REMOVE_NEXT.3.gz # 20090527: removal of legacy USB stack OLD_FILES+=usr/include/legacy/dev/usb/dsbr100io.h OLD_FILES+=usr/include/legacy/dev/usb/ehcireg.h OLD_FILES+=usr/include/legacy/dev/usb/ehcivar.h OLD_FILES+=usr/include/legacy/dev/usb/hid.h OLD_FILES+=usr/include/legacy/dev/usb/if_urtwreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_urtwvar.h OLD_FILES+=usr/include/legacy/dev/usb/ohcireg.h OLD_FILES+=usr/include/legacy/dev/usb/ohcivar.h OLD_FILES+=usr/include/legacy/dev/usb/rio500_usb.h OLD_FILES+=usr/include/legacy/dev/usb/rt2573_ucode.h OLD_FILES+=usr/include/legacy/dev/usb/sl811hsreg.h OLD_FILES+=usr/include/legacy/dev/usb/sl811hsvar.h OLD_FILES+=usr/include/legacy/dev/usb/ubser.h OLD_FILES+=usr/include/legacy/dev/usb/ucomvar.h OLD_FILES+=usr/include/legacy/dev/usb/udbp.h OLD_FILES+=usr/include/legacy/dev/usb/uftdireg.h OLD_FILES+=usr/include/legacy/dev/usb/ugraphire_rdesc.h OLD_FILES+=usr/include/legacy/dev/usb/uhcireg.h OLD_FILES+=usr/include/legacy/dev/usb/uhcivar.h OLD_FILES+=usr/include/legacy/dev/usb/usb.h OLD_FILES+=usr/include/legacy/dev/usb/usb_mem.h OLD_FILES+=usr/include/legacy/dev/usb/usb_port.h OLD_FILES+=usr/include/legacy/dev/usb/usb_quirks.h OLD_FILES+=usr/include/legacy/dev/usb/usbcdc.h OLD_FILES+=usr/include/legacy/dev/usb/usbdi.h OLD_FILES+=usr/include/legacy/dev/usb/usbdi_util.h OLD_FILES+=usr/include/legacy/dev/usb/usbdivar.h OLD_FILES+=usr/include/legacy/dev/usb/usbhid.h OLD_FILES+=usr/include/legacy/dev/usb/uxb360gp_rdesc.h OLD_DIRS+=usr/include/legacy/dev/usb OLD_DIRS+=usr/include/legacy/dev OLD_DIRS+=usr/include/legacy # 20090526: removal of makekey(8) OLD_FILES+=usr/libexec/makekey OLD_FILES+=usr/share/man/man8/makekey.8.gz # 20090522: removal of University of Michigan NFSv4 client OLD_FILES+=etc/rc.d/idmapd OLD_FILES+=sbin/idmapd OLD_FILES+=sbin/mount_nfs4 OLD_FILES+=usr/share/man/man8/idmapd.8.gz OLD_FILES+=usr/share/man/man8/mount_nfs4.8.gz # 20090513: removal of legacy versions of USB network interface drivers OLD_FILES+=usr/include/legacy/dev/usb/if_upgtvar.h OLD_FILES+=usr/include/legacy/dev/usb/usb_ethersubr.h # 20090417: removal of legacy versions of USB network interface drivers OLD_FILES+=usr/include/legacy/dev/usb/if_auereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_axereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_cdcereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_cuereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_kuereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_ruereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_rumreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_rumvar.h OLD_FILES+=usr/include/legacy/dev/usb/if_udavreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_uralreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_uralvar.h 
OLD_FILES+=usr/include/legacy/dev/usb/if_zydfw.h OLD_FILES+=usr/include/legacy/dev/usb/if_zydreg.h OLD_FILES+=usr/include/legacy/dev/usb/kue_fw.h # 20090416: removal of ar(4), ray(4), sr(4), raycontrol(8) OLD_FILES+=usr/sbin/raycontrol OLD_FILES+=usr/share/man/man4/i386/ar.4.gz OLD_FILES+=usr/share/man/man4/i386/ray.4.gz OLD_FILES+=usr/share/man/man4/i386/sr.4.gz OLD_FILES+=usr/share/man/man8/raycontrol.8.gz # 20090410: VOP_LEASE.9 removed OLD_FILES+=usr/share/man/man9/VOP_LEASE.9.gz # 20090406: usb_sw_transfer.h removed OLD_FILES+=usr/include/dev/usb/usb_sw_transfer.h # 20090405: removal of if_ppp(4) and if_sl(4) OLD_FILES+=sbin/slattach rescue/slattach OLD_FILES+=sbin/startslip rescue/startslip OLD_FILES+=usr/include/net/if_ppp.h OLD_FILES+=usr/include/net/if_pppvar.h OLD_FILES+=usr/include/net/if_slvar.h OLD_FILES+=usr/include/net/ppp_comp.h OLD_FILES+=usr/include/net/slip.h OLD_FILES+=usr/sbin/sliplogin OLD_FILES+=usr/sbin/slstat OLD_FILES+=usr/sbin/pppd OLD_FILES+=usr/sbin/pppstats OLD_FILES+=usr/share/man/man1/startslip.1.gz OLD_FILES+=usr/share/man/man4/if_ppp.4.gz OLD_FILES+=usr/share/man/man4/if_sl.4.gz OLD_FILES+=usr/share/man/man4/ppp.4.gz OLD_FILES+=usr/share/man/man4/sl.4.gz OLD_FILES+=usr/share/man/man8/pppd.8.gz OLD_FILES+=usr/share/man/man8/pppstats.8.gz OLD_FILES+=usr/share/man/man8/slattach.8.gz OLD_FILES+=usr/share/man/man8/slip.8.gz OLD_FILES+=usr/share/man/man8/sliplogin.8.gz OLD_FILES+=usr/share/man/man8/slstat.8.gz # 20090321: libpcap upgraded to 1.0.0 OLD_LIBS+=lib/libpcap.so.5 OLD_LIBS+=usr/lib32/libpcap.so.5 # 20090319: uscanner(4) has been removed OLD_FILES+=usr/share/man/man4/uscanner.4.gz # 20090313: k8temp(4) renamed to amdtemp(4) OLD_FILES+=usr/share/man/man4/k8temp.4.gz # 20090308: libusb.so.1 renamed OLD_LIBS+=usr/lib/libusb20.so.1 OLD_FILES+=usr/lib/libusb20.a OLD_FILES+=usr/lib/libusb20.so OLD_FILES+=usr/lib/libusb20_p.a OLD_FILES+=usr/include/libusb20_compat01.h OLD_FILES+=usr/include/libusb20_compat10.h OLD_LIBS+=usr/lib32/libusb20.so.1 OLD_FILES+=usr/lib32/libusb20.a OLD_FILES+=usr/lib32/libusb20.so OLD_FILES+=usr/lib32/libusb20_p.a # 20090226: libmp(3) functions renamed OLD_LIBS+=usr/lib/libmp.so.6 OLD_LIBS+=usr/lib32/libmp.so.6 # 20090223: changeover of USB stacks OLD_FILES+=usr/include/dev/usb2/include/ufm2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/urio2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/usb2_cdc.h OLD_FILES+=usr/include/dev/usb2/include/usb2_defs.h OLD_FILES+=usr/include/dev/usb2/include/usb2_devid.h OLD_FILES+=usr/include/dev/usb2/include/usb2_devtable.h OLD_FILES+=usr/include/dev/usb2/include/usb2_endian.h OLD_FILES+=usr/include/dev/usb2/include/usb2_error.h OLD_FILES+=usr/include/dev/usb2/include/usb2_hid.h OLD_FILES+=usr/include/dev/usb2/include/usb2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/usb2_mfunc.h OLD_FILES+=usr/include/dev/usb2/include/usb2_revision.h OLD_FILES+=usr/include/dev/usb2/include/usb2_standard.h OLD_DIRS+=usr/include/dev/usb2/include OLD_DIRS+=usr/include/dev/usb2 OLD_FILES+=usr/include/dev/usb/dsbr100io.h OLD_FILES+=usr/include/dev/usb/ehcireg.h OLD_FILES+=usr/include/dev/usb/ehcivar.h OLD_FILES+=usr/include/dev/usb/hid.h OLD_FILES+=usr/include/dev/usb/if_auereg.h OLD_FILES+=usr/include/dev/usb/if_axereg.h OLD_FILES+=usr/include/dev/usb/if_cdcereg.h OLD_FILES+=usr/include/dev/usb/if_cuereg.h OLD_FILES+=usr/include/dev/usb/if_kuereg.h OLD_FILES+=usr/include/dev/usb/if_ruereg.h OLD_FILES+=usr/include/dev/usb/if_rumreg.h OLD_FILES+=usr/include/dev/usb/if_rumvar.h 
OLD_FILES+=usr/include/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/dev/usb/kue_fw.h
OLD_FILES+=usr/include/dev/usb/ohcireg.h
OLD_FILES+=usr/include/dev/usb/ohcivar.h
OLD_FILES+=usr/include/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/dev/usb/ubser.h
OLD_FILES+=usr/include/dev/usb/ucomvar.h
OLD_FILES+=usr/include/dev/usb/udbp.h
OLD_FILES+=usr/include/dev/usb/uftdireg.h
OLD_FILES+=usr/include/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/dev/usb/uhcireg.h
OLD_FILES+=usr/include/dev/usb/uhcivar.h
OLD_FILES+=usr/include/dev/usb/usb_ethersubr.h
OLD_FILES+=usr/include/dev/usb/usb_mem.h
OLD_FILES+=usr/include/dev/usb/usb_port.h
OLD_FILES+=usr/include/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/dev/usb/usbcdc.h
OLD_FILES+=usr/include/dev/usb/usbdivar.h
OLD_FILES+=usr/include/dev/usb/uxb360gp_rdesc.h
OLD_FILES+=usr/sbin/usbdevs
OLD_FILES+=usr/share/man/man8/usbdevs.8.gz
# 20090203: removal of pccard header files
OLD_FILES+=usr/include/pccard/cardinfo.h
OLD_FILES+=usr/include/pccard/cis.h
OLD_DIRS+=usr/include/pccard
# 20090203: adding_user.8 moved to adding_user.7
OLD_FILES+=usr/share/man/man8/adding_user.8.gz
# 20090122: tzdata2009a import
OLD_FILES+=usr/share/zoneinfo/Asia/Katmandu
# 20090102: file 4.26 import
OLD_FILES+=usr/share/misc/magic.mime
OLD_FILES+=usr/share/misc/magic.mime.mgc
# 20081223: bind 9.4.3 import, nsupdate.8 moved to nsupdate.1
OLD_FILES+=usr/share/man/man8/nsupdate.8.gz
# 20081223: ipprotosw.h removed
OLD_FILES+=usr/include/netinet/ipprotosw.h
# 20081123: vfs_mountedon.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mountedon.9.gz
# 20081023: FREE.9 and MALLOC.9 removed
OLD_FILES+=usr/share/man/man9/FREE.9.gz
OLD_FILES+=usr/share/man/man9/MALLOC.9.gz
# 20080928: removal of inaccurate device_ids(9) manual page
OLD_FILES+=usr/share/man/man9/device_ids.9.gz
OLD_FILES+=usr/share/man/man9/major.9.gz
OLD_FILES+=usr/share/man/man9/minor.9.gz
OLD_FILES+=usr/share/man/man9/umajor.9.gz
OLD_FILES+=usr/share/man/man9/uminor.9.gz
# 20080917: removal of manpage for axed kernel primitive suser(9)
OLD_FILES+=usr/share/man/man9/suser.9.gz
OLD_FILES+=usr/share/man/man9/suser_cred.9.gz
# 20080913: pax removed from rescue
OLD_FILES+=rescue/pax
# 20080823: removal of pt_chown, no longer needed to implement grantpt(3)
OLD_FILES+=usr/libexec/pt_chown
# 20080822: ntp 4.2.4p5 import
OLD_FILES+=usr/share/doc/ntp/driver23.html
OLD_FILES+=usr/share/doc/ntp/driver24.html
# 20080821: several man pages moved from man4.i386 to man4
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/acpi_aiboost.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_asus.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_fujitsu.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_ibm.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_panasonic.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_sony.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_toshiba.4.gz
OLD_FILES+=usr/share/man/man4/i386/ichwd.4.gz
OLD_FILES+=usr/share/man/man4/i386/if_ndis.4.gz
OLD_FILES+=usr/share/man/man4/i386/io.4.gz
OLD_FILES+=usr/share/man/man4/i386/linux.4.gz
OLD_FILES+=usr/share/man/man4/i386/ndis.4.gz
.endif
# 20080820: MPSAFE TTY layer integrated
OLD_FILES+=usr/include/sys/linedisc.h OLD_FILES+=usr/share/man/man3/posix_openpt.3.gz # 20080725: sgtty.h removed OLD_FILES+=usr/include/sgtty.h # 20080706: bsdlabel(8) removed on powerpc .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=sbin/bsdlabel OLD_FILES+=usr/share/man/man8/bsdlabel.8.gz .endif # 20080704: sbsh(4) removed OLD_FILES+=usr/share/man/man4/if_sbsh.4.gz OLD_FILES+=usr/share/man/man4/sbsh.4.gz # 20080704: cnw(4) removed OLD_FILES+=usr/share/man/man4/if_cnw.4.gz OLD_FILES+=usr/share/man/man4/cnw.4.gz # 20080704: oltr(4) removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/if_oltr.4.gz OLD_FILES+=usr/share/man/man4/i386/oltr.4.gz .endif # 20080704: arl(4) removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/sbin/arlcontrol OLD_FILES+=usr/share/man/man4/i386/arl.4.gz OLD_FILES+=usr/share/man/man8/arlcontrol.8.gz .endif # 20080703: sunlabel only for sparc64 .if ${TARGET_ARCH} != "sparc64" OLD_FILES+=sbin/sunlabel OLD_FILES+=usr/share/man/man8/sunlabel.8.gz .endif # 20080701: wpa_supplicant.conf moved to share/examples/etc/ OLD_FILES+=usr/share/examples/wpa_supplicant/wpa_supplicant.conf OLD_DIRS+=usr/share/examples/wpa_supplicant # 20080614: pecoff image activator removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/pecoff_machdep.h .endif # 20080614: sgtty removed OLD_FILES+=usr/include/sys/ttychars.h OLD_FILES+=usr/include/sys/ttydev.h OLD_FILES+=usr/share/man/man3/gtty.3.gz OLD_FILES+=usr/share/man/man3/stty.3.gz # 20080609: gpt(8) removed OLD_FILES+=sbin/gpt OLD_FILES+=usr/share/man/man8/gpt.8.gz # 20080525: I4B removed OLD_FILES+=etc/isdn/answer OLD_FILES+=etc/isdn/isdntel OLD_FILES+=etc/isdn/record OLD_FILES+=etc/isdn/tell OLD_FILES+=etc/isdn/tell-record OLD_FILES+=etc/isdn/unknown_incoming OLD_FILES+=etc/isdn/holidays.D OLD_FILES+=etc/isdn/isdnd.rates.A OLD_FILES+=etc/isdn/isdnd.rates.D OLD_FILES+=etc/isdn/isdnd.rates.F OLD_FILES+=etc/isdn/isdnd.rates.L OLD_FILES+=etc/isdn/isdnd.rates.UK.BT OLD_FILES+=etc/isdn/isdnd.rc.sample OLD_FILES+=etc/isdn/isdntel.alias.sample OLD_DIRS+=etc/isdn OLD_FILES+=etc/rc.d/isdnd OLD_FILES+=usr/include/i4b/i4b_cause.h OLD_FILES+=usr/include/i4b/i4b_debug.h OLD_FILES+=usr/include/i4b/i4b_ioctl.h OLD_FILES+=usr/include/i4b/i4b_rbch_ioctl.h OLD_FILES+=usr/include/i4b/i4b_tel_ioctl.h OLD_FILES+=usr/include/i4b/i4b_trace.h OLD_DIRS+=usr/include/i4b OLD_FILES+=usr/sbin/dtmfdecode OLD_FILES+=usr/sbin/g711conv OLD_FILES+=usr/sbin/isdnd OLD_FILES+=usr/sbin/isdndebug OLD_FILES+=usr/sbin/isdndecode OLD_FILES+=usr/sbin/isdnmonitor OLD_FILES+=usr/sbin/isdnphone OLD_FILES+=usr/sbin/isdntel OLD_FILES+=usr/sbin/isdntelctl OLD_FILES+=usr/sbin/isdntrace OLD_FILES+=usr/share/isdn/0.al OLD_FILES+=usr/share/isdn/1.al OLD_FILES+=usr/share/isdn/2.al OLD_FILES+=usr/share/isdn/3.al OLD_FILES+=usr/share/isdn/4.al OLD_FILES+=usr/share/isdn/5.al OLD_FILES+=usr/share/isdn/6.al OLD_FILES+=usr/share/isdn/7.al OLD_FILES+=usr/share/isdn/8.al OLD_FILES+=usr/share/isdn/9.al OLD_FILES+=usr/share/isdn/beep.al OLD_FILES+=usr/share/isdn/msg.al OLD_DIRS+=usr/share/isdn OLD_FILES+=usr/share/man/man1/dtmfdecode.1.gz OLD_FILES+=usr/share/man/man1/g711conv.1.gz OLD_FILES+=usr/share/man/man4/i4b.4.gz OLD_FILES+=usr/share/man/man4/i4bcapi.4.gz OLD_FILES+=usr/share/man/man4/i4bctl.4.gz OLD_FILES+=usr/share/man/man4/i4bing.4.gz OLD_FILES+=usr/share/man/man4/i4bipr.4.gz OLD_FILES+=usr/share/man/man4/i4bisppp.4.gz OLD_FILES+=usr/share/man/man4/i4bq921.4.gz OLD_FILES+=usr/share/man/man4/i4bq931.4.gz OLD_FILES+=usr/share/man/man4/i4brbch.4.gz 
OLD_FILES+=usr/share/man/man4/i4btel.4.gz OLD_FILES+=usr/share/man/man4/i4btrc.4.gz OLD_FILES+=usr/share/man/man4/iavc.4.gz OLD_FILES+=usr/share/man/man4/isic.4.gz OLD_FILES+=usr/share/man/man4/ifpi.4.gz OLD_FILES+=usr/share/man/man4/ifpi2.4.gz OLD_FILES+=usr/share/man/man4/ifpnp.4.gz OLD_FILES+=usr/share/man/man4/ihfc.4.gz OLD_FILES+=usr/share/man/man4/itjc.4.gz OLD_FILES+=usr/share/man/man4/iwic.4.gz OLD_FILES+=usr/share/man/man5/isdnd.rc.5.gz OLD_FILES+=usr/share/man/man5/isdnd.rates.5.gz OLD_FILES+=usr/share/man/man5/isdnd.acct.5.gz OLD_FILES+=usr/share/man/man8/isdnd.8.gz OLD_FILES+=usr/share/man/man8/isdndebug.8.gz OLD_FILES+=usr/share/man/man8/isdndecode.8.gz OLD_FILES+=usr/share/man/man8/isdnmonitor.8.gz OLD_FILES+=usr/share/man/man8/isdnphone.8.gz OLD_FILES+=usr/share/man/man8/isdntel.8.gz OLD_FILES+=usr/share/man/man8/isdntelctl.8.gz OLD_FILES+=usr/share/man/man8/isdntrace.8.gz OLD_FILES+=usr/share/examples/isdn/contrib/README OLD_FILES+=usr/share/examples/isdn/contrib/anleitung.ppp OLD_FILES+=usr/share/examples/isdn/contrib/answer.c OLD_FILES+=usr/share/examples/isdn/contrib/answer.sh OLD_FILES+=usr/share/examples/isdn/contrib/convert.sh OLD_FILES+=usr/share/examples/isdn/contrib/hplay.c OLD_FILES+=usr/share/examples/isdn/contrib/i4b-ppp-newbie.txt OLD_FILES+=usr/share/examples/isdn/contrib/isdnctl OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct.pl OLD_FILES+=usr/share/examples/isdn/contrib/isdntelmux.c OLD_FILES+=usr/share/examples/isdn/contrib/mrtg-isp0.sh OLD_FILES+=usr/share/examples/isdn/i4brunppp/Makefile OLD_FILES+=usr/share/examples/isdn/i4brunppp/README OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp-isdnd.rc OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.8 OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.c OLD_FILES+=usr/share/examples/isdn/v21/Makefile OLD_FILES+=usr/share/examples/isdn/v21/README OLD_FILES+=usr/share/examples/isdn/v21/v21modem.c OLD_FILES+=usr/share/examples/isdn/FAQ OLD_FILES+=usr/share/examples/isdn/KERNEL OLD_FILES+=usr/share/examples/isdn/Overview OLD_FILES+=usr/share/examples/isdn/README OLD_FILES+=usr/share/examples/isdn/ROADMAP OLD_FILES+=usr/share/examples/isdn/ReleaseNotes OLD_FILES+=usr/share/examples/isdn/Resources OLD_FILES+=usr/share/examples/isdn/SupportedCards OLD_FILES+=usr/share/examples/isdn/ThankYou OLD_DIRS+=usr/share/examples/isdn/contrib OLD_DIRS+=usr/share/examples/isdn/i4brunppp OLD_DIRS+=usr/share/examples/isdn/v21 OLD_DIRS+=usr/share/examples/isdn OLD_FILES+=usr/share/examples/ppp/isdnd.rc OLD_FILES+=usr/share/examples/ppp/ppp.conf.isdn # 20080525: ng_atmpif removed OLD_FILES+=usr/include/netgraph/atm/ng_atmpif.h OLD_FILES+=usr/share/man/man4/ng_atmpif.4.gz # 20080522: pmap_addr_hint removed OLD_FILES+=usr/share/man/man9/pmap_addr_hint.9.gz # 20080517: ipsec_osdep.h removed OLD_FILES+=usr/include/netipsec/ipsec_osdep.h # 20080507: heimdal 1.1 import OLD_LIBS+=usr/lib/libasn1.so.9 OLD_LIBS+=usr/lib/libgssapi.so.9 OLD_LIBS+=usr/lib/libgssapi_krb5.so.9 OLD_LIBS+=usr/lib/libhdb.so.9 OLD_LIBS+=usr/lib/libkadm5clnt.so.9 OLD_LIBS+=usr/lib/libkadm5srv.so.9 OLD_LIBS+=usr/lib/libkafs5.so.9 OLD_LIBS+=usr/lib/libkrb5.so.9 OLD_LIBS+=usr/lib/libroken.so.9 OLD_LIBS+=usr/lib32/libgssapi.so.9 # 20080420: Symbol card support dropped OLD_FILES+=usr/include/dev/wi/spectrum24t_cf.h # 20080420: awi removal OLD_FILES+=usr/share/man/man4/awi.4.gz OLD_FILES+=usr/share/man/man4/if_awi.4.gz # 20080331: pkg_sign has been removed OLD_FILES+=usr/sbin/pkg_check 
OLD_FILES+=usr/sbin/pkg_sign OLD_FILES+=usr/share/man/man1/pkg_check.1.gz OLD_FILES+=usr/share/man/man1/pkg_sign.1.gz # 20080325: tzdata2008b import OLD_FILES+=usr/share/zoneinfo/Asia/Calcutta OLD_FILES+=usr/share/zoneinfo/Asia/Saigon # 20080314: stack_print(9) mlink fixed OLD_FILES+=usr/share/man/man9/stack_printf.9.gz # 20080312: libkse removal OLD_FILES+=usr/include/sys/kse.h OLD_FILES+=usr/lib/libkse.so OLD_LIBS+=usr/lib/libkse.so.3 OLD_FILES+=usr/share/man/man2/kse.2.gz OLD_FILES+=usr/share/man/man2/kse_create.2.gz OLD_FILES+=usr/share/man/man2/kse_exit.2.gz OLD_FILES+=usr/share/man/man2/kse_release.2.gz OLD_FILES+=usr/share/man/man2/kse_switchin.2.gz OLD_FILES+=usr/share/man/man2/kse_thr_interrupt.2.gz OLD_FILES+=usr/share/man/man2/kse_wakeup.2.gz OLD_FILES+=usr/lib32/libkse.so OLD_LIBS+=usr/lib32/libkse.so.3 # 20080225: bsdar/bsdranlib rename to ar/ranlib OLD_FILES+=usr/bin/bsdar OLD_FILES+=usr/bin/bsdranlib OLD_FILES+=usr/share/man/man1/bsdar.1.gz OLD_FILES+=usr/share/man/man1/bsdranlib.1.gz # 20080220: geom_lvm rename to geom_linux_lvm OLD_FILES+=usr/share/man/man4/geom_lvm.4.gz # 20080126: oldcard.4 removal OLD_FILES+=usr/share/man/man4/card.4.gz OLD_FILES+=usr/share/man/man4/oldcard.4.gz # 20080122: Removed from the tree OLD_FILES+=usr/share/man/man9/BUF_REFCNT.9.gz # 20080108: Moved to section 2 OLD_FILES+=usr/share/man/man3/shm_open.3.gz OLD_FILES+=usr/share/man/man3/shm_unlink.3.gz # 20071207: Merged with fortunes-o.real OLD_FILES+=usr/share/games/fortune/fortunes2-o OLD_FILES+=usr/share/games/fortune/fortunes2-o.dat # 20071201: Removal of XRPU driver OLD_FILES+=usr/include/sys/xrpuio.h # 20071129: Disabled static versions of libkse by default OLD_FILES+=usr/lib/libkse.a OLD_FILES+=usr/lib/libkse_p.a OLD_FILES+=usr/lib/libkse_pic.a OLD_FILES+=usr/lib32/libkse.a OLD_FILES+=usr/lib32/libkse_p.a OLD_FILES+=usr/lib32/libkse_pic.a # 20071129: Removed a Solaris compatibility header OLD_FILES+=usr/include/sys/_elf_solaris.h # 20071125: Renamed to pmc_get_msr() OLD_FILES+=usr/share/man/man3/pmc_x86_get_msr.3.gz # 20071108: Removed very crunch OLDCARD support file OLD_FILES+=etc/defaults/pccard.conf # 20071025: rc.d/nfslocking superseded by rc.d/lockd and rc.d/statd OLD_FILES+=etc/rc.d/nfslocking # 20070930: rename of cached to nscd OLD_FILES+=etc/cached.conf OLD_FILES+=etc/rc.d/cached OLD_FILES+=usr/sbin/cached OLD_FILES+=usr/share/man/man5/cached.conf.5.gz OLD_FILES+=usr/share/man/man8/cached.8.gz # 20070807: removal of PowerPC specific header file. 
.if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/machine/interruptvar.h .endif # 20070801: fast_ipsec.4 gone OLD_FILES+=usr/share/man/man4/fast_ipsec.4.gz # 20070715: netatm temporarily disconnected (removed 20080525) OLD_FILES+=rescue/atm OLD_FILES+=rescue/fore_dnld OLD_FILES+=rescue/ilmid OLD_FILES+=sbin/atm OLD_FILES+=sbin/fore_dnld OLD_FILES+=sbin/ilmid OLD_FILES+=usr/include/libatm.h OLD_FILES+=usr/include/netatm/atm.h OLD_FILES+=usr/include/netatm/atm_cm.h OLD_FILES+=usr/include/netatm/atm_if.h OLD_FILES+=usr/include/netatm/atm_ioctl.h OLD_FILES+=usr/include/netatm/atm_pcb.h OLD_FILES+=usr/include/netatm/atm_sap.h OLD_FILES+=usr/include/netatm/atm_sigmgr.h OLD_FILES+=usr/include/netatm/atm_stack.h OLD_FILES+=usr/include/netatm/atm_sys.h OLD_FILES+=usr/include/netatm/atm_var.h OLD_FILES+=usr/include/netatm/atm_vc.h OLD_FILES+=usr/include/netatm/ipatm/ipatm.h OLD_FILES+=usr/include/netatm/ipatm/ipatm_serv.h OLD_FILES+=usr/include/netatm/ipatm/ipatm_var.h OLD_FILES+=usr/include/netatm/port.h OLD_FILES+=usr/include/netatm/queue.h OLD_FILES+=usr/include/netatm/sigpvc/sigpvc_var.h OLD_FILES+=usr/include/netatm/spans/spans_cls.h OLD_FILES+=usr/include/netatm/spans/spans_kxdr.h OLD_FILES+=usr/include/netatm/spans/spans_var.h OLD_FILES+=usr/include/netatm/uni/sscf_uni.h OLD_FILES+=usr/include/netatm/uni/sscf_uni_var.h OLD_FILES+=usr/include/netatm/uni/sscop.h OLD_FILES+=usr/include/netatm/uni/sscop_misc.h OLD_FILES+=usr/include/netatm/uni/sscop_pdu.h OLD_FILES+=usr/include/netatm/uni/sscop_var.h OLD_FILES+=usr/include/netatm/uni/uni.h OLD_FILES+=usr/include/netatm/uni/uniip_var.h OLD_FILES+=usr/include/netatm/uni/unisig.h OLD_FILES+=usr/include/netatm/uni/unisig_decode.h OLD_FILES+=usr/include/netatm/uni/unisig_mbuf.h OLD_FILES+=usr/include/netatm/uni/unisig_msg.h OLD_FILES+=usr/include/netatm/uni/unisig_print.h OLD_FILES+=usr/include/netatm/uni/unisig_var.h OLD_FILES+=usr/lib/libatm.a OLD_FILES+=usr/lib/libatm_p.a OLD_FILES+=usr/sbin/atmarpd OLD_FILES+=usr/sbin/scspd OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atm.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atmarpd.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/fore_dnld.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/ilmid.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/scspd.8.gz OLD_FILES+=usr/share/man/man8/atm.8.gz OLD_FILES+=usr/share/man/man8/atmarpd.8.gz OLD_FILES+=usr/share/man/man8/fore_dnld.8.gz OLD_FILES+=usr/share/man/man8/ilmid.8.gz OLD_FILES+=usr/share/man/man8/scspd.8.gz OLD_FILES+=usr/share/examples/atm/NOTES OLD_FILES+=usr/share/examples/atm/README OLD_FILES+=usr/share/examples/atm/Startup OLD_FILES+=usr/share/examples/atm/atm-config.sh OLD_FILES+=usr/share/examples/atm/atm-sockets.txt OLD_FILES+=usr/share/examples/atm/cpcs-design.txt OLD_FILES+=usr/share/examples/atm/fore-microcode.txt OLD_FILES+=usr/share/examples/atm/sscf-design.txt OLD_FILES+=usr/share/examples/atm/sscop-design.txt OLD_LIBS+=lib/libatm.so.5 OLD_LIBS+=usr/lib/libatm.so OLD_DIRS+=usr/include/netatm/sigpvc OLD_DIRS+=usr/include/netatm/spans OLD_DIRS+=usr/include/netatm/ipatm OLD_DIRS+=usr/include/netatm/uni OLD_DIRS+=usr/include/netatm OLD_DIRS+=usr/share/examples/atm OLD_FILES+=usr/lib32/libatm.a OLD_FILES+=usr/lib32/libatm.so OLD_LIBS+=usr/lib32/libatm.so.5 OLD_FILES+=usr/lib32/libatm_p.a # 20070705: I4B headers repo-copied to include/i4b/ .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/i4b_cause.h OLD_FILES+=usr/include/machine/i4b_debug.h OLD_FILES+=usr/include/machine/i4b_ioctl.h OLD_FILES+=usr/include/machine/i4b_rbch_ioctl.h 
OLD_FILES+=usr/include/machine/i4b_tel_ioctl.h OLD_FILES+=usr/include/machine/i4b_trace.h .endif # 20070703: pf 4.1 import OLD_FILES+=usr/libexec/ftp-proxy # 20070701: KAME IPSec removal OLD_FILES+=usr/include/netinet6/ah.h OLD_FILES+=usr/include/netinet6/ah6.h OLD_FILES+=usr/include/netinet6/ah_aesxcbcmac.h OLD_FILES+=usr/include/netinet6/esp.h OLD_FILES+=usr/include/netinet6/esp6.h OLD_FILES+=usr/include/netinet6/esp_aesctr.h OLD_FILES+=usr/include/netinet6/esp_camellia.h OLD_FILES+=usr/include/netinet6/esp_rijndael.h OLD_FILES+=usr/include/netinet6/ipsec.h OLD_FILES+=usr/include/netinet6/ipsec6.h OLD_FILES+=usr/include/netinet6/ipcomp.h OLD_FILES+=usr/include/netinet6/ipcomp6.h OLD_FILES+=usr/include/netkey/key.h OLD_FILES+=usr/include/netkey/key_debug.h OLD_FILES+=usr/include/netkey/key_var.h OLD_FILES+=usr/include/netkey/keydb.h OLD_FILES+=usr/include/netkey/keysock.h OLD_DIRS+=usr/include/netkey # 20070701: remove wicontrol OLD_FILES+=usr/sbin/wicontrol OLD_FILES+=usr/share/man/man8/wicontrol.8.gz # 20070625: umapfs removal OLD_FILES+=rescue/mount_umapfs OLD_FILES+=sbin/mount_umapfs OLD_FILES+=usr/include/fs/umapfs/umap.h OLD_FILES+=usr/share/man/man8/mount_umapfs.8.gz OLD_DIRS+=usr/include/fs/umapfs # 20070618: Removal of the PROTO.localhost* files OLD_FILES+=etc/namedb/PROTO.localhost-v6.rev OLD_FILES+=etc/namedb/PROTO.localhost.rev OLD_FILES+=etc/namedb/make-localhost # 20070618: shared library version bump OLD_LIBS+=lib/libalias.so.5 OLD_LIBS+=lib/libbsnmp.so.3 OLD_LIBS+=lib/libncurses.so.6 OLD_LIBS+=lib/libncursesw.so.6 OLD_LIBS+=lib/libreadline.so.6 OLD_LIBS+=usr/lib/libdialog.so.5 OLD_LIBS+=usr/lib/libgnuregex.so.3 OLD_LIBS+=usr/lib/libhistory.so.6 OLD_LIBS+=usr/lib/libpam.so.3 OLD_LIBS+=usr/lib/libssh.so.3 OLD_LIBS+=usr/lib/pam_chroot.so.3 OLD_LIBS+=usr/lib/pam_deny.so.3 OLD_LIBS+=usr/lib/pam_echo.so.3 OLD_LIBS+=usr/lib/pam_exec.so.3 OLD_LIBS+=usr/lib/pam_ftpusers.so.3 OLD_LIBS+=usr/lib/pam_group.so.3 OLD_LIBS+=usr/lib/pam_guest.so.3 OLD_LIBS+=usr/lib/pam_krb5.so.3 OLD_LIBS+=usr/lib/pam_ksu.so.3 OLD_LIBS+=usr/lib/pam_lastlog.so.3 OLD_LIBS+=usr/lib/pam_login_access.so.3 OLD_LIBS+=usr/lib/pam_nologin.so.3 OLD_LIBS+=usr/lib/pam_opie.so.3 OLD_LIBS+=usr/lib/pam_opieaccess.so.3 OLD_LIBS+=usr/lib/pam_passwdqc.so.3 OLD_LIBS+=usr/lib/pam_permit.so.3 OLD_LIBS+=usr/lib/pam_radius.so.3 OLD_LIBS+=usr/lib/pam_rhosts.so.3 OLD_LIBS+=usr/lib/pam_rootok.so.3 OLD_LIBS+=usr/lib/pam_securetty.so.3 OLD_LIBS+=usr/lib/pam_self.so.3 OLD_LIBS+=usr/lib/pam_ssh.so.3 OLD_LIBS+=usr/lib/pam_tacplus.so.3 OLD_LIBS+=usr/lib/pam_unix.so.3 OLD_LIBS+=usr/lib/snmp_atm.so.4 OLD_LIBS+=usr/lib/snmp_bridge.so.4 OLD_LIBS+=usr/lib/snmp_hostres.so.4 OLD_LIBS+=usr/lib/snmp_mibII.so.4 OLD_LIBS+=usr/lib/snmp_netgraph.so.4 OLD_LIBS+=usr/lib/snmp_pf.so.4 OLD_LIBS+=usr/lib32/libalias.so.5 OLD_LIBS+=usr/lib32/libbsnmp.so.3 OLD_LIBS+=usr/lib32/libdialog.so.5 OLD_LIBS+=usr/lib32/libgnuregex.so.3 OLD_LIBS+=usr/lib32/libhistory.so.6 OLD_LIBS+=usr/lib32/libncurses.so.6 OLD_LIBS+=usr/lib32/libncursesw.so.6 OLD_LIBS+=usr/lib32/libpam.so.3 OLD_LIBS+=usr/lib32/libreadline.so.6 OLD_LIBS+=usr/lib32/libssh.so.3 OLD_LIBS+=usr/lib32/pam_chroot.so.3 OLD_LIBS+=usr/lib32/pam_deny.so.3 OLD_LIBS+=usr/lib32/pam_echo.so.3 OLD_LIBS+=usr/lib32/pam_exec.so.3 OLD_LIBS+=usr/lib32/pam_ftpusers.so.3 OLD_LIBS+=usr/lib32/pam_group.so.3 OLD_LIBS+=usr/lib32/pam_guest.so.3 OLD_LIBS+=usr/lib32/pam_krb5.so.3 OLD_LIBS+=usr/lib32/pam_ksu.so.3 OLD_LIBS+=usr/lib32/pam_lastlog.so.3 OLD_LIBS+=usr/lib32/pam_login_access.so.3 OLD_LIBS+=usr/lib32/pam_nologin.so.3 
OLD_LIBS+=usr/lib32/pam_opie.so.3 OLD_LIBS+=usr/lib32/pam_opieaccess.so.3 OLD_LIBS+=usr/lib32/pam_passwdqc.so.3 OLD_LIBS+=usr/lib32/pam_permit.so.3 OLD_LIBS+=usr/lib32/pam_radius.so.3 OLD_LIBS+=usr/lib32/pam_rhosts.so.3 OLD_LIBS+=usr/lib32/pam_rootok.so.3 OLD_LIBS+=usr/lib32/pam_securetty.so.3 OLD_LIBS+=usr/lib32/pam_self.so.3 OLD_LIBS+=usr/lib32/pam_ssh.so.3 OLD_LIBS+=usr/lib32/pam_tacplus.so.3 OLD_LIBS+=usr/lib32/pam_unix.so.3 # 20070613: IPX over IP tunnel removal OLD_FILES+=usr/include/netipx/ipx_ip.h # 20070605: sched_core removal OLD_FILES+=usr/share/man/man4/sched_core.4.gz # 20070603: BIND 9.4.1 import OLD_LIBS+=usr/lib/liblwres.so.10 # 20070521: shared library version bump OLD_LIBS+=lib/libatm.so.4 OLD_LIBS+=lib/libbegemot.so.2 OLD_LIBS+=lib/libbsdxml.so.2 OLD_LIBS+=lib/libcam.so.3 OLD_LIBS+=lib/libcrypt.so.3 OLD_LIBS+=lib/libdevstat.so.5 OLD_LIBS+=lib/libedit.so.5 OLD_LIBS+=lib/libgeom.so.3 OLD_LIBS+=lib/libipsec.so.2 OLD_LIBS+=lib/libipx.so.3 OLD_LIBS+=lib/libkiconv.so.2 OLD_LIBS+=lib/libkse.so.2 OLD_LIBS+=lib/libkvm.so.3 OLD_LIBS+=lib/libm.so.4 OLD_LIBS+=lib/libmd.so.3 OLD_LIBS+=lib/libpcap.so.4 OLD_LIBS+=lib/libpthread.so.2 OLD_LIBS+=lib/libsbuf.so.3 OLD_LIBS+=lib/libthr.so.2 OLD_LIBS+=lib/libufs.so.3 OLD_LIBS+=lib/libutil.so.6 OLD_LIBS+=lib/libz.so.3 OLD_LIBS+=usr/lib/libbluetooth.so.2 OLD_LIBS+=usr/lib/libbsm.so.1 OLD_LIBS+=usr/lib/libbz2.so.2 OLD_LIBS+=usr/lib/libcalendar.so.3 OLD_LIBS+=usr/lib/libcom_err.so.3 OLD_LIBS+=usr/lib/libdevinfo.so.3 OLD_LIBS+=usr/lib/libfetch.so.4 OLD_LIBS+=usr/lib/libform.so.3 OLD_LIBS+=usr/lib/libformw.so.3 OLD_LIBS+=usr/lib/libftpio.so.6 OLD_LIBS+=usr/lib/libgpib.so.1 OLD_LIBS+=usr/lib/libkse.so.2 OLD_LIBS+=usr/lib/libmagic.so.2 OLD_LIBS+=usr/lib/libmemstat.so.1 OLD_LIBS+=usr/lib/libmenu.so.3 OLD_LIBS+=usr/lib/libmenuw.so.3 OLD_LIBS+=usr/lib/libmilter.so.3 OLD_LIBS+=usr/lib/libmp.so.5 OLD_LIBS+=usr/lib/libncp.so.2 OLD_LIBS+=usr/lib/libnetgraph.so.2 OLD_LIBS+=usr/lib/libngatm.so.2 OLD_LIBS+=usr/lib/libopie.so.4 OLD_LIBS+=usr/lib/libpanel.so.3 OLD_LIBS+=usr/lib/libpanelw.so.3 OLD_LIBS+=usr/lib/libpmc.so.3 OLD_LIBS+=usr/lib/libradius.so.2 OLD_LIBS+=usr/lib/librpcsvc.so.3 OLD_LIBS+=usr/lib/libsdp.so.2 OLD_LIBS+=usr/lib/libsmb.so.2 OLD_LIBS+=usr/lib/libstdc++.so.5 OLD_LIBS+=usr/lib/libtacplus.so.2 OLD_LIBS+=usr/lib/libthr.so.2 OLD_LIBS+=usr/lib/libthread_db.so.2 OLD_LIBS+=usr/lib/libugidfw.so.2 OLD_LIBS+=usr/lib/libusbhid.so.2 OLD_LIBS+=usr/lib/libvgl.so.4 OLD_LIBS+=usr/lib/libwrap.so.4 OLD_LIBS+=usr/lib/libypclnt.so.2 OLD_LIBS+=usr/lib/snmp_bridge.so.3 OLD_LIBS+=usr/lib/snmp_hostres.so.3 OLD_LIBS+=usr/lib32/libatm.so.4 OLD_LIBS+=usr/lib32/libbegemot.so.2 OLD_LIBS+=usr/lib32/libbluetooth.so.2 OLD_LIBS+=usr/lib32/libbsdxml.so.2 OLD_LIBS+=usr/lib32/libbsm.so.1 OLD_LIBS+=usr/lib32/libbz2.so.2 OLD_LIBS+=usr/lib32/libcalendar.so.3 OLD_LIBS+=usr/lib32/libcam.so.3 OLD_LIBS+=usr/lib32/libcom_err.so.3 OLD_LIBS+=usr/lib32/libcrypt.so.3 OLD_LIBS+=usr/lib32/libdevinfo.so.3 OLD_LIBS+=usr/lib32/libdevstat.so.5 OLD_LIBS+=usr/lib32/libedit.so.5 OLD_LIBS+=usr/lib32/libfetch.so.4 OLD_LIBS+=usr/lib32/libform.so.3 OLD_LIBS+=usr/lib32/libformw.so.3 OLD_LIBS+=usr/lib32/libftpio.so.6 OLD_LIBS+=usr/lib32/libgeom.so.3 OLD_LIBS+=usr/lib32/libgpib.so.1 OLD_LIBS+=usr/lib32/libipsec.so.2 OLD_LIBS+=usr/lib32/libipx.so.3 OLD_LIBS+=usr/lib32/libkiconv.so.2 OLD_LIBS+=usr/lib32/libkse.so.2 OLD_LIBS+=usr/lib32/libkvm.so.3 OLD_LIBS+=usr/lib32/libm.so.4 OLD_LIBS+=usr/lib32/libmagic.so.2 OLD_LIBS+=usr/lib32/libmd.so.3 OLD_LIBS+=usr/lib32/libmemstat.so.1 OLD_LIBS+=usr/lib32/libmenu.so.3 
OLD_LIBS+=usr/lib32/libmenuw.so.3 OLD_LIBS+=usr/lib32/libmilter.so.3 OLD_LIBS+=usr/lib32/libmp.so.5 OLD_LIBS+=usr/lib32/libncp.so.2 OLD_LIBS+=usr/lib32/libnetgraph.so.2 OLD_LIBS+=usr/lib32/libngatm.so.2 OLD_LIBS+=usr/lib32/libopie.so.4 OLD_LIBS+=usr/lib32/libpanel.so.3 OLD_LIBS+=usr/lib32/libpanelw.so.3 OLD_LIBS+=usr/lib32/libpcap.so.4 OLD_LIBS+=usr/lib32/libpmc.so.3 OLD_LIBS+=usr/lib32/libpthread.so.2 OLD_LIBS+=usr/lib32/libradius.so.2 OLD_LIBS+=usr/lib32/librpcsvc.so.3 OLD_LIBS+=usr/lib32/libsbuf.so.3 OLD_LIBS+=usr/lib32/libsdp.so.2 OLD_LIBS+=usr/lib32/libsmb.so.2 OLD_LIBS+=usr/lib32/libstdc++.so.5 OLD_LIBS+=usr/lib32/libtacplus.so.2 OLD_LIBS+=usr/lib32/libthr.so.2 OLD_LIBS+=usr/lib32/libthread_db.so.2 OLD_LIBS+=usr/lib32/libufs.so.3 OLD_LIBS+=usr/lib32/libugidfw.so.2 OLD_LIBS+=usr/lib32/libusbhid.so.2 OLD_LIBS+=usr/lib32/libutil.so.6 OLD_LIBS+=usr/lib32/libvgl.so.4 OLD_LIBS+=usr/lib32/libwrap.so.4 OLD_LIBS+=usr/lib32/libypclnt.so.2 OLD_LIBS+=usr/lib32/libz.so.3 # 20070519: GCC 4.2 OLD_FILES+=usr/bin/f77 OLD_FILES+=usr/bin/protoize OLD_FILES+=usr/include/g2c.h OLD_FILES+=usr/libexec/f771 OLD_FILES+=usr/share/info/g77.info.gz OLD_FILES+=usr/share/man/man1/f77.1.gz OLD_FILES+=usr/include/c++/3.4/algorithm OLD_FILES+=usr/include/c++/3.4/backward/algo.h OLD_FILES+=usr/include/c++/3.4/backward/algobase.h OLD_FILES+=usr/include/c++/3.4/backward/alloc.h OLD_FILES+=usr/include/c++/3.4/backward/backward_warning.h OLD_FILES+=usr/include/c++/3.4/backward/bvector.h OLD_FILES+=usr/include/c++/3.4/backward/complex.h OLD_FILES+=usr/include/c++/3.4/backward/defalloc.h OLD_FILES+=usr/include/c++/3.4/backward/deque.h OLD_FILES+=usr/include/c++/3.4/backward/fstream.h OLD_FILES+=usr/include/c++/3.4/backward/function.h OLD_FILES+=usr/include/c++/3.4/backward/hash_map.h OLD_FILES+=usr/include/c++/3.4/backward/hash_set.h OLD_FILES+=usr/include/c++/3.4/backward/hashtable.h OLD_FILES+=usr/include/c++/3.4/backward/heap.h OLD_FILES+=usr/include/c++/3.4/backward/iomanip.h OLD_FILES+=usr/include/c++/3.4/backward/iostream.h OLD_FILES+=usr/include/c++/3.4/backward/istream.h OLD_FILES+=usr/include/c++/3.4/backward/iterator.h OLD_FILES+=usr/include/c++/3.4/backward/list.h OLD_FILES+=usr/include/c++/3.4/backward/map.h OLD_FILES+=usr/include/c++/3.4/backward/multimap.h OLD_FILES+=usr/include/c++/3.4/backward/multiset.h OLD_FILES+=usr/include/c++/3.4/backward/new.h OLD_FILES+=usr/include/c++/3.4/backward/ostream.h OLD_FILES+=usr/include/c++/3.4/backward/pair.h OLD_FILES+=usr/include/c++/3.4/backward/queue.h OLD_FILES+=usr/include/c++/3.4/backward/rope.h OLD_FILES+=usr/include/c++/3.4/backward/set.h OLD_FILES+=usr/include/c++/3.4/backward/slist.h OLD_FILES+=usr/include/c++/3.4/backward/stack.h OLD_FILES+=usr/include/c++/3.4/backward/stream.h OLD_FILES+=usr/include/c++/3.4/backward/streambuf.h OLD_FILES+=usr/include/c++/3.4/backward/strstream OLD_FILES+=usr/include/c++/3.4/backward/tempbuf.h OLD_FILES+=usr/include/c++/3.4/backward/tree.h OLD_FILES+=usr/include/c++/3.4/backward/vector.h OLD_FILES+=usr/include/c++/3.4/bits/allocator.h OLD_FILES+=usr/include/c++/3.4/bits/atomic_word.h OLD_FILES+=usr/include/c++/3.4/bits/atomicity.h OLD_FILES+=usr/include/c++/3.4/bits/basic_file.h OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.h OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/3.4/bits/basic_string.h OLD_FILES+=usr/include/c++/3.4/bits/basic_string.tcc OLD_FILES+=usr/include/c++/3.4/bits/boost_concept_check.h OLD_FILES+=usr/include/c++/3.4/bits/c++allocator.h 
OLD_FILES+=usr/include/c++/3.4/bits/c++config.h OLD_FILES+=usr/include/c++/3.4/bits/c++io.h OLD_FILES+=usr/include/c++/3.4/bits/c++locale.h OLD_FILES+=usr/include/c++/3.4/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/3.4/bits/char_traits.h OLD_FILES+=usr/include/c++/3.4/bits/cmath.tcc OLD_FILES+=usr/include/c++/3.4/bits/codecvt.h OLD_FILES+=usr/include/c++/3.4/bits/codecvt_specializations.h OLD_FILES+=usr/include/c++/3.4/bits/concept_check.h OLD_FILES+=usr/include/c++/3.4/bits/concurrence.h OLD_FILES+=usr/include/c++/3.4/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_base.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_inline.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/3.4/bits/deque.tcc OLD_FILES+=usr/include/c++/3.4/bits/fstream.tcc OLD_FILES+=usr/include/c++/3.4/bits/functexcept.h OLD_FILES+=usr/include/c++/3.4/bits/gslice.h OLD_FILES+=usr/include/c++/3.4/bits/gslice_array.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-default.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-posix.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-single.h OLD_FILES+=usr/include/c++/3.4/bits/gthr.h OLD_FILES+=usr/include/c++/3.4/bits/indirect_array.h OLD_FILES+=usr/include/c++/3.4/bits/ios_base.h OLD_FILES+=usr/include/c++/3.4/bits/istream.tcc OLD_FILES+=usr/include/c++/3.4/bits/list.tcc OLD_FILES+=usr/include/c++/3.4/bits/locale_classes.h OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.h OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/3.4/bits/localefwd.h OLD_FILES+=usr/include/c++/3.4/bits/mask_array.h OLD_FILES+=usr/include/c++/3.4/bits/messages_members.h OLD_FILES+=usr/include/c++/3.4/bits/os_defines.h OLD_FILES+=usr/include/c++/3.4/bits/ostream.tcc OLD_FILES+=usr/include/c++/3.4/bits/postypes.h OLD_FILES+=usr/include/c++/3.4/bits/slice_array.h OLD_FILES+=usr/include/c++/3.4/bits/sstream.tcc OLD_FILES+=usr/include/c++/3.4/bits/stl_algo.h OLD_FILES+=usr/include/c++/3.4/bits/stl_algobase.h OLD_FILES+=usr/include/c++/3.4/bits/stl_bvector.h OLD_FILES+=usr/include/c++/3.4/bits/stl_construct.h OLD_FILES+=usr/include/c++/3.4/bits/stl_deque.h OLD_FILES+=usr/include/c++/3.4/bits/stl_function.h OLD_FILES+=usr/include/c++/3.4/bits/stl_heap.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/3.4/bits/stl_list.h OLD_FILES+=usr/include/c++/3.4/bits/stl_map.h OLD_FILES+=usr/include/c++/3.4/bits/stl_multimap.h OLD_FILES+=usr/include/c++/3.4/bits/stl_multiset.h OLD_FILES+=usr/include/c++/3.4/bits/stl_numeric.h OLD_FILES+=usr/include/c++/3.4/bits/stl_pair.h OLD_FILES+=usr/include/c++/3.4/bits/stl_queue.h OLD_FILES+=usr/include/c++/3.4/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/3.4/bits/stl_relops.h OLD_FILES+=usr/include/c++/3.4/bits/stl_set.h OLD_FILES+=usr/include/c++/3.4/bits/stl_stack.h OLD_FILES+=usr/include/c++/3.4/bits/stl_tempbuf.h OLD_FILES+=usr/include/c++/3.4/bits/stl_threads.h OLD_FILES+=usr/include/c++/3.4/bits/stl_tree.h OLD_FILES+=usr/include/c++/3.4/bits/stl_uninitialized.h OLD_FILES+=usr/include/c++/3.4/bits/stl_vector.h OLD_FILES+=usr/include/c++/3.4/bits/stream_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/streambuf.tcc OLD_FILES+=usr/include/c++/3.4/bits/streambuf_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/stringfwd.h OLD_FILES+=usr/include/c++/3.4/bits/time_members.h OLD_FILES+=usr/include/c++/3.4/bits/type_traits.h 
OLD_FILES+=usr/include/c++/3.4/bits/valarray_after.h OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.h OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.tcc OLD_FILES+=usr/include/c++/3.4/bits/valarray_before.h OLD_FILES+=usr/include/c++/3.4/bits/vector.tcc OLD_FILES+=usr/include/c++/3.4/bitset OLD_FILES+=usr/include/c++/3.4/cassert OLD_FILES+=usr/include/c++/3.4/cctype OLD_FILES+=usr/include/c++/3.4/cerrno OLD_FILES+=usr/include/c++/3.4/cfloat OLD_FILES+=usr/include/c++/3.4/ciso646 OLD_FILES+=usr/include/c++/3.4/climits OLD_FILES+=usr/include/c++/3.4/clocale OLD_FILES+=usr/include/c++/3.4/cmath OLD_FILES+=usr/include/c++/3.4/complex OLD_FILES+=usr/include/c++/3.4/csetjmp OLD_FILES+=usr/include/c++/3.4/csignal OLD_FILES+=usr/include/c++/3.4/cstdarg OLD_FILES+=usr/include/c++/3.4/cstddef OLD_FILES+=usr/include/c++/3.4/cstdio OLD_FILES+=usr/include/c++/3.4/cstdlib OLD_FILES+=usr/include/c++/3.4/cstring OLD_FILES+=usr/include/c++/3.4/ctime OLD_FILES+=usr/include/c++/3.4/cwchar OLD_FILES+=usr/include/c++/3.4/cwctype OLD_FILES+=usr/include/c++/3.4/cxxabi.h OLD_FILES+=usr/include/c++/3.4/debug/bitset OLD_FILES+=usr/include/c++/3.4/debug/debug.h OLD_FILES+=usr/include/c++/3.4/debug/deque OLD_FILES+=usr/include/c++/3.4/debug/formatter.h OLD_FILES+=usr/include/c++/3.4/debug/hash_map OLD_FILES+=usr/include/c++/3.4/debug/hash_map.h OLD_FILES+=usr/include/c++/3.4/debug/hash_multimap.h OLD_FILES+=usr/include/c++/3.4/debug/hash_multiset.h OLD_FILES+=usr/include/c++/3.4/debug/hash_set OLD_FILES+=usr/include/c++/3.4/debug/hash_set.h OLD_FILES+=usr/include/c++/3.4/debug/list OLD_FILES+=usr/include/c++/3.4/debug/map OLD_FILES+=usr/include/c++/3.4/debug/map.h OLD_FILES+=usr/include/c++/3.4/debug/multimap.h OLD_FILES+=usr/include/c++/3.4/debug/multiset.h OLD_FILES+=usr/include/c++/3.4/debug/safe_base.h OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.h OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.tcc OLD_FILES+=usr/include/c++/3.4/debug/safe_sequence.h OLD_FILES+=usr/include/c++/3.4/debug/set OLD_FILES+=usr/include/c++/3.4/debug/set.h OLD_FILES+=usr/include/c++/3.4/debug/string OLD_FILES+=usr/include/c++/3.4/debug/vector OLD_FILES+=usr/include/c++/3.4/deque OLD_FILES+=usr/include/c++/3.4/exception OLD_FILES+=usr/include/c++/3.4/exception_defines.h OLD_FILES+=usr/include/c++/3.4/ext/algorithm OLD_FILES+=usr/include/c++/3.4/ext/bitmap_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/debug_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/enc_filebuf.h OLD_FILES+=usr/include/c++/3.4/ext/functional OLD_FILES+=usr/include/c++/3.4/ext/hash_fun.h OLD_FILES+=usr/include/c++/3.4/ext/hash_map OLD_FILES+=usr/include/c++/3.4/ext/hash_set OLD_FILES+=usr/include/c++/3.4/ext/hashtable.h OLD_FILES+=usr/include/c++/3.4/ext/iterator OLD_FILES+=usr/include/c++/3.4/ext/malloc_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/memory OLD_FILES+=usr/include/c++/3.4/ext/mt_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/new_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/numeric OLD_FILES+=usr/include/c++/3.4/ext/pod_char_traits.h OLD_FILES+=usr/include/c++/3.4/ext/pool_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/rb_tree OLD_FILES+=usr/include/c++/3.4/ext/rope OLD_FILES+=usr/include/c++/3.4/ext/ropeimpl.h OLD_FILES+=usr/include/c++/3.4/ext/slist OLD_FILES+=usr/include/c++/3.4/ext/stdio_filebuf.h OLD_FILES+=usr/include/c++/3.4/ext/stdio_sync_filebuf.h OLD_FILES+=usr/include/c++/3.4/fstream OLD_FILES+=usr/include/c++/3.4/functional OLD_FILES+=usr/include/c++/3.4/iomanip OLD_FILES+=usr/include/c++/3.4/ios 
OLD_FILES+=usr/include/c++/3.4/iosfwd OLD_FILES+=usr/include/c++/3.4/iostream OLD_FILES+=usr/include/c++/3.4/istream OLD_FILES+=usr/include/c++/3.4/iterator OLD_FILES+=usr/include/c++/3.4/limits OLD_FILES+=usr/include/c++/3.4/list OLD_FILES+=usr/include/c++/3.4/locale OLD_FILES+=usr/include/c++/3.4/map OLD_FILES+=usr/include/c++/3.4/memory OLD_FILES+=usr/include/c++/3.4/new OLD_FILES+=usr/include/c++/3.4/numeric OLD_FILES+=usr/include/c++/3.4/ostream OLD_FILES+=usr/include/c++/3.4/queue OLD_FILES+=usr/include/c++/3.4/set OLD_FILES+=usr/include/c++/3.4/sstream OLD_FILES+=usr/include/c++/3.4/stack OLD_FILES+=usr/include/c++/3.4/stdexcept OLD_FILES+=usr/include/c++/3.4/streambuf OLD_FILES+=usr/include/c++/3.4/string OLD_FILES+=usr/include/c++/3.4/typeinfo OLD_FILES+=usr/include/c++/3.4/utility OLD_FILES+=usr/include/c++/3.4/valarray OLD_FILES+=usr/include/c++/3.4/vector OLD_DIRS+=usr/include/c++/3.4/backward OLD_DIRS+=usr/include/c++/3.4/bits OLD_DIRS+=usr/include/c++/3.4/debug OLD_DIRS+=usr/include/c++/3.4/ext OLD_DIRS+=usr/include/c++/3.4 # 20070510: zpool/zfs moved to /sbin OLD_FILES+=usr/sbin/zfs OLD_FILES+=usr/sbin/zpool # 20070423: rc.bluetooth (examples) removed OLD_FILES+=usr/share/examples/netgraph/bluetooth/rc.bluetooth OLD_DIRS+=usr/share/examples/netgraph/bluetooth # 20070421: worm.4 removed OLD_FILES+=usr/share/man/man4/worm.4.gz # 20070417: trunk(4) renamed to lagg(4) OLD_FILES+=usr/include/net/if_trunk.h # 20070409: uuidgen moved to /bin/ OLD_FILES+=usr/bin/uuidgen # 20070328: bzip2 1.0.4 OLD_FILES+=usr/share/info/bzip2.info.gz # 20070303: libarchive 2.0 OLD_LIBS+=usr/lib/libarchive.so.3 OLD_LIBS+=usr/lib32/libarchive.so.3 # 20070301: remove addr2ascii and ascii2addr OLD_FILES+=usr/share/man/man3/addr2ascii.3.gz OLD_FILES+=usr/share/man/man3/ascii2addr.3.gz # 20070225: vm_page_unmanage() removed OLD_FILES+=usr/share/man/man9/vm_page_unmanage.9.gz # 20070216: VFS_VPTOFH(9) -> VOP_VPTOFH(9) OLD_FILES+=usr/share/man/man9/VFS_VPTOFH.9.gz # 20070212: kame.4 removed OLD_FILES+=usr/share/man/man4/kame.4.gz # 20070201: remove libmytinfo link OLD_FILES+=usr/lib/libmytinfo.a OLD_FILES+=usr/lib/libmytinfo.so OLD_FILES+=usr/lib/libmytinfo_p.a OLD_FILES+=usr/lib/libmytinfow.a OLD_FILES+=usr/lib/libmytinfow.so OLD_FILES+=usr/lib/libmytinfow_p.a OLD_FILES+=usr/lib32/libmytinfo.a OLD_FILES+=usr/lib32/libmytinfo.so OLD_FILES+=usr/lib32/libmytinfo_p.a OLD_FILES+=usr/lib32/libmytinfow.a OLD_FILES+=usr/lib32/libmytinfow.so OLD_FILES+=usr/lib32/libmytinfow_p.a # 20070128: remove vnconfig OLD_FILES+=usr/sbin/vnconfig # 20070127: remove bpf_compat.h OLD_FILES+=usr/include/net/bpf_compat.h # 20070125: objformat bites the dust OLD_FILES+=usr/bin/objformat OLD_FILES+=usr/share/man/man1/objformat.1.gz OLD_FILES+=usr/include/objformat.h OLD_FILES+=usr/share/man/man3/getobjformat.3.gz # 20061201: remove symlink to *.so.4 libalias modules OLD_FILES+=usr/lib/libalias_cuseeme.so OLD_FILES+=usr/lib/libalias_dummy.so OLD_FILES+=usr/lib/libalias_ftp.so OLD_FILES+=usr/lib/libalias_irc.so OLD_FILES+=usr/lib/libalias_nbt.so OLD_FILES+=usr/lib/libalias_pptp.so OLD_FILES+=usr/lib/libalias_skinny.so OLD_FILES+=usr/lib/libalias_smedia.so # 20061201: remove old *.so.4 libalias modules OLD_FILES+=lib/libalias_cuseeme.so.4 OLD_FILES+=lib/libalias_dummy.so.4 OLD_FILES+=lib/libalias_ftp.so.4 OLD_FILES+=lib/libalias_irc.so.4 OLD_FILES+=lib/libalias_nbt.so.4 OLD_FILES+=lib/libalias_pptp.so.4 OLD_FILES+=lib/libalias_skinny.so.4 OLD_FILES+=lib/libalias_smedia.so.4 # 20061126: remove old man page 
OLD_FILES+=usr/share/man/man3/archive_read_set_bytes_per_block.3.gz # 20061125: remove old man page OLD_FILES+=usr/share/man/man9/devsw.9.gz # 20061122: remove obsolete mount programs OLD_FILES+=sbin/mount_devfs OLD_FILES+=sbin/mount_ext2fs OLD_FILES+=sbin/mount_fdescfs OLD_FILES+=sbin/mount_linprocfs OLD_FILES+=sbin/mount_procfs OLD_FILES+=sbin/mount_std OLD_FILES+=rescue/mount_devfs OLD_FILES+=rescue/mount_ext2fs OLD_FILES+=rescue/mount_fdescfs OLD_FILES+=rescue/mount_linprocfs OLD_FILES+=rescue/mount_procfs OLD_FILES+=rescue/mount_std OLD_FILES+=usr/share/man/man8/mount_devfs.8.gz OLD_FILES+=usr/share/man/man8/mount_ext2fs.8.gz OLD_FILES+=usr/share/man/man8/mount_fdescfs.8.gz OLD_FILES+=usr/share/man/man8/mount_linprocfs.8.gz OLD_FILES+=usr/share/man/man8/mount_procfs.8.gz OLD_FILES+=usr/share/man/man8/mount_std.8.gz # 20061116: uhidev.4 removed OLD_FILES+=usr/share/man/man4/uhidev.4.gz # 20061106: archive_write_prepare.3 removed OLD_FILES+=usr/share/man/man3/archive_write_prepare.3.gz # 20061018: pccardc removed OLD_FILES+=usr/sbin/pccardc usr/share/man/man8/pccardc.8.gz # 20060930: demangle.h from contrib/libstdc++/include/ext/ OLD_FILES+=usr/include/c++/3.4/ext/demangle.h # 20060929: mrouted removed OLD_FILES+=usr/sbin/map-mbone OLD_FILES+=usr/sbin/mrinfo OLD_FILES+=usr/sbin/mrouted OLD_FILES+=usr/sbin/mtrace OLD_FILES+=usr/share/man/man8/map-mbone.8.gz OLD_FILES+=usr/share/man/man8/mrinfo.8.gz OLD_FILES+=usr/share/man/man8/mrouted.8.gz OLD_FILES+=usr/share/man/man8/mtrace.8.gz # 20060924: tcpslice removed OLD_FILES+=usr/sbin/tcpslice OLD_FILES+=usr/share/man/man1/tcpslice.1.gz # 20060829: kvmdb cleanup script removed OLD_FILES+=etc/periodic/weekly/120.clean-kvmdb # 20060822: ramdisk{,-own} have been replaced by mdconfig{,2} OLD_FILES+=etc/rc.d/ramdisk OLD_FILES+=etc/rc.d/ramdisk-own # 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade OLD_FILES+=usr/include/openssl/eng_int.h OLD_FILES+=usr/include/openssl/hw_4758_cca_err.h OLD_FILES+=usr/include/openssl/hw_aep_err.h OLD_FILES+=usr/include/openssl/hw_atalla_err.h OLD_FILES+=usr/include/openssl/hw_cswift_err.h OLD_FILES+=usr/include/openssl/hw_ncipher_err.h OLD_FILES+=usr/include/openssl/hw_nuron_err.h OLD_FILES+=usr/include/openssl/hw_sureware_err.h OLD_FILES+=usr/include/openssl/hw_ubsec_err.h # 20060713: mount_linsysfs(8) never existed in 7.x OLD_FILES+=sbin/mount_linsysfs OLD_FILES+=usr/share/man/man8/mount_linsysfs.8.gz # 20060704: KAME compat file net_osdep.h removed OLD_FILES+=usr/include/net/net_osdep.h # 20060605: man page links removed by OpenBSM 1.0 alpha 6 import OLD_FILES+=usr/share/man/man3/au_to_socket.3.gz OLD_FILES+=usr/share/man/man3/au_to_socket_ex_128.3.gz OLD_FILES+=usr/share/man/man3/au_to_socket_ex_32.3.gz # 20060517: pcvt removed OLD_FILES+=usr/share/pcvt/README.FIRST OLD_FILES+=usr/share/pcvt/Etc/xmodmap-german OLD_FILES+=usr/share/pcvt/Etc/pcvt.sh OLD_FILES+=usr/share/pcvt/Etc/pcvt.el OLD_FILES+=usr/share/pcvt/Etc/Terminfo OLD_FILES+=usr/share/pcvt/Etc/Termcap OLD_DIRS+=usr/share/pcvt/Etc OLD_FILES+=usr/share/pcvt/Doc/NotesAndHints OLD_FILES+=usr/share/pcvt/Doc/Keyboard.VT OLD_FILES+=usr/share/pcvt/Doc/Keyboard.HP OLD_FILES+=usr/share/pcvt/Doc/EscapeSequences OLD_FILES+=usr/share/pcvt/Doc/Charsets OLD_FILES+=usr/share/pcvt/Doc/CharGen OLD_FILES+=usr/share/pcvt/Doc/Bibliography OLD_FILES+=usr/share/pcvt/Doc/Acknowledgements OLD_DIRS+=usr/share/pcvt/Doc OLD_DIRS+=usr/share/pcvt OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.816 OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.814 OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.810 
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.808 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.816 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.814 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.810 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.808 OLD_DIRS+=usr/share/misc/pcvtfonts OLD_FILES+=usr/share/misc/keycap.pcvt OLD_FILES+=usr/share/man/man8/ispcvt.8.gz OLD_FILES+=usr/share/man/man5/keycap.5.gz OLD_FILES+=usr/share/man/man4/pcvt.4.gz OLD_FILES+=usr/share/man/man3/kgetstr.3.gz OLD_FILES+=usr/share/man/man3/kgetnum.3.gz OLD_FILES+=usr/share/man/man3/kgetflag.3.gz OLD_FILES+=usr/share/man/man3/kgetent.3.gz OLD_FILES+=usr/share/man/man3/keycap.3.gz OLD_FILES+=usr/share/man/man1/vt220keys.1.gz OLD_FILES+=usr/share/man/man1/scon.1.gz OLD_FILES+=usr/share/man/man1/loadfont.1.gz OLD_FILES+=usr/share/man/man1/kcon.1.gz OLD_FILES+=usr/share/man/man1/fontedit.1.gz OLD_FILES+=usr/share/man/man1/cursor.1.gz OLD_FILES+=usr/sbin/vt220keys OLD_FILES+=usr/sbin/scon OLD_FILES+=usr/sbin/loadfont OLD_FILES+=usr/sbin/kcon OLD_FILES+=usr/sbin/ispcvt OLD_FILES+=usr/sbin/fontedit OLD_FILES+=usr/sbin/cursor OLD_FILES+=usr/lib/libkeycap_p.a OLD_FILES+=usr/lib/libkeycap.a OLD_FILES+=usr/include/machine/pcvt_ioctl.h # 20060514: lnc(4) replaced by le(4) OLD_FILES+=usr/share/man/man4/i386/lnc.4.gz # 20060512: remove ip6fw OLD_FILES+=etc/periodic/security/600.ip6fwdenied OLD_FILES+=etc/periodic/security/650.ip6fwlimit OLD_FILES+=sbin/ip6fw OLD_FILES+=usr/include/netinet6/ip6_fw.h OLD_FILES+=usr/share/man/man8/ip6fw.8.gz # 20060424: sab(4) removed OLD_FILES+=usr/share/man/man4/sab.4.gz # 20060328: remove redundant rc.d script OLD_FILES+=etc/rc.d/ike # 20060127: revert libdisk to static-only OLD_FILES+=usr/lib/libdisk.so # 20060115: sys/pccard includes cleanup OLD_FILES+=usr/include/pccard/driver.h OLD_FILES+=usr/include/pccard/i82365.h OLD_FILES+=usr/include/pccard/meciareg.h OLD_FILES+=usr/include/pccard/pccard_nbk.h OLD_FILES+=usr/include/pccard/pcic_pci.h OLD_FILES+=usr/include/pccard/pcicvar.h OLD_FILES+=usr/include/pccard/slot.h # 20051215: rescue/nextboot.sh renamed to rescue/nextboot OLD_FILES+=rescue/nextboot.sh # 20051214: usbd(8) removed OLD_FILES+=etc/rc.d/usbd OLD_FILES+=etc/usbd.conf OLD_FILES+=usr/sbin/usbd OLD_FILES+=usr/share/man/man8/usbd.8.gz # 20051029: rc.d/ppp-user renamed to rc.d/ppp for convenience OLD_FILES+=etc/rc.d/ppp-user # 20051012: setkey(8) moved to /sbin/ OLD_FILES+=usr/sbin/setkey # 20050930: pccardd(8) removed OLD_FILES+=usr/sbin/pccardd OLD_FILES+=usr/share/man/man5/pccard.conf.5.gz OLD_FILES+=usr/share/man/man8/pccardd.8.gz # 20050927: bridge(4) replaced by if_bridge(4) OLD_FILES+=usr/include/net/bridge.h # 20050831: not implemented OLD_FILES+=usr/share/man/man3/getino.3.gz OLD_FILES+=usr/share/man/man3/putino.3.gz # 20050825: T/TCP retired several months ago OLD_FILES+=usr/share/man/man4/ttcp.4.gz # 20050805 tn3270 retired long ago OLD_FILES+=usr/share/misc/map3270 # 20050801: too old to be interesting here OLD_FILES+=usr/share/doc/papers/px.ps.gz # 20050721: moved to ports OLD_FILES+=usr/sbin/vttest OLD_FILES+=usr/share/man/man1/vttest.1.gz # 20050617: wpa man pages moved to section 8 OLD_FILES+=usr/share/man/man1/hostapd.1.gz OLD_FILES+=usr/share/man/man1/hostapd_cli.1.gz OLD_FILES+=usr/share/man/man1/wpa_cli.1.gz OLD_FILES+=usr/share/man/man1/wpa_supplicant.1.gz # 20050610: rexecd (insecure by design) OLD_FILES+=etc/pam.d/rexecd OLD_FILES+=usr/share/man/man8/rexecd.8.gz OLD_FILES+=usr/libexec/rexecd # 20050606: OpenBSD dhclient replaces ISC one OLD_FILES+=bin/omshell 
OLD_FILES+=sbin/omshell OLD_FILES+=usr/share/man/man1/omshell.1.gz OLD_FILES+=usr/share/man/man5/dhcp-eval.5.gz # 200504XX: ipf tools moved from /usr to / OLD_FILES+=rescue/ipfs OLD_FILES+=rescue/ipfstat OLD_FILES+=rescue/ipmon OLD_FILES+=rescue/ipnat OLD_FILES+=usr/sbin/ipftest OLD_FILES+=usr/sbin/ipresend OLD_FILES+=usr/sbin/ipsend OLD_FILES+=usr/sbin/iptest OLD_FILES+=usr/share/man/man1/ipnat.1.gz OLD_FILES+=usr/share/man/man1/ipsend.1.gz OLD_FILES+=usr/share/man/man1/iptest.1.gz OLD_FILES+=usr/share/man/man5/ipsend.5.gz # 200503XX: bsdtar takes over gtar OLD_FILES+=usr/bin/gtar OLD_FILES+=usr/share/man/man1/gtar.1.gz # 200503XX OLD_FILES+=usr/share/man/man3/exp10.3.gz OLD_FILES+=usr/share/man/man3/exp10f.3.gz OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz # 20050324: updated release infrastructure OLD_FILES+=usr/share/man/man5/drivers.conf.5.gz # 20050317: removed from BIND 9 distribution OLD_FILES+=usr/share/doc/bind9/KNOWN_DEFECTS # 2005XXXX: OLD_FILES+=sbin/mount_autofs OLD_FILES+=usr/lib/libautofs.a OLD_FILES+=usr/lib/libautofs.so OLD_FILES+=usr/share/man/man8/mount_autofs.8.gz # 20050203: Merged with fortunes OLD_FILES+=usr/share/games/fortune/fortunes2 OLD_FILES+=usr/share/games/fortune/fortunes2.dat # 200501XX: OLD_FILES+=usr/libexec/getNAME # 200411XX: gvinum replaces vinum OLD_FILES+=bin/vinum OLD_FILES+=rescue/vinum OLD_FILES+=sbin/vinum OLD_FILES+=usr/share/man/man8/vinum.8.gz # 200411XX: libxpg4 removal OLD_FILES+=usr/lib/libxpg4.a OLD_FILES+=usr/lib/libxpg4.so OLD_FILES+=usr/lib/libxpg4_p.a # 20041109: replaced by em(4) OLD_FILES+=usr/share/man/man4/gx.4.gz OLD_FILES+=usr/share/man/man4/if_gx.4.gz # 20041017: rune interface removed OLD_FILES+=usr/include/rune.h OLD_FILES+=usr/share/man/man3/fgetrune.3.gz OLD_FILES+=usr/share/man/man3/fputrune.3.gz OLD_FILES+=usr/share/man/man3/fungetrune.3.gz OLD_FILES+=usr/share/man/man3/mbrrune.3.gz OLD_FILES+=usr/share/man/man3/mbrune.3.gz OLD_FILES+=usr/share/man/man3/rune.3.gz OLD_FILES+=usr/share/man/man3/setinvalidrune.3.gz OLD_FILES+=usr/share/man/man3/sgetrune.3.gz OLD_FILES+=usr/share/man/man3/sputrune.3.gz # 20040925: bind9 import OLD_FILES+=usr/bin/dnskeygen OLD_FILES+=usr/bin/dnsquery OLD_FILES+=usr/lib/libisc.a OLD_FILES+=usr/lib/libisc.so OLD_FILES+=usr/lib/libisc_p.a OLD_FILES+=usr/libexec/named-xfer OLD_FILES+=usr/sbin/named.restart OLD_FILES+=usr/sbin/ndc OLD_FILES+=usr/sbin/nslookup OLD_FILES+=usr/sbin/nsupdate OLD_FILES+=usr/share/doc/bind/html/acl.html OLD_FILES+=usr/share/doc/bind/html/address_list.html OLD_FILES+=usr/share/doc/bind/html/comments.html OLD_FILES+=usr/share/doc/bind/html/config.html OLD_FILES+=usr/share/doc/bind/html/controls.html OLD_FILES+=usr/share/doc/bind/html/docdef.html OLD_FILES+=usr/share/doc/bind/html/example.html OLD_FILES+=usr/share/doc/bind/html/include.html OLD_FILES+=usr/share/doc/bind/html/index.html OLD_FILES+=usr/share/doc/bind/html/key.html OLD_FILES+=usr/share/doc/bind/html/logging.html OLD_FILES+=usr/share/doc/bind/html/master.html OLD_FILES+=usr/share/doc/bind/html/options.html OLD_FILES+=usr/share/doc/bind/html/server.html OLD_FILES+=usr/share/doc/bind/html/trusted-keys.html OLD_FILES+=usr/share/doc/bind/html/zone.html OLD_FILES+=usr/share/doc/bind/misc/DynamicUpdate OLD_FILES+=usr/share/doc/bind/misc/FAQ.1of2 OLD_FILES+=usr/share/doc/bind/misc/FAQ.2of2 OLD_FILES+=usr/share/doc/bind/misc/rfc2317-notes.txt OLD_FILES+=usr/share/doc/bind/misc/style.txt OLD_FILES+=usr/share/man/man1/dnskeygen.1.gz OLD_FILES+=usr/share/man/man1/dnsquery.1.gz 
OLD_FILES+=usr/share/man/man8/named-bootconf.8.gz OLD_FILES+=usr/share/man/man8/named-xfer.8.gz OLD_FILES+=usr/share/man/man8/named.restart.8.gz OLD_FILES+=usr/share/man/man8/ndc.8.gz OLD_FILES+=usr/share/man/man8/nslookup.8.gz # 200409XX OLD_FILES+=usr/share/man/man3/ENSURE.3.gz OLD_FILES+=usr/share/man/man3/ENSURE_ERR.3.gz OLD_FILES+=usr/share/man/man3/INSIST.3.gz OLD_FILES+=usr/share/man/man3/INSIST_ERR.3.gz OLD_FILES+=usr/share/man/man3/INVARIANT.3.gz OLD_FILES+=usr/share/man/man3/INVARIANT_ERR.3.gz OLD_FILES+=usr/share/man/man3/REQUIRE.3.gz OLD_FILES+=usr/share/man/man3/REQUIRE_ERR.3.gz OLD_FILES+=usr/share/man/man3/assertion_type_to_text.3.gz OLD_FILES+=usr/share/man/man3/assertions.3.gz OLD_FILES+=usr/share/man/man3/bitncmp.3.gz OLD_FILES+=usr/share/man/man3/evAddTime.3.gz OLD_FILES+=usr/share/man/man3/evCancelConn.3.gz OLD_FILES+=usr/share/man/man3/evCancelRW.3.gz OLD_FILES+=usr/share/man/man3/evClearIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evClearTimer.3.gz OLD_FILES+=usr/share/man/man3/evCmpTime.3.gz OLD_FILES+=usr/share/man/man3/evConnFunc.3.gz OLD_FILES+=usr/share/man/man3/evConnect.3.gz OLD_FILES+=usr/share/man/man3/evConsIovec.3.gz OLD_FILES+=usr/share/man/man3/evConsTime.3.gz OLD_FILES+=usr/share/man/man3/evCreate.3.gz OLD_FILES+=usr/share/man/man3/evDefer.3.gz OLD_FILES+=usr/share/man/man3/evDeselectFD.3.gz OLD_FILES+=usr/share/man/man3/evDestroy.3.gz OLD_FILES+=usr/share/man/man3/evDispatch.3.gz OLD_FILES+=usr/share/man/man3/evDo.3.gz OLD_FILES+=usr/share/man/man3/evDrop.3.gz OLD_FILES+=usr/share/man/man3/evFileFunc.3.gz OLD_FILES+=usr/share/man/man3/evGetNext.3.gz OLD_FILES+=usr/share/man/man3/evHold.3.gz OLD_FILES+=usr/share/man/man3/evInitID.3.gz OLD_FILES+=usr/share/man/man3/evLastEventTime.3.gz OLD_FILES+=usr/share/man/man3/evListen.3.gz OLD_FILES+=usr/share/man/man3/evMainLoop.3.gz OLD_FILES+=usr/share/man/man3/evNowTime.3.gz OLD_FILES+=usr/share/man/man3/evPrintf.3.gz OLD_FILES+=usr/share/man/man3/evRead.3.gz OLD_FILES+=usr/share/man/man3/evResetTimer.3.gz OLD_FILES+=usr/share/man/man3/evSelectFD.3.gz OLD_FILES+=usr/share/man/man3/evSetDebug.3.gz OLD_FILES+=usr/share/man/man3/evSetIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evSetTimer.3.gz OLD_FILES+=usr/share/man/man3/evStreamFunc.3.gz OLD_FILES+=usr/share/man/man3/evSubTime.3.gz OLD_FILES+=usr/share/man/man3/evTestID.3.gz OLD_FILES+=usr/share/man/man3/evTimeRW.3.gz OLD_FILES+=usr/share/man/man3/evTimeSpec.3.gz OLD_FILES+=usr/share/man/man3/evTimeVal.3.gz OLD_FILES+=usr/share/man/man3/evTimerFunc.3.gz OLD_FILES+=usr/share/man/man3/evTouchIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evTryAccept.3.gz OLD_FILES+=usr/share/man/man3/evUnhold.3.gz OLD_FILES+=usr/share/man/man3/evUntimeRW.3.gz OLD_FILES+=usr/share/man/man3/evUnwait.3.gz OLD_FILES+=usr/share/man/man3/evWaitFor.3.gz OLD_FILES+=usr/share/man/man3/evWaitFunc.3.gz OLD_FILES+=usr/share/man/man3/evWrite.3.gz OLD_FILES+=usr/share/man/man3/eventlib.3.gz OLD_FILES+=usr/share/man/man3/heap.3.gz OLD_FILES+=usr/share/man/man3/heap_decreased.3.gz OLD_FILES+=usr/share/man/man3/heap_delete.3.gz OLD_FILES+=usr/share/man/man3/heap_element.3.gz OLD_FILES+=usr/share/man/man3/heap_for_each.3.gz OLD_FILES+=usr/share/man/man3/heap_free.3.gz OLD_FILES+=usr/share/man/man3/heap_increased.3.gz OLD_FILES+=usr/share/man/man3/heap_insert.3.gz OLD_FILES+=usr/share/man/man3/heap_new.3.gz OLD_FILES+=usr/share/man/man3/log_add_channel.3.gz OLD_FILES+=usr/share/man/man3/log_category_is_active.3.gz OLD_FILES+=usr/share/man/man3/log_close_stream.3.gz 
OLD_FILES+=usr/share/man/man3/log_dec_references.3.gz OLD_FILES+=usr/share/man/man3/log_free_channel.3.gz OLD_FILES+=usr/share/man/man3/log_free_context.3.gz OLD_FILES+=usr/share/man/man3/log_get_filename.3.gz OLD_FILES+=usr/share/man/man3/log_get_stream.3.gz OLD_FILES+=usr/share/man/man3/log_inc_references.3.gz OLD_FILES+=usr/share/man/man3/log_new_context.3.gz OLD_FILES+=usr/share/man/man3/log_new_file_channel.3.gz OLD_FILES+=usr/share/man/man3/log_new_null_channel.3.gz OLD_FILES+=usr/share/man/man3/log_new_syslog_channel.3.gz OLD_FILES+=usr/share/man/man3/log_open_stream.3.gz OLD_FILES+=usr/share/man/man3/log_option.3.gz OLD_FILES+=usr/share/man/man3/log_remove_channel.3.gz OLD_FILES+=usr/share/man/man3/log_set_file_owner.3.gz OLD_FILES+=usr/share/man/man3/log_vwrite.3.gz OLD_FILES+=usr/share/man/man3/log_write.3.gz OLD_FILES+=usr/share/man/man3/logging.3.gz OLD_FILES+=usr/share/man/man3/memcluster.3.gz OLD_FILES+=usr/share/man/man3/memget.3.gz OLD_FILES+=usr/share/man/man3/memput.3.gz OLD_FILES+=usr/share/man/man3/memstats.3.gz OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3. OLD_FILES+=usr/share/man/man3/sigwait.3.gz OLD_FILES+=usr/share/man/man3/tree_add.3.gz OLD_FILES+=usr/share/man/man3/tree_delete.3.gz OLD_FILES+=usr/share/man/man3/tree_init.3.gz OLD_FILES+=usr/share/man/man3/tree_mung.3.gz OLD_FILES+=usr/share/man/man3/tree_srch.3.gz OLD_FILES+=usr/share/man/man3/tree_trav.3.gz # 2004XXYY: OS internal libs, no ports use them, no need to use OLD_LIBS OLD_FILES+=lib/geom/geom_concat.so.1 OLD_FILES+=lib/geom/geom_label.so.1 OLD_FILES+=lib/geom/geom_nop.so.1 OLD_FILES+=lib/geom/geom_stripe.so.1 # 20040728: GCC 3.4.2 OLD_DIRS+=usr/include/c++/3.3 OLD_FILES+=usr/include/c++/3.3/FlexLexer.h OLD_FILES+=usr/include/c++/3.3/algorithm OLD_FILES+=usr/include/c++/3.3/backward/algo.h OLD_FILES+=usr/include/c++/3.3/backward/algobase.h OLD_FILES+=usr/include/c++/3.3/backward/alloc.h OLD_FILES+=usr/include/c++/3.3/backward/backward_warning.h OLD_FILES+=usr/include/c++/3.3/backward/bvector.h OLD_FILES+=usr/include/c++/3.3/backward/complex.h OLD_FILES+=usr/include/c++/3.3/backward/defalloc.h OLD_FILES+=usr/include/c++/3.3/backward/deque.h OLD_FILES+=usr/include/c++/3.3/backward/fstream.h OLD_FILES+=usr/include/c++/3.3/backward/function.h OLD_FILES+=usr/include/c++/3.3/backward/hash_map.h OLD_FILES+=usr/include/c++/3.3/backward/hash_set.h OLD_FILES+=usr/include/c++/3.3/backward/hashtable.h OLD_FILES+=usr/include/c++/3.3/backward/heap.h OLD_FILES+=usr/include/c++/3.3/backward/iomanip.h OLD_FILES+=usr/include/c++/3.3/backward/iostream.h OLD_FILES+=usr/include/c++/3.3/backward/istream.h OLD_FILES+=usr/include/c++/3.3/backward/iterator.h OLD_FILES+=usr/include/c++/3.3/backward/list.h OLD_FILES+=usr/include/c++/3.3/backward/map.h OLD_FILES+=usr/include/c++/3.3/backward/multimap.h OLD_FILES+=usr/include/c++/3.3/backward/multiset.h OLD_FILES+=usr/include/c++/3.3/backward/new.h OLD_FILES+=usr/include/c++/3.3/backward/ostream.h OLD_FILES+=usr/include/c++/3.3/backward/pair.h OLD_FILES+=usr/include/c++/3.3/backward/queue.h OLD_FILES+=usr/include/c++/3.3/backward/rope.h OLD_FILES+=usr/include/c++/3.3/backward/set.h OLD_FILES+=usr/include/c++/3.3/backward/slist.h OLD_FILES+=usr/include/c++/3.3/backward/stack.h OLD_FILES+=usr/include/c++/3.3/backward/stream.h OLD_FILES+=usr/include/c++/3.3/backward/streambuf.h OLD_FILES+=usr/include/c++/3.3/backward/strstream OLD_FILES+=usr/include/c++/3.3/backward/strstream.h OLD_FILES+=usr/include/c++/3.3/backward/tempbuf.h 
OLD_FILES+=usr/include/c++/3.3/backward/tree.h OLD_FILES+=usr/include/c++/3.3/backward/vector.h OLD_DIRS+=usr/include/c++/3.3/backward OLD_FILES+=usr/include/c++/3.3/bits/atomicity.h OLD_FILES+=usr/include/c++/3.3/bits/basic_file.h OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.h OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/3.3/bits/basic_string.h OLD_FILES+=usr/include/c++/3.3/bits/basic_string.tcc OLD_FILES+=usr/include/c++/3.3/bits/boost_concept_check.h OLD_FILES+=usr/include/c++/3.3/bits/c++config.h OLD_FILES+=usr/include/c++/3.3/bits/c++io.h OLD_FILES+=usr/include/c++/3.3/bits/c++locale.h OLD_FILES+=usr/include/c++/3.3/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/3.3/bits/char_traits.h OLD_FILES+=usr/include/c++/3.3/bits/cmath.tcc OLD_FILES+=usr/include/c++/3.3/bits/codecvt.h OLD_FILES+=usr/include/c++/3.3/bits/codecvt_specializations.h OLD_FILES+=usr/include/c++/3.3/bits/concept_check.h OLD_FILES+=usr/include/c++/3.3/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_base.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_inline.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/3.3/bits/deque.tcc OLD_FILES+=usr/include/c++/3.3/bits/fpos.h OLD_FILES+=usr/include/c++/3.3/bits/fstream.tcc OLD_FILES+=usr/include/c++/3.3/bits/functexcept.h OLD_FILES+=usr/include/c++/3.3/bits/generic_shadow.h OLD_FILES+=usr/include/c++/3.3/bits/gslice.h OLD_FILES+=usr/include/c++/3.3/bits/gslice_array.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-default.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-posix.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-single.h OLD_FILES+=usr/include/c++/3.3/bits/gthr.h OLD_FILES+=usr/include/c++/3.3/bits/indirect_array.h OLD_FILES+=usr/include/c++/3.3/bits/ios_base.h OLD_FILES+=usr/include/c++/3.3/bits/istream.tcc OLD_FILES+=usr/include/c++/3.3/bits/list.tcc OLD_FILES+=usr/include/c++/3.3/bits/locale_classes.h OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.h OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/3.3/bits/localefwd.h OLD_FILES+=usr/include/c++/3.3/bits/mask_array.h OLD_FILES+=usr/include/c++/3.3/bits/messages_members.h OLD_FILES+=usr/include/c++/3.3/bits/os_defines.h OLD_FILES+=usr/include/c++/3.3/bits/ostream.tcc OLD_FILES+=usr/include/c++/3.3/bits/pthread_allocimpl.h OLD_FILES+=usr/include/c++/3.3/bits/slice.h OLD_FILES+=usr/include/c++/3.3/bits/slice_array.h OLD_FILES+=usr/include/c++/3.3/bits/sstream.tcc OLD_FILES+=usr/include/c++/3.3/bits/stl_algo.h OLD_FILES+=usr/include/c++/3.3/bits/stl_algobase.h OLD_FILES+=usr/include/c++/3.3/bits/stl_alloc.h OLD_FILES+=usr/include/c++/3.3/bits/stl_bvector.h OLD_FILES+=usr/include/c++/3.3/bits/stl_construct.h OLD_FILES+=usr/include/c++/3.3/bits/stl_deque.h OLD_FILES+=usr/include/c++/3.3/bits/stl_function.h OLD_FILES+=usr/include/c++/3.3/bits/stl_heap.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/3.3/bits/stl_list.h OLD_FILES+=usr/include/c++/3.3/bits/stl_map.h OLD_FILES+=usr/include/c++/3.3/bits/stl_multimap.h OLD_FILES+=usr/include/c++/3.3/bits/stl_multiset.h OLD_FILES+=usr/include/c++/3.3/bits/stl_numeric.h OLD_FILES+=usr/include/c++/3.3/bits/stl_pair.h OLD_FILES+=usr/include/c++/3.3/bits/stl_pthread_alloc.h OLD_FILES+=usr/include/c++/3.3/bits/stl_queue.h OLD_FILES+=usr/include/c++/3.3/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/3.3/bits/stl_relops.h 
OLD_FILES+=usr/include/c++/3.3/bits/stl_set.h OLD_FILES+=usr/include/c++/3.3/bits/stl_stack.h OLD_FILES+=usr/include/c++/3.3/bits/stl_tempbuf.h OLD_FILES+=usr/include/c++/3.3/bits/stl_threads.h OLD_FILES+=usr/include/c++/3.3/bits/stl_tree.h OLD_FILES+=usr/include/c++/3.3/bits/stl_uninitialized.h OLD_FILES+=usr/include/c++/3.3/bits/stl_vector.h OLD_FILES+=usr/include/c++/3.3/bits/stream_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/streambuf.tcc OLD_FILES+=usr/include/c++/3.3/bits/streambuf_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/stringfwd.h OLD_FILES+=usr/include/c++/3.3/bits/time_members.h OLD_FILES+=usr/include/c++/3.3/bits/type_traits.h OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.h OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.tcc OLD_FILES+=usr/include/c++/3.3/bits/valarray_meta.h OLD_FILES+=usr/include/c++/3.3/bits/vector.tcc OLD_DIRS+=usr/include/c++/3.3/bits OLD_FILES+=usr/include/c++/3.3/bitset OLD_FILES+=usr/include/c++/3.3/cassert OLD_FILES+=usr/include/c++/3.3/cctype OLD_FILES+=usr/include/c++/3.3/cerrno OLD_FILES+=usr/include/c++/3.3/cfloat OLD_FILES+=usr/include/c++/3.3/ciso646 OLD_FILES+=usr/include/c++/3.3/climits OLD_FILES+=usr/include/c++/3.3/clocale OLD_FILES+=usr/include/c++/3.3/cmath OLD_FILES+=usr/include/c++/3.3/complex OLD_FILES+=usr/include/c++/3.3/csetjmp OLD_FILES+=usr/include/c++/3.3/csignal OLD_FILES+=usr/include/c++/3.3/cstdarg OLD_FILES+=usr/include/c++/3.3/cstddef OLD_FILES+=usr/include/c++/3.3/cstdio OLD_FILES+=usr/include/c++/3.3/cstdlib OLD_FILES+=usr/include/c++/3.3/cstring OLD_FILES+=usr/include/c++/3.3/ctime OLD_FILES+=usr/include/c++/3.3/cwchar OLD_FILES+=usr/include/c++/3.3/cwctype OLD_FILES+=usr/include/c++/3.3/cxxabi.h OLD_FILES+=usr/include/c++/3.3/deque OLD_FILES+=usr/include/c++/3.3/exception OLD_FILES+=usr/include/c++/3.3/exception_defines.h OLD_FILES+=usr/include/c++/3.3/ext/algorithm OLD_FILES+=usr/include/c++/3.3/ext/enc_filebuf.h OLD_FILES+=usr/include/c++/3.3/ext/functional OLD_FILES+=usr/include/c++/3.3/ext/hash_map OLD_FILES+=usr/include/c++/3.3/ext/hash_set OLD_FILES+=usr/include/c++/3.3/ext/iterator OLD_FILES+=usr/include/c++/3.3/ext/memory OLD_FILES+=usr/include/c++/3.3/ext/numeric OLD_FILES+=usr/include/c++/3.3/ext/rb_tree OLD_FILES+=usr/include/c++/3.3/ext/rope OLD_FILES+=usr/include/c++/3.3/ext/ropeimpl.h OLD_FILES+=usr/include/c++/3.3/ext/slist OLD_FILES+=usr/include/c++/3.3/ext/stdio_filebuf.h OLD_FILES+=usr/include/c++/3.3/ext/stl_hash_fun.h OLD_FILES+=usr/include/c++/3.3/ext/stl_hashtable.h OLD_FILES+=usr/include/c++/3.3/ext/stl_rope.h OLD_DIRS+=usr/include/c++/3.3/ext OLD_FILES+=usr/include/c++/3.3/fstream OLD_FILES+=usr/include/c++/3.3/functional OLD_FILES+=usr/include/c++/3.3/iomanip OLD_FILES+=usr/include/c++/3.3/ios OLD_FILES+=usr/include/c++/3.3/iosfwd OLD_FILES+=usr/include/c++/3.3/iostream OLD_FILES+=usr/include/c++/3.3/istream OLD_FILES+=usr/include/c++/3.3/iterator OLD_FILES+=usr/include/c++/3.3/limits OLD_FILES+=usr/include/c++/3.3/list OLD_FILES+=usr/include/c++/3.3/locale OLD_FILES+=usr/include/c++/3.3/map OLD_FILES+=usr/include/c++/3.3/memory OLD_FILES+=usr/include/c++/3.3/new OLD_FILES+=usr/include/c++/3.3/numeric OLD_FILES+=usr/include/c++/3.3/ostream OLD_FILES+=usr/include/c++/3.3/queue OLD_FILES+=usr/include/c++/3.3/set OLD_FILES+=usr/include/c++/3.3/sstream OLD_FILES+=usr/include/c++/3.3/stack OLD_FILES+=usr/include/c++/3.3/stdexcept OLD_FILES+=usr/include/c++/3.3/streambuf OLD_FILES+=usr/include/c++/3.3/string OLD_FILES+=usr/include/c++/3.3/typeinfo OLD_FILES+=usr/include/c++/3.3/utility 
OLD_FILES+=usr/include/c++/3.3/valarray
OLD_FILES+=usr/include/c++/3.3/vector
# 20040713: fla(4) removed.
OLD_FILES+=usr/share/man/man4/fla.4.gz
# 200407XX
OLD_FILES+=usr/sbin/kernbb
OLD_FILES+=usr/sbin/ntp-genkeys
OLD_FILES+=usr/sbin/ntptimeset
OLD_FILES+=usr/share/man/man8/kernbb.8.gz
OLD_FILES+=usr/share/man/man8/ntp-genkeys.8.gz
# 20040627: usbdevs.h and usbdevs_data.h removal
OLD_FILES+=usr/include/dev/usb/usbdevs.h
OLD_FILES+=usr/include/dev/usb/usbdevs_data.h
# 200406XX
OLD_FILES+=usr/bin/gasp
OLD_FILES+=usr/bin/gdbreplay
OLD_FILES+=usr/share/man/man1/gasp.1.gz
OLD_FILES+=sbin/mountd
OLD_FILES+=sbin/mount_fdesc
OLD_FILES+=sbin/mount_umap
OLD_FILES+=sbin/mount_union
OLD_FILES+=sbin/mount_msdos
OLD_FILES+=sbin/mount_null
OLD_FILES+=sbin/mount_kernfs
# 200405XX: arl
OLD_FILES+=usr/sbin/arlconfig
OLD_FILES+=usr/share/man/man8/arlconfig.8.gz
# 200403XX
OLD_FILES+=bin/raidctl
OLD_FILES+=sbin/raidctl
OLD_FILES+=usr/bin/sasc
OLD_FILES+=usr/sbin/sgsc
OLD_FILES+=usr/sbin/stlload
OLD_FILES+=usr/sbin/stlstats
OLD_FILES+=usr/share/man/man1/sasc.1.gz
OLD_FILES+=usr/share/man/man1/sgsc.1.gz
OLD_FILES+=usr/share/man/man4/i386/stl.4.gz
OLD_FILES+=usr/share/man/man8/raidctl.8.gz
# 20040229: clean_environment() was removed after 3 days
OLD_FILES+=usr/share/man/man3/clean_environment.3.gz
# 20040119: installed as `isdntel' in newer systems
OLD_FILES+=etc/isdn/isdntel.sh
# 200XYYZZ: /lib transition glitches
OLD_FILES+=lib/libalias.so
OLD_FILES+=lib/libatm.so
OLD_FILES+=lib/libbsdxml.so
OLD_FILES+=lib/libc.so
OLD_FILES+=lib/libcam.so
OLD_FILES+=lib/libcrypt.so
OLD_FILES+=lib/libcrypto.so
OLD_FILES+=lib/libdevstat.so
OLD_FILES+=lib/libedit.so
OLD_FILES+=lib/libgeom.so
OLD_FILES+=lib/libipsec.so
OLD_FILES+=lib/libipx.so
OLD_FILES+=lib/libkvm.so
OLD_FILES+=lib/libm.so
OLD_FILES+=lib/libmd.so
OLD_FILES+=lib/libncurses.so
OLD_FILES+=lib/libreadline.so
OLD_FILES+=lib/libsbuf.so
OLD_FILES+=lib/libufs.so
OLD_FILES+=lib/libz.so
# 200312XX
OLD_FILES+=bin/cxconfig
OLD_FILES+=sbin/cxconfig
OLD_FILES+=usr/share/man/man8/cxconfig.8.gz
# 20031016: MULTI_DRIVER_MODULE macro removed
OLD_FILES+=usr/share/man/man9/MULTI_DRIVER_MODULE.9.gz
# 200309XX
OLD_FILES+=usr/bin/symorder
OLD_FILES+=usr/share/man/man1/symorder.1.gz
# 200308XX
OLD_FILES+=usr/sbin/amldb
OLD_FILES+=usr/share/man/man8/amldb.8.gz
# 200307XX
OLD_FILES+=sbin/mount_nwfs
OLD_FILES+=sbin/mount_portalfs
OLD_FILES+=sbin/mount_smbfs
# 200306XX
OLD_FILES+=usr/sbin/dev_mkdb
OLD_FILES+=usr/share/man/man8/dev_mkdb.8.gz
# 200304XX
OLD_FILES+=usr/lib/libcipher.a
OLD_FILES+=usr/lib/libcipher.so
OLD_FILES+=usr/lib/libcipher_p.a
OLD_FILES+=usr/lib/libgmp.a
OLD_FILES+=usr/lib/libgmp.so
OLD_FILES+=usr/lib/libgmp_p.a
OLD_FILES+=usr/lib/libperl.a
OLD_FILES+=usr/lib/libperl.so
OLD_FILES+=usr/lib/libperl_p.a
OLD_FILES+=usr/lib/libposix1e.a
OLD_FILES+=usr/lib/libposix1e.so
OLD_FILES+=usr/lib/libposix1e_p.a
OLD_FILES+=usr/lib/libskey.a
OLD_FILES+=usr/lib/libskey.so
OLD_FILES+=usr/lib/libskey_p.a
OLD_FILES+=usr/libexec/tradcpp0
OLD_FILES+=usr/libexec/cpp0
# 200304XX: removal of xten
OLD_FILES+=usr/libexec/xtend
OLD_FILES+=usr/sbin/xten
OLD_FILES+=usr/share/man/man1/xten.1.gz
OLD_FILES+=usr/share/man/man8/xtend.8.gz
# 200303XX
OLD_FILES+=usr/lib/libacl.so
OLD_FILES+=usr/lib/libdescrypt.so
OLD_FILES+=usr/lib/libf2c.so
OLD_FILES+=usr/lib/libg++.so
OLD_FILES+=usr/lib/libkdb.so
OLD_FILES+=usr/lib/librsaINTL.so
OLD_FILES+=usr/lib/libscrypt.so
OLD_FILES+=usr/lib/libss.so
# 200302XX
OLD_FILES+=usr/lib/libacl.a
OLD_FILES+=usr/lib/libacl_p.a
OLD_FILES+=usr/lib/libkadm.a
OLD_FILES+=usr/lib/libkadm.so OLD_FILES+=usr/lib/libkadm_p.a OLD_FILES+=usr/lib/libkafs.a OLD_FILES+=usr/lib/libkafs.so OLD_FILES+=usr/lib/libkafs_p.a OLD_FILES+=usr/lib/libkdb.a OLD_FILES+=usr/lib/libkdb_p.a OLD_FILES+=usr/lib/libkrb.a OLD_FILES+=usr/lib/libkrb.so OLD_FILES+=usr/lib/libkrb_p.a OLD_FILES+=usr/share/man/man3/SSL_CIPHER_get_name.3.gz OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3 OLD_FILES+=usr/share/man/man3/SSL_CTX_add_extra_chain_cert.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_add_session.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_ctrl.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_flush_sessions.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_free.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_get_verify_mode.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_load_verify_locations.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_new.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_number.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_cache_size.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_get_cb.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sessions.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_store.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_verify_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cipher_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_CA_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_cert_cb.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_default_passwd_cb.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_generate_session_id.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_info_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_max_cert_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_mode.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_msg_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_options.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_quiet_shutdown.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_cache_mode.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_id_context.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_ssl_version.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_timeout.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_dh_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_verify.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_use_certificate.3.gz OLD_FILES+=usr/share/man/man3/SSL_SESSION_free.3.gz OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_ex_new_index.3.gz OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_time.3.gz OLD_FILES+=usr/share/man/man3/SSL_accept.3.gz OLD_FILES+=usr/share/man/man3/SSL_alert_type_string.3.gz OLD_FILES+=usr/share/man/man3/SSL_clear.3.gz OLD_FILES+=usr/share/man/man3/SSL_connect.3.gz OLD_FILES+=usr/share/man/man3/SSL_do_handshake.3.gz OLD_FILES+=usr/share/man/man3/SSL_free.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_SSL_CTX.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_ciphers.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_client_CA_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_current_cipher.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_default_timeout.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_error.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_ex_data_X509_STORE_CTX_idx.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_ex_new_index.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_fd.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_peer_cert_chain.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_peer_certificate.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_rbio.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_session.3.gz 
OLD_FILES+=usr/share/man/man3/SSL_get_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_library_init.3.gz
OLD_FILES+=usr/share/man/man3/SSL_load_client_CA_file.3.gz
OLD_FILES+=usr/share/man/man3/SSL_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_pending.3.gz
OLD_FILES+=usr/share/man/man3/SSL_read.3.gz
OLD_FILES+=usr/share/man/man3/SSL_rstate_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_session_reused.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_bio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_connect_state.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_state_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_want.3.gz
OLD_FILES+=usr/share/man/man3/SSL_write.3.gz
OLD_FILES+=usr/share/man/man3/d2i_SSL_SESSION.3.gz
# 200301XX
OLD_FILES+=usr/share/man/man3/des_3cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_3ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cfb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_read.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_write.3.gz
OLD_FILES+=usr/share/man/man3/des_is_weak_key.3.gz
OLD_FILES+=usr/share/man/man3/des_key_sched.3.gz
OLD_FILES+=usr/share/man/man3/des_ofb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_pcbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_quad_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_random_key.3.gz
OLD_FILES+=usr/share/man/man3/des_read_2password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/man/man3/des_set_key.3.gz
OLD_FILES+=usr/share/man/man3/des_set_odd_parity.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_2key.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_key.3.gz
# 200212XX
OLD_FILES+=usr/sbin/kenv
OLD_FILES+=usr/bin/kenv
OLD_FILES+=usr/sbin/elf2aout
# 200210XX
OLD_FILES+=usr/include/libusbhid.h
OLD_FILES+=usr/share/man/man3/All_FreeBSD.3.gz
OLD_FILES+=usr/share/man/man3/CheckRules.3.gz
OLD_FILES+=usr/share/man/man3/ChunkCanBeRoot.3.gz
OLD_FILES+=usr/share/man/man3/Clone_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk_DWIM.3.gz
OLD_FILES+=usr/share/man/man3/Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Debug_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Delete_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Disk_Names.3.gz
OLD_FILES+=usr/share/man/man3/Free_Disk.3.gz
OLD_FILES+=usr/share/man/man3/MakeDev.3.gz
OLD_FILES+=usr/share/man/man3/MakeDevDisk.3.gz
OLD_FILES+=usr/share/man/man3/Next_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Next_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Open_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Set_Bios_Geom.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Blocks.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Mgr.3.gz
OLD_FILES+=usr/share/man/man3/ShowChunkFlags.3.gz
OLD_FILES+=usr/share/man/man3/Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Write_Disk.3.gz
OLD_FILES+=usr/share/man/man3/slice_type_name.3.gz
# 200210XX: most games moved to ports
OLD_FILES+=usr/share/man/man6/adventure.6.gz
OLD_FILES+=usr/share/man/man6/arithmetic.6.gz
OLD_FILES+=usr/share/man/man6/atc.6.gz
OLD_FILES+=usr/share/man/man6/backgammon.6.gz
OLD_FILES+=usr/share/man/man6/battlestar.6.gz
OLD_FILES+=usr/share/man/man6/bs.6.gz
OLD_FILES+=usr/share/man/man6/canfield.6.gz
OLD_FILES+=usr/share/man/man6/cfscores.6.gz
OLD_FILES+=usr/share/man/man6/cribbage.6.gz
OLD_FILES+=usr/share/man/man6/fish.6.gz
OLD_FILES+=usr/share/man/man6/hack.6.gz
OLD_FILES+=usr/share/man/man6/hangman.6.gz
OLD_FILES+=usr/share/man/man6/larn.6.gz
OLD_FILES+=usr/share/man/man6/mille.6.gz
OLD_FILES+=usr/share/man/man6/phantasia.6.gz
OLD_FILES+=usr/share/man/man6/piano.6.gz
OLD_FILES+=usr/share/man/man6/pig.6.gz
OLD_FILES+=usr/share/man/man6/quiz.6.gz
OLD_FILES+=usr/share/man/man6/rain.6.gz
OLD_FILES+=usr/share/man/man6/robots.6.gz
OLD_FILES+=usr/share/man/man6/rogue.6.gz
OLD_FILES+=usr/share/man/man6/sail.6.gz
OLD_FILES+=usr/share/man/man6/snake.6.gz
OLD_FILES+=usr/share/man/man6/snscore.6.gz
OLD_FILES+=usr/share/man/man6/trek.6.gz
OLD_FILES+=usr/share/man/man6/wargames.6.gz
OLD_FILES+=usr/share/man/man6/worm.6.gz
OLD_FILES+=usr/share/man/man6/worms.6.gz
OLD_FILES+=usr/share/man/man6/wump.6.gz
# 200207XX
OLD_FILES+=usr/share/man/man1aout/ar.1aout.gz
OLD_FILES+=usr/share/man/man1aout/as.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ld.1aout.gz
OLD_FILES+=usr/share/man/man1aout/nm.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ranlib.1aout.gz
OLD_FILES+=usr/share/man/man1aout/size.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strings.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strip.1aout.gz
OLD_FILES+=bin/mountd
OLD_FILES+=bin/nfsd
# 20020707 sbin/nfsd -> usr.sbin/nfsd
OLD_FILES+=sbin/nfsd
# 200206XX
OLD_FILES+=usr/lib/libpam_ssh.a
OLD_FILES+=usr/lib/libpam_ssh_p.a
OLD_FILES+=usr/bin/help
OLD_FILES+=usr/bin/sccs
.if ${TARGET_ARCH} != "amd64" && ${TARGET} != "arm" && ${TARGET_ARCH} != "i386" && ${TARGET} != "powerpc"
OLD_FILES+=usr/bin/gdbserver
.endif
OLD_FILES+=usr/bin/ssh-keysign
OLD_FILES+=usr/sbin/gifconfig
OLD_FILES+=usr/sbin/prefix
# 200205XX
OLD_FILES+=usr/bin/doscmd
# 200204XX
OLD_FILES+=usr/bin/a2p
OLD_FILES+=usr/bin/ptx
OLD_FILES+=usr/bin/pod2text
OLD_FILES+=usr/bin/pod2man
OLD_FILES+=usr/bin/pod2latex
OLD_FILES+=usr/bin/pod2html
OLD_FILES+=usr/bin/h2ph
OLD_FILES+=usr/bin/dprofpp
OLD_FILES+=usr/bin/c2ph
OLD_FILES+=usr/bin/h2xs
OLD_FILES+=usr/bin/pl2pm
OLD_FILES+=usr/bin/splain
OLD_FILES+=usr/bin/s2p
OLD_FILES+=usr/bin/find2perl
OLD_FILES+=usr/sbin/pkg_update
OLD_FILES+=usr/sbin/scriptdump
# 20020409 GC kget(1), userconfig is long dead.
OLD_FILES+=sbin/kget
OLD_FILES+=usr/share/man/man8/kget.8.gz
# 200203XX
OLD_FILES+=usr/lib/libss.a
OLD_FILES+=usr/lib/libss_p.a
OLD_FILES+=usr/lib/libtelnet.a
OLD_FILES+=usr/lib/libtelnet_p.a
OLD_FILES+=usr/sbin/diskpart
# 200202XX
OLD_FILES+=usr/bin/gprof4
# 200201XX
OLD_FILES+=usr/sbin/linux
# 2001XXXX
OLD_FILES+=usr/bin/joy
OLD_FILES+=usr/sbin/ibcs2
OLD_FILES+=usr/sbin/svr4
OLD_FILES+=usr/bin/chflags
OLD_FILES+=usr/sbin/uuconv
OLD_FILES+=usr/sbin/uuchk
OLD_FILES+=usr/sbin/portmap
OLD_FILES+=usr/sbin/pmap_set
OLD_FILES+=usr/sbin/pmap_dump
OLD_FILES+=usr/sbin/mcon
OLD_FILES+=usr/sbin/stlstty
OLD_FILES+=usr/sbin/ispppcontrol
OLD_FILES+=usr/sbin/rndcontrol
# 20011001: UUCP migration to ports
OLD_FILES+=usr/bin/uucp
OLD_FILES+=usr/bin/uulog
OLD_FILES+=usr/bin/uuname
OLD_FILES+=usr/bin/uupick
OLD_FILES+=usr/bin/uusched
OLD_FILES+=usr/bin/uustat
OLD_FILES+=usr/bin/uuto
OLD_FILES+=usr/bin/uux
OLD_FILES+=usr/libexec/uucp/uucico
OLD_FILES+=usr/libexec/uucp/uuxqt
OLD_FILES+=usr/libexec/uucpd
OLD_FILES+=usr/share/man/man1/uuconv.1.gz
OLD_FILES+=usr/share/man/man1/uucp.1.gz
OLD_FILES+=usr/share/man/man1/uulog.1.gz
OLD_FILES+=usr/share/man/man1/uuname.1.gz
OLD_FILES+=usr/share/man/man1/uupick.1.gz
OLD_FILES+=usr/share/man/man1/uustat.1.gz
OLD_FILES+=usr/share/man/man1/uuto.1.gz
OLD_FILES+=usr/share/man/man1/uux.1.gz
OLD_FILES+=usr/share/man/man8/uuchk.8.gz
OLD_FILES+=usr/share/man/man8/uucico.8.gz
OLD_FILES+=usr/share/man/man8/uucpd.8.gz
OLD_FILES+=usr/share/man/man8/uusched.8.gz
OLD_FILES+=usr/share/man/man8/uuxqt.8.gz
# 20010523 mount_portal -> mount_portalfs
OLD_FILES+=sbin/mount_portal
OLD_FILES+=usr/share/man/man8/mount_portal.8.gz
# 200104XX
OLD_FILES+=usr/lib/libdescrypt.a
OLD_FILES+=usr/lib/libscrypt.a
OLD_FILES+=usr/lib/libscrypt_p.a
OLD_FILES+=usr/sbin/pim6stat
OLD_FILES+=usr/sbin/pim6sd
OLD_FILES+=usr/sbin/pim6dd
# 20010217
OLD_FILES+=usr/share/doc/bind/misc/dns-setup
# 20001200
OLD_FILES+=usr/lib/libgcc_r_pic.a
# 200009XX
OLD_FILES+=usr/lib/libRSAglue.a
OLD_FILES+=usr/lib/libRSAglue.so
OLD_FILES+=usr/lib/librsaINTL.a
OLD_FILES+=usr/lib/librsaUSA.a
OLD_FILES+=usr/lib/librsaUSA.so
# 200002XX ?
OLD_FILES+=usr/lib/libf2c.a
OLD_FILES+=usr/lib/libf2c_p.a
OLD_FILES+=usr/lib/libg++.a
OLD_FILES+=usr/lib/libg++_p.a
# 20001006
OLD_FILES+=usr/bin/miniperl
# 20000810
OLD_FILES+=usr/bin/sperl
# 200001XX
OLD_FILES+=usr/sbin/apmconf
# 199911XX
OLD_FILES+=usr/sbin/ipfstat
OLD_FILES+=usr/sbin/ipmon
OLD_FILES+=usr/sbin/ipnat
OLD_FILES+=usr/sbin/bad144
OLD_FILES+=usr/sbin/wormcontrol
OLD_FILES+=usr/sbin/named-bootconf
OLD_FILES+=usr/sbin/kvm_mkdb
OLD_FILES+=usr/sbin/keyadmin
# 199909XX
OLD_FILES+=usr/lib/libdesrypt_p.a
OLD_FILES+=sbin/ft
# 199903XX
OLD_FILES+=sbin/modload
OLD_FILES+=sbin/modunload
OLD_FILES+=usr/sbin/natd
# 199812XX
OLD_FILES+=sbin/dset
# 199809XX
OLD_FILES+=sbin/scsi
OLD_FILES+=sbin/scsiformat
OLD_FILES+=usr/sbin/ncrcontrol
OLD_FILES+=usr/sbin/tickadj
# 199806XX
OLD_FILES+=usr/sbin/mkdosfs
# 199801XX
OLD_FILES+=sbin/mount_lfs
OLD_FILES+=sbin/newlfs
OLD_FILES+=sbin/dumplfs
OLD_FILES+=usr/sbin/qcamcontrol
OLD_FILES+=usr/sbin/supscan
# 1997XXXX
OLD_FILES+=usr/sbin/sysctl
OLD_FILES+=usr/sbin/ctm_scan
OLD_FILES+=usr/sbin/addgroup
OLD_FILES+=usr/sbin/rmgroup
# 1996XXXX
OLD_FILES+=sbin/rdisc
OLD_FILES+=usr/sbin/cdplay
OLD_FILES+=usr/sbin/supfilesrv
OLD_FILES+=usr/sbin/routed
OLD_FILES+=usr/sbin/lsdev
OLD_FILES+=usr/sbin/yppasswdd
## unsorted
# do we still support aout builds?
#OLD_FILES+=usr/lib/aout/c++rt0.o
#OLD_FILES+=usr/lib/aout/crt0.o
#OLD_FILES+=usr/lib/aout/gcrt0.o
#OLD_FILES+=usr/lib/aout/scrt0.o
#OLD_FILES+=usr/lib/aout/sgcrt0.o
OLD_FILES+=usr/bin/sperl5
OLD_FILES+=usr/bin/perl5.6.0
OLD_FILES+=usr/bin/sperl5.6.0
OLD_FILES+=usr/bin/perlbc
OLD_FILES+=usr/bin/perl5.00503
OLD_FILES+=usr/bin/sperl5.00503
OLD_FILES+=usr/bin/perlbug
OLD_FILES+=usr/bin/perlcc
OLD_FILES+=usr/bin/perldoc
OLD_FILES+=usr/bin/suidperl
OLD_FILES+=usr/lib/pam_ftp.so
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Apache.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Cookie.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Fast.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Push.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Switch.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/FirstTime.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/Nox.pm
OLD_FILES+=usr/libdata/perl/5.00503/Class/Struct.pm
OLD_FILES+=usr/libdata/perl/5.00503/Devel/SelfStubber.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Command.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Embed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Install.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Installed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Liblist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MakeMaker.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Manifest.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mkbootstrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mksymlists.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Packlist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/inst
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/testlib.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/typemap
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/xsubpp
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Mac.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Basename.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/CheckTree.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Compare.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Copy.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/DosGlob.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Find.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Path.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/stat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Long.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Std.pm
OLD_FILES+=usr/libdata/perl/5.00503/I18N/Collate.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open2.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open3.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigFloat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigInt.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Complex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Trig.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/Ping.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/hostent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/netent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/protoent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/servent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Functions.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Html.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Text.pm OLD_FILES+=usr/libdata/perl/5.00503/Search/Dict.pm OLD_FILES+=usr/libdata/perl/5.00503/Sys/Hostname.pm OLD_FILES+=usr/libdata/perl/5.00503/Sys/Syslog.pm OLD_FILES+=usr/libdata/perl/5.00503/Term/Cap.pm OLD_FILES+=usr/libdata/perl/5.00503/Term/Complete.pm OLD_FILES+=usr/libdata/perl/5.00503/Term/ReadLine.pm OLD_FILES+=usr/libdata/perl/5.00503/Test/Harness.pm OLD_FILES+=usr/libdata/perl/5.00503/Text/Abbrev.pm OLD_FILES+=usr/libdata/perl/5.00503/Text/ParseWords.pm OLD_FILES+=usr/libdata/perl/5.00503/Text/Soundex.pm OLD_FILES+=usr/libdata/perl/5.00503/Text/Tabs.pm OLD_FILES+=usr/libdata/perl/5.00503/Text/Wrap.pm OLD_FILES+=usr/libdata/perl/5.00503/Tie/Array.pm OLD_FILES+=usr/libdata/perl/5.00503/Tie/Handle.pm OLD_FILES+=usr/libdata/perl/5.00503/Tie/Hash.pm OLD_FILES+=usr/libdata/perl/5.00503/Tie/RefHash.pm OLD_FILES+=usr/libdata/perl/5.00503/Tie/Scalar.pm OLD_FILES+=usr/libdata/perl/5.00503/Tie/SubstrHash.pm OLD_FILES+=usr/libdata/perl/5.00503/Time/Local.pm OLD_FILES+=usr/libdata/perl/5.00503/Time/gmtime.pm OLD_FILES+=usr/libdata/perl/5.00503/Time/localtime.pm OLD_FILES+=usr/libdata/perl/5.00503/Time/tm.pm OLD_FILES+=usr/libdata/perl/5.00503/User/grent.pm OLD_FILES+=usr/libdata/perl/5.00503/User/pwent.pm OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/GetOptions.al OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/FindOption.al OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Configure.al OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/config.al OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Croak.al OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/autosplit.ix OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Deparse.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/CC.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Debug.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Showlex.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/makeliblinks OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bblock.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/cc_harness OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bytecode.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Stackobj.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Xref.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Lint.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Asmdata.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Assembler.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Disassembler.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/disassemble OLD_FILES+=usr/libdata/perl/5.00503/mach/B/assemble OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Terse.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B/C.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/EXTERN.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/INTERN.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSUB.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSlock.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/av.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/bytecode.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/byterun.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cc_runtime.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/config.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cop.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cv.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/dosish.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embed.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embedvar.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/fakethr.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/form.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/gv.h 
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/handy.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/hv.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/intrpvar.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/iperlsys.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/keywords.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/mg.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/nostdio.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objXSUB.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objpp.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/op.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/opcode.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/patchlevel.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perl.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlio.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsdio.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsfio.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlvars.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perly.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp_proto.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/proto.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regcomp.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regexp.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regnodes.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/scope.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/sv.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thrdvar.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thread.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/unixish.h OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/util.h OLD_FILES+=usr/libdata/perl/5.00503/mach/Data/Dumper.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/File.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Select.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Socket.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Handle.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Seekable.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Pipe.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/SysV.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Msg.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Semaphore.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/autosplit.ix OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/.exists OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_findfile.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_expandspec.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_find_symbol_anywhere.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/autosplit.ix OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/DynaLoader.a OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/extralibs.ld OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/.packlist 
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/assert.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tolower.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/toupper.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/closedir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/opendir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/readdir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewinddir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/errno.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/creat.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fcntl.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrgid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrnam.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atan2.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/cos.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fabs.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/log.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/pow.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sin.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sqrt.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwnam.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwuid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/longjmp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setjmp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/kill.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/feof.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/siglongjmp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sigsetjmp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/raise.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/offsetof.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/clearerr.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fclose.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fdopen.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgets.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fileno.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fopen.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fprintf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputs.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fread.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/freopen.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fscanf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fseek.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ferror.al 
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fflush.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetpos.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fsetpos.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ftell.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fwrite.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getchar.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gets.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/perror.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/printf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putchar.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/puts.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/remove.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rename.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewind.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/scanf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sprintf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sscanf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tmpfile.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ungetc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vfprintf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vprintf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vsprintf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/abs.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atexit.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atof.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atoi.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atol.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/bsearch.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/calloc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/div.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exit.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/free.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getenv.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/labs.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ldiv.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/malloc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/qsort.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rand.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/realloc.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/srand.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/system.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memchr.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcmp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcpy.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memmove.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memset.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcat.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strchr.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcmp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcpy.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcspn.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strerror.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strlen.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncat.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncmp.al 
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncpy.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strpbrk.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strrchr.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strspn.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strstr.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strtok.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chmod.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fstat.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/mkdir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/stat.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/umask.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/wait.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/waitpid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gmtime.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/localtime.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/time.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/alarm.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chdir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chown.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execl.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execle.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execlp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execv.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execve.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execvp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fork.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getcwd.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getegid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/geteuid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgroups.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getlogin.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpgrp.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getppid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getuid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/isatty.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/link.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rmdir.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setbuf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setgid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setuid.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setvbuf.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sleep.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/unlink.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/utime.al OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/autosplit.ix OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.so 
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.so OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.bs OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/sdbm/extralibs.ld OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Errno/.packlist OLD_FILES+=usr/libdata/perl/5.00503/mach/Config.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/B.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/O.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/perllocal.pod OLD_FILES+=usr/libdata/perl/5.00503/mach/DB_File.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/Errno.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/Fcntl.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/IO.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/NDBM_File.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/Safe.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/Opcode.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/ops.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pod OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/SDBM_File.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/Socket.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/attrs.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/re.pm OLD_FILES+=usr/libdata/perl/5.00503/mach/_h2ph_pre.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/a.out.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_ccb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_debug.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_extend.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_periph.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_queue.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_sim.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_periph.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_sim.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/aio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/alias.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/assert.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/bitstring.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/calendar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/camlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/com_err.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/com_right.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ctype.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/curses.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/db.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/des.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/devstat.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/dialog.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/dirent.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/disktab.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/dlfcn.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/elf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/err.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/errno.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/eti.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/fcntl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/fetch.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/float.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/floatingpoint.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/fnmatch.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/form.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/fstab.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ftpio.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/fts.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/glob.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/gmp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/gnuregex.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/grp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/histedit.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ieeefp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ifaddrs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/inttypes.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/iso646.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/kvm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/libatm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/libdisk.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/libgen.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/libusb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/libutil.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/limits.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/link.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/linker_set.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/locale.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/login_cap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/malloc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/FlexLexer.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/PlotFile.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/SFile.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/_G_config.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algo.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algobase.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/alloc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/builtinbuf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/bvector.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/complex.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/defalloc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/deque.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/editbuf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/floatio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/fstream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/function.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_map.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_set.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hashtable.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/heap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/indstream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iolibio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iomanip.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/list.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostdio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostreamP.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/istream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iterator.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libioP.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/map.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multimap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multiset.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/new.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ostream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pair.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/parsestream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pfstream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/procbuf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pthread_alloc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/rope.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ropeimpl.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/set.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/slist.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stack.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stdiostream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algo.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tree.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algobase.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_alloc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_bvector.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_config.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_construct.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_deque.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_function.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_fun.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_map.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_set.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hashtable.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_heap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_iterator.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_list.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_map.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multimap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multiset.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_numeric.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_pair.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_queue.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_raw_storage_iter.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_relops.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_rope.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_set.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_slist.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_stack.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tempbuf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tree.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_uninitialized.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_vector.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/streambuf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strfile.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strstream.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tempbuf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/type_traits.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/vector.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/math.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/md2.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/md4.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/md5.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/memory.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/menu.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/mp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/mpool.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/mqueue.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ncurses.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ndbm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netdb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nl_types.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nlist.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/objformat.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/opie.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/osreldate.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/panel.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/paths.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-int.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-namedb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/poll.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread_np.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/pwd.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ranlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/regex.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/regexp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/resolv.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ripemd.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/rune.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/runetype.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/sched.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/search.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/semaphore.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/setjmp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/sgtty.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/sha.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/signal.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/skey.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stab.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stand.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stdarg.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stddef.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stdio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stdlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/strhash.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/string.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/stringlist.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/strings.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/struct.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/sysexits.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/syslog.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/taclib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/tar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/tcpd.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/term.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/termcap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/termios.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/time.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/timers.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ttyent.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/ucontext.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/unctrl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/unistd.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/utime.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/utmp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/values.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/varargs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/vgl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/vis.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/zconf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/zlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/ftp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/inet.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser_compat.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/telnet.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/tftp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/assertions.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/ctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/dst.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/eventlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/heap.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/irpmarshall.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/logging.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/memcluster.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/misc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/tree.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/list.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ansi.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apic.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_bios.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_segments.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asc_ioctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asmacros.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asnames.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/atomic.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bootinfo.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_at386.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_memio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pc98.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio_ind.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cdk.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/clock.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/comstats.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/console.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpu.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpufunc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cputypes.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cronyx.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/db_machdep.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/dvcfg.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/elf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/endian.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/exec.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/float.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/floatingpoint.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/frame.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globaldata.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globals.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/gsc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_cause.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_debug.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_ioctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_rbch_ioctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_tel_ioctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_trace.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ieeefp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wavelan_ieee.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wl_wavelan.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/iic.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/in_cksum.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_bt848.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_ctx.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_fd.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_meteor.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ipl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/joystick.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/limits.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/lock.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/md_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mouse.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mpapic.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mtpr.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_dma.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/npx.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/param.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcaudioio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb_ext.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcvt_ioctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/perfmon.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/physio_proc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pmap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/proc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/profile.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/psl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ptrace.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reg.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reloc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/resource.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/segments.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/setjmp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sigframe.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/signal.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smptests.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/soundcard.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/speaker.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/specialreg.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/spigot.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/stdarg.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sysarch.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/trap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/tss.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/types.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/uc_device.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ucontext.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ultrasound.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/varargs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vm86.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vmparam.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/wtio.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_isppp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pci_cfgreg.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bootsect.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bpb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/denode.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/direntry.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/fat.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/msdosfsmount.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf_compat.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpfdesc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bridge.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ethernet.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/hostcache.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_atm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_dl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gif.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ieee80211.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_llc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_media.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_mib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ppp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_pppvar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_slvar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_sppp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_stf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tapvar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tun.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slip.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tunvar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_types.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_vlan_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/intrq.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/iso88025.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/net_osdep.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/netisr.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/pfkeyv2.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_comp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_defs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/radix.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/raw_cb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/route.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slcompress.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/zlib.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_faith.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gre.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/krpc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsdiskless.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsm_subs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsmount.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsnode.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsproto.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrtt.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrvcache.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsv2.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nqnfs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/rpcv2.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/xdr_subs.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/aarp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_extern.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/endian.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/phase2.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_cm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_if.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_ioctl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_pcb.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sigmgr.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_stack.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sys.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_vc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/kern_include.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/port.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/queue.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/netgraph.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_UI.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_async.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bpf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bridge.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_cisco.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_echo.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ether.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_frame_relay.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_hole.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_iface.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ksocket.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_lmi.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_message.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_mppc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_one2many.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_parse.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ppp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pppoe.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pptpgre.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_rfc1490.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_sample.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socket.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socketvar.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tee.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tty.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_vjc.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_eiface.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_etf.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_device.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_l2tp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_fec.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_atm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_ether.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_fddi.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_gif.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_hostcache.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_pcb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_systm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_auth.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_compat.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_dummynet.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_ecn.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_encap.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fil.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_flow.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_frag.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_icmp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_mroute.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_nat.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_proxy.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_state.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipl.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipprotosw.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_debug.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_fsm.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_seq.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_timer.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcpip.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw2.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_gre.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/icmp6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_gif.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_ifattach.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_pcb.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_prefix.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_ecn.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_fw.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_mroute.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6protosw.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/mld6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/nd6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/scope6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/tcp6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/udp6_var.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp_rijndael.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/raw_ip6.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_if.ph OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_ip.ph 
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netnatm/natm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_cfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_lib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_nls.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rcfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_sock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/nwerror.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_error.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/sp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spidp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_compr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_ihash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_inode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_vfsops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_node.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/NXConstStr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Protocol.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/encoding.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/runtime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/sarray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/thr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/typedstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/blowfish.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/buffer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf_api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/crypto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dh.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dso.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ebcdic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/evp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hmac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/lhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/mdc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/obj_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/objects.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs12.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs7.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/safestack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl23.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/symhacks.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tls1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tmdiff.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/txt_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509_vfy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509v3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/idea.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1t.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cryptlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des_old.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/engine.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/krb5_asn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/kssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ocsp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ossl_typ.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/eng_int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_4758_cca_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_aep_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_atalla_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_cswift_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ncipher_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_nuron_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_sureware_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ubsec_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cardinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/driver.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/i82365.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pccard_nbk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/slot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/meciareg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcic_pci.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcicvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/posix4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/dumprestore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/routed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/rwhod.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/talkd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/timed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/chardefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/history.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/keymaps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/readline.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlstdc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/tilde.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_unix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des_crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_rmt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_com.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/xdr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/bootparam_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/klm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nfs_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_cache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_callback.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_tags.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nislib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nlm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rnusers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rquota.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rwall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/sm_inter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/spray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypclnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yppasswd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypupdate_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypxfrd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_macros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_appl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_mod_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_modules.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/mit-sipb-copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/_posix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/agpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/assym.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/blist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/buf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus_private.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/callout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ccdvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdrio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/chio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/clist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cons.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/consio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dir.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dataacq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/device_port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/devicestat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disklabel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/diskslice.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dkstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/domain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dvdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf32.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf64.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_common.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_generic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/event.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventhandler.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/extattr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filedesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/gmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/interrupt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioccom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ipc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/jail.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kernel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ktrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/libkern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lockf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/memrange.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/module.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msgbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/namei.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pciio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pipe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/procfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/random.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/reboot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resourcevar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rtprio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/select.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/shm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signalvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/snoop.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sockio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/stat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall-hide.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslimits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/taskqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timeb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timepps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/times.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tprintf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttychars.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttycom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydefaults.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucred.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/uio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/un.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unpcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/utsname.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vmmeter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wait.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wormio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/xrpuio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kobj.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/nlist_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mchain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fnv_hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/iconv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/swap_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_kern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_page.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pageout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_zone.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vnode_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdbool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/langinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/netbios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_dev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_tran.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_trantcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g2c.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf-hints.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusbhid.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib_vs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readpassphrase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wchar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/castsb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptodev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptosoft.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/deflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rmd160.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/skipjack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bzlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl5004delta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlapio.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbook.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlcall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldata.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldebug.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldelta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldiag.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldsc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlembed.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq1.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq2.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq3.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq4.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq5.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq6.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq7.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlipc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq8.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq9.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlform.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfunc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlguts.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlhist.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllocale.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllol.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodinstall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodlib.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlobj.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlop.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlopentut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlpod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlport.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlre.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlref.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlreftut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlrun.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsec.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlstyle.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsub.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsyn.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlthrtut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltie.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltrap.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlvar.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxs.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxstut.pod
OLD_FILES+=usr/libdata/perl/5.00503/AnyDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoSplit.pm
OLD_FILES+=usr/libdata/perl/5.00503/Benchmark.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN.pm
OLD_FILES+=usr/libdata/perl/5.00503/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/Cwd.pm
OLD_FILES+=usr/libdata/perl/5.00503/DirHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Dumpvalue.pm
OLD_FILES+=usr/libdata/perl/5.00503/English.pm
OLD_FILES+=usr/libdata/perl/5.00503/Env.pm
OLD_FILES+=usr/libdata/perl/5.00503/Exporter.pm
OLD_FILES+=usr/libdata/perl/5.00503/Fatal.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileCache.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/FindBin.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelectSaver.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelfLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/Shell.pm
OLD_FILES+=usr/libdata/perl/5.00503/Symbol.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test.pm
OLD_FILES+=usr/libdata/perl/5.00503/abbrev.pl
OLD_FILES+=usr/libdata/perl/5.00503/UNIVERSAL.pm
OLD_FILES+=usr/libdata/perl/5.00503/assert.pl
OLD_FILES+=usr/libdata/perl/5.00503/autouse.pm
OLD_FILES+=usr/libdata/perl/5.00503/base.pm
OLD_FILES+=usr/libdata/perl/5.00503/bigfloat.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigint.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigrat.pl
OLD_FILES+=usr/libdata/perl/5.00503/blib.pm
OLD_FILES+=usr/libdata/perl/5.00503/cacheout.pl
OLD_FILES+=usr/libdata/perl/5.00503/chat2.pl
OLD_FILES+=usr/libdata/perl/5.00503/complete.pl
OLD_FILES+=usr/libdata/perl/5.00503/constant.pm
OLD_FILES+=usr/libdata/perl/5.00503/ctime.pl
OLD_FILES+=usr/libdata/perl/5.00503/diagnostics.pm
OLD_FILES+=usr/libdata/perl/5.00503/dotsh.pl
OLD_FILES+=usr/libdata/perl/5.00503/dumpvar.pl
OLD_FILES+=usr/libdata/perl/5.00503/exceptions.pl
OLD_FILES+=usr/libdata/perl/5.00503/fastcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/fields.pm
OLD_FILES+=usr/libdata/perl/5.00503/find.pl
OLD_FILES+=usr/libdata/perl/5.00503/finddepth.pl
OLD_FILES+=usr/libdata/perl/5.00503/flush.pl
OLD_FILES+=usr/libdata/perl/5.00503/ftp.pl
OLD_FILES+=usr/libdata/perl/5.00503/getcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopts.pl
OLD_FILES+=usr/libdata/perl/5.00503/hostname.pl
OLD_FILES+=usr/libdata/perl/5.00503/importenv.pl
OLD_FILES+=usr/libdata/perl/5.00503/integer.pm
OLD_FILES+=usr/libdata/perl/5.00503/less.pm
OLD_FILES+=usr/libdata/perl/5.00503/lib.pm
OLD_FILES+=usr/libdata/perl/5.00503/locale.pm
OLD_FILES+=usr/libdata/perl/5.00503/look.pl
OLD_FILES+=usr/libdata/perl/5.00503/newgetopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/open2.pl
OLD_FILES+=usr/libdata/perl/5.00503/open3.pl
OLD_FILES+=usr/libdata/perl/5.00503/overload.pm
OLD_FILES+=usr/libdata/perl/5.00503/perl5db.pl
OLD_FILES+=usr/libdata/perl/5.00503/pwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/shellwords.pl
OLD_FILES+=usr/libdata/perl/5.00503/sigtrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/stat.pl
OLD_FILES+=usr/libdata/perl/5.00503/strict.pm
OLD_FILES+=usr/libdata/perl/5.00503/subs.pm
OLD_FILES+=usr/libdata/perl/5.00503/syslog.pl
OLD_FILES+=usr/libdata/perl/5.00503/tainted.pl
OLD_FILES+=usr/libdata/perl/5.00503/termcap.pl
OLD_FILES+=usr/libdata/perl/5.00503/timelocal.pl
OLD_FILES+=usr/libdata/perl/5.00503/validate.pl
OLD_FILES+=usr/libdata/perl/5.00503/vars.pm
OLD_FILES+=usr/libdata/perl/5.00503/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/.exists
OLD_FILES+=usr/libdata/perl/5.00503/DynaLoader.pm
OLD_FILES+=usr/share/perl/man/man3/AnyDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoSplit.3.gz
OLD_FILES+=usr/share/perl/man/man3/B.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Asmdata.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Assembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bblock.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bytecode.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::C.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::CC.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Debug.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Deparse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Disassembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Lint.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Showlex.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stackobj.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Terse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Xref.3.gz
OLD_FILES+=usr/share/perl/man/man3/Benchmark.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Apache.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Cookie.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Fast.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Push.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Switch.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::FirstTime.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::Nox.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/Class::Struct.3.gz
OLD_FILES+=usr/share/perl/man/man3/Config.3.gz
OLD_FILES+=usr/share/perl/man/man3/Cwd.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Data::Dumper.3.gz
OLD_FILES+=usr/share/perl/man/man3/Devel::SelfStubber.3.gz
OLD_FILES+=usr/share/perl/man/man3/DirHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Dumpvalue.3.gz
OLD_FILES+=usr/share/perl/man/man3/DynaLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/English.3.gz
OLD_FILES+=usr/share/perl/man/man3/Env.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Command.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Embed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Install.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Installed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Liblist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MakeMaker.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Manifest.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mkbootstrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mksymlists.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Packlist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::testlib.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fatal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fcntl.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Basename.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::CheckTree.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Compare.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Copy.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::DosGlob.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Path.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Mac.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::stat.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileCache.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/FindBin.3.gz
OLD_FILES+=usr/share/perl/man/man3/GDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Long.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Std.3.gz
OLD_FILES+=usr/share/perl/man/man3/I18N::Collate.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::File.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Pipe.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Seekable.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Msg.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open2.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open3.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::SysV.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigFloat.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigInt.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Complex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Trig.3.gz
OLD_FILES+=usr/share/perl/man/man3/NDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::Ping.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::hostent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::netent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::protoent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::servent.3.gz
OLD_FILES+=usr/share/perl/man/man3/O.3.gz
OLD_FILES+=usr/share/perl/man/man3/ODBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Opcode.3.gz
OLD_FILES+=usr/share/perl/man/man3/POSIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Html.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text.3.gz
OLD_FILES+=usr/share/perl/man/man3/SDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Safe.3.gz
OLD_FILES+=usr/share/perl/man/man3/Search::Dict.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelectSaver.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelfLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/Shell.3.gz
OLD_FILES+=usr/share/perl/man/man3/Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/Symbol.3.gz
OLD_FILES+=usr/share/perl/man/man3/re.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Cap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Complete.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ReadLine.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test::Harness.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Abbrev.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::ParseWords.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Soundex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Tabs.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Wrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Queue.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Signal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Specific.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Array.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Hash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::RefHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Scalar.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::SubstrHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::Local.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::gmtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::localtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::tm.3.gz
OLD_FILES+=usr/share/perl/man/man3/UNIVERSAL.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::grent.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::pwent.3.gz
OLD_FILES+=usr/share/perl/man/man3/attrs.3.gz
OLD_FILES+=usr/share/perl/man/man3/autouse.3.gz
OLD_FILES+=usr/share/perl/man/man3/base.3.gz
OLD_FILES+=usr/share/perl/man/man3/blib.3.gz
OLD_FILES+=usr/share/perl/man/man3/constant.3.gz
OLD_FILES+=usr/share/perl/man/man3/diagnostics.3.gz
OLD_FILES+=usr/share/perl/man/man3/fields.3.gz
OLD_FILES+=usr/share/perl/man/man3/integer.3.gz
OLD_FILES+=usr/share/perl/man/man3/less.3.gz
OLD_FILES+=usr/share/perl/man/man3/lib.3.gz
OLD_FILES+=usr/share/perl/man/man3/locale.3.gz
OLD_FILES+=usr/share/perl/man/man3/ops.3.gz
OLD_FILES+=usr/share/perl/man/man3/overload.3.gz
OLD_FILES+=usr/share/perl/man/man3/sigtrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/strict.3.gz
OLD_FILES+=usr/share/perl/man/man3/subs.3.gz
OLD_FILES+=usr/share/perl/man/man3/vars.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stash.3.gz
OLD_FILES+=usr/share/perl/man/man3/ByteLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Pretty.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB.3.gz
OLD_FILES+=usr/share/perl/man/man3/DProf::DProf.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Cygwin.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Glob::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Hostname::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Dir.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Poll.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::INET.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::UNIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Peek::Peek.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Checker.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::InputObjects.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Man.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::ParseUtils.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Parser.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Plainer.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Color.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Termcap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Usage.3.gz
OLD_FILES+=usr/share/perl/man/man3/Syslog::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ANSIColor.3.gz
OLD_FILES+=usr/share/perl/man/man3/XSLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/attributes.3.gz
OLD_FILES+=usr/share/perl/man/man3/bytes.3.gz
OLD_FILES+=usr/share/perl/man/man3/charnames.3.gz
OLD_FILES+=usr/share/perl/man/man3/filetest.3.gz
OLD_FILES+=usr/share/perl/man/man3/open.3.gz
OLD_FILES+=usr/share/perl/man/man3/utf8.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings::register.3.gz
OLD_FILES+=usr/share/perl/man/whatis
OLD_FILES+=usr/share/man/man1/CA.pl.1.gz
OLD_FILES+=usr/share/man/man1/asn1parse.1.gz
OLD_FILES+=usr/share/man/man1/ca.1.gz
OLD_FILES+=usr/share/man/man1/ciphers.1.gz
OLD_FILES+=usr/share/man/man1/config.1.gz
OLD_FILES+=usr/share/man/man1/crl.1.gz
OLD_FILES+=usr/share/man/man1/crl2pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/dgst.1.gz
OLD_FILES+=usr/share/man/man1/dhparam.1.gz
OLD_FILES+=usr/share/man/man1/doscmd.1.gz
OLD_FILES+=usr/share/man/man1/dsa.1.gz
OLD_FILES+=usr/share/man/man1/dsaparam.1.gz
OLD_FILES+=usr/share/man/man1/enc.1.gz
OLD_FILES+=usr/share/man/man1/gendsa.1.gz
OLD_FILES+=usr/share/man/man1/genrsa.1.gz
OLD_FILES+=usr/share/man/man1/getNAME.1.gz
OLD_FILES+=usr/share/man/man1/nseq.1.gz
OLD_FILES+=usr/share/man/man1/ocsp.1.gz
OLD_FILES+=usr/share/man/man1/openssl.1.gz
OLD_FILES+=usr/share/man/man1/perl.1.gz
OLD_FILES+=usr/share/man/man1/perl5004delta.1.gz
OLD_FILES+=usr/share/man/man1/perlapio.1.gz
OLD_FILES+=usr/share/man/man1/perlbook.1.gz
OLD_FILES+=usr/share/man/man1/perlbot.1.gz
OLD_FILES+=usr/share/man/man1/perlcall.1.gz
OLD_FILES+=usr/share/man/man1/perldata.1.gz
OLD_FILES+=usr/share/man/man1/perldebug.1.gz
OLD_FILES+=usr/share/man/man1/perldelta.1.gz
OLD_FILES+=usr/share/man/man1/perldiag.1.gz
OLD_FILES+=usr/share/man/man1/perldsc.1.gz
OLD_FILES+=usr/share/man/man1/perlembed.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq1.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq2.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq3.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq4.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq5.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq6.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq7.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq8.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq9.1.gz
OLD_FILES+=usr/share/man/man1/perlform.1.gz
OLD_FILES+=usr/share/man/man1/perlfunc.1.gz
OLD_FILES+=usr/share/man/man1/perlguts.1.gz
OLD_FILES+=usr/share/man/man1/perlhist.1.gz
OLD_FILES+=usr/share/man/man1/perlipc.1.gz
OLD_FILES+=usr/share/man/man1/perllocale.1.gz
OLD_FILES+=usr/share/man/man1/perllol.1.gz
OLD_FILES+=usr/share/man/man1/perlmod.1.gz
OLD_FILES+=usr/share/man/man1/perlmodinstall.1.gz
OLD_FILES+=usr/share/man/man1/perlmodlib.1.gz
OLD_FILES+=usr/share/man/man1/perlobj.1.gz
OLD_FILES+=usr/share/man/man1/perlop.1.gz
OLD_FILES+=usr/share/man/man1/perlopentut.1.gz
OLD_FILES+=usr/share/man/man1/perlpod.1.gz
OLD_FILES+=usr/share/man/man1/perlport.1.gz
OLD_FILES+=usr/share/man/man1/perlre.1.gz
OLD_FILES+=usr/share/man/man1/perlref.1.gz
OLD_FILES+=usr/share/man/man1/perlreftut.1.gz
OLD_FILES+=usr/share/man/man1/perlrun.1.gz
OLD_FILES+=usr/share/man/man1/perlsec.1.gz
OLD_FILES+=usr/share/man/man1/perlstyle.1.gz
OLD_FILES+=usr/share/man/man1/perlsub.1.gz
OLD_FILES+=usr/share/man/man1/perlsyn.1.gz
OLD_FILES+=usr/share/man/man1/perlthrtut.1.gz
OLD_FILES+=usr/share/man/man1/perltie.1.gz
OLD_FILES+=usr/share/man/man1/perltoc.1.gz
OLD_FILES+=usr/share/man/man1/perltoot.1.gz
OLD_FILES+=usr/share/man/man1/perltrap.1.gz
OLD_FILES+=usr/share/man/man1/perlvar.1.gz
OLD_FILES+=usr/share/man/man1/perlxs.1.gz
OLD_FILES+=usr/share/man/man1/perlxstut.1.gz
OLD_FILES+=usr/share/man/man1/perlbug.1.gz
OLD_FILES+=usr/share/man/man1/perlcc.1.gz
OLD_FILES+=usr/share/man/man1/perldoc.1.gz
OLD_FILES+=usr/share/man/man1/perl5005delta.1.gz
OLD_FILES+=usr/share/man/man1/perlfork.1.gz
OLD_FILES+=usr/share/man/man1/perlboot.1.gz
OLD_FILES+=usr/share/man/man1/perltootc.1.gz
OLD_FILES+=usr/share/man/man1/perldbmfilter.1.gz
OLD_FILES+=usr/share/man/man1/perldebguts.1.gz
OLD_FILES+=usr/share/man/man1/perlnumber.1.gz
OLD_FILES+=usr/share/man/man1/perlcompile.1.gz
OLD_FILES+=usr/share/man/man1/perltodo.1.gz
OLD_FILES+=usr/share/man/man1/perlapi.1.gz
OLD_FILES+=usr/share/man/man1/perlintern.1.gz
OLD_FILES+=usr/share/man/man1/perlhack.1.gz
OLD_FILES+=usr/share/man/man1/perlbc.1.gz
OLD_FILES+=usr/share/man/man1/pkcs12.1.gz
OLD_FILES+=usr/share/man/man1/pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/pkcs8.1.gz
OLD_FILES+=usr/share/man/man1/rand.1.gz
OLD_FILES+=usr/share/man/man1/req.1.gz
OLD_FILES+=usr/share/man/man1/rsa.1.gz
OLD_FILES+=usr/share/man/man1/rsautl.1.gz
OLD_FILES+=usr/share/man/man1/s_client.1.gz
OLD_FILES+=usr/share/man/man1/s_server.1.gz
OLD_FILES+=usr/share/man/man1/sess_id.1.gz
OLD_FILES+=usr/share/man/man1/smime.1.gz
OLD_FILES+=usr/share/man/man1/speed.1.gz
OLD_FILES+=usr/share/man/man1/spkac.1.gz
OLD_FILES+=usr/share/man/man1/verify.1.gz
OLD_FILES+=usr/share/man/man1/version.1.gz
OLD_FILES+=usr/share/man/man1/x509.1.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_dup.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_set_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_data_into_file.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_open_tar.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_support_format_gnutar.3.gz
OLD_FILES+=usr/share/man/man3/cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_setkey.3.gz
OLD_FILES+=usr/share/man/man3/encrypt.3.gz
OLD_FILES+=usr/share/man/man3/endvfsent.3.gz
OLD_FILES+=usr/share/man/man3/getvfsbytype.3.gz
OLD_FILES+=usr/share/man/man3/getvfsent.3.gz
OLD_FILES+=usr/share/man/man3/isnanf.3.gz
OLD_FILES+=usr/share/man/man3/libautofs.3.gz
OLD_FILES+=usr/share/man/man3/pthread_attr_setsstack.3.gz
OLD_FILES+=usr/share/man/man3/pthread_getcancelstate.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_getpshared.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_setpshared.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/setkey.3.gz
OLD_FILES+=usr/share/man/man3/setvfsent.3.gz
OLD_FILES+=usr/share/man/man3/ssl.3.gz
OLD_FILES+=usr/share/man/man3/vfsisloadable.3.gz
OLD_FILES+=usr/share/man/man3/vfsload.3.gz
OLD_FILES+=usr/share/man/man4/als4000.4.gz
OLD_FILES+=usr/share/man/man4/csa.4.gz
OLD_FILES+=usr/share/man/man4/emu10k1.4.gz
OLD_FILES+=usr/share/man/man4/euc.4.gz
OLD_FILES+=usr/share/man/man4/gusc.4.gz
OLD_FILES+=usr/share/man/man4/if_fwp.4.gz
OLD_FILES+=usr/share/man/man4/lomac.4.gz
OLD_FILES+=usr/share/man/man4/maestro3.4.gz
OLD_FILES+=usr/share/man/man4/raid.4.gz
OLD_FILES+=usr/share/man/man4/sbc.4.gz
OLD_FILES+=usr/share/man/man4/sd.4.gz
OLD_FILES+=usr/share/man/man4/snc.4.gz
OLD_FILES+=usr/share/man/man4/st.4.gz
OLD_FILES+=usr/share/man/man4/uaudio.4.gz
OLD_FILES+=usr/share/man/man4/utf2.4.gz
OLD_FILES+=usr/share/man/man4/vinumdebug.4.gz
OLD_FILES+=usr/share/man/man5/disklabel.5.gz
OLD_FILES+=usr/share/man/man5/dm.conf.5.gz
OLD_FILES+=usr/share/man/man5/ranlib.5.gz
OLD_FILES+=usr/share/man/man5/utf2.5.gz
OLD_FILES+=usr/share/man/man7/groff_mwww.7.gz
OLD_FILES+=usr/share/man/man7/mmroff.7.gz
OLD_FILES+=usr/share/man/man7/mwww.7.gz
OLD_FILES+=usr/share/man/man7/style.perl.7.gz
OLD_FILES+=usr/share/man/man8/apm.8.gz
OLD_FILES+=usr/share/man/man8/apmconf.8.gz
OLD_FILES+=usr/share/man/man8/apmd.8.gz
OLD_FILES+=usr/share/man/man8/dm.8.gz
OLD_FILES+=usr/share/man/man8/pam_ftp.8.gz
OLD_FILES+=usr/share/man/man8/pam_wheel.8.gz
OLD_FILES+=usr/share/man/man8/sconfig.8.gz
OLD_FILES+=usr/share/man/man8/ssl.8.gz
OLD_FILES+=usr/share/man/man8/wlconfig.8.gz
OLD_FILES+=usr/share/man/man9/CURSIG.9.gz
OLD_FILES+=usr/share/man/man9/VFS_INIT.9.gz
OLD_FILES+=usr/share/man/man9/at_exit.9.gz
OLD_FILES+=usr/share/man/man9/at_fork.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_add.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_remove.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_empty.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_remove.9.gz
OLD_FILES+=usr/share/man/man9/endtsleep.9.gz
OLD_FILES+=usr/share/man/man9/jumbo.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_freem.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_alloc.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_free.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_steal.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_phys_to_kva.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_vm_init.9.gz
OLD_FILES+=usr/share/man/man9/mac_biba.9.gz
OLD_FILES+=usr/share/man/man9/mac_bsdextended.9.gz
OLD_FILES+=usr/share/man/man9/mono_time.9.gz
OLD_FILES+=usr/share/man/man9/p1003_1b.9.gz
OLD_FILES+=usr/share/man/man9/pmap_prefault.9.gz
OLD_FILES+=usr/share/man/man9/posix4.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_name.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_string.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_unit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_exit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_fork.9.gz
OLD_FILES+=usr/share/man/man9/runtime.9.gz
OLD_FILES+=usr/share/man/man9/sleepinit.9.gz
OLD_FILES+=usr/share/man/man9/unsleep.9.gz
OLD_FILES+=usr/share/man/ja/man1/perl.1.gz
OLD_FILES+=usr/share/games/atc/Game_List
OLD_FILES+=usr/share/games/atc/Killer
OLD_FILES+=usr/share/games/atc/crossover
OLD_FILES+=usr/share/games/atc/default
OLD_FILES+=usr/share/games/atc/easy
OLD_FILES+=usr/share/games/atc/game_2
OLD_FILES+=usr/share/games/larn/larnmaze
OLD_FILES+=usr/share/games/larn/larnopts
OLD_FILES+=usr/share/games/larn/larn.help
OLD_FILES+=usr/share/games/quiz.db/africa
OLD_FILES+=usr/share/games/quiz.db/america
OLD_FILES+=usr/share/games/quiz.db/areas
OLD_FILES+=usr/share/games/quiz.db/arith
OLD_FILES+=usr/share/games/quiz.db/asia
OLD_FILES+=usr/share/games/quiz.db/babies
OLD_FILES+=usr/share/games/quiz.db/bard
OLD_FILES+=usr/share/games/quiz.db/chinese
OLD_FILES+=usr/share/games/quiz.db/collectives
OLD_FILES+=usr/share/games/quiz.db/ed
OLD_FILES+=usr/share/games/quiz.db/elements
OLD_FILES+=usr/share/games/quiz.db/europe
OLD_FILES+=usr/share/games/quiz.db/flowers
OLD_FILES+=usr/share/games/quiz.db/greek
OLD_FILES+=usr/share/games/quiz.db/inca
OLD_FILES+=usr/share/games/quiz.db/index
OLD_FILES+=usr/share/games/quiz.db/latin
OLD_FILES+=usr/share/games/quiz.db/locomotive
OLD_FILES+=usr/share/games/quiz.db/midearth
OLD_FILES+=usr/share/games/quiz.db/morse
OLD_FILES+=usr/share/games/quiz.db/murders
OLD_FILES+=usr/share/games/quiz.db/poetry
OLD_FILES+=usr/share/games/quiz.db/posneg
OLD_FILES+=usr/share/games/quiz.db/pres
OLD_FILES+=usr/share/games/quiz.db/province
OLD_FILES+=usr/share/games/quiz.db/seq-easy
OLD_FILES+=usr/share/games/quiz.db/seq-hard
OLD_FILES+=usr/share/games/quiz.db/sexes
OLD_FILES+=usr/share/games/quiz.db/sov
OLD_FILES+=usr/share/games/quiz.db/spell
OLD_FILES+=usr/share/games/quiz.db/state
OLD_FILES+=usr/share/games/quiz.db/trek
OLD_FILES+=usr/share/games/quiz.db/ucc
OLD_FILES+=usr/share/games/cribbage.instr
OLD_FILES+=usr/share/games/fish.instr
OLD_FILES+=usr/share/games/wump.info
OLD_FILES+=usr/games/hide/adventure
OLD_FILES+=usr/games/hide/arithmetic
OLD_FILES+=usr/games/hide/atc
OLD_FILES+=usr/games/hide/backgammon
OLD_FILES+=usr/games/hide/teachgammon
OLD_FILES+=usr/games/hide/battlestar
OLD_FILES+=usr/games/hide/bs
OLD_FILES+=usr/games/hide/canfield
OLD_FILES+=usr/games/hide/cribbage
OLD_FILES+=usr/games/hide/fish
OLD_FILES+=usr/games/hide/hack
OLD_FILES+=usr/games/hide/hangman
OLD_FILES+=usr/games/hide/larn
OLD_FILES+=usr/games/hide/mille
OLD_FILES+=usr/games/hide/phantasia
OLD_FILES+=usr/games/hide/quiz
OLD_FILES+=usr/games/hide/robots
OLD_FILES+=usr/games/hide/rogue
OLD_FILES+=usr/games/hide/sail
OLD_FILES+=usr/games/hide/snake
OLD_FILES+=usr/games/hide/trek
OLD_FILES+=usr/games/hide/worm
OLD_FILES+=usr/games/hide/wump
OLD_FILES+=usr/games/adventure
OLD_FILES+=usr/games/arithmetic
OLD_FILES+=usr/games/atc
OLD_FILES+=usr/games/backgammon
OLD_FILES+=usr/games/teachgammon
OLD_FILES+=usr/games/battlestar
OLD_FILES+=usr/games/bs
OLD_FILES+=usr/games/canfield
OLD_FILES+=usr/games/cfscores
OLD_FILES+=usr/games/cribbage
OLD_FILES+=usr/games/dm
OLD_FILES+=usr/games/fish
OLD_FILES+=usr/games/hack
OLD_FILES+=usr/games/hangman
OLD_FILES+=usr/games/larn
OLD_FILES+=usr/games/mille
OLD_FILES+=usr/games/phantasia
OLD_FILES+=usr/games/piano
OLD_FILES+=usr/games/pig
OLD_FILES+=usr/games/quiz
OLD_FILES+=usr/games/rain
OLD_FILES+=usr/games/robots
OLD_FILES+=usr/games/rogue
OLD_FILES+=usr/games/sail
OLD_FILES+=usr/games/snake
OLD_FILES+=usr/games/snscore
OLD_FILES+=usr/games/trek
OLD_FILES+=usr/games/wargames
OLD_FILES+=usr/games/worm
OLD_FILES+=usr/games/worms
OLD_FILES+=usr/games/wump
OLD_FILES+=sbin/mount_reiserfs
OLD_FILES+=usr/include/cam/cam_extend.h
OLD_FILES+=usr/include/dev/wi/wi_hostap.h
OLD_FILES+=usr/include/disktab.h
OLD_FILES+=usr/include/g++/FlexLexer.h
OLD_FILES+=usr/include/g++/PlotFile.h
OLD_FILES+=usr/include/g++/SFile.h
OLD_FILES+=usr/include/g++/_G_config.h
OLD_FILES+=usr/include/g++/algo.h
OLD_FILES+=usr/include/g++/algobase.h
OLD_FILES+=usr/include/g++/algorithm
OLD_FILES+=usr/include/g++/alloc.h
OLD_FILES+=usr/include/g++/bitset
OLD_FILES+=usr/include/g++/builtinbuf.h
OLD_FILES+=usr/include/g++/bvector.h
OLD_FILES+=usr/include/g++/cassert
OLD_FILES+=usr/include/g++/cctype
OLD_FILES+=usr/include/g++/cerrno
OLD_FILES+=usr/include/g++/cfloat
OLD_FILES+=usr/include/g++/ciso646
OLD_FILES+=usr/include/g++/climits
OLD_FILES+=usr/include/g++/clocale
OLD_FILES+=usr/include/g++/cmath
OLD_FILES+=usr/include/g++/complex
OLD_FILES+=usr/include/g++/complex.h
OLD_FILES+=usr/include/g++/csetjmp
OLD_FILES+=usr/include/g++/csignal
OLD_FILES+=usr/include/g++/cstdarg
OLD_FILES+=usr/include/g++/cstddef
OLD_FILES+=usr/include/g++/cstdio
OLD_FILES+=usr/include/g++/cstdlib
OLD_FILES+=usr/include/g++/cstring
OLD_FILES+=usr/include/g++/ctime
OLD_FILES+=usr/include/g++/cwchar
OLD_FILES+=usr/include/g++/cwctype
OLD_FILES+=usr/include/g++/defalloc.h
OLD_FILES+=usr/include/g++/deque
OLD_FILES+=usr/include/g++/deque.h
OLD_FILES+=usr/include/g++/editbuf.h
OLD_FILES+=usr/include/g++/exception
OLD_FILES+=usr/include/g++/floatio.h
OLD_FILES+=usr/include/g++/fstream
OLD_FILES+=usr/include/g++/fstream.h
OLD_FILES+=usr/include/g++/function.h
OLD_FILES+=usr/include/g++/functional
OLD_FILES+=usr/include/g++/hash_map
OLD_FILES+=usr/include/g++/hash_map.h
OLD_FILES+=usr/include/g++/hash_set
OLD_FILES+=usr/include/g++/hash_set.h
OLD_FILES+=usr/include/g++/hashtable.h
OLD_FILES+=usr/include/g++/heap.h
OLD_FILES+=usr/include/g++/indstream.h
OLD_FILES+=usr/include/g++/iolibio.h
OLD_FILES+=usr/include/g++/iomanip
OLD_FILES+=usr/include/g++/iomanip.h
OLD_FILES+=usr/include/g++/iosfwd
OLD_FILES+=usr/include/g++/iostdio.h
OLD_FILES+=usr/include/g++/iostream
OLD_FILES+=usr/include/g++/iostream.h
OLD_FILES+=usr/include/g++/iostreamP.h
OLD_FILES+=usr/include/g++/istream.h
OLD_FILES+=usr/include/g++/iterator
OLD_FILES+=usr/include/g++/iterator.h
OLD_FILES+=usr/include/g++/libio.h
OLD_FILES+=usr/include/g++/libioP.h
OLD_FILES+=usr/include/g++/list
OLD_FILES+=usr/include/g++/list.h
OLD_FILES+=usr/include/g++/map
OLD_FILES+=usr/include/g++/map.h
OLD_FILES+=usr/include/g++/memory
OLD_FILES+=usr/include/g++/multimap.h
OLD_FILES+=usr/include/g++/multiset.h
OLD_FILES+=usr/include/g++/new
OLD_FILES+=usr/include/g++/new.h
OLD_FILES+=usr/include/g++/numeric
OLD_FILES+=usr/include/g++/ostream.h
OLD_FILES+=usr/include/g++/pair.h
OLD_FILES+=usr/include/g++/parsestream.h
OLD_FILES+=usr/include/g++/pfstream.h
OLD_FILES+=usr/include/g++/procbuf.h
OLD_FILES+=usr/include/g++/pthread_alloc
OLD_FILES+=usr/include/g++/pthread_alloc.h
OLD_FILES+=usr/include/g++/queue
OLD_FILES+=usr/include/g++/rope
OLD_FILES+=usr/include/g++/rope.h
OLD_FILES+=usr/include/g++/ropeimpl.h
OLD_FILES+=usr/include/g++/set
OLD_FILES+=usr/include/g++/set.h
OLD_FILES+=usr/include/g++/slist
OLD_FILES+=usr/include/g++/slist.h
OLD_FILES+=usr/include/g++/sstream
OLD_FILES+=usr/include/g++/stack
OLD_FILES+=usr/include/g++/stack.h
OLD_FILES+=usr/include/g++/std/bastring.cc
OLD_FILES+=usr/include/g++/std/bastring.h
OLD_FILES+=usr/include/g++/std/complext.cc
OLD_FILES+=usr/include/g++/std/complext.h
OLD_FILES+=usr/include/g++/std/dcomplex.h
OLD_FILES+=usr/include/g++/std/fcomplex.h
OLD_FILES+=usr/include/g++/std/gslice.h
OLD_FILES+=usr/include/g++/std/gslice_array.h
OLD_FILES+=usr/include/g++/std/indirect_array.h
OLD_FILES+=usr/include/g++/std/ldcomplex.h
OLD_FILES+=usr/include/g++/std/mask_array.h
OLD_FILES+=usr/include/g++/std/slice.h
OLD_FILES+=usr/include/g++/std/slice_array.h
OLD_FILES+=usr/include/g++/std/std_valarray.h
OLD_FILES+=usr/include/g++/std/straits.h
OLD_FILES+=usr/include/g++/std/valarray_array.h
OLD_FILES+=usr/include/g++/std/valarray_array.tcc
OLD_FILES+=usr/include/g++/std/valarray_meta.h
OLD_FILES+=usr/include/g++/stdexcept
OLD_FILES+=usr/include/g++/stdiostream.h
OLD_FILES+=usr/include/g++/stl.h
OLD_FILES+=usr/include/g++/stl_algo.h
OLD_FILES+=usr/include/g++/stl_algobase.h
OLD_FILES+=usr/include/g++/stl_alloc.h
OLD_FILES+=usr/include/g++/stl_bvector.h
OLD_FILES+=usr/include/g++/stl_config.h
OLD_FILES+=usr/include/g++/stl_construct.h
OLD_FILES+=usr/include/g++/stl_deque.h
OLD_FILES+=usr/include/g++/stl_function.h
OLD_FILES+=usr/include/g++/stl_hash_fun.h
OLD_FILES+=usr/include/g++/stl_hash_map.h
OLD_FILES+=usr/include/g++/stl_hash_set.h
OLD_FILES+=usr/include/g++/stl_hashtable.h
OLD_FILES+=usr/include/g++/stl_heap.h
OLD_FILES+=usr/include/g++/stl_iterator.h
OLD_FILES+=usr/include/g++/stl_list.h
OLD_FILES+=usr/include/g++/stl_map.h
OLD_FILES+=usr/include/g++/stl_multimap.h
OLD_FILES+=usr/include/g++/stl_multiset.h
OLD_FILES+=usr/include/g++/stl_numeric.h
OLD_FILES+=usr/include/g++/stl_pair.h
OLD_FILES+=usr/include/g++/stl_queue.h
OLD_FILES+=usr/include/g++/stl_raw_storage_iter.h
OLD_FILES+=usr/include/g++/stl_relops.h
OLD_FILES+=usr/include/g++/stl_rope.h
OLD_FILES+=usr/include/g++/stl_set.h
OLD_FILES+=usr/include/g++/stl_slist.h
OLD_FILES+=usr/include/g++/stl_stack.h
OLD_FILES+=usr/include/g++/stl_tempbuf.h
OLD_FILES+=usr/include/g++/stl_tree.h
OLD_FILES+=usr/include/g++/stl_uninitialized.h
OLD_FILES+=usr/include/g++/stl_vector.h
OLD_FILES+=usr/include/g++/stream.h
OLD_FILES+=usr/include/g++/streambuf.h
OLD_FILES+=usr/include/g++/strfile.h
OLD_FILES+=usr/include/g++/string
OLD_FILES+=usr/include/g++/strstream
OLD_FILES+=usr/include/g++/strstream.h
OLD_FILES+=usr/include/g++/tempbuf.h
OLD_FILES+=usr/include/g++/tree.h
OLD_FILES+=usr/include/g++/type_traits.h
OLD_FILES+=usr/include/g++/typeinfo
OLD_FILES+=usr/include/g++/utility
OLD_FILES+=usr/include/g++/valarray
OLD_FILES+=usr/include/g++/vector
OLD_FILES+=usr/include/g++/vector.h
OLD_FILES+=usr/include/gmp.h
OLD_FILES+=usr/include/isc/assertions.h
OLD_FILES+=usr/include/isc/ctl.h
OLD_FILES+=usr/include/isc/dst.h
OLD_FILES+=usr/include/isc/eventlib.h
OLD_FILES+=usr/include/isc/heap.h
OLD_FILES+=usr/include/isc/irpmarshall.h
OLD_FILES+=usr/include/isc/list.h OLD_FILES+=usr/include/isc/logging.h OLD_FILES+=usr/include/isc/memcluster.h OLD_FILES+=usr/include/isc/misc.h OLD_FILES+=usr/include/isc/tree.h OLD_FILES+=usr/include/machine/ansi.h OLD_FILES+=usr/include/machine/apic.h OLD_FILES+=usr/include/machine/asc_ioctl.h OLD_FILES+=usr/include/machine/asnames.h OLD_FILES+=usr/include/machine/bus_at386.h OLD_FILES+=usr/include/machine/bus_memio.h OLD_FILES+=usr/include/machine/bus_pc98.h OLD_FILES+=usr/include/machine/bus_pio.h OLD_FILES+=usr/include/machine/cdk.h OLD_FILES+=usr/include/machine/comstats.h OLD_FILES+=usr/include/machine/console.h OLD_FILES+=usr/include/machine/critical.h OLD_FILES+=usr/include/machine/cronyx.h OLD_FILES+=usr/include/machine/dvcfg.h OLD_FILES+=usr/include/machine/globaldata.h OLD_FILES+=usr/include/machine/globals.h OLD_FILES+=usr/include/machine/gsc.h OLD_FILES+=usr/include/machine/i4b_isppp.h OLD_FILES+=usr/include/machine/if_wavelan_ieee.h OLD_FILES+=usr/include/machine/iic.h OLD_FILES+=usr/include/machine/ioctl_ctx.h OLD_FILES+=usr/include/machine/ioctl_fd.h OLD_FILES+=usr/include/machine/ipl.h OLD_FILES+=usr/include/machine/lock.h OLD_FILES+=usr/include/machine/mouse.h OLD_FILES+=usr/include/machine/mpapic.h OLD_FILES+=usr/include/machine/mtpr.h OLD_FILES+=usr/include/machine/pc/msdos.h OLD_FILES+=usr/include/machine/physio_proc.h OLD_FILES+=usr/include/machine/smb.h OLD_FILES+=usr/include/machine/spigot.h OLD_FILES+=usr/include/machine/types.h OLD_FILES+=usr/include/machine/uc_device.h OLD_FILES+=usr/include/machine/ultrasound.h OLD_FILES+=usr/include/machine/wtio.h OLD_FILES+=usr/include/msdosfs/bootsect.h OLD_FILES+=usr/include/msdosfs/bpb.h OLD_FILES+=usr/include/msdosfs/denode.h OLD_FILES+=usr/include/msdosfs/direntry.h OLD_FILES+=usr/include/msdosfs/fat.h OLD_FILES+=usr/include/msdosfs/msdosfsmount.h OLD_FILES+=usr/include/net/hostcache.h OLD_FILES+=usr/include/net/if_faith.h OLD_FILES+=usr/include/net/if_ieee80211.h OLD_FILES+=usr/include/net/if_tunvar.h OLD_FILES+=usr/include/net/intrq.h OLD_FILES+=usr/include/netatm/kern_include.h OLD_FILES+=usr/include/netinet/if_fddi.h OLD_FILES+=usr/include/netinet/in_hostcache.h OLD_FILES+=usr/include/netinet/ip_flow.h OLD_FILES+=usr/include/netinet/ip_fw2.h OLD_FILES+=usr/include/netinet6/in6_prefix.h OLD_FILES+=usr/include/netns/idp.h OLD_FILES+=usr/include/netns/idp_var.h OLD_FILES+=usr/include/netns/ns.h OLD_FILES+=usr/include/netns/ns_error.h OLD_FILES+=usr/include/netns/ns_if.h OLD_FILES+=usr/include/netns/ns_pcb.h OLD_FILES+=usr/include/netns/sp.h OLD_FILES+=usr/include/netns/spidp.h OLD_FILES+=usr/include/netns/spp_debug.h OLD_FILES+=usr/include/netns/spp_timer.h OLD_FILES+=usr/include/netns/spp_var.h OLD_FILES+=usr/include/nfs/nfs.h OLD_FILES+=usr/include/nfs/nfsm_subs.h OLD_FILES+=usr/include/nfs/nfsmount.h OLD_FILES+=usr/include/nfs/nfsnode.h OLD_FILES+=usr/include/nfs/nfsrtt.h OLD_FILES+=usr/include/nfs/nfsrvcache.h OLD_FILES+=usr/include/nfs/nfsv2.h OLD_FILES+=usr/include/nfs/nqnfs.h OLD_FILES+=usr/include/ntfs/ntfs.h OLD_FILES+=usr/include/ntfs/ntfs_compr.h OLD_FILES+=usr/include/ntfs/ntfs_ihash.h OLD_FILES+=usr/include/ntfs/ntfs_inode.h OLD_FILES+=usr/include/ntfs/ntfs_subr.h OLD_FILES+=usr/include/ntfs/ntfs_vfsops.h OLD_FILES+=usr/include/ntfs/ntfsmount.h OLD_FILES+=usr/include/nwfs/nwfs.h OLD_FILES+=usr/include/nwfs/nwfs_mount.h OLD_FILES+=usr/include/nwfs/nwfs_node.h OLD_FILES+=usr/include/nwfs/nwfs_subr.h OLD_FILES+=usr/include/posix4/_semaphore.h OLD_FILES+=usr/include/posix4/aio.h 
OLD_FILES+=usr/include/posix4/ksem.h OLD_FILES+=usr/include/posix4/mqueue.h OLD_FILES+=usr/include/posix4/posix4.h OLD_FILES+=usr/include/posix4/sched.h OLD_FILES+=usr/include/posix4/semaphore.h OLD_DIRS+=usr/include/posix4 OLD_FILES+=usr/include/security/_pam_compat.h OLD_FILES+=usr/include/security/_pam_macros.h OLD_FILES+=usr/include/security/_pam_types.h OLD_FILES+=usr/include/security/pam_malloc.h OLD_FILES+=usr/include/security/pam_misc.h OLD_FILES+=usr/include/skey.h OLD_FILES+=usr/include/strhash.h OLD_FILES+=usr/include/struct.h OLD_FILES+=usr/include/sys/_label.h OLD_FILES+=usr/include/sys/_posix.h OLD_FILES+=usr/include/sys/bus_private.h OLD_FILES+=usr/include/sys/ccdvar.h OLD_FILES+=usr/include/sys/diskslice.h OLD_FILES+=usr/include/sys/dmap.h OLD_FILES+=usr/include/sys/inttypes.h OLD_FILES+=usr/include/sys/jumbo.h OLD_FILES+=usr/include/sys/mac_policy.h OLD_FILES+=usr/include/sys/pbioio.h OLD_FILES+=usr/include/sys/syscall-hide.h OLD_FILES+=usr/include/sys/tprintf.h OLD_FILES+=usr/include/sys/vnioctl.h OLD_FILES+=usr/include/sys/wormio.h OLD_FILES+=usr/include/telnet.h OLD_FILES+=usr/include/ufs/mfs/mfs_extern.h OLD_FILES+=usr/include/ufs/mfs/mfsnode.h OLD_FILES+=usr/include/values.h OLD_FILES+=usr/include/vm/vm_zone.h OLD_FILES+=usr/share/examples/etc/usbd.conf OLD_FILES+=usr/share/examples/meteor/README OLD_FILES+=usr/share/examples/meteor/rgb16.c OLD_FILES+=usr/share/examples/meteor/rgb24.c OLD_FILES+=usr/share/examples/meteor/test-n.c OLD_FILES+=usr/share/examples/meteor/yuvpk.c OLD_FILES+=usr/share/examples/meteor/yuvpl.c OLD_FILES+=usr/share/examples/worm/README OLD_FILES+=usr/share/examples/worm/makecdfs.sh OLD_FILES+=usr/share/groff_font/devlj4/Makefile OLD_FILES+=usr/share/groff_font/devlj4/text.map OLD_FILES+=usr/share/groff_font/devlj4/special.map OLD_FILES+=usr/share/misc/nslookup.help OLD_FILES+=usr/share/sendmail/cf/feature/nodns.m4 OLD_FILES+=usr/share/syscons/keymaps/lat-amer.kbd OLD_FILES+=usr/share/vi/catalog/ru_SU.KOI8-R OLD_FILES+=usr/share/zoneinfo/Africa/Timbuktu OLD_FILES+=usr/share/zoneinfo/Africa/Asmera OLD_FILES+=usr/share/zoneinfo/America/Buenos_Aires OLD_FILES+=usr/share/zoneinfo/America/Cordoba OLD_FILES+=usr/share/zoneinfo/America/Jujuy OLD_FILES+=usr/share/zoneinfo/America/Catamarca OLD_FILES+=usr/share/zoneinfo/America/Mendoza OLD_FILES+=usr/share/zoneinfo/America/Indianapolis OLD_FILES+=usr/share/zoneinfo/America/Louisville OLD_FILES+=usr/share/zoneinfo/America/Argentina/ComodRivadavia OLD_FILES+=usr/share/zoneinfo/Atlantic/Faeroe OLD_FILES+=usr/share/zoneinfo/Europe/Belfast OLD_FILES+=usr/share/zoneinfo/Pacific/Yap OLD_FILES+=usr/share/zoneinfo/SystemV/YST9 OLD_FILES+=usr/share/zoneinfo/SystemV/PST8 OLD_FILES+=usr/share/zoneinfo/SystemV/EST5EDT OLD_FILES+=usr/share/zoneinfo/SystemV/CST6CDT OLD_FILES+=usr/share/zoneinfo/SystemV/MST7MDT OLD_FILES+=usr/share/zoneinfo/SystemV/PST8PDT OLD_FILES+=usr/share/zoneinfo/SystemV/YST9YDT OLD_FILES+=usr/share/zoneinfo/SystemV/HST10 OLD_FILES+=usr/share/zoneinfo/SystemV/MST7 OLD_FILES+=usr/share/zoneinfo/SystemV/EST5 OLD_FILES+=usr/share/zoneinfo/SystemV/AST4ADT OLD_FILES+=usr/share/zoneinfo/SystemV/CST6 OLD_FILES+=usr/share/zoneinfo/SystemV/AST4 OLD_FILES+=usr/share/doc/ntp/accopt.htm OLD_FILES+=usr/share/doc/ntp/assoc.htm OLD_FILES+=usr/share/doc/ntp/audio.htm OLD_FILES+=usr/share/doc/ntp/authopt.htm OLD_FILES+=usr/share/doc/ntp/biblio.htm OLD_FILES+=usr/share/doc/ntp/build.htm OLD_FILES+=usr/share/doc/ntp/clockopt.htm OLD_FILES+=usr/share/doc/ntp/config.htm OLD_FILES+=usr/share/doc/ntp/confopt.htm 
OLD_FILES+=usr/share/doc/ntp/copyright.htm OLD_FILES+=usr/share/doc/ntp/debug.htm OLD_FILES+=usr/share/doc/ntp/driver1.htm OLD_FILES+=usr/share/doc/ntp/driver10.htm OLD_FILES+=usr/share/doc/ntp/driver11.htm OLD_FILES+=usr/share/doc/ntp/driver12.htm OLD_FILES+=usr/share/doc/ntp/driver16.htm OLD_FILES+=usr/share/doc/ntp/driver18.htm OLD_FILES+=usr/share/doc/ntp/driver19.htm OLD_FILES+=usr/share/doc/ntp/driver2.htm OLD_FILES+=usr/share/doc/ntp/driver20.htm OLD_FILES+=usr/share/doc/ntp/driver22.htm OLD_FILES+=usr/share/doc/ntp/driver23.htm OLD_FILES+=usr/share/doc/ntp/driver24.htm OLD_FILES+=usr/share/doc/ntp/driver26.htm OLD_FILES+=usr/share/doc/ntp/driver27.htm OLD_FILES+=usr/share/doc/ntp/driver28.htm OLD_FILES+=usr/share/doc/ntp/driver29.htm OLD_FILES+=usr/share/doc/ntp/driver3.htm OLD_FILES+=usr/share/doc/ntp/driver30.htm OLD_FILES+=usr/share/doc/ntp/driver32.htm OLD_FILES+=usr/share/doc/ntp/driver33.htm OLD_FILES+=usr/share/doc/ntp/driver34.htm OLD_FILES+=usr/share/doc/ntp/driver35.htm OLD_FILES+=usr/share/doc/ntp/driver36.htm OLD_FILES+=usr/share/doc/ntp/driver37.htm OLD_FILES+=usr/share/doc/ntp/driver4.htm OLD_FILES+=usr/share/doc/ntp/driver5.htm OLD_FILES+=usr/share/doc/ntp/driver6.htm OLD_FILES+=usr/share/doc/ntp/driver7.htm OLD_FILES+=usr/share/doc/ntp/driver8.htm OLD_FILES+=usr/share/doc/ntp/driver9.htm OLD_FILES+=usr/share/doc/ntp/exec.htm OLD_FILES+=usr/share/doc/ntp/extern.htm OLD_FILES+=usr/share/doc/ntp/gadget.htm OLD_FILES+=usr/share/doc/ntp/hints.htm OLD_FILES+=usr/share/doc/ntp/howto.htm OLD_FILES+=usr/share/doc/ntp/htmlprimer.htm OLD_FILES+=usr/share/doc/ntp/index.htm OLD_FILES+=usr/share/doc/ntp/kern.htm OLD_FILES+=usr/share/doc/ntp/kernpps.htm OLD_FILES+=usr/share/doc/ntp/ldisc.htm OLD_FILES+=usr/share/doc/ntp/measure.htm OLD_FILES+=usr/share/doc/ntp/miscopt.htm OLD_FILES+=usr/share/doc/ntp/monopt.htm OLD_FILES+=usr/share/doc/ntp/mx4200data.htm OLD_FILES+=usr/share/doc/ntp/notes.htm OLD_FILES+=usr/share/doc/ntp/ntpd.htm OLD_FILES+=usr/share/doc/ntp/ntpdate.htm OLD_FILES+=usr/share/doc/ntp/ntpdc.htm OLD_FILES+=usr/share/doc/ntp/ntpq.htm OLD_FILES+=usr/share/doc/ntp/ntptime.htm OLD_FILES+=usr/share/doc/ntp/ntptrace.htm OLD_FILES+=usr/share/doc/ntp/parsedata.htm OLD_FILES+=usr/share/doc/ntp/parsenew.htm OLD_FILES+=usr/share/doc/ntp/patches.htm OLD_FILES+=usr/share/doc/ntp/porting.htm OLD_FILES+=usr/share/doc/ntp/pps.htm OLD_FILES+=usr/share/doc/ntp/prefer.htm OLD_FILES+=usr/share/doc/ntp/qth.htm OLD_FILES+=usr/share/doc/ntp/quick.htm OLD_FILES+=usr/share/doc/ntp/rdebug.htm OLD_FILES+=usr/share/doc/ntp/refclock.htm OLD_FILES+=usr/share/doc/ntp/release.htm OLD_FILES+=usr/share/doc/ntp/tickadj.htm OLD_FILES+=usr/share/doc/papers/nqnfs.ascii.gz OLD_FILES+=usr/share/doc/papers/px.ascii.gz OLD_FILES+=usr/share/man/man3/exp10.3.gz OLD_FILES+=usr/share/man/man3/exp10f.3.gz OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz OLD_FILES+=usr/share/man/man3/gss_krb5_compat_des3_mic.3.gz OLD_FILES+=usr/share/man/man3/gss_krb5_copy_ccache.3.gz OLD_FILES+=usr/share/man/man3/mac_is_present_np.3.gz OLD_FILES+=usr/share/man/man3/mbmb.3.gz OLD_FILES+=usr/share/man/man3/setrunelocale.3.gz OLD_FILES+=usr/share/man/man5/usbd.conf.5.gz .if ${TARGET_ARCH} != "i386" && ${TARGET_ARCH} != "amd64" OLD_FILES+=usr/share/man/man8/boot_i386.8.gz .endif .if ${TARGET_ARCH} != "aarch64" && ${TARGET} != "arm" && \ ${TARGET_ARCH} != "powerpc" && ${TARGET_ARCH} != "powerpc64" && \ ${TARGET_ARCH} != "sparc64" && ${TARGET} != "mips" OLD_FILES+=usr/share/man/man8/ofwdump.8.gz .endif 
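# A quick sketch of how entries such as the ones above are consumed (targets
# as provided by the top-level Makefile): stale files, libraries and
# directories listed here can be inspected and removed on an installed
# system with, e.g.:
# make check-old        # report obsolete files, libraries and directories
# make delete-old       # remove obsolete files and directories
# make delete-old-libs  # remove obsolete shared libraries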
OLD_FILES+=usr/share/man/man8/mount_reiserfs.8.gz OLD_FILES+=usr/share/man/man9/VFS_START.9.gz OLD_FILES+=usr/share/man/man9/cpu_critical_exit.9.gz OLD_FILES+=usr/share/man/man9/cpu_critical_enter.9.gz OLD_FILES+=usr/share/info/annotate.info.gz OLD_FILES+=usr/share/info/tar.info.gz OLD_FILES+=usr/share/bsnmp/defs/tree.def OLD_FILES+=usr/share/bsnmp/defs/mibII_tree.def OLD_FILES+=usr/share/bsnmp/defs/netgraph_tree.def OLD_FILES+=usr/share/bsnmp/mibs/FOKUS-MIB.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-MIB.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-SNMPD.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-NETGRAPH.txt OLD_FILES+=usr/libdata/msdosfs/iso22dos OLD_FILES+=usr/libdata/msdosfs/iso72dos OLD_FILES+=usr/libdata/msdosfs/koi2dos OLD_FILES+=usr/libdata/msdosfs/koi8u2dos # The following files are *not* obsolete, they just don't get touched at # install, so don't add them: # - boot/loader.rc # - usr/share/tmac/man.local # - usr/share/tmac/mm/locale # - usr/share/tmac/mm/se_locale # - var/yp/Makefile # 20071120: shared library version bump OLD_LIBS+=usr/lib/libasn1.so.8 OLD_LIBS+=usr/lib/libgssapi.so.8 OLD_LIBS+=usr/lib/libgssapi_krb5.so.8 OLD_LIBS+=usr/lib/libhdb.so.8 OLD_LIBS+=usr/lib/libkadm5clnt.so.8 OLD_LIBS+=usr/lib/libkadm5srv.so.8 OLD_LIBS+=usr/lib/libkafs5.so.8 OLD_LIBS+=usr/lib/libkrb5.so.8 OLD_LIBS+=usr/lib/libobjc.so.2 OLD_LIBS+=usr/lib32/libgssapi.so.8 OLD_LIBS+=usr/lib32/libobjc.so.2 # 20070519: GCC 4.2 OLD_LIBS+=usr/lib/libg2c.a OLD_LIBS+=usr/lib/libg2c.so OLD_LIBS+=usr/lib/libg2c.so.2 OLD_LIBS+=usr/lib/libg2c_p.a OLD_LIBS+=usr/lib/libgcc_pic.a OLD_LIBS+=usr/lib32/libg2c.a OLD_LIBS+=usr/lib32/libg2c.so OLD_LIBS+=usr/lib32/libg2c.so.2 OLD_LIBS+=usr/lib32/libg2c_p.a OLD_LIBS+=usr/lib32/libgcc_pic.a # 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade OLD_LIBS+=lib/libcrypto.so.4 OLD_LIBS+=usr/lib/libssl.so.4 OLD_LIBS+=usr/lib32/libcrypto.so.4 OLD_LIBS+=usr/lib32/libssl.so.4 # 20060521: gethostbyaddr(3) ABI change OLD_LIBS+=usr/lib/libroken.so.8 OLD_LIBS+=lib/libatm.so.3 OLD_LIBS+=lib/libc.so.6 OLD_LIBS+=lib/libutil.so.5 OLD_LIBS+=usr/lib32/libatm.so.3 OLD_LIBS+=usr/lib32/libc.so.6 OLD_LIBS+=usr/lib32/libutil.so.5 # 20060413: shared library moved to /usr/lib OLD_LIBS+=lib/libgpib.so.1 # 20060413: libpcap.so.4 moved to /lib/ OLD_LIBS+=usr/lib/libpcap.so.4 # 20060412: libpthread.so.2 moved to /lib/ OLD_LIBS+=usr/lib/libpthread.so.2 # 20060127: revert libdisk to static-only OLD_LIBS+=usr/lib/libdisk.so.3 # 20051027: libc_r discontinued (removed 20101113) OLD_LIBS+=usr/lib/libc_r.a OLD_LIBS+=usr/lib/libc_r.so OLD_LIBS+=usr/lib/libc_r.so.7 OLD_LIBS+=usr/lib/libc_r_p.a OLD_LIBS+=usr/lib32/libc_r.a OLD_LIBS+=usr/lib32/libc_r.so OLD_LIBS+=usr/lib32/libc_r.so.7 OLD_LIBS+=usr/lib32/libc_r_p.a # 20050722: bump for 6.0-RELEASE OLD_LIBS+=lib/libalias.so.4 OLD_LIBS+=lib/libatm.so.2 OLD_LIBS+=lib/libbegemot.so.1 OLD_LIBS+=lib/libbsdxml.so.1 OLD_LIBS+=lib/libbsnmp.so.2 OLD_LIBS+=lib/libc.so.5 OLD_LIBS+=lib/libcam.so.2 OLD_LIBS+=lib/libcrypt.so.2 OLD_LIBS+=lib/libcrypto.so.3 OLD_LIBS+=lib/libdevstat.so.4 OLD_LIBS+=lib/libedit.so.4 OLD_LIBS+=lib/libgeom.so.2 OLD_LIBS+=lib/libgpib.so.0 OLD_LIBS+=lib/libipsec.so.1 OLD_LIBS+=lib/libipx.so.2 OLD_LIBS+=lib/libkiconv.so.1 OLD_LIBS+=lib/libkvm.so.2 OLD_LIBS+=lib/libm.so.3 OLD_LIBS+=lib/libmd.so.2 OLD_LIBS+=lib/libncurses.so.5 OLD_LIBS+=lib/libreadline.so.5 OLD_LIBS+=lib/libsbuf.so.2 OLD_LIBS+=lib/libufs.so.2 OLD_LIBS+=lib/libutil.so.4 OLD_LIBS+=lib/libz.so.2 OLD_LIBS+=usr/lib/libarchive.so.1 OLD_LIBS+=usr/lib/libasn1.so.7 OLD_LIBS+=usr/lib/libbluetooth.so.1 
OLD_LIBS+=usr/lib/libbz2.so.1 OLD_LIBS+=usr/lib/libc_r.so.5 OLD_LIBS+=usr/lib/libcalendar.so.2 OLD_LIBS+=usr/lib/libcom_err.so.2 OLD_LIBS+=usr/lib/libdevinfo.so.2 OLD_LIBS+=usr/lib/libdialog.so.4 OLD_LIBS+=usr/lib/libfetch.so.3 OLD_LIBS+=usr/lib/libform.so.2 OLD_LIBS+=usr/lib/libftpio.so.5 OLD_LIBS+=usr/lib/libg2c.so.1 OLD_LIBS+=usr/lib/libgnuregex.so.2 OLD_LIBS+=usr/lib/libgssapi.so.7 OLD_LIBS+=usr/lib/libhdb.so.7 OLD_LIBS+=usr/lib/libhistory.so.5 OLD_LIBS+=usr/lib/libkadm5clnt.so.7 OLD_LIBS+=usr/lib/libkadm5srv.so.7 OLD_LIBS+=usr/lib/libkafs5.so.7 OLD_LIBS+=usr/lib/libkrb5.so.7 OLD_LIBS+=usr/lib/libmagic.so.1 OLD_LIBS+=usr/lib/libmenu.so.2 OLD_LIBS+=usr/lib/libmilter.so.2 OLD_LIBS+=usr/lib/libmp.so.4 OLD_LIBS+=usr/lib/libncp.so.1 OLD_LIBS+=usr/lib/libnetgraph.so.1 OLD_LIBS+=usr/lib/libngatm.so.1 OLD_LIBS+=usr/lib/libobjc.so.1 OLD_LIBS+=usr/lib/libopie.so.3 OLD_LIBS+=usr/lib/libpam.so.2 OLD_LIBS+=usr/lib/libpanel.so.2 OLD_LIBS+=usr/lib/libpcap.so.3 OLD_LIBS+=usr/lib/libpmc.so.2 OLD_LIBS+=usr/lib/libpthread.so.1 OLD_LIBS+=usr/lib/libradius.so.1 OLD_LIBS+=usr/lib/libroken.so.7 OLD_LIBS+=usr/lib/librpcsvc.so.2 OLD_LIBS+=usr/lib/libsdp.so.1 OLD_LIBS+=usr/lib/libsmb.so.1 OLD_LIBS+=usr/lib/libssh.so.2 OLD_LIBS+=usr/lib/libssl.so.3 OLD_LIBS+=usr/lib/libstdc++.so.4 OLD_LIBS+=usr/lib/libtacplus.so.1 OLD_LIBS+=usr/lib/libthr.so.1 OLD_LIBS+=usr/lib/libthread_db.so.1 OLD_LIBS+=usr/lib/libugidfw.so.1 OLD_LIBS+=usr/lib/libusbhid.so.1 OLD_LIBS+=usr/lib/libvgl.so.3 OLD_LIBS+=usr/lib/libwrap.so.3 OLD_LIBS+=usr/lib/libypclnt.so.1 OLD_LIBS+=usr/lib/pam_chroot.so.2 OLD_LIBS+=usr/lib/pam_deny.so.2 OLD_LIBS+=usr/lib/pam_echo.so.2 OLD_LIBS+=usr/lib/pam_exec.so.2 OLD_LIBS+=usr/lib/pam_ftpusers.so.2 OLD_LIBS+=usr/lib/pam_group.so.2 OLD_LIBS+=usr/lib/pam_guest.so.2 OLD_LIBS+=usr/lib/pam_krb5.so.2 OLD_LIBS+=usr/lib/pam_ksu.so.2 OLD_LIBS+=usr/lib/pam_lastlog.so.2 OLD_LIBS+=usr/lib/pam_login_access.so.2 OLD_LIBS+=usr/lib/pam_nologin.so.2 OLD_LIBS+=usr/lib/pam_opie.so.2 OLD_LIBS+=usr/lib/pam_opieaccess.so.2 OLD_LIBS+=usr/lib/pam_passwdqc.so.2 OLD_LIBS+=usr/lib/pam_permit.so.2 OLD_LIBS+=usr/lib/pam_radius.so.2 OLD_LIBS+=usr/lib/pam_rhosts.so.2 OLD_LIBS+=usr/lib/pam_rootok.so.2 OLD_LIBS+=usr/lib/pam_securetty.so.2 OLD_LIBS+=usr/lib/pam_self.so.2 OLD_LIBS+=usr/lib/pam_ssh.so.2 OLD_LIBS+=usr/lib/pam_tacplus.so.2 OLD_LIBS+=usr/lib/pam_unix.so.2 OLD_LIBS+=usr/lib/snmp_atm.so.3 OLD_LIBS+=usr/lib/snmp_mibII.so.3 OLD_LIBS+=usr/lib/snmp_netgraph.so.3 OLD_LIBS+=usr/lib/snmp_pf.so.3 # 200505XX: ? 
OLD_LIBS+=usr/lib/snmp_atm.so.2 OLD_LIBS+=usr/lib/snmp_mibII.so.2 OLD_LIBS+=usr/lib/snmp_netgraph.so.2 OLD_LIBS+=usr/lib/snmp_pf.so.2 # 2005XXXX: not ready for primetime yet OLD_LIBS+=usr/lib/libautofs.so.1 # 200411XX: libxpg4 removal OLD_LIBS+=usr/lib/libxpg4.so.3 # 200410XX: libm compatibility fix OLD_LIBS+=lib/libm.so.2 # 20041001: version bump OLD_LIBS+=lib/libreadline.so.4 OLD_LIBS+=usr/lib/libhistory.so.4 OLD_LIBS+=usr/lib/libopie.so.2 OLD_LIBS+=usr/lib/libpcap.so.2 # 20040925: bind9 import OLD_LIBS+=usr/lib/libisc.so.1 # 200408XX OLD_LIBS+=usr/lib/snmp_netgraph.so.1 # 200404XX OLD_LIBS+=usr/lib/libsnmp.so.1 OLD_LIBS+=usr/lib/snmp_mibII.so.1 # 200309XX OLD_LIBS+=usr/lib/libasn1.so.6 OLD_LIBS+=usr/lib/libhdb.so.6 OLD_LIBS+=usr/lib/libkadm5clnt.so.6 OLD_LIBS+=usr/lib/libkadm5srv.so.6 OLD_LIBS+=usr/lib/libkrb5.so.6 OLD_LIBS+=usr/lib/libroken.so.6 # 200304XX OLD_LIBS+=usr/lib/libc.so.4 OLD_LIBS+=usr/lib/libc_r.so.4 OLD_LIBS+=usr/lib/libdevstat.so.2 OLD_LIBS+=usr/lib/libedit.so.3 OLD_LIBS+=usr/lib/libgmp.so.3 OLD_LIBS+=usr/lib/libmp.so.3 OLD_LIBS+=usr/lib/libpam.so.1 OLD_LIBS+=usr/lib/libposix1e.so.2 OLD_LIBS+=usr/lib/libskey.so.2 OLD_LIBS+=usr/lib/libusbhid.so.0 OLD_LIBS+=usr/lib/libvgl.so.2 # 20030218: OpenSSL 0.9.7 import OLD_FILES+=usr/include/des.h OLD_FILES+=usr/lib/libdes.a OLD_FILES+=usr/lib/libdes.so OLD_LIBS+=usr/lib/libdes.so.3 OLD_FILES+=usr/lib/libdes_p.a # 200302XX OLD_LIBS+=usr/lib/libacl.so.3 OLD_LIBS+=usr/lib/libasn1.so.5 OLD_LIBS+=usr/lib/libcrypto.so.2 OLD_LIBS+=usr/lib/libgssapi.so.5 OLD_LIBS+=usr/lib/libhdb.so.5 OLD_LIBS+=usr/lib/libkadm.so.3 OLD_LIBS+=usr/lib/libkadm5clnt.so.5 OLD_LIBS+=usr/lib/libkadm5srv.so.5 OLD_LIBS+=usr/lib/libkafs.so.3 OLD_LIBS+=usr/lib/libkafs5.so.5 OLD_LIBS+=usr/lib/libkdb.so.3 OLD_LIBS+=usr/lib/libkrb.so.3 OLD_LIBS+=usr/lib/libroken.so. OLD_LIBS+=usr/lib/libssl.so.2 OLD_LIBS+=usr/lib/pam_kerberosIV.so # 200208XX OLD_LIBS+=usr/lib/libgssapi.so.4 # 200203XX OLD_LIBS+=usr/lib/libss.so.3 OLD_LIBS+=usr/lib/libusb.so.0 # 200112XX OLD_LIBS+=usr/lib/libfetch.so.2 # 200110XX OLD_LIBS+=usr/lib/libgssapi.so.3 # 200104XX OLD_LIBS+=usr/lib/libdescrypt.so.2 OLD_LIBS+=usr/lib/libscrypt.so.2 # 200102XX OLD_LIBS+=usr/lib/libcrypto.so.1 OLD_LIBS+=usr/lib/libssl.so.1 # 200009XX OLD_LIBS+=usr/lib/libRSAglue.so.1 OLD_LIBS+=usr/lib/librsaINTL.so.1 OLD_LIBS+=usr/lib/librsaUSA.so.1 # 200006XX OLD_LIBS+=usr/lib/libalias.so.3 OLD_LIBS+=usr/lib/libfetch.so.1 OLD_LIBS+=usr/lib/libipsec.so.0 # 200005XX OLD_LIBS+=usr/lib/libxpg4.so.2 # 200002XX OLD_LIBS+=usr/lib/libc.so.3 OLD_LIBS+=usr/lib/libcurses.so.2 OLD_LIBS+=usr/lib/libdialog.so.3 OLD_LIBS+=usr/lib/libedit.so.2 OLD_LIBS+=usr/lib/libf2c.so.2 OLD_LIBS+=usr/lib/libftpio.so.4 OLD_LIBS+=usr/lib/libg++.so.4 OLD_LIBS+=usr/lib/libhistory.so.3 OLD_LIBS+=usr/lib/libmytinfo.so.2 OLD_LIBS+=usr/lib/libncurses.so.3 OLD_LIBS+=usr/lib/libreadline.so.3 OLD_LIBS+=usr/lib/libss.so.2 OLD_LIBS+=usr/lib/libtermcap.so.2 OLD_LIBS+=usr/lib/libutil.so.2 OLD_LIBS+=usr/lib/libvgl.so.1 OLD_LIBS+=usr/lib/libwrap.so.2 # 19991216 OLD_FILES+=usr/sbin/xntpdc # 199909XX OLD_LIBS+=usr/lib/libc_r.so.3 # ??? 
OLD_LIBS+=usr/lib/libarchive.so.2 OLD_LIBS+=usr/lib/libbsnmp.so.1 OLD_LIBS+=usr/lib/libc_r.so.6 OLD_LIBS+=usr/lib32/libarchive.so.2 OLD_LIBS+=usr/lib32/libc_r.so.6 OLD_LIBS+=usr/lib/libcipher.so.2 OLD_LIBS+=usr/lib/libgssapi.so.6 OLD_LIBS+=usr/lib/libkse.so.1 OLD_LIBS+=usr/lib/liblwres.so.3 OLD_LIBS+=usr/lib/pam_ftp.so.2 # 20131013: Removal of the ATF tools OLD_DIRS+=etc/atf OLD_DIRS+=usr/share/examples/atf OLD_DIRS+=usr/share/xml/atf OLD_DIRS+=usr/share/xml OLD_DIRS+=usr/share/xsl/atf OLD_DIRS+=usr/share/xsl # 20040925: bind9 import OLD_DIRS+=usr/share/doc/bind/html OLD_DIRS+=usr/share/doc/bind/misc OLD_DIRS+=usr/share/doc/bind/ # ??? OLD_DIRS+=usr/include/g++/std OLD_DIRS+=usr/include/msdosfs OLD_DIRS+=usr/include/ntfs OLD_DIRS+=usr/include/nwfs OLD_DIRS+=usr/include/ufs/mfs # 20011001: UUCP migration to ports OLD_DIRS+=usr/libexec/uucp .include "tools/build/mk/OptionalObsoleteFiles.inc" Index: head/share/man/man9/Makefile =================================================================== --- head/share/man/man9/Makefile (revision 339926) +++ head/share/man/man9/Makefile (revision 339927) @@ -1,2271 +1,2272 @@ # $FreeBSD$ .include <src.opts.mk> PACKAGE=runtime-manuals MAN= accept_filter.9 \ accf_data.9 \ accf_dns.9 \ accf_http.9 \ acl.9 \ alq.9 \ altq.9 \ atomic.9 \ bhnd.9 \ bhnd_erom.9 \ bios.9 \ bitset.9 \ boot.9 \ bpf.9 \ buf.9 \ buf_ring.9 \ BUF_ISLOCKED.9 \ BUF_LOCK.9 \ BUF_LOCKFREE.9 \ BUF_LOCKINIT.9 \ BUF_RECURSED.9 \ BUF_TIMELOCK.9 \ BUF_UNLOCK.9 \ bus_activate_resource.9 \ BUS_ADD_CHILD.9 \ bus_adjust_resource.9 \ bus_alloc_resource.9 \ BUS_BIND_INTR.9 \ bus_child_present.9 \ BUS_CHILD_DELETED.9 \ BUS_CHILD_DETACHED.9 \ BUS_CONFIG_INTR.9 \ BUS_DESCRIBE_INTR.9 \ bus_dma.9 \ bus_generic_attach.9 \ bus_generic_detach.9 \ bus_generic_new_pass.9 \ bus_generic_print_child.9 \ bus_generic_read_ivar.9 \ bus_generic_shutdown.9 \ BUS_GET_CPUS.9 \ bus_get_resource.9 \ bus_map_resource.9 \ BUS_NEW_PASS.9 \ BUS_PRINT_CHILD.9 \ BUS_READ_IVAR.9 \ BUS_RESCAN.9 \ bus_release_resource.9 \ bus_set_pass.9 \ bus_set_resource.9 \ BUS_SETUP_INTR.9 \ bus_space.9 \ byteorder.9 \ casuword.9 \ cd.9 \ cnv.9 \ condvar.9 \ config_intrhook.9 \ contigmalloc.9 \ copy.9 \ counter.9 \ cpuset.9 \ cr_cansee.9 \ critical_enter.9 \ cr_seeothergids.9 \ cr_seeotheruids.9 \ crypto.9 \ CTASSERT.9 \ DB_COMMAND.9 \ DECLARE_GEOM_CLASS.9 \ DECLARE_MODULE.9 \ DELAY.9 \ devclass.9 \ devclass_find.9 \ devclass_get_device.9 \ devclass_get_devices.9 \ devclass_get_drivers.9 \ devclass_get_maxunit.9 \ devclass_get_name.9 \ devclass_get_softc.9 \ dev_clone.9 \ devfs_set_cdevpriv.9 \ device.9 \ device_add_child.9 \ DEVICE_ATTACH.9 \ device_delete_child.9 \ DEVICE_DETACH.9 \ device_enable.9 \ device_find_child.9 \ device_get_children.9 \ device_get_devclass.9 \ device_get_driver.9 \ device_get_ivars.9 \ device_get_name.9 \ device_get_parent.9 \ device_get_softc.9 \ device_get_state.9 \ device_get_sysctl.9 \ device_get_unit.9 \ DEVICE_IDENTIFY.9 \ device_printf.9 \ DEVICE_PROBE.9 \ device_probe_and_attach.9 \ device_quiet.9 \ device_set_desc.9 \ device_set_driver.9 \ device_set_flags.9 \ DEVICE_SHUTDOWN.9 \ DEV_MODULE.9 \ dev_refthread.9 \ devstat.9 \ devtoname.9 \ disk.9 \ dnv.9 \ domain.9 \ domainset.9 \ dpcpu.9 \ drbr.9 \ driver.9 \ DRIVER_MODULE.9 \ efirt.9 \ epoch.9 \ EVENTHANDLER.9 \ eventtimers.9 \ extattr.9 \ fail.9 \ fdt_pinctrl.9 \ fetch.9 \ firmware.9 \ fpu_kern.9 \ g_access.9 \ g_attach.9 \ g_bio.9 \ g_consumer.9 \ g_data.9 \ get_cyclecount.9 \ getenv.9 \ getnewvnode.9 \ g_event.9 \ g_geom.9 \ g_provider.9 \ g_provider_by_name.9 \
groupmember.9 \ g_wither_geom.9 \ hash.9 \ hashinit.9 \ hexdump.9 \ hhook.9 \ ieee80211.9 \ ieee80211_amrr.9 \ ieee80211_beacon.9 \ ieee80211_bmiss.9 \ ieee80211_crypto.9 \ ieee80211_ddb.9 \ ieee80211_input.9 \ ieee80211_node.9 \ ieee80211_output.9 \ ieee80211_proto.9 \ ieee80211_radiotap.9 \ ieee80211_regdomain.9 \ ieee80211_scan.9 \ ieee80211_vap.9 \ iflib.9 \ iflibdd.9 \ iflibdi.9 \ iflibtxrx.9 \ ifnet.9 \ inittodr.9 \ insmntque.9 \ intro.9 \ ithread.9 \ KASSERT.9 \ kern_testfrwk.9 \ kernacc.9 \ kernel_mount.9 \ khelp.9 \ kobj.9 \ kproc.9 \ kqueue.9 \ kthread.9 \ ktr.9 \ lock.9 \ locking.9 \ LOCK_PROFILING.9 \ mac.9 \ make_dev.9 \ malloc.9 \ mbchain.9 \ mbuf.9 \ mbuf_tags.9 \ MD5.9 \ mdchain.9 \ memcchr.9 \ memguard.9 \ microseq.9 \ microtime.9 \ microuptime.9 \ mi_switch.9 \ mod_cc.9 \ module.9 \ MODULE_DEPEND.9 \ MODULE_PNP_INFO.9 \ MODULE_VERSION.9 \ mtx_pool.9 \ mutex.9 \ namei.9 \ netisr.9 \ nv.9 \ OF_child.9 \ OF_device_from_xref.9 \ OF_finddevice.9 \ OF_getprop.9 \ OF_node_from_xref.9 \ OF_package_to_path.9 \ ofw_bus_is_compatible.9 \ ofw_bus_status_okay.9 \ osd.9 \ owll.9 \ own.9 \ panic.9 \ pbuf.9 \ PCBGROUP.9 \ p_candebug.9 \ p_cansee.9 \ pci.9 \ PCI_IOV_ADD_VF.9 \ PCI_IOV_INIT.9 \ pci_iov_schema.9 \ PCI_IOV_UNINIT.9 \ pfil.9 \ pfind.9 \ pget.9 \ pgfind.9 \ PHOLD.9 \ physio.9 \ pmap.9 \ pmap_activate.9 \ pmap_clear_modify.9 \ pmap_copy.9 \ pmap_enter.9 \ pmap_extract.9 \ pmap_growkernel.9 \ pmap_init.9 \ pmap_is_modified.9 \ pmap_is_prefaultable.9 \ pmap_map.9 \ pmap_mincore.9 \ pmap_object_init_pt.9 \ pmap_page_exists_quick.9 \ pmap_page_init.9 \ pmap_pinit.9 \ pmap_protect.9 \ pmap_qenter.9 \ pmap_quick_enter_page.9 \ pmap_release.9 \ pmap_remove.9 \ pmap_resident_count.9 \ pmap_unwire.9 \ pmap_zero_page.9 \ printf.9 \ prison_check.9 \ priv.9 \ proc_rwmem.9 \ pseudofs.9 \ psignal.9 \ random.9 \ random_harvest.9 \ ratecheck.9 \ redzone.9 \ refcount.9 \ resettodr.9 \ resource_int_value.9 \ rijndael.9 \ rman.9 \ rmlock.9 \ rtalloc.9 \ rtentry.9 \ runqueue.9 \ rwlock.9 \ sbuf.9 \ scheduler.9 \ SDT.9 \ securelevel_gt.9 \ selrecord.9 \ sema.9 \ sf_buf.9 \ sglist.9 \ shm_map.9 \ signal.9 \ sleep.9 \ sleepqueue.9 \ socket.9 \ stack.9 \ store.9 \ style.9 \ style.lua.9 \ swi.9 \ sx.9 \ syscall_helper_register.9 \ SYSCALL_MODULE.9 \ sysctl.9 \ sysctl_add_oid.9 \ sysctl_ctx_init.9 \ SYSINIT.9 \ taskqueue.9 \ tcp_functions.9 \ thread_exit.9 \ time.9 \ timeout.9 \ tvtohz.9 \ ucred.9 \ uidinfo.9 \ uio.9 \ unr.9 \ vaccess.9 \ vaccess_acl_nfs4.9 \ vaccess_acl_posix1e.9 \ vcount.9 \ vflush.9 \ VFS.9 \ vfs_busy.9 \ VFS_CHECKEXP.9 \ vfsconf.9 \ VFS_FHTOVP.9 \ vfs_getnewfsid.9 \ vfs_getopt.9 \ vfs_getvfs.9 \ VFS_MOUNT.9 \ vfs_mountedfrom.9 \ VFS_QUOTACTL.9 \ VFS_ROOT.9 \ vfs_rootmountalloc.9 \ VFS_SET.9 \ VFS_STATFS.9 \ vfs_suser.9 \ VFS_SYNC.9 \ vfs_timestamp.9 \ vfs_unbusy.9 \ VFS_UNMOUNT.9 \ vfs_unmountall.9 \ VFS_VGET.9 \ vget.9 \ vgone.9 \ vhold.9 \ vinvalbuf.9 \ vm_fault_prefault.9 \ vm_map.9 \ vm_map_check_protection.9 \ vm_map_create.9 \ vm_map_delete.9 \ vm_map_entry_resize_free.9 \ vm_map_find.9 \ vm_map_findspace.9 \ vm_map_inherit.9 \ vm_map_init.9 \ vm_map_insert.9 \ vm_map_lock.9 \ vm_map_lookup.9 \ vm_map_madvise.9 \ vm_map_max.9 \ vm_map_protect.9 \ vm_map_remove.9 \ vm_map_simplify_entry.9 \ vm_map_stack.9 \ vm_map_submap.9 \ vm_map_sync.9 \ vm_map_wire.9 \ vm_page_alloc.9 \ vm_page_bits.9 \ vm_page_busy.9 \ vm_page_deactivate.9 \ vm_page_dontneed.9 \ vm_page_aflag.9 \ vm_page_free.9 \ vm_page_grab.9 \ vm_page_hold.9 \ vm_page_insert.9 \ vm_page_lookup.9 \ vm_page_rename.9 \ 
vm_page_wire.9 \ vm_set_page_size.9 \ vmem.9 \ vn_fullpath.9 \ vn_isdisk.9 \ vnet.9 \ vnode.9 \ VOP_ACCESS.9 \ VOP_ACLCHECK.9 \ VOP_ADVISE.9 \ VOP_ADVLOCK.9 \ VOP_ALLOCATE.9 \ VOP_ATTRIB.9 \ VOP_BWRITE.9 \ VOP_CREATE.9 \ VOP_FSYNC.9 \ VOP_GETACL.9 \ VOP_GETEXTATTR.9 \ VOP_GETPAGES.9 \ VOP_INACTIVE.9 \ VOP_IOCTL.9 \ VOP_LINK.9 \ VOP_LISTEXTATTR.9 \ VOP_LOCK.9 \ VOP_LOOKUP.9 \ VOP_OPENCLOSE.9 \ VOP_PATHCONF.9 \ VOP_PRINT.9 \ VOP_RDWR.9 \ VOP_READDIR.9 \ VOP_READLINK.9 \ VOP_REALLOCBLKS.9 \ VOP_REMOVE.9 \ VOP_RENAME.9 \ VOP_REVOKE.9 \ VOP_SETACL.9 \ VOP_SETEXTATTR.9 \ VOP_STRATEGY.9 \ VOP_VPTOCNP.9 \ VOP_VPTOFH.9 \ vref.9 \ vrefcnt.9 \ vrele.9 \ vslock.9 \ watchdog.9 \ zone.9 MLINKS= unr.9 alloc_unr.9 \ unr.9 alloc_unrl.9 \ unr.9 alloc_unr_specific.9 \ unr.9 clear_unrhdr.9 \ unr.9 delete_unrhdr.9 \ unr.9 free_unr.9 \ unr.9 new_unrhdr.9 MLINKS+=accept_filter.9 accept_filt_add.9 \ accept_filter.9 accept_filt_del.9 \ accept_filter.9 accept_filt_generic_mod_event.9 \ accept_filter.9 accept_filt_get.9 MLINKS+=alq.9 ALQ.9 \ alq.9 alq_close.9 \ alq.9 alq_flush.9 \ alq.9 alq_get.9 \ alq.9 alq_getn.9 \ alq.9 alq_open.9 \ alq.9 alq_open_flags.9 \ alq.9 alq_post.9 \ alq.9 alq_post_flags.9 \ alq.9 alq_write.9 \ alq.9 alq_writen.9 MLINKS+=altq.9 ALTQ.9 MLINKS+=atomic.9 atomic_add.9 \ atomic.9 atomic_clear.9 \ atomic.9 atomic_cmpset.9 \ atomic.9 atomic_fcmpset.9 \ atomic.9 atomic_fetchadd.9 \ atomic.9 atomic_load.9 \ atomic.9 atomic_readandclear.9 \ atomic.9 atomic_set.9 \ atomic.9 atomic_store.9 \ atomic.9 atomic_subtract.9 \ atomic.9 atomic_swap.9 \ atomic.9 atomic_testandclear.9 \ atomic.9 atomic_testandset.9 \ atomic.9 atomic_thread_fence.9 MLINKS+=bhnd.9 BHND_MATCH_BOARD_TYPE.9 \ bhnd.9 BHND_MATCH_BOARD_VENDOR.9 \ bhnd.9 BHND_MATCH_CHIP_ID.9 \ bhnd.9 BHND_MATCH_CHIP_PKG.9 \ bhnd.9 BHND_MATCH_CHIP_REV.9 \ bhnd.9 BHND_MATCH_CORE_ID.9 \ bhnd.9 BHND_MATCH_CORE_VENDOR.9 \ bhnd.9 bhnd_activate_resource.9 \ bhnd.9 bhnd_alloc_pmu.9 \ bhnd.9 bhnd_alloc_resource.9 \ bhnd.9 bhnd_alloc_resource_any.9 \ bhnd.9 bhnd_alloc_resources.9 \ bhnd.9 bhnd_board_matches.9 \ bhnd.9 bhnd_bus_match_child.9 \ bhnd.9 bhnd_bus_read_1.9 \ bhnd.9 bhnd_bus_read_2.9 \ bhnd.9 bhnd_bus_read_4.9 \ bhnd.9 bhnd_bus_read_stream_1.9 \ bhnd.9 bhnd_bus_read_stream_2.9 \ bhnd.9 bhnd_bus_read_stream_4.9 \ bhnd.9 bhnd_bus_write_1.9 \ bhnd.9 bhnd_bus_write_2.9 \ bhnd.9 bhnd_bus_write_4.9 \ bhnd.9 bhnd_bus_write_stream_1.9 \ bhnd.9 bhnd_bus_write_stream_2.9 \ bhnd.9 bhnd_bus_write_stream_4.9 \ bhnd.9 bhnd_chip_matches.9 \ bhnd.9 bhnd_core_class.9 \ bhnd.9 bhnd_core_get_match_desc.9 \ bhnd.9 bhnd_core_matches.9 \ bhnd.9 bhnd_core_name.9 \ bhnd.9 bhnd_cores_equal.9 \ bhnd.9 bhnd_deactivate_resource.9 \ bhnd.9 bhnd_decode_port_rid.9 \ bhnd.9 bhnd_deregister_provider.9 \ bhnd.9 bhnd_device_lookup.9 \ bhnd.9 bhnd_device_matches.9 \ bhnd.9 bhnd_device_quirks.9 \ bhnd.9 bhnd_driver_get_erom_class.9 \ bhnd.9 bhnd_enable_clocks.9 \ bhnd.9 bhnd_find_core_class.9 \ bhnd.9 bhnd_find_core_name.9 \ bhnd.9 bhnd_format_chip_id.9 \ bhnd.9 bhnd_get_attach_type.9 \ bhnd.9 bhnd_get_chipid.9 \ bhnd.9 bhnd_get_class.9 \ bhnd.9 bhnd_get_clock_freq.9 \ bhnd.9 bhnd_get_clock_latency.9 \ bhnd.9 bhnd_get_core_index.9 \ bhnd.9 bhnd_get_core_info.9 \ bhnd.9 bhnd_get_core_unit.9 \ bhnd.9 bhnd_get_device.9 \ bhnd.9 bhnd_get_device_name.9 \ bhnd.9 bhnd_get_dma_translation.9 \ bhnd.9 bhnd_get_hwrev.9 \ bhnd.9 bhnd_get_intr_count.9 \ bhnd.9 bhnd_get_intr_ivec.9 \ bhnd.9 bhnd_get_port_count.9 \ bhnd.9 bhnd_get_port_rid.9 \ bhnd.9 bhnd_get_region_addr.9 \ bhnd.9 
bhnd_get_region_count.9 \ bhnd.9 bhnd_get_vendor.9 \ bhnd.9 bhnd_get_vendor_name.9 \ bhnd.9 bhnd_hwrev_matches.9 \ bhnd.9 bhnd_is_hw_suspended.9 \ bhnd.9 bhnd_is_region_valid.9 \ bhnd.9 bhnd_map_intr.9 \ bhnd.9 bhnd_match_core.9 \ bhnd.9 bhnd_nvram_getvar.9 \ bhnd.9 bhnd_nvram_getvar_array.9 \ bhnd.9 bhnd_nvram_getvar_int.9 \ bhnd.9 bhnd_nvram_getvar_int16.9 \ bhnd.9 bhnd_nvram_getvar_int32.9 \ bhnd.9 bhnd_nvram_getvar_int8.9 \ bhnd.9 bhnd_nvram_getvar_str.9 \ bhnd.9 bhnd_nvram_getvar_uint.9 \ bhnd.9 bhnd_nvram_getvar_uint16.9 \ bhnd.9 bhnd_nvram_getvar_uint32.9 \ bhnd.9 bhnd_nvram_getvar_uint8.9 \ bhnd.9 bhnd_nvram_string_array_next.9 \ bhnd.9 bhnd_read_board_info.9 \ bhnd.9 bhnd_read_config.9 \ bhnd.9 bhnd_read_ioctl.9 \ bhnd.9 bhnd_read_iost.9 \ bhnd.9 bhnd_register_provider.9 \ bhnd.9 bhnd_release_ext_rsrc.9 \ bhnd.9 bhnd_release_pmu.9 \ bhnd.9 bhnd_release_provider.9 \ bhnd.9 bhnd_release_resource.9 \ bhnd.9 bhnd_release_resources.9 \ bhnd.9 bhnd_request_clock.9 \ bhnd.9 bhnd_request_ext_rsrc.9 \ bhnd.9 bhnd_reset_hw.9 \ bhnd.9 bhnd_retain_provider.9 \ bhnd.9 bhnd_set_custom_core_desc.9 \ bhnd.9 bhnd_set_default_core_desc.9 \ bhnd.9 bhnd_suspend_hw.9 \ bhnd.9 bhnd_unmap_intr.9 \ bhnd.9 bhnd_vendor_name.9 \ bhnd.9 bhnd_write_config.9 \ bhnd.9 bhnd_write_ioctl.9 MLINKS+=bhnd_erom.9 bhnd_erom_alloc.9 \ bhnd_erom.9 bhnd_erom_dump.9 \ bhnd_erom.9 bhnd_erom_fini_static.9 \ bhnd_erom.9 bhnd_erom_free.9 \ bhnd_erom.9 bhnd_erom_free_core_table.9 \ bhnd_erom.9 bhnd_erom_get_core_table.9 \ bhnd_erom.9 bhnd_erom_init_static.9 \ bhnd_erom.9 bhnd_erom_io.9 \ bhnd_erom.9 bhnd_erom_io_fini.9 \ bhnd_erom.9 bhnd_erom_io_map.9 \ bhnd_erom.9 bhnd_erom_io_read.9 \ bhnd_erom.9 bhnd_erom_iobus_init.9 \ bhnd_erom.9 bhnd_erom_iores_new.9 \ bhnd_erom.9 bhnd_erom_lookup_core.9 \ bhnd_erom.9 bhnd_erom_lookup_core_addr.9 \ bhnd_erom.9 bhnd_erom_probe.9 \ bhnd_erom.9 bhnd_erom_probe_driver_classes.9 MLINKS+=bitset.9 BITSET_DEFINE.9 \ bitset.9 BITSET_T_INITIALIZER.9 \ bitset.9 BITSET_FSET.9 \ bitset.9 BIT_CLR.9 \ bitset.9 BIT_COPY.9 \ bitset.9 BIT_ISSET.9 \ bitset.9 BIT_SET.9 \ bitset.9 BIT_ZERO.9 \ bitset.9 BIT_FILL.9 \ bitset.9 BIT_SETOF.9 \ bitset.9 BIT_EMPTY.9 \ bitset.9 BIT_ISFULLSET.9 \ bitset.9 BIT_FFS.9 \ bitset.9 BIT_COUNT.9 \ bitset.9 BIT_SUBSET.9 \ bitset.9 BIT_OVERLAP.9 \ bitset.9 BIT_CMP.9 \ bitset.9 BIT_OR.9 \ bitset.9 BIT_AND.9 \ bitset.9 BIT_NAND.9 \ bitset.9 BIT_CLR_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC_ACQ.9 \ bitset.9 BIT_AND_ATOMIC.9 \ bitset.9 BIT_OR_ATOMIC.9 \ bitset.9 BIT_COPY_STORE_REL.9 MLINKS+=bpf.9 bpfattach.9 \ bpf.9 bpfattach2.9 \ bpf.9 bpfdetach.9 \ bpf.9 bpf_filter.9 \ bpf.9 bpf_mtap.9 \ bpf.9 bpf_mtap2.9 \ bpf.9 bpf_tap.9 \ bpf.9 bpf_validate.9 MLINKS+=buf.9 bp.9 MLINKS+=buf_ring.9 buf_ring_alloc.9 \ buf_ring.9 buf_ring_free.9 \ buf_ring.9 buf_ring_enqueue.9 \ buf_ring.9 buf_ring_enqueue_bytes.9 \ buf_ring.9 buf_ring_dequeue_mc.9 \ buf_ring.9 buf_ring_dequeue_sc.9 \ buf_ring.9 buf_ring_count.9 \ buf_ring.9 buf_ring_empty.9 \ buf_ring.9 buf_ring_full.9 \ buf_ring.9 buf_ring_peek.9 MLINKS+=bus_activate_resource.9 bus_deactivate_resource.9 MLINKS+=bus_alloc_resource.9 bus_alloc_resource_any.9 MLINKS+=BUS_BIND_INTR.9 bus_bind_intr.9 MLINKS+=BUS_DESCRIBE_INTR.9 bus_describe_intr.9 MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 bus_dmamap_create.9 \ bus_dma.9 bus_dmamap_destroy.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_bio.9 \ bus_dma.9 bus_dmamap_load_ccb.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ bus_dma.9 
bus_dmamap_load_uio.9 \ bus_dma.9 bus_dmamap_sync.9 \ bus_dma.9 bus_dmamap_unload.9 \ bus_dma.9 bus_dmamem_alloc.9 \ bus_dma.9 bus_dmamem_free.9 \ bus_dma.9 bus_dma_tag_create.9 \ bus_dma.9 bus_dma_tag_destroy.9 MLINKS+=bus_generic_read_ivar.9 bus_generic_write_ivar.9 MLINKS+=BUS_GET_CPUS.9 bus_get_cpus.9 MLINKS+=bus_map_resource.9 bus_unmap_resource.9 \ bus_map_resource.9 resource_init_map_request.9 MLINKS+=BUS_READ_IVAR.9 BUS_WRITE_IVAR.9 MLINKS+=BUS_SETUP_INTR.9 bus_setup_intr.9 \ BUS_SETUP_INTR.9 BUS_TEARDOWN_INTR.9 \ BUS_SETUP_INTR.9 bus_teardown_intr.9 MLINKS+=bus_space.9 bus_space_alloc.9 \ bus_space.9 bus_space_barrier.9 \ bus_space.9 bus_space_copy_region_1.9 \ bus_space.9 bus_space_copy_region_2.9 \ bus_space.9 bus_space_copy_region_4.9 \ bus_space.9 bus_space_copy_region_8.9 \ bus_space.9 bus_space_copy_region_stream_1.9 \ bus_space.9 bus_space_copy_region_stream_2.9 \ bus_space.9 bus_space_copy_region_stream_4.9 \ bus_space.9 bus_space_copy_region_stream_8.9 \ bus_space.9 bus_space_free.9 \ bus_space.9 bus_space_map.9 \ bus_space.9 bus_space_read_1.9 \ bus_space.9 bus_space_read_2.9 \ bus_space.9 bus_space_read_4.9 \ bus_space.9 bus_space_read_8.9 \ bus_space.9 bus_space_read_multi_1.9 \ bus_space.9 bus_space_read_multi_2.9 \ bus_space.9 bus_space_read_multi_4.9 \ bus_space.9 bus_space_read_multi_8.9 \ bus_space.9 bus_space_read_multi_stream_1.9 \ bus_space.9 bus_space_read_multi_stream_2.9 \ bus_space.9 bus_space_read_multi_stream_4.9 \ bus_space.9 bus_space_read_multi_stream_8.9 \ bus_space.9 bus_space_read_region_1.9 \ bus_space.9 bus_space_read_region_2.9 \ bus_space.9 bus_space_read_region_4.9 \ bus_space.9 bus_space_read_region_8.9 \ bus_space.9 bus_space_read_region_stream_1.9 \ bus_space.9 bus_space_read_region_stream_2.9 \ bus_space.9 bus_space_read_region_stream_4.9 \ bus_space.9 bus_space_read_region_stream_8.9 \ bus_space.9 bus_space_read_stream_1.9 \ bus_space.9 bus_space_read_stream_2.9 \ bus_space.9 bus_space_read_stream_4.9 \ bus_space.9 bus_space_read_stream_8.9 \ bus_space.9 bus_space_set_multi_1.9 \ bus_space.9 bus_space_set_multi_2.9 \ bus_space.9 bus_space_set_multi_4.9 \ bus_space.9 bus_space_set_multi_8.9 \ bus_space.9 bus_space_set_multi_stream_1.9 \ bus_space.9 bus_space_set_multi_stream_2.9 \ bus_space.9 bus_space_set_multi_stream_4.9 \ bus_space.9 bus_space_set_multi_stream_8.9 \ bus_space.9 bus_space_set_region_1.9 \ bus_space.9 bus_space_set_region_2.9 \ bus_space.9 bus_space_set_region_4.9 \ bus_space.9 bus_space_set_region_8.9 \ bus_space.9 bus_space_set_region_stream_1.9 \ bus_space.9 bus_space_set_region_stream_2.9 \ bus_space.9 bus_space_set_region_stream_4.9 \ bus_space.9 bus_space_set_region_stream_8.9 \ bus_space.9 bus_space_subregion.9 \ bus_space.9 bus_space_unmap.9 \ bus_space.9 bus_space_write_1.9 \ bus_space.9 bus_space_write_2.9 \ bus_space.9 bus_space_write_4.9 \ bus_space.9 bus_space_write_8.9 \ bus_space.9 bus_space_write_multi_1.9 \ bus_space.9 bus_space_write_multi_2.9 \ bus_space.9 bus_space_write_multi_4.9 \ bus_space.9 bus_space_write_multi_8.9 \ bus_space.9 bus_space_write_multi_stream_1.9 \ bus_space.9 bus_space_write_multi_stream_2.9 \ bus_space.9 bus_space_write_multi_stream_4.9 \ bus_space.9 bus_space_write_multi_stream_8.9 \ bus_space.9 bus_space_write_region_1.9 \ bus_space.9 bus_space_write_region_2.9 \ bus_space.9 bus_space_write_region_4.9 \ bus_space.9 bus_space_write_region_8.9 \ bus_space.9 bus_space_write_region_stream_1.9 \ bus_space.9 bus_space_write_region_stream_2.9 \ bus_space.9 
bus_space_write_region_stream_4.9 \ bus_space.9 bus_space_write_region_stream_8.9 \ bus_space.9 bus_space_write_stream_1.9 \ bus_space.9 bus_space_write_stream_2.9 \ bus_space.9 bus_space_write_stream_4.9 \ bus_space.9 bus_space_write_stream_8.9 MLINKS+=byteorder.9 be16dec.9 \ byteorder.9 be16enc.9 \ byteorder.9 be16toh.9 \ byteorder.9 be32dec.9 \ byteorder.9 be32enc.9 \ byteorder.9 be32toh.9 \ byteorder.9 be64dec.9 \ byteorder.9 be64enc.9 \ byteorder.9 be64toh.9 \ byteorder.9 bswap16.9 \ byteorder.9 bswap32.9 \ byteorder.9 bswap64.9 \ byteorder.9 htobe16.9 \ byteorder.9 htobe32.9 \ byteorder.9 htobe64.9 \ byteorder.9 htole16.9 \ byteorder.9 htole32.9 \ byteorder.9 htole64.9 \ byteorder.9 le16dec.9 \ byteorder.9 le16enc.9 \ byteorder.9 le16toh.9 \ byteorder.9 le32dec.9 \ byteorder.9 le32enc.9 \ byteorder.9 le32toh.9 \ byteorder.9 le64dec.9 \ byteorder.9 le64enc.9 \ byteorder.9 le64toh.9 MLINKS+=cnv.9 cnvlist.9 \ cnv.9 cnvlist_free_binary.9 \ cnv.9 cnvlist_free_bool.9 \ cnv.9 cnvlist_free_bool_array.9 \ cnv.9 cnvlist_free_descriptor.9 \ cnv.9 cnvlist_free_descriptor_array.9 \ cnv.9 cnvlist_free_null.9 \ cnv.9 cnvlist_free_number.9 \ cnv.9 cnvlist_free_number_array.9 \ cnv.9 cnvlist_free_nvlist.9 \ cnv.9 cnvlist_free_nvlist_array.9 \ cnv.9 cnvlist_free_string.9 \ cnv.9 cnvlist_free_string_array.9 \ cnv.9 cnvlist_get_binary.9 \ cnv.9 cnvlist_get_bool.9 \ cnv.9 cnvlist_get_bool_array.9 \ cnv.9 cnvlist_get_descriptor.9 \ cnv.9 cnvlist_get_descriptor_array.9 \ cnv.9 cnvlist_get_number.9 \ cnv.9 cnvlist_get_number_array.9 \ cnv.9 cnvlist_get_nvlist.9 \ cnv.9 cnvlist_get_nvlist_array.9 \ cnv.9 cnvlist_get_string.9 \ cnv.9 cnvlist_get_string_array.9 \ cnv.9 cnvlist_take_binary.9 \ cnv.9 cnvlist_take_bool.9 \ cnv.9 cnvlist_take_bool_array.9 \ cnv.9 cnvlist_take_descriptor.9 \ cnv.9 cnvlist_take_descriptor_array.9 \ cnv.9 cnvlist_take_number.9 \ cnv.9 cnvlist_take_number_array.9 \ cnv.9 cnvlist_take_nvlist.9 \ cnv.9 cnvlist_take_nvlist_array.9 \ cnv.9 cnvlist_take_string.9 \ cnv.9 cnvlist_take_string_array.9 MLINKS+=condvar.9 cv_broadcast.9 \ condvar.9 cv_broadcastpri.9 \ condvar.9 cv_destroy.9 \ condvar.9 cv_init.9 \ condvar.9 cv_signal.9 \ condvar.9 cv_timedwait.9 \ condvar.9 cv_timedwait_sig.9 \ condvar.9 cv_timedwait_sig_sbt.9 \ condvar.9 cv_wait.9 \ condvar.9 cv_wait_sig.9 \ condvar.9 cv_wait_unlock.9 \ condvar.9 cv_wmesg.9 MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_establish.9 \ config_intrhook.9 config_intrhook_oneshot.9 -MLINKS+=contigmalloc.9 contigfree.9 +MLINKS+=contigmalloc.9 contigmalloc_domainset.9 \ + contigmalloc.9 contigfree.9 MLINKS+=casuword.9 casueword.9 \ casuword.9 casueword32.9 \ casuword.9 casuword32.9 MLINKS+=copy.9 copyin.9 \ copy.9 copyin_nofault.9 \ copy.9 copyinstr.9 \ copy.9 copyout.9 \ copy.9 copyout_nofault.9 \ copy.9 copystr.9 MLINKS+=counter.9 counter_u64_alloc.9 \ counter.9 counter_u64_free.9 \ counter.9 counter_u64_add.9 \ counter.9 counter_enter.9 \ counter.9 counter_exit.9 \ counter.9 counter_u64_add_protected.9 \ counter.9 counter_u64_fetch.9 \ counter.9 counter_u64_zero.9 \ counter.9 SYSCTL_COUNTER_U64.9 \ counter.9 SYSCTL_ADD_COUNTER_U64.9 \ counter.9 SYSCTL_COUNTER_U64_ARRAY.9 \ counter.9 SYSCTL_ADD_COUNTER_U64_ARRAY.9 MLINKS+=cpuset.9 CPUSET_T_INITIALIZER.9 \ cpuset.9 CPUSET_FSET.9 \ cpuset.9 CPU_CLR.9 \ cpuset.9 CPU_COPY.9 \ cpuset.9 CPU_ISSET.9 \ cpuset.9 CPU_SET.9 \ cpuset.9 CPU_ZERO.9 \ cpuset.9 CPU_FILL.9 \ cpuset.9 CPU_SETOF.9 \ cpuset.9 CPU_EMPTY.9 \ cpuset.9 CPU_ISFULLSET.9 \ cpuset.9 CPU_FFS.9 \ 
cpuset.9 CPU_COUNT.9 \ cpuset.9 CPU_SUBSET.9 \ cpuset.9 CPU_OVERLAP.9 \ cpuset.9 CPU_CMP.9 \ cpuset.9 CPU_OR.9 \ cpuset.9 CPU_AND.9 \ cpuset.9 CPU_NAND.9 \ cpuset.9 CPU_CLR_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC_ACQ.9 \ cpuset.9 CPU_AND_ATOMIC.9 \ cpuset.9 CPU_OR_ATOMIC.9 \ cpuset.9 CPU_COPY_STORE_REL.9 MLINKS+=critical_enter.9 critical.9 \ critical_enter.9 critical_exit.9 MLINKS+=crypto.9 crypto_dispatch.9 \ crypto.9 crypto_done.9 \ crypto.9 crypto_freereq.9 \ crypto.9 crypto_freesession.9 \ crypto.9 crypto_get_driverid.9 \ crypto.9 crypto_getreq.9 \ crypto.9 crypto_kdispatch.9 \ crypto.9 crypto_kdone.9 \ crypto.9 crypto_kregister.9 \ crypto.9 crypto_newsession.9 \ crypto.9 crypto_register.9 \ crypto.9 crypto_unblock.9 \ crypto.9 crypto_unregister.9 \ crypto.9 crypto_unregister_all.9 MLINKS+=DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \ DB_COMMAND.9 DB_SHOW_COMMAND.9 MLINKS+=DECLARE_MODULE.9 DECLARE_MODULE_TIED.9 MLINKS+=dev_clone.9 drain_dev_clone_events.9 MLINKS+=dev_refthread.9 devvn_refthread.9 \ dev_refthread.9 dev_relthread.9 MLINKS+=devfs_set_cdevpriv.9 devfs_clear_cdevpriv.9 \ devfs_set_cdevpriv.9 devfs_get_cdevpriv.9 MLINKS+=device_add_child.9 device_add_child_ordered.9 MLINKS+=device_enable.9 device_disable.9 \ device_enable.9 device_is_enabled.9 MLINKS+=device_get_ivars.9 device_set_ivars.9 MLINKS+=device_get_name.9 device_get_nameunit.9 MLINKS+=device_get_state.9 device_busy.9 \ device_get_state.9 device_is_alive.9 \ device_get_state.9 device_is_attached.9 \ device_get_state.9 device_unbusy.9 MLINKS+=device_get_sysctl.9 device_get_sysctl_ctx.9 \ device_get_sysctl.9 device_get_sysctl_tree.9 MLINKS+=device_quiet.9 device_is_quiet.9 \ device_quiet.9 device_verbose.9 MLINKS+=device_set_desc.9 device_get_desc.9 \ device_set_desc.9 device_set_desc_copy.9 MLINKS+=device_set_flags.9 device_get_flags.9 MLINKS+=devstat.9 devicestat.9 \ devstat.9 devstat_add_entry.9 \ devstat.9 devstat_end_transaction.9 \ devstat.9 devstat_remove_entry.9 \ devstat.9 devstat_start_transaction.9 MLINKS+=disk.9 disk_add_alias.9 \ disk.9 disk_alloc.9 \ disk.9 disk_create.9 \ disk.9 disk_destroy.9 \ disk.9 disk_gone.9 \ disk.9 disk_resize.9 MLINKS+=dnv.9 dnvlist.9 \ dnv.9 dnvlist_get_binary.9 \ dnv.9 dnvlist_get_bool.9 \ dnv.9 dnvlist_get_descriptor.9 \ dnv.9 dnvlist_get_number.9 \ dnv.9 dnvlist_get_nvlist.9 \ dnv.9 dnvlist_get_string.9 \ dnv.9 dnvlist_take_binary.9 \ dnv.9 dnvlist_take_bool.9 \ dnv.9 dnvlist_take_descriptor.9 \ dnv.9 dnvlist_take_number.9 \ dnv.9 dnvlist_take_nvlist.9 \ dnv.9 dnvlist_take_string.9 MLINKS+=domain.9 DOMAIN_SET.9 \ domain.9 domain_add.9 \ domain.9 pfctlinput.9 \ domain.9 pfctlinput2.9 \ domain.9 pffinddomain.9 \ domain.9 pffindproto.9 \ domain.9 pffindtype.9 MLINKS+=drbr.9 drbr_free.9 \ drbr.9 drbr_enqueue.9 \ drbr.9 drbr_dequeue.9 \ drbr.9 drbr_dequeue_cond.9 \ drbr.9 drbr_flush.9 \ drbr.9 drbr_empty.9 \ drbr.9 drbr_inuse.9 \ drbr.9 drbr_stats_update.9 MLINKS+=DRIVER_MODULE.9 DRIVER_MODULE_ORDERED.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE_ORDERED.9 MLINKS+=EVENTHANDLER.9 EVENTHANDLER_DECLARE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEFINE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEREGISTER.9 \ EVENTHANDLER.9 eventhandler_deregister.9 \ EVENTHANDLER.9 eventhandler_find_list.9 \ EVENTHANDLER.9 EVENTHANDLER_INVOKE.9 \ EVENTHANDLER.9 eventhandler_prune_list.9 \ EVENTHANDLER.9 EVENTHANDLER_REGISTER.9 \ EVENTHANDLER.9 eventhandler_register.9 MLINKS+=eventtimers.9 et_register.9 \ eventtimers.9 et_deregister.9 \ eventtimers.9 et_ban.9 \ eventtimers.9 
et_find.9 \ eventtimers.9 et_free.9 \ eventtimers.9 et_init.9 \ eventtimers.9 ET_LOCK.9 \ eventtimers.9 ET_UNLOCK.9 \ eventtimers.9 et_start.9 \ eventtimers.9 et_stop.9 MLINKS+=fail.9 KFAIL_POINT_CODE.9 \ fail.9 KFAIL_POINT_ERROR.9 \ fail.9 KFAIL_POINT_GOTO.9 \ fail.9 KFAIL_POINT_RETURN.9 \ fail.9 KFAIL_POINT_RETURN_VOID.9 MLINKS+=fdt_pinctrl.9 fdt_pinctrl_configure.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_by_name.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_tree.9 \ fdt_pinctrl.9 fdt_pinctrl_register.9 MLINKS+=fetch.9 fubyte.9 \ fetch.9 fuword.9 \ fetch.9 fuword16.9 \ fetch.9 fuword32.9 \ fetch.9 fuword64.9 \ fetch.9 fueword.9 \ fetch.9 fueword32.9 \ fetch.9 fueword64.9 MLINKS+=firmware.9 firmware_get.9 \ firmware.9 firmware_put.9 \ firmware.9 firmware_register.9 \ firmware.9 firmware_unregister.9 MLINKS+=fpu_kern.9 fpu_kern_alloc_ctx.9 \ fpu_kern.9 fpu_kern_free_ctx.9 \ fpu_kern.9 fpu_kern_enter.9 \ fpu_kern.9 fpu_kern_leave.9 \ fpu_kern.9 fpu_kern_thread.9 \ fpu_kern.9 is_fpu_kern_thread.9 MLINKS+=g_attach.9 g_detach.9 MLINKS+=g_bio.9 g_alloc_bio.9 \ g_bio.9 g_clone_bio.9 \ g_bio.9 g_destroy_bio.9 \ g_bio.9 g_duplicate_bio.9 \ g_bio.9 g_new_bio.9 \ g_bio.9 g_print_bio.9 \ g_bio.9 g_reset_bio.9 MLINKS+=g_consumer.9 g_destroy_consumer.9 \ g_consumer.9 g_new_consumer.9 MLINKS+=g_data.9 g_read_data.9 \ g_data.9 g_write_data.9 MLINKS+=getenv.9 freeenv.9 \ getenv.9 getenv_int.9 \ getenv.9 getenv_long.9 \ getenv.9 getenv_string.9 \ getenv.9 getenv_quad.9 \ getenv.9 getenv_uint.9 \ getenv.9 getenv_ulong.9 \ getenv.9 kern_getenv.9 \ getenv.9 kern_setenv.9 \ getenv.9 kern_unsetenv.9 \ getenv.9 setenv.9 \ getenv.9 testenv.9 \ getenv.9 unsetenv.9 MLINKS+=g_event.9 g_cancel_event.9 \ g_event.9 g_post_event.9 \ g_event.9 g_waitfor_event.9 MLINKS+=g_geom.9 g_destroy_geom.9 \ g_geom.9 g_new_geomf.9 MLINKS+=g_provider.9 g_destroy_provider.9 \ g_provider.9 g_error_provider.9 \ g_provider.9 g_new_providerf.9 MLINKS+=hash.9 hash32.9 \ hash.9 hash32_buf.9 \ hash.9 hash32_str.9 \ hash.9 hash32_stre.9 \ hash.9 hash32_strn.9 \ hash.9 hash32_strne.9 \ hash.9 jenkins_hash.9 \ hash.9 jenkins_hash32.9 MLINKS+=hashinit.9 hashdestroy.9 \ hashinit.9 hashinit_flags.9 \ hashinit.9 phashinit.9 MLINKS+=hhook.9 hhook_head_register.9 \ hhook.9 hhook_head_deregister.9 \ hhook.9 hhook_head_deregister_lookup.9 \ hhook.9 hhook_run_hooks.9 \ hhook.9 HHOOKS_RUN_IF.9 \ hhook.9 HHOOKS_RUN_LOOKUP_IF.9 MLINKS+=ieee80211.9 ieee80211_ifattach.9 \ ieee80211.9 ieee80211_ifdetach.9 MLINKS+=ieee80211_amrr.9 ieee80211_amrr_choose.9 \ ieee80211_amrr.9 ieee80211_amrr_cleanup.9 \ ieee80211_amrr.9 ieee80211_amrr_init.9 \ ieee80211_amrr.9 ieee80211_amrr_node_init.9 \ ieee80211_amrr.9 ieee80211_amrr_setinterval.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_complete.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_update.9 MLINKS+=ieee80211_beacon.9 ieee80211_beacon_alloc.9 \ ieee80211_beacon.9 ieee80211_beacon_notify.9 \ ieee80211_beacon.9 ieee80211_beacon_update.9 MLINKS+=ieee80211_bmiss.9 ieee80211_beacon_miss.9 MLINKS+=ieee80211_crypto.9 ieee80211_crypto_available.9 \ ieee80211_crypto.9 ieee80211_crypto_decap.9 \ ieee80211_crypto.9 ieee80211_crypto_delglobalkeys.9 \ ieee80211_crypto.9 ieee80211_crypto_delkey.9 \ ieee80211_crypto.9 ieee80211_crypto_demic.9 \ ieee80211_crypto.9 ieee80211_crypto_encap.9 \ ieee80211_crypto.9 ieee80211_crypto_enmic.9 \ ieee80211_crypto.9 ieee80211_crypto_newkey.9 \ ieee80211_crypto.9 ieee80211_crypto_register.9 \ ieee80211_crypto.9 ieee80211_crypto_reload_keys.9 \ ieee80211_crypto.9 ieee80211_crypto_setkey.9 \ ieee80211_crypto.9 
ieee80211_crypto_unregister.9 \ ieee80211_crypto.9 ieee80211_key_update_begin.9 \ ieee80211_crypto.9 ieee80211_key_update_end.9 \ ieee80211_crypto.9 ieee80211_notify_michael_failure.9 \ ieee80211_crypto.9 ieee80211_notify_replay_failure.9 MLINKS+=ieee80211_input.9 ieee80211_input_all.9 MLINKS+=ieee80211_node.9 ieee80211_dump_node.9 \ ieee80211_node.9 ieee80211_dump_nodes.9 \ ieee80211_node.9 ieee80211_find_rxnode.9 \ ieee80211_node.9 ieee80211_find_rxnode_withkey.9 \ ieee80211_node.9 ieee80211_free_node.9 \ ieee80211_node.9 ieee80211_iterate_nodes.9 \ ieee80211_node.9 ieee80211_ref_node.9 \ ieee80211_node.9 ieee80211_unref_node.9 MLINKS+=ieee80211_output.9 ieee80211_process_callback.9 \ ieee80211_output.9 M_SEQNO_GET.9 \ ieee80211_output.9 M_WME_GETAC.9 MLINKS+=ieee80211_proto.9 ieee80211_new_state.9 \ ieee80211_proto.9 ieee80211_resume_all.9 \ ieee80211_proto.9 ieee80211_start_all.9 \ ieee80211_proto.9 ieee80211_stop_all.9 \ ieee80211_proto.9 ieee80211_suspend_all.9 \ ieee80211_proto.9 ieee80211_waitfor_parent.9 MLINKS+=ieee80211_radiotap.9 ieee80211_radiotap_active.9 \ ieee80211_radiotap.9 ieee80211_radiotap_active_vap.9 \ ieee80211_radiotap.9 ieee80211_radiotap_attach.9 \ ieee80211_radiotap.9 ieee80211_radiotap_tx.9 \ ieee80211_radiotap.9 radiotap.9 MLINKS+=ieee80211_regdomain.9 ieee80211_alloc_countryie.9 \ ieee80211_regdomain.9 ieee80211_init_channels.9 \ ieee80211_regdomain.9 ieee80211_sort_channels.9 MLINKS+=ieee80211_scan.9 ieee80211_add_scan.9 \ ieee80211_scan.9 ieee80211_bg_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan_any.9 \ ieee80211_scan.9 ieee80211_check_scan.9 \ ieee80211_scan.9 ieee80211_check_scan_current.9 \ ieee80211_scan.9 ieee80211_flush.9 \ ieee80211_scan.9 ieee80211_probe_curchan.9 \ ieee80211_scan.9 ieee80211_scan_assoc_fail.9 \ ieee80211_scan.9 ieee80211_scan_done.9 \ ieee80211_scan.9 ieee80211_scan_dump_channels.9 \ ieee80211_scan.9 ieee80211_scan_flush.9 \ ieee80211_scan.9 ieee80211_scan_iterate.9 \ ieee80211_scan.9 ieee80211_scan_next.9 \ ieee80211_scan.9 ieee80211_scan_timeout.9 \ ieee80211_scan.9 ieee80211_scanner_get.9 \ ieee80211_scan.9 ieee80211_scanner_register.9 \ ieee80211_scan.9 ieee80211_scanner_unregister.9 \ ieee80211_scan.9 ieee80211_scanner_unregister_all.9 \ ieee80211_scan.9 ieee80211_start_scan.9 MLINKS+=ieee80211_vap.9 ieee80211_vap_attach.9 \ ieee80211_vap.9 ieee80211_vap_detach.9 \ ieee80211_vap.9 ieee80211_vap_setup.9 MLINKS+=iflibdd.9 ifdi_attach_pre.9 \ iflibdd.9 ifdi_attach_post.9 \ iflibdd.9 ifdi_detach.9 \ iflibdd.9 ifdi_get_counter.9 \ iflibdd.9 ifdi_i2c_req.9 \ iflibdd.9 ifdi_init.9 \ iflibdd.9 ifdi_intr_enable.9 \ iflibdd.9 ifdi_intr_disable.9 \ iflibdd.9 ifdi_led_func.9 \ iflibdd.9 ifdi_link_intr_enable.9 \ iflibdd.9 ifdi_media_set.9 \ iflibdd.9 ifdi_media_status.9 \ iflibdd.9 ifdi_media_change.9 \ iflibdd.9 ifdi_mtu_set.9 \ iflibdd.9 ifdi_multi_set.9 \ iflibdd.9 ifdi_promisc_set.9 \ iflibdd.9 ifdi_queues_alloc.9 \ iflibdd.9 ifdi_queues_free.9 \ iflibdd.9 ifdi_queue_intr_enable.9 \ iflibdd.9 ifdi_resume.9 \ iflibdd.9 ifdi_rxq_setup.9 \ iflibdd.9 ifdi_stop.9 \ iflibdd.9 ifdi_suspend.9 \ iflibdd.9 ifdi_sysctl_int_delay.9 \ iflibdd.9 ifdi_timer.9 \ iflibdd.9 ifdi_txq_setup.9 \ iflibdd.9 ifdi_update_admin_status.9 \ iflibdd.9 ifdi_vf_add.9 \ iflibdd.9 ifdi_vflr_handle.9 \ iflibdd.9 ifdi_vlan_register.9 \ iflibdd.9 ifdi_vlan_unregister.9 \ iflibdd.9 ifdi_watchdog_reset.9 \ iflibdd.9 iov_init.9 \ iflibdd.9 iov_uinit.9 MLINKS+=iflibdi.9 iflib_add_int_delay_sysctl.9 \ iflibdi.9 
iflib_device_attach.9 \ iflibdi.9 iflib_device_deregister.9 \ iflibdi.9 iflib_device_detach.9 \ iflibdi.9 iflib_device_suspend.9 \ iflibdi.9 iflib_device_register.9 \ iflibdi.9 iflib_device_resume.9 \ iflibdi.9 iflib_led_create.9 \ iflibdi.9 iflib_irq_alloc.9 \ iflibdi.9 iflib_irq_alloc_generic.9 \ iflibdi.9 iflib_link_intr_deferred.9 \ iflibdi.9 iflib_link_state_change.9 \ iflibdi.9 iflib_rx_intr_deferred.9 \ iflibdi.9 iflib_tx_intr_deferred.9 MLINKS+=iflibtxrx.9 isc_rxd_available.9 \ iflibtxrx.9 isc_rxd_refill.9 \ iflibtxrx.9 isc_rxd_flush.9 \ iflibtxrx.9 isc_rxd_pkt_get.9 \ iflibtxrx.9 isc_txd_credits_update.9 \ iflibtxrx.9 isc_txd_encap.9 \ iflibtxrx.9 isc_txd_flush.9 MLINKS+=ifnet.9 if_addmulti.9 \ ifnet.9 if_alloc.9 \ ifnet.9 if_allmulti.9 \ ifnet.9 if_attach.9 \ ifnet.9 if_data.9 \ ifnet.9 IF_DEQUEUE.9 \ ifnet.9 if_delmulti.9 \ ifnet.9 if_detach.9 \ ifnet.9 if_down.9 \ ifnet.9 if_findmulti.9 \ ifnet.9 if_free.9 \ ifnet.9 if_free_type.9 \ ifnet.9 if_up.9 \ ifnet.9 ifa_free.9 \ ifnet.9 ifa_ifwithaddr.9 \ ifnet.9 ifa_ifwithdstaddr.9 \ ifnet.9 ifa_ifwithnet.9 \ ifnet.9 ifa_ref.9 \ ifnet.9 ifaddr.9 \ ifnet.9 ifaddr_byindex.9 \ ifnet.9 ifaof_ifpforaddr.9 \ ifnet.9 ifioctl.9 \ ifnet.9 ifpromisc.9 \ ifnet.9 ifqueue.9 \ ifnet.9 ifunit.9 \ ifnet.9 ifunit_ref.9 MLINKS+=insmntque.9 insmntque1.9 MLINKS+=ithread.9 ithread_add_handler.9 \ ithread.9 ithread_create.9 \ ithread.9 ithread_destroy.9 \ ithread.9 ithread_priority.9 \ ithread.9 ithread_remove_handler.9 \ ithread.9 ithread_schedule.9 MLINKS+=kernacc.9 useracc.9 MLINKS+=kernel_mount.9 free_mntarg.9 \ kernel_mount.9 kernel_vmount.9 \ kernel_mount.9 mount_arg.9 \ kernel_mount.9 mount_argb.9 \ kernel_mount.9 mount_argf.9 \ kernel_mount.9 mount_argsu.9 MLINKS+=khelp.9 khelp_add_hhook.9 \ khelp.9 KHELP_DECLARE_MOD.9 \ khelp.9 KHELP_DECLARE_MOD_UMA.9 \ khelp.9 khelp_destroy_osd.9 \ khelp.9 khelp_get_id.9 \ khelp.9 khelp_get_osd.9 \ khelp.9 khelp_init_osd.9 \ khelp.9 khelp_remove_hhook.9 MLINKS+=kobj.9 DEFINE_CLASS.9 \ kobj.9 kobj_class_compile.9 \ kobj.9 kobj_class_compile_static.9 \ kobj.9 kobj_class_free.9 \ kobj.9 kobj_create.9 \ kobj.9 kobj_delete.9 \ kobj.9 kobj_init.9 \ kobj.9 kobj_init_static.9 MLINKS+=kproc.9 kproc_create.9 \ kproc.9 kproc_exit.9 \ kproc.9 kproc_kthread_add.9 \ kproc.9 kproc_resume.9 \ kproc.9 kproc_shutdown.9 \ kproc.9 kproc_start.9 \ kproc.9 kproc_suspend.9 \ kproc.9 kproc_suspend_check.9 \ kproc.9 kthread_create.9 MLINKS+=kqueue.9 knlist_add.9 \ kqueue.9 knlist_clear.9 \ kqueue.9 knlist_delete.9 \ kqueue.9 knlist_destroy.9 \ kqueue.9 knlist_empty.9 \ kqueue.9 knlist_init.9 \ kqueue.9 knlist_init_mtx.9 \ kqueue.9 knlist_init_rw_reader.9 \ kqueue.9 knlist_remove.9 \ kqueue.9 knlist_remove_inevent.9 \ kqueue.9 knote_fdclose.9 \ kqueue.9 KNOTE_LOCKED.9 \ kqueue.9 KNOTE_UNLOCKED.9 \ kqueue.9 kqfd_register.9 \ kqueue.9 kqueue_add_filteropts.9 \ kqueue.9 kqueue_del_filteropts.9 MLINKS+=kthread.9 kthread_add.9 \ kthread.9 kthread_exit.9 \ kthread.9 kthread_resume.9 \ kthread.9 kthread_shutdown.9 \ kthread.9 kthread_start.9 \ kthread.9 kthread_suspend.9 \ kthread.9 kthread_suspend_check.9 MLINKS+=ktr.9 CTR0.9 \ ktr.9 CTR1.9 \ ktr.9 CTR2.9 \ ktr.9 CTR3.9 \ ktr.9 CTR4.9 \ ktr.9 CTR5.9 \ ktr.9 CTR6.9 MLINKS+=lock.9 lockdestroy.9 \ lock.9 lockinit.9 \ lock.9 lockmgr.9 \ lock.9 lockmgr_args.9 \ lock.9 lockmgr_args_rw.9 \ lock.9 lockmgr_assert.9 \ lock.9 lockmgr_disown.9 \ lock.9 lockmgr_printinfo.9 \ lock.9 lockmgr_recursed.9 \ lock.9 lockmgr_rw.9 \ lock.9 lockstatus.9 MLINKS+=LOCK_PROFILING.9 MUTEX_PROFILING.9 MLINKS+=make_dev.9 
destroy_dev.9 \ make_dev.9 destroy_dev_drain.9 \ make_dev.9 destroy_dev_sched.9 \ make_dev.9 destroy_dev_sched_cb.9 \ make_dev.9 dev_depends.9 \ make_dev.9 make_dev_alias.9 \ make_dev.9 make_dev_alias_p.9 \ make_dev.9 make_dev_cred.9 \ make_dev.9 make_dev_credf.9 \ make_dev.9 make_dev_p.9 \ make_dev.9 make_dev_s.9 MLINKS+=malloc.9 free.9 \ - malloc.9 malloc_domain.9 \ + malloc.9 malloc_domainset.9 \ malloc.9 free_domain.9 \ malloc.9 mallocarray.9 \ malloc.9 MALLOC_DECLARE.9 \ malloc.9 MALLOC_DEFINE.9 \ malloc.9 realloc.9 \ malloc.9 reallocf.9 MLINKS+=mbchain.9 mb_detach.9 \ mbchain.9 mb_done.9 \ mbchain.9 mb_fixhdr.9 \ mbchain.9 mb_init.9 \ mbchain.9 mb_initm.9 \ mbchain.9 mb_put_int64be.9 \ mbchain.9 mb_put_int64le.9 \ mbchain.9 mb_put_mbuf.9 \ mbchain.9 mb_put_mem.9 \ mbchain.9 mb_put_uint16be.9 \ mbchain.9 mb_put_uint16le.9 \ mbchain.9 mb_put_uint32be.9 \ mbchain.9 mb_put_uint32le.9 \ mbchain.9 mb_put_uint8.9 \ mbchain.9 mb_put_uio.9 \ mbchain.9 mb_reserve.9 MLINKS+=\ mbuf.9 m_adj.9 \ mbuf.9 m_align.9 \ mbuf.9 M_ALIGN.9 \ mbuf.9 m_append.9 \ mbuf.9 m_apply.9 \ mbuf.9 m_cat.9 \ mbuf.9 m_catpkt.9 \ mbuf.9 MCHTYPE.9 \ mbuf.9 MCLGET.9 \ mbuf.9 m_collapse.9 \ mbuf.9 m_copyback.9 \ mbuf.9 m_copydata.9 \ mbuf.9 m_copym.9 \ mbuf.9 m_copypacket.9 \ mbuf.9 m_copyup.9 \ mbuf.9 m_defrag.9 \ mbuf.9 m_devget.9 \ mbuf.9 m_dup.9 \ mbuf.9 m_dup_pkthdr.9 \ mbuf.9 MEXTADD.9 \ mbuf.9 m_fixhdr.9 \ mbuf.9 m_free.9 \ mbuf.9 m_freem.9 \ mbuf.9 MGET.9 \ mbuf.9 m_get.9 \ mbuf.9 m_get2.9 \ mbuf.9 m_getjcl.9 \ mbuf.9 m_getcl.9 \ mbuf.9 MGETHDR.9 \ mbuf.9 m_gethdr.9 \ mbuf.9 m_getm.9 \ mbuf.9 m_getptr.9 \ mbuf.9 MH_ALIGN.9 \ mbuf.9 M_LEADINGSPACE.9 \ mbuf.9 m_length.9 \ mbuf.9 M_MOVE_PKTHDR.9 \ mbuf.9 m_move_pkthdr.9 \ mbuf.9 M_PREPEND.9 \ mbuf.9 m_prepend.9 \ mbuf.9 m_pulldown.9 \ mbuf.9 m_pullup.9 \ mbuf.9 m_split.9 \ mbuf.9 mtod.9 \ mbuf.9 M_TRAILINGSPACE.9 \ mbuf.9 m_unshare.9 \ mbuf.9 M_WRITABLE.9 MLINKS+=\ mbuf_tags.9 m_tag_alloc.9 \ mbuf_tags.9 m_tag_copy.9 \ mbuf_tags.9 m_tag_copy_chain.9 \ mbuf_tags.9 m_tag_delete.9 \ mbuf_tags.9 m_tag_delete_chain.9 \ mbuf_tags.9 m_tag_delete_nonpersistent.9 \ mbuf_tags.9 m_tag_find.9 \ mbuf_tags.9 m_tag_first.9 \ mbuf_tags.9 m_tag_free.9 \ mbuf_tags.9 m_tag_get.9 \ mbuf_tags.9 m_tag_init.9 \ mbuf_tags.9 m_tag_locate.9 \ mbuf_tags.9 m_tag_next.9 \ mbuf_tags.9 m_tag_prepend.9 \ mbuf_tags.9 m_tag_unlink.9 MLINKS+=MD5.9 MD5Init.9 \ MD5.9 MD5Transform.9 MLINKS+=mdchain.9 md_append_record.9 \ mdchain.9 md_done.9 \ mdchain.9 md_get_int64.9 \ mdchain.9 md_get_int64be.9 \ mdchain.9 md_get_int64le.9 \ mdchain.9 md_get_mbuf.9 \ mdchain.9 md_get_mem.9 \ mdchain.9 md_get_uint16.9 \ mdchain.9 md_get_uint16be.9 \ mdchain.9 md_get_uint16le.9 \ mdchain.9 md_get_uint32.9 \ mdchain.9 md_get_uint32be.9 \ mdchain.9 md_get_uint32le.9 \ mdchain.9 md_get_uint8.9 \ mdchain.9 md_get_uio.9 \ mdchain.9 md_initm.9 \ mdchain.9 md_next_record.9 MLINKS+=microtime.9 bintime.9 \ microtime.9 getbintime.9 \ microtime.9 getmicrotime.9 \ microtime.9 getnanotime.9 \ microtime.9 nanotime.9 MLINKS+=microuptime.9 binuptime.9 \ microuptime.9 getbinuptime.9 \ microuptime.9 getmicrouptime.9 \ microuptime.9 getnanouptime.9 \ microuptime.9 getsbinuptime.9 \ microuptime.9 nanouptime.9 \ microuptime.9 sbinuptime.9 MLINKS+=mi_switch.9 cpu_switch.9 \ mi_switch.9 cpu_throw.9 MLINKS+=mod_cc.9 CCV.9 \ mod_cc.9 DECLARE_CC_MODULE.9 MLINKS+=mtx_pool.9 mtx_pool_alloc.9 \ mtx_pool.9 mtx_pool_create.9 \ mtx_pool.9 mtx_pool_destroy.9 \ mtx_pool.9 mtx_pool_find.9 \ mtx_pool.9 mtx_pool_lock.9 \ mtx_pool.9 mtx_pool_lock_spin.9 \ 
mtx_pool.9 mtx_pool_unlock.9 \ mtx_pool.9 mtx_pool_unlock_spin.9 MLINKS+=mutex.9 mtx_assert.9 \ mutex.9 mtx_destroy.9 \ mutex.9 mtx_init.9 \ mutex.9 mtx_initialized.9 \ mutex.9 mtx_lock.9 \ mutex.9 mtx_lock_flags.9 \ mutex.9 mtx_lock_spin.9 \ mutex.9 mtx_lock_spin_flags.9 \ mutex.9 mtx_owned.9 \ mutex.9 mtx_recursed.9 \ mutex.9 mtx_sleep.9 \ mutex.9 MTX_SYSINIT.9 \ mutex.9 mtx_trylock.9 \ mutex.9 mtx_trylock_flags.9 \ mutex.9 mtx_trylock_spin.9 \ mutex.9 mtx_trylock_spin_flags.9 \ mutex.9 mtx_unlock.9 \ mutex.9 mtx_unlock_flags.9 \ mutex.9 mtx_unlock_spin.9 \ mutex.9 mtx_unlock_spin_flags.9 MLINKS+=namei.9 NDFREE.9 \ namei.9 NDINIT.9 MLINKS+=netisr.9 netisr_clearqdrops.9 \ netisr.9 netisr_default_flow2cpu.9 \ netisr.9 netisr_dispatch.9 \ netisr.9 netisr_dispatch_src.9 \ netisr.9 netisr_get_cpucount.9 \ netisr.9 netisr_get_cpuid.9 \ netisr.9 netisr_getqdrops.9 \ netisr.9 netisr_getqlimit.9 \ netisr.9 netisr_queue.9 \ netisr.9 netisr_queue_src.9 \ netisr.9 netisr_register.9 \ netisr.9 netisr_setqlimit.9 \ netisr.9 netisr_unregister.9 MLINKS+=nv.9 libnv.9 \ nv.9 nvlist.9 \ nv.9 nvlist_add_binary.9 \ nv.9 nvlist_add_bool.9 \ nv.9 nvlist_add_bool_array.9 \ nv.9 nvlist_add_descriptor.9 \ nv.9 nvlist_add_descriptor_array.9 \ nv.9 nvlist_add_null.9 \ nv.9 nvlist_add_number.9 \ nv.9 nvlist_add_number_array.9 \ nv.9 nvlist_add_nvlist.9 \ nv.9 nvlist_add_nvlist_array.9 \ nv.9 nvlist_add_string.9 \ nv.9 nvlist_add_stringf.9 \ nv.9 nvlist_add_stringv.9 \ nv.9 nvlist_add_string_array.9 \ nv.9 nvlist_clone.9 \ nv.9 nvlist_create.9 \ nv.9 nvlist_destroy.9 \ nv.9 nvlist_dump.9 \ nv.9 nvlist_empty.9 \ nv.9 nvlist_error.9 \ nv.9 nvlist_exists.9 \ nv.9 nvlist_exists_binary.9 \ nv.9 nvlist_exists_bool.9 \ nv.9 nvlist_exists_bool_array.9 \ nv.9 nvlist_exists_descriptor.9 \ nv.9 nvlist_exists_descriptor_array.9 \ nv.9 nvlist_exists_null.9 \ nv.9 nvlist_exists_number.9 \ nv.9 nvlist_exists_number_array.9 \ nv.9 nvlist_exists_nvlist.9 \ nv.9 nvlist_exists_nvlist_array.9 \ nv.9 nvlist_exists_string.9 \ nv.9 nvlist_exists_type.9 \ nv.9 nvlist_fdump.9 \ nv.9 nvlist_flags.9 \ nv.9 nvlist_free.9 \ nv.9 nvlist_free_binary.9 \ nv.9 nvlist_free_bool.9 \ nv.9 nvlist_free_bool_array.9 \ nv.9 nvlist_free_descriptor.9 \ nv.9 nvlist_free_descriptor_array.9 \ nv.9 nvlist_free_null.9 \ nv.9 nvlist_free_number.9 \ nv.9 nvlist_free_number_array.9 \ nv.9 nvlist_free_nvlist.9 \ nv.9 nvlist_free_nvlist_array.9 \ nv.9 nvlist_free_string.9 \ nv.9 nvlist_free_string_array.9 \ nv.9 nvlist_free_type.9 \ nv.9 nvlist_get_binary.9 \ nv.9 nvlist_get_bool.9 \ nv.9 nvlist_get_bool_array.9 \ nv.9 nvlist_get_descriptor.9 \ nv.9 nvlist_get_descriptor_array.9 \ nv.9 nvlist_get_number.9 \ nv.9 nvlist_get_number_array.9 \ nv.9 nvlist_get_nvlist.9 \ nv.9 nvlist_get_nvlist_array.9 \ nv.9 nvlist_get_parent.9 \ nv.9 nvlist_get_string.9 \ nv.9 nvlist_get_string_array.9 \ nv.9 nvlist_move_binary.9 \ nv.9 nvlist_move_descriptor.9 \ nv.9 nvlist_move_descriptor_array.9 \ nv.9 nvlist_move_nvlist.9 \ nv.9 nvlist_move_nvlist_array.9 \ nv.9 nvlist_move_string.9 \ nv.9 nvlist_move_string_array.9 \ nv.9 nvlist_next.9 \ nv.9 nvlist_pack.9 \ nv.9 nvlist_recv.9 \ nv.9 nvlist_send.9 \ nv.9 nvlist_set_error.9 \ nv.9 nvlist_size.9 \ nv.9 nvlist_take_binary.9 \ nv.9 nvlist_take_bool.9 \ nv.9 nvlist_take_bool_array.9 \ nv.9 nvlist_take_descriptor.9 \ nv.9 nvlist_take_descriptor_array.9 \ nv.9 nvlist_take_number.9 \ nv.9 nvlist_take_number_array.9 \ nv.9 nvlist_take_nvlist.9 \ nv.9 nvlist_take_nvlist_array.9 \ nv.9 nvlist_take_string.9 \ nv.9 nvlist_take_string_array.9 \ 
nv.9 nvlist_unpack.9 \ nv.9 nvlist_xfer.9 MLINKS+=OF_child.9 OF_parent.9 \ OF_child.9 OF_peer.9 MLINKS+=OF_device_from_xref.9 OF_device_register_xref.9 \ OF_device_from_xref.9 OF_xref_from_device.9 MLINKS+=OF_getprop.9 OF_getencprop.9 \ OF_getprop.9 OF_getencprop_alloc.9 \ OF_getprop.9 OF_getencprop_alloc_multi.9 \ OF_getprop.9 OF_getprop_alloc.9 \ OF_getprop.9 OF_getprop_alloc_multi.9 \ OF_getprop.9 OF_getproplen.9 \ OF_getprop.9 OF_hasprop.9 \ OF_getprop.9 OF_nextprop.9 \ OF_getprop.9 OF_prop_free.9 \ OF_getprop.9 OF_searchencprop.9 \ OF_getprop.9 OF_searchprop.9 \ OF_getprop.9 OF_setprop.9 MLINKS+=OF_node_from_xref.9 OF_xref_from_node.9 MLINKS+=ofw_bus_is_compatible.9 ofw_bus_is_compatible_strict.9 \ ofw_bus_is_compatible.9 ofw_bus_node_is_compatible.9 \ ofw_bus_is_compatible.9 ofw_bus_search_compatible.9 MLINKS+= ofw_bus_status_okay.9 ofw_bus_get_status.9 \ ofw_bus_status_okay.9 ofw_bus_node_status_okay.9 MLINKS+=osd.9 osd_call.9 \ osd.9 osd_del.9 \ osd.9 osd_deregister.9 \ osd.9 osd_exit.9 \ osd.9 osd_get.9 \ osd.9 osd_register.9 \ osd.9 osd_set.9 MLINKS+=panic.9 vpanic.9 MLINKS+=pbuf.9 getpbuf.9 \ pbuf.9 relpbuf.9 \ pbuf.9 trypbuf.9 MLINKS+=PCBGROUP.9 in_pcbgroup_byhash.9 \ PCBGROUP.9 in_pcbgroup_byinpcb.9 \ PCBGROUP.9 in_pcbgroup_destroy.9 \ PCBGROUP.9 in_pcbgroup_enabled.9 \ PCBGROUP.9 in_pcbgroup_init.9 \ PCBGROUP.9 in_pcbgroup_remove.9 \ PCBGROUP.9 in_pcbgroup_update.9 \ PCBGROUP.9 in_pcbgroup_update_mbuf.9 \ PCBGROUP.9 in6_pcbgroup_byhash.9 MLINKS+=pci.9 pci_alloc_msi.9 \ pci.9 pci_alloc_msix.9 \ pci.9 pci_disable_busmaster.9 \ pci.9 pci_disable_io.9 \ pci.9 pci_enable_busmaster.9 \ pci.9 pci_enable_io.9 \ pci.9 pci_find_bsf.9 \ pci.9 pci_find_cap.9 \ pci.9 pci_find_dbsf.9 \ pci.9 pci_find_device.9 \ pci.9 pci_find_extcap.9 \ pci.9 pci_find_htcap.9 \ pci.9 pci_find_pcie_root_port.9 \ pci.9 pci_get_id.9 \ pci.9 pci_get_max_read_req.9 \ pci.9 pci_get_powerstate.9 \ pci.9 pci_get_vpd_ident.9 \ pci.9 pci_get_vpd_readonly.9 \ pci.9 pci_iov_attach.9 \ pci.9 pci_iov_attach_name.9 \ pci.9 pci_iov_detach.9 \ pci.9 pci_msi_count.9 \ pci.9 pci_msix_count.9 \ pci.9 pci_msix_pba_bar.9 \ pci.9 pci_msix_table_bar.9 \ pci.9 pci_pending_msix.9 \ pci.9 pci_read_config.9 \ pci.9 pci_release_msi.9 \ pci.9 pci_remap_msix.9 \ pci.9 pci_restore_state.9 \ pci.9 pci_save_state.9 \ pci.9 pci_set_powerstate.9 \ pci.9 pci_set_max_read_req.9 \ pci.9 pci_write_config.9 \ pci.9 pcie_adjust_config.9 \ pci.9 pcie_flr.9 \ pci.9 pcie_max_completion_timeout.9 \ pci.9 pcie_read_config.9 \ pci.9 pcie_wait_for_pending_transactions.9 \ pci.9 pcie_write_config.9 MLINKS+=pci_iov_schema.9 pci_iov_schema_alloc_node.9 \ pci_iov_schema.9 pci_iov_schema_add_bool.9 \ pci_iov_schema.9 pci_iov_schema_add_string.9 \ pci_iov_schema.9 pci_iov_schema_add_uint8.9 \ pci_iov_schema.9 pci_iov_schema_add_uint16.9 \ pci_iov_schema.9 pci_iov_schema_add_uint32.9 \ pci_iov_schema.9 pci_iov_schema_add_uint64.9 \ pci_iov_schema.9 pci_iov_schema_add_unicast_mac.9 MLINKS+=pfil.9 pfil_add_hook.9 \ pfil.9 pfil_head_register.9 \ pfil.9 pfil_head_unregister.9 \ pfil.9 pfil_hook_get.9 \ pfil.9 pfil_remove_hook.9 \ pfil.9 pfil_rlock.9 \ pfil.9 pfil_run_hooks.9 \ pfil.9 pfil_runlock.9 \ pfil.9 pfil_wlock.9 \ pfil.9 pfil_wunlock.9 MLINKS+=pfind.9 zpfind.9 MLINKS+=PHOLD.9 PRELE.9 \ PHOLD.9 _PHOLD.9 \ PHOLD.9 _PRELE.9 \ PHOLD.9 PROC_ASSERT_HELD.9 \ PHOLD.9 PROC_ASSERT_NOT_HELD.9 MLINKS+=pmap_copy.9 pmap_copy_page.9 MLINKS+=pmap_extract.9 pmap_extract_and_hold.9 MLINKS+=pmap_init.9 pmap_init2.9 MLINKS+=pmap_is_modified.9 pmap_ts_referenced.9 
MLINKS+=pmap_pinit.9 pmap_pinit0.9 \ pmap_pinit.9 pmap_pinit2.9 MLINKS+=pmap_qenter.9 pmap_qremove.9 MLINKS+=pmap_quick_enter_page.9 pmap_quick_remove_page.9 MLINKS+=pmap_remove.9 pmap_remove_all.9 \ pmap_remove.9 pmap_remove_pages.9 MLINKS+=pmap_resident_count.9 pmap_wired_count.9 MLINKS+=pmap_zero_page.9 pmap_zero_area.9 MLINKS+=printf.9 log.9 \ printf.9 tprintf.9 \ printf.9 uprintf.9 MLINKS+=priv.9 priv_check.9 \ priv.9 priv_check_cred.9 MLINKS+=proc_rwmem.9 proc_readmem.9 \ proc_rwmem.9 proc_writemem.9 MLINKS+=psignal.9 gsignal.9 \ psignal.9 pgsignal.9 \ psignal.9 tdsignal.9 MLINKS+=random.9 arc4rand.9 \ random.9 arc4random.9 \ random.9 read_random.9 \ random.9 read_random_uio.9 \ random.9 srandom.9 MLINKS+=random_harvest.9 random_harvest_direct.9 \ random_harvest.9 random_harvest_fast.9 \ random_harvest.9 random_harvest_queue.9 MLINKS+=ratecheck.9 ppsratecheck.9 MLINKS+=refcount.9 refcount_acquire.9 \ refcount.9 refcount_init.9 \ refcount.9 refcount_release.9 MLINKS+=resource_int_value.9 resource_long_value.9 \ resource_int_value.9 resource_string_value.9 MLINKS+=rman.9 rman_activate_resource.9 \ rman.9 rman_adjust_resource.9 \ rman.9 rman_deactivate_resource.9 \ rman.9 rman_fini.9 \ rman.9 rman_first_free_region.9 \ rman.9 rman_get_bushandle.9 \ rman.9 rman_get_bustag.9 \ rman.9 rman_get_device.9 \ rman.9 rman_get_end.9 \ rman.9 rman_get_flags.9 \ rman.9 rman_get_mapping.9 \ rman.9 rman_get_rid.9 \ rman.9 rman_get_size.9 \ rman.9 rman_get_start.9 \ rman.9 rman_get_virtual.9 \ rman.9 rman_init.9 \ rman.9 rman_init_from_resource.9 \ rman.9 rman_is_region_manager.9 \ rman.9 rman_last_free_region.9 \ rman.9 rman_make_alignment_flags.9 \ rman.9 rman_manage_region.9 \ rman.9 rman_release_resource.9 \ rman.9 rman_reserve_resource.9 \ rman.9 rman_reserve_resource_bound.9 \ rman.9 rman_set_bushandle.9 \ rman.9 rman_set_bustag.9 \ rman.9 rman_set_mapping.9 \ rman.9 rman_set_rid.9 \ rman.9 rman_set_virtual.9 MLINKS+=rmlock.9 rm_assert.9 \ rmlock.9 rm_destroy.9 \ rmlock.9 rm_init.9 \ rmlock.9 rm_init_flags.9 \ rmlock.9 rm_rlock.9 \ rmlock.9 rm_runlock.9 \ rmlock.9 rm_sleep.9 \ rmlock.9 RM_SYSINIT.9 \ rmlock.9 RM_SYSINIT_FLAGS.9 \ rmlock.9 rm_try_rlock.9 \ rmlock.9 rm_wlock.9 \ rmlock.9 rm_wowned.9 \ rmlock.9 rm_wunlock.9 MLINKS+=rtalloc.9 rtalloc1.9 \ rtalloc.9 rtalloc_ign.9 \ rtalloc.9 RT_ADDREF.9 \ rtalloc.9 RT_LOCK.9 \ rtalloc.9 RT_REMREF.9 \ rtalloc.9 RT_RTFREE.9 \ rtalloc.9 RT_UNLOCK.9 \ rtalloc.9 RTFREE_LOCKED.9 \ rtalloc.9 RTFREE.9 \ rtalloc.9 rtfree.9 \ rtalloc.9 rtalloc1_fib.9 \ rtalloc.9 rtalloc_ign_fib.9 \ rtalloc.9 rtalloc_fib.9 MLINKS+=runqueue.9 choosethread.9 \ runqueue.9 procrunnable.9 \ runqueue.9 remrunqueue.9 \ runqueue.9 setrunqueue.9 MLINKS+=rwlock.9 rw_assert.9 \ rwlock.9 rw_destroy.9 \ rwlock.9 rw_downgrade.9 \ rwlock.9 rw_init.9 \ rwlock.9 rw_init_flags.9 \ rwlock.9 rw_initialized.9 \ rwlock.9 rw_rlock.9 \ rwlock.9 rw_runlock.9 \ rwlock.9 rw_unlock.9 \ rwlock.9 rw_sleep.9 \ rwlock.9 RW_SYSINIT.9 \ rwlock.9 RW_SYSINIT_FLAGS.9 \ rwlock.9 rw_try_rlock.9 \ rwlock.9 rw_try_upgrade.9 \ rwlock.9 rw_try_wlock.9 \ rwlock.9 rw_wlock.9 \ rwlock.9 rw_wowned.9 \ rwlock.9 rw_wunlock.9 MLINKS+=sbuf.9 sbuf_bcat.9 \ sbuf.9 sbuf_bcopyin.9 \ sbuf.9 sbuf_bcpy.9 \ sbuf.9 sbuf_cat.9 \ sbuf.9 sbuf_clear.9 \ sbuf.9 sbuf_clear_flags.9 \ sbuf.9 sbuf_copyin.9 \ sbuf.9 sbuf_cpy.9 \ sbuf.9 sbuf_data.9 \ sbuf.9 sbuf_delete.9 \ sbuf.9 sbuf_done.9 \ sbuf.9 sbuf_error.9 \ sbuf.9 sbuf_finish.9 \ sbuf.9 sbuf_get_flags.9 \ sbuf.9 sbuf_hexdump.9 \ sbuf.9 sbuf_len.9 \ sbuf.9 sbuf_new.9 \ sbuf.9 
sbuf_new_auto.9 \ sbuf.9 sbuf_new_for_sysctl.9 \ sbuf.9 sbuf_printf.9 \ sbuf.9 sbuf_putc.9 \ sbuf.9 sbuf_set_drain.9 \ sbuf.9 sbuf_set_flags.9 \ sbuf.9 sbuf_setpos.9 \ sbuf.9 sbuf_start_section.9 \ sbuf.9 sbuf_end_section.9 \ sbuf.9 sbuf_trim.9 \ sbuf.9 sbuf_vprintf.9 MLINKS+=scheduler.9 curpriority_cmp.9 \ scheduler.9 maybe_resched.9 \ scheduler.9 propagate_priority.9 \ scheduler.9 resetpriority.9 \ scheduler.9 roundrobin.9 \ scheduler.9 roundrobin_interval.9 \ scheduler.9 schedclock.9 \ scheduler.9 schedcpu.9 \ scheduler.9 sched_setup.9 \ scheduler.9 setrunnable.9 \ scheduler.9 updatepri.9 MLINKS+=SDT.9 SDT_PROVIDER_DECLARE.9 \ SDT.9 SDT_PROVIDER_DEFINE.9 \ SDT.9 SDT_PROBE_DECLARE.9 \ SDT.9 SDT_PROBE_DEFINE.9 \ SDT.9 SDT_PROBE.9 MLINKS+=securelevel_gt.9 securelevel_ge.9 MLINKS+=selrecord.9 seldrain.9 \ selrecord.9 selwakeup.9 MLINKS+=sema.9 sema_destroy.9 \ sema.9 sema_init.9 \ sema.9 sema_post.9 \ sema.9 sema_timedwait.9 \ sema.9 sema_trywait.9 \ sema.9 sema_value.9 \ sema.9 sema_wait.9 MLINKS+=sf_buf.9 sf_buf_alloc.9 \ sf_buf.9 sf_buf_free.9 \ sf_buf.9 sf_buf_kva.9 \ sf_buf.9 sf_buf_page.9 MLINKS+=sglist.9 sglist_alloc.9 \ sglist.9 sglist_append.9 \ sglist.9 sglist_append_bio.9 \ sglist.9 sglist_append_mbuf.9 \ sglist.9 sglist_append_phys.9 \ sglist.9 sglist_append_sglist.9 \ sglist.9 sglist_append_uio.9 \ sglist.9 sglist_append_user.9 \ sglist.9 sglist_append_vmpages.9 \ sglist.9 sglist_build.9 \ sglist.9 sglist_clone.9 \ sglist.9 sglist_consume_uio.9 \ sglist.9 sglist_count.9 \ sglist.9 sglist_count_vmpages.9 \ sglist.9 sglist_free.9 \ sglist.9 sglist_hold.9 \ sglist.9 sglist_init.9 \ sglist.9 sglist_join.9 \ sglist.9 sglist_length.9 \ sglist.9 sglist_reset.9 \ sglist.9 sglist_slice.9 \ sglist.9 sglist_split.9 MLINKS+=shm_map.9 shm_unmap.9 MLINKS+=signal.9 cursig.9 \ signal.9 execsigs.9 \ signal.9 issignal.9 \ signal.9 killproc.9 \ signal.9 pgsigio.9 \ signal.9 postsig.9 \ signal.9 SETSETNEQ.9 \ signal.9 SETSETOR.9 \ signal.9 SIGADDSET.9 \ signal.9 SIG_CONTSIGMASK.9 \ signal.9 SIGDELSET.9 \ signal.9 SIGEMPTYSET.9 \ signal.9 sigexit.9 \ signal.9 SIGFILLSET.9 \ signal.9 siginit.9 \ signal.9 SIGISEMPTY.9 \ signal.9 SIGISMEMBER.9 \ signal.9 SIGNOTEMPTY.9 \ signal.9 signotify.9 \ signal.9 SIGPENDING.9 \ signal.9 SIGSETAND.9 \ signal.9 SIGSETCANTMASK.9 \ signal.9 SIGSETEQ.9 \ signal.9 SIGSETNAND.9 \ signal.9 SIG_STOPSIGMASK.9 \ signal.9 trapsignal.9 MLINKS+=sleep.9 msleep.9 \ sleep.9 msleep_sbt.9 \ sleep.9 msleep_spin.9 \ sleep.9 msleep_spin_sbt.9 \ sleep.9 pause.9 \ sleep.9 pause_sig.9 \ sleep.9 pause_sbt.9 \ sleep.9 tsleep.9 \ sleep.9 tsleep_sbt.9 \ sleep.9 wakeup.9 \ sleep.9 wakeup_one.9 MLINKS+=sleepqueue.9 init_sleepqueues.9 \ sleepqueue.9 sleepq_abort.9 \ sleepqueue.9 sleepq_add.9 \ sleepqueue.9 sleepq_alloc.9 \ sleepqueue.9 sleepq_broadcast.9 \ sleepqueue.9 sleepq_free.9 \ sleepqueue.9 sleepq_lookup.9 \ sleepqueue.9 sleepq_lock.9 \ sleepqueue.9 sleepq_release.9 \ sleepqueue.9 sleepq_remove.9 \ sleepqueue.9 sleepq_set_timeout.9 \ sleepqueue.9 sleepq_set_timeout_sbt.9 \ sleepqueue.9 sleepq_signal.9 \ sleepqueue.9 sleepq_sleepcnt.9 \ sleepqueue.9 sleepq_timedwait.9 \ sleepqueue.9 sleepq_timedwait_sig.9 \ sleepqueue.9 sleepq_type.9 \ sleepqueue.9 sleepq_wait.9 \ sleepqueue.9 sleepq_wait_sig.9 MLINKS+=socket.9 soabort.9 \ socket.9 soaccept.9 \ socket.9 sobind.9 \ socket.9 socheckuid.9 \ socket.9 soclose.9 \ socket.9 soconnect.9 \ socket.9 socreate.9 \ socket.9 sodisconnect.9 \ socket.9 sodtor_set.9 \ socket.9 sodupsockaddr.9 \ socket.9 sofree.9 \ socket.9 sogetopt.9 \ socket.9 
sohasoutofband.9 \ socket.9 solisten.9 \ socket.9 solisten_proto.9 \ socket.9 solisten_proto_check.9 \ socket.9 sonewconn.9 \ socket.9 sooptcopyin.9 \ socket.9 sooptcopyout.9 \ socket.9 sopoll.9 \ socket.9 sopoll_generic.9 \ socket.9 soreceive.9 \ socket.9 soreceive_dgram.9 \ socket.9 soreceive_generic.9 \ socket.9 soreceive_stream.9 \ socket.9 soreserve.9 \ socket.9 sorflush.9 \ socket.9 sosend.9 \ socket.9 sosend_dgram.9 \ socket.9 sosend_generic.9 \ socket.9 sosetopt.9 \ socket.9 soshutdown.9 \ socket.9 sotoxsocket.9 \ socket.9 soupcall_clear.9 \ socket.9 soupcall_set.9 \ socket.9 sowakeup.9 MLINKS+=stack.9 stack_copy.9 \ stack.9 stack_create.9 \ stack.9 stack_destroy.9 \ stack.9 stack_print.9 \ stack.9 stack_print_ddb.9 \ stack.9 stack_print_short.9 \ stack.9 stack_print_short_ddb.9 \ stack.9 stack_put.9 \ stack.9 stack_save.9 \ stack.9 stack_sbuf_print.9 \ stack.9 stack_sbuf_print_ddb.9 \ stack.9 stack_zero.9 MLINKS+=store.9 subyte.9 \ store.9 suword.9 \ store.9 suword16.9 \ store.9 suword32.9 \ store.9 suword64.9 MLINKS+=swi.9 swi_add.9 \ swi.9 swi_remove.9 \ swi.9 swi_sched.9 MLINKS+=sx.9 sx_assert.9 \ sx.9 sx_destroy.9 \ sx.9 sx_downgrade.9 \ sx.9 sx_init.9 \ sx.9 sx_init_flags.9 \ sx.9 sx_sleep.9 \ sx.9 sx_slock.9 \ sx.9 sx_slock_sig.9 \ sx.9 sx_sunlock.9 \ sx.9 SX_SYSINIT.9 \ sx.9 SX_SYSINIT_FLAGS.9 \ sx.9 sx_try_slock.9 \ sx.9 sx_try_upgrade.9 \ sx.9 sx_try_xlock.9 \ sx.9 sx_unlock.9 \ sx.9 sx_xholder.9 \ sx.9 sx_xlock.9 \ sx.9 sx_xlock_sig.9 \ sx.9 sx_xlocked.9 \ sx.9 sx_xunlock.9 MLINKS+=syscall_helper_register.9 syscall_helper_unregister.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT_F.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_F.9 MLINKS+=sysctl.9 SYSCTL_DECL.9 \ sysctl.9 SYSCTL_ADD_INT.9 \ sysctl.9 SYSCTL_ADD_LONG.9 \ sysctl.9 SYSCTL_ADD_NODE.9 \ sysctl.9 SYSCTL_ADD_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_ADD_OPAQUE.9 \ sysctl.9 SYSCTL_ADD_PROC.9 \ sysctl.9 SYSCTL_ADD_QUAD.9 \ sysctl.9 SYSCTL_ADD_ROOT_NODE.9 \ sysctl.9 SYSCTL_ADD_S8.9 \ sysctl.9 SYSCTL_ADD_S16.9 \ sysctl.9 SYSCTL_ADD_S32.9 \ sysctl.9 SYSCTL_ADD_S64.9 \ sysctl.9 SYSCTL_ADD_STRING.9 \ sysctl.9 SYSCTL_ADD_STRUCT.9 \ sysctl.9 SYSCTL_ADD_U8.9 \ sysctl.9 SYSCTL_ADD_U16.9 \ sysctl.9 SYSCTL_ADD_U32.9 \ sysctl.9 SYSCTL_ADD_U64.9 \ sysctl.9 SYSCTL_ADD_UAUTO.9 \ sysctl.9 SYSCTL_ADD_UINT.9 \ sysctl.9 SYSCTL_ADD_ULONG.9 \ sysctl.9 SYSCTL_ADD_UQUAD.9 \ sysctl.9 SYSCTL_CHILDREN.9 \ sysctl.9 SYSCTL_STATIC_CHILDREN.9 \ sysctl.9 SYSCTL_NODE_CHILDREN.9 \ sysctl.9 SYSCTL_PARENT.9 \ sysctl.9 SYSCTL_INT.9 \ sysctl.9 SYSCTL_INT_WITH_LABEL.9 \ sysctl.9 SYSCTL_LONG.9 \ sysctl.9 SYSCTL_NODE.9 \ sysctl.9 SYSCTL_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_OPAQUE.9 \ sysctl.9 SYSCTL_PROC.9 \ sysctl.9 SYSCTL_QUAD.9 \ sysctl.9 SYSCTL_ROOT_NODE.9 \ sysctl.9 SYSCTL_S8.9 \ sysctl.9 SYSCTL_S16.9 \ sysctl.9 SYSCTL_S32.9 \ sysctl.9 SYSCTL_S64.9 \ sysctl.9 SYSCTL_STRING.9 \ sysctl.9 SYSCTL_STRUCT.9 \ sysctl.9 SYSCTL_U8.9 \ sysctl.9 SYSCTL_U16.9 \ sysctl.9 SYSCTL_U32.9 \ sysctl.9 SYSCTL_U64.9 \ sysctl.9 SYSCTL_UINT.9 \ sysctl.9 SYSCTL_ULONG.9 \ sysctl.9 SYSCTL_UQUAD.9 MLINKS+=sysctl_add_oid.9 sysctl_move_oid.9 \ sysctl_add_oid.9 sysctl_remove_oid.9 \ sysctl_add_oid.9 sysctl_remove_name.9 MLINKS+=sysctl_ctx_init.9 sysctl_ctx_entry_add.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_del.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_find.9 \ sysctl_ctx_init.9 sysctl_ctx_free.9 MLINKS+=SYSINIT.9 SYSUNINIT.9 MLINKS+=taskqueue.9 TASK_INIT.9 \ taskqueue.9 
TASK_INITIALIZER.9 \ taskqueue.9 taskqueue_block.9 \ taskqueue.9 taskqueue_cancel.9 \ taskqueue.9 taskqueue_cancel_timeout.9 \ taskqueue.9 taskqueue_create.9 \ taskqueue.9 taskqueue_create_fast.9 \ taskqueue.9 TASKQUEUE_DECLARE.9 \ taskqueue.9 TASKQUEUE_DEFINE.9 \ taskqueue.9 TASKQUEUE_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_drain.9 \ taskqueue.9 taskqueue_drain_all.9 \ taskqueue.9 taskqueue_drain_timeout.9 \ taskqueue.9 taskqueue_enqueue.9 \ taskqueue.9 taskqueue_enqueue_timeout.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_free.9 \ taskqueue.9 taskqueue_member.9 \ taskqueue.9 taskqueue_run.9 \ taskqueue.9 taskqueue_set_callback.9 \ taskqueue.9 taskqueue_start_threads.9 \ taskqueue.9 taskqueue_start_threads_pinned.9 \ taskqueue.9 taskqueue_unblock.9 \ taskqueue.9 TIMEOUT_TASK_INIT.9 MLINKS+=tcp_functions.9 register_tcp_functions.9 \ tcp_functions.9 register_tcp_functions_as_name.9 \ tcp_functions.9 register_tcp_functions_as_names.9 \ tcp_functions.9 deregister_tcp_functions.9 MLINKS+=time.9 boottime.9 \ time.9 time_second.9 \ time.9 time_uptime.9 MLINKS+=timeout.9 callout.9 \ timeout.9 callout_active.9 \ timeout.9 callout_async_drain.9 \ timeout.9 callout_deactivate.9 \ timeout.9 callout_drain.9 \ timeout.9 callout_handle_init.9 \ timeout.9 callout_init.9 \ timeout.9 callout_init_mtx.9 \ timeout.9 callout_init_rm.9 \ timeout.9 callout_init_rw.9 \ timeout.9 callout_pending.9 \ timeout.9 callout_reset.9 \ timeout.9 callout_reset_curcpu.9 \ timeout.9 callout_reset_on.9 \ timeout.9 callout_reset_sbt.9 \ timeout.9 callout_reset_sbt_curcpu.9 \ timeout.9 callout_reset_sbt_on.9 \ timeout.9 callout_schedule.9 \ timeout.9 callout_schedule_curcpu.9 \ timeout.9 callout_schedule_on.9 \ timeout.9 callout_schedule_sbt.9 \ timeout.9 callout_schedule_sbt_curcpu.9 \ timeout.9 callout_schedule_sbt_on.9 \ timeout.9 callout_stop.9 \ timeout.9 callout_when.9 \ timeout.9 untimeout.9 MLINKS+=ucred.9 cred_update_thread.9 \ ucred.9 crcopy.9 \ ucred.9 crcopysafe.9 \ ucred.9 crdup.9 \ ucred.9 crfree.9 \ ucred.9 crget.9 \ ucred.9 crhold.9 \ ucred.9 crsetgroups.9 \ ucred.9 cru2x.9 MLINKS+=uidinfo.9 uifind.9 \ uidinfo.9 uifree.9 \ uidinfo.9 uihashinit.9 \ uidinfo.9 uihold.9 MLINKS+=uio.9 uiomove.9 \ uio.9 uiomove_frombuf.9 \ uio.9 uiomove_nofault.9 .if ${MK_USB} != "no" MAN+= usbdi.9 MLINKS+=usbdi.9 usbd_do_request.9 \ usbdi.9 usbd_do_request_flags.9 \ usbdi.9 usbd_errstr.9 \ usbdi.9 usbd_lookup_id_by_info.9 \ usbdi.9 usbd_lookup_id_by_uaa.9 \ usbdi.9 usbd_transfer_clear_stall.9 \ usbdi.9 usbd_transfer_drain.9 \ usbdi.9 usbd_transfer_pending.9 \ usbdi.9 usbd_transfer_poll.9 \ usbdi.9 usbd_transfer_setup.9 \ usbdi.9 usbd_transfer_start.9 \ usbdi.9 usbd_transfer_stop.9 \ usbdi.9 usbd_transfer_submit.9 \ usbdi.9 usbd_transfer_unsetup.9 \ usbdi.9 usbd_xfer_clr_flag.9 \ usbdi.9 usbd_xfer_frame_data.9 \ usbdi.9 usbd_xfer_frame_len.9 \ usbdi.9 usbd_xfer_get_frame.9 \ usbdi.9 usbd_xfer_get_priv.9 \ usbdi.9 usbd_xfer_is_stalled.9 \ usbdi.9 usbd_xfer_max_framelen.9 \ usbdi.9 usbd_xfer_max_frames.9 \ usbdi.9 usbd_xfer_max_len.9 \ usbdi.9 usbd_xfer_set_flag.9 \ usbdi.9 usbd_xfer_set_frame_data.9 \ usbdi.9 usbd_xfer_set_frame_len.9 \ usbdi.9 usbd_xfer_set_frame_offset.9 \ usbdi.9 usbd_xfer_set_frames.9 \ usbdi.9 usbd_xfer_set_interval.9 \ usbdi.9 usbd_xfer_set_priv.9 \ usbdi.9 usbd_xfer_set_stall.9 \ usbdi.9 usbd_xfer_set_timeout.9 \ usbdi.9 usbd_xfer_softc.9 \ usbdi.9 usbd_xfer_state.9 \ usbdi.9 usbd_xfer_status.9 \ usbdi.9 usb_fifo_alloc_buffer.9 \ usbdi.9 
usb_fifo_attach.9 \ usbdi.9 usb_fifo_detach.9 \ usbdi.9 usb_fifo_free_buffer.9 \ usbdi.9 usb_fifo_get_data.9 \ usbdi.9 usb_fifo_get_data_buffer.9 \ usbdi.9 usb_fifo_get_data_error.9 \ usbdi.9 usb_fifo_get_data_linear.9 \ usbdi.9 usb_fifo_put_bytes_max.9 \ usbdi.9 usb_fifo_put_data.9 \ usbdi.9 usb_fifo_put_data_buffer.9 \ usbdi.9 usb_fifo_put_data_error.9 \ usbdi.9 usb_fifo_put_data_linear.9 \ usbdi.9 usb_fifo_reset.9 \ usbdi.9 usb_fifo_softc.9 \ usbdi.9 usb_fifo_wakeup.9 .endif MLINKS+=vcount.9 count_dev.9 MLINKS+=vfsconf.9 vfs_modevent.9 \ vfsconf.9 vfs_register.9 \ vfsconf.9 vfs_unregister.9 MLINKS+=vfs_getopt.9 vfs_copyopt.9 \ vfs_getopt.9 vfs_filteropt.9 \ vfs_getopt.9 vfs_flagopt.9 \ vfs_getopt.9 vfs_getopts.9 \ vfs_getopt.9 vfs_scanopt.9 \ vfs_getopt.9 vfs_setopt.9 \ vfs_getopt.9 vfs_setopt_part.9 \ vfs_getopt.9 vfs_setopts.9 MLINKS+=vhold.9 vdrop.9 \ vhold.9 vdropl.9 \ vhold.9 vholdl.9 MLINKS+=vmem.9 vmem_add.9 \ vmem.9 vmem_alloc.9 \ vmem.9 vmem_create.9 \ vmem.9 vmem_destroy.9 \ vmem.9 vmem_free.9 \ vmem.9 vmem_xalloc.9 \ vmem.9 vmem_xfree.9 MLINKS+=vm_map_lock.9 vm_map_lock_downgrade.9 \ vm_map_lock.9 vm_map_lock_read.9 \ vm_map_lock.9 vm_map_lock_upgrade.9 \ vm_map_lock.9 vm_map_trylock.9 \ vm_map_lock.9 vm_map_trylock_read.9 \ vm_map_lock.9 vm_map_unlock.9 \ vm_map_lock.9 vm_map_unlock_read.9 MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9 MLINKS+=vm_map_max.9 vm_map_min.9 \ vm_map_max.9 vm_map_pmap.9 MLINKS+=vm_map_stack.9 vm_map_growstack.9 MLINKS+=vm_map_wire.9 vm_map_unwire.9 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \ vm_page_bits.9 vm_page_dirty.9 \ vm_page_bits.9 vm_page_is_valid.9 \ vm_page_bits.9 vm_page_set_invalid.9 \ vm_page_bits.9 vm_page_set_validclean.9 \ vm_page_bits.9 vm_page_test_dirty.9 \ vm_page_bits.9 vm_page_undirty.9 \ vm_page_bits.9 vm_page_zero_invalid.9 MLINKS+=vm_page_busy.9 vm_page_busied.9 \ vm_page_busy.9 vm_page_busy_downgrade.9 \ vm_page_busy.9 vm_page_busy_sleep.9 \ vm_page_busy.9 vm_page_sbusied.9 \ vm_page_busy.9 vm_page_sbusy.9 \ vm_page_busy.9 vm_page_sleep_if_busy.9 \ vm_page_busy.9 vm_page_sunbusy.9 \ vm_page_busy.9 vm_page_trysbusy.9 \ vm_page_busy.9 vm_page_tryxbusy.9 \ vm_page_busy.9 vm_page_xbusied.9 \ vm_page_busy.9 vm_page_xbusy.9 \ vm_page_busy.9 vm_page_xunbusy.9 \ vm_page_busy.9 vm_page_assert_sbusied.9 \ vm_page_busy.9 vm_page_assert_unbusied.9 \ vm_page_busy.9 vm_page_assert_xbusied.9 MLINKS+=vm_page_aflag.9 vm_page_aflag_clear.9 \ vm_page_aflag.9 vm_page_aflag_set.9 \ vm_page_aflag.9 vm_page_reference.9 MLINKS+=vm_page_free.9 vm_page_free_toq.9 \ vm_page_free.9 vm_page_free_zero.9 \ vm_page_free.9 vm_page_try_to_free.9 MLINKS+=vm_page_hold.9 vm_page_unhold.9 MLINKS+=vm_page_insert.9 vm_page_remove.9 MLINKS+=vm_page_wire.9 vm_page_unwire.9 MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9 MLINKS+=VOP_ATTRIB.9 VOP_GETATTR.9 \ VOP_ATTRIB.9 VOP_SETATTR.9 MLINKS+=VOP_CREATE.9 VOP_MKDIR.9 \ VOP_CREATE.9 VOP_MKNOD.9 \ VOP_CREATE.9 VOP_SYMLINK.9 MLINKS+=VOP_GETPAGES.9 VOP_PUTPAGES.9 MLINKS+=VOP_INACTIVE.9 VOP_RECLAIM.9 MLINKS+=VOP_LOCK.9 vn_lock.9 \ VOP_LOCK.9 VOP_ISLOCKED.9 \ VOP_LOCK.9 VOP_UNLOCK.9 MLINKS+=VOP_OPENCLOSE.9 VOP_CLOSE.9 \ VOP_OPENCLOSE.9 VOP_OPEN.9 MLINKS+=VOP_RDWR.9 VOP_READ.9 \ VOP_RDWR.9 VOP_WRITE.9 MLINKS+=VOP_REMOVE.9 VOP_RMDIR.9 MLINKS+=vnet.9 vimage.9 MLINKS+=vref.9 VREF.9 \ vref.9 vrefl.9 MLINKS+=vrele.9 vput.9 \ vrele.9 vunref.9 MLINKS+=vslock.9 vsunlock.9 MLINKS+=zone.9 uma.9 \ zone.9 uma_zalloc.9 \ zone.9 uma_zalloc_arg.9 \ zone.9 uma_zalloc_domain.9 \ zone.9 uma_zcreate.9 \ zone.9 uma_zdestroy.9 \ zone.9 uma_zfree.9 \ 
zone.9 uma_zfree_arg.9 \ zone.9 uma_zfree_domain.9 \ zone.9 uma_zone_get_cur.9 \ zone.9 uma_zone_get_max.9 \ zone.9 uma_zone_set_max.9 \ zone.9 uma_zone_set_warning.9 \ zone.9 uma_zone_set_maxaction.9 .include Index: head/share/man/man9/contigmalloc.9 =================================================================== --- head/share/man/man9/contigmalloc.9 (revision 339926) +++ head/share/man/man9/contigmalloc.9 (revision 339927) @@ -1,139 +1,163 @@ .\" .\" Copyright (c) 2004 Joseph Koshy .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd January 29, 2015 +.Dd October 30, 2018 .Dt CONTIGMALLOC 9 .Os .Sh NAME .Nm contigmalloc , contigfree .Nd manage contiguous kernel physical memory .Sh SYNOPSIS .In sys/types.h .In sys/malloc.h .Ft "void *" .Fo contigmalloc .Fa "unsigned long size" .Fa "struct malloc_type *type" .Fa "int flags" .Fa "vm_paddr_t low" .Fa "vm_paddr_t high" .Fa "unsigned long alignment" .Fa "vm_paddr_t boundary" .Fc .Ft void .Fo contigfree .Fa "void *addr" .Fa "unsigned long size" .Fa "struct malloc_type *type" .Fc +.In sys/param.h +.In sys/domainset.h +.Ft "void *" +.Fo contigmalloc_domainset +.Fa "unsigned long size" +.Fa "struct malloc_type *type" +.Fa "struct domainset *ds" +.Fa "int flags" +.Fa "vm_paddr_t low" +.Fa "vm_paddr_t high" +.Fa "unsigned long alignment" +.Fa "vm_paddr_t boundary" +.Fc .Sh DESCRIPTION The .Fn contigmalloc function allocates .Fa size bytes of contiguous physical memory that is aligned to .Fa alignment bytes, and which does not cross a boundary of .Fa boundary bytes. If successful, the allocation will reside between physical addresses .Fa low and .Fa high . The returned pointer points to a wired kernel virtual address range of .Fa size bytes allocated from the kernel virtual address (KVA) map. .Pp The +.Fn contigmalloc_domainset +variant allows the caller to additionally specify a +.Xr numa 4 +domain selection policy. +See +.Xr domainset 9 +for some example policies. +.Pp +The .Fa flags parameter modifies .Fn contigmalloc Ns 's behaviour as follows: .Bl -tag -width indent .It Dv M_ZERO Causes the allocated physical memory to be zero filled. .It Dv M_NOWAIT Causes .Fn contigmalloc to return .Dv NULL if the request cannot be immediately fulfilled due to resource shortage. 
.El .Pp Other flags (if present) are ignored. .Pp The .Fn contigfree function deallocates memory allocated by a previous call to -.Fn contigmalloc . +.Fn contigmalloc +or +.Fn contigmalloc_domainset . .Sh IMPLEMENTATION NOTES The .Fn contigmalloc function does not sleep waiting for memory resources to be freed up, but instead actively reclaims pages before giving up. However, unless .Dv M_NOWAIT is specified, it may select a page for reclamation that must first be written to backing storage, causing it to sleep. .Pp The .Fn contigfree function does not accept .Dv NULL as an address input, unlike .Xr free 9 . .Sh RETURN VALUES The .Fn contigmalloc function returns a kernel virtual address if allocation succeeds, or .Dv NULL otherwise. .Sh EXAMPLES .Bd -literal void *p; p = contigmalloc(8192, M_DEVBUF, M_ZERO, 0, (1L << 22), 32 * 1024, 1024 * 1024); .Ed .Pp Ask for 8192 bytes of zero-filled memory residing between physical address 0 and 4194303 inclusive, aligned to a 32K boundary and not crossing a 1M address boundary. .Sh DIAGNOSTICS The .Fn contigmalloc function will panic if .Fa size is zero, or if .Fa alignment or .Fa boundary is not a power of two. .Sh SEE ALSO .Xr malloc 9 , .Xr memguard 9 Index: head/share/man/man9/domainset.9 =================================================================== --- head/share/man/man9/domainset.9 (revision 339926) +++ head/share/man/man9/domainset.9 (revision 339927) @@ -1,133 +1,152 @@ .\" Copyright (c) 2018 Jeffrey Roberson .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd October 20, 2018 +.Dd October 30, 2018 .Dt DOMAINSET 9 .Os .Sh NAME .Nm domainset(9) .Nd domainset functions and operation .Sh SYNOPSIS .In sys/_domainset.h .In sys/domainset.h .\" .Bd -literal -offset indent struct domainset { domainset_t ds_mask; uint16_t ds_policy; domainid_t ds_prefer; ... }; .Ed .Pp +.Ft struct domainset * +.Fn DOMAINSET_FIXED domain +.Ft struct domainset * .Fn DOMAINSET_RR +.Ft struct domainset * .Fn DOMAINSET_PREF domain .Ft struct domainset * .Fn domainset_create "const struct domainset *key" .Ft int .Fn sysctl_handle_domainset "SYSCTL_HANDLER_ARGS" .Sh DESCRIPTION The .Nm API provides memory domain allocation policy for NUMA machines. 
Each .Vt domainset contains a bitmask of allowed domains, an integer policy, and an optional preferred domain. Together, these specify a search order for memory allocations as well as the ability to restrict threads and objects to a subset of available memory domains for system partitioning and resource management. .Pp Every thread in the system and optionally every .Vt vm_object_t , which is used to represent files and other memory sources, has a reference to a .Vt struct domainset . The domainset associated with the object is consulted first and the system falls back to the thread policy if none exists. .Pp The allocation policy has the following possible values: .Bl -tag -width "foo" .It Dv DOMAINSET_POLICY_ROUNDROBIN Memory is allocated from each domain in the mask in a round-robin fashion. This distributes bandwidth evenly among available domains. This policy can specify a single domain for a fixed allocation. .It Dv DOMAINSET_POLICY_FIRSTTOUCH Memory is allocated from the node that it is first accessed on. Allocation falls back to round-robin if the current domain is not in the allowed set or is out of memory. This policy optimizes for locality but may give pessimal results if the memory is accessed from many CPUs that are not in the local domain. .It Dv DOMAINSET_POLICY_PREFER Memory is allocated from the node in the .Vt prefer member. The preferred node must be set in the allowed mask. If the preferred node is out of memory the allocation falls back to round-robin among allowed sets. .It Dv DOMAINSET_POLICY_INTERLEAVE Memory is allocated in a striped fashion with multiple pages allocated to each domain in the set according to the offset within the object. The strip width is object dependent and may be as large as a super-page (2MB on amd64). This gives good distribution among memory domains while keeping system efficiency higher and is preferential to round-robin for general use. .El .Pp The +.Fn DOMAINSET_FIXED , .Fn DOMAINSET_RR and .Fn DOMAINSET_PREF -provide pointers to global pre-defined policies for use when the +macros provide pointers to global pre-defined policies for use when the desired policy is known at compile time. +.Fn DOMAINSET_FIXED +is a policy which only permits allocations from the specified domain. +.Fn DOMAINSET_RR +provides round-robin selection among all domains in the system. +The +.Fn DOMAINSET_PREF +policies attempt allocation from the specified domain, but unlike +.Fn DOMAINSET_FIXED +will fall back to other domains to satisfy the request. +These policies should be used in preference to +.Fn DOMAINSET_FIXED +to avoid blocking indefinitely on a +.Dv M_WAITOK +request. The .Fn domainset_create function takes a partially filled in domainset as a key and returns a valid domainset or NULL. It is critical that consumers not use domainsets that have not been returned by this function. .Vt domainset is an immutable type that is shared among all matching keys and must not be modified after return. .Pp The .Fn sysctl_handle_domainset function is provided as a convenience for modifying or viewing domainsets that are not accessible via .Xr cpuset 2 . It is intended for use with .Xr sysctl 9 . .Pp .Sh SEE ALSO .Xr cpuset 1 , .Xr cpuset 2 , .Xr cpuset_setdomain 2 , .Xr bitset 9 .Sh HISTORY .In sys/domainset.h first appeared in .Fx 12.0 . 
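A minimal illustrative sketch (not part of this revision) of how the policies above combine with the contigmalloc_domainset() interface documented earlier: it repeats the request from the contigmalloc(9) EXAMPLES section, but asks that the pages come from NUMA domain 0 when possible. The use of M_DEVBUF and domain 0 is an assumption made for the example only.

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>

static void *
example_contig_alloc(void)
{
	void *p;

	/*
	 * DOMAINSET_PREF(0) tries NUMA domain 0 first and, unlike
	 * DOMAINSET_FIXED(0), falls back to the other allowed domains
	 * if domain 0 cannot satisfy the request.
	 */
	p = contigmalloc_domainset(8192, M_DEVBUF, DOMAINSET_PREF(0),
	    M_ZERO, 0, (1L << 22), 32 * 1024, 1024 * 1024);

	/* NULL is returned on failure, as with contigmalloc(). */
	return (p);
}

When the buffer is no longer needed it is released with contigfree(p, 8192, M_DEVBUF), exactly as for a plain contigmalloc() allocation.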
Index: head/share/man/man9/malloc.9 =================================================================== --- head/share/man/man9/malloc.9 (revision 339926) +++ head/share/man/man9/malloc.9 (revision 339927) @@ -1,316 +1,323 @@ .\" .\" Copyright (c) 1996 The NetBSD Foundation, Inc. .\" All rights reserved. .\" .\" This code is derived from software contributed to The NetBSD Foundation .\" by Paul Kranenburg. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" $NetBSD: malloc.9,v 1.3 1996/11/11 00:05:11 lukem Exp $ .\" $FreeBSD$ .\" -.Dd June 13, 2018 +.Dd October 30, 2018 .Dt MALLOC 9 .Os .Sh NAME .Nm malloc , .Nm free , .Nm realloc , .Nm reallocf , .Nm MALLOC_DEFINE , .Nm MALLOC_DECLARE .Nd kernel memory management routines .Sh SYNOPSIS .In sys/types.h .In sys/malloc.h .Ft void * .Fn malloc "size_t size" "struct malloc_type *type" "int flags" .Ft void * -.Fn malloc_domain "size_t size" "struct malloc_type *type" "int domain" "int flags" -.Ft void * .Fn mallocarray "size_t nmemb" "size_t size" "struct malloc_type *type" "int flags" .Ft void .Fn free "void *addr" "struct malloc_type *type" -.Ft void -.Fn free_domain "void *addr" "struct malloc_type *type" .Ft void * .Fn realloc "void *addr" "size_t size" "struct malloc_type *type" "int flags" .Ft void * .Fn reallocf "void *addr" "size_t size" "struct malloc_type *type" "int flags" .Fn MALLOC_DECLARE type .In sys/param.h .In sys/malloc.h .In sys/kernel.h .Fn MALLOC_DEFINE type shortdesc longdesc +.In sys/param.h +.In sys/domainset.h +.Ft void * +.Fn malloc_domainset "size_t size" "struct malloc_type *type" "struct domainset *ds" "int flags" +.Ft void +.Fn free_domain "void *addr" "struct malloc_type *type" .Sh DESCRIPTION The .Fn malloc function allocates uninitialized memory in kernel address space for an object whose size is specified by .Fa size . .Pp The -.Fn malloc_domain -variant allocates the object from the specified memory domain. Memory allocated -with this function should be returned with -.Fn free_domain . +.Fn malloc_domainset +variant allocates memory from a specific +.Xr numa 4 +domain using the specified domain selection policy. See -.Xr numa 9 for more details. +.Xr domainset 9 +for some example policies. 
+Memory allocated with this function should be returned with +.Fn free_domain . .Pp The .Fn mallocarray function allocates uninitialized memory in kernel address space for an array of .Fa nmemb entries whose size is specified by .Fa size . .Pp The .Fn free function releases memory at address .Fa addr that was previously allocated by .Fn malloc for re-use. The memory is not zeroed. If .Fa addr is .Dv NULL , then .Fn free does nothing. .Pp The .Fn realloc function changes the size of the previously allocated memory referenced by .Fa addr to .Fa size bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. Note that the returned value may differ from .Fa addr . If the requested memory cannot be allocated, .Dv NULL is returned and the memory referenced by .Fa addr is valid and unchanged. If .Fa addr is .Dv NULL , the .Fn realloc function behaves identically to .Fn malloc for the specified size. .Pp The .Fn reallocf function is identical to .Fn realloc except that it will free the passed pointer when the requested memory cannot be allocated. .Pp Unlike its standard C library counterpart .Pq Xr malloc 3 , the kernel version takes two more arguments. The .Fa flags argument further qualifies .Fn malloc Ns 's operational characteristics as follows: .Bl -tag -width indent .It Dv M_ZERO Causes the allocated memory to be set to all zeros. .It Dv M_NODUMP For allocations greater than page size, causes the allocated memory to be excluded from kernel core dumps. .It Dv M_NOWAIT Causes .Fn malloc , .Fn realloc , and .Fn reallocf to return .Dv NULL if the request cannot be immediately fulfilled due to resource shortage. Note that .Dv M_NOWAIT is required when running in an interrupt context. .It Dv M_WAITOK Indicates that it is OK to wait for resources. If the request cannot be immediately fulfilled, the current process is put to sleep to wait for resources to be released by other processes. The .Fn malloc , .Fn mallocarray , .Fn realloc , and .Fn reallocf functions cannot return .Dv NULL if .Dv M_WAITOK is specified. If the multiplication of .Fa nmemb and .Fa size would cause an integer overflow, the .Fn mallocarray function induces a panic. .It Dv M_USE_RESERVE Indicates that the system can use its reserve of memory to satisfy the request. This option should only be used in combination with .Dv M_NOWAIT when an allocation failure cannot be tolerated by the caller without catastrophic effects on the system. .It Dv M_EXEC Indicates that the system should allocate executable memory. If this flag is not set, the system will not allocate executable memory. Not all platforms enforce a distinction between executable and non-executable memory. .El .Pp Exactly one of either .Dv M_WAITOK or .Dv M_NOWAIT must be specified. .Pp The .Fa type argument is used to perform statistics on memory usage, and for basic sanity checks. It can be used to identify multiple allocations. The statistics can be examined by .Sq vmstat -m . .Pp A .Fa type is defined using .Vt "struct malloc_type" via the .Fn MALLOC_DECLARE and .Fn MALLOC_DEFINE macros. .Bd -literal -offset indent /* sys/something/foo_extern.h */ MALLOC_DECLARE(M_FOOBUF); /* sys/something/foo_main.c */ MALLOC_DEFINE(M_FOOBUF, "foobuffers", "Buffers to foo data into the ether"); /* sys/something/foo_subr.c */ \&... buf = malloc(sizeof(*buf), M_FOOBUF, M_NOWAIT); .Ed .Pp In order to use .Fn MALLOC_DEFINE , one must include .In sys/param.h (instead of .In sys/types.h ) and .In sys/kernel.h . 
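As a hedged sketch (not part of this revision), the new malloc_domainset() and free_domain() interfaces from the synopsis can be combined with a malloc type declared as in the example above; M_FOOBUF, the round-robin policy, and the PAGE_SIZE request are illustrative choices only.

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/malloc.h>

/* Assumed to be defined elsewhere as in the MALLOC_DEFINE example. */
MALLOC_DECLARE(M_FOOBUF);

static void
example_domainset_malloc(void)
{
	void *buf;

	/*
	 * Allocate using the pre-defined round-robin policy, which
	 * selects among all memory domains in the system.
	 */
	buf = malloc_domainset(PAGE_SIZE, M_FOOBUF, DOMAINSET_RR(),
	    M_WAITOK | M_ZERO);

	/* ... use buf ... */

	/* Memory from malloc_domainset() is returned with free_domain(). */
	free_domain(buf, M_FOOBUF);
}

As with malloc(), exactly one of M_WAITOK or M_NOWAIT must be supplied, and an M_WAITOK request cannot return NULL.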
.Sh CONTEXT .Fn malloc , .Fn realloc and .Fn reallocf may not be called from fast interrupts handlers. When called from threaded interrupts, .Fa flags must contain .Dv M_NOWAIT . .Pp .Fn malloc , .Fn realloc and .Fn reallocf may sleep when called with .Dv M_WAITOK . .Fn free never sleeps. However, .Fn malloc , .Fn realloc , .Fn reallocf and .Fn free may not be called in a critical section or while holding a spin lock. .Pp Any calls to .Fn malloc (even with .Dv M_NOWAIT ) or .Fn free when holding a .Xr vnode 9 interlock, will cause a LOR (Lock Order Reversal) due to the intertwining of VM Objects and Vnodes. .Sh IMPLEMENTATION NOTES The memory allocator allocates memory in chunks that have size a power of two for requests up to the size of a page of memory. For larger requests, one or more pages is allocated. While it should not be relied upon, this information may be useful for optimizing the efficiency of memory use. .Sh RETURN VALUES The .Fn malloc , .Fn realloc , and .Fn reallocf functions return a kernel virtual address that is suitably aligned for storage of any type of object, or .Dv NULL if the request could not be satisfied (implying that .Dv M_NOWAIT was set). .Sh DIAGNOSTICS A kernel compiled with the .Dv INVARIANTS configuration option attempts to detect memory corruption caused by such things as writing outside the allocated area and imbalanced calls to the .Fn malloc and .Fn free functions. Failing consistency checks will cause a panic or a system console message. .Sh SEE ALSO +.Xr numa 4 , .Xr vmstat 8 , .Xr contigmalloc 9 , +.Xr domainset 9 , .Xr memguard 9 , .Xr vnode 9 Index: head/sys/dev/hwpmc/hwpmc_logging.c =================================================================== --- head/sys/dev/hwpmc/hwpmc_logging.c (revision 339926) +++ head/sys/dev/hwpmc/hwpmc_logging.c (revision 339927) @@ -1,1294 +1,1296 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2007 Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * Copyright (c) 2018 Matthew Macy * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ /* * Logging code for hwpmc(4) */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #define curdomain PCPU_GET(domain) /* * Sysctl tunables */ SYSCTL_DECL(_kern_hwpmc); /* * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers. */ static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; #if (__FreeBSD_version < 1100000) TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size); #endif SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_RDTUN, &pmclog_buffer_size, 0, "size of log buffers in kilobytes"); /* * kern.hwpmc.nbuffer -- number of global log buffers */ static int pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; #if (__FreeBSD_version < 1100000) TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers_pcpu); #endif SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers_pcpu, CTLFLAG_RDTUN, &pmc_nlogbuffers_pcpu, 0, "number of log buffers per cpu"); /* * Global log buffer list and associated spin lock. */ static struct mtx pmc_kthread_mtx; /* sleep lock */ #define PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do { \ (D)->plb_fence = ((char *) (buf)) + 1024*pmclog_buffer_size; \ (D)->plb_base = (D)->plb_ptr = ((char *) (buf)); \ (D)->plb_domain = domain; \ } while (0) #define PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do { \ (D)->plb_ptr = (D)->plb_base; \ } while (0) /* * Log file record constructors. */ #define _PMCLOG_TO_HEADER(T,L) \ ((PMCLOG_HEADER_MAGIC << 24) | \ (PMCLOG_TYPE_ ## T << 16) | \ ((L) & 0xFFFF)) /* reserve LEN bytes of space and initialize the entry header */ #define _PMCLOG_RESERVE_SAFE(PO,TYPE,LEN,ACTION, TSC) do { \ uint32_t *_le; \ int _len = roundup((LEN), sizeof(uint32_t)); \ struct pmclog_header *ph; \ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \ ACTION; \ } \ ph = (struct pmclog_header *)_le; \ ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \ ph->pl_tsc = (TSC); \ _le += sizeof(*ph)/4 /* skip over timestamp */ /* reserve LEN bytes of space and initialize the entry header */ #define _PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do { \ uint32_t *_le; \ int _len = roundup((LEN), sizeof(uint32_t)); \ uint64_t tsc; \ struct pmclog_header *ph; \ tsc = pmc_rdtsc(); \ spinlock_enter(); \ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \ spinlock_exit(); \ ACTION; \ } \ ph = (struct pmclog_header *)_le; \ ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \ ph->pl_tsc = tsc; \ _le += sizeof(*ph)/4 /* skip over timestamp */ #define PMCLOG_RESERVE_SAFE(P,T,L,TSC) _PMCLOG_RESERVE_SAFE(P,T,L,return,TSC) #define PMCLOG_RESERVE(P,T,L) _PMCLOG_RESERVE(P,T,L,return) #define PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L, \ error=ENOMEM;goto error) #define PMCLOG_EMIT32(V) do { *_le++ = (V); } while (0) #define PMCLOG_EMIT64(V) do { \ *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \ *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \ } while (0) /* Emit a string. 
Caution: does NOT update _le, so needs to be last */ #define PMCLOG_EMITSTRING(S,L) do { bcopy((S), _le, (L)); } while (0) #define PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0) #define PMCLOG_DESPATCH_SAFE(PO) \ pmclog_release((PO)); \ } while (0) #define PMCLOG_DESPATCH_SCHED_LOCK(PO) \ pmclog_release_flags((PO), 0); \ } while (0) #define PMCLOG_DESPATCH(PO) \ pmclog_release((PO)); \ spinlock_exit(); \ } while (0) #define PMCLOG_DESPATCH_SYNC(PO) \ pmclog_schedule_io((PO), 1); \ spinlock_exit(); \ } while (0) #define TSDELTA 4 /* * Assertions about the log file format. */ CTASSERT(sizeof(struct pmclog_callchain) == 7*4 + TSDELTA + PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t)); CTASSERT(sizeof(struct pmclog_closelog) == 3*4 + TSDELTA); CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4 + TSDELTA); CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX + TSDELTA + 5*4 + sizeof(uintfptr_t)); CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) == 5*4 + TSDELTA + sizeof(uintfptr_t)); CTASSERT(sizeof(struct pmclog_map_out) == 5*4 + 2*sizeof(uintfptr_t) + TSDELTA); CTASSERT(sizeof(struct pmclog_pmcallocate) == 9*4 + TSDELTA); CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX + TSDELTA); CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4 + TSDELTA); CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4 + TSDELTA); CTASSERT(sizeof(struct pmclog_proccsw) == 7*4 + 8 + TSDELTA); CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX + sizeof(uintfptr_t) + TSDELTA); CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 + TSDELTA + sizeof(uintfptr_t)); CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8 + TSDELTA); CTASSERT(sizeof(struct pmclog_procfork) == 5*4 + TSDELTA); CTASSERT(sizeof(struct pmclog_sysexit) == 6*4); CTASSERT(sizeof(struct pmclog_userdata) == 6*4); /* * Log buffer structure */ struct pmclog_buffer { TAILQ_ENTRY(pmclog_buffer) plb_next; char *plb_base; char *plb_ptr; char *plb_fence; uint16_t plb_domain; } __aligned(CACHE_LINE_SIZE); /* * Prototypes */ static int pmclog_get_buffer(struct pmc_owner *po); static void pmclog_loop(void *arg); static void pmclog_release(struct pmc_owner *po); static uint32_t *pmclog_reserve(struct pmc_owner *po, int length); static void pmclog_schedule_io(struct pmc_owner *po, int wakeup); static void pmclog_schedule_all(struct pmc_owner *po); static void pmclog_stop_kthread(struct pmc_owner *po); /* * Helper functions */ static inline void pmc_plb_rele_unlocked(struct pmclog_buffer *plb) { TAILQ_INSERT_HEAD(&pmc_dom_hdrs[plb->plb_domain]->pdbh_head, plb, plb_next); } static inline void pmc_plb_rele(struct pmclog_buffer *plb) { mtx_lock_spin(&pmc_dom_hdrs[plb->plb_domain]->pdbh_mtx); pmc_plb_rele_unlocked(plb); mtx_unlock_spin(&pmc_dom_hdrs[plb->plb_domain]->pdbh_mtx); } /* * Get a log buffer */ static int pmclog_get_buffer(struct pmc_owner *po) { struct pmclog_buffer *plb; int domain; KASSERT(po->po_curbuf[curcpu] == NULL, ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po)); domain = curdomain; MPASS(pmc_dom_hdrs[domain]); mtx_lock_spin(&pmc_dom_hdrs[domain]->pdbh_mtx); if ((plb = TAILQ_FIRST(&pmc_dom_hdrs[domain]->pdbh_head)) != NULL) TAILQ_REMOVE(&pmc_dom_hdrs[domain]->pdbh_head, plb, plb_next); mtx_unlock_spin(&pmc_dom_hdrs[domain]->pdbh_mtx); PMCDBG2(LOG,GTB,1, "po=%p plb=%p", po, plb); #ifdef HWPMC_DEBUG if (plb) KASSERT(plb->plb_ptr == plb->plb_base && plb->plb_base < plb->plb_fence, ("[pmclog,%d] po=%p buffer invariants: ptr=%p " "base=%p fence=%p", __LINE__, po, plb->plb_ptr, plb->plb_base, plb->plb_fence)); 
#endif po->po_curbuf[curcpu] = plb; /* update stats */ counter_u64_add(pmc_stats.pm_buffer_requests, 1); if (plb == NULL) counter_u64_add(pmc_stats.pm_buffer_requests_failed, 1); return (plb ? 0 : ENOMEM); } struct pmclog_proc_init_args { struct proc *kthr; struct pmc_owner *po; bool exit; bool acted; }; int pmclog_proc_create(struct thread *td, void **handlep) { struct pmclog_proc_init_args *ia; int error; ia = malloc(sizeof(*ia), M_TEMP, M_WAITOK | M_ZERO); error = kproc_create(pmclog_loop, ia, &ia->kthr, RFHIGHPID, 0, "hwpmc: proc(%d)", td->td_proc->p_pid); if (error == 0) *handlep = ia; return (error); } void pmclog_proc_ignite(void *handle, struct pmc_owner *po) { struct pmclog_proc_init_args *ia; ia = handle; mtx_lock(&pmc_kthread_mtx); MPASS(!ia->acted); MPASS(ia->po == NULL); MPASS(!ia->exit); MPASS(ia->kthr != NULL); if (po == NULL) { ia->exit = true; } else { ia->po = po; KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread (%p) already present", __LINE__, po, po->po_kthread)); po->po_kthread = ia->kthr; } wakeup(ia); while (!ia->acted) msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogw", 0); mtx_unlock(&pmc_kthread_mtx); free(ia, M_TEMP); } /* * Log handler loop. * * This function is executed by each pmc owner's helper thread. */ static void pmclog_loop(void *arg) { struct pmclog_proc_init_args *ia; struct pmc_owner *po; struct pmclog_buffer *lb; struct proc *p; struct ucred *ownercred; struct ucred *mycred; struct thread *td; sigset_t unb; struct uio auio; struct iovec aiov; size_t nbytes; int error; td = curthread; SIGEMPTYSET(unb); SIGADDSET(unb, SIGHUP); (void)kern_sigprocmask(td, SIG_UNBLOCK, &unb, NULL, 0); ia = arg; MPASS(ia->kthr == curproc); MPASS(!ia->acted); mtx_lock(&pmc_kthread_mtx); while (ia->po == NULL && !ia->exit) msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogi", 0); if (ia->exit) { ia->acted = true; wakeup(ia); mtx_unlock(&pmc_kthread_mtx); kproc_exit(0); } MPASS(ia->po != NULL); po = ia->po; ia->acted = true; wakeup(ia); mtx_unlock(&pmc_kthread_mtx); ia = NULL; p = po->po_owner; mycred = td->td_ucred; PROC_LOCK(p); ownercred = crhold(p->p_ucred); PROC_UNLOCK(p); PMCDBG2(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread); KASSERT(po->po_kthread == curthread->td_proc, ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__, po, po->po_kthread, curthread->td_proc)); lb = NULL; /* * Loop waiting for I/O requests to be added to the owner * struct's queue. The loop is exited when the log file * is deconfigured. */ mtx_lock(&pmc_kthread_mtx); for (;;) { /* check if we've been asked to exit */ if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) break; if (lb == NULL) { /* look for a fresh buffer to write */ mtx_lock_spin(&po->po_mtx); if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) { mtx_unlock_spin(&po->po_mtx); /* No more buffers and shutdown required. 
*/ if (po->po_flags & PMC_PO_SHUTDOWN) break; (void) msleep(po, &pmc_kthread_mtx, PWAIT, "pmcloop", 250); continue; } TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next); mtx_unlock_spin(&po->po_mtx); } mtx_unlock(&pmc_kthread_mtx); /* process the request */ PMCDBG3(LOG,WRI,2, "po=%p base=%p ptr=%p", po, lb->plb_base, lb->plb_ptr); /* change our thread's credentials before issuing the I/O */ aiov.iov_base = lb->plb_base; aiov.iov_len = nbytes = lb->plb_ptr - lb->plb_base; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = -1; auio.uio_resid = nbytes; auio.uio_rw = UIO_WRITE; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; /* switch thread credentials -- see kern_ktrace.c */ td->td_ucred = ownercred; error = fo_write(po->po_file, &auio, ownercred, 0, td); td->td_ucred = mycred; if (error) { /* XXX some errors are recoverable */ /* send a SIGIO to the owner and exit */ PROC_LOCK(p); kern_psignal(p, SIGIO); PROC_UNLOCK(p); mtx_lock(&pmc_kthread_mtx); po->po_error = error; /* save for flush log */ PMCDBG2(LOG,WRI,2, "po=%p error=%d", po, error); break; } mtx_lock(&pmc_kthread_mtx); /* put the used buffer back into the global pool */ PMCLOG_RESET_BUFFER_DESCRIPTOR(lb); pmc_plb_rele(lb); lb = NULL; } wakeup_one(po->po_kthread); po->po_kthread = NULL; mtx_unlock(&pmc_kthread_mtx); /* return the current I/O buffer to the global pool */ if (lb) { PMCLOG_RESET_BUFFER_DESCRIPTOR(lb); pmc_plb_rele(lb); } /* * Exit this thread, signalling the waiter */ crfree(ownercred); kproc_exit(0); } /* * Release and log entry and schedule an I/O if needed. */ static void pmclog_release_flags(struct pmc_owner *po, int wakeup) { struct pmclog_buffer *plb; plb = po->po_curbuf[curcpu]; KASSERT(plb->plb_ptr >= plb->plb_base, ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__, po, plb->plb_ptr, plb->plb_base)); KASSERT(plb->plb_ptr <= plb->plb_fence, ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__, po, plb->plb_ptr, plb->plb_fence)); /* schedule an I/O if we've filled a buffer */ if (plb->plb_ptr >= plb->plb_fence) pmclog_schedule_io(po, wakeup); PMCDBG1(LOG,REL,1, "po=%p", po); } static void pmclog_release(struct pmc_owner *po) { pmclog_release_flags(po, 1); } /* * Attempt to reserve 'length' bytes of space in an owner's log * buffer. The function returns a pointer to 'length' bytes of space * if there was enough space or returns NULL if no space was * available. Non-null returns do so with the po mutex locked. The * caller must invoke pmclog_release() on the pmc owner structure * when done. */ static uint32_t * pmclog_reserve(struct pmc_owner *po, int length) { uintptr_t newptr, oldptr; struct pmclog_buffer *plb, **pplb; PMCDBG2(LOG,ALL,1, "po=%p len=%d", po, length); KASSERT(length % sizeof(uint32_t) == 0, ("[pmclog,%d] length not a multiple of word size", __LINE__)); /* No more data when shutdown in progress. 
*/ if (po->po_flags & PMC_PO_SHUTDOWN) return (NULL); pplb = &po->po_curbuf[curcpu]; if (*pplb == NULL && pmclog_get_buffer(po) != 0) goto fail; KASSERT(*pplb != NULL, ("[pmclog,%d] po=%p no current buffer", __LINE__, po)); plb = *pplb; KASSERT(plb->plb_ptr >= plb->plb_base && plb->plb_ptr <= plb->plb_fence, ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p", __LINE__, po, plb->plb_ptr, plb->plb_base, plb->plb_fence)); oldptr = (uintptr_t) plb->plb_ptr; newptr = oldptr + length; KASSERT(oldptr != (uintptr_t) NULL, ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po)); /* * If we have space in the current buffer, return a pointer to * available space with the PO structure locked. */ if (newptr <= (uintptr_t) plb->plb_fence) { plb->plb_ptr = (char *) newptr; goto done; } /* * Otherwise, schedule the current buffer for output and get a * fresh buffer. */ pmclog_schedule_io(po, 0); if (pmclog_get_buffer(po) != 0) goto fail; plb = *pplb; KASSERT(plb != NULL, ("[pmclog,%d] po=%p no current buffer", __LINE__, po)); KASSERT(plb->plb_ptr != NULL, ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__)); KASSERT(plb->plb_ptr == plb->plb_base && plb->plb_ptr <= plb->plb_fence, ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p", __LINE__, po, plb->plb_ptr, plb->plb_base, plb->plb_fence)); oldptr = (uintptr_t) plb->plb_ptr; done: return ((uint32_t *) oldptr); fail: return (NULL); } /* * Schedule an I/O. * * Transfer the current buffer to the helper kthread. */ static void pmclog_schedule_io(struct pmc_owner *po, int wakeup) { struct pmclog_buffer *plb; plb = po->po_curbuf[curcpu]; po->po_curbuf[curcpu] = NULL; KASSERT(plb != NULL, ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po)); KASSERT(plb->plb_ptr >= plb->plb_base, ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__, po, plb->plb_ptr, plb->plb_base)); KASSERT(plb->plb_ptr <= plb->plb_fence, ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__, po, plb->plb_ptr, plb->plb_fence)); PMCDBG1(LOG,SIO, 1, "po=%p", po); /* * Add the current buffer to the tail of the buffer list and * wakeup the helper. */ mtx_lock_spin(&po->po_mtx); TAILQ_INSERT_TAIL(&po->po_logbuffers, plb, plb_next); mtx_unlock_spin(&po->po_mtx); if (wakeup) wakeup_one(po); } /* * Stop the helper kthread. */ static void pmclog_stop_kthread(struct pmc_owner *po) { mtx_lock(&pmc_kthread_mtx); po->po_flags &= ~PMC_PO_OWNS_LOGFILE; if (po->po_kthread != NULL) { PROC_LOCK(po->po_kthread); kern_psignal(po->po_kthread, SIGHUP); PROC_UNLOCK(po->po_kthread); } wakeup_one(po); while (po->po_kthread) msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0); mtx_unlock(&pmc_kthread_mtx); } /* * Public functions */ /* * Configure a log file for pmc owner 'po'. * * Parameter 'logfd' is a file handle referencing an open file in the * owner process. This file needs to have been opened for writing. 
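 * From the owner process the descriptor is typically produced and
 * handed over through libpmc(3); a hedged sketch, assuming the
 * documented pmc_init(3) and pmc_configure_logfile(3) interfaces and
 * an arbitrary output file name:
 *
 *	int fd = open("samples.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *
 *	if (fd < 0 || pmc_init() < 0 || pmc_configure_logfile(fd) < 0)
 *		err(1, "cannot configure hwpmc log file");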
*/ int pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd) { struct proc *p; struct timespec ts; uint64_t tsc; int error; sx_assert(&pmc_sx, SA_XLOCKED); PMCDBG2(LOG,CFG,1, "config po=%p logfd=%d", po, logfd); p = po->po_owner; /* return EBUSY if a log file was already present */ if (po->po_flags & PMC_PO_OWNS_LOGFILE) return (EBUSY); KASSERT(po->po_file == NULL, ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po, po->po_file)); /* get a reference to the file state */ error = fget_write(curthread, logfd, &cap_write_rights, &po->po_file); if (error) goto error; /* mark process as owning a log file */ po->po_flags |= PMC_PO_OWNS_LOGFILE; /* mark process as using HWPMCs */ PROC_LOCK(p); p->p_flag |= P_HWPMC; PROC_UNLOCK(p); nanotime(&ts); tsc = pmc_rdtsc(); /* create a log initialization entry */ PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE, sizeof(struct pmclog_initialize)); PMCLOG_EMIT32(PMC_VERSION); PMCLOG_EMIT32(md->pmd_cputype); #if defined(__i386__) || defined(__amd64__) PMCLOG_EMIT64(tsc_freq); #else /* other architectures will need to fill this in */ PMCLOG_EMIT32(0); PMCLOG_EMIT32(0); #endif memcpy(_le, &ts, sizeof(ts)); _le += sizeof(ts)/4; PMCLOG_EMITSTRING(pmc_cpuid, PMC_CPUID_LEN); PMCLOG_DESPATCH_SYNC(po); return (0); error: KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not " "stopped", __LINE__, po)); if (po->po_file) (void) fdrop(po->po_file, curthread); po->po_file = NULL; /* clear file and error state */ po->po_error = 0; po->po_flags &= ~PMC_PO_OWNS_LOGFILE; return (error); } /* * De-configure a log file. This will throw away any buffers queued * for this owner process. */ int pmclog_deconfigure_log(struct pmc_owner *po) { int error; struct pmclog_buffer *lb; PMCDBG1(LOG,CFG,1, "de-config po=%p", po); if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) return (EINVAL); KASSERT(po->po_sscount == 0, ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po)); KASSERT(po->po_file != NULL, ("[pmclog,%d] po=%p no log file", __LINE__, po)); /* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */ pmclog_stop_kthread(po); KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po)); /* return all queued log buffers to the global pool */ while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) { TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next); PMCLOG_RESET_BUFFER_DESCRIPTOR(lb); pmc_plb_rele(lb); } for (int i = 0; i < mp_ncpus; i++) { thread_lock(curthread); sched_bind(curthread, i); thread_unlock(curthread); /* return the 'current' buffer to the global pool */ if ((lb = po->po_curbuf[curcpu]) != NULL) { PMCLOG_RESET_BUFFER_DESCRIPTOR(lb); pmc_plb_rele(lb); } } thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); /* drop a reference to the fd */ if (po->po_file != NULL) { error = fdrop(po->po_file, curthread); po->po_file = NULL; } else error = 0; po->po_error = 0; return (error); } /* * Flush a process' log buffer. */ int pmclog_flush(struct pmc_owner *po, int force) { int error; PMCDBG1(LOG,FLS,1, "po=%p", po); /* * If there is a pending error recorded by the logger thread, * return that. */ if (po->po_error) return (po->po_error); error = 0; /* * Check that we do have an active log file. 
*/ mtx_lock(&pmc_kthread_mtx); if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) { error = EINVAL; goto error; } pmclog_schedule_all(po); error: mtx_unlock(&pmc_kthread_mtx); return (error); } static void pmclog_schedule_one_cond(struct pmc_owner *po) { struct pmclog_buffer *plb; int cpu; spinlock_enter(); cpu = curcpu; /* tell hardclock not to run again */ if (PMC_CPU_HAS_SAMPLES(cpu)) PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL); plb = po->po_curbuf[cpu]; if (plb && plb->plb_ptr != plb->plb_base) pmclog_schedule_io(po, 1); spinlock_exit(); } static void pmclog_schedule_all(struct pmc_owner *po) { /* * Schedule the current buffer if any and not empty. */ for (int i = 0; i < mp_ncpus; i++) { thread_lock(curthread); sched_bind(curthread, i); thread_unlock(curthread); pmclog_schedule_one_cond(po); } thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } int pmclog_close(struct pmc_owner *po) { PMCDBG1(LOG,CLO,1, "po=%p", po); pmclog_process_closelog(po); mtx_lock(&pmc_kthread_mtx); /* * Initiate shutdown: no new data queued, * thread will close file on last block. */ po->po_flags |= PMC_PO_SHUTDOWN; /* give time for all to see */ DELAY(50); /* * Schedule the current buffer. */ pmclog_schedule_all(po); wakeup_one(po); mtx_unlock(&pmc_kthread_mtx); return (0); } void pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps) { int n, recordlen; uint32_t flags; struct pmc_owner *po; PMCDBG3(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid, ps->ps_nsamples); recordlen = offsetof(struct pmclog_callchain, pl_pc) + ps->ps_nsamples * sizeof(uintfptr_t); po = pm->pm_owner; flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags); PMCLOG_RESERVE_SAFE(po, CALLCHAIN, recordlen, ps->ps_tsc); PMCLOG_EMIT32(ps->ps_pid); PMCLOG_EMIT32(ps->ps_tid); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(flags); for (n = 0; n < ps->ps_nsamples; n++) PMCLOG_EMITADDR(ps->ps_pc[n]); PMCLOG_DESPATCH_SAFE(po); } void pmclog_process_closelog(struct pmc_owner *po) { PMCLOG_RESERVE(po,CLOSELOG,sizeof(struct pmclog_closelog)); PMCLOG_DESPATCH_SYNC(po); } void pmclog_process_dropnotify(struct pmc_owner *po) { PMCLOG_RESERVE(po,DROPNOTIFY,sizeof(struct pmclog_dropnotify)); PMCLOG_DESPATCH(po); } void pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start, const char *path) { int pathlen, recordlen; KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__)); pathlen = strlen(path) + 1; /* #bytes for path name */ recordlen = offsetof(struct pmclog_map_in, pl_pathname) + pathlen; PMCLOG_RESERVE(po, MAP_IN, recordlen); PMCLOG_EMIT32(pid); PMCLOG_EMIT32(0); PMCLOG_EMITADDR(start); PMCLOG_EMITSTRING(path,pathlen); PMCLOG_DESPATCH_SYNC(po); } void pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start, uintfptr_t end) { KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__)); PMCLOG_RESERVE(po, MAP_OUT, sizeof(struct pmclog_map_out)); PMCLOG_EMIT32(pid); PMCLOG_EMIT32(0); PMCLOG_EMITADDR(start); PMCLOG_EMITADDR(end); PMCLOG_DESPATCH(po); } void pmclog_process_pmcallocate(struct pmc *pm) { struct pmc_owner *po; struct pmc_soft *ps; po = pm->pm_owner; PMCDBG1(LOG,ALL,1, "pm=%p", pm); if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) { PMCLOG_RESERVE(po, PMCALLOCATEDYN, sizeof(struct pmclog_pmcallocatedyn)); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(pm->pm_event); PMCLOG_EMIT32(pm->pm_flags); PMCLOG_EMIT32(0); PMCLOG_EMIT64(pm->pm_sc.pm_reloadcount); ps = pmc_soft_ev_acquire(pm->pm_event); if (ps != NULL) PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name,PMC_NAME_MAX); else 
PMCLOG_EMITNULLSTRING(PMC_NAME_MAX); pmc_soft_ev_release(ps); PMCLOG_DESPATCH_SYNC(po); } else { PMCLOG_RESERVE(po, PMCALLOCATE, sizeof(struct pmclog_pmcallocate)); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(pm->pm_event); PMCLOG_EMIT32(pm->pm_flags); PMCLOG_EMIT32(0); PMCLOG_EMIT64(pm->pm_sc.pm_reloadcount); PMCLOG_DESPATCH_SYNC(po); } } void pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path) { int pathlen, recordlen; struct pmc_owner *po; PMCDBG2(LOG,ATT,1,"pm=%p pid=%d", pm, pid); po = pm->pm_owner; pathlen = strlen(path) + 1; /* #bytes for the string */ recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen; PMCLOG_RESERVE(po, PMCATTACH, recordlen); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(pid); PMCLOG_EMITSTRING(path, pathlen); PMCLOG_DESPATCH_SYNC(po); } void pmclog_process_pmcdetach(struct pmc *pm, pid_t pid) { struct pmc_owner *po; PMCDBG2(LOG,ATT,1,"!pm=%p pid=%d", pm, pid); po = pm->pm_owner; PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach)); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(pid); PMCLOG_DESPATCH_SYNC(po); } void pmclog_process_proccreate(struct pmc_owner *po, struct proc *p, int sync) { if (sync) { PMCLOG_RESERVE(po, PROC_CREATE, sizeof(struct pmclog_proccreate)); PMCLOG_EMIT32(p->p_pid); PMCLOG_EMIT32(p->p_flag); PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1); PMCLOG_DESPATCH_SYNC(po); } else { PMCLOG_RESERVE(po, PROC_CREATE, sizeof(struct pmclog_proccreate)); PMCLOG_EMIT32(p->p_pid); PMCLOG_EMIT32(p->p_flag); PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1); PMCLOG_DESPATCH(po); } } /* * Log a context switch event to the log file. */ void pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v, struct thread *td) { struct pmc_owner *po; KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW, ("[pmclog,%d] log-process-csw called gratuitously", __LINE__)); PMCDBG3(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid, v); po = pm->pm_owner; PMCLOG_RESERVE_SAFE(po, PROCCSW, sizeof(struct pmclog_proccsw), pmc_rdtsc()); PMCLOG_EMIT64(v); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(pp->pp_proc->p_pid); PMCLOG_EMIT32(td->td_tid); PMCLOG_EMIT32(0); PMCLOG_DESPATCH_SCHED_LOCK(po); } void pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid, uintfptr_t startaddr, char *path) { int pathlen, recordlen; PMCDBG3(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path); pathlen = strlen(path) + 1; /* #bytes for the path */ recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen; PMCLOG_RESERVE(po, PROCEXEC, recordlen); PMCLOG_EMIT32(pid); PMCLOG_EMIT32(pmid); PMCLOG_EMITADDR(startaddr); PMCLOG_EMITSTRING(path,pathlen); PMCLOG_DESPATCH_SYNC(po); } /* * Log a process exit event (and accumulated pmc value) to the log file. */ void pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp) { int ri; struct pmc_owner *po; ri = PMC_TO_ROWINDEX(pm); PMCDBG3(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid, pp->pp_pmcs[ri].pp_pmcval); po = pm->pm_owner; PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit)); PMCLOG_EMIT32(pm->pm_id); PMCLOG_EMIT32(pp->pp_proc->p_pid); PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval); PMCLOG_DESPATCH(po); } /* * Log a fork event. */ void pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid) { PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork)); PMCLOG_EMIT32(oldpid); PMCLOG_EMIT32(newpid); PMCLOG_DESPATCH(po); } /* * Log a process exit event of the form suitable for system-wide PMCs. 
*/ void pmclog_process_sysexit(struct pmc_owner *po, pid_t pid) { PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit)); PMCLOG_EMIT32(pid); PMCLOG_DESPATCH(po); } void pmclog_process_threadcreate(struct pmc_owner *po, struct thread *td, int sync) { struct proc *p; p = td->td_proc; if (sync) { PMCLOG_RESERVE(po, THR_CREATE, sizeof(struct pmclog_threadcreate)); PMCLOG_EMIT32(td->td_tid); PMCLOG_EMIT32(p->p_pid); PMCLOG_EMIT32(p->p_flag); PMCLOG_EMIT32(0); PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1); PMCLOG_DESPATCH_SYNC(po); } else { PMCLOG_RESERVE(po, THR_CREATE, sizeof(struct pmclog_threadcreate)); PMCLOG_EMIT32(td->td_tid); PMCLOG_EMIT32(p->p_pid); PMCLOG_EMIT32(p->p_flag); PMCLOG_EMIT32(0); PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1); PMCLOG_DESPATCH(po); } } void pmclog_process_threadexit(struct pmc_owner *po, struct thread *td) { PMCLOG_RESERVE(po, THR_EXIT, sizeof(struct pmclog_threadexit)); PMCLOG_EMIT32(td->td_tid); PMCLOG_DESPATCH(po); } /* * Write a user log entry. */ int pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl) { int error; PMCDBG2(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata); error = 0; PMCLOG_RESERVE_WITH_ERROR(po, USERDATA, sizeof(struct pmclog_userdata)); PMCLOG_EMIT32(wl->pm_userdata); PMCLOG_DESPATCH(po); error: return (error); } /* * Initialization. * * Create a pool of log buffers and initialize mutexes. */ void pmclog_initialize() { - int domain; struct pmclog_buffer *plb; + int domain, ncpus, total; if (pmclog_buffer_size <= 0 || pmclog_buffer_size > 16*1024) { (void) printf("hwpmc: tunable logbuffersize=%d must be " "greater than zero and less than or equal to 16MB.\n", pmclog_buffer_size); pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; } if (pmc_nlogbuffers_pcpu <= 0) { (void) printf("hwpmc: tunable nlogbuffers=%d must be greater " "than zero.\n", pmc_nlogbuffers_pcpu); pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; } if (pmc_nlogbuffers_pcpu*pmclog_buffer_size > 32*1024) { (void) printf("hwpmc: memory allocated pcpu must be less than 32MB (is %dK).\n", pmc_nlogbuffers_pcpu*pmclog_buffer_size); pmc_nlogbuffers_pcpu = PMC_NLOGBUFFERS_PCPU; pmclog_buffer_size = PMC_LOG_BUFFER_SIZE; } for (domain = 0; domain < vm_ndomains; domain++) { - int ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus; - int total = ncpus*pmc_nlogbuffers_pcpu; + ncpus = pmc_dom_hdrs[domain]->pdbh_ncpus; + total = ncpus * pmc_nlogbuffers_pcpu; - plb = malloc_domain(sizeof(struct pmclog_buffer)*total, M_PMC, domain, M_WAITOK|M_ZERO); + plb = malloc_domainset(sizeof(struct pmclog_buffer) * total, + M_PMC, DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); pmc_dom_hdrs[domain]->pdbh_plbs = plb; - for (int i = 0; i < total; i++, plb++) { + for (; total > 0; total--, plb++) { void *buf; - buf = malloc_domain(1024 * pmclog_buffer_size, M_PMC, domain, - M_WAITOK|M_ZERO); + buf = malloc_domainset(1024 * pmclog_buffer_size, M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); PMCLOG_INIT_BUFFER_DESCRIPTOR(plb, buf, domain); pmc_plb_rele_unlocked(plb); } } mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF); } /* * Shutdown logging. * * Destroy mutexes and release memory back the to free pool. 
*/ void pmclog_shutdown() { struct pmclog_buffer *plb; int domain; mtx_destroy(&pmc_kthread_mtx); for (domain = 0; domain < vm_ndomains; domain++) { while ((plb = TAILQ_FIRST(&pmc_dom_hdrs[domain]->pdbh_head)) != NULL) { TAILQ_REMOVE(&pmc_dom_hdrs[domain]->pdbh_head, plb, plb_next); free(plb->plb_base, M_PMC); } free(pmc_dom_hdrs[domain]->pdbh_plbs, M_PMC); } } Index: head/sys/dev/hwpmc/hwpmc_mod.c =================================================================== --- head/sys/dev/hwpmc/hwpmc_mod.c (revision 339926) +++ head/sys/dev/hwpmc/hwpmc_mod.c (revision 339927) @@ -1,5979 +1,5965 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2008 Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * Copyright (c) 2018 Matthew Macy * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* needs to be after */ #include #include #include #include #include #include #include #include "hwpmc_soft.h" -#ifdef NUMA -#define NDOMAINS vm_ndomains -#else -#define NDOMAINS 1 -#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags)) -#define free_domain(addr, type) free(addr, type) -#endif - #define PMC_EPOCH_ENTER() struct epoch_tracker pmc_et; epoch_enter_preempt(global_epoch_preempt, &pmc_et) #define PMC_EPOCH_EXIT() epoch_exit_preempt(global_epoch_preempt, &pmc_et) /* * Types */ enum pmc_flags { PMC_FLAG_NONE = 0x00, /* do nothing */ PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */ PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */ PMC_FLAG_NOWAIT = 0x04, /* do not wait for mallocs */ }; /* * The offset in sysent where the syscall is allocated. 
*/ static int pmc_syscall_num = NO_SYSCALL; struct pmc_cpu **pmc_pcpu; /* per-cpu state */ pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */ #define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)] struct mtx_pool *pmc_mtxpool; static int *pmc_pmcdisp; /* PMC row dispositions */ #define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0) #define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0) #define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0) #define PMC_MARK_ROW_FREE(R) do { \ pmc_pmcdisp[(R)] = 0; \ } while (0) #define PMC_MARK_ROW_STANDALONE(R) do { \ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ atomic_add_int(&pmc_pmcdisp[(R)], -1); \ KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \ ("[pmc,%d] row disposition error", __LINE__)); \ } while (0) #define PMC_UNMARK_ROW_STANDALONE(R) do { \ atomic_add_int(&pmc_pmcdisp[(R)], 1); \ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ } while (0) #define PMC_MARK_ROW_THREAD(R) do { \ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ atomic_add_int(&pmc_pmcdisp[(R)], 1); \ } while (0) #define PMC_UNMARK_ROW_THREAD(R) do { \ atomic_add_int(&pmc_pmcdisp[(R)], -1); \ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \ __LINE__)); \ } while (0) /* various event handlers */ static eventhandler_tag pmc_exit_tag, pmc_fork_tag, pmc_kld_load_tag, pmc_kld_unload_tag; /* Module statistics */ struct pmc_driverstats pmc_stats; /* Machine/processor dependent operations */ static struct pmc_mdep *md; /* * Hash tables mapping owner processes and target threads to PMCs. */ struct mtx pmc_processhash_mtx; /* spin mutex */ static u_long pmc_processhashmask; static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash; /* * Hash table of PMC owner descriptors. This table is protected by * the shared PMC "sx" lock. */ static u_long pmc_ownerhashmask; static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash; /* * List of PMC owners with system-wide sampling PMCs. */ static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners; /* * List of free thread entries. This is protected by the spin * mutex. */ static struct mtx pmc_threadfreelist_mtx; /* spin mutex */ static LIST_HEAD(, pmc_thread) pmc_threadfreelist; static int pmc_threadfreelist_entries=0; #define THREADENTRY_SIZE \ (sizeof(struct pmc_thread) + (md->pmd_npmc * sizeof(struct pmc_threadpmcstate))) /* * Task to free thread descriptors */ static struct grouptask free_gtask; /* * A map of row indices to classdep structures. 
*/ static struct pmc_classdep **pmc_rowindex_to_classdep; /* * Prototypes */ #ifdef HWPMC_DEBUG static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS); static int pmc_debugflags_parse(char *newstr, char *fence); #endif static int load(struct module *module, int cmd, void *arg); static int pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf); static void pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp); static int pmc_attach_process(struct proc *p, struct pmc *pm); static struct pmc *pmc_allocate_pmc_descriptor(void); static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p); static int pmc_attach_one_process(struct proc *p, struct pmc *pm); static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu); static int pmc_can_attach(struct pmc *pm, struct proc *p); static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf); static void pmc_cleanup(void); static int pmc_detach_process(struct proc *p, struct pmc *pm); static int pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags); static void pmc_destroy_owner_descriptor(struct pmc_owner *po); static void pmc_destroy_pmc_descriptor(struct pmc *pm); static void pmc_destroy_process_descriptor(struct pmc_process *pp); static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p); static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm); static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmc); static struct pmc_process *pmc_find_process_descriptor(struct proc *p, uint32_t mode); static struct pmc_thread *pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td, uint32_t mode); static void pmc_force_context_switch(void); static void pmc_link_target_process(struct pmc *pm, struct pmc_process *pp); static void pmc_log_all_process_mappings(struct pmc_owner *po); static void pmc_log_kernel_mappings(struct pmc *pm); static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p); static void pmc_maybe_remove_owner(struct pmc_owner *po); static void pmc_process_csw_in(struct thread *td); static void pmc_process_csw_out(struct thread *td); static void pmc_process_exit(void *arg, struct proc *p); static void pmc_process_fork(void *arg, struct proc *p1, struct proc *p2, int n); static void pmc_process_samples(int cpu, ring_type_t soft); static void pmc_release_pmc_descriptor(struct pmc *pmc); static void pmc_process_thread_add(struct thread *td); static void pmc_process_thread_delete(struct thread *td); static void pmc_process_thread_userret(struct thread *td); static void pmc_remove_owner(struct pmc_owner *po); static void pmc_remove_process_descriptor(struct pmc_process *pp); static void pmc_restore_cpu_binding(struct pmc_binding *pb); static void pmc_save_cpu_binding(struct pmc_binding *pb); static void pmc_select_cpu(int cpu); static int pmc_start(struct pmc *pm); static int pmc_stop(struct pmc *pm); static int pmc_syscall_handler(struct thread *td, void *syscall_args); static struct pmc_thread *pmc_thread_descriptor_pool_alloc(void); static void pmc_thread_descriptor_pool_drain(void); static void pmc_thread_descriptor_pool_free(struct pmc_thread *pt); static void pmc_unlink_target_process(struct pmc *pmc, struct pmc_process *pp); static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp); static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp); static struct pmc_mdep *pmc_generic_cpu_initialize(void); static void pmc_generic_cpu_finalize(struct 
pmc_mdep *md); static void pmc_post_callchain_callback(void); static void pmc_process_threadcreate(struct thread *td); static void pmc_process_threadexit(struct thread *td); static void pmc_process_proccreate(struct proc *p); static void pmc_process_allproc(struct pmc *pm); /* * Kernel tunables and sysctl(8) interface. */ SYSCTL_DECL(_kern_hwpmc); SYSCTL_NODE(_kern_hwpmc, OID_AUTO, stats, CTLFLAG_RW, 0, "HWPMC stats"); /* Stats. */ SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_ignored, CTLFLAG_RW, &pmc_stats.pm_intr_ignored, "# of interrupts ignored"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_processed, CTLFLAG_RW, &pmc_stats.pm_intr_processed, "# of interrupts processed"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, intr_bufferfull, CTLFLAG_RW, &pmc_stats.pm_intr_bufferfull, "# of interrupts where buffer was full"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscalls, CTLFLAG_RW, &pmc_stats.pm_syscalls, "# of syscalls"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, syscall_errors, CTLFLAG_RW, &pmc_stats.pm_syscall_errors, "# of syscall_errors"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests, CTLFLAG_RW, &pmc_stats.pm_buffer_requests, "# of buffer requests"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, buffer_requests_failed, CTLFLAG_RW, &pmc_stats.pm_buffer_requests_failed, "# of buffer requests which failed"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, log_sweeps, CTLFLAG_RW, &pmc_stats.pm_log_sweeps, "# of ?"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, merges, CTLFLAG_RW, &pmc_stats.pm_merges, "# of times kernel stack was found for user trace"); SYSCTL_COUNTER_U64(_kern_hwpmc_stats, OID_AUTO, overwrites, CTLFLAG_RW, &pmc_stats.pm_overwrites, "# of times a sample was overwritten before being logged"); static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH; SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_RDTUN, &pmc_callchaindepth, 0, "depth of call chain records"); char pmc_cpuid[64]; SYSCTL_STRING(_kern_hwpmc, OID_AUTO, cpuid, CTLFLAG_RD, pmc_cpuid, 0, "cpu version string"); #ifdef HWPMC_DEBUG struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS; char pmc_debugstr[PMC_DEBUG_STRSIZE]; TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr, sizeof(pmc_debugstr)); SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags, CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH, 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags"); #endif /* * kern.hwpmc.hashrows -- determines the number of rows in the * of the hash table used to look up threads */ static int pmc_hashsize = PMC_HASH_SIZE; SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_RDTUN, &pmc_hashsize, 0, "rows in hash tables"); /* * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU */ static int pmc_nsamples = PMC_NSAMPLES; SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_RDTUN, &pmc_nsamples, 0, "number of PC samples per CPU"); static uint64_t pmc_sample_mask = PMC_NSAMPLES-1; /* * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool. 
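 * Like the other CTLFLAG_RDTUN knobs in this file it can only be set
 * as a boot-time tunable; illustrative /boot/loader.conf entries
 * (values are examples, not recommendations):
 *
 *	kern.hwpmc.mtxpoolsize=2048
 *	kern.hwpmc.nsamples=1024
 *	kern.hwpmc.callchaindepth=32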
*/ static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE; SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_RDTUN, &pmc_mtxpool_size, 0, "size of spin mutex pool"); /* * kern.hwpmc.threadfreelist_entries -- number of free entries */ SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_entries, CTLFLAG_RD, &pmc_threadfreelist_entries, 0, "number of avalable thread entries"); /* * kern.hwpmc.threadfreelist_max -- maximum number of free entries */ static int pmc_threadfreelist_max = PMC_THREADLIST_MAX; SYSCTL_INT(_kern_hwpmc, OID_AUTO, threadfreelist_max, CTLFLAG_RW, &pmc_threadfreelist_max, 0, "maximum number of available thread entries before freeing some"); /* * security.bsd.unprivileged_syspmcs -- allow non-root processes to * allocate system-wide PMCs. * * Allowing unprivileged processes to allocate system PMCs is convenient * if system-wide measurements need to be taken concurrently with other * per-process measurements. This feature is turned off by default. */ static int pmc_unprivileged_syspmcs = 0; SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RWTUN, &pmc_unprivileged_syspmcs, 0, "allow unprivileged process to allocate system PMCs"); /* * Hash function. Discard the lower 2 bits of the pointer since * these are always zero for our uses. The hash multiplier is * round((2^LONG_BIT) * ((sqrt(5)-1)/2)). */ #if LONG_BIT == 64 #define _PMC_HM 11400714819323198486u #elif LONG_BIT == 32 #define _PMC_HM 2654435769u #else #error Must know the size of 'long' to compile #endif #define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M)) /* * Syscall structures */ /* The `sysent' for the new syscall */ static struct sysent pmc_sysent = { .sy_narg = 2, .sy_call = pmc_syscall_handler, }; static struct syscall_module_data pmc_syscall_mod = { .chainevh = load, .chainarg = NULL, .offset = &pmc_syscall_num, .new_sysent = &pmc_sysent, .old_sysent = { .sy_narg = 0, .sy_call = NULL }, .flags = SY_THR_STATIC_KLD, }; static moduledata_t pmc_mod = { .name = PMC_MODULE_NAME, .evhand = syscall_module_handler, .priv = &pmc_syscall_mod, }; #ifdef EARLY_AP_STARTUP DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY); #else DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY); #endif MODULE_VERSION(pmc, PMC_VERSION); #ifdef HWPMC_DEBUG enum pmc_dbgparse_state { PMCDS_WS, /* in whitespace */ PMCDS_MAJOR, /* seen a major keyword */ PMCDS_MINOR }; static int pmc_debugflags_parse(char *newstr, char *fence) { char c, *p, *q; struct pmc_debugflags *tmpflags; int error, found, *newbits, tmp; size_t kwlen; tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO); p = newstr; error = 0; for (; p < fence && (c = *p); p++) { /* skip white space */ if (c == ' ' || c == '\t') continue; /* look for a keyword followed by "=" */ for (q = p; p < fence && (c = *p) && c != '='; p++) ; if (c != '=') { error = EINVAL; goto done; } kwlen = p - q; newbits = NULL; /* lookup flag group name */ #define DBG_SET_FLAG_MAJ(S,F) \ if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ newbits = &tmpflags->pdb_ ## F; DBG_SET_FLAG_MAJ("cpu", CPU); DBG_SET_FLAG_MAJ("csw", CSW); DBG_SET_FLAG_MAJ("logging", LOG); DBG_SET_FLAG_MAJ("module", MOD); DBG_SET_FLAG_MAJ("md", MDP); DBG_SET_FLAG_MAJ("owner", OWN); DBG_SET_FLAG_MAJ("pmc", PMC); DBG_SET_FLAG_MAJ("process", PRC); DBG_SET_FLAG_MAJ("sampling", SAM); if (newbits == NULL) { error = EINVAL; goto done; } p++; /* skip the '=' */ /* Now parse the individual flags */ tmp = 0; newflag: for (q = p; p < fence && (c = *p); p++) if (c == ' ' || c == '\t' || c == ',') 
break; /* p == fence or c == ws or c == "," or c == 0 */ if ((kwlen = p - q) == 0) { *newbits = tmp; continue; } found = 0; #define DBG_SET_FLAG_MIN(S,F) \ if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \ tmp |= found = (1 << PMC_DEBUG_MIN_ ## F) /* a '*' denotes all possible flags in the group */ if (kwlen == 1 && *q == '*') tmp = found = ~0; /* look for individual flag names */ DBG_SET_FLAG_MIN("allocaterow", ALR); DBG_SET_FLAG_MIN("allocate", ALL); DBG_SET_FLAG_MIN("attach", ATT); DBG_SET_FLAG_MIN("bind", BND); DBG_SET_FLAG_MIN("config", CFG); DBG_SET_FLAG_MIN("exec", EXC); DBG_SET_FLAG_MIN("exit", EXT); DBG_SET_FLAG_MIN("find", FND); DBG_SET_FLAG_MIN("flush", FLS); DBG_SET_FLAG_MIN("fork", FRK); DBG_SET_FLAG_MIN("getbuf", GTB); DBG_SET_FLAG_MIN("hook", PMH); DBG_SET_FLAG_MIN("init", INI); DBG_SET_FLAG_MIN("intr", INT); DBG_SET_FLAG_MIN("linktarget", TLK); DBG_SET_FLAG_MIN("mayberemove", OMR); DBG_SET_FLAG_MIN("ops", OPS); DBG_SET_FLAG_MIN("read", REA); DBG_SET_FLAG_MIN("register", REG); DBG_SET_FLAG_MIN("release", REL); DBG_SET_FLAG_MIN("remove", ORM); DBG_SET_FLAG_MIN("sample", SAM); DBG_SET_FLAG_MIN("scheduleio", SIO); DBG_SET_FLAG_MIN("select", SEL); DBG_SET_FLAG_MIN("signal", SIG); DBG_SET_FLAG_MIN("swi", SWI); DBG_SET_FLAG_MIN("swo", SWO); DBG_SET_FLAG_MIN("start", STA); DBG_SET_FLAG_MIN("stop", STO); DBG_SET_FLAG_MIN("syscall", PMS); DBG_SET_FLAG_MIN("unlinktarget", TUL); DBG_SET_FLAG_MIN("write", WRI); if (found == 0) { /* unrecognized flag name */ error = EINVAL; goto done; } if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */ *newbits = tmp; continue; } p++; goto newflag; } /* save the new flag set */ bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags)); done: free(tmpflags, M_PMC); return error; } static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS) { char *fence, *newstr; int error; unsigned int n; (void) arg1; (void) arg2; /* unused parameters */ n = sizeof(pmc_debugstr); newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO); (void) strlcpy(newstr, pmc_debugstr, n); error = sysctl_handle_string(oidp, newstr, n, req); /* if there is a new string, parse and copy it */ if (error == 0 && req->newptr != NULL) { fence = newstr + (n < req->newlen ? n : req->newlen + 1); if ((error = pmc_debugflags_parse(newstr, fence)) == 0) (void) strlcpy(pmc_debugstr, newstr, sizeof(pmc_debugstr)); } free(newstr, M_PMC); return error; } #endif /* * Map a row index to a classdep structure and return the adjusted row * index for the PMC class index. 
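 * For example (hypothetical layout): if the first class provides four
 * PMCs at global row indices 0-3 (pcd_ri == 0) and a second class
 * starts at pcd_ri == 4, then global row index 5 maps to the second
 * classdep with an adjusted, class-relative index of 5 - 4 == 1.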
*/ static struct pmc_classdep * pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri) { struct pmc_classdep *pcd; (void) md; KASSERT(ri >= 0 && ri < md->pmd_npmc, ("[pmc,%d] illegal row-index %d", __LINE__, ri)); pcd = pmc_rowindex_to_classdep[ri]; KASSERT(pcd != NULL, ("[pmc,%d] ri %d null pcd", __LINE__, ri)); *adjri = ri - pcd->pcd_ri; KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num, ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri)); return (pcd); } /* * Concurrency Control * * The driver manages the following data structures: * * - target process descriptors, one per target process * - owner process descriptors (and attached lists), one per owner process * - lookup hash tables for owner and target processes * - PMC descriptors (and attached lists) * - per-cpu hardware state * - the 'hook' variable through which the kernel calls into * this module * - the machine hardware state (managed by the MD layer) * * These data structures are accessed from: * * - thread context-switch code * - interrupt handlers (possibly on multiple cpus) * - kernel threads on multiple cpus running on behalf of user * processes doing system calls * - this driver's private kernel threads * * = Locks and Locking strategy = * * The driver uses four locking strategies for its operation: * * - The global SX lock "pmc_sx" is used to protect internal * data structures. * * Calls into the module by syscall() start with this lock being * held in exclusive mode. Depending on the requested operation, * the lock may be downgraded to 'shared' mode to allow more * concurrent readers into the module. Calls into the module from * other parts of the kernel acquire the lock in shared mode. * * This SX lock is held in exclusive mode for any operations that * modify the linkages between the driver's internal data structures. * * The 'pmc_hook' function pointer is also protected by this lock. * It is only examined with the sx lock held in exclusive mode. The * kernel module is allowed to be unloaded only with the sx lock held * in exclusive mode. In normal syscall handling, after acquiring the * pmc_sx lock we first check that 'pmc_hook' is non-null before * proceeding. This prevents races between the thread unloading the module * and other threads seeking to use the module. * * - Lookups of target process structures and owner process structures * cannot use the global "pmc_sx" SX lock because these lookups need * to happen during context switches and in other critical sections * where sleeping is not allowed. We protect these lookup tables * with their own private spin-mutexes, "pmc_processhash_mtx" and * "pmc_ownerhash_mtx". * * - Interrupt handlers work in a lock free manner. At interrupt * time, handlers look at the PMC pointer (phw->phw_pmc) configured * when the PMC was started. If this pointer is NULL, the interrupt * is ignored after updating driver statistics. We ensure that this * pointer is set (using an atomic operation if necessary) before the * PMC hardware is started. Conversely, this pointer is unset atomically * only after the PMC hardware is stopped. * * We ensure that everything needed for the operation of an * interrupt handler is available without it needing to acquire any * locks. We also ensure that a PMC's software state is destroyed only * after the PMC is taken off hardware (on all CPUs). * * - Context-switch handling with process-private PMCs needs more * care. * * A given process may be the target of multiple PMCs. 
For example, * PMCATTACH and PMCDETACH may be requested by a process on one CPU * while the target process is running on another. A PMC could also * be getting released because its owner is exiting. We tackle * these situations in the following manner: * * - each target process structure 'pmc_process' has an array * of 'struct pmc *' pointers, one for each hardware PMC. * * - At context switch IN time, each "target" PMC in RUNNING state * gets started on hardware and a pointer to each PMC is copied into * the per-cpu phw array. The 'runcount' for the PMC is * incremented. * * - At context switch OUT time, all process-virtual PMCs are stopped * on hardware. The saved value is added to the PMCs value field * only if the PMC is in a non-deleted state (the PMCs state could * have changed during the current time slice). * * Note that since in-between a switch IN on a processor and a switch * OUT, the PMC could have been released on another CPU. Therefore * context switch OUT always looks at the hardware state to turn * OFF PMCs and will update a PMC's saved value only if reachable * from the target process record. * * - OP PMCRELEASE could be called on a PMC at any time (the PMC could * be attached to many processes at the time of the call and could * be active on multiple CPUs). * * We prevent further scheduling of the PMC by marking it as in * state 'DELETED'. If the runcount of the PMC is non-zero then * this PMC is currently running on a CPU somewhere. The thread * doing the PMCRELEASE operation waits by repeatedly doing a * pause() till the runcount comes to zero. * * The contents of a PMC descriptor (struct pmc) are protected using * a spin-mutex. In order to save space, we use a mutex pool. * * In terms of lock types used by witness(4), we use: * - Type "pmc-sx", used by the global SX lock. * - Type "pmc-sleep", for sleep mutexes used by logger threads. * - Type "pmc-per-proc", for protecting PMC owner descriptors. * - Type "pmc-leaf", used for all other spin mutexes. */ /* * save the cpu binding of the current kthread */ static void pmc_save_cpu_binding(struct pmc_binding *pb) { PMCDBG0(CPU,BND,2, "save-cpu"); thread_lock(curthread); pb->pb_bound = sched_is_bound(curthread); pb->pb_cpu = curthread->td_oncpu; thread_unlock(curthread); PMCDBG1(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu); } /* * restore the cpu binding of the current thread */ static void pmc_restore_cpu_binding(struct pmc_binding *pb) { PMCDBG2(CPU,BND,2, "restore-cpu curcpu=%d restore=%d", curthread->td_oncpu, pb->pb_cpu); thread_lock(curthread); if (pb->pb_bound) sched_bind(curthread, pb->pb_cpu); else sched_unbind(curthread); thread_unlock(curthread); PMCDBG0(CPU,BND,2, "restore-cpu done"); } /* * move execution over the specified cpu and bind it there. */ static void pmc_select_cpu(int cpu) { KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[pmc,%d] bad cpu number %d", __LINE__, cpu)); /* Never move to an inactive CPU. */ KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive " "CPU %d", __LINE__, cpu)); PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d", cpu); thread_lock(curthread); sched_bind(curthread, cpu); thread_unlock(curthread); KASSERT(curthread->td_oncpu == cpu, ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__, cpu, curthread->td_oncpu)); PMCDBG1(CPU,SEL,2, "select-cpu cpu=%d ok", cpu); } /* * Force a context switch. * * We do this by pause'ing for 1 tick -- invoking mi_switch() is not * guaranteed to force a context switch. 
*/ static void pmc_force_context_switch(void) { pause("pmcctx", 1); } uint64_t pmc_rdtsc(void) { #if defined(__i386__) || defined(__amd64__) if (__predict_true(amd_feature & AMDID_RDTSCP)) return rdtscp(); else return rdtsc(); #else return get_cyclecount(); #endif } /* * Get the file name for an executable. This is a simple wrapper * around vn_fullpath(9). */ static void pmc_getfilename(struct vnode *v, char **fullpath, char **freepath) { *fullpath = "unknown"; *freepath = NULL; vn_fullpath(curthread, v, fullpath, freepath); } /* * remove an process owning PMCs */ void pmc_remove_owner(struct pmc_owner *po) { struct pmc *pm, *tmp; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG1(OWN,ORM,1, "remove-owner po=%p", po); /* Remove descriptor from the owner hash table */ LIST_REMOVE(po, po_next); /* release all owned PMC descriptors */ LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) { PMCDBG1(OWN,ORM,2, "pmc=%p", pm); KASSERT(pm->pm_owner == po, ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po)); pmc_release_pmc_descriptor(pm); /* will unlink from the list */ pmc_destroy_pmc_descriptor(pm); } KASSERT(po->po_sscount == 0, ("[pmc,%d] SS count not zero", __LINE__)); KASSERT(LIST_EMPTY(&po->po_pmcs), ("[pmc,%d] PMC list not empty", __LINE__)); /* de-configure the log file if present */ if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_deconfigure_log(po); } /* * remove an owner process record if all conditions are met. */ static void pmc_maybe_remove_owner(struct pmc_owner *po) { PMCDBG1(OWN,OMR,1, "maybe-remove-owner po=%p", po); /* * Remove owner record if * - this process does not own any PMCs * - this process has not allocated a system-wide sampling buffer */ if (LIST_EMPTY(&po->po_pmcs) && ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) { pmc_remove_owner(po); pmc_destroy_owner_descriptor(po); } } /* * Add an association between a target process and a PMC. */ static void pmc_link_target_process(struct pmc *pm, struct pmc_process *pp) { int ri; struct pmc_target *pt; #ifdef INVARIANTS struct pmc_thread *pt_td; #endif sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm != NULL && pp != NULL, ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d", __LINE__, pm, pp->pp_proc->p_pid)); KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1), ("[pmc,%d] Illegal reference count %d for process record %p", __LINE__, pp->pp_refcnt, (void *) pp)); ri = PMC_TO_ROWINDEX(pm); PMCDBG3(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p", pm, ri, pp); #ifdef HWPMC_DEBUG LIST_FOREACH(pt, &pm->pm_targets, pt_next) if (pt->pt_process == pp) KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets", __LINE__, pp, pm)); #endif pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO); pt->pt_process = pp; LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next); atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc, (uintptr_t)pm); if (pm->pm_owner->po_owner == pp->pp_proc) pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER; /* * Initialize the per-process values at this row index. */ pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ? pm->pm_sc.pm_reloadcount : 0; pp->pp_refcnt++; #ifdef INVARIANTS /* Confirm that the per-thread values at this row index are cleared. 
*/ if (PMC_TO_MODE(pm) == PMC_MODE_TS) { mtx_lock_spin(pp->pp_tdslock); LIST_FOREACH(pt_td, &pp->pp_tds, pt_next) { KASSERT(pt_td->pt_pmcs[ri].pt_pmcval == (pmc_value_t) 0, ("[pmc,%d] pt_pmcval not cleared for pid=%d at " "ri=%d", __LINE__, pp->pp_proc->p_pid, ri)); } mtx_unlock_spin(pp->pp_tdslock); } #endif } /* * Removes the association between a target process and a PMC. */ static void pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp) { int ri; struct proc *p; struct pmc_target *ptgt; struct pmc_thread *pt; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm != NULL && pp != NULL, ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp)); KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc, ("[pmc,%d] Illegal ref count %d on process record %p", __LINE__, pp->pp_refcnt, (void *) pp)); ri = PMC_TO_ROWINDEX(pm); PMCDBG3(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p", pm, ri, pp); KASSERT(pp->pp_pmcs[ri].pp_pmc == pm, ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__, ri, pm, pp->pp_pmcs[ri].pp_pmc)); pp->pp_pmcs[ri].pp_pmc = NULL; pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0; /* Clear the per-thread values at this row index. */ if (PMC_TO_MODE(pm) == PMC_MODE_TS) { mtx_lock_spin(pp->pp_tdslock); LIST_FOREACH(pt, &pp->pp_tds, pt_next) pt->pt_pmcs[ri].pt_pmcval = (pmc_value_t) 0; mtx_unlock_spin(pp->pp_tdslock); } /* Remove owner-specific flags */ if (pm->pm_owner->po_owner == pp->pp_proc) { pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS; pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER; } pp->pp_refcnt--; /* Remove the target process from the PMC structure */ LIST_FOREACH(ptgt, &pm->pm_targets, pt_next) if (ptgt->pt_process == pp) break; KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found " "in pmc %p", __LINE__, pp->pp_proc, pp, pm)); LIST_REMOVE(ptgt, pt_next); free(ptgt, M_PMC); /* if the PMC now lacks targets, send the owner a SIGIO */ if (LIST_EMPTY(&pm->pm_targets)) { p = pm->pm_owner->po_owner; PROC_LOCK(p); kern_psignal(p, SIGIO); PROC_UNLOCK(p); PMCDBG2(PRC,SIG,2, "signalling proc=%p signal=%d", p, SIGIO); } } /* * Check if PMC 'pm' may be attached to target process 't'. */ static int pmc_can_attach(struct pmc *pm, struct proc *t) { struct proc *o; /* pmc owner */ struct ucred *oc, *tc; /* owner, target credentials */ int decline_attach, i; /* * A PMC's owner can always attach that PMC to itself. */ if ((o = pm->pm_owner->po_owner) == t) return 0; PROC_LOCK(o); oc = o->p_ucred; crhold(oc); PROC_UNLOCK(o); PROC_LOCK(t); tc = t->p_ucred; crhold(tc); PROC_UNLOCK(t); /* * The effective uid of the PMC owner should match at least one * of the {effective,real,saved} uids of the target process. */ decline_attach = oc->cr_uid != tc->cr_uid && oc->cr_uid != tc->cr_svuid && oc->cr_uid != tc->cr_ruid; /* * Every one of the target's group ids, must be in the owner's * group list. */ for (i = 0; !decline_attach && i < tc->cr_ngroups; i++) decline_attach = !groupmember(tc->cr_groups[i], oc); /* check the read and saved gids too */ if (decline_attach == 0) decline_attach = !groupmember(tc->cr_rgid, oc) || !groupmember(tc->cr_svgid, oc); crfree(tc); crfree(oc); return !decline_attach; } /* * Attach a process to a PMC. 
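 * This path is normally reached via the PMC_OP_PMCATTACH request; a
 * hedged userland sketch using the documented libpmc(3) calls, with
 * 'pmcid' previously obtained from pmc_allocate(3) and 'pid' the
 * target process:
 *
 *	if (pmc_attach(pmcid, pid) < 0)
 *		err(1, "cannot attach pmc to process %d", pid);
 *	...
 *	(void) pmc_detach(pmcid, pid);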
*/ static int pmc_attach_one_process(struct proc *p, struct pmc *pm) { int ri, error; char *fullpath, *freepath; struct pmc_process *pp; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG5(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm, PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); /* * Locate the process descriptor corresponding to process 'p', * allocating space as needed. * * Verify that rowindex 'pm_rowindex' is free in the process * descriptor. * * If not, allocate space for a descriptor and link the * process descriptor and PMC. */ ri = PMC_TO_ROWINDEX(pm); /* mark process as using HWPMCs */ PROC_LOCK(p); p->p_flag |= P_HWPMC; PROC_UNLOCK(p); if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL) { error = ENOMEM; goto fail; } if (pp->pp_pmcs[ri].pp_pmc == pm) {/* already present at slot [ri] */ error = EEXIST; goto fail; } if (pp->pp_pmcs[ri].pp_pmc != NULL) { error = EBUSY; goto fail; } pmc_link_target_process(pm, pp); if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) && (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0) pm->pm_flags |= PMC_F_NEEDS_LOGFILE; pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */ /* issue an attach event to a configured log file */ if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) { if (p->p_flag & P_KPROC) { fullpath = kernelname; freepath = NULL; } else { pmc_getfilename(p->p_textvp, &fullpath, &freepath); pmclog_process_pmcattach(pm, p->p_pid, fullpath); } free(freepath, M_TEMP); if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmc_log_process_mappings(pm->pm_owner, p); } return (0); fail: PROC_LOCK(p); p->p_flag &= ~P_HWPMC; PROC_UNLOCK(p); return (error); } /* * Attach a process and optionally its children */ static int pmc_attach_process(struct proc *p, struct pmc *pm) { int error; struct proc *top; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG5(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm, PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); /* * If this PMC successfully allowed a GETMSR operation * in the past, disallow further ATTACHes. */ if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0) return EPERM; if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) return pmc_attach_one_process(p, pm); /* * Traverse all child processes, attaching them to * this PMC. */ sx_slock(&proctree_lock); top = p; for (;;) { if ((error = pmc_attach_one_process(p, pm)) != 0) break; if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) goto done; if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } } if (error) (void) pmc_detach_process(top, pm); done: sx_sunlock(&proctree_lock); return error; } /* * Detach a process from a PMC. If there are no other PMCs tracking * this process, remove the process structure from its hash table. If * 'flags' contains PMC_FLAG_REMOVE, then free the process structure. 
*/ static int pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags) { int ri; struct pmc_process *pp; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm != NULL, ("[pmc,%d] null pm pointer", __LINE__)); ri = PMC_TO_ROWINDEX(pm); PMCDBG6(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x", pm, ri, p, p->p_pid, p->p_comm, flags); if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) return ESRCH; if (pp->pp_pmcs[ri].pp_pmc != pm) return EINVAL; pmc_unlink_target_process(pm, pp); /* Issue a detach entry if a log file is configured */ if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_pmcdetach(pm, p->p_pid); /* * If there are no PMCs targeting this process, we remove its * descriptor from the target hash table and unset the P_HWPMC * flag in the struct proc. */ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, ("[pmc,%d] Illegal refcnt %d for process struct %p", __LINE__, pp->pp_refcnt, pp)); if (pp->pp_refcnt != 0) /* still a target of some PMC */ return 0; pmc_remove_process_descriptor(pp); if (flags & PMC_FLAG_REMOVE) pmc_destroy_process_descriptor(pp); PROC_LOCK(p); p->p_flag &= ~P_HWPMC; PROC_UNLOCK(p); return 0; } /* * Detach a process and optionally its descendants from a PMC. */ static int pmc_detach_process(struct proc *p, struct pmc *pm) { struct proc *top; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG5(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm, PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm); if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0) return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE); /* * Traverse all children, detaching them from this PMC. We * ignore errors since we could be detaching a PMC from a * partially attached proc tree. */ sx_slock(&proctree_lock); top = p; for (;;) { (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE); if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) goto done; if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } } done: sx_sunlock(&proctree_lock); if (LIST_EMPTY(&pm->pm_targets)) pm->pm_flags &= ~PMC_F_ATTACH_DONE; return 0; } /* * Thread context switch IN */ static void pmc_process_csw_in(struct thread *td) { int cpu; unsigned int adjri, ri; struct pmc *pm; struct proc *p; struct pmc_cpu *pc; struct pmc_hw *phw; pmc_value_t newvalue; struct pmc_process *pp; struct pmc_thread *pt; struct pmc_classdep *pcd; p = td->td_proc; pt = NULL; if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL) return; KASSERT(pp->pp_proc == td->td_proc, ("[pmc,%d] not my thread state", __LINE__)); critical_enter(); /* no preemption from this point */ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ PMCDBG5(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, p->p_pid, p->p_comm, pp); KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[pmc,%d] weird CPU id %d", __LINE__, cpu)); pc = pmc_pcpu[cpu]; for (ri = 0; ri < md->pmd_npmc; ri++) { if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL) continue; KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] Target PMC in non-virtual mode (%d)", __LINE__, PMC_TO_MODE(pm))); KASSERT(PMC_TO_ROWINDEX(pm) == ri, ("[pmc,%d] Row index mismatch pmc %d != ri %d", __LINE__, PMC_TO_ROWINDEX(pm), ri)); /* * Only PMCs that are marked as 'RUNNING' need * be placed on hardware. 
*/ if (pm->pm_state != PMC_STATE_RUNNING) continue; KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm, (unsigned long)counter_u64_fetch(pm->pm_runcount))); /* increment PMC runcount */ counter_u64_add(pm->pm_runcount, 1); /* configure the HWPMC we are going to use. */ pcd = pmc_ri_to_classdep(md, ri, &adjri); pcd->pcd_config_pmc(cpu, adjri, pm); phw = pc->pc_hwpmcs[ri]; KASSERT(phw != NULL, ("[pmc,%d] null hw pointer", __LINE__)); KASSERT(phw->phw_pmc == pm, ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__, phw->phw_pmc, pm)); /* * Write out saved value and start the PMC. * * Sampling PMCs use a per-thread value, while * counting mode PMCs use a per-pmc value that is * inherited across descendants. */ if (PMC_TO_MODE(pm) == PMC_MODE_TS) { if (pt == NULL) pt = pmc_find_thread_descriptor(pp, td, PMC_FLAG_NONE); KASSERT(pt != NULL, ("[pmc,%d] No thread found for td=%p", __LINE__, td)); mtx_pool_lock_spin(pmc_mtxpool, pm); /* * If we have a thread descriptor, use the per-thread * counter in the descriptor. If not, we will use * a per-process counter. * * TODO: Remove the per-process "safety net" once * we have thoroughly tested that we don't hit the * above assert. */ if (pt != NULL) { if (pt->pt_pmcs[ri].pt_pmcval > 0) newvalue = pt->pt_pmcs[ri].pt_pmcval; else newvalue = pm->pm_sc.pm_reloadcount; } else { /* * Use the saved value calculated after the most * recent time a thread using the shared counter * switched out. Reset the saved count in case * another thread from this process switches in * before any threads switch out. */ newvalue = pp->pp_pmcs[ri].pp_pmcval; pp->pp_pmcs[ri].pp_pmcval = pm->pm_sc.pm_reloadcount; } mtx_pool_unlock_spin(pmc_mtxpool, pm); KASSERT(newvalue > 0 && newvalue <= pm->pm_sc.pm_reloadcount, ("[pmc,%d] pmcval outside of expected range cpu=%d " "ri=%d pmcval=%jx pm_reloadcount=%jx", __LINE__, cpu, ri, newvalue, pm->pm_sc.pm_reloadcount)); } else { KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC, ("[pmc,%d] illegal mode=%d", __LINE__, PMC_TO_MODE(pm))); mtx_pool_lock_spin(pmc_mtxpool, pm); newvalue = PMC_PCPU_SAVED(cpu, ri) = pm->pm_gv.pm_savedvalue; mtx_pool_unlock_spin(pmc_mtxpool, pm); } PMCDBG3(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue); pcd->pcd_write_pmc(cpu, adjri, newvalue); /* If a sampling mode PMC, reset stalled state. */ if (PMC_TO_MODE(pm) == PMC_MODE_TS) pm->pm_pcpu_state[cpu].pps_stalled = 0; /* Indicate that we desire this to run. */ pm->pm_pcpu_state[cpu].pps_cpustate = 1; /* Start the PMC. */ pcd->pcd_start_pmc(cpu, adjri); } /* * perform any other architecture/cpu dependent thread * switch-in actions. */ (void) (*md->pmd_switch_in)(pc, pp); critical_exit(); } /* * Thread context switch OUT. */ static void pmc_process_csw_out(struct thread *td) { int cpu; int64_t tmp; struct pmc *pm; struct proc *p; enum pmc_mode mode; struct pmc_cpu *pc; pmc_value_t newvalue; unsigned int adjri, ri; struct pmc_process *pp; struct pmc_thread *pt = NULL; struct pmc_classdep *pcd; /* * Locate our process descriptor; this may be NULL if * this process is exiting and we have already removed * the process from the target process table. * * Note that due to kernel preemption, multiple * context switches may happen while the process is * exiting. * * Note also that if the target process cannot be * found we still need to deconfigure any PMCs that * are currently running on hardware. 
*/ p = td->td_proc; pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE); /* * save PMCs */ critical_enter(); cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */ PMCDBG5(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p, p->p_pid, p->p_comm, pp); KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[pmc,%d weird CPU id %d", __LINE__, cpu)); pc = pmc_pcpu[cpu]; /* * When a PMC gets unlinked from a target PMC, it will * be removed from the target's pp_pmc[] array. * * However, on a MP system, the target could have been * executing on another CPU at the time of the unlink. * So, at context switch OUT time, we need to look at * the hardware to determine if a PMC is scheduled on * it. */ for (ri = 0; ri < md->pmd_npmc; ri++) { pcd = pmc_ri_to_classdep(md, ri, &adjri); pm = NULL; (void) (*pcd->pcd_get_config)(cpu, adjri, &pm); if (pm == NULL) /* nothing at this row index */ continue; mode = PMC_TO_MODE(pm); if (!PMC_IS_VIRTUAL_MODE(mode)) continue; /* not a process virtual PMC */ KASSERT(PMC_TO_ROWINDEX(pm) == ri, ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", __LINE__, PMC_TO_ROWINDEX(pm), ri)); /* * Change desired state, and then stop if not stalled. * This two-step dance should avoid race conditions where * an interrupt re-enables the PMC after this code has * already checked the pm_stalled flag. */ pm->pm_pcpu_state[cpu].pps_cpustate = 0; if (pm->pm_pcpu_state[cpu].pps_stalled == 0) pcd->pcd_stop_pmc(cpu, adjri); KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm, (unsigned long)counter_u64_fetch(pm->pm_runcount))); /* reduce this PMC's runcount */ counter_u64_add(pm->pm_runcount, -1); /* * If this PMC is associated with this process, * save the reading. */ if (pm->pm_state != PMC_STATE_DELETED && pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) { KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc)); KASSERT(pp->pp_refcnt > 0, ("[pmc,%d] pp refcnt = %d", __LINE__, pp->pp_refcnt)); pcd->pcd_read_pmc(cpu, adjri, &newvalue); if (mode == PMC_MODE_TS) { PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d val=%jd (samp)", cpu, ri, newvalue); if (pt == NULL) pt = pmc_find_thread_descriptor(pp, td, PMC_FLAG_NONE); KASSERT(pt != NULL, ("[pmc,%d] No thread found for td=%p", __LINE__, td)); mtx_pool_lock_spin(pmc_mtxpool, pm); /* * If we have a thread descriptor, save the * per-thread counter in the descriptor. If not, * we will update the per-process counter. * * TODO: Remove the per-process "safety net" * once we have thoroughly tested that we * don't hit the above assert. */ if (pt != NULL) pt->pt_pmcs[ri].pt_pmcval = newvalue; else { /* * For sampling process-virtual PMCs, * newvalue is the number of events to * be seen until the next sampling * interrupt. We can just add the events * left from this invocation to the * counter, then adjust in case we * overflow our range. * * (Recall that we reload the counter * every time we use it.) */ pp->pp_pmcs[ri].pp_pmcval += newvalue; if (pp->pp_pmcs[ri].pp_pmcval > pm->pm_sc.pm_reloadcount) pp->pp_pmcs[ri].pp_pmcval -= pm->pm_sc.pm_reloadcount; } mtx_pool_unlock_spin(pmc_mtxpool, pm); } else { tmp = newvalue - PMC_PCPU_SAVED(cpu,ri); PMCDBG3(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd (count)", cpu, ri, tmp); /* * For counting process-virtual PMCs, * we expect the count to be * increasing monotonically, modulo a 64 * bit wraparound. 
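 * The number of events accumulated while this thread was on
 * the CPU is simply
 *
 *     tmp = newvalue - PMC_PCPU_SAVED(cpu, ri);
 *
 * e.g. a saved value of 1000 and a hardware reading of 1500
 * yield 500 new events, which are credited below both to the
 * owner's global count (pm_gv.pm_savedvalue) and to the
 * target process's virtual count (pp_pmcval).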
*/ KASSERT(tmp >= 0, ("[pmc,%d] negative increment cpu=%d " "ri=%d newvalue=%jx saved=%jx " "incr=%jx", __LINE__, cpu, ri, newvalue, PMC_PCPU_SAVED(cpu,ri), tmp)); mtx_pool_lock_spin(pmc_mtxpool, pm); pm->pm_gv.pm_savedvalue += tmp; pp->pp_pmcs[ri].pp_pmcval += tmp; mtx_pool_unlock_spin(pmc_mtxpool, pm); if (pm->pm_flags & PMC_F_LOG_PROCCSW) pmclog_process_proccsw(pm, pp, tmp, td); } } /* mark hardware as free */ pcd->pcd_config_pmc(cpu, adjri, NULL); } /* * perform any other architecture/cpu dependent thread * switch out functions. */ (void) (*md->pmd_switch_out)(pc, pp); critical_exit(); } /* * A new thread for a process. */ static void pmc_process_thread_add(struct thread *td) { struct pmc_process *pmc; pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); if (pmc != NULL) pmc_find_thread_descriptor(pmc, td, PMC_FLAG_ALLOCATE); } /* * A thread delete for a process. */ static void pmc_process_thread_delete(struct thread *td) { struct pmc_process *pmc; pmc = pmc_find_process_descriptor(td->td_proc, PMC_FLAG_NONE); if (pmc != NULL) pmc_thread_descriptor_pool_free(pmc_find_thread_descriptor(pmc, td, PMC_FLAG_REMOVE)); } /* * A userret() call for a thread. */ static void pmc_process_thread_userret(struct thread *td) { sched_pin(); pmc_capture_user_callchain(curcpu, PMC_UR, td->td_frame); sched_unpin(); } /* * A mapping change for a process. */ static void pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm) { int ri; pid_t pid; char *fullpath, *freepath; const struct pmc *pm; struct pmc_owner *po; const struct pmc_process *pp; freepath = fullpath = NULL; MPASS(!in_epoch(global_epoch_preempt)); pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath); pid = td->td_proc->p_pid; PMC_EPOCH_ENTER(); /* Inform owners of all system-wide sampling PMCs. */ CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_in(po, pid, pkm->pm_address, fullpath); if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) goto done; /* * Inform sampling PMC owners tracking this process. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmclog_process_map_in(pm->pm_owner, pid, pkm->pm_address, fullpath); done: if (freepath) free(freepath, M_TEMP); PMC_EPOCH_EXIT(); } /* * Log an munmap request. */ static void pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm) { int ri; pid_t pid; struct pmc_owner *po; const struct pmc *pm; const struct pmc_process *pp; pid = td->td_proc->p_pid; PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_out(po, pid, pkm->pm_address, pkm->pm_address + pkm->pm_size); PMC_EPOCH_EXIT(); if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL) return; for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL && PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmclog_process_map_out(pm->pm_owner, pid, pkm->pm_address, pkm->pm_address + pkm->pm_size); } /* * Log mapping information about the kernel. 
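 * One MAP-IN record is emitted per loaded kernel object, using
 * the list returned by linker_hwpmc_list_objects().  For a
 * system-sampling (SS) PMC the existing process table is also
 * logged, and the work is done at most once per owner
 * (PMC_PO_INITIAL_MAPPINGS_DONE).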
*/ static void pmc_log_kernel_mappings(struct pmc *pm) { struct pmc_owner *po; struct pmckern_map_in *km, *kmbase; MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx)); KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] non-sampling PMC (%p) desires mapping information", __LINE__, (void *) pm)); po = pm->pm_owner; if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE) return; if (PMC_TO_MODE(pm) == PMC_MODE_SS) pmc_process_allproc(pm); /* * Log the current set of kernel modules. */ kmbase = linker_hwpmc_list_objects(); for (km = kmbase; km->pm_file != NULL; km++) { PMCDBG2(LOG,REG,1,"%s %p", (char *) km->pm_file, (void *) km->pm_address); pmclog_process_map_in(po, (pid_t) -1, km->pm_address, km->pm_file); } free(kmbase, M_LINKER); po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE; } /* * Log the mappings for a single process. */ static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p) { vm_map_t map; struct vnode *vp; struct vmspace *vm; vm_map_entry_t entry; vm_offset_t last_end; u_int last_timestamp; struct vnode *last_vp; vm_offset_t start_addr; vm_object_t obj, lobj, tobj; char *fullpath, *freepath; last_vp = NULL; last_end = (vm_offset_t) 0; fullpath = freepath = NULL; if ((vm = vmspace_acquire_ref(p)) == NULL) return; map = &vm->vm_map; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { if (entry == NULL) { PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly " "NULL! pid=%d vm_map=%p\n", p->p_pid, map); break; } /* * We only care about executable map entries. */ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) || !(entry->protection & VM_PROT_EXECUTE) || (entry->object.vm_object == NULL)) { continue; } obj = entry->object.vm_object; VM_OBJECT_RLOCK(obj); /* * Walk the backing_object list to find the base * (non-shadowed) vm_object. */ for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) { if (tobj != obj) VM_OBJECT_RLOCK(tobj); if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); lobj = tobj; } /* * At this point lobj is the base vm_object and it is locked. */ if (lobj == NULL) { PMCDBG3(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d " "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj); VM_OBJECT_RUNLOCK(obj); continue; } vp = vm_object_vnode(lobj); if (vp == NULL) { if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); VM_OBJECT_RUNLOCK(obj); continue; } /* * Skip contiguous regions that point to the same * vnode, so we don't emit redundant MAP-IN * directives. */ if (entry->start == last_end && vp == last_vp) { last_end = entry->end; if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); VM_OBJECT_RUNLOCK(obj); continue; } /* * We don't want to keep the proc's vm_map or this * vm_object locked while we walk the pathname, since * vn_fullpath() can sleep. However, if we drop the * lock, it's possible for concurrent activity to * modify the vm_map list. To protect against this, * we save the vm_map timestamp before we release the * lock, and check it after we reacquire the lock * below. */ start_addr = entry->start; last_end = entry->end; last_timestamp = map->timestamp; vm_map_unlock_read(map); vref(vp); if (lobj != obj) VM_OBJECT_RUNLOCK(lobj); VM_OBJECT_RUNLOCK(obj); freepath = NULL; pmc_getfilename(vp, &fullpath, &freepath); last_vp = vp; vrele(vp); vp = NULL; pmclog_process_map_in(po, p->p_pid, start_addr, fullpath); if (freepath) free(freepath, M_TEMP); vm_map_lock_read(map); /* * If our saved timestamp doesn't match, this means * that the vm_map was modified out from under us and * we can't trust our current "entry" pointer. 
Do a * new lookup for this entry. If there is no entry * for this address range, vm_map_lookup_entry() will * return the previous one, so we always want to go to * entry->next on the next loop iteration. * * There is an edge condition here that can occur if * there is no entry at or before this address. In * this situation, vm_map_lookup_entry returns * &map->header, which would cause our loop to abort * without processing the rest of the map. However, * in practice this will never happen for process * vm_map. This is because the executable's text * segment is the first mapping in the proc's address * space, and this mapping is never removed until the * process exits, so there will always be a non-header * entry at or before the requested address for * vm_map_lookup_entry to return. */ if (map->timestamp != last_timestamp) vm_map_lookup_entry(map, last_end - 1, &entry); } vm_map_unlock_read(map); vmspace_free(vm); return; } /* * Log mappings for all processes in the system. */ static void pmc_log_all_process_mappings(struct pmc_owner *po) { struct proc *p, *top; sx_assert(&pmc_sx, SX_XLOCKED); if ((p = pfind(1)) == NULL) panic("[pmc,%d] Cannot find init", __LINE__); PROC_UNLOCK(p); sx_slock(&proctree_lock); top = p; for (;;) { pmc_log_process_mappings(po, p); if (!LIST_EMPTY(&p->p_children)) p = LIST_FIRST(&p->p_children); else for (;;) { if (p == top) goto done; if (LIST_NEXT(p, p_sibling)) { p = LIST_NEXT(p, p_sibling); break; } p = p->p_pptr; } } done: sx_sunlock(&proctree_lock); } /* * The 'hook' invoked from the kernel proper */ #ifdef HWPMC_DEBUG const char *pmc_hooknames[] = { /* these strings correspond to PMC_FN_* in */ "", "EXEC", "CSW-IN", "CSW-OUT", "SAMPLE", "UNUSED1", "UNUSED2", "MMAP", "MUNMAP", "CALLCHAIN-NMI", "CALLCHAIN-SOFT", "SOFTSAMPLING", "THR-CREATE", "THR-EXIT", "THR-USERRET", "THR-CREATE-LOG", "THR-EXIT-LOG", "PROC-CREATE-LOG" }; #endif static int pmc_hook_handler(struct thread *td, int function, void *arg) { int cpu; PMCDBG4(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function, pmc_hooknames[function], arg); switch (function) { /* * Process exec() */ case PMC_FN_PROCESS_EXEC: { char *fullpath, *freepath; unsigned int ri; int is_using_hwpmcs; struct pmc *pm; struct proc *p; struct pmc_owner *po; struct pmc_process *pp; struct pmckern_procexec *pk; sx_assert(&pmc_sx, SX_XLOCKED); p = td->td_proc; pmc_getfilename(p->p_textvp, &fullpath, &freepath); pk = (struct pmckern_procexec *) arg; PMC_EPOCH_ENTER(); /* Inform owners of SS mode PMCs of the exec event. */ CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procexec(po, PMC_ID_INVALID, p->p_pid, pk->pm_entryaddr, fullpath); PMC_EPOCH_EXIT(); PROC_LOCK(p); is_using_hwpmcs = p->p_flag & P_HWPMC; PROC_UNLOCK(p); if (!is_using_hwpmcs) { if (freepath) free(freepath, M_TEMP); break; } /* * PMCs are not inherited across an exec(): remove any * PMCs that this process is the owner of. */ if ((po = pmc_find_owner_descriptor(p)) != NULL) { pmc_remove_owner(po); pmc_destroy_owner_descriptor(po); } /* * If the process being exec'ed is not the target of any * PMC, we are done. */ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) { if (freepath) free(freepath, M_TEMP); break; } /* * Log the exec event to all monitoring owners. Skip * owners who have already received the event because * they had system sampling PMCs active. 
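 * Owners with a non-zero po_sscount were already notified by
 * the pmc_ss_owners walk above, so the loop below only covers
 * owners whose PMCs are attached to this process and who have
 * a log file configured.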
*/ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { po = pm->pm_owner; if (po->po_sscount == 0 && po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procexec(po, pm->pm_id, p->p_pid, pk->pm_entryaddr, fullpath); } if (freepath) free(freepath, M_TEMP); PMCDBG4(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d", p, p->p_pid, p->p_comm, pk->pm_credentialschanged); if (pk->pm_credentialschanged == 0) /* no change */ break; /* * If the newly exec()'ed process has a different credential * than before, allow it to be the target of a PMC only if * the PMC's owner has sufficient privilege. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) if (pmc_can_attach(pm, td->td_proc) != 0) pmc_detach_one_process(td->td_proc, pm, PMC_FLAG_NONE); KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc, ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__, pp->pp_refcnt, pp)); /* * If this process is no longer the target of any * PMCs, we can remove the process entry and free * up space. */ if (pp->pp_refcnt == 0) { pmc_remove_process_descriptor(pp); pmc_destroy_process_descriptor(pp); break; } } break; case PMC_FN_CSW_IN: pmc_process_csw_in(td); break; case PMC_FN_CSW_OUT: pmc_process_csw_out(td); break; /* * Process accumulated PC samples. * * This function is expected to be called by hardclock() for * each CPU that has accumulated PC samples. * * This function is to be executed on the CPU whose samples * are being processed. */ case PMC_FN_DO_SAMPLES: /* * Clear the cpu specific bit in the CPU mask before * do the rest of the processing. If the NMI handler * gets invoked after the "atomic_clear_int()" call * below but before "pmc_process_samples()" gets * around to processing the interrupt, then we will * come back here at the next hardclock() tick (and * may find nothing to do if "pmc_process_samples()" * had already processed the interrupt). We don't * lose the interrupt sample. */ DPCPU_SET(pmc_sampled, 0); cpu = PCPU_GET(cpuid); pmc_process_samples(cpu, PMC_HR); pmc_process_samples(cpu, PMC_SR); pmc_process_samples(cpu, PMC_UR); break; case PMC_FN_MMAP: pmc_process_mmap(td, (struct pmckern_map_in *) arg); break; case PMC_FN_MUNMAP: MPASS(in_epoch(global_epoch_preempt) || sx_xlocked(&pmc_sx)); pmc_process_munmap(td, (struct pmckern_map_out *) arg); break; case PMC_FN_PROC_CREATE_LOG: pmc_process_proccreate((struct proc *)arg); break; case PMC_FN_USER_CALLCHAIN: /* * Record a call chain. */ KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR, (struct trapframe *) arg); KASSERT(td->td_pinned == 1, ("[pmc,%d] invalid td_pinned value", __LINE__)); sched_unpin(); /* Can migrate safely now. */ td->td_pflags &= ~TDP_CALLCHAIN; break; case PMC_FN_USER_CALLCHAIN_SOFT: /* * Record a call chain. */ KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); cpu = PCPU_GET(cpuid); pmc_capture_user_callchain(cpu, PMC_SR, (struct trapframe *) arg); KASSERT(td->td_pinned == 1, ("[pmc,%d] invalid td_pinned value", __LINE__)); sched_unpin(); /* Can migrate safely now. */ td->td_pflags &= ~TDP_CALLCHAIN; break; case PMC_FN_SOFT_SAMPLING: /* * Call soft PMC sampling intr. 
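 * Soft samples use the same per-CPU buffering as hardware
 * interrupts: they are queued on the PMC_SR ring and drained
 * later by the PMC_FN_DO_SAMPLES handler above.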
*/ pmc_soft_intr((struct pmckern_soft *) arg); break; case PMC_FN_THR_CREATE: pmc_process_thread_add(td); pmc_process_threadcreate(td); break; case PMC_FN_THR_CREATE_LOG: pmc_process_threadcreate(td); break; case PMC_FN_THR_EXIT: KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); pmc_process_thread_delete(td); pmc_process_threadexit(td); break; case PMC_FN_THR_EXIT_LOG: pmc_process_threadexit(td); break; case PMC_FN_THR_USERRET: KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); pmc_process_thread_userret(td); break; default: #ifdef HWPMC_DEBUG KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function)); #endif break; } return 0; } /* * allocate a 'struct pmc_owner' descriptor in the owner hash table. */ static struct pmc_owner * pmc_allocate_owner_descriptor(struct proc *p) { uint32_t hindex; struct pmc_owner *po; struct pmc_ownerhash *poh; hindex = PMC_HASH_PTR(p, pmc_ownerhashmask); poh = &pmc_ownerhash[hindex]; /* allocate space for N pointers and one descriptor struct */ po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO); po->po_owner = p; LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */ TAILQ_INIT(&po->po_logbuffers); mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN); PMCDBG4(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p", p, p->p_pid, p->p_comm, po); return po; } static void pmc_destroy_owner_descriptor(struct pmc_owner *po) { PMCDBG4(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)", po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); mtx_destroy(&po->po_mtx); free(po, M_PMC); } /* * Allocate a thread descriptor from the free pool. * * NOTE: This *can* return NULL. */ static struct pmc_thread * pmc_thread_descriptor_pool_alloc(void) { struct pmc_thread *pt; mtx_lock_spin(&pmc_threadfreelist_mtx); if ((pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) { LIST_REMOVE(pt, pt_next); pmc_threadfreelist_entries--; } mtx_unlock_spin(&pmc_threadfreelist_mtx); return (pt); } /* * Add a thread descriptor to the free pool. We use this instead of free() * to maintain a cache of free entries. Additionally, we can safely call * this function when we cannot call free(), such as in a critical section. * */ static void pmc_thread_descriptor_pool_free(struct pmc_thread *pt) { if (pt == NULL) return; memset(pt, 0, THREADENTRY_SIZE); mtx_lock_spin(&pmc_threadfreelist_mtx); LIST_INSERT_HEAD(&pmc_threadfreelist, pt, pt_next); pmc_threadfreelist_entries++; if (pmc_threadfreelist_entries > pmc_threadfreelist_max) GROUPTASK_ENQUEUE(&free_gtask); mtx_unlock_spin(&pmc_threadfreelist_mtx); } /* * A callout to manage the free list. */ static void pmc_thread_descriptor_pool_free_task(void *arg __unused) { struct pmc_thread *pt; LIST_HEAD(, pmc_thread) tmplist; int delta; LIST_INIT(&tmplist); /* Determine what changes, if any, we need to make. */ mtx_lock_spin(&pmc_threadfreelist_mtx); delta = pmc_threadfreelist_entries - pmc_threadfreelist_max; while (delta > 0 && (pt = LIST_FIRST(&pmc_threadfreelist)) != NULL) { delta--; LIST_REMOVE(pt, pt_next); LIST_INSERT_HEAD(&tmplist, pt, pt_next); } mtx_unlock_spin(&pmc_threadfreelist_mtx); /* If there are entries to free, free them. */ while (!LIST_EMPTY(&tmplist)) { pt = LIST_FIRST(&tmplist); LIST_REMOVE(pt, pt_next); free(pt, M_PMC); } } /* * Drain the thread free pool, freeing all allocations. 
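 * Unlike the task above, which only trims the pool back to
 * pmc_threadfreelist_max, this releases every cached entry.
 * No lock is taken, so it is presumably only called once no
 * other consumers of the free list remain (e.g. at module
 * teardown).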
*/ static void pmc_thread_descriptor_pool_drain() { struct pmc_thread *pt, *next; LIST_FOREACH_SAFE(pt, &pmc_threadfreelist, pt_next, next) { LIST_REMOVE(pt, pt_next); free(pt, M_PMC); } } /* * find the descriptor corresponding to thread 'td', adding or removing it * as specified by 'mode'. * * Note that this supports additional mode flags in addition to those * supported by pmc_find_process_descriptor(): * PMC_FLAG_NOWAIT: Causes the function to not wait for mallocs. * This makes it safe to call while holding certain other locks. */ static struct pmc_thread * pmc_find_thread_descriptor(struct pmc_process *pp, struct thread *td, uint32_t mode) { struct pmc_thread *pt = NULL, *ptnew = NULL; int wait_flag; KASSERT(td != NULL, ("[pmc,%d] called to add NULL td", __LINE__)); /* * Pre-allocate memory in the PMC_FLAG_ALLOCATE case prior to * acquiring the lock. */ if (mode & PMC_FLAG_ALLOCATE) { if ((ptnew = pmc_thread_descriptor_pool_alloc()) == NULL) { wait_flag = M_WAITOK; if ((mode & PMC_FLAG_NOWAIT) || in_epoch(global_epoch_preempt)) wait_flag = M_NOWAIT; ptnew = malloc(THREADENTRY_SIZE, M_PMC, wait_flag|M_ZERO); } } mtx_lock_spin(pp->pp_tdslock); LIST_FOREACH(pt, &pp->pp_tds, pt_next) if (pt->pt_td == td) break; if ((mode & PMC_FLAG_REMOVE) && pt != NULL) LIST_REMOVE(pt, pt_next); if ((mode & PMC_FLAG_ALLOCATE) && pt == NULL && ptnew != NULL) { pt = ptnew; ptnew = NULL; pt->pt_td = td; LIST_INSERT_HEAD(&pp->pp_tds, pt, pt_next); } mtx_unlock_spin(pp->pp_tdslock); if (ptnew != NULL) { free(ptnew, M_PMC); } return pt; } /* * Try to add thread descriptors for each thread in a process. */ static void pmc_add_thread_descriptors_from_proc(struct proc *p, struct pmc_process *pp) { struct thread *curtd; struct pmc_thread **tdlist; int i, tdcnt, tdlistsz; KASSERT(!PROC_LOCKED(p), ("[pmc,%d] proc unexpectedly locked", __LINE__)); tdcnt = 32; restart: tdlistsz = roundup2(tdcnt, 32); tdcnt = 0; tdlist = malloc(sizeof(struct pmc_thread*) * tdlistsz, M_TEMP, M_WAITOK); PROC_LOCK(p); FOREACH_THREAD_IN_PROC(p, curtd) tdcnt++; if (tdcnt >= tdlistsz) { PROC_UNLOCK(p); free(tdlist, M_TEMP); goto restart; } /* * Try to add each thread to the list without sleeping. If unable, * add to a queue to retry after dropping the process lock. */ tdcnt = 0; FOREACH_THREAD_IN_PROC(p, curtd) { tdlist[tdcnt] = pmc_find_thread_descriptor(pp, curtd, PMC_FLAG_ALLOCATE|PMC_FLAG_NOWAIT); if (tdlist[tdcnt] == NULL) { PROC_UNLOCK(p); for (i = 0; i <= tdcnt; i++) pmc_thread_descriptor_pool_free(tdlist[i]); free(tdlist, M_TEMP); goto restart; } tdcnt++; } PROC_UNLOCK(p); free(tdlist, M_TEMP); } /* * find the descriptor corresponding to process 'p', adding or removing it * as specified by 'mode'. */ static struct pmc_process * pmc_find_process_descriptor(struct proc *p, uint32_t mode) { uint32_t hindex; struct pmc_process *pp, *ppnew; struct pmc_processhash *pph; hindex = PMC_HASH_PTR(p, pmc_processhashmask); pph = &pmc_processhash[hindex]; ppnew = NULL; /* * Pre-allocate memory in the PMC_FLAG_ALLOCATE case since we * cannot call malloc(9) once we hold a spin lock. 
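 * The pattern matches pmc_find_thread_descriptor() above:
 * allocate optimistically, take the spin lock, install the new
 * descriptor only if the lookup found nothing, and free any
 * unused pre-allocation after the lock is dropped.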
*/ if (mode & PMC_FLAG_ALLOCATE) ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc * sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO); mtx_lock_spin(&pmc_processhash_mtx); LIST_FOREACH(pp, pph, pp_next) if (pp->pp_proc == p) break; if ((mode & PMC_FLAG_REMOVE) && pp != NULL) LIST_REMOVE(pp, pp_next); if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL && ppnew != NULL) { ppnew->pp_proc = p; LIST_INIT(&ppnew->pp_tds); ppnew->pp_tdslock = mtx_pool_find(pmc_mtxpool, ppnew); LIST_INSERT_HEAD(pph, ppnew, pp_next); mtx_unlock_spin(&pmc_processhash_mtx); pp = ppnew; ppnew = NULL; /* Add thread descriptors for this process' current threads. */ pmc_add_thread_descriptors_from_proc(p, pp); } else mtx_unlock_spin(&pmc_processhash_mtx); if (ppnew != NULL) free(ppnew, M_PMC); return pp; } /* * remove a process descriptor from the process hash table. */ static void pmc_remove_process_descriptor(struct pmc_process *pp) { KASSERT(pp->pp_refcnt == 0, ("[pmc,%d] Removing process descriptor %p with count %d", __LINE__, pp, pp->pp_refcnt)); mtx_lock_spin(&pmc_processhash_mtx); LIST_REMOVE(pp, pp_next); mtx_unlock_spin(&pmc_processhash_mtx); } /* * destroy a process descriptor. */ static void pmc_destroy_process_descriptor(struct pmc_process *pp) { struct pmc_thread *pmc_td; while ((pmc_td = LIST_FIRST(&pp->pp_tds)) != NULL) { LIST_REMOVE(pmc_td, pt_next); pmc_thread_descriptor_pool_free(pmc_td); } free(pp, M_PMC); } /* * find an owner descriptor corresponding to proc 'p' */ static struct pmc_owner * pmc_find_owner_descriptor(struct proc *p) { uint32_t hindex; struct pmc_owner *po; struct pmc_ownerhash *poh; hindex = PMC_HASH_PTR(p, pmc_ownerhashmask); poh = &pmc_ownerhash[hindex]; po = NULL; LIST_FOREACH(po, poh, po_next) if (po->po_owner == p) break; PMCDBG5(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> " "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po); return po; } /* * pmc_allocate_pmc_descriptor * * Allocate a pmc descriptor and initialize its * fields. */ static struct pmc * pmc_allocate_pmc_descriptor(void) { struct pmc *pmc; pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO); pmc->pm_runcount = counter_u64_alloc(M_WAITOK); pmc->pm_pcpu_state = malloc(sizeof(struct pmc_pcpu_state)*mp_ncpus, M_PMC, M_WAITOK|M_ZERO); PMCDBG1(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc); return pmc; } /* * Destroy a pmc descriptor. */ static void pmc_destroy_pmc_descriptor(struct pmc *pm) { KASSERT(pm->pm_state == PMC_STATE_DELETED || pm->pm_state == PMC_STATE_FREE, ("[pmc,%d] destroying non-deleted PMC", __LINE__)); KASSERT(LIST_EMPTY(&pm->pm_targets), ("[pmc,%d] destroying pmc with targets", __LINE__)); KASSERT(pm->pm_owner == NULL, ("[pmc,%d] destroying pmc attached to an owner", __LINE__)); KASSERT(counter_u64_fetch(pm->pm_runcount) == 0, ("[pmc,%d] pmc has non-zero run count %ld", __LINE__, (unsigned long)counter_u64_fetch(pm->pm_runcount))); counter_u64_free(pm->pm_runcount); free(pm->pm_pcpu_state, M_PMC); free(pm, M_PMC); } static void pmc_wait_for_pmc_idle(struct pmc *pm) { #ifdef INVARIANTS volatile int maxloop; maxloop = 100 * pmc_cpu_max(); #endif /* * Loop (with a forced context switch) till the PMC's runcount * comes down to zero. 
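 * pm_runcount is incremented at context switch in and
 * decremented at switch out (see pmc_process_csw_in/out), so
 * forcing context switches lets every CPU still referencing
 * this PMC pass through the switch-out path and drop its
 * reference.  Under INVARIANTS we assert after roughly 100
 * iterations per CPU.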
*/ pmclog_flush(pm->pm_owner, 1); while (counter_u64_fetch(pm->pm_runcount) > 0) { pmclog_flush(pm->pm_owner, 1); #ifdef INVARIANTS maxloop--; KASSERT(maxloop > 0, ("[pmc,%d] (ri%d, rc%ld) waiting too long for " "pmc to be free", __LINE__, PMC_TO_ROWINDEX(pm), (unsigned long)counter_u64_fetch(pm->pm_runcount))); #endif pmc_force_context_switch(); } } /* * This function does the following things: * * - detaches the PMC from hardware * - unlinks all target threads that were attached to it * - removes the PMC from its owner's list * - destroys the PMC private mutex * * Once this function completes, the given pmc pointer can be freed by * calling pmc_destroy_pmc_descriptor(). */ static void pmc_release_pmc_descriptor(struct pmc *pm) { enum pmc_mode mode; struct pmc_hw *phw; u_int adjri, ri, cpu; struct pmc_owner *po; struct pmc_binding pb; struct pmc_process *pp; struct pmc_classdep *pcd; struct pmc_target *ptgt, *tmp; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(pm, ("[pmc,%d] null pmc", __LINE__)); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); mode = PMC_TO_MODE(pm); PMCDBG3(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri, mode); /* * First, we take the PMC off hardware. */ cpu = 0; if (PMC_IS_SYSTEM_MODE(mode)) { /* * A system mode PMC runs on a specific CPU. Switch * to this CPU and turn hardware off. */ pmc_save_cpu_binding(&pb); cpu = PMC_TO_CPU(pm); pmc_select_cpu(cpu); /* switch off non-stalled CPUs */ pm->pm_pcpu_state[cpu].pps_cpustate = 0; if (pm->pm_state == PMC_STATE_RUNNING && pm->pm_pcpu_state[cpu].pps_stalled == 0) { phw = pmc_pcpu[cpu]->pc_hwpmcs[ri]; KASSERT(phw->phw_pmc == pm, ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)", __LINE__, ri, phw->phw_pmc, pm)); PMCDBG2(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri); critical_enter(); pcd->pcd_stop_pmc(cpu, adjri); critical_exit(); } PMCDBG2(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri); critical_enter(); pcd->pcd_config_pmc(cpu, adjri, NULL); critical_exit(); /* adjust the global and process count of SS mode PMCs */ if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) { po = pm->pm_owner; po->po_sscount--; if (po->po_sscount == 0) { atomic_subtract_rel_int(&pmc_ss_count, 1); CK_LIST_REMOVE(po, po_ssnext); epoch_wait_preempt(global_epoch_preempt); } } pm->pm_state = PMC_STATE_DELETED; pmc_restore_cpu_binding(&pb); /* * We could have references to this PMC structure in * the per-cpu sample queues. Wait for the queue to * drain. */ pmc_wait_for_pmc_idle(pm); } else if (PMC_IS_VIRTUAL_MODE(mode)) { /* * A virtual PMC could be running on multiple CPUs at * a given instant. * * By marking its state as DELETED, we ensure that * this PMC is never further scheduled on hardware. * * Then we wait till all CPUs are done with this PMC. */ pm->pm_state = PMC_STATE_DELETED; /* Wait for the PMCs runcount to come to zero. */ pmc_wait_for_pmc_idle(pm); /* * At this point the PMC is off all CPUs and cannot be * freshly scheduled onto a CPU. It is now safe to * unlink all targets from this PMC. If a * process-record's refcount falls to zero, we remove * it from the hash table. The module-wide SX lock * protects us from races. */ LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) { pp = ptgt->pt_process; pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */ PMCDBG1(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt); /* * If the target process record shows that no * PMCs are attached to it, reclaim its space. 
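 * This mirrors pmc_detach_one_process(): the descriptor is
 * unhooked from the process hash table and then destroyed,
 * which also returns its per-thread entries to the free pool.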
*/ if (pp->pp_refcnt == 0) { pmc_remove_process_descriptor(pp); pmc_destroy_process_descriptor(pp); } } cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */ } /* * Release any MD resources */ (void) pcd->pcd_release_pmc(cpu, adjri, pm); /* * Update row disposition */ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) PMC_UNMARK_ROW_STANDALONE(ri); else PMC_UNMARK_ROW_THREAD(ri); /* unlink from the owner's list */ if (pm->pm_owner) { LIST_REMOVE(pm, pm_next); pm->pm_owner = NULL; } } /* * Register an owner and a pmc. */ static int pmc_register_owner(struct proc *p, struct pmc *pmc) { struct pmc_owner *po; sx_assert(&pmc_sx, SX_XLOCKED); if ((po = pmc_find_owner_descriptor(p)) == NULL) if ((po = pmc_allocate_owner_descriptor(p)) == NULL) return ENOMEM; KASSERT(pmc->pm_owner == NULL, ("[pmc,%d] attempting to own an initialized PMC", __LINE__)); pmc->pm_owner = po; LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next); PROC_LOCK(p); p->p_flag |= P_HWPMC; PROC_UNLOCK(p); if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_pmcallocate(pmc); PMCDBG2(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p", po, pmc); return 0; } /* * Return the current row disposition: * == 0 => FREE * > 0 => PROCESS MODE * < 0 => SYSTEM MODE */ int pmc_getrowdisp(int ri) { return pmc_pmcdisp[ri]; } /* * Check if a PMC at row index 'ri' can be allocated to the current * process. * * Allocation can fail if: * - the current process is already being profiled by a PMC at index 'ri', * attached to it via OP_PMCATTACH. * - the current process has already allocated a PMC at index 'ri' * via OP_ALLOCATE. */ static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu) { enum pmc_mode mode; struct pmc *pm; struct pmc_owner *po; struct pmc_process *pp; PMCDBG5(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d " "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu); /* * We shouldn't have already allocated a process-mode PMC at * row index 'ri'. * * We shouldn't have allocated a system-wide PMC on the same * CPU and same RI. */ if ((po = pmc_find_owner_descriptor(p)) != NULL) LIST_FOREACH(pm, &po->po_pmcs, pm_next) { if (PMC_TO_ROWINDEX(pm) == ri) { mode = PMC_TO_MODE(pm); if (PMC_IS_VIRTUAL_MODE(mode)) return EEXIST; if (PMC_IS_SYSTEM_MODE(mode) && (int) PMC_TO_CPU(pm) == cpu) return EEXIST; } } /* * We also shouldn't be the target of any PMC at this index * since otherwise a PMC_ATTACH to ourselves will fail. */ if ((pp = pmc_find_process_descriptor(p, 0)) != NULL) if (pp->pp_pmcs[ri].pp_pmc) return EEXIST; PMCDBG4(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok", p, p->p_pid, p->p_comm, ri); return 0; } /* * Check if a given PMC at row index 'ri' can be currently used in * mode 'mode'. */ static int pmc_can_allocate_row(int ri, enum pmc_mode mode) { enum pmc_disp disp; sx_assert(&pmc_sx, SX_XLOCKED); PMCDBG2(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode); if (PMC_IS_SYSTEM_MODE(mode)) disp = PMC_DISP_STANDALONE; else disp = PMC_DISP_THREAD; /* * check disposition for PMC row 'ri': * * Expected disposition Row-disposition Result * * STANDALONE STANDALONE or FREE proceed * STANDALONE THREAD fail * THREAD THREAD or FREE proceed * THREAD STANDALONE fail */ if (!PMC_ROW_DISP_IS_FREE(ri) && !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) && !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri))) return EBUSY; /* * All OK */ PMCDBG2(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode); return 0; } /* * Find a PMC descriptor with user handle 'pmcid' for thread 'td'. 
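 * The lookup is two-level: pmc_find_pmc() first locates the
 * calling process's owner descriptor and scans its PMC list
 * for a matching id.  A child created under PMC_F_DESCENDANTS
 * is not an owner itself, so in that case the search falls
 * back to the process descriptor and follows pp_pmcs[] back to
 * the owning PMC.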
*/ static struct pmc * pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid) { struct pmc *pm; KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc, ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__, PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc)); LIST_FOREACH(pm, &po->po_pmcs, pm_next) if (pm->pm_id == pmcid) return pm; return NULL; } static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc) { struct pmc *pm, *opm; struct pmc_owner *po; struct pmc_process *pp; PMCDBG1(PMC,FND,1, "find-pmc id=%d", pmcid); if (PMC_ID_TO_ROWINDEX(pmcid) >= md->pmd_npmc) return (EINVAL); if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL) { /* * In case of PMC_F_DESCENDANTS child processes we will not find * the current process in the owners hash list. Find the owner * process first and from there lookup the po. */ if ((pp = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE)) == NULL) { return ESRCH; } else { opm = pp->pp_pmcs[PMC_ID_TO_ROWINDEX(pmcid)].pp_pmc; if (opm == NULL) return ESRCH; if ((opm->pm_flags & (PMC_F_ATTACHED_TO_OWNER| PMC_F_DESCENDANTS)) != (PMC_F_ATTACHED_TO_OWNER| PMC_F_DESCENDANTS)) return ESRCH; po = opm->pm_owner; } } if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL) return EINVAL; PMCDBG2(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm); *pmc = pm; return 0; } /* * Start a PMC. */ static int pmc_start(struct pmc *pm) { enum pmc_mode mode; struct pmc_owner *po; struct pmc_binding pb; struct pmc_classdep *pcd; int adjri, error, cpu, ri; KASSERT(pm != NULL, ("[pmc,%d] null pm", __LINE__)); mode = PMC_TO_MODE(pm); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); error = 0; PMCDBG3(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri); po = pm->pm_owner; /* * Disallow PMCSTART if a logfile is required but has not been * configured yet. */ if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) && (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) return (EDOOFUS); /* programming error */ /* * If this is a sampling mode PMC, log mapping information for * the kernel modules that are currently loaded. */ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) pmc_log_kernel_mappings(pm); if (PMC_IS_VIRTUAL_MODE(mode)) { /* * If a PMCATTACH has never been done on this PMC, * attach it to its owner process. */ if (LIST_EMPTY(&pm->pm_targets)) error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH : pmc_attach_process(po->po_owner, pm); /* * If the PMC is attached to its owner, then force a context * switch to ensure that the MD state gets set correctly. */ if (error == 0) { pm->pm_state = PMC_STATE_RUNNING; if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) pmc_force_context_switch(); } return (error); } /* * A system-wide PMC. * * Add the owner to the global list if this is a system-wide * sampling PMC. */ if (mode == PMC_MODE_SS) { /* * Log mapping information for all existing processes in the * system. Subsequent mappings are logged as they happen; * see pmc_process_mmap(). */ if (po->po_logprocmaps == 0) { pmc_log_all_process_mappings(po); po->po_logprocmaps = 1; } po->po_sscount++; if (po->po_sscount == 1) { atomic_add_rel_int(&pmc_ss_count, 1); CK_LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext); PMCDBG1(PMC,OPS,1, "po=%p in global list", po); } } /* * Move to the CPU associated with this * PMC, and start the hardware. */ pmc_save_cpu_binding(&pb); cpu = PMC_TO_CPU(pm); if (!pmc_cpu_is_active(cpu)) return (ENXIO); pmc_select_cpu(cpu); /* * global PMCs are configured at allocation time * so write out the initial value and start the PMC. 
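 * Sampling PMCs are loaded with pm_reloadcount (the number of
 * events until the next interrupt), counting PMCs with
 * pm_initial.  pps_cpustate is raised before starting so the
 * interrupt and switch-out paths know this PMC is meant to be
 * running on the selected CPU.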
*/ pm->pm_state = PMC_STATE_RUNNING; critical_enter(); if ((error = pcd->pcd_write_pmc(cpu, adjri, PMC_IS_SAMPLING_MODE(mode) ? pm->pm_sc.pm_reloadcount : pm->pm_sc.pm_initial)) == 0) { /* If a sampling mode PMC, reset stalled state. */ if (PMC_IS_SAMPLING_MODE(mode)) pm->pm_pcpu_state[cpu].pps_stalled = 0; /* Indicate that we desire this to run. Start it. */ pm->pm_pcpu_state[cpu].pps_cpustate = 1; error = pcd->pcd_start_pmc(cpu, adjri); } critical_exit(); pmc_restore_cpu_binding(&pb); return (error); } /* * Stop a PMC. */ static int pmc_stop(struct pmc *pm) { struct pmc_owner *po; struct pmc_binding pb; struct pmc_classdep *pcd; int adjri, cpu, error, ri; KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__)); PMCDBG3(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm, PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm)); pm->pm_state = PMC_STATE_STOPPED; /* * If the PMC is a virtual mode one, changing the state to * non-RUNNING is enough to ensure that the PMC never gets * scheduled. * * If this PMC is current running on a CPU, then it will * handled correctly at the time its target process is context * switched out. */ if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) return 0; /* * A system-mode PMC. Move to the CPU associated with * this PMC, and stop the hardware. We update the * 'initial count' so that a subsequent PMCSTART will * resume counting from the current hardware count. */ pmc_save_cpu_binding(&pb); cpu = PMC_TO_CPU(pm); KASSERT(cpu >= 0 && cpu < pmc_cpu_max(), ("[pmc,%d] illegal cpu=%d", __LINE__, cpu)); if (!pmc_cpu_is_active(cpu)) return ENXIO; pmc_select_cpu(cpu); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); pm->pm_pcpu_state[cpu].pps_cpustate = 0; critical_enter(); if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0) error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial); critical_exit(); pmc_restore_cpu_binding(&pb); po = pm->pm_owner; /* remove this owner from the global list of SS PMC owners */ if (PMC_TO_MODE(pm) == PMC_MODE_SS) { po->po_sscount--; if (po->po_sscount == 0) { atomic_subtract_rel_int(&pmc_ss_count, 1); CK_LIST_REMOVE(po, po_ssnext); epoch_wait_preempt(global_epoch_preempt); PMCDBG1(PMC,OPS,2,"po=%p removed from global list", po); } } return (error); } static struct pmc_classdep * pmc_class_to_classdep(enum pmc_class class) { int n; for (n = 0; n < md->pmd_nclass; n++) if (md->pmd_classdep[n].pcd_class == class) return (&md->pmd_classdep[n]); return (NULL); } #if defined(HWPMC_DEBUG) && defined(KTR) static const char *pmc_op_to_name[] = { #undef __PMC_OP #define __PMC_OP(N, D) #N , __PMC_OPS() NULL }; #endif /* * The syscall interface */ #define PMC_GET_SX_XLOCK(...) do { \ sx_xlock(&pmc_sx); \ if (pmc_hook == NULL) { \ sx_xunlock(&pmc_sx); \ return __VA_ARGS__; \ } \ } while (0) #define PMC_DOWNGRADE_SX() do { \ sx_downgrade(&pmc_sx); \ is_sx_downgraded = 1; \ } while (0) static int pmc_syscall_handler(struct thread *td, void *syscall_args) { int error, is_sx_downgraded, op; struct pmc_syscall_args *c; void *pmclog_proc_handle; void *arg; c = (struct pmc_syscall_args *)syscall_args; op = c->pmop_code; arg = c->pmop_data; /* PMC isn't set up yet */ if (pmc_hook == NULL) return (EINVAL); if (op == PMC_OP_CONFIGURELOG) { /* * We cannot create the logging process inside * pmclog_configure_log() because there is a LOR * between pmc_sx and process structure locks. * Instead, pre-create the process and ignite the loop * if everything is fine, otherwise direct the process * to exit. 
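 * Every exit path of the PMC_OP_CONFIGURELOG case below must
 * therefore call pmclog_proc_ignite(), either with the owner
 * (the helper process starts its loop) or with NULL (the
 * helper process is told to exit).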
*/ error = pmclog_proc_create(td, &pmclog_proc_handle); if (error != 0) goto done_syscall; } PMC_GET_SX_XLOCK(ENOSYS); is_sx_downgraded = 0; PMCDBG3(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op, pmc_op_to_name[op], arg); error = 0; counter_u64_add(pmc_stats.pm_syscalls, 1); switch (op) { /* * Configure a log file. * * XXX This OP will be reworked. */ case PMC_OP_CONFIGURELOG: { struct proc *p; struct pmc *pm; struct pmc_owner *po; struct pmc_op_configurelog cl; if ((error = copyin(arg, &cl, sizeof(cl))) != 0) { pmclog_proc_ignite(pmclog_proc_handle, NULL); break; } /* mark this process as owning a log file */ p = td->td_proc; if ((po = pmc_find_owner_descriptor(p)) == NULL) if ((po = pmc_allocate_owner_descriptor(p)) == NULL) { pmclog_proc_ignite(pmclog_proc_handle, NULL); error = ENOMEM; break; } /* * If a valid fd was passed in, try to configure that, * otherwise if 'fd' was less than zero and there was * a log file configured, flush its buffers and * de-configure it. */ if (cl.pm_logfd >= 0) { error = pmclog_configure_log(md, po, cl.pm_logfd); pmclog_proc_ignite(pmclog_proc_handle, error == 0 ? po : NULL); } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) { pmclog_proc_ignite(pmclog_proc_handle, NULL); error = pmclog_close(po); if (error == 0) { LIST_FOREACH(pm, &po->po_pmcs, pm_next) if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && pm->pm_state == PMC_STATE_RUNNING) pmc_stop(pm); error = pmclog_deconfigure_log(po); } } else { pmclog_proc_ignite(pmclog_proc_handle, NULL); error = EINVAL; } } break; /* * Flush a log file. */ case PMC_OP_FLUSHLOG: { struct pmc_owner *po; sx_assert(&pmc_sx, SX_XLOCKED); if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { error = EINVAL; break; } error = pmclog_flush(po, 0); } break; /* * Close a log file. */ case PMC_OP_CLOSELOG: { struct pmc_owner *po; sx_assert(&pmc_sx, SX_XLOCKED); if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { error = EINVAL; break; } error = pmclog_close(po); } break; /* * Retrieve hardware configuration. */ case PMC_OP_GETCPUINFO: /* CPU information */ { struct pmc_op_getcpuinfo gci; struct pmc_classinfo *pci; struct pmc_classdep *pcd; int cl; gci.pm_cputype = md->pmd_cputype; gci.pm_ncpu = pmc_cpu_max(); gci.pm_npmc = md->pmd_npmc; gci.pm_nclass = md->pmd_nclass; pci = gci.pm_classes; pcd = md->pmd_classdep; for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) { pci->pm_caps = pcd->pcd_caps; pci->pm_class = pcd->pcd_class; pci->pm_width = pcd->pcd_width; pci->pm_num = pcd->pcd_num; } error = copyout(&gci, arg, sizeof(gci)); } break; /* * Retrieve soft events list. */ case PMC_OP_GETDYNEVENTINFO: { enum pmc_class cl; enum pmc_event ev; struct pmc_op_getdyneventinfo *gei; struct pmc_dyn_event_descr dev; struct pmc_soft *ps; uint32_t nevent; sx_assert(&pmc_sx, SX_LOCKED); gei = (struct pmc_op_getdyneventinfo *) arg; if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0) break; /* Only SOFT class is dynamic. 
*/ if (cl != PMC_CLASS_SOFT) { error = EINVAL; break; } nevent = 0; for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) { ps = pmc_soft_ev_acquire(ev); if (ps == NULL) continue; bcopy(&ps->ps_ev, &dev, sizeof(dev)); pmc_soft_ev_release(ps); error = copyout(&dev, &gei->pm_events[nevent], sizeof(struct pmc_dyn_event_descr)); if (error != 0) break; nevent++; } if (error != 0) break; error = copyout(&nevent, &gei->pm_nevent, sizeof(nevent)); } break; /* * Get module statistics */ case PMC_OP_GETDRIVERSTATS: { struct pmc_op_getdriverstats gms; #define CFETCH(a, b, field) a.field = counter_u64_fetch(b.field) CFETCH(gms, pmc_stats, pm_intr_ignored); CFETCH(gms, pmc_stats, pm_intr_processed); CFETCH(gms, pmc_stats, pm_intr_bufferfull); CFETCH(gms, pmc_stats, pm_syscalls); CFETCH(gms, pmc_stats, pm_syscall_errors); CFETCH(gms, pmc_stats, pm_buffer_requests); CFETCH(gms, pmc_stats, pm_buffer_requests_failed); CFETCH(gms, pmc_stats, pm_log_sweeps); #undef CFETCH error = copyout(&gms, arg, sizeof(gms)); } break; /* * Retrieve module version number */ case PMC_OP_GETMODULEVERSION: { uint32_t cv, modv; /* retrieve the client's idea of the ABI version */ if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0) break; /* don't service clients newer than our driver */ modv = PMC_VERSION; if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) { error = EPROGMISMATCH; break; } error = copyout(&modv, arg, sizeof(int)); } break; /* * Retrieve the state of all the PMCs on a given * CPU. */ case PMC_OP_GETPMCINFO: { int ari; struct pmc *pm; size_t pmcinfo_size; uint32_t cpu, n, npmc; struct pmc_owner *po; struct pmc_binding pb; struct pmc_classdep *pcd; struct pmc_info *p, *pmcinfo; struct pmc_op_getpmcinfo *gpi; PMC_DOWNGRADE_SX(); gpi = (struct pmc_op_getpmcinfo *) arg; if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0) break; if (cpu >= pmc_cpu_max()) { error = EINVAL; break; } if (!pmc_cpu_is_active(cpu)) { error = ENXIO; break; } /* switch to CPU 'cpu' */ pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); npmc = md->pmd_npmc; pmcinfo_size = npmc * sizeof(struct pmc_info); pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK); p = pmcinfo; for (n = 0; n < md->pmd_npmc; n++, p++) { pcd = pmc_ri_to_classdep(md, n, &ari); KASSERT(pcd != NULL, ("[pmc,%d] null pcd ri=%d", __LINE__, n)); if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0) break; if (PMC_ROW_DISP_IS_STANDALONE(n)) p->pm_rowdisp = PMC_DISP_STANDALONE; else if (PMC_ROW_DISP_IS_THREAD(n)) p->pm_rowdisp = PMC_DISP_THREAD; else p->pm_rowdisp = PMC_DISP_FREE; p->pm_ownerpid = -1; if (pm == NULL) /* no PMC associated */ continue; po = pm->pm_owner; KASSERT(po->po_owner != NULL, ("[pmc,%d] pmc_owner had a null proc pointer", __LINE__)); p->pm_ownerpid = po->po_owner->p_pid; p->pm_mode = PMC_TO_MODE(pm); p->pm_event = pm->pm_event; p->pm_flags = pm->pm_flags; if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) p->pm_reloadcount = pm->pm_sc.pm_reloadcount; } pmc_restore_cpu_binding(&pb); /* now copy out the PMC info collected */ if (error == 0) error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size); free(pmcinfo, M_PMC); } break; /* * Set the administrative state of a PMC. I.e. whether * the PMC is to be used or not. 
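 * Only holders of PRIV_PMC_MANAGE may do this, only the
 * DISABLED and FREE states may be requested, and only rows
 * with no PMC currently allocated are touched.  Disabling a
 * row marks it STANDALONE, keeping it away from process-mode
 * allocations.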
*/ case PMC_OP_PMCADMIN: { int cpu, ri; enum pmc_state request; struct pmc_cpu *pc; struct pmc_hw *phw; struct pmc_op_pmcadmin pma; struct pmc_binding pb; sx_assert(&pmc_sx, SX_XLOCKED); KASSERT(td == curthread, ("[pmc,%d] td != curthread", __LINE__)); error = priv_check(td, PRIV_PMC_MANAGE); if (error) break; if ((error = copyin(arg, &pma, sizeof(pma))) != 0) break; cpu = pma.pm_cpu; if (cpu < 0 || cpu >= (int) pmc_cpu_max()) { error = EINVAL; break; } if (!pmc_cpu_is_active(cpu)) { error = ENXIO; break; } request = pma.pm_state; if (request != PMC_STATE_DISABLED && request != PMC_STATE_FREE) { error = EINVAL; break; } ri = pma.pm_pmc; /* pmc id == row index */ if (ri < 0 || ri >= (int) md->pmd_npmc) { error = EINVAL; break; } /* * We can't disable a PMC with a row-index allocated * for process virtual PMCs. */ if (PMC_ROW_DISP_IS_THREAD(ri) && request == PMC_STATE_DISABLED) { error = EBUSY; break; } /* * otherwise, this PMC on this CPU is either free or * in system-wide mode. */ pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); pc = pmc_pcpu[cpu]; phw = pc->pc_hwpmcs[ri]; /* * XXX do we need some kind of 'forced' disable? */ if (phw->phw_pmc == NULL) { if (request == PMC_STATE_DISABLED && (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) { phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED; PMC_MARK_ROW_STANDALONE(ri); } else if (request == PMC_STATE_FREE && (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) { phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED; PMC_UNMARK_ROW_STANDALONE(ri); } /* other cases are a no-op */ } else error = EBUSY; pmc_restore_cpu_binding(&pb); } break; /* * Allocate a PMC. */ case PMC_OP_PMCALLOCATE: { int adjri, n; u_int cpu; uint32_t caps; struct pmc *pmc; enum pmc_mode mode; struct pmc_hw *phw; struct pmc_binding pb; struct pmc_classdep *pcd; struct pmc_op_pmcallocate pa; if ((error = copyin(arg, &pa, sizeof(pa))) != 0) break; caps = pa.pm_caps; mode = pa.pm_mode; cpu = pa.pm_cpu; if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC && mode != PMC_MODE_TS && mode != PMC_MODE_TC) || (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) { error = EINVAL; break; } /* * Virtual PMCs should only ask for a default CPU. * System mode PMCs need to specify a non-default CPU. */ if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) || (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) { error = EINVAL; break; } /* * Check that an inactive CPU is not being asked for. */ if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) { error = ENXIO; break; } /* * Refuse an allocation for a system-wide PMC if this * process has been jailed, or if this process lacks * super-user credentials and the sysctl tunable * 'security.bsd.unprivileged_syspmcs' is zero. 
*/ if (PMC_IS_SYSTEM_MODE(mode)) { if (jailed(curthread->td_ucred)) { error = EPERM; break; } if (!pmc_unprivileged_syspmcs) { error = priv_check(curthread, PRIV_PMC_SYSTEM); if (error) break; } } /* * Look for valid values for 'pm_flags' */ if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) != 0) { error = EINVAL; break; } /* PMC_F_USERCALLCHAIN is only valid with PMC_F_CALLCHAIN */ if ((pa.pm_flags & (PMC_F_CALLCHAIN | PMC_F_USERCALLCHAIN)) == PMC_F_USERCALLCHAIN) { error = EINVAL; break; } /* PMC_F_USERCALLCHAIN is only valid for sampling mode */ if (pa.pm_flags & PMC_F_USERCALLCHAIN && mode != PMC_MODE_TS && mode != PMC_MODE_SS) { error = EINVAL; break; } /* process logging options are not allowed for system PMCs */ if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags & (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) { error = EINVAL; break; } /* * All sampling mode PMCs need to be able to interrupt the * CPU. */ if (PMC_IS_SAMPLING_MODE(mode)) caps |= PMC_CAP_INTERRUPT; /* A valid class specifier should have been passed in. */ pcd = pmc_class_to_classdep(pa.pm_class); if (pcd == NULL) { error = EINVAL; break; } /* The requested PMC capabilities should be feasible. */ if ((pcd->pcd_caps & caps) != caps) { error = EOPNOTSUPP; break; } PMCDBG4(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d", pa.pm_ev, caps, mode, cpu); pmc = pmc_allocate_pmc_descriptor(); pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class, PMC_ID_INVALID); pmc->pm_event = pa.pm_ev; pmc->pm_state = PMC_STATE_FREE; pmc->pm_caps = caps; pmc->pm_flags = pa.pm_flags; /* XXX set lower bound on sampling for process counters */ if (PMC_IS_SAMPLING_MODE(mode)) { /* * Don't permit requested sample rate to be less than 1000 */ if (pa.pm_count < 1000) log(LOG_WARNING, "pmcallocate: passed sample rate %ju - setting to 1000\n", (uintmax_t)pa.pm_count); pmc->pm_sc.pm_reloadcount = MAX(1000, pa.pm_count); } else pmc->pm_sc.pm_initial = pa.pm_count; /* switch thread to CPU 'cpu' */ pmc_save_cpu_binding(&pb); #define PMC_IS_SHAREABLE_PMC(cpu, n) \ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \ PMC_PHW_FLAG_IS_SHAREABLE) #define PMC_IS_UNALLOCATED(cpu, n) \ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL) if (PMC_IS_SYSTEM_MODE(mode)) { pmc_select_cpu(cpu); for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) { pcd = pmc_ri_to_classdep(md, n, &adjri); if (pmc_can_allocate_row(n, mode) == 0 && pmc_can_allocate_rowindex( curthread->td_proc, n, cpu) == 0 && (PMC_IS_UNALLOCATED(cpu, n) || PMC_IS_SHAREABLE_PMC(cpu, n)) && pcd->pcd_allocate_pmc(cpu, adjri, pmc, &pa) == 0) break; } } else { /* Process virtual mode */ for (n = pcd->pcd_ri; n < (int) md->pmd_npmc; n++) { pcd = pmc_ri_to_classdep(md, n, &adjri); if (pmc_can_allocate_row(n, mode) == 0 && pmc_can_allocate_rowindex( curthread->td_proc, n, PMC_CPU_ANY) == 0 && pcd->pcd_allocate_pmc(curthread->td_oncpu, adjri, pmc, &pa) == 0) break; } } #undef PMC_IS_UNALLOCATED #undef PMC_IS_SHAREABLE_PMC pmc_restore_cpu_binding(&pb); if (n == (int) md->pmd_npmc) { pmc_destroy_pmc_descriptor(pmc); pmc = NULL; error = EINVAL; break; } /* Fill in the correct value in the ID field */ pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n); PMCDBG5(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x", pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id); /* Process mode PMCs with logging enabled need log files */ if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW)) pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; /* All system mode sampling PMCs require a 
log file */ if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode)) pmc->pm_flags |= PMC_F_NEEDS_LOGFILE; /* * Configure global pmc's immediately */ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) { pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); phw = pmc_pcpu[cpu]->pc_hwpmcs[n]; pcd = pmc_ri_to_classdep(md, n, &adjri); if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 || (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) { (void) pcd->pcd_release_pmc(cpu, adjri, pmc); pmc_destroy_pmc_descriptor(pmc); pmc = NULL; pmc_restore_cpu_binding(&pb); error = EPERM; break; } pmc_restore_cpu_binding(&pb); } pmc->pm_state = PMC_STATE_ALLOCATED; pmc->pm_class = pa.pm_class; /* * mark row disposition */ if (PMC_IS_SYSTEM_MODE(mode)) PMC_MARK_ROW_STANDALONE(n); else PMC_MARK_ROW_THREAD(n); /* * Register this PMC with the current thread as its owner. */ if ((error = pmc_register_owner(curthread->td_proc, pmc)) != 0) { pmc_release_pmc_descriptor(pmc); pmc_destroy_pmc_descriptor(pmc); pmc = NULL; break; } /* * Return the allocated index. */ pa.pm_pmcid = pmc->pm_id; error = copyout(&pa, arg, sizeof(pa)); } break; /* * Attach a PMC to a process. */ case PMC_OP_PMCATTACH: { struct pmc *pm; struct proc *p; struct pmc_op_pmcattach a; sx_assert(&pmc_sx, SX_XLOCKED); if ((error = copyin(arg, &a, sizeof(a))) != 0) break; if (a.pm_pid < 0) { error = EINVAL; break; } else if (a.pm_pid == 0) a.pm_pid = td->td_proc->p_pid; if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0) break; if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) { error = EINVAL; break; } /* PMCs may be (re)attached only when allocated or stopped */ if (pm->pm_state == PMC_STATE_RUNNING) { error = EBUSY; break; } else if (pm->pm_state != PMC_STATE_ALLOCATED && pm->pm_state != PMC_STATE_STOPPED) { error = EINVAL; break; } /* lookup pid */ if ((p = pfind(a.pm_pid)) == NULL) { error = ESRCH; break; } /* * Ignore processes that are working on exiting. */ if (p->p_flag & P_WEXIT) { error = ESRCH; PROC_UNLOCK(p); /* pfind() returns a locked process */ break; } /* * we are allowed to attach a PMC to a process if * we can debug it. */ error = p_candebug(curthread, p); PROC_UNLOCK(p); if (error == 0) error = pmc_attach_process(p, pm); } break; /* * Detach an attached PMC from a process. */ case PMC_OP_PMCDETACH: { struct pmc *pm; struct proc *p; struct pmc_op_pmcattach a; if ((error = copyin(arg, &a, sizeof(a))) != 0) break; if (a.pm_pid < 0) { error = EINVAL; break; } else if (a.pm_pid == 0) a.pm_pid = td->td_proc->p_pid; if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0) break; if ((p = pfind(a.pm_pid)) == NULL) { error = ESRCH; break; } /* * Treat processes that are in the process of exiting * as if they were not present. */ if (p->p_flag & P_WEXIT) error = ESRCH; PROC_UNLOCK(p); /* pfind() returns a locked process */ if (error == 0) error = pmc_detach_process(p, pm); } break; /* * Retrieve the MSR number associated with the counter * 'pmc_id'. This allows processes to directly use RDPMC * instructions to read their PMCs, without the overhead of a * system call. */ case PMC_OP_PMCGETMSR: { int adjri, ri; struct pmc *pm; struct pmc_target *pt; struct pmc_op_getmsr gm; struct pmc_classdep *pcd; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &gm, sizeof(gm))) != 0) break; if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0) break; /* * The allocated PMC has to be a process virtual PMC, * i.e., of type MODE_T[CS]. 
Global PMCs can only be * read using the PMCREAD operation since they may be * allocated on a different CPU than the one we could * be running on at the time of the RDPMC instruction. * * The GETMSR operation is not allowed for PMCs that * are inherited across processes. */ if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) || (pm->pm_flags & PMC_F_DESCENDANTS)) { error = EINVAL; break; } /* * It only makes sense to use a RDPMC (or its * equivalent instruction on non-x86 architectures) on * a process that has allocated and attached a PMC to * itself. Conversely the PMC is only allowed to have * one process attached to it -- its owner. */ if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL || LIST_NEXT(pt, pt_next) != NULL || pt->pt_process->pp_proc != pm->pm_owner->po_owner) { error = EINVAL; break; } ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); /* PMC class has no 'GETMSR' support */ if (pcd->pcd_get_msr == NULL) { error = ENOSYS; break; } if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) < 0) break; if ((error = copyout(&gm, arg, sizeof(gm))) < 0) break; /* * Mark our process as using MSRs. Update machine * state using a forced context switch. */ pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS; pmc_force_context_switch(); } break; /* * Release an allocated PMC */ case PMC_OP_PMCRELEASE: { pmc_id_t pmcid; struct pmc *pm; struct pmc_owner *po; struct pmc_op_simple sp; /* * Find PMC pointer for the named PMC. * * Use pmc_release_pmc_descriptor() to switch off the * PMC, remove all its target threads, and remove the * PMC from its owner's list. * * Remove the owner record if this is the last PMC * owned. * * Free up space. */ if ((error = copyin(arg, &sp, sizeof(sp))) != 0) break; pmcid = sp.pm_pmcid; if ((error = pmc_find_pmc(pmcid, &pm)) != 0) break; po = pm->pm_owner; pmc_release_pmc_descriptor(pm); pmc_maybe_remove_owner(po); pmc_destroy_pmc_descriptor(pm); } break; /* * Read and/or write a PMC. */ case PMC_OP_PMCRW: { int adjri; struct pmc *pm; uint32_t cpu, ri; pmc_value_t oldvalue; struct pmc_binding pb; struct pmc_op_pmcrw prw; struct pmc_classdep *pcd; struct pmc_op_pmcrw *pprw; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &prw, sizeof(prw))) != 0) break; ri = 0; PMCDBG2(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid, prw.pm_flags); /* must have at least one flag set */ if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) { error = EINVAL; break; } /* locate pmc descriptor */ if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0) break; /* Can't read a PMC that hasn't been started. */ if (pm->pm_state != PMC_STATE_ALLOCATED && pm->pm_state != PMC_STATE_STOPPED && pm->pm_state != PMC_STATE_RUNNING) { error = EINVAL; break; } /* writing a new value is allowed only for 'STOPPED' pmcs */ if (pm->pm_state == PMC_STATE_RUNNING && (prw.pm_flags & PMC_F_NEWVALUE)) { error = EBUSY; break; } if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) { /* * If this PMC is attached to its owner (i.e., * the process requesting this operation) and * is running, then attempt to get an * upto-date reading from hardware for a READ. * Writes are only allowed when the PMC is * stopped, so only update the saved value * field. * * If the PMC is not running, or is not * attached to its owner, read/write to the * savedvalue field. 
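 * In the running, attached-to-owner case the caller is itself
 * the target, so the counter is live on the CPU the caller is
 * executing on (curthread->td_oncpu) and can be read directly
 * from the hardware.  New values always land in
 * pm_gv.pm_savedvalue, since writes are only permitted while
 * the PMC is stopped.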
*/ ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); mtx_pool_lock_spin(pmc_mtxpool, pm); cpu = curthread->td_oncpu; if (prw.pm_flags & PMC_F_OLDVALUE) { if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) && (pm->pm_state == PMC_STATE_RUNNING)) error = (*pcd->pcd_read_pmc)(cpu, adjri, &oldvalue); else oldvalue = pm->pm_gv.pm_savedvalue; } if (prw.pm_flags & PMC_F_NEWVALUE) pm->pm_gv.pm_savedvalue = prw.pm_value; mtx_pool_unlock_spin(pmc_mtxpool, pm); } else { /* System mode PMCs */ cpu = PMC_TO_CPU(pm); ri = PMC_TO_ROWINDEX(pm); pcd = pmc_ri_to_classdep(md, ri, &adjri); if (!pmc_cpu_is_active(cpu)) { error = ENXIO; break; } /* move this thread to CPU 'cpu' */ pmc_save_cpu_binding(&pb); pmc_select_cpu(cpu); critical_enter(); /* save old value */ if (prw.pm_flags & PMC_F_OLDVALUE) if ((error = (*pcd->pcd_read_pmc)(cpu, adjri, &oldvalue))) goto error; /* write out new value */ if (prw.pm_flags & PMC_F_NEWVALUE) error = (*pcd->pcd_write_pmc)(cpu, adjri, prw.pm_value); error: critical_exit(); pmc_restore_cpu_binding(&pb); if (error) break; } pprw = (struct pmc_op_pmcrw *) arg; #ifdef HWPMC_DEBUG if (prw.pm_flags & PMC_F_NEWVALUE) PMCDBG3(PMC,OPS,2, "rw id=%d new %jx -> old %jx", ri, prw.pm_value, oldvalue); else if (prw.pm_flags & PMC_F_OLDVALUE) PMCDBG2(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue); #endif /* return old value if requested */ if (prw.pm_flags & PMC_F_OLDVALUE) if ((error = copyout(&oldvalue, &pprw->pm_value, sizeof(prw.pm_value)))) break; } break; /* * Set the sampling rate for a sampling mode PMC and the * initial count for a counting mode PMC. */ case PMC_OP_PMCSETCOUNT: { struct pmc *pm; struct pmc_op_pmcsetcount sc; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &sc, sizeof(sc))) != 0) break; if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0) break; if (pm->pm_state == PMC_STATE_RUNNING) { error = EBUSY; break; } if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) { /* * Don't permit requested sample rate to be less than 1000 */ if (sc.pm_count < 1000) log(LOG_WARNING, "pmcsetcount: passed sample rate %ju - setting to 1000\n", (uintmax_t)sc.pm_count); pm->pm_sc.pm_reloadcount = MAX(1000, sc.pm_count); } else pm->pm_sc.pm_initial = sc.pm_count; } break; /* * Start a PMC. */ case PMC_OP_PMCSTART: { pmc_id_t pmcid; struct pmc *pm; struct pmc_op_simple sp; sx_assert(&pmc_sx, SX_XLOCKED); if ((error = copyin(arg, &sp, sizeof(sp))) != 0) break; pmcid = sp.pm_pmcid; if ((error = pmc_find_pmc(pmcid, &pm)) != 0) break; KASSERT(pmcid == pm->pm_id, ("[pmc,%d] pmcid %x != id %x", __LINE__, pm->pm_id, pmcid)); if (pm->pm_state == PMC_STATE_RUNNING) /* already running */ break; else if (pm->pm_state != PMC_STATE_STOPPED && pm->pm_state != PMC_STATE_ALLOCATED) { error = EINVAL; break; } error = pmc_start(pm); } break; /* * Stop a PMC. */ case PMC_OP_PMCSTOP: { pmc_id_t pmcid; struct pmc *pm; struct pmc_op_simple sp; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &sp, sizeof(sp))) != 0) break; pmcid = sp.pm_pmcid; /* * Mark the PMC as inactive and invoke the MD stop * routines if needed. */ if ((error = pmc_find_pmc(pmcid, &pm)) != 0) break; KASSERT(pmcid == pm->pm_id, ("[pmc,%d] pmc id %x != pmcid %x", __LINE__, pm->pm_id, pmcid)); if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */ break; else if (pm->pm_state != PMC_STATE_RUNNING) { error = EINVAL; break; } error = pmc_stop(pm); } break; /* * Write a user supplied value to the log file. 
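Seen from the caller's side, the PMC_OP_PMCRW handler above takes a struct pmc_op_pmcrw carrying the PMC id, the PMC_F_OLDVALUE/PMC_F_NEWVALUE flags, and a value field that doubles as input (new count) and output (old count). The sketch below only shows how such a request is populated; issue_pmc_op() is a hypothetical stand-in for the actual syscall plumbing, which is not shown in this excerpt.

#include <string.h>
#include <sys/pmc.h>

int issue_pmc_op(int op, void *data);           /* hypothetical plumbing */

/*
 * Read the old value and load a new one in a single PMC_OP_PMCRW
 * request.  As the handler above enforces, the write half is only
 * legal while the PMC is stopped.
 */
static int
read_and_reload(pmc_id_t pmcid, pmc_value_t newval, pmc_value_t *oldvalp)
{
        struct pmc_op_pmcrw prw;
        int error;

        memset(&prw, 0, sizeof(prw));
        prw.pm_pmcid = pmcid;
        prw.pm_flags = PMC_F_OLDVALUE | PMC_F_NEWVALUE;
        prw.pm_value = newval;

        error = issue_pmc_op(PMC_OP_PMCRW, &prw);
        if (error == 0)
                *oldvalp = prw.pm_value;        /* copied out by the handler */
        return (error);
}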
*/ case PMC_OP_WRITELOG: { struct pmc_op_writelog wl; struct pmc_owner *po; PMC_DOWNGRADE_SX(); if ((error = copyin(arg, &wl, sizeof(wl))) != 0) break; if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) { error = EINVAL; break; } if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) { error = EINVAL; break; } error = pmclog_process_userlog(po, &wl); } break; default: error = EINVAL; break; } if (is_sx_downgraded) sx_sunlock(&pmc_sx); else sx_xunlock(&pmc_sx); done_syscall: if (error) counter_u64_add(pmc_stats.pm_syscall_errors, 1); return (error); } /* * Helper functions */ /* * Mark the thread as needing callchain capture and post an AST. The * actual callchain capture will be done in a context where it is safe * to take page faults. */ static void pmc_post_callchain_callback(void) { struct thread *td; td = curthread; /* * If there is multiple PMCs for the same interrupt ignore new post */ if (td->td_pflags & TDP_CALLCHAIN) return; /* * Mark this thread as needing callchain capture. * `td->td_pflags' will be safe to touch because this thread * was in user space when it was interrupted. */ td->td_pflags |= TDP_CALLCHAIN; /* * Don't let this thread migrate between CPUs until callchain * capture completes. */ sched_pin(); return; } /* * Find a free slot in the per-cpu array of samples and capture the * current callchain there. If a sample was successfully added, a bit * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook * needs to be invoked from the clock handler. * * This function is meant to be called from an NMI handler. It cannot * use any of the locking primitives supplied by the OS. */ static int pmc_add_sample(ring_type_t ring, struct pmc *pm, struct trapframe *tf) { int error, cpu, callchaindepth, inuserspace; struct thread *td; struct pmc_sample *ps; struct pmc_samplebuffer *psb; error = 0; /* * Allocate space for a sample buffer. */ cpu = curcpu; psb = pmc_pcpu[cpu]->pc_sb[ring]; inuserspace = TRAPF_USERMODE(tf); ps = PMC_PROD_SAMPLE(psb); if (psb->ps_considx != psb->ps_prodidx && ps->ps_nsamples) { /* in use, reader hasn't caught up */ pm->pm_pcpu_state[cpu].pps_stalled = 1; counter_u64_add(pmc_stats.pm_intr_bufferfull, 1); PMCDBG6(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, (void *) tf, inuserspace, (int) (psb->ps_prodidx & pmc_sample_mask), (int) (psb->ps_considx & pmc_sample_mask)); callchaindepth = 1; error = ENOMEM; goto done; } /* Fill in entry. */ PMCDBG6(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm, (void *) tf, inuserspace, (int) (psb->ps_prodidx & pmc_sample_mask), (int) (psb->ps_considx & pmc_sample_mask)); td = curthread; ps->ps_pmc = pm; ps->ps_td = td; ps->ps_pid = td->td_proc->p_pid; ps->ps_tid = td->td_tid; ps->ps_tsc = pmc_rdtsc(); ps->ps_ticks = ticks; ps->ps_cpu = cpu; ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0; callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ? pmc_callchaindepth : 1; MPASS(ps->ps_pc != NULL); if (callchaindepth == 1) ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf); else { /* * Kernel stack traversals can be done immediately, * while we defer to an AST for user space traversals. 
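The sample buffer filled in above is a single-producer ring indexed by free-running 64-bit ps_prodidx/ps_considx counters; a slot is located by masking the index, which relies on pmc_sample_mask being pmc_nsamples - 1 (set in pmc_initialize() further down), i.e. on the ring size being a power of two. A stripped-down, user-level version of the same indexing and "reader hasn't caught up" test, purely for illustration:

#include <stdint.h>

#define RING_SIZE       256U            /* must be a power of two */
#define RING_MASK       (RING_SIZE - 1)

struct slot {
        int             nsamples;       /* 0 means the slot is free */
};

struct ring {
        uint64_t        prodidx;        /* advanced only by the producer */
        uint64_t        considx;        /* advanced only by the consumer */
        struct slot     slots[RING_SIZE];
};

/* Producer side, mirroring the full-buffer check in pmc_add_sample(). */
static int
ring_produce(struct ring *r)
{
        struct slot *s = &r->slots[r->prodidx & RING_MASK];

        if (r->considx != r->prodidx && s->nsamples != 0)
                return (-1);            /* full: consumer lags a whole lap */
        s->nsamples = 1;                /* mark the slot as in use */
        r->prodidx++;
        return (0);
}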
*/ if (!inuserspace) { callchaindepth = pmc_save_kernel_callchain(ps->ps_pc, callchaindepth, tf); } else { pmc_post_callchain_callback(); callchaindepth = PMC_USER_CALLCHAIN_PENDING; } } ps->ps_nsamples = callchaindepth; /* mark entry as in use */ if (ring == PMC_UR) { ps->ps_nsamples_actual = callchaindepth; /* mark entry as in use */ ps->ps_nsamples = PMC_USER_CALLCHAIN_PENDING; } else ps->ps_nsamples = callchaindepth; /* mark entry as in use */ KASSERT(counter_u64_fetch(pm->pm_runcount) >= 0, ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm, (unsigned long)counter_u64_fetch(pm->pm_runcount))); counter_u64_add(pm->pm_runcount, 1); /* hold onto PMC */ /* increment write pointer */ psb->ps_prodidx++; done: /* mark CPU as needing processing */ if (callchaindepth != PMC_USER_CALLCHAIN_PENDING) DPCPU_SET(pmc_sampled, 1); return (error); } /* * Interrupt processing. * * This function is meant to be called from an NMI handler. It cannot * use any of the locking primitives supplied by the OS. */ int pmc_process_interrupt(int ring, struct pmc *pm, struct trapframe *tf) { struct thread *td; td = curthread; if ((pm->pm_flags & PMC_F_USERCALLCHAIN) && (td->td_proc->p_flag & P_KPROC) == 0 && !TRAPF_USERMODE(tf)) { atomic_add_int(&td->td_pmcpend, 1); return (pmc_add_sample(PMC_UR, pm, tf)); } return (pmc_add_sample(ring, pm, tf)); } /* * Capture a user call chain. This function will be called from ast() * before control returns to userland and before the process gets * rescheduled. */ static void pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf) { struct pmc *pm; struct thread *td; struct pmc_sample *ps; struct pmc_samplebuffer *psb; uint64_t considx, prodidx; int nsamples, nrecords, pass, iter; #ifdef INVARIANTS int ncallchains; int nfree; int start_ticks = ticks; #endif psb = pmc_pcpu[cpu]->pc_sb[ring]; td = curthread; KASSERT(td->td_pflags & TDP_CALLCHAIN, ("[pmc,%d] Retrieving callchain for thread that doesn't want it", __LINE__)); #ifdef INVARIANTS ncallchains = 0; nfree = 0; #endif nrecords = INT_MAX; pass = 0; restart: if (ring == PMC_UR) nrecords = atomic_readandclear_32(&td->td_pmcpend); for (iter = 0, considx = psb->ps_considx, prodidx = psb->ps_prodidx; considx < prodidx && iter < pmc_nsamples; considx++, iter++) { ps = PMC_CONS_SAMPLE_OFF(psb, considx); /* * Iterate through all deferred callchain requests. * Walk from the current read pointer to the current * write pointer. */ #ifdef INVARIANTS if (ps->ps_nsamples == PMC_SAMPLE_FREE) { nfree++; continue; } if ((ps->ps_pmc == NULL) || (ps->ps_pmc->pm_state != PMC_STATE_RUNNING)) nfree++; #endif if (ps->ps_td != td || ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING || ps->ps_pmc->pm_state != PMC_STATE_RUNNING) continue; KASSERT(ps->ps_cpu == cpu, ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__, ps->ps_cpu, PCPU_GET(cpuid))); pm = ps->ps_pmc; KASSERT(pm->pm_flags & PMC_F_CALLCHAIN, ("[pmc,%d] Retrieving callchain for PMC that doesn't " "want it", __LINE__)); KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, ("[pmc,%d] runcount %ld", __LINE__, (unsigned long)counter_u64_fetch(pm->pm_runcount))); if (ring == PMC_UR) { nsamples = ps->ps_nsamples_actual; counter_u64_add(pmc_stats.pm_merges, 1); } else nsamples = 0; /* * Retrieve the callchain and mark the sample buffer * as 'processable' by the timer tick sweep code. 
*/ #ifdef INVARIANTS ncallchains++; #endif if (__predict_true(nsamples < pmc_callchaindepth - 1)) nsamples += pmc_save_user_callchain(ps->ps_pc + nsamples, pmc_callchaindepth - nsamples - 1, tf); /* * We have to prevent hardclock from potentially overwriting * this sample between when we read the value and when we set * it */ spinlock_enter(); /* * Verify that the sample hasn't been dropped in the meantime */ if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) { ps->ps_nsamples = nsamples; /* * If we couldn't get a sample, simply drop the reference */ if (nsamples == 0) counter_u64_add(pm->pm_runcount, -1); } spinlock_exit(); if (nrecords-- == 1) break; } if (__predict_false(ring == PMC_UR && td->td_pmcpend)) { if (pass == 0) { pass = 1; goto restart; } /* only collect samples for this part once */ td->td_pmcpend = 0; } #ifdef INVARIANTS if ((ticks - start_ticks) > hz) log(LOG_ERR, "%s took %d ticks\n", __func__, (ticks - start_ticks)); #endif /* mark CPU as needing processing */ DPCPU_SET(pmc_sampled, 1); } /* * Process saved PC samples. */ static void pmc_process_samples(int cpu, ring_type_t ring) { struct pmc *pm; int adjri, n; struct thread *td; struct pmc_owner *po; struct pmc_sample *ps; struct pmc_classdep *pcd; struct pmc_samplebuffer *psb; uint64_t delta; KASSERT(PCPU_GET(cpuid) == cpu, ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__, PCPU_GET(cpuid), cpu)); psb = pmc_pcpu[cpu]->pc_sb[ring]; delta = psb->ps_prodidx - psb->ps_considx; MPASS(delta <= pmc_nsamples); MPASS(psb->ps_considx <= psb->ps_prodidx); for (n = 0; psb->ps_considx < psb->ps_prodidx; psb->ps_considx++, n++) { ps = PMC_CONS_SAMPLE(psb); if (__predict_false(ps->ps_nsamples == PMC_SAMPLE_FREE)) continue; pm = ps->ps_pmc; /* skip non-running samples */ if (pm->pm_state != PMC_STATE_RUNNING) goto entrydone; KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm, (unsigned long)counter_u64_fetch(pm->pm_runcount))); po = pm->pm_owner; KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)), ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__, pm, PMC_TO_MODE(pm))); /* If there is a pending AST wait for completion */ if (ps->ps_nsamples == PMC_USER_CALLCHAIN_PENDING) { /* if sample is more than 65 ms old, drop it */ if (ticks - ps->ps_ticks > (hz >> 4)) { /* * track how often we hit this as it will * preferentially lose user samples * for long running system calls */ counter_u64_add(pmc_stats.pm_overwrites, 1); goto entrydone; } /* Need a rescan at a later time. */ DPCPU_SET(pmc_sampled, 1); break; } PMCDBG6(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu, pm, ps->ps_nsamples, ps->ps_flags, (int) (psb->ps_prodidx & pmc_sample_mask), (int) (psb->ps_considx & pmc_sample_mask)); /* * If this is a process-mode PMC that is attached to * its owner, and if the PC is in user mode, update * profiling statistics like timer-based profiling * would have done. * * Otherwise, this is either a sampling-mode PMC that * is attached to a different process than its owner, * or a system-wide sampling PMC. Dispatch a log * entry to the PMC's owner process. 
*/ if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) { if (ps->ps_flags & PMC_CC_F_USERSPACE) { td = FIRST_THREAD_IN_PROC(po->po_owner); addupc_intr(td, ps->ps_pc[0], 1); } } else pmclog_process_callchain(pm, ps); entrydone: ps->ps_nsamples = 0; /* mark entry as free */ KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, ("[pmc,%d] pm=%p runcount %ld", __LINE__, (void *) pm, (unsigned long)counter_u64_fetch(pm->pm_runcount))); counter_u64_add(pm->pm_runcount, -1); } counter_u64_add(pmc_stats.pm_log_sweeps, 1); /* Do not re-enable stalled PMCs if we failed to process any samples */ if (n == 0) return; /* * Restart any stalled sampling PMCs on this CPU. * * If the NMI handler sets the pm_stalled field of a PMC after * the check below, we'll end up processing the stalled PMC at * the next hardclock tick. */ for (n = 0; n < md->pmd_npmc; n++) { pcd = pmc_ri_to_classdep(md, n, &adjri); KASSERT(pcd != NULL, ("[pmc,%d] null pcd ri=%d", __LINE__, n)); (void) (*pcd->pcd_get_config)(cpu,adjri,&pm); if (pm == NULL || /* !cfg'ed */ pm->pm_state != PMC_STATE_RUNNING || /* !active */ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */ !pm->pm_pcpu_state[cpu].pps_cpustate || /* !desired */ !pm->pm_pcpu_state[cpu].pps_stalled) /* !stalled */ continue; pm->pm_pcpu_state[cpu].pps_stalled = 0; (*pcd->pcd_start_pmc)(cpu, adjri); } } /* * Event handlers. */ /* * Handle a process exit. * * Remove this process from all hash tables. If this process * owned any PMCs, turn off those PMCs and deallocate them, * removing any associations with target processes. * * This function will be called by the last 'thread' of a * process. * * XXX This eventhandler gets called early in the exit process. * Consider using a 'hook' invocation from thread_exit() or equivalent * spot. Another negative is that kse_exit doesn't seem to call * exit1() [??]. * */ static void pmc_process_exit(void *arg __unused, struct proc *p) { struct pmc *pm; int adjri, cpu; unsigned int ri; int is_using_hwpmcs; struct pmc_owner *po; struct pmc_process *pp; struct pmc_classdep *pcd; pmc_value_t newvalue, tmp; PROC_LOCK(p); is_using_hwpmcs = p->p_flag & P_HWPMC; PROC_UNLOCK(p); /* * Log a sysexit event to all SS PMC owners. */ PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_sysexit(po, p->p_pid); PMC_EPOCH_EXIT(); if (!is_using_hwpmcs) return; PMC_GET_SX_XLOCK(); PMCDBG3(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid, p->p_comm); /* * Since this code is invoked by the last thread in an exiting * process, we would have context switched IN at some prior * point. However, with PREEMPTION, kernel mode context * switches may happen any time, so we want to disable a * context switch OUT till we get any PMCs targeting this * process off the hardware. * * We also need to atomically remove this process' * entry from our target process hash table, using * PMC_FLAG_REMOVE. */ PMCDBG3(PRC,EXT,1, "process-exit proc=%p (%d, %s)", p, p->p_pid, p->p_comm); critical_enter(); /* no preemption */ cpu = curthread->td_oncpu; if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_REMOVE)) != NULL) { PMCDBG2(PRC,EXT,2, "process-exit proc=%p pmc-process=%p", p, pp); /* * The exiting process could the target of * some PMCs which will be running on * currently executing CPU. * * We need to turn these PMCs off like we * would do at context switch OUT time. */ for (ri = 0; ri < md->pmd_npmc; ri++) { /* * Pick up the pmc pointer from hardware * state similar to the CSW_OUT code. 
*/ pm = NULL; pcd = pmc_ri_to_classdep(md, ri, &adjri); (void) (*pcd->pcd_get_config)(cpu, adjri, &pm); PMCDBG2(PRC,EXT,2, "ri=%d pm=%p", ri, pm); if (pm == NULL || !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) continue; PMCDBG4(PRC,EXT,2, "ppmcs[%d]=%p pm=%p " "state=%d", ri, pp->pp_pmcs[ri].pp_pmc, pm, pm->pm_state); KASSERT(PMC_TO_ROWINDEX(pm) == ri, ("[pmc,%d] ri mismatch pmc(%d) ri(%d)", __LINE__, PMC_TO_ROWINDEX(pm), ri)); KASSERT(pm == pp->pp_pmcs[ri].pp_pmc, ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc)); KASSERT(counter_u64_fetch(pm->pm_runcount) > 0, ("[pmc,%d] bad runcount ri %d rc %ld", __LINE__, ri, (unsigned long)counter_u64_fetch(pm->pm_runcount))); /* * Change desired state, and then stop if not * stalled. This two-step dance should avoid * race conditions where an interrupt re-enables * the PMC after this code has already checked * the pm_stalled flag. */ if (pm->pm_pcpu_state[cpu].pps_cpustate) { pm->pm_pcpu_state[cpu].pps_cpustate = 0; if (!pm->pm_pcpu_state[cpu].pps_stalled) { (void) pcd->pcd_stop_pmc(cpu, adjri); if (PMC_TO_MODE(pm) == PMC_MODE_TC) { pcd->pcd_read_pmc(cpu, adjri, &newvalue); tmp = newvalue - PMC_PCPU_SAVED(cpu,ri); mtx_pool_lock_spin(pmc_mtxpool, pm); pm->pm_gv.pm_savedvalue += tmp; pp->pp_pmcs[ri].pp_pmcval += tmp; mtx_pool_unlock_spin( pmc_mtxpool, pm); } } } KASSERT((int64_t) counter_u64_fetch(pm->pm_runcount) > 0, ("[pmc,%d] runcount is %d", __LINE__, ri)); counter_u64_add(pm->pm_runcount, -1); (void) pcd->pcd_config_pmc(cpu, adjri, NULL); } /* * Inform the MD layer of this pseudo "context switch * out" */ (void) md->pmd_switch_out(pmc_pcpu[cpu], pp); critical_exit(); /* ok to be pre-empted now */ /* * Unlink this process from the PMCs that are * targeting it. This will send a signal to * all PMC owner's whose PMCs are orphaned. * * Log PMC value at exit time if requested. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) { if (pm->pm_flags & PMC_F_NEEDS_LOGFILE && PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm))) pmclog_process_procexit(pm, pp); pmc_unlink_target_process(pm, pp); } free(pp, M_PMC); } else critical_exit(); /* pp == NULL */ /* * If the process owned PMCs, free them up and free up * memory. */ if ((po = pmc_find_owner_descriptor(p)) != NULL) { pmc_remove_owner(po); pmc_destroy_owner_descriptor(po); } sx_xunlock(&pmc_sx); } /* * Handle a process fork. * * If the parent process 'p1' is under HWPMC monitoring, then copy * over any attached PMCs that have 'do_descendants' semantics. */ static void pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc, int flags) { int is_using_hwpmcs; unsigned int ri; uint32_t do_descendants; struct pmc *pm; struct pmc_owner *po; struct pmc_process *ppnew, *ppold; (void) flags; /* unused parameter */ PROC_LOCK(p1); is_using_hwpmcs = p1->p_flag & P_HWPMC; PROC_UNLOCK(p1); /* * If there are system-wide sampling PMCs active, we need to * log all fork events to their owner's logs. */ PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) { pmclog_process_procfork(po, p1->p_pid, newproc->p_pid); pmclog_process_proccreate(po, newproc, 1); } PMC_EPOCH_EXIT(); if (!is_using_hwpmcs) return; PMC_GET_SX_XLOCK(); PMCDBG4(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1, p1->p_pid, p1->p_comm, newproc); /* * If the parent process (curthread->td_proc) is a * target of any PMCs, look for PMCs that are to be * inherited, and link these into the new process * descriptor. 
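The arithmetic in the exit path above (the hardware value minus PMC_PCPU_SAVED, folded into pm_savedvalue) is the standard way of virtualizing a per-process counter: only counts accrued while the target was on CPU are accumulated. The tiny sketch below restates that bookkeeping in isolation; read_hw_counter() is a hypothetical stand-in for pcd_read_pmc(), not a real interface.

#include <stdint.h>

uint64_t read_hw_counter(void);         /* hypothetical hardware read */

struct vcounter {
        uint64_t        saved;          /* software-accumulated total */
        uint64_t        at_switch_in;   /* value latched at switch-in */
};

/* Latch the hardware counter when the target thread is switched in. */
static void
vcounter_switch_in(struct vcounter *vc)
{
        vc->at_switch_in = read_hw_counter();
}

/* On switch-out (or exit, as above) fold in only the on-CPU delta. */
static void
vcounter_switch_out(struct vcounter *vc)
{
        vc->saved += read_hw_counter() - vc->at_switch_in;
}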
*/ if ((ppold = pmc_find_process_descriptor(curthread->td_proc, PMC_FLAG_NONE)) == NULL) goto done; /* nothing to do */ do_descendants = 0; for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL) do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS; if (do_descendants == 0) /* nothing to do */ goto done; /* * Now mark the new process as being tracked by this driver. */ PROC_LOCK(newproc); newproc->p_flag |= P_HWPMC; PROC_UNLOCK(newproc); /* allocate a descriptor for the new process */ if ((ppnew = pmc_find_process_descriptor(newproc, PMC_FLAG_ALLOCATE)) == NULL) goto done; /* * Run through all PMCs that were targeting the old process * and which specified F_DESCENDANTS and attach them to the * new process. * * Log the fork event to all owners of PMCs attached to this * process, if not already logged. */ for (ri = 0; ri < md->pmd_npmc; ri++) if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL && (pm->pm_flags & PMC_F_DESCENDANTS)) { pmc_link_target_process(pm, ppnew); po = pm->pm_owner; if (po->po_sscount == 0 && po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_procfork(po, p1->p_pid, newproc->p_pid); } done: sx_xunlock(&pmc_sx); } static void pmc_process_threadcreate(struct thread *td) { struct pmc_owner *po; PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_threadcreate(po, td, 1); PMC_EPOCH_EXIT(); } static void pmc_process_threadexit(struct thread *td) { struct pmc_owner *po; PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_threadexit(po, td); PMC_EPOCH_EXIT(); } static void pmc_process_proccreate(struct proc *p) { struct pmc_owner *po; PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_proccreate(po, p, 1 /* sync */); PMC_EPOCH_EXIT(); } static void pmc_process_allproc(struct pmc *pm) { struct pmc_owner *po; struct thread *td; struct proc *p; po = pm->pm_owner; if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) return; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { pmclog_process_proccreate(po, p, 0 /* sync */); PROC_LOCK(p); FOREACH_THREAD_IN_PROC(p, td) pmclog_process_threadcreate(po, td, 0 /* sync */); PROC_UNLOCK(p); } sx_sunlock(&allproc_lock); pmclog_flush(po, 0); } static void pmc_kld_load(void *arg __unused, linker_file_t lf) { struct pmc_owner *po; /* * Notify owners of system sampling PMCs about KLD operations. */ PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_in(po, (pid_t) -1, (uintfptr_t) lf->address, lf->filename); PMC_EPOCH_EXIT(); /* * TODO: Notify owners of (all) process-sampling PMCs too. */ } static void pmc_kld_unload(void *arg __unused, const char *filename __unused, caddr_t address, size_t size) { struct pmc_owner *po; PMC_EPOCH_ENTER(); CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext) if (po->po_flags & PMC_PO_OWNS_LOGFILE) pmclog_process_map_out(po, (pid_t) -1, (uintfptr_t) address, (uintfptr_t) address + size); PMC_EPOCH_EXIT(); /* * TODO: Notify owners of process-sampling PMCs. */ } /* * initialization */ static const char * pmc_name_of_pmcclass(enum pmc_class class) { switch (class) { #undef __PMC_CLASS #define __PMC_CLASS(S,V,D) \ case PMC_CLASS_##S: \ return #S; __PMC_CLASSES(); default: return (""); } } /* * Base class initializer: allocate structure and set default classes. 
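pmc_name_of_pmcclass() just above leans on the driver's X-macro list: __PMC_CLASS() is redefined and the __PMC_CLASSES() table is expanded inside the switch, mapping each enum value back to its name. The same trick in a self-contained form, using a made-up three-entry list rather than the real table:

#include <stdio.h>

/* A made-up X-macro list standing in for __PMC_CLASSES(). */
#define MY_CLASSES()    \
        X(TSC)          \
        X(SOFT)         \
        X(IAP)

enum my_class {
#define X(name) CLASS_##name,
        MY_CLASSES()
#undef X
};

static const char *
class_name(enum my_class c)
{
        switch (c) {
#define X(name) case CLASS_##name: return (#name);
        MY_CLASSES()
#undef X
        default:
                return ("");
        }
}

int
main(void)
{
        printf("%s\n", class_name(CLASS_SOFT));         /* prints "SOFT" */
        return (0);
}

Keeping the list in a single macro means the enum and its string table cannot drift apart.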
*/ struct pmc_mdep * pmc_mdep_alloc(int nclasses) { struct pmc_mdep *md; int n; /* SOFT + md classes */ n = 1 + nclasses; md = malloc(sizeof(struct pmc_mdep) + n * sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO); md->pmd_nclass = n; /* Add base class. */ pmc_soft_initialize(md); return md; } void pmc_mdep_free(struct pmc_mdep *md) { pmc_soft_finalize(md); free(md, M_PMC); } static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp) { (void) pc; (void) pp; return (0); } static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp) { (void) pc; (void) pp; return (0); } static struct pmc_mdep * pmc_generic_cpu_initialize(void) { struct pmc_mdep *md; md = pmc_mdep_alloc(0); md->pmd_cputype = PMC_CPU_GENERIC; md->pmd_pcpu_init = NULL; md->pmd_pcpu_fini = NULL; md->pmd_switch_in = generic_switch_in; md->pmd_switch_out = generic_switch_out; return (md); } static void pmc_generic_cpu_finalize(struct pmc_mdep *md) { (void) md; } static int pmc_initialize(void) { int c, cpu, error, n, ri; unsigned int maxcpu, domain; struct pcpu *pc; struct pmc_binding pb; struct pmc_sample *ps; struct pmc_classdep *pcd; struct pmc_samplebuffer *sb; md = NULL; error = 0; pmc_stats.pm_intr_ignored = counter_u64_alloc(M_WAITOK); pmc_stats.pm_intr_processed = counter_u64_alloc(M_WAITOK); pmc_stats.pm_intr_bufferfull = counter_u64_alloc(M_WAITOK); pmc_stats.pm_syscalls = counter_u64_alloc(M_WAITOK); pmc_stats.pm_syscall_errors = counter_u64_alloc(M_WAITOK); pmc_stats.pm_buffer_requests = counter_u64_alloc(M_WAITOK); pmc_stats.pm_buffer_requests_failed = counter_u64_alloc(M_WAITOK); pmc_stats.pm_log_sweeps = counter_u64_alloc(M_WAITOK); pmc_stats.pm_merges = counter_u64_alloc(M_WAITOK); pmc_stats.pm_overwrites = counter_u64_alloc(M_WAITOK); #ifdef HWPMC_DEBUG /* parse debug flags first */ if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr, sizeof(pmc_debugstr))) pmc_debugflags_parse(pmc_debugstr, pmc_debugstr+strlen(pmc_debugstr)); #endif PMCDBG1(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION); /* check kernel version */ if (pmc_kernel_version != PMC_VERSION) { if (pmc_kernel_version == 0) printf("hwpmc: this kernel has not been compiled with " "'options HWPMC_HOOKS'.\n"); else printf("hwpmc: kernel version (0x%x) does not match " "module version (0x%x).\n", pmc_kernel_version, PMC_VERSION); return EPROGMISMATCH; } /* * check sysctl parameters */ if (pmc_hashsize <= 0) { (void) printf("hwpmc: tunable \"hashsize\"=%d must be " "greater than zero.\n", pmc_hashsize); pmc_hashsize = PMC_HASH_SIZE; } if (pmc_nsamples <= 0 || pmc_nsamples > 65535) { (void) printf("hwpmc: tunable \"nsamples\"=%d out of " "range.\n", pmc_nsamples); pmc_nsamples = PMC_NSAMPLES; } pmc_sample_mask = pmc_nsamples-1; if (pmc_callchaindepth <= 0 || pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) { (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of " "range - using %d.\n", pmc_callchaindepth, PMC_CALLCHAIN_DEPTH_MAX); pmc_callchaindepth = PMC_CALLCHAIN_DEPTH_MAX; } md = pmc_md_initialize(); if (md == NULL) { /* Default to generic CPU. */ md = pmc_generic_cpu_initialize(); if (md == NULL) return (ENOSYS); } KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1, ("[pmc,%d] no classes or pmcs", __LINE__)); /* Compute the map from row-indices to classdep pointers. 
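pmc_mdep_alloc() near the top of this block sizes one allocation as sizeof(struct pmc_mdep) plus n class descriptors, i.e. a header with a trailing array in a single malloc() block (the extra slot accounts for the SOFT class added by pmc_soft_initialize()). A user-level miniature of the same layout, using a C99 flexible array member and my own stand-in structures:

#include <stdlib.h>

struct classdep {
        int     caps;
};

struct mdep {
        int             nclass;
        struct classdep classdep[];     /* flexible array member */
};

/* One calloc() covers the header and all 'n' class descriptors. */
static struct mdep *
mdep_alloc(int nclasses)
{
        struct mdep *md;
        int n = 1 + nclasses;           /* room for the base class too */

        md = calloc(1, sizeof(*md) + n * sizeof(struct classdep));
        if (md != NULL)
                md->nclass = n;
        return (md);
}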
*/ pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) * md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO); for (n = 0; n < md->pmd_npmc; n++) pmc_rowindex_to_classdep[n] = NULL; for (ri = c = 0; c < md->pmd_nclass; c++) { pcd = &md->pmd_classdep[c]; for (n = 0; n < pcd->pcd_num; n++, ri++) pmc_rowindex_to_classdep[ri] = pcd; } KASSERT(ri == md->pmd_npmc, ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__, ri, md->pmd_npmc)); maxcpu = pmc_cpu_max(); /* allocate space for the per-cpu array */ pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC, M_WAITOK|M_ZERO); /* per-cpu 'saved values' for managing process-mode PMCs */ pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc, M_PMC, M_WAITOK); /* Perform CPU-dependent initialization. */ pmc_save_cpu_binding(&pb); error = 0; for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) { if (!pmc_cpu_is_active(cpu)) continue; pmc_select_cpu(cpu); pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) + md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC, M_WAITOK|M_ZERO); if (md->pmd_pcpu_init) error = md->pmd_pcpu_init(md, cpu); for (n = 0; error == 0 && n < md->pmd_nclass; n++) error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu); } pmc_restore_cpu_binding(&pb); if (error) return (error); /* allocate space for the sample array */ for (cpu = 0; cpu < maxcpu; cpu++) { if (!pmc_cpu_is_active(cpu)) continue; pc = pcpu_find(cpu); domain = pc->pc_domain; - sb = malloc_domain(sizeof(struct pmc_samplebuffer) + - pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain, - M_WAITOK|M_ZERO); + sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); KASSERT(pmc_pcpu[cpu] != NULL, ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); - sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples * - sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO); + sb->ps_callchains = malloc_domainset(pmc_callchaindepth * + pmc_nsamples * sizeof(uintptr_t), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) ps->ps_pc = sb->ps_callchains + (n * pmc_callchaindepth); pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb; - sb = malloc_domain(sizeof(struct pmc_samplebuffer) + - pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain, - M_WAITOK|M_ZERO); + sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); - KASSERT(pmc_pcpu[cpu] != NULL, - ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); - - sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples * - sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO); - + sb->ps_callchains = malloc_domainset(pmc_callchaindepth * + pmc_nsamples * sizeof(uintptr_t), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) ps->ps_pc = sb->ps_callchains + (n * pmc_callchaindepth); pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb; - sb = malloc_domain(sizeof(struct pmc_samplebuffer) + - pmc_nsamples * sizeof(struct pmc_sample), M_PMC, domain, - M_WAITOK|M_ZERO); - - KASSERT(pmc_pcpu[cpu] != NULL, - ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu)); - - sb->ps_callchains = malloc_domain(pmc_callchaindepth * pmc_nsamples * - sizeof(uintptr_t), M_PMC, domain, M_WAITOK|M_ZERO); - + sb = malloc_domainset(sizeof(struct pmc_samplebuffer) + + pmc_nsamples * sizeof(struct pmc_sample), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); + 
sb->ps_callchains = malloc_domainset(pmc_callchaindepth * + pmc_nsamples * sizeof(uintptr_t), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++) - ps->ps_pc = sb->ps_callchains + - (n * pmc_callchaindepth); + ps->ps_pc = sb->ps_callchains + n * pmc_callchaindepth; pmc_pcpu[cpu]->pc_sb[PMC_UR] = sb; } /* allocate space for the row disposition array */ pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO); /* mark all PMCs as available */ for (n = 0; n < (int) md->pmd_npmc; n++) PMC_MARK_ROW_FREE(n); /* allocate thread hash tables */ pmc_ownerhash = hashinit(pmc_hashsize, M_PMC, &pmc_ownerhashmask); pmc_processhash = hashinit(pmc_hashsize, M_PMC, &pmc_processhashmask); mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf", MTX_SPIN); CK_LIST_INIT(&pmc_ss_owners); pmc_ss_count = 0; /* allocate a pool of spin mutexes */ pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size, MTX_SPIN); PMCDBG4(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx " "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask, pmc_processhash, pmc_processhashmask); /* Initialize a spin mutex for the thread free list. */ mtx_init(&pmc_threadfreelist_mtx, "pmc-threadfreelist", "pmc-leaf", MTX_SPIN); /* * Initialize the callout to monitor the thread free list. * This callout will also handle the initial population of the list. */ taskqgroup_config_gtask_init(NULL, &free_gtask, pmc_thread_descriptor_pool_free_task, "thread descriptor pool free task"); /* register process {exit,fork,exec} handlers */ pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit, pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY); pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork, pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY); /* register kld event handlers */ pmc_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, pmc_kld_load, NULL, EVENTHANDLER_PRI_ANY); pmc_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, pmc_kld_unload, NULL, EVENTHANDLER_PRI_ANY); /* initialize logging */ pmclog_initialize(); /* set hook functions */ pmc_intr = md->pmd_intr; wmb(); pmc_hook = pmc_hook_handler; if (error == 0) { printf(PMC_MODULE_NAME ":"); for (n = 0; n < (int) md->pmd_nclass; n++) { pcd = &md->pmd_classdep[n]; printf(" %s/%d/%d/0x%b", pmc_name_of_pmcclass(pcd->pcd_class), pcd->pcd_num, pcd->pcd_width, pcd->pcd_caps, "\20" "\1INT\2USR\3SYS\4EDG\5THR" "\6REA\7WRI\10INV\11QUA\12PRC" "\13TAG\14CSC"); } printf("\n"); } return (error); } /* prepare to be unloaded */ static void pmc_cleanup(void) { int c, cpu; unsigned int maxcpu; struct pmc_ownerhash *ph; struct pmc_owner *po, *tmp; struct pmc_binding pb; #ifdef HWPMC_DEBUG struct pmc_processhash *prh; #endif PMCDBG0(MOD,INI,0, "cleanup"); /* switch off sampling */ CPU_FOREACH(cpu) DPCPU_ID_SET(cpu, pmc_sampled, 0); pmc_intr = NULL; sx_xlock(&pmc_sx); if (pmc_hook == NULL) { /* being unloaded already */ sx_xunlock(&pmc_sx); return; } pmc_hook = NULL; /* prevent new threads from entering module */ /* deregister event handlers */ EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag); EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag); EVENTHANDLER_DEREGISTER(kld_load, pmc_kld_load_tag); EVENTHANDLER_DEREGISTER(kld_unload, pmc_kld_unload_tag); /* send SIGBUS to all owner threads, free up allocations */ if (pmc_ownerhash) for (ph = pmc_ownerhash; ph <= &pmc_ownerhash[pmc_ownerhashmask]; ph++) { LIST_FOREACH_SAFE(po, ph, po_next, tmp) { pmc_remove_owner(po); /* send SIGBUS to owner processes */ PMCDBG3(MOD,INI,2, "cleanup signal 
proc=%p " "(%d, %s)", po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm); PROC_LOCK(po->po_owner); kern_psignal(po->po_owner, SIGBUS); PROC_UNLOCK(po->po_owner); pmc_destroy_owner_descriptor(po); } } /* reclaim allocated data structures */ mtx_destroy(&pmc_threadfreelist_mtx); pmc_thread_descriptor_pool_drain(); if (pmc_mtxpool) mtx_pool_destroy(&pmc_mtxpool); mtx_destroy(&pmc_processhash_mtx); taskqgroup_config_gtask_deinit(&free_gtask); if (pmc_processhash) { #ifdef HWPMC_DEBUG struct pmc_process *pp; PMCDBG0(MOD,INI,3, "destroy process hash"); for (prh = pmc_processhash; prh <= &pmc_processhash[pmc_processhashmask]; prh++) LIST_FOREACH(pp, prh, pp_next) PMCDBG1(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid); #endif hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask); pmc_processhash = NULL; } if (pmc_ownerhash) { PMCDBG0(MOD,INI,3, "destroy owner hash"); hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask); pmc_ownerhash = NULL; } KASSERT(CK_LIST_EMPTY(&pmc_ss_owners), ("[pmc,%d] Global SS owner list not empty", __LINE__)); KASSERT(pmc_ss_count == 0, ("[pmc,%d] Global SS count not empty", __LINE__)); /* do processor and pmc-class dependent cleanup */ maxcpu = pmc_cpu_max(); PMCDBG0(MOD,INI,3, "md cleanup"); if (md) { pmc_save_cpu_binding(&pb); for (cpu = 0; cpu < maxcpu; cpu++) { PMCDBG2(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p", cpu, pmc_pcpu[cpu]); if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL) continue; pmc_select_cpu(cpu); for (c = 0; c < md->pmd_nclass; c++) md->pmd_classdep[c].pcd_pcpu_fini(md, cpu); if (md->pmd_pcpu_fini) md->pmd_pcpu_fini(md, cpu); } if (md->pmd_cputype == PMC_CPU_GENERIC) pmc_generic_cpu_finalize(md); else pmc_md_finalize(md); pmc_mdep_free(md); md = NULL; pmc_restore_cpu_binding(&pb); } /* Free per-cpu descriptors. */ for (cpu = 0; cpu < maxcpu; cpu++) { if (!pmc_cpu_is_active(cpu)) continue; KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL, ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__, cpu)); KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL, ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__, cpu)); KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_UR] != NULL, ("[pmc,%d] Null userret cpu sample buffer cpu=%d", __LINE__, cpu)); free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC); free_domain(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC); free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC); free_domain(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC); free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR]->ps_callchains, M_PMC); free_domain(pmc_pcpu[cpu]->pc_sb[PMC_UR], M_PMC); free_domain(pmc_pcpu[cpu], M_PMC); } free(pmc_pcpu, M_PMC); pmc_pcpu = NULL; free(pmc_pcpu_saved, M_PMC); pmc_pcpu_saved = NULL; if (pmc_pmcdisp) { free(pmc_pmcdisp, M_PMC); pmc_pmcdisp = NULL; } if (pmc_rowindex_to_classdep) { free(pmc_rowindex_to_classdep, M_PMC); pmc_rowindex_to_classdep = NULL; } pmclog_shutdown(); counter_u64_free(pmc_stats.pm_intr_ignored); counter_u64_free(pmc_stats.pm_intr_processed); counter_u64_free(pmc_stats.pm_intr_bufferfull); counter_u64_free(pmc_stats.pm_syscalls); counter_u64_free(pmc_stats.pm_syscall_errors); counter_u64_free(pmc_stats.pm_buffer_requests); counter_u64_free(pmc_stats.pm_buffer_requests_failed); counter_u64_free(pmc_stats.pm_log_sweeps); counter_u64_free(pmc_stats.pm_merges); counter_u64_free(pmc_stats.pm_overwrites); sx_xunlock(&pmc_sx); /* we are done */ } /* * The function called at load/unload. 
*/ static int load (struct module *module __unused, int cmd, void *arg __unused) { int error; error = 0; switch (cmd) { case MOD_LOAD : /* initialize the subsystem */ error = pmc_initialize(); if (error != 0) break; PMCDBG2(MOD,INI,1, "syscall=%d maxcpu=%d", pmc_syscall_num, pmc_cpu_max()); break; case MOD_UNLOAD : case MOD_SHUTDOWN: pmc_cleanup(); PMCDBG0(MOD,INI,1, "unloaded"); break; default : error = EINVAL; /* XXX should panic(9) */ break; } return error; } Index: head/sys/i386/i386/pmap.c =================================================================== --- head/sys/i386/i386/pmap.c (revision 339926) +++ head/sys/i386/i386/pmap.c (revision 339927) @@ -1,6053 +1,6053 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2005-2010 Alan L. Cox * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved. * Copyright (c) 2018 The FreeBSD Foundation * All rights reserved. * * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Portions of this software were developed by * Konstantin Belousov under sponsorship from * the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. */ #include "opt_apic.h" #include "opt_cpu.h" #include "opt_pmap.h" #include "opt_smp.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_APIC #include #include #include #endif #include #include #include #include #include #include #include #ifdef SMP #include #endif #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if !defined(DIAGNOSTIC) #ifdef __GNUC_GNU_INLINE__ #define PMAP_INLINE __attribute__((__gnu_inline__)) inline #else #define PMAP_INLINE extern inline #endif #else #define PMAP_INLINE #endif #ifdef PV_STATS #define PV_STAT(x) do { x ; } while (0) #else #define PV_STAT(x) do { } while (0) #endif #define pa_index(pa) ((pa) >> PDRSHIFT) #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) /* * Get PDEs and PTEs for user/kernel address space */ #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT]) #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0) #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0) #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0) #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0) #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0) #define pmap_pte_set_w(pte, v) ((v) ? 
atomic_set_int((u_int *)(pte), PG_W) : \ atomic_clear_int((u_int *)(pte), PG_W)) #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v))) struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static int pgeflag = 0; /* PG_G or-in */ static int pseflag = 0; /* PG_PS or-in */ static int nkpt = NKPT; vm_offset_t kernel_vm_end = /* 0 + */ NKPT * NBPDR; #if defined(PAE) || defined(PAE_TABLES) pt_entry_t pg_nx; static uma_zone_t pdptzone; #endif static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); static int pat_works = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1, "Is page attribute table fully functional?"); static int pg_ps_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &pg_ps_enabled, 0, "Are large page mappings enabled?"); #define PAT_INDEX_SIZE 8 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */ /* * pmap_mapdev support pre initialization (i.e. console) */ #define PMAP_PREINIT_MAPPING_COUNT 8 static struct pmap_preinit_mapping { vm_paddr_t pa; vm_offset_t va; vm_size_t sz; int mode; } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT]; static int pmap_initialized; static struct rwlock_padalign pvh_global_lock; /* * Data for the pv entry allocation mechanism */ static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; static struct md_page *pv_table; static int shpgperproc = PMAP_SHPGPERPROC; struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */ int pv_maxchunks; /* How many chunks we have KVA for */ vm_offset_t pv_vafree; /* freelist stored in the PTE */ /* * All those kernel PT submaps that BSD is so fond of */ pt_entry_t *CMAP3; static pd_entry_t *KPTD; caddr_t ptvmmap = 0; caddr_t CADDR3; /* * Crashdump maps. */ static caddr_t crashdumpmap; static pt_entry_t *PMAP1 = NULL, *PMAP2, *PMAP3; static pt_entry_t *PADDR1 = NULL, *PADDR2, *PADDR3; #ifdef SMP static int PMAP1cpu, PMAP3cpu; static int PMAP1changedcpu; SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, &PMAP1changedcpu, 0, "Number of times pmap_pte_quick changed CPU with same PMAP1"); #endif static int PMAP1changed; SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, &PMAP1changed, 0, "Number of times pmap_pte_quick changed PMAP1"); static int PMAP1unchanged; SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, &PMAP1unchanged, 0, "Number of times pmap_pte_quick didn't change PMAP1"); static struct mtx PMAP2mutex; int pti; /* * Internal flags for pmap_enter()'s helper functions. */ #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. 
*/ static void free_pv_chunk(struct pv_chunk *pc); static void free_pv_entry(pmap_t pmap, pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags); #if VM_NRESERVLEVEL > 0 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); #endif static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va); static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot); static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, vm_page_t m); static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte); static void pmap_flush_page(vm_page_t m); static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte); static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva); static void pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva); static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde); static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); static boolean_t pmap_is_modified_pvh(struct md_page *pvh); static boolean_t pmap_is_referenced_pvh(struct md_page *pvh); static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde); static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); #if VM_NRESERVLEVEL > 0 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); #endif static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot); static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, struct spglist *free); static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, struct spglist *free); static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); static void pmap_remove_page(struct pmap *pmap, vm_offset_t va, struct spglist *free); static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, struct spglist *free); static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde); static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde); static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags); static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags); static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free); static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va); static void pmap_pte_release(pt_entry_t *pte); static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *); #if defined(PAE) || defined(PAE_TABLES) static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, int wait); #endif static void pmap_init_trm(void); static __inline void pagezero(void *page); CTASSERT(1 << 
PDESHIFT == sizeof(pd_entry_t)); CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); void pmap_cold(void); extern char _end[]; u_long physfree; /* phys addr of next free page */ u_long vm86phystk; /* PA of vm86/bios stack */ u_long vm86paddr; /* address of vm86 region */ int vm86pa; /* phys addr of vm86 region */ u_long KERNend; /* phys addr end of kernel (just after bss) */ pd_entry_t *IdlePTD; /* phys addr of kernel PTD */ #if defined(PAE) || defined(PAE_TABLES) pdpt_entry_t *IdlePDPT; /* phys addr of kernel PDPT */ #endif pt_entry_t *KPTmap; /* address of kernel page tables */ u_long KPTphys; /* phys addr of kernel page tables */ extern u_long tramp_idleptd; static u_long allocpages(u_int cnt, u_long *physfree) { u_long res; res = *physfree; *physfree += PAGE_SIZE * cnt; bzero((void *)res, PAGE_SIZE * cnt); return (res); } static void pmap_cold_map(u_long pa, u_long va, u_long cnt) { pt_entry_t *pt; for (pt = (pt_entry_t *)KPTphys + atop(va); cnt > 0; cnt--, pt++, va += PAGE_SIZE, pa += PAGE_SIZE) *pt = pa | PG_V | PG_RW | PG_A | PG_M; } static void pmap_cold_mapident(u_long pa, u_long cnt) { pmap_cold_map(pa, pa, cnt); } _Static_assert(2 * NBPDR == KERNBASE, "Broken double-map of zero PTD"); /* * Called from locore.s before paging is enabled. Sets up the first * kernel page table. Since kernel is mapped with PA == VA, this code * does not require relocations. */ void pmap_cold(void) { pt_entry_t *pt; u_long a; u_int cr3, ncr4; physfree = (u_long)&_end; if (bootinfo.bi_esymtab != 0) physfree = bootinfo.bi_esymtab; if (bootinfo.bi_kernend != 0) physfree = bootinfo.bi_kernend; physfree = roundup2(physfree, NBPDR); KERNend = physfree; /* Allocate Kernel Page Tables */ KPTphys = allocpages(NKPT, &physfree); KPTmap = (pt_entry_t *)KPTphys; /* Allocate Page Table Directory */ #if defined(PAE) || defined(PAE_TABLES) /* XXX only need 32 bytes (easier for now) */ IdlePDPT = (pdpt_entry_t *)allocpages(1, &physfree); #endif IdlePTD = (pd_entry_t *)allocpages(NPGPTD, &physfree); /* * Allocate KSTACK. Leave a guard page between IdlePTD and * proc0kstack, to control stack overflow for thread0 and * prevent corruption of the page table. We leak the guard * physical memory due to 1:1 mappings. */ allocpages(1, &physfree); proc0kstack = allocpages(TD0_KSTACK_PAGES, &physfree); /* vm86/bios stack */ vm86phystk = allocpages(1, &physfree); /* pgtable + ext + IOPAGES */ vm86paddr = vm86pa = allocpages(3, &physfree); /* Install page tables into PTD. Page table page 1 is wasted. */ for (a = 0; a < NKPT; a++) IdlePTD[a] = (KPTphys + ptoa(a)) | PG_V | PG_RW | PG_A | PG_M; #if defined(PAE) || defined(PAE_TABLES) /* PAE install PTD pointers into PDPT */ for (a = 0; a < NPGPTD; a++) IdlePDPT[a] = ((u_int)IdlePTD + ptoa(a)) | PG_V; #endif /* * Install recursive mapping for kernel page tables into * itself. */ for (a = 0; a < NPGPTD; a++) IdlePTD[PTDPTDI + a] = ((u_int)IdlePTD + ptoa(a)) | PG_V | PG_RW; /* * Initialize page table pages mapping physical address zero * through the (physical) end of the kernel. Many of these * pages must be reserved, and we reserve them all and map * them linearly for convenience. We do this even if we've * enabled PSE above; we'll just switch the corresponding * kernel PDEs before we turn on paging. * * This and all other page table entries allow read and write * access for various reasons. Kernel mappings never have any * access restrictions. 
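allocpages() earlier in this block is a boot-time bump allocator: physical memory is still identity-mapped at this stage, so it simply hands out 'cnt' zeroed pages and advances the free pointer, with no way to free. A standalone miniature of the same idea, with an ordinary static buffer standing in for physical memory:

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE       4096UL

static unsigned char arena[64 * PAGE_SIZE];     /* stand-in "physical" memory */

/* Hand out cnt zeroed pages and bump the cursor; nothing is ever freed. */
static uintptr_t
allocpages(unsigned int cnt, uintptr_t *freep)
{
        uintptr_t res = *freep;

        *freep += PAGE_SIZE * cnt;
        memset((void *)res, 0, PAGE_SIZE * cnt);
        return (res);
}

/* Example: carve a two-page table and a one-page stack from the arena. */
static void
example(void)
{
        uintptr_t freeptr = (uintptr_t)arena;
        uintptr_t table = allocpages(2, &freeptr);
        uintptr_t stack = allocpages(1, &freeptr);

        (void)table;
        (void)stack;
}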
*/ pmap_cold_mapident(0, atop(NBPDR)); pmap_cold_map(0, NBPDR, atop(NBPDR)); pmap_cold_mapident(KERNBASE, atop(KERNend - KERNBASE)); /* Map page table directory */ #if defined(PAE) || defined(PAE_TABLES) pmap_cold_mapident((u_long)IdlePDPT, 1); #endif pmap_cold_mapident((u_long)IdlePTD, NPGPTD); /* Map early KPTmap. It is really pmap_cold_mapident. */ pmap_cold_map(KPTphys, (u_long)KPTmap, NKPT); /* Map proc0kstack */ pmap_cold_mapident(proc0kstack, TD0_KSTACK_PAGES); /* ISA hole already mapped */ pmap_cold_mapident(vm86phystk, 1); pmap_cold_mapident(vm86pa, 3); /* Map page 0 into the vm86 page table */ *(pt_entry_t *)vm86pa = 0 | PG_RW | PG_U | PG_A | PG_M | PG_V; /* ...likewise for the ISA hole for vm86 */ for (pt = (pt_entry_t *)vm86pa + atop(ISA_HOLE_START), a = 0; a < atop(ISA_HOLE_LENGTH); a++, pt++) *pt = (ISA_HOLE_START + ptoa(a)) | PG_RW | PG_U | PG_A | PG_M | PG_V; /* Enable PSE, PGE, VME, and PAE if configured. */ ncr4 = 0; if ((cpu_feature & CPUID_PSE) != 0) { ncr4 |= CR4_PSE; pseflag = PG_PS; /* * Superpage mapping of the kernel text. Existing 4k * page table pages are wasted. */ for (a = KERNBASE; a < KERNend; a += NBPDR) IdlePTD[a >> PDRSHIFT] = a | PG_PS | PG_A | PG_M | PG_RW | PG_V; } if ((cpu_feature & CPUID_PGE) != 0) { ncr4 |= CR4_PGE; pgeflag = PG_G; } ncr4 |= (cpu_feature & CPUID_VME) != 0 ? CR4_VME : 0; #if defined(PAE) || defined(PAE_TABLES) ncr4 |= CR4_PAE; #endif if (ncr4 != 0) load_cr4(rcr4() | ncr4); /* Now enable paging */ #if defined(PAE) || defined(PAE_TABLES) cr3 = (u_int)IdlePDPT; #else cr3 = (u_int)IdlePTD; #endif tramp_idleptd = cr3; load_cr3(cr3); load_cr0(rcr0() | CR0_PG); /* * Now running relocated at KERNBASE where the system is * linked to run. */ /* * Remove the lowest part of the double mapping of low memory * to get some null pointer checks. */ IdlePTD[0] = 0; load_cr3(cr3); /* invalidate TLB */ } /* * Bootstrap the system enough to run with virtual memory. * * On the i386 this is called after mapping has already been enabled * in locore.s with the page table created in pmap_cold(), * and just syncs the pmap module with what has already been done. */ void pmap_bootstrap(vm_paddr_t firstaddr) { vm_offset_t va; pt_entry_t *pte, *unused; struct pcpu *pc; u_long res; int i; res = atop(firstaddr - (vm_paddr_t)KERNLOAD); /* * Add a physical memory segment (vm_phys_seg) corresponding to the * preallocated kernel page table pages so that vm_page structures * representing these pages will be created. The vm_page structures * are required for promotion of the corresponding kernel virtual * addresses to superpage mappings. */ vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt)); /* * Initialize the first available kernel virtual address. However, * using "firstaddr" may waste a few pages of the kernel virtual * address space, because locore may not have mapped every physical * page that it allocated. Preferably, locore would provide a first * unused virtual address in addition to "firstaddr". */ virtual_avail = (vm_offset_t)firstaddr; virtual_end = VM_MAX_KERNEL_ADDRESS; /* * Initialize the kernel pmap (which is statically allocated). * Count bootstrap data as being resident in case any of this data is * later unmapped (using pmap_remove()) and freed. 
*/ PMAP_LOCK_INIT(kernel_pmap); kernel_pmap->pm_pdir = IdlePTD; #if defined(PAE) || defined(PAE_TABLES) kernel_pmap->pm_pdpt = IdlePDPT; #endif CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ kernel_pmap->pm_stats.resident_count = res; TAILQ_INIT(&kernel_pmap->pm_pvchunk); /* * Initialize the global pv list lock. */ rw_init(&pvh_global_lock, "pmap pv global"); /* * Reserve some special page table entries/VA space for temporary * mapping of pages. */ #define SYSMAP(c, p, v, n) \ v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); va = virtual_avail; pte = vtopte(va); /* * Initialize temporary map objects on the current CPU for use * during early boot. * CMAP1/CMAP2 are used for zeroing and copying pages. * CMAP3 is used for the boot-time memory test. */ pc = get_pcpu(); mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1) SYSMAP(caddr_t, pc->pc_cmap_pte2, pc->pc_cmap_addr2, 1) SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1) SYSMAP(caddr_t, CMAP3, CADDR3, 1); /* * Crashdump maps. */ SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS) /* * ptvmmap is used for reading arbitrary physical pages via /dev/mem. */ SYSMAP(caddr_t, unused, ptvmmap, 1) /* * msgbufp is used to map the system message buffer. */ SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize))) /* * KPTmap is used by pmap_kextract(). * * KPTmap is first initialized by locore. However, that initial * KPTmap can only support NKPT page table pages. Here, a larger * KPTmap is created that can support KVA_PAGES page table pages. */ SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES) for (i = 0; i < NKPT; i++) KPTD[i] = (KPTphys + ptoa(i)) | PG_RW | PG_V; /* * PADDR1 and PADDR2 are used by pmap_pte_quick() and pmap_pte(), * respectively. */ SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1) SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1) SYSMAP(pt_entry_t *, PMAP3, PADDR3, 1) mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); virtual_avail = va; /* * Initialize the PAT MSR if present. * pmap_init_pat() clears and sets CR4_PGE, which, as a * side-effect, invalidates stale PG_G TLB entries that might * have been created in our pre-boot environment. We assume * that PAT support implies PGE and in reverse, PGE presence * comes with PAT. Both features were added for Pentium Pro. */ pmap_init_pat(); } static void pmap_init_reserved_pages(void) { struct pcpu *pc; vm_offset_t pages; int i; CPU_FOREACH(i) { pc = pcpu_find(i); mtx_init(&pc->pc_copyout_mlock, "cpmlk", NULL, MTX_DEF | MTX_NEW); pc->pc_copyout_maddr = kva_alloc(ptoa(2)); if (pc->pc_copyout_maddr == 0) panic("unable to allocate non-sleepable copyout KVA"); sx_init(&pc->pc_copyout_slock, "cpslk"); pc->pc_copyout_saddr = kva_alloc(ptoa(2)); if (pc->pc_copyout_saddr == 0) panic("unable to allocate sleepable copyout KVA"); pc->pc_pmap_eh_va = kva_alloc(ptoa(1)); if (pc->pc_pmap_eh_va == 0) panic("unable to allocate pmap_extract_and_hold KVA"); pc->pc_pmap_eh_ptep = (char *)vtopte(pc->pc_pmap_eh_va); /* * Skip if the mappings have already been initialized, * i.e. this is the BSP. 
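 *
 * (The BSP's entries were created in pmap_bootstrap() with the
 * SYSMAP() macro, which is why pc_cmap_addr1 is already non-zero for
 * it.  For reference, SYSMAP(c, p, v, n) expands to nothing more than
 *
 *	v = (c)va; va += ((n) * PAGE_SIZE); p = pte; pte += (n);
 *
 * so, for example, SYSMAP(caddr_t, pc->pc_cmap_pte1, pc->pc_cmap_addr1, 1)
 * consumes one page of bootstrap KVA and one PTE slot and records both
 * in the per-CPU area.)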
*/ if (pc->pc_cmap_addr1 != 0) continue; mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); pages = kva_alloc(PAGE_SIZE * 3); if (pages == 0) panic("unable to allocate CMAP KVA"); pc->pc_cmap_pte1 = vtopte(pages); pc->pc_cmap_pte2 = vtopte(pages + PAGE_SIZE); pc->pc_cmap_addr1 = (caddr_t)pages; pc->pc_cmap_addr2 = (caddr_t)(pages + PAGE_SIZE); pc->pc_qmap_addr = pages + atop(2); } } SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); /* * Setup the PAT MSR. */ void pmap_init_pat(void) { int pat_table[PAT_INDEX_SIZE]; uint64_t pat_msr; u_long cr0, cr4; int i; /* Set default PAT index table. */ for (i = 0; i < PAT_INDEX_SIZE; i++) pat_table[i] = -1; pat_table[PAT_WRITE_BACK] = 0; pat_table[PAT_WRITE_THROUGH] = 1; pat_table[PAT_UNCACHEABLE] = 3; pat_table[PAT_WRITE_COMBINING] = 3; pat_table[PAT_WRITE_PROTECTED] = 3; pat_table[PAT_UNCACHED] = 3; /* * Bail if this CPU doesn't implement PAT. * We assume that PAT support implies PGE. */ if ((cpu_feature & CPUID_PAT) == 0) { for (i = 0; i < PAT_INDEX_SIZE; i++) pat_index[i] = pat_table[i]; pat_works = 0; return; } /* * Due to some Intel errata, we can only safely use the lower 4 * PAT entries. * * Intel Pentium III Processor Specification Update * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B * or Mode C Paging) * * Intel Pentium IV Processor Specification Update * Errata N46 (PAT Index MSB May Be Calculated Incorrectly) */ if (cpu_vendor_id == CPU_VENDOR_INTEL && !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) pat_works = 0; /* Initialize default PAT entries. */ pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_THROUGH) | PAT_VALUE(2, PAT_UNCACHED) | PAT_VALUE(3, PAT_UNCACHEABLE) | PAT_VALUE(4, PAT_WRITE_BACK) | PAT_VALUE(5, PAT_WRITE_THROUGH) | PAT_VALUE(6, PAT_UNCACHED) | PAT_VALUE(7, PAT_UNCACHEABLE); if (pat_works) { /* * Leave the indices 0-3 at the default of WB, WT, UC-, and UC. * Program 5 and 6 as WP and WC. * Leave 4 and 7 as WB and UC. */ pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6)); pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) | PAT_VALUE(6, PAT_WRITE_COMBINING); pat_table[PAT_UNCACHED] = 2; pat_table[PAT_WRITE_PROTECTED] = 5; pat_table[PAT_WRITE_COMBINING] = 6; } else { /* * Just replace PAT Index 2 with WC instead of UC-. */ pat_msr &= ~PAT_MASK(2); pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING); pat_table[PAT_WRITE_COMBINING] = 2; } /* Disable PGE. */ cr4 = rcr4(); load_cr4(cr4 & ~CR4_PGE); /* Disable caches (CD = 1, NW = 0). */ cr0 = rcr0(); load_cr0((cr0 & ~CR0_NW) | CR0_CD); /* Flushes caches and TLBs. */ wbinvd(); invltlb(); /* Update PAT and index table. */ wrmsr(MSR_PAT, pat_msr); for (i = 0; i < PAT_INDEX_SIZE; i++) pat_index[i] = pat_table[i]; /* Flush caches and TLBs again. */ wbinvd(); invltlb(); /* Restore caches and PGE. */ load_cr0(cr0); load_cr4(cr4); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pat_mode = PAT_WRITE_BACK; } #if defined(PAE) || defined(PAE_TABLES) static void * pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, int wait) { /* Inform UMA that this allocator uses kernel_map/object. 
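 *
 * (Illustrative context, not part of the change itself: the
 * 0x0 .. 0xffffffff physical range requested below matters because on
 * PAE the PDPT base is loaded into %cr3, a 32-bit register, so its
 * backing memory must come from the low 4 GB; pmap_pinit() asserts
 * exactly that.  The zone is wired up later in pmap_init() with
 *
 *	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
 *
 * which is how UMA ends up calling this hook for each new slab.)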
*/ *flags = UMA_SLAB_KERNEL; - return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, 0x0ULL, - 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); + return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), + bytes, wait, 0x0ULL, 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT)); } #endif /* * Abuse the pte nodes for unmapped kva to thread a kva freelist through. * Requirements: * - Must deal with pages in order to ensure that none of the PG_* bits * are ever set, PG_V in particular. * - Assumes we can write to ptes without pte_store() atomic ops, even * on PAE systems. This should be ok. * - Assumes nothing will ever test these addresses for 0 to indicate * no mapping instead of correctly checking PG_V. * - Assumes a vm_offset_t will fit in a pte (true for i386). * Because PG_V is never set, there can be no mappings to invalidate. */ static vm_offset_t pmap_ptelist_alloc(vm_offset_t *head) { pt_entry_t *pte; vm_offset_t va; va = *head; if (va == 0) panic("pmap_ptelist_alloc: exhausted ptelist KVA"); pte = vtopte(va); *head = *pte; if (*head & PG_V) panic("pmap_ptelist_alloc: va with PG_V set!"); *pte = 0; return (va); } static void pmap_ptelist_free(vm_offset_t *head, vm_offset_t va) { pt_entry_t *pte; if (va & PG_V) panic("pmap_ptelist_free: freeing va with PG_V set!"); pte = vtopte(va); *pte = *head; /* virtual! PG_V is 0 though */ *head = va; } static void pmap_ptelist_init(vm_offset_t *head, void *base, int npages) { int i; vm_offset_t va; *head = 0; for (i = npages - 1; i >= 0; i--) { va = (vm_offset_t)base + i * PAGE_SIZE; pmap_ptelist_free(head, va); } } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { struct pmap_preinit_mapping *ppim; vm_page_t mpte; vm_size_t s; int i, pv_npg; /* * Initialize the vm page array entries for the kernel pmap's * page table pages. */ PMAP_LOCK(kernel_pmap); for (i = 0; i < NKPT; i++) { mpte = PHYS_TO_VM_PAGE(KPTphys + ptoa(i)); KASSERT(mpte >= vm_page_array && mpte < &vm_page_array[vm_page_array_size], ("pmap_init: page table page is out of range")); mpte->pindex = i + KPTDI; mpte->phys_addr = KPTphys + ptoa(i); mpte->wire_count = 1; if (pseflag != 0 && KERNBASE <= i << PDRSHIFT && i << PDRSHIFT < KERNend && pmap_insert_pt_page(kernel_pmap, mpte)) panic("pmap_init: pmap_insert_pt_page failed"); } PMAP_UNLOCK(kernel_pmap); vm_wire_add(NKPT); /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_max = roundup(pv_entry_max, _NPCPV); pv_entry_high_water = 9 * (pv_entry_max / 10); /* * If the kernel is running on a virtual machine, then it must assume * that MCA is enabled by the hypervisor. Moreover, the kernel must * be prepared for the hypervisor changing the vendor and family that * are reported by CPUID. Consequently, the workaround for AMD Family * 10h Erratum 383 is enabled if the processor's feature set does not * include at least one feature that is only supported by older Intel * or newer AMD processors. 
*/ if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 && (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI | CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP | AMDID2_FMA4)) == 0) workaround_erratum383 = 1; /* * Are large page mappings supported and enabled? */ TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled); if (pseflag == 0) pg_ps_enabled = 0; else if (pg_ps_enabled) { KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, ("pmap_init: can't assign to pagesizes[1]")); pagesizes[1] = NBPDR; } /* * Calculate the size of the pv head table for superpages. * Handle the possibility that "vm_phys_segs[...].end" is zero. */ pv_npg = trunc_4mpage(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE) / NBPDR + 1; /* * Allocate memory for the pv head table for superpages. */ s = (vm_size_t)(pv_npg * sizeof(struct md_page)); s = round_page(s); pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO); for (i = 0; i < pv_npg; i++) TAILQ_INIT(&pv_table[i].pv_list); pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); if (pv_chunkbase == NULL) panic("pmap_init: not enough kvm for pv chunks"); pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks); #if defined(PAE) || defined(PAE_TABLES) pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL, NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1, UMA_ZONE_VM | UMA_ZONE_NOFREE); uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf); #endif pmap_initialized = 1; pmap_init_trm(); if (!bootverbose) return; for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->va == 0) continue; printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i, (uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode); } } SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, "Max number of PV entries"); SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, "Page share factor per proc"); static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0, "2/4MB page mapping counters"); static u_long pmap_pde_demotions; SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD, &pmap_pde_demotions, 0, "2/4MB page demotions"); static u_long pmap_pde_mappings; SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD, &pmap_pde_mappings, 0, "2/4MB page mappings"); static u_long pmap_pde_p_failures; SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD, &pmap_pde_p_failures, 0, "2/4MB page promotion failures"); static u_long pmap_pde_promotions; SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD, &pmap_pde_promotions, 0, "2/4MB page promotions"); /*************************************************** * Low level helper routines..... ***************************************************/ boolean_t pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { return (mode >= 0 && mode < PAT_INDEX_SIZE && pat_index[(int)mode] >= 0); } /* * Determine the appropriate bits to set in a PTE or PDE for a specified * caching mode. */ int pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde) { int cache_bits, pat_flag, pat_idx; if (!pmap_is_valid_memattr(pmap, mode)) panic("Unknown caching mode %d\n", mode); /* The PAT bit is different for PTE's and PDE's. */ pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT; /* Map the caching mode to a PAT index. */ pat_idx = pat_index[mode]; /* Map the 3-bit index value into the PAT, PCD, and PWT bits. 
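 *
 * Worked example (illustrative): when the full eight-entry PAT is
 * programmed (pat_works), pmap_init_pat() maps PAT_WRITE_COMBINING to
 * pat_idx 6 = 0b110, so a 4 KB PTE gets PG_PTE_PAT | PG_NC_PCD with
 * PG_NC_PWT clear; a PDE for the same mode sets PG_PDE_PAT instead,
 * because the PAT bit sits at a different position in large-page
 * entries.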
*/ cache_bits = 0; if (pat_idx & 0x4) cache_bits |= pat_flag; if (pat_idx & 0x2) cache_bits |= PG_NC_PCD; if (pat_idx & 0x1) cache_bits |= PG_NC_PWT; return (cache_bits); } bool pmap_ps_enabled(pmap_t pmap __unused) { return (pg_ps_enabled); } /* * The caller is responsible for maintaining TLB consistency. */ static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde) { pd_entry_t *pde; pde = pmap_pde(kernel_pmap, va); pde_store(pde, newpde); } /* * After changing the page size for the specified virtual address in the page * table, flush the corresponding entries from the processor's TLB. Only the * calling processor's TLB is affected. * * The calling thread must be pinned to a processor. */ static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde) { if ((newpde & PG_PS) == 0) /* Demotion: flush a specific 2MB page mapping. */ invlpg(va); else /* if ((newpde & PG_G) == 0) */ /* * Promotion: flush every 4KB page mapping from the TLB * because there are too many to flush individually. */ invltlb(); } void invltlb_glob(void) { invltlb(); } #ifdef SMP /* * For SMP, these functions have to use the IPI mechanism for coherence. * * N.B.: Before calling any of the following TLB invalidation functions, * the calling processor must ensure that all stores updating a non- * kernel page table are globally performed. Otherwise, another * processor could cache an old, pre-update entry without being * invalidated. This can happen one of two ways: (1) The pmap becomes * active on another processor after its pm_active field is checked by * one of the following functions but before a store updating the page * table is globally performed. (2) The pmap becomes active on another * processor before its pm_active field is checked but due to * speculative loads one of the following functions stills reads the * pmap as inactive on the other processor. * * The kernel page table is exempt because its pm_active field is * immutable. The kernel page table is always active on every * processor. 
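 *
 * A typical caller therefore looks like the following sketch (this is
 * the existing pattern in this file, e.g. pmap_remove_page(), not new
 * code):
 *
 *	pte_store(pte, newpte);		(or pte_load_clear(pte))
 *	pmap_invalidate_page(pmap, va);
 *
 * i.e. the page table store is globally performed before the
 * shootdown is issued, so no processor can re-cache the stale entry
 * afterwards.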
*/ void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { cpuset_t *mask, other_cpus; u_int cpuid; sched_pin(); if (pmap == kernel_pmap) { invlpg(va); mask = &all_cpus; } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { mask = &all_cpus; } else { cpuid = PCPU_GET(cpuid); other_cpus = all_cpus; CPU_CLR(cpuid, &other_cpus); CPU_AND(&other_cpus, &pmap->pm_active); mask = &other_cpus; } smp_masked_invlpg(*mask, va, pmap); sched_unpin(); } /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */ #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE) void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { cpuset_t *mask, other_cpus; vm_offset_t addr; u_int cpuid; if (eva - sva >= PMAP_INVLPG_THRESHOLD) { pmap_invalidate_all(pmap); return; } sched_pin(); if (pmap == kernel_pmap) { for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); mask = &all_cpus; } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { mask = &all_cpus; } else { cpuid = PCPU_GET(cpuid); other_cpus = all_cpus; CPU_CLR(cpuid, &other_cpus); CPU_AND(&other_cpus, &pmap->pm_active); mask = &other_cpus; } smp_masked_invlpg_range(*mask, sva, eva, pmap); sched_unpin(); } void pmap_invalidate_all(pmap_t pmap) { cpuset_t *mask, other_cpus; u_int cpuid; sched_pin(); if (pmap == kernel_pmap) { invltlb(); mask = &all_cpus; } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) { mask = &all_cpus; } else { cpuid = PCPU_GET(cpuid); other_cpus = all_cpus; CPU_CLR(cpuid, &other_cpus); CPU_AND(&other_cpus, &pmap->pm_active); mask = &other_cpus; } smp_masked_invltlb(*mask, pmap); sched_unpin(); } void pmap_invalidate_cache(void) { sched_pin(); wbinvd(); smp_cache_flush(); sched_unpin(); } struct pde_action { cpuset_t invalidate; /* processors that invalidate their TLB */ vm_offset_t va; pd_entry_t *pde; pd_entry_t newpde; u_int store; /* processor that updates the PDE */ }; static void pmap_update_pde_kernel(void *arg) { struct pde_action *act = arg; pd_entry_t *pde; if (act->store == PCPU_GET(cpuid)) { pde = pmap_pde(kernel_pmap, act->va); pde_store(pde, act->newpde); } } static void pmap_update_pde_user(void *arg) { struct pde_action *act = arg; if (act->store == PCPU_GET(cpuid)) pde_store(act->pde, act->newpde); } static void pmap_update_pde_teardown(void *arg) { struct pde_action *act = arg; if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate)) pmap_update_pde_invalidate(act->va, act->newpde); } /* * Change the page size for the specified virtual address in a way that * prevents any possibility of the TLB ever having two entries that map the * same virtual address using different page sizes. This is the recommended * workaround for Erratum 383 on AMD Family 10h processors. It prevents a * machine check exception for a TLB state that is improperly diagnosed as a * hardware error. */ static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) { struct pde_action act; cpuset_t active, other_cpus; u_int cpuid; sched_pin(); cpuid = PCPU_GET(cpuid); other_cpus = all_cpus; CPU_CLR(cpuid, &other_cpus); if (pmap == kernel_pmap) active = all_cpus; else active = pmap->pm_active; if (CPU_OVERLAP(&active, &other_cpus)) { act.store = cpuid; act.invalidate = active; act.va = va; act.pde = pde; act.newpde = newpde; CPU_SET(cpuid, &active); smp_rendezvous_cpus(active, smp_no_rendezvous_barrier, pmap == kernel_pmap ? 
pmap_update_pde_kernel : pmap_update_pde_user, pmap_update_pde_teardown, &act); } else { if (pmap == kernel_pmap) pmap_kenter_pde(va, newpde); else pde_store(pde, newpde); if (CPU_ISSET(cpuid, &active)) pmap_update_pde_invalidate(va, newpde); } sched_unpin(); } #else /* !SMP */ /* * Normal, non-SMP, 486+ invalidation functions. * We inline these within pmap.c for speed. */ PMAP_INLINE void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { if (pmap == kernel_pmap) invlpg(va); } PMAP_INLINE void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t addr; if (pmap == kernel_pmap) for (addr = sva; addr < eva; addr += PAGE_SIZE) invlpg(addr); } PMAP_INLINE void pmap_invalidate_all(pmap_t pmap) { if (pmap == kernel_pmap) invltlb(); } PMAP_INLINE void pmap_invalidate_cache(void) { wbinvd(); } static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde) { if (pmap == kernel_pmap) pmap_kenter_pde(va, newpde); else pde_store(pde, newpde); if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) pmap_update_pde_invalidate(va, newpde); } #endif /* !SMP */ static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde) { /* * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was * created by a promotion that did not invalidate the 512 or 1024 4KB * page mappings that might exist in the TLB. Consequently, at this * point, the TLB may hold both 4KB and 2- or 4MB page mappings for * the address range [va, va + NBPDR). Therefore, the entire range * must be invalidated here. In contrast, when PG_PROMOTED is clear, * the TLB will not hold any 4KB page mappings for the address range * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the * 2- or 4MB page mapping from the TLB. */ if ((pde & PG_PROMOTED) != 0) pmap_invalidate_range(pmap, va, va + NBPDR - 1); else pmap_invalidate_page(pmap, va); } DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t), static) { if ((cpu_feature & CPUID_SS) != 0) return (pmap_invalidate_cache_range_selfsnoop); if ((cpu_feature & CPUID_CLFSH) != 0) return (pmap_force_invalidate_cache_range); return (pmap_invalidate_cache_range_all); } #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024) static void pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva) { KASSERT((sva & PAGE_MASK) == 0, ("pmap_invalidate_cache_range: sva not page-aligned")); KASSERT((eva & PAGE_MASK) == 0, ("pmap_invalidate_cache_range: eva not page-aligned")); } static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva) { pmap_invalidate_cache_range_check_align(sva, eva); } void pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) { sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1); if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) { /* * The supplied range is bigger than 2MB. * Globally invalidate cache. */ pmap_invalidate_cache(); return; } #ifdef DEV_APIC /* * XXX: Some CPUs fault, hang, or trash the local APIC * registers if we use CLFLUSH on the local APIC * range. The local APIC is always uncached, so we * don't need to flush for that range anyway. */ if (pmap_kextract(sva) == lapic_paddr) return; #endif if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) { /* * Do per-cache line flush. Use the sfence * instruction to insure that previous stores are * included in the write-back. The processor * propagates flush to other processors in the cache * coherence domain. 
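 *
 * Rough cost model (illustrative): the loop below advances by
 * cpu_clflush_line_size, commonly 64 bytes, so flushing one 4 KB page
 * issues 4096 / 64 = 64 CLFLUSHOPT instructions bracketed by two
 * SFENCEs; ranges of PMAP_CLFLUSH_THRESHOLD (2 MB) or more were
 * already diverted above to a full cache invalidation instead.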
*/ sfence(); for (; sva < eva; sva += cpu_clflush_line_size) clflushopt(sva); sfence(); } else { /* * Writes are ordered by CLFLUSH on Intel CPUs. */ if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); for (; sva < eva; sva += cpu_clflush_line_size) clflush(sva); if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); } } static void pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva) { pmap_invalidate_cache_range_check_align(sva, eva); pmap_invalidate_cache(); } void pmap_invalidate_cache_pages(vm_page_t *pages, int count) { int i; if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE || (cpu_feature & CPUID_CLFSH) == 0) { pmap_invalidate_cache(); } else { for (i = 0; i < count; i++) pmap_flush_page(pages[i]); } } /* * Are we current address space or kernel? */ static __inline int pmap_is_current(pmap_t pmap) { return (pmap == kernel_pmap); } /* * If the given pmap is not the current or kernel pmap, the returned pte must * be released by passing it to pmap_pte_release(). */ pt_entry_t * pmap_pte(pmap_t pmap, vm_offset_t va) { pd_entry_t newpf; pd_entry_t *pde; pde = pmap_pde(pmap, va); if (*pde & PG_PS) return (pde); if (*pde != 0) { /* are we current address space or kernel? */ if (pmap_is_current(pmap)) return (vtopte(va)); mtx_lock(&PMAP2mutex); newpf = *pde & PG_FRAME; if ((*PMAP2 & PG_FRAME) != newpf) { *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M; pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2); } return (PADDR2 + (i386_btop(va) & (NPTEPG - 1))); } return (NULL); } /* * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte * being NULL. */ static __inline void pmap_pte_release(pt_entry_t *pte) { if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) mtx_unlock(&PMAP2mutex); } /* * NB: The sequence of updating a page table followed by accesses to the * corresponding pages is subject to the situation described in the "AMD64 * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23, * "7.3.1 Special Coherency Considerations". Therefore, issuing the INVLPG * right after modifying the PTE bits is crucial. */ static __inline void invlcaddr(void *caddr) { invlpg((u_int)caddr); } /* * Super fast pmap_pte routine best used when scanning * the pv lists. This eliminates many coarse-grained * invltlb calls. Note that many of the pv list * scans are across different pmaps. It is very wasteful * to do an entire invltlb for checking a single mapping. * * If the given pmap is not the current pmap, pvh_global_lock * must be held and curthread pinned to a CPU. */ static pt_entry_t * pmap_pte_quick(pmap_t pmap, vm_offset_t va) { pd_entry_t newpf; pd_entry_t *pde; pde = pmap_pde(pmap, va); if (*pde & PG_PS) return (pde); if (*pde != 0) { /* are we current address space or kernel? 
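 *
 * (Callers of this quick variant must already satisfy the
 * requirements asserted just below: pvh_global_lock write-locked and
 * the thread pinned.  Roughly the way pmap_remove() sets things up:
 *
 *	rw_wlock(&pvh_global_lock);
 *	sched_pin();
 *	PMAP_LOCK(pmap);
 *	... pmap_pte_quick(pmap, va) ...
 *	sched_unpin();
 *	rw_wunlock(&pvh_global_lock);
 *	PMAP_UNLOCK(pmap);
 *
 * since the shared PADDR1/PMAP1 window is protected only by that
 * discipline.)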
*/ if (pmap_is_current(pmap)) return (vtopte(va)); rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); newpf = *pde & PG_FRAME; if ((*PMAP1 & PG_FRAME) != newpf) { *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M; #ifdef SMP PMAP1cpu = PCPU_GET(cpuid); #endif invlcaddr(PADDR1); PMAP1changed++; } else #ifdef SMP if (PMAP1cpu != PCPU_GET(cpuid)) { PMAP1cpu = PCPU_GET(cpuid); invlcaddr(PADDR1); PMAP1changedcpu++; } else #endif PMAP1unchanged++; return (PADDR1 + (i386_btop(va) & (NPTEPG - 1))); } return (0); } static pt_entry_t * pmap_pte_quick3(pmap_t pmap, vm_offset_t va) { pd_entry_t newpf; pd_entry_t *pde; pde = pmap_pde(pmap, va); if (*pde & PG_PS) return (pde); if (*pde != 0) { rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); newpf = *pde & PG_FRAME; if ((*PMAP3 & PG_FRAME) != newpf) { *PMAP3 = newpf | PG_RW | PG_V | PG_A | PG_M; #ifdef SMP PMAP3cpu = PCPU_GET(cpuid); #endif invlcaddr(PADDR3); PMAP1changed++; } else #ifdef SMP if (PMAP3cpu != PCPU_GET(cpuid)) { PMAP3cpu = PCPU_GET(cpuid); invlcaddr(PADDR3); PMAP1changedcpu++; } else #endif PMAP1unchanged++; return (PADDR3 + (i386_btop(va) & (NPTEPG - 1))); } return (0); } static pt_entry_t pmap_pte_ufast(pmap_t pmap, vm_offset_t va, pd_entry_t pde) { pt_entry_t *eh_ptep, pte, *ptep; PMAP_LOCK_ASSERT(pmap, MA_OWNED); pde &= PG_FRAME; critical_enter(); eh_ptep = (pt_entry_t *)PCPU_GET(pmap_eh_ptep); if ((*eh_ptep & PG_FRAME) != pde) { *eh_ptep = pde | PG_RW | PG_V | PG_A | PG_M; invlcaddr((void *)PCPU_GET(pmap_eh_va)); } ptep = (pt_entry_t *)PCPU_GET(pmap_eh_va) + (i386_btop(va) & (NPTEPG - 1)); pte = *ptep; critical_exit(); return (pte); } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { vm_paddr_t rtval; pt_entry_t pte; pd_entry_t pde; rtval = 0; PMAP_LOCK(pmap); pde = pmap->pm_pdir[va >> PDRSHIFT]; if (pde != 0) { if ((pde & PG_PS) != 0) rtval = (pde & PG_PS_FRAME) | (va & PDRMASK); else { pte = pmap_pte_ufast(pmap, va, pde); rtval = (pte & PG_FRAME) | (va & PAGE_MASK); } } PMAP_UNLOCK(pmap); return (rtval); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pd_entry_t pde; pt_entry_t pte; vm_page_t m; vm_paddr_t pa; pa = 0; m = NULL; PMAP_LOCK(pmap); retry: pde = *pmap_pde(pmap, va); if (pde != 0) { if (pde & PG_PS) { if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { if (vm_page_pa_tryrelock(pmap, (pde & PG_PS_FRAME) | (va & PDRMASK), &pa)) goto retry; m = PHYS_TO_VM_PAGE(pa); } } else { pte = pmap_pte_ufast(pmap, va, pde); if (pte != 0 && ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME, &pa)) goto retry; m = PHYS_TO_VM_PAGE(pa); } } if (m != NULL) vm_page_hold(m); } PA_UNLOCK_COND(pa); PMAP_UNLOCK(pmap); return (m); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Add a wired page to the kva. * Note: not SMP coherent. * * This function may be used before pmap_bootstrap() is called. 
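 *
 * Usage sketch (illustrative; it mirrors what pmap_map() below does):
 * the caller owns any TLB invalidation, e.g.
 *
 *	pmap_kenter(va, pa);
 *	pmap_invalidate_range(kernel_pmap, va, va + PAGE_SIZE);
 *
 * or a single pmap_invalidate_page(kernel_pmap, va) for one page.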
*/ PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | PG_RW | PG_V); } static __inline void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode) { pt_entry_t *pte; pte = vtopte(va); pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap, mode, 0)); } /* * Remove a page from the kernel pagetables. * Note: not SMP coherent. * * This function may be used before pmap_bootstrap() is called. */ PMAP_INLINE void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; pte = vtopte(va); pte_clear(pte); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { vm_offset_t va, sva; vm_paddr_t superpage_offset; pd_entry_t newpde; va = *virt; /* * Does the physical address range's size and alignment permit at * least one superpage mapping to be created? */ superpage_offset = start & PDRMASK; if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) { /* * Increase the starting virtual address so that its alignment * does not preclude the use of superpage mappings. */ if ((va & PDRMASK) < superpage_offset) va = (va & ~PDRMASK) + superpage_offset; else if ((va & PDRMASK) > superpage_offset) va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset; } sva = va; while (start < end) { if ((start & PDRMASK) == 0 && end - start >= NBPDR && pseflag != 0) { KASSERT((va & PDRMASK) == 0, ("pmap_map: misaligned va %#x", va)); newpde = start | PG_PS | PG_RW | PG_V; pmap_kenter_pde(va, newpde); va += NBPDR; start += NBPDR; } else { pmap_kenter(va, start); va += PAGE_SIZE; start += PAGE_SIZE; } } pmap_invalidate_range(kernel_pmap, sva, va); *virt = va; return (sva); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pt_entry_t *endpte, oldpte, pa, *pte; vm_page_t m; oldpte = 0; pte = vtopte(sva); endpte = pte + count; while (pte < endpte) { m = *ma++; pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) { oldpte |= *pte; #if defined(PAE) || defined(PAE_TABLES) pte_store(pte, pa | pg_nx | PG_RW | PG_V); #else pte_store(pte, pa | PG_RW | PG_V); #endif } pte++; } if (__predict_false((oldpte & PG_V) != 0)) pmap_invalidate_range(kernel_pmap, sva, sva + count * PAGE_SIZE); } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qremove(vm_offset_t sva, int count) { vm_offset_t va; va = sva; while (count-- > 0) { pmap_kremove(va); va += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /*************************************************** * Page table page management routines..... 
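 *
 * The routines in this section share a delayed-free pattern, sketched
 * here for orientation (see pmap_remove() for a real user):
 *
 *	struct spglist free;
 *
 *	SLIST_INIT(&free);
 *	... unmap entries, collecting page table pages through
 *	    pmap_unuse_pt() / pmap_add_delayed_free_list() ...
 *	pmap_invalidate_page(pmap, va);		(or _range/_all)
 *	vm_page_free_pages_toq(&free, true);	(only after invalidation)
 *
 * so that a page table page is never recycled while stale TLB entries
 * might still reference it.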
***************************************************/ /* * Schedule the specified unused page table page to be freed. Specifically, * add the page to the specified list of pages that will be released to the * physical memory manager after the TLB has been updated. */ static __inline void pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, boolean_t set_PG_ZERO) { if (set_PG_ZERO) m->flags |= PG_ZERO; else m->flags &= ~PG_ZERO; SLIST_INSERT_HEAD(free, m, plinks.s.ss); } /* * Inserts the specified page table page into the specified pmap's collection * of idle page table pages. Each of a pmap's page table pages is responsible * for mapping a distinct range of virtual addresses. The pmap's collection is * ordered by this virtual address range. */ static __inline int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_insert(&pmap->pm_root, mpte)); } /* * Removes the page table page mapping the specified virtual address from the * specified pmap's collection of idle page table pages, and returns it. * Otherwise, returns NULL if there is no page table page corresponding to the * specified virtual address. */ static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT)); } /* * Decrements a page table page's wire count, which is used to record the * number of valid page table entries within the page. If the wire count * drops to zero, then the page table page is unmapped. Returns TRUE if the * page table page was unmapped and FALSE otherwise. */ static inline boolean_t pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) { --m->wire_count; if (m->wire_count == 0) { _pmap_unwire_ptp(pmap, m, free); return (TRUE); } else return (FALSE); } static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) { /* * unmap the page table page */ pmap->pm_pdir[m->pindex] = 0; --pmap->pm_stats.resident_count; /* * There is not need to invalidate the recursive mapping since * we never instantiate such mapping for the usermode pmaps, * and never remove page table pages from the kernel pmap. * Put page on a list so that it is released since all TLB * shootdown is done. */ MPASS(pmap != kernel_pmap); pmap_add_delayed_free_list(m, free, TRUE); } /* * After removing a page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free) { pd_entry_t ptepde; vm_page_t mpte; if (pmap == kernel_pmap) return (0); ptepde = *pmap_pde(pmap, va); mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); return (pmap_unwire_ptp(pmap, mpte, free)); } /* * Initialize the pmap for the swapper process. */ void pmap_pinit0(pmap_t pmap) { PMAP_LOCK_INIT(pmap); pmap->pm_pdir = IdlePTD; #if defined(PAE) || defined(PAE_TABLES) pmap->pm_pdpt = IdlePDPT; #endif pmap->pm_root.rt_root = 0; CPU_ZERO(&pmap->pm_active); TAILQ_INIT(&pmap->pm_pvchunk); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); pmap_activate_boot(pmap); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ int pmap_pinit(pmap_t pmap) { vm_page_t m; int i; /* * No need to allocate page table space yet but we do need a valid * page directory table. 
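 *
 * (Size note, assuming the usual configuration: NBPTD is
 * NPGPTD * PAGE_SIZE, i.e. one 4 KB page of KVA for the directory
 * without PAE and four with PAE.  The kva_alloc() below runs once per
 * pmap structure; pmap_release() does not return this KVA, so a
 * recycled pmap reuses it, which is what the NULL check guards.)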
*/ if (pmap->pm_pdir == NULL) { pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD); if (pmap->pm_pdir == NULL) return (0); #if defined(PAE) || defined(PAE_TABLES) pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO); KASSERT(((vm_offset_t)pmap->pm_pdpt & ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0, ("pmap_pinit: pdpt misaligned")); KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30), ("pmap_pinit: pdpt above 4g")); #endif pmap->pm_root.rt_root = 0; } KASSERT(vm_radix_is_empty(&pmap->pm_root), ("pmap_pinit: pmap has reserved page table page(s)")); /* * allocate the page directory page(s) */ for (i = 0; i < NPGPTD;) { m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (m == NULL) { vm_wait(NULL); } else { pmap->pm_ptdpg[i] = m; #if defined(PAE) || defined(PAE_TABLES) pmap->pm_pdpt[i] = VM_PAGE_TO_PHYS(m) | PG_V; #endif i++; } } pmap_qenter((vm_offset_t)pmap->pm_pdir, pmap->pm_ptdpg, NPGPTD); for (i = 0; i < NPGPTD; i++) if ((pmap->pm_ptdpg[i]->flags & PG_ZERO) == 0) pagezero(pmap->pm_pdir + (i * NPDEPG)); /* Install the trampoline mapping. */ pmap->pm_pdir[TRPTDI] = PTD[TRPTDI]; CPU_ZERO(&pmap->pm_active); TAILQ_INIT(&pmap->pm_pvchunk); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); return (1); } /* * this routine is called if the page table page is not * mapped correctly. */ static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags) { vm_paddr_t ptepa; vm_page_t m; /* * Allocate a page table page. */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if ((flags & PMAP_ENTER_NOSLEEP) == 0) { PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); vm_wait(NULL); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. */ pmap->pm_stats.resident_count++; ptepa = VM_PAGE_TO_PHYS(m); pmap->pm_pdir[ptepindex] = (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M); return (m); } static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags) { u_int ptepindex; pd_entry_t ptepa; vm_page_t m; /* * Calculate pagetable page index */ ptepindex = va >> PDRSHIFT; retry: /* * Get the page directory entry */ ptepa = pmap->pm_pdir[ptepindex]; /* * This supports switching from a 4MB page to a * normal 4K page. */ if (ptepa & PG_PS) { (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va); ptepa = pmap->pm_pdir[ptepindex]; } /* * If the page table page is mapped, we just increment the * hold count, and activate it. */ if (ptepa) { m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); m->wire_count++; } else { /* * Here if the pte page isn't mapped, or if it has * been deallocated. */ m = _pmap_allocpte(pmap, ptepindex, flags); if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) goto retry; } return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. 
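 *
 * Schematically, the expected teardown order is
 *
 *	pmap_remove(pmap, sva, eva);	(until no mappings remain)
 *	pmap_release(pmap);		(frees the NPGPTD directory pages)
 *
 * and the KASSERTs at the top of the function enforce exactly that:
 * zero resident count, an empty radix tree and an inactive pmap.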
*/ void pmap_release(pmap_t pmap) { vm_page_t m; int i; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); KASSERT(vm_radix_is_empty(&pmap->pm_root), ("pmap_release: pmap has reserved page table page(s)")); KASSERT(CPU_EMPTY(&pmap->pm_active), ("releasing active pmap %p", pmap)); pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD); for (i = 0; i < NPGPTD; i++) { m = pmap->pm_ptdpg[i]; #if defined(PAE) || defined(PAE_TABLES) KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME), ("pmap_release: got wrong ptd page")); #endif vm_page_unwire_noq(m); vm_page_free(m); } } static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE; return (sysctl_handle_long(oidp, &ksize, 0, req)); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_size, "IU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return (sysctl_handle_long(oidp, &kfree, 0, req)); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_free, "IU", "Amount of KVM free"); /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { vm_paddr_t ptppaddr; vm_page_t nkpg; pd_entry_t newpdir; mtx_assert(&kernel_map->system_mtx, MA_OWNED); addr = roundup2(addr, NBPDR); if (addr - 1 >= vm_map_max(kernel_map)) addr = vm_map_max(kernel_map); while (kernel_vm_end < addr) { if (pdir_pde(PTD, kernel_vm_end)) { kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { kernel_vm_end = vm_map_max(kernel_map); break; } continue; } nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); nkpt++; if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); ptppaddr = VM_PAGE_TO_PHYS(nkpg); newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M); pdir_pde(KPTD, kernel_vm_end) = newpdir; pmap_kenter_pde(kernel_vm_end, newpdir); kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { kernel_vm_end = vm_map_max(kernel_map); break; } } } /*************************************************** * page management routines. 
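 *
 * A quick map of the pv_chunk layout asserted just below, worked out
 * here for reference: each chunk occupies one page and holds
 * _NPCPV == 336 pv entries tracked by _NPCM == 11 32-bit bitmap
 * words; the first ten words cover 10 * 32 = 320 entries
 * (PC_FREE0_9 == 0xffffffff) and the last word covers the remaining
 * 16 (PC_FREE10 == 0x0000ffff), 320 + 16 == 336.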
***************************************************/ CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); CTASSERT(_NPCM == 11); CTASSERT(_NPCPV == 336); static __inline struct pv_chunk * pv_to_chunk(pv_entry_t pv) { return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); } #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) #define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ #define PC_FREE10 0x0000fffful /* Free values for index 10 */ static const uint32_t pc_freemask[_NPCM] = { PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, PC_FREE10 }; SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, "Current number of pv entries"); #ifdef PV_STATS static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, "Current number of pv entry chunks"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, "Current number of pv entry chunks allocated"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, "Current number of pv entry chunks frees"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, "Number of times tried to get a chunk page but failed."); static long pv_entry_frees, pv_entry_allocs; static int pv_entry_spare; SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, "Current number of pv entry frees"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, "Current number of pv entry allocs"); SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, "Current number of spare pv entries"); #endif /* * We are in a serious low memory condition. Resort to * drastic measures to free some pages so we can allocate * another pv entry chunk. */ static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap) { struct pch newtail; struct pv_chunk *pc; struct md_page *pvh; pd_entry_t *pde; pmap_t pmap; pt_entry_t *pte, tpte; pv_entry_t pv; vm_offset_t va; vm_page_t m, m_pc; struct spglist free; uint32_t inuse; int bit, field, freed; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); pmap = NULL; m_pc = NULL; SLIST_INIT(&free); TAILQ_INIT(&newtail); while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || SLIST_EMPTY(&free))) { TAILQ_REMOVE(&pv_chunks, pc, pc_lru); if (pmap != pc->pc_pmap) { if (pmap != NULL) { pmap_invalidate_all(pmap); if (pmap != locked_pmap) PMAP_UNLOCK(pmap); } pmap = pc->pc_pmap; /* Avoid deadlock and lock recursion. */ if (pmap > locked_pmap) PMAP_LOCK(pmap); else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { pmap = NULL; TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); continue; } } /* * Destroy every non-wired, 4 KB page mapping in the chunk. 
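 *
 * (The scan below walks the chunk bitmap word by word: inuse is the
 * set of allocated entries, ~pc_map[field] masked by
 * pc_freemask[field]; bsfl() picks the lowest set bit; and the entry
 * itself lives at index field * 32 + bit within the chunk, so bit 5 of
 * word 2, for example, names pc_pventry[69].)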
*/ freed = 0; for (field = 0; field < _NPCM; field++) { for (inuse = ~pc->pc_map[field] & pc_freemask[field]; inuse != 0; inuse &= ~(1UL << bit)) { bit = bsfl(inuse); pv = &pc->pc_pventry[field * 32 + bit]; va = pv->pv_va; pde = pmap_pde(pmap, va); if ((*pde & PG_PS) != 0) continue; pte = pmap_pte(pmap, va); tpte = *pte; if ((tpte & PG_W) == 0) tpte = pte_load_clear(pte); pmap_pte_release(pte); if ((tpte & PG_W) != 0) continue; KASSERT(tpte != 0, ("pmap_pv_reclaim: pmap %p va %x zero pte", pmap, va)); if ((tpte & PG_G) != 0) pmap_invalidate_page(pmap, va); m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if ((tpte & PG_A) != 0) vm_page_aflag_set(m, PGA_REFERENCED); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) { vm_page_aflag_clear(m, PGA_WRITEABLE); } } pc->pc_map[field] |= 1UL << bit; pmap_unuse_pt(pmap, va, &free); freed++; } } if (freed == 0) { TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); continue; } /* Every freed mapping is for a 4 KB page. */ pmap->pm_stats.resident_count -= freed; PV_STAT(pv_entry_frees += freed); PV_STAT(pv_entry_spare += freed); pv_entry_count -= freed; TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); for (field = 0; field < _NPCM; field++) if (pc->pc_map[field] != pc_freemask[field]) { TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); /* * One freed pv entry in locked_pmap is * sufficient. */ if (pmap == locked_pmap) goto out; break; } if (field == _NPCM) { PV_STAT(pv_entry_spare -= _NPCPV); PV_STAT(pc_chunk_count--); PV_STAT(pc_chunk_frees++); /* Entire chunk is free; return it. */ m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); pmap_qremove((vm_offset_t)pc, 1); pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); break; } } out: TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); if (pmap != NULL) { pmap_invalidate_all(pmap); if (pmap != locked_pmap) PMAP_UNLOCK(pmap); } if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { m_pc = SLIST_FIRST(&free); SLIST_REMOVE_HEAD(&free, plinks.s.ss); /* Recycle a freed page table page. */ m_pc->wire_count = 1; } vm_page_free_pages_toq(&free, true); return (m_pc); } /* * free the pv_entry back to the free list */ static void free_pv_entry(pmap_t pmap, pv_entry_t pv) { struct pv_chunk *pc; int idx, field, bit; rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(pv_entry_frees++); PV_STAT(pv_entry_spare++); pv_entry_count--; pc = pv_to_chunk(pv); idx = pv - &pc->pc_pventry[0]; field = idx / 32; bit = idx % 32; pc->pc_map[field] |= 1ul << bit; for (idx = 0; idx < _NPCM; idx++) if (pc->pc_map[idx] != pc_freemask[idx]) { /* * 98% of the time, pc is already at the head of the * list. If it isn't already, move it to the head. 
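 *
 * (For reference, the index arithmetic above is the inverse of the
 * lookup in pmap_pv_reclaim(): idx = pv - pc_pventry, field = idx / 32,
 * bit = idx % 32, so the entry at index 100 in a chunk maps to
 * field 3, bit 4.)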
*/ if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != pc)) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); } return; } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } static void free_pv_chunk(struct pv_chunk *pc) { vm_page_t m; TAILQ_REMOVE(&pv_chunks, pc, pc_lru); PV_STAT(pv_entry_spare -= _NPCPV); PV_STAT(pc_chunk_count--); PV_STAT(pc_chunk_frees++); /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); pmap_qremove((vm_offset_t)pc, 1); vm_page_unwire(m, PQ_NONE); vm_page_free(m); pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc); } /* * get a new pv_entry, allocating a block from the system * when needed. */ static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; int bit, field; pv_entry_t pv; struct pv_chunk *pc; vm_page_t m; rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(pv_entry_allocs++); pv_entry_count++; if (pv_entry_count > pv_entry_high_water) if (ratecheck(&lastprint, &printinterval)) printf("Approaching the limit on PV entries, consider " "increasing either the vm.pmap.shpgperproc or the " "vm.pmap.pv_entry_max tunable.\n"); retry: pc = TAILQ_FIRST(&pmap->pm_pvchunk); if (pc != NULL) { for (field = 0; field < _NPCM; field++) { if (pc->pc_map[field]) { bit = bsfl(pc->pc_map[field]); break; } } if (field < _NPCM) { pv = &pc->pc_pventry[field * 32 + bit]; pc->pc_map[field] &= ~(1ul << bit); /* If this was the last item, move it to tail */ for (field = 0; field < _NPCM; field++) if (pc->pc_map[field] != 0) { PV_STAT(pv_entry_spare--); return (pv); /* not full, return */ } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(pv_entry_spare--); return (pv); } } /* * Access to the ptelist "pv_vafree" is synchronized by the pvh * global lock. If "pv_vafree" is currently non-empty, it will * remain non-empty until pmap_ptelist_alloc() completes. */ if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { if (try) { pv_entry_count--; PV_STAT(pc_chunk_tryfail++); return (NULL); } m = pmap_pv_reclaim(pmap); if (m == NULL) goto retry; } PV_STAT(pc_chunk_count++); PV_STAT(pc_chunk_allocs++); pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree); pmap_qenter((vm_offset_t)pc, &m, 1); pc->pc_pmap = pmap; pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ for (field = 1; field < _NPCM; field++) pc->pc_map[field] = pc_freemask[field]; TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); pv = &pc->pc_pventry[0]; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(pv_entry_spare += _NPCPV - 1); return (pv); } static __inline pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; rw_assert(&pvh_global_lock, RA_WLOCKED); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (pmap == PV_PMAP(pv) && va == pv->pv_va) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); break; } } return (pv); } static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) { struct md_page *pvh; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT((pa & PDRMASK) == 0, ("pmap_pv_demote_pde: pa is not 4mpage aligned")); /* * Transfer the 4mpage's pv entry for this mapping to the first * page's pv list. 
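 *
 * (Scale note, illustrative: a 2 or 4 MB superpage covers NPTEPG 4 KB
 * pages, 512 or 1024 of them, so a demotion keeps this one transferred
 * pv entry and must instantiate NPTEPG - 1 new ones in the loop below,
 * which is why pv-entry pressure matters most on this path.)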
*/ pvh = pa_to_pvh(pa); va = trunc_4mpage(va); pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); m = PHYS_TO_VM_PAGE(pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); /* Instantiate the remaining NPTEPG - 1 pv entries. */ va_last = va + NBPDR - PAGE_SIZE; do { m++; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_pv_demote_pde: page %p is not managed", m)); va += PAGE_SIZE; pmap_insert_entry(pmap, va, m); } while (va < va_last); } #if VM_NRESERVLEVEL > 0 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) { struct md_page *pvh; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT((pa & PDRMASK) == 0, ("pmap_pv_promote_pde: pa is not 4mpage aligned")); /* * Transfer the first page's pv entry for this mapping to the * 4mpage's pv list. Aside from avoiding the cost of a call * to get_pv_entry(), a transfer avoids the possibility that * get_pv_entry() calls pmap_collect() and that pmap_collect() * removes one of the mappings that is being promoted. */ m = PHYS_TO_VM_PAGE(pa); va = trunc_4mpage(va); pv = pmap_pvh_remove(&m->md, pmap, va); KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); pvh = pa_to_pvh(pa); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); /* Free the remaining NPTEPG - 1 pv entries. */ va_last = va + NBPDR - PAGE_SIZE; do { m++; va += PAGE_SIZE; pmap_pvh_free(&m->md, pmap, va); } while (va < va_last); } #endif /* VM_NRESERVLEVEL > 0 */ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); free_pv_entry(pmap, pv); } static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) { struct md_page *pvh; rw_assert(&pvh_global_lock, RA_WLOCKED); pmap_pvh_free(&m->md, pmap, va); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); pv = get_pv_entry(pmap, FALSE); pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); } /* * Conditionally create a pv entry. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pv_entry_count < pv_entry_high_water && (pv = get_pv_entry(pmap, TRUE)) != NULL) { pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); return (TRUE); } else return (FALSE); } /* * Create the pv entries for each of the pages within a superpage. */ static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags) { struct md_page *pvh; pv_entry_t pv; bool noreclaim; rw_assert(&pvh_global_lock, RA_WLOCKED); noreclaim = (flags & PMAP_ENTER_NORECLAIM) != 0; if ((noreclaim && pv_entry_count >= pv_entry_high_water) || (pv = get_pv_entry(pmap, noreclaim)) == NULL) return (false); pv->pv_va = va; pvh = pa_to_pvh(pde & PG_PS_FRAME); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); return (true); } /* * Fills a page table page with mappings to consecutive physical pages. 
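 *
 * For example (illustrative only), a call such as
 *
 *	pmap_fill_ptp(firstpte, pa | PG_RW | PG_V | PG_A | PG_M);
 *
 * writes pa, pa + PAGE_SIZE, ..., pa + (NPTEPG - 1) * PAGE_SIZE into
 * the NPTEPG slots of the page table page, exactly the shape of
 * entries a demoted superpage needs; see pmap_demote_pde() below.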
*/ static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) { pt_entry_t *pte; for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { *pte = newpte; newpte += PAGE_SIZE; } } /* * Tries to demote a 2- or 4MB page mapping. If demotion fails, the * 2- or 4MB page mapping is invalidated. */ static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { pd_entry_t newpde, oldpde; pt_entry_t *firstpte, newpte; vm_paddr_t mptepa; vm_page_t mpte; struct spglist free; vm_offset_t sva; PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpde = *pde; KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V), ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V")); if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == NULL) { KASSERT((oldpde & PG_W) == 0, ("pmap_demote_pde: page table page for a wired mapping" " is missing")); /* * Invalidate the 2- or 4MB page mapping and return * "failure" if the mapping was never accessed or the * allocation of the new page table page fails. */ if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) { SLIST_INIT(&free); sva = trunc_4mpage(va); pmap_remove_pde(pmap, pde, sva, &free); if ((oldpde & PG_G) == 0) pmap_invalidate_pde_page(pmap, sva, oldpde); vm_page_free_pages_toq(&free, true); CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" " in pmap %p", va, pmap); return (FALSE); } if (pmap != kernel_pmap) pmap->pm_stats.resident_count++; } mptepa = VM_PAGE_TO_PHYS(mpte); /* * If the page mapping is in the kernel's address space, then the * KPTmap can provide access to the page table page. Otherwise, * temporarily map the page table page (mpte) into the kernel's * address space at either PADDR1 or PADDR2. */ if (pmap == kernel_pmap) firstpte = &KPTmap[i386_btop(trunc_4mpage(va))]; else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { if ((*PMAP1 & PG_FRAME) != mptepa) { *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M; #ifdef SMP PMAP1cpu = PCPU_GET(cpuid); #endif invlcaddr(PADDR1); PMAP1changed++; } else #ifdef SMP if (PMAP1cpu != PCPU_GET(cpuid)) { PMAP1cpu = PCPU_GET(cpuid); invlcaddr(PADDR1); PMAP1changedcpu++; } else #endif PMAP1unchanged++; firstpte = PADDR1; } else { mtx_lock(&PMAP2mutex); if ((*PMAP2 & PG_FRAME) != mptepa) { *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M; pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2); } firstpte = PADDR2; } newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; KASSERT((oldpde & PG_A) != 0, ("pmap_demote_pde: oldpde is missing PG_A")); KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, ("pmap_demote_pde: oldpde is missing PG_M")); newpte = oldpde & ~PG_PS; if ((newpte & PG_PDE_PAT) != 0) newpte ^= PG_PDE_PAT | PG_PTE_PAT; /* * If the page table page is new, initialize it. */ if (mpte->wire_count == 1) { mpte->wire_count = NPTEPG; pmap_fill_ptp(firstpte, newpte); } KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME), ("pmap_demote_pde: firstpte and newpte map different physical" " addresses")); /* * If the mapping has changed attributes, update the page table * entries. */ if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE)) pmap_fill_ptp(firstpte, newpte); /* * Demote the mapping. This pmap is locked. The old PDE has * PG_A set. If the old PDE has PG_RW set, it also has PG_M * set. Thus, there is no danger of a race with another * processor changing the setting of PG_A and/or PG_M between * the read above and the store below. 
*/ if (workaround_erratum383) pmap_update_pde(pmap, va, pde, newpde); else if (pmap == kernel_pmap) pmap_kenter_pde(va, newpde); else pde_store(pde, newpde); if (firstpte == PADDR2) mtx_unlock(&PMAP2mutex); /* * Invalidate the recursive mapping of the page table page. */ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); /* * Demote the pv entry. This depends on the earlier demotion * of the mapping. Specifically, the (re)creation of a per- * page pv entry might trigger the execution of pmap_collect(), * which might reclaim a newly (re)created per-page pv entry * and destroy the associated mapping. In order to destroy * the mapping, the PDE must have already changed from mapping * the 2mpage to referencing the page table page. */ if ((oldpde & PG_MANAGED) != 0) pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); pmap_pde_demotions++; CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" " in pmap %p", va, pmap); return (TRUE); } /* * Removes a 2- or 4MB page mapping from the kernel pmap. */ static void pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { pd_entry_t newpde; vm_paddr_t mptepa; vm_page_t mpte; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mpte = pmap_remove_pt_page(pmap, va); if (mpte == NULL) panic("pmap_remove_kernel_pde: Missing pt page."); mptepa = VM_PAGE_TO_PHYS(mpte); newpde = mptepa | PG_M | PG_A | PG_RW | PG_V; /* * Initialize the page table page. */ pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]); /* * Remove the mapping. */ if (workaround_erratum383) pmap_update_pde(pmap, va, pde, newpde); else pmap_kenter_pde(va, newpde); /* * Invalidate the recursive mapping of the page table page. */ pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va)); } /* * pmap_remove_pde: do the things to unmap a superpage in a process */ static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, struct spglist *free) { struct md_page *pvh; pd_entry_t oldpde; vm_offset_t eva, va; vm_page_t m, mpte; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & PDRMASK) == 0, ("pmap_remove_pde: sva is not 4mpage aligned")); oldpde = pte_load_clear(pdq); if (oldpde & PG_W) pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; /* * Machines that don't support invlpg, also don't support * PG_G. 
*/ if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; if (oldpde & PG_MANAGED) { pvh = pa_to_pvh(oldpde & PG_PS_FRAME); pmap_pvh_free(pvh, pmap, sva); eva = sva + NBPDR; for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); va < eva; va += PAGE_SIZE, m++) { if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if (oldpde & PG_A) vm_page_aflag_set(m, PGA_REFERENCED); if (TAILQ_EMPTY(&m->md.pv_list) && TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } } if (pmap == kernel_pmap) { pmap_remove_kernel_pde(pmap, pdq, sva); } else { mpte = pmap_remove_pt_page(pmap, sva); if (mpte != NULL) { pmap->pm_stats.resident_count--; KASSERT(mpte->wire_count == NPTEPG, ("pmap_remove_pde: pte page wire count error")); mpte->wire_count = 0; pmap_add_delayed_free_list(mpte, free, FALSE); } } } /* * pmap_remove_pte: do the things to unmap a page in a process */ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, struct spglist *free) { pt_entry_t oldpte; vm_page_t m; rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); oldpte = pte_load_clear(ptq); KASSERT(oldpte != 0, ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va)); if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; /* * Machines that don't support invlpg, also don't support * PG_G. */ if (oldpte & PG_G) pmap_invalidate_page(kernel_pmap, va); pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if (oldpte & PG_A) vm_page_aflag_set(m, PGA_REFERENCED); pmap_remove_entry(pmap, m, va); } return (pmap_unuse_pt(pmap, va, free)); } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) { pt_entry_t *pte; rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0) return; pmap_remove_pte(pmap, pte, va, free); pmap_invalidate_page(pmap, va); } /* * Removes the specified range of addresses from the page table page. */ static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, struct spglist *free) { pt_entry_t *pte; bool anyvalid; rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT(curthread->td_pinned > 0, ("curthread not pinned")); PMAP_LOCK_ASSERT(pmap, MA_OWNED); anyvalid = false; for (pte = pmap_pte_quick(pmap, sva); sva != eva; pte++, sva += PAGE_SIZE) { if (*pte == 0) continue; /* * The TLB entry for a PG_G mapping is invalidated by * pmap_remove_pte(). */ if ((*pte & PG_G) == 0) anyvalid = true; if (pmap_remove_pte(pmap, pte, sva, free)) break; } return (anyvalid); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t pdnxt; pd_entry_t ptpaddr; struct spglist free; int anyvalid; /* * Perform an unsynchronized read. This is, however, safe. */ if (pmap->pm_stats.resident_count == 0) return; anyvalid = 0; SLIST_INIT(&free); rw_wlock(&pvh_global_lock); sched_pin(); PMAP_LOCK(pmap); /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. 
*/ if ((sva + PAGE_SIZE == eva) && ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) { pmap_remove_page(pmap, sva, &free); goto out; } for (; sva < eva; sva = pdnxt) { u_int pdirindex; /* * Calculate index for next page table. */ pdnxt = (sva + NBPDR) & ~PDRMASK; if (pdnxt < sva) pdnxt = eva; if (pmap->pm_stats.resident_count == 0) break; pdirindex = sva >> PDRSHIFT; ptpaddr = pmap->pm_pdir[pdirindex]; /* * Weed out invalid mappings. Note: we assume that the page * directory table is always allocated, and in kernel virtual. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { /* * Are we removing the entire large page? If not, * demote the mapping and fall through. */ if (sva + NBPDR == pdnxt && eva >= pdnxt) { /* * The TLB entry for a PG_G mapping is * invalidated by pmap_remove_pde(). */ if ((ptpaddr & PG_G) == 0) anyvalid = 1; pmap_remove_pde(pmap, &pmap->pm_pdir[pdirindex], sva, &free); continue; } else if (!pmap_demote_pde(pmap, &pmap->pm_pdir[pdirindex], sva)) { /* The large page mapping was destroyed. */ continue; } } /* * Limit our scan to either the end of the va represented * by the current page table page, or to the end of the * range being removed. */ if (pdnxt > eva) pdnxt = eva; if (pmap_remove_ptes(pmap, sva, pdnxt, &free)) anyvalid = 1; } out: sched_unpin(); if (anyvalid) pmap_invalidate_all(pmap); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); vm_page_free_pages_toq(&free, true); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { struct md_page *pvh; pv_entry_t pv; pmap_t pmap; pt_entry_t *pte, tpte; pd_entry_t *pde; vm_offset_t va; struct spglist free; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_all: page %p is not managed", m)); SLIST_INIT(&free); rw_wlock(&pvh_global_lock); sched_pin(); if ((m->flags & PG_FICTITIOUS) != 0) goto small_mappings; pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { va = pv->pv_va; pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, va); (void)pmap_demote_pde(pmap, pde, va); PMAP_UNLOCK(pmap); } small_mappings: while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pmap->pm_stats.resident_count--; pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found" " a 4mpage in page %p's pv list", m)); pte = pmap_pte_quick(pmap, pv->pv_va); tpte = pte_load_clear(pte); KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte", pmap, pv->pv_va)); if (tpte & PG_W) pmap->pm_stats.wired_count--; if (tpte & PG_A) vm_page_aflag_set(m, PGA_REFERENCED); /* * Update the vm_page_t clean and reference bits. 
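 * The hardware-maintained PG_A and PG_M bits are folded into the
 * machine-independent vm_page (PGA_REFERENCED and vm_page_dirty())
 * before the PTE and pv entry disappear, so that state is not lost.
 * PG_M is only honored when PG_RW is also set, matching the invariant
 * asserted in pmap_enter() that a PTE never has PG_M without PG_RW.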
*/ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); pmap_unuse_pt(pmap, pv->pv_va, &free); pmap_invalidate_page(pmap, pv->pv_va); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); free_pv_entry(pmap, pv); PMAP_UNLOCK(pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); sched_unpin(); rw_wunlock(&pvh_global_lock); vm_page_free_pages_toq(&free, true); } /* * pmap_protect_pde: do the things to protect a 4mpage in a process */ static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) { pd_entry_t newpde, oldpde; vm_offset_t eva, va; vm_page_t m; boolean_t anychanged; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & PDRMASK) == 0, ("pmap_protect_pde: sva is not 4mpage aligned")); anychanged = FALSE; retry: oldpde = newpde = *pde; if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == (PG_MANAGED | PG_M | PG_RW)) { eva = sva + NBPDR; for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); va < eva; va += PAGE_SIZE, m++) vm_page_dirty(m); } if ((prot & VM_PROT_WRITE) == 0) newpde &= ~(PG_RW | PG_M); #if defined(PAE) || defined(PAE_TABLES) if ((prot & VM_PROT_EXECUTE) == 0) newpde |= pg_nx; #endif if (newpde != oldpde) { /* * As an optimization to future operations on this PDE, clear * PG_PROMOTED. The impending invalidation will remove any * lingering 4KB page mappings from the TLB. */ if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED)) goto retry; if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); else anychanged = TRUE; } return (anychanged); } /* * Set the physical protection on the * specified range of this map as requested. */ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t pdnxt; pd_entry_t ptpaddr; pt_entry_t *pte; boolean_t anychanged, pv_lists_locked; KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); if (prot == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } #if defined(PAE) || defined(PAE_TABLES) if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == (VM_PROT_WRITE|VM_PROT_EXECUTE)) return; #else if (prot & VM_PROT_WRITE) return; #endif if (pmap_is_current(pmap)) pv_lists_locked = FALSE; else { pv_lists_locked = TRUE; resume: rw_wlock(&pvh_global_lock); sched_pin(); } anychanged = FALSE; PMAP_LOCK(pmap); for (; sva < eva; sva = pdnxt) { pt_entry_t obits, pbits; u_int pdirindex; pdnxt = (sva + NBPDR) & ~PDRMASK; if (pdnxt < sva) pdnxt = eva; pdirindex = sva >> PDRSHIFT; ptpaddr = pmap->pm_pdir[pdirindex]; /* * Weed out invalid mappings. Note: we assume that the page * directory table is always allocated, and in kernel virtual. */ if (ptpaddr == 0) continue; /* * Check for large page. */ if ((ptpaddr & PG_PS) != 0) { /* * Are we protecting the entire large page? If not, * demote the mapping and fall through. */ if (sva + NBPDR == pdnxt && eva >= pdnxt) { /* * The TLB entry for a PG_G mapping is * invalidated by pmap_protect_pde(). */ if (pmap_protect_pde(pmap, &pmap->pm_pdir[pdirindex], sva, prot)) anychanged = TRUE; continue; } else { if (!pv_lists_locked) { pv_lists_locked = TRUE; if (!rw_try_wlock(&pvh_global_lock)) { if (anychanged) pmap_invalidate_all( pmap); PMAP_UNLOCK(pmap); goto resume; } sched_pin(); } if (!pmap_demote_pde(pmap, &pmap->pm_pdir[pdirindex], sva)) { /* * The large page mapping was * destroyed. 
*/ continue; } } } if (pdnxt > eva) pdnxt = eva; for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, sva += PAGE_SIZE) { vm_page_t m; retry: /* * Regardless of whether a pte is 32 or 64 bits in * size, PG_RW, PG_A, and PG_M are among the least * significant 32 bits. */ obits = pbits = *pte; if ((pbits & PG_V) == 0) continue; if ((prot & VM_PROT_WRITE) == 0) { if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == (PG_MANAGED | PG_M | PG_RW)) { m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); vm_page_dirty(m); } pbits &= ~(PG_RW | PG_M); } #if defined(PAE) || defined(PAE_TABLES) if ((prot & VM_PROT_EXECUTE) == 0) pbits |= pg_nx; #endif if (pbits != obits) { #if defined(PAE) || defined(PAE_TABLES) if (!atomic_cmpset_64(pte, obits, pbits)) goto retry; #else if (!atomic_cmpset_int((u_int *)pte, obits, pbits)) goto retry; #endif if (obits & PG_G) pmap_invalidate_page(pmap, sva); else anychanged = TRUE; } } } if (anychanged) pmap_invalidate_all(pmap); if (pv_lists_locked) { sched_unpin(); rw_wunlock(&pvh_global_lock); } PMAP_UNLOCK(pmap); } #if VM_NRESERVLEVEL > 0 /* * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are * within a single page table page (PTP) to a single 2- or 4MB page mapping. * For promotion to occur, two conditions must be met: (1) the 4KB page * mappings must map aligned, contiguous physical memory and (2) the 4KB page * mappings must have identical characteristics. * * Managed (PG_MANAGED) mappings within the kernel address space are not * promoted. The reason is that kernel PDEs are replicated in each pmap but * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel * pmap. */ static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { pd_entry_t newpde; pt_entry_t *firstpte, oldpte, pa, *pte; vm_offset_t oldpteva; vm_page_t mpte; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Examine the first PTE in the specified PTP. Abort if this PTE is * either invalid, unused, or does not map the first 4KB physical page * within a 2- or 4MB page. */ firstpte = pmap_pte_quick(pmap, trunc_4mpage(va)); setpde: newpde = *firstpte; if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) { pmap_pde_p_failures++; CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" " in pmap %p", va, pmap); return; } if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) { pmap_pde_p_failures++; CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" " in pmap %p", va, pmap); return; } if ((newpde & (PG_M | PG_RW)) == PG_RW) { /* * When PG_M is already clear, PG_RW can be cleared without * a TLB invalidation. */ if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde & ~PG_RW)) goto setpde; newpde &= ~PG_RW; } /* * Examine each of the other PTEs in the specified PTP. Abort if this * PTE maps an unexpected 4KB physical page or does not have identical * characteristics to the first PTE. */ pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE; for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { setpte: oldpte = *pte; if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { pmap_pde_p_failures++; CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" " in pmap %p", va, pmap); return; } if ((oldpte & (PG_M | PG_RW)) == PG_RW) { /* * When PG_M is already clear, PG_RW can be cleared * without a TLB invalidation. 
*/ if (!atomic_cmpset_int((u_int *)pte, oldpte, oldpte & ~PG_RW)) goto setpte; oldpte &= ~PG_RW; oldpteva = (oldpte & PG_FRAME & PDRMASK) | (va & ~PDRMASK); CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x" " in pmap %p", oldpteva, pmap); } if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { pmap_pde_p_failures++; CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x" " in pmap %p", va, pmap); return; } pa -= PAGE_SIZE; } /* * Save the page table page in its current state until the PDE * mapping the superpage is demoted by pmap_demote_pde() or * destroyed by pmap_remove_pde(). */ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); KASSERT(mpte >= vm_page_array && mpte < &vm_page_array[vm_page_array_size], ("pmap_promote_pde: page table page is out of range")); KASSERT(mpte->pindex == va >> PDRSHIFT, ("pmap_promote_pde: page table page's pindex is wrong")); if (pmap_insert_pt_page(pmap, mpte)) { pmap_pde_p_failures++; CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x in pmap %p", va, pmap); return; } /* * Promote the pv entries. */ if ((newpde & PG_MANAGED) != 0) pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME); /* * Propagate the PAT index to its proper position. */ if ((newpde & PG_PTE_PAT) != 0) newpde ^= PG_PDE_PAT | PG_PTE_PAT; /* * Map the superpage. */ if (workaround_erratum383) pmap_update_pde(pmap, va, pde, PG_PS | newpde); else if (pmap == kernel_pmap) pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde); else pde_store(pde, PG_PROMOTED | PG_PS | newpde); pmap_pde_promotions++; CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x" " in pmap %p", va, pmap); } #endif /* VM_NRESERVLEVEL > 0 */ /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. 
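 *
 * A sketch of a typical call, assuming the usual fault-handler style
 * of use (the caller-side variable names here are illustrative only):
 *
 *	rv = pmap_enter(pmap, va, m, prot,
 *	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), psind);
 *
 * where "prot" is the protection for the mapping, the VM_PROT_* bits
 * in "flags" describe the access being performed (so PG_A/PG_M can be
 * preset), and "psind" selects a 4KB (0) or 2/4MB (1) mapping.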
*/ int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind) { pd_entry_t *pde; pt_entry_t *pte; pt_entry_t newpte, origpte; pv_entry_t pv; vm_paddr_t opa, pa; vm_page_t mpte, om; int rv; va = trunc_page(va); KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) || (pmap != kernel_pmap && va < VM_MAXUSER_ADDRESS), ("pmap_enter: toobig k%d %#x", pmap == kernel_pmap, va)); KASSERT(va < PMAP_TRM_MIN_ADDRESS, ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)", va)); KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); KASSERT((flags & PMAP_ENTER_RESERVED) == 0, ("pmap_enter: flags %u has reserved bits set", flags)); pa = VM_PAGE_TO_PHYS(m); newpte = (pt_entry_t)(pa | PG_A | PG_V); if ((flags & VM_PROT_WRITE) != 0) newpte |= PG_M; if ((prot & VM_PROT_WRITE) != 0) newpte |= PG_RW; KASSERT((newpte & (PG_M | PG_RW)) != PG_M, ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); #if defined(PAE) || defined(PAE_TABLES) if ((prot & VM_PROT_EXECUTE) == 0) newpte |= pg_nx; #endif if ((flags & PMAP_ENTER_WIRED) != 0) newpte |= PG_W; if (pmap != kernel_pmap) newpte |= PG_U; newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0); if ((m->oflags & VPO_UNMANAGED) == 0) newpte |= PG_MANAGED; rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); sched_pin(); if (psind == 1) { /* Assert the required virtual and physical alignment. */ KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned")); KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m); goto out; } pde = pmap_pde(pmap, va); if (pmap != kernel_pmap) { /* * va is for UVA. * In the case that a page table page is not resident, * we are creating it here. pmap_allocpte() handles * demotion. */ mpte = pmap_allocpte(pmap, va, flags); if (mpte == NULL) { KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, ("pmap_allocpte failed with sleep allowed")); rv = KERN_RESOURCE_SHORTAGE; goto out; } } else { /* * va is for KVA, so pmap_demote_pde() will never fail * to install a page table page. PG_V is also * asserted by pmap_demote_pde(). */ mpte = NULL; KASSERT(pde != NULL && (*pde & PG_V) != 0, ("KVA %#x invalid pde pdir %#jx", va, (uintmax_t)pmap->pm_pdir[PTDPTDI])); if ((*pde & PG_PS) != 0) pmap_demote_pde(pmap, pde, va); } pte = pmap_pte_quick(pmap, va); /* * Page Directory table entry is not valid, which should not * happen. We should have either allocated the page table * page or demoted the existing mapping above. */ if (pte == NULL) { panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x", (uintmax_t)pmap->pm_pdir[PTDPTDI], va); } origpte = *pte; pv = NULL; /* * Is the specified virtual address already mapped? */ if ((origpte & PG_V) != 0) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) pmap->pm_stats.wired_count++; else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) pmap->pm_stats.wired_count--; /* * Remove the extra PT page reference. 
*/ if (mpte != NULL) { mpte->wire_count--; KASSERT(mpte->wire_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%x", va)); } /* * Has the physical page changed? */ opa = origpte & PG_FRAME; if (opa == pa) { /* * No, might be a protection or wiring change. */ if ((origpte & PG_MANAGED) != 0 && (newpte & PG_RW) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) goto unchanged; goto validate; } /* * The physical page has changed. Temporarily invalidate * the mapping. This ensures that all threads sharing the * pmap keep a consistent view of the mapping, which is * necessary for the correct handling of COW faults. It * also permits reuse of the old mapping's PV entry, * avoiding an allocation. * * For consistency, handle unmanaged mappings the same way. */ origpte = pte_load_clear(pte); KASSERT((origpte & PG_FRAME) == opa, ("pmap_enter: unexpected pa update for %#x", va)); if ((origpte & PG_MANAGED) != 0) { om = PHYS_TO_VM_PAGE(opa); /* * The pmap lock is sufficient to synchronize with * concurrent calls to pmap_page_test_mappings() and * pmap_ts_referenced(). */ if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(om); if ((origpte & PG_A) != 0) vm_page_aflag_set(om, PGA_REFERENCED); pv = pmap_pvh_remove(&om->md, pmap, va); KASSERT(pv != NULL, ("pmap_enter: no PV entry for %#x", va)); if ((newpte & PG_MANAGED) == 0) free_pv_entry(pmap, pv); if ((om->aflags & PGA_WRITEABLE) != 0 && TAILQ_EMPTY(&om->md.pv_list) && ((om->flags & PG_FICTITIOUS) != 0 || TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) vm_page_aflag_clear(om, PGA_WRITEABLE); } if ((origpte & PG_A) != 0) pmap_invalidate_page(pmap, va); origpte = 0; } else { /* * Increment the counters. */ if ((newpte & PG_W) != 0) pmap->pm_stats.wired_count++; pmap->pm_stats.resident_count++; } /* * Enter on the PV list if part of our managed memory. */ if ((newpte & PG_MANAGED) != 0) { if (pv == NULL) { pv = get_pv_entry(pmap, FALSE); pv->pv_va = va; } TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); if ((newpte & PG_RW) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); } /* * Update the PTE. */ if ((origpte & PG_V) != 0) { validate: origpte = pte_load_store(pte, newpte); KASSERT((origpte & PG_FRAME) == pa, ("pmap_enter: unexpected pa update for %#x", va)); if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { if ((origpte & PG_MANAGED) != 0) vm_page_dirty(m); /* * Although the PTE may still have PG_RW set, TLB * invalidation may nonetheless be required because * the PTE no longer has PG_M set. */ } #if defined(PAE) || defined(PAE_TABLES) else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) { /* * This PTE change does not require TLB invalidation. */ goto unchanged; } #endif if ((origpte & PG_A) != 0) pmap_invalidate_page(pmap, va); } else pte_store(pte, newpte); unchanged: #if VM_NRESERVLEVEL > 0 /* * If both the page table page and the reservation are fully * populated, then attempt promotion. */ if ((mpte == NULL || mpte->wire_count == NPTEPG) && pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 && vm_reserv_level_iffullpop(m) == 0) pmap_promote_pde(pmap, pde, va); #endif rv = KERN_SUCCESS; out: sched_unpin(); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); return (rv); } /* * Tries to create a read- and/or execute-only 2 or 4 MB page mapping. Returns * true if successful. Returns false if (1) a mapping already exists at the * specified virtual address or (2) a PV entry cannot be allocated without * reclaiming another PV entry. 
*/ static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { pd_entry_t newpde; PMAP_LOCK_ASSERT(pmap, MA_OWNED); newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | PG_PS | PG_V; if ((m->oflags & VPO_UNMANAGED) == 0) newpde |= PG_MANAGED; #if defined(PAE) || defined(PAE_TABLES) if ((prot & VM_PROT_EXECUTE) == 0) newpde |= pg_nx; #endif if (pmap != kernel_pmap) newpde |= PG_U; return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP | PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL) == KERN_SUCCESS); } /* * Tries to create the specified 2 or 4 MB page mapping. Returns KERN_SUCCESS * if the mapping was created, and either KERN_FAILURE or * KERN_RESOURCE_SHORTAGE otherwise. Returns KERN_FAILURE if * PMAP_ENTER_NOREPLACE was specified and a mapping already exists at the * specified virtual address. Returns KERN_RESOURCE_SHORTAGE if * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. * * The parameter "m" is only used when creating a managed, writeable mapping. */ static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags, vm_page_t m) { struct spglist free; pd_entry_t oldpde, *pde; vm_page_t mt; rw_assert(&pvh_global_lock, RA_WLOCKED); KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, ("pmap_enter_pde: newpde is missing PG_M")); PMAP_LOCK_ASSERT(pmap, MA_OWNED); pde = pmap_pde(pmap, va); oldpde = *pde; if ((oldpde & PG_V) != 0) { if ((flags & PMAP_ENTER_NOREPLACE) != 0) { CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" " in pmap %p", va, pmap); return (KERN_FAILURE); } /* Break the existing mapping(s). */ SLIST_INIT(&free); if ((oldpde & PG_PS) != 0) { /* * If the PDE resulted from a promotion, then a * reserved PT page could be freed. */ (void)pmap_remove_pde(pmap, pde, va, &free); if ((oldpde & PG_G) == 0) pmap_invalidate_pde_page(pmap, va, oldpde); } else { if (pmap_remove_ptes(pmap, va, va + NBPDR, &free)) pmap_invalidate_all(pmap); } vm_page_free_pages_toq(&free, true); if (pmap == kernel_pmap) { mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME); if (pmap_insert_pt_page(pmap, mt)) { /* * XXX Currently, this can't happen because * we do not perform pmap_enter(psind == 1) * on the kernel pmap. */ panic("pmap_enter_pde: trie insert failed"); } } else KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p", pde)); } if ((newpde & PG_MANAGED) != 0) { /* * Abort this mapping if its PV entry could not be created. */ if (!pmap_pv_insert_pde(pmap, va, newpde, flags)) { CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" " in pmap %p", va, pmap); return (KERN_RESOURCE_SHORTAGE); } if ((newpde & PG_RW) != 0) { for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) vm_page_aflag_set(mt, PGA_WRITEABLE); } } /* * Increment counters. */ if ((newpde & PG_W) != 0) pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE; pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; /* * Map the superpage. (This is not a promoted mapping; there will not * be any lingering 4KB page mappings in the TLB.) */ pde_store(pde, newpde); pmap_pde_mappings++; CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" " in pmap %p", va, pmap); return (KERN_SUCCESS); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. 
The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_offset_t va; vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); mpte = NULL; m = m_start; rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { va = start + ptoa(diff); if ((va & PDRMASK) == 0 && va + NBPDR <= end && m->psind == 1 && pg_ps_enabled && pmap_enter_4mpage(pmap, va, m, prot)) m = &m[NBPDR / PAGE_SIZE - 1]; else mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte); m = TAILQ_NEXT(m, listq); } rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte) { pt_entry_t *pte; vm_paddr_t pa; struct spglist free; KASSERT(pmap != kernel_pmap || va < kmi.clean_sva || va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * In the case that a page table page is not * resident, we are creating it here. */ if (pmap != kernel_pmap) { u_int ptepindex; pd_entry_t ptepa; /* * Calculate pagetable page index */ ptepindex = va >> PDRSHIFT; if (mpte && (mpte->pindex == ptepindex)) { mpte->wire_count++; } else { /* * Get the page directory entry */ ptepa = pmap->pm_pdir[ptepindex]; /* * If the page table page is mapped, we just increment * the hold count, and activate it. */ if (ptepa) { if (ptepa & PG_PS) return (NULL); mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME); mpte->wire_count++; } else { mpte = _pmap_allocpte(pmap, ptepindex, PMAP_ENTER_NOSLEEP); if (mpte == NULL) return (mpte); } } } else { mpte = NULL; } sched_pin(); pte = pmap_pte_quick(pmap, va); if (*pte) { if (mpte != NULL) { mpte->wire_count--; mpte = NULL; } sched_unpin(); return (mpte); } /* * Enter on the PV list if part of our managed memory. */ if ((m->oflags & VPO_UNMANAGED) == 0 && !pmap_try_insert_pv_entry(pmap, va, m)) { if (mpte != NULL) { SLIST_INIT(&free); if (pmap_unwire_ptp(pmap, mpte, &free)) { pmap_invalidate_page(pmap, va); vm_page_free_pages_toq(&free, true); } mpte = NULL; } sched_unpin(); return (mpte); } /* * Increment counters */ pmap->pm_stats.resident_count++; pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0); #if defined(PAE) || defined(PAE_TABLES) if ((prot & VM_PROT_EXECUTE) == 0) pa |= pg_nx; #endif /* * Now validate mapping with RO protection */ if ((m->oflags & VPO_UNMANAGED) != 0) pte_store(pte, pa | PG_V | PG_U); else pte_store(pte, pa | PG_V | PG_U | PG_MANAGED); sched_unpin(); return (mpte); } /* * Make a temporary mapping for a physical address. 
This is only intended * to be used for panic dumps. */ void * pmap_kenter_temporary(vm_paddr_t pa, int i) { vm_offset_t va; va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); pmap_kenter(va, pa); invlpg(va); return ((void *)crashdumpmap); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { pd_entry_t *pde; vm_paddr_t pa, ptepa; vm_page_t p; int pat_mode; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("pmap_object_init_pt: non-device object")); if (pg_ps_enabled && (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) { if (!vm_object_populate(object, pindex, pindex + atop(size))) return; p = vm_page_lookup(object, pindex); KASSERT(p->valid == VM_PAGE_BITS_ALL, ("pmap_object_init_pt: invalid page %p", p)); pat_mode = p->md.pat_mode; /* * Abort the mapping if the first page is not physically * aligned to a 2/4MB page boundary. */ ptepa = VM_PAGE_TO_PHYS(p); if (ptepa & (NBPDR - 1)) return; /* * Skip the first page. Abort the mapping if the rest of * the pages are not physically contiguous or have differing * memory attributes. */ p = TAILQ_NEXT(p, listq); for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; pa += PAGE_SIZE) { KASSERT(p->valid == VM_PAGE_BITS_ALL, ("pmap_object_init_pt: invalid page %p", p)); if (pa != VM_PAGE_TO_PHYS(p) || pat_mode != p->md.pat_mode) return; p = TAILQ_NEXT(p, listq); } /* * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and * "size" is a multiple of 2/4M, adding the PAT setting to * "pa" will not affect the termination of this loop. */ PMAP_LOCK(pmap); for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); pa < ptepa + size; pa += NBPDR) { pde = pmap_pde(pmap, addr); if (*pde == 0) { pde_store(pde, pa | PG_PS | PG_M | PG_A | PG_U | PG_RW | PG_V); pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; pmap_pde_mappings++; } /* Else continue on if the PDE is already valid. */ addr += NBPDR; } PMAP_UNLOCK(pmap); } } /* * Clear the wired attribute from the mappings for the specified range of * addresses in the given pmap. Every valid mapping within that range * must have the wired attribute set. In contrast, invalid mappings * cannot have the wired attribute set, so they are ignored. * * The wired attribute of the page table entry is not a hardware feature, * so there is no need to invalidate any TLB entries. */ void pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t pdnxt; pd_entry_t *pde; pt_entry_t *pte; boolean_t pv_lists_locked; if (pmap_is_current(pmap)) pv_lists_locked = FALSE; else { pv_lists_locked = TRUE; resume: rw_wlock(&pvh_global_lock); sched_pin(); } PMAP_LOCK(pmap); for (; sva < eva; sva = pdnxt) { pdnxt = (sva + NBPDR) & ~PDRMASK; if (pdnxt < sva) pdnxt = eva; pde = pmap_pde(pmap, sva); if ((*pde & PG_V) == 0) continue; if ((*pde & PG_PS) != 0) { if ((*pde & PG_W) == 0) panic("pmap_unwire: pde %#jx is missing PG_W", (uintmax_t)*pde); /* * Are we unwiring the entire large page? If not, * demote the mapping and fall through. */ if (sva + NBPDR == pdnxt && eva >= pdnxt) { /* * Regardless of whether a pde (or pte) is 32 * or 64 bits in size, PG_W is among the least * significant 32 bits. 
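 * This is also what makes the 32-bit atomic below safe under PAE: only
 * the low word of the (possibly 64-bit) entry is touched, and clearing
 * PG_W with a locked operation on that word preserves any PG_A/PG_M
 * updates the hardware may be performing concurrently.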
*/ atomic_clear_int((u_int *)pde, PG_W); pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE; continue; } else { if (!pv_lists_locked) { pv_lists_locked = TRUE; if (!rw_try_wlock(&pvh_global_lock)) { PMAP_UNLOCK(pmap); /* Repeat sva. */ goto resume; } sched_pin(); } if (!pmap_demote_pde(pmap, pde, sva)) panic("pmap_unwire: demotion failed"); } } if (pdnxt > eva) pdnxt = eva; for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, sva += PAGE_SIZE) { if ((*pte & PG_V) == 0) continue; if ((*pte & PG_W) == 0) panic("pmap_unwire: pte %#jx is missing PG_W", (uintmax_t)*pte); /* * PG_W must be cleared atomically. Although the pmap * lock synchronizes access to PG_W, another processor * could be setting PG_M and/or PG_A concurrently. * * PG_W is among the least significant 32 bits. */ atomic_clear_int((u_int *)pte, PG_W); pmap->pm_stats.wired_count--; } } if (pv_lists_locked) { sched_unpin(); rw_wunlock(&pvh_global_lock); } PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. Since * current pmap is always the kernel pmap when executing in * kernel, and we do not copy from the kernel pmap to a user * pmap, this optimization is not usable in 4/4G full split i386 * world. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { struct spglist free; pt_entry_t *src_pte, *dst_pte, ptetemp; pd_entry_t srcptepaddr; vm_page_t dstmpte, srcmpte; vm_offset_t addr, end_addr, pdnxt; u_int ptepindex; if (dst_addr != src_addr) return; end_addr = src_addr + len; rw_wlock(&pvh_global_lock); if (dst_pmap < src_pmap) { PMAP_LOCK(dst_pmap); PMAP_LOCK(src_pmap); } else { PMAP_LOCK(src_pmap); PMAP_LOCK(dst_pmap); } sched_pin(); for (addr = src_addr; addr < end_addr; addr = pdnxt) { KASSERT(addr < PMAP_TRM_MIN_ADDRESS, ("pmap_copy: invalid to pmap_copy the trampoline")); pdnxt = (addr + NBPDR) & ~PDRMASK; if (pdnxt < addr) pdnxt = end_addr; ptepindex = addr >> PDRSHIFT; srcptepaddr = src_pmap->pm_pdir[ptepindex]; if (srcptepaddr == 0) continue; if (srcptepaddr & PG_PS) { if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr) continue; if (dst_pmap->pm_pdir[ptepindex] == 0 && ((srcptepaddr & PG_MANAGED) == 0 || pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr, PMAP_ENTER_NORECLAIM))) { dst_pmap->pm_pdir[ptepindex] = srcptepaddr & ~PG_W; dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE; pmap_pde_mappings++; } continue; } srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME); KASSERT(srcmpte->wire_count > 0, ("pmap_copy: source page table page is unused")); if (pdnxt > end_addr) pdnxt = end_addr; src_pte = pmap_pte_quick3(src_pmap, addr); while (addr < pdnxt) { ptetemp = *src_pte; /* * we only virtual copy managed pages */ if ((ptetemp & PG_MANAGED) != 0) { dstmpte = pmap_allocpte(dst_pmap, addr, PMAP_ENTER_NOSLEEP); if (dstmpte == NULL) goto out; dst_pte = pmap_pte_quick(dst_pmap, addr); if (*dst_pte == 0 && pmap_try_insert_pv_entry(dst_pmap, addr, PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) { /* * Clear the wired, modified, and * accessed (referenced) bits * during the copy. 
*/ *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A); dst_pmap->pm_stats.resident_count++; } else { SLIST_INIT(&free); if (pmap_unwire_ptp(dst_pmap, dstmpte, &free)) { pmap_invalidate_page(dst_pmap, addr); vm_page_free_pages_toq(&free, true); } goto out; } if (dstmpte->wire_count >= srcmpte->wire_count) break; } addr += PAGE_SIZE; src_pte++; } } out: sched_unpin(); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(src_pmap); PMAP_UNLOCK(dst_pmap); } /* * Zero 1 page of virtual memory mapped from a hardware page by the caller. */ static __inline void pagezero(void *page) { #if defined(I686_CPU) if (cpu_class == CPUCLASS_686) { if (cpu_feature & CPUID_SSE2) sse2_pagezero(page); else i686_pagezero(page); } else #endif bzero(page, PAGE_SIZE); } /* * Zero the specified hardware page. */ void pmap_zero_page(vm_page_t m) { pt_entry_t *cmap_pte2; struct pcpu *pc; sched_pin(); pc = get_pcpu(); cmap_pte2 = pc->pc_cmap_pte2; mtx_lock(&pc->pc_cmap_lock); if (*cmap_pte2) panic("pmap_zero_page: CMAP2 busy"); *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr2); pagezero(pc->pc_cmap_addr2); *cmap_pte2 = 0; /* * Unpin the thread before releasing the lock. Otherwise the thread * could be rescheduled while still bound to the current CPU, only * to unpin itself immediately upon resuming execution. */ sched_unpin(); mtx_unlock(&pc->pc_cmap_lock); } /* * Zero an an area within a single hardware page. off and size must not * cover an area beyond a single hardware page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { pt_entry_t *cmap_pte2; struct pcpu *pc; sched_pin(); pc = get_pcpu(); cmap_pte2 = pc->pc_cmap_pte2; mtx_lock(&pc->pc_cmap_lock); if (*cmap_pte2) panic("pmap_zero_page_area: CMAP2 busy"); *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr2); if (off == 0 && size == PAGE_SIZE) pagezero(pc->pc_cmap_addr2); else bzero(pc->pc_cmap_addr2 + off, size); *cmap_pte2 = 0; sched_unpin(); mtx_unlock(&pc->pc_cmap_lock); } /* * Copy 1 specified hardware page to another. 
*/ void pmap_copy_page(vm_page_t src, vm_page_t dst) { pt_entry_t *cmap_pte1, *cmap_pte2; struct pcpu *pc; sched_pin(); pc = get_pcpu(); cmap_pte1 = pc->pc_cmap_pte1; cmap_pte2 = pc->pc_cmap_pte2; mtx_lock(&pc->pc_cmap_lock); if (*cmap_pte1) panic("pmap_copy_page: CMAP1 busy"); if (*cmap_pte2) panic("pmap_copy_page: CMAP2 busy"); *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A | pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr1); *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M | pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr2); bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE); *cmap_pte1 = 0; *cmap_pte2 = 0; sched_unpin(); mtx_unlock(&pc->pc_cmap_lock); } int unmapped_buf_allowed = 1; void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], vm_offset_t b_offset, int xfersize) { vm_page_t a_pg, b_pg; char *a_cp, *b_cp; vm_offset_t a_pg_offset, b_pg_offset; pt_entry_t *cmap_pte1, *cmap_pte2; struct pcpu *pc; int cnt; sched_pin(); pc = get_pcpu(); cmap_pte1 = pc->pc_cmap_pte1; cmap_pte2 = pc->pc_cmap_pte2; mtx_lock(&pc->pc_cmap_lock); if (*cmap_pte1 != 0) panic("pmap_copy_pages: CMAP1 busy"); if (*cmap_pte2 != 0) panic("pmap_copy_pages: CMAP2 busy"); while (xfersize > 0) { a_pg = ma[a_offset >> PAGE_SHIFT]; a_pg_offset = a_offset & PAGE_MASK; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); b_pg = mb[b_offset >> PAGE_SHIFT]; b_pg_offset = b_offset & PAGE_MASK; cnt = min(cnt, PAGE_SIZE - b_pg_offset); *cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A | pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr1); *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A | PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr2); a_cp = pc->pc_cmap_addr1 + a_pg_offset; b_cp = pc->pc_cmap_addr2 + b_pg_offset; bcopy(a_cp, b_cp, cnt); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } *cmap_pte1 = 0; *cmap_pte2 = 0; sched_unpin(); mtx_unlock(&pc->pc_cmap_lock); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; pv_entry_t pv; int loops = 0; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); rv = FALSE; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = TRUE; break; } loops++; if (loops >= 16) break; } if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = TRUE; break; } loops++; if (loops >= 16) break; } } rw_wunlock(&pvh_global_lock); return (rv); } /* * pmap_page_wired_mappings: * * Return the number of managed mappings to the given physical page * that are wired. */ int pmap_page_wired_mappings(vm_page_t m) { int count; count = 0; if ((m->oflags & VPO_UNMANAGED) != 0) return (count); rw_wlock(&pvh_global_lock); count = pmap_pvh_wired_mappings(&m->md, count); if ((m->flags & PG_FICTITIOUS) == 0) { count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count); } rw_wunlock(&pvh_global_lock); return (count); } /* * pmap_pvh_wired_mappings: * * Return the updated number "count" of managed mappings that are wired. 
*/ static int pmap_pvh_wired_mappings(struct md_page *pvh, int count) { pmap_t pmap; pt_entry_t *pte; pv_entry_t pv; rw_assert(&pvh_global_lock, RA_WLOCKED); sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pte = pmap_pte_quick(pmap, pv->pv_va); if ((*pte & PG_W) != 0) count++; PMAP_UNLOCK(pmap); } sched_unpin(); return (count); } /* * Returns TRUE if the given page is mapped individually or as part of * a 4mpage. Otherwise, returns FALSE. */ boolean_t pmap_page_is_mapped(vm_page_t m) { boolean_t rv; if ((m->oflags & VPO_UNMANAGED) != 0) return (FALSE); rw_wlock(&pvh_global_lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || ((m->flags & PG_FICTITIOUS) == 0 && !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); rw_wunlock(&pvh_global_lock); return (rv); } /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. */ void pmap_remove_pages(pmap_t pmap) { pt_entry_t *pte, tpte; vm_page_t m, mpte, mt; pv_entry_t pv; struct md_page *pvh; struct pv_chunk *pc, *npc; struct spglist free; int field, idx; int32_t bit; uint32_t inuse, bitmask; int allfree; if (pmap != PCPU_GET(curpmap)) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } SLIST_INIT(&free); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); sched_pin(); TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap, pc->pc_pmap)); allfree = 1; for (field = 0; field < _NPCM; field++) { inuse = ~pc->pc_map[field] & pc_freemask[field]; while (inuse != 0) { bit = bsfl(inuse); bitmask = 1UL << bit; idx = field * 32 + bit; pv = &pc->pc_pventry[idx]; inuse &= ~bitmask; pte = pmap_pde(pmap, pv->pv_va); tpte = *pte; if ((tpte & PG_PS) == 0) { pte = pmap_pte_quick(pmap, pv->pv_va); tpte = *pte & ~PG_PTE_PAT; } if (tpte == 0) { printf( "TPTE at %p IS ZERO @ VA %08x\n", pte, pv->pv_va); panic("bad pte"); } /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & PG_W) { allfree = 0; continue; } m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); KASSERT(m->phys_addr == (tpte & PG_FRAME), ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT((m->flags & PG_FICTITIOUS) != 0 || m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte)); pte_clear(pte); /* * Update the vm_page_t clean/reference bits. 
*/ if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { if ((tpte & PG_PS) != 0) { for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) vm_page_dirty(mt); } else vm_page_dirty(m); } /* Mark free */ PV_STAT(pv_entry_frees++); PV_STAT(pv_entry_spare++); pv_entry_count--; pc->pc_map[field] |= bitmask; if ((tpte & PG_PS) != 0) { pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE; pvh = pa_to_pvh(tpte & PG_PS_FRAME); TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); if (TAILQ_EMPTY(&pvh->pv_list)) { for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++) if (TAILQ_EMPTY(&mt->md.pv_list)) vm_page_aflag_clear(mt, PGA_WRITEABLE); } mpte = pmap_remove_pt_page(pmap, pv->pv_va); if (mpte != NULL) { pmap->pm_stats.resident_count--; KASSERT(mpte->wire_count == NPTEPG, ("pmap_remove_pages: pte page wire count error")); mpte->wire_count = 0; pmap_add_delayed_free_list(mpte, &free, FALSE); } } else { pmap->pm_stats.resident_count--; TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); if (TAILQ_EMPTY(&pvh->pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); } pmap_unuse_pt(pmap, pv->pv_va, &free); } } } if (allfree) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } } sched_unpin(); pmap_invalidate_all(pmap); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); vm_page_free_pages_toq(&free, true); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_modified: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have PG_M set. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); rw_wlock(&pvh_global_lock); rv = pmap_is_modified_pvh(&m->md) || ((m->flags & PG_FICTITIOUS) == 0 && pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); rw_wunlock(&pvh_global_lock); return (rv); } /* * Returns TRUE if any of the given mappings were used to modify * physical memory. Otherwise, returns FALSE. Both page and 2mpage * mappings are supported. */ static boolean_t pmap_is_modified_pvh(struct md_page *pvh) { pv_entry_t pv; pt_entry_t *pte; pmap_t pmap; boolean_t rv; rw_assert(&pvh_global_lock, RA_WLOCKED); rv = FALSE; sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pte = pmap_pte_quick(pmap, pv->pv_va); rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW); PMAP_UNLOCK(pmap); if (rv) break; } sched_unpin(); return (rv); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. */ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t pde; boolean_t rv; rv = FALSE; PMAP_LOCK(pmap); pde = *pmap_pde(pmap, addr); if (pde != 0 && (pde & PG_PS) == 0) rv = pmap_pte_ufast(pmap, addr, pde) == 0; PMAP_UNLOCK(pmap); return (rv); } /* * pmap_is_referenced: * * Return whether or not the specified physical page was referenced * in any physical maps. 
*/ boolean_t pmap_is_referenced(vm_page_t m) { boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); rw_wlock(&pvh_global_lock); rv = pmap_is_referenced_pvh(&m->md) || ((m->flags & PG_FICTITIOUS) == 0 && pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); rw_wunlock(&pvh_global_lock); return (rv); } /* * Returns TRUE if any of the given mappings were referenced and FALSE * otherwise. Both page and 4mpage mappings are supported. */ static boolean_t pmap_is_referenced_pvh(struct md_page *pvh) { pv_entry_t pv; pt_entry_t *pte; pmap_t pmap; boolean_t rv; rw_assert(&pvh_global_lock, RA_WLOCKED); rv = FALSE; sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pte = pmap_pte_quick(pmap, pv->pv_va); rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V); PMAP_UNLOCK(pmap); if (rv) break; } sched_unpin(); return (rv); } /* * Clear the write and modified bits in each of the given page's mappings. */ void pmap_remove_write(vm_page_t m) { struct md_page *pvh; pv_entry_t next_pv, pv; pmap_t pmap; pd_entry_t *pde; pt_entry_t oldpte, *pte; vm_offset_t va; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_write: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * set by another thread while the object is locked. Thus, * if PGA_WRITEABLE is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); sched_pin(); if ((m->flags & PG_FICTITIOUS) != 0) goto small_mappings; pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { va = pv->pv_va; pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, va); if ((*pde & PG_RW) != 0) (void)pmap_demote_pde(pmap, pde, va); PMAP_UNLOCK(pmap); } small_mappings: TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found" " a 4mpage in page %p's pv list", m)); pte = pmap_pte_quick(pmap, pv->pv_va); retry: oldpte = *pte; if ((oldpte & PG_RW) != 0) { /* * Regardless of whether a pte is 32 or 64 bits * in size, PG_RW and PG_M are among the least * significant 32 bits. */ if (!atomic_cmpset_int((u_int *)pte, oldpte, oldpte & ~(PG_RW | PG_M))) goto retry; if ((oldpte & PG_M) != 0) vm_page_dirty(m); pmap_invalidate_page(pmap, pv->pv_va); } PMAP_UNLOCK(pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); sched_unpin(); rw_wunlock(&pvh_global_lock); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * As an optimization, update the page's dirty field if a modified bit is * found while counting reference bits. This opportunistic update can be * performed at low cost and can eliminate the need for some future calls * to pmap_is_modified(). However, since this function stops after * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some * dirty pages. Those dirty pages will only be detected by a future call * to pmap_is_modified(). 
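 *
 * For a 2- or 4MB mapping the PG_A bit is shared by NPTEPG 4KB pages,
 * so the loop below clears it for only one "designated" 4KB page per
 * mapping, chosen by a cheap hash of the physical page number, the
 * superpage virtual address, and the pmap pointer (see the selection
 * test in the superpage loop below); the other pages merely count it.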
*/ int pmap_ts_referenced(vm_page_t m) { struct md_page *pvh; pv_entry_t pv, pvf; pmap_t pmap; pd_entry_t *pde; pt_entry_t *pte; vm_paddr_t pa; int rtval = 0; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_ts_referenced: page %p is not managed", m)); pa = VM_PAGE_TO_PHYS(m); pvh = pa_to_pvh(pa); rw_wlock(&pvh_global_lock); sched_pin(); if ((m->flags & PG_FICTITIOUS) != 0 || (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) goto small_mappings; pv = pvf; do { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, pv->pv_va); if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) { /* * Although "*pde" is mapping a 2/4MB page, because * this function is called at a 4KB page granularity, * we only update the 4KB page under test. */ vm_page_dirty(m); } if ((*pde & PG_A) != 0) { /* * Since this reference bit is shared by either 1024 * or 512 4KB pages, it should not be cleared every * time it is tested. Apply a simple "hash" function * on the physical page number, the virtual superpage * number, and the pmap address to select one 4KB page * out of the 1024 or 512 on which testing the * reference bit will result in clearing that bit. * This function is designed to avoid the selection of * the same 4KB page for every 2- or 4MB page mapping. * * On demotion, a mapping that hasn't been referenced * is simply destroyed. To avoid the possibility of a * subsequent page fault on a demoted wired mapping, * always leave its reference bit set. Moreover, * since the superpage is wired, the current state of * its reference bit won't affect page replacement. */ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^ (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && (*pde & PG_W) == 0) { atomic_clear_int((u_int *)pde, PG_A); pmap_invalidate_page(pmap, pv->pv_va); } rtval++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. */ if (TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); } if (rtval >= PMAP_TS_REFERENCED_MAX) goto out; } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); small_mappings: if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) goto out; pv = pvf; do { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced: found a 4mpage in page %p's pv list", m)); pte = pmap_pte_quick(pmap, pv->pv_va); if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if ((*pte & PG_A) != 0) { atomic_clear_int((u_int *)pte, PG_A); pmap_invalidate_page(pmap, pv->pv_va); rtval++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. */ if (TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); } } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < PMAP_TS_REFERENCED_MAX); out: sched_unpin(); rw_wunlock(&pvh_global_lock); return (rtval); } /* * Apply the given advice to the specified range of addresses within the * given pmap. Depending on the advice, clear the referenced and/or * modified flags in each mapping and set the mapped page's dirty field. 
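 *
 * Concretely, for both MADV_DONTNEED and MADV_FREE the PG_A and PG_M
 * bits are cleared on managed 4KB mappings in the range; the
 * difference below is that MADV_DONTNEED first transfers a set PG_M
 * into the page's dirty field via vm_page_dirty(), so a later
 * pmap_is_modified() can be avoided, while MADV_FREE simply discards
 * the modified state.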
*/ void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) { pd_entry_t oldpde, *pde; pt_entry_t *pte; vm_offset_t va, pdnxt; vm_page_t m; boolean_t anychanged, pv_lists_locked; if (advice != MADV_DONTNEED && advice != MADV_FREE) return; if (pmap_is_current(pmap)) pv_lists_locked = FALSE; else { pv_lists_locked = TRUE; resume: rw_wlock(&pvh_global_lock); sched_pin(); } anychanged = FALSE; PMAP_LOCK(pmap); for (; sva < eva; sva = pdnxt) { pdnxt = (sva + NBPDR) & ~PDRMASK; if (pdnxt < sva) pdnxt = eva; pde = pmap_pde(pmap, sva); oldpde = *pde; if ((oldpde & PG_V) == 0) continue; else if ((oldpde & PG_PS) != 0) { if ((oldpde & PG_MANAGED) == 0) continue; if (!pv_lists_locked) { pv_lists_locked = TRUE; if (!rw_try_wlock(&pvh_global_lock)) { if (anychanged) pmap_invalidate_all(pmap); PMAP_UNLOCK(pmap); goto resume; } sched_pin(); } if (!pmap_demote_pde(pmap, pde, sva)) { /* * The large page mapping was destroyed. */ continue; } /* * Unless the page mappings are wired, remove the * mapping to a single page so that a subsequent * access may repromote. Since the underlying page * table page is fully populated, this removal never * frees a page table page. */ if ((oldpde & PG_W) == 0) { pte = pmap_pte_quick(pmap, sva); KASSERT((*pte & PG_V) != 0, ("pmap_advise: invalid PTE")); pmap_remove_pte(pmap, pte, sva, NULL); anychanged = TRUE; } } if (pdnxt > eva) pdnxt = eva; va = pdnxt; for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++, sva += PAGE_SIZE) { if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) goto maybe_invlrng; else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { if (advice == MADV_DONTNEED) { /* * Future calls to pmap_is_modified() * can be avoided by making the page * dirty now. */ m = PHYS_TO_VM_PAGE(*pte & PG_FRAME); vm_page_dirty(m); } atomic_clear_int((u_int *)pte, PG_M | PG_A); } else if ((*pte & PG_A) != 0) atomic_clear_int((u_int *)pte, PG_A); else goto maybe_invlrng; if ((*pte & PG_G) != 0) { if (va == pdnxt) va = sva; } else anychanged = TRUE; continue; maybe_invlrng: if (va != pdnxt) { pmap_invalidate_range(pmap, va, sva); va = pdnxt; } } if (va != pdnxt) pmap_invalidate_range(pmap, va, sva); } if (anychanged) pmap_invalidate_all(pmap); if (pv_lists_locked) { sched_unpin(); rw_wunlock(&pvh_global_lock); } PMAP_UNLOCK(pmap); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { struct md_page *pvh; pv_entry_t next_pv, pv; pmap_t pmap; pd_entry_t oldpde, *pde; pt_entry_t oldpte, *pte; vm_offset_t va; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT(!vm_page_xbusied(m), ("pmap_clear_modify: page %p is exclusive busied", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); sched_pin(); if ((m->flags & PG_FICTITIOUS) != 0) goto small_mappings; pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { va = pv->pv_va; pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, va); oldpde = *pde; if ((oldpde & PG_RW) != 0) { if (pmap_demote_pde(pmap, pde, va)) { if ((oldpde & PG_W) == 0) { /* * Write protect the mapping to a * single page so that a subsequent * write access may repromote. 
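 * The virtual address of that single page is recovered from the
 * superpage by adding the page's offset within the 2/4MB physical
 * frame, VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME), to the
 * superpage's base va, since a superpage maps virtually and physically
 * contiguous ranges with identical offsets.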
*/ va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); pte = pmap_pte_quick(pmap, va); oldpte = *pte; if ((oldpte & PG_V) != 0) { /* * Regardless of whether a pte is 32 or 64 bits * in size, PG_RW and PG_M are among the least * significant 32 bits. */ while (!atomic_cmpset_int((u_int *)pte, oldpte, oldpte & ~(PG_M | PG_RW))) oldpte = *pte; vm_page_dirty(m); pmap_invalidate_page(pmap, va); } } } } PMAP_UNLOCK(pmap); } small_mappings: TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pde = pmap_pde(pmap, pv->pv_va); KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found" " a 4mpage in page %p's pv list", m)); pte = pmap_pte_quick(pmap, pv->pv_va); if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { /* * Regardless of whether a pte is 32 or 64 bits * in size, PG_M is among the least significant * 32 bits. */ atomic_clear_int((u_int *)pte, PG_M); pmap_invalidate_page(pmap, pv->pv_va); } PMAP_UNLOCK(pmap); } sched_unpin(); rw_wunlock(&pvh_global_lock); } /* * Miscellaneous support routines follow */ /* Adjust the cache mode for a 4KB page mapped via a PTE. */ static __inline void pmap_pte_attr(pt_entry_t *pte, int cache_bits) { u_int opte, npte; /* * The cache mode bits are all in the low 32-bits of the * PTE, so we can just spin on updating the low 32-bits. */ do { opte = *(u_int *)pte; npte = opte & ~PG_PTE_CACHE; npte |= cache_bits; } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte)); } /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */ static __inline void pmap_pde_attr(pd_entry_t *pde, int cache_bits) { u_int opde, npde; /* * The cache mode bits are all in the low 32-bits of the * PDE, so we can just spin on updating the low 32-bits. */ do { opde = *(u_int *)pde; npde = opde & ~PG_PDE_CACHE; npde |= cache_bits; } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde)); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) { struct pmap_preinit_mapping *ppim; vm_offset_t va, offset; vm_size_t tmpsize; int i; offset = pa & PAGE_MASK; size = round_page(offset + size); pa = pa & PG_FRAME; if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) va = pa + PMAP_MAP_LOW; else if (!pmap_initialized) { va = 0; for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->va == 0) { ppim->pa = pa; ppim->sz = size; ppim->mode = mode; ppim->va = virtual_avail; virtual_avail += size; va = ppim->va; break; } } if (va == 0) panic("%s: too many preinit mappings", __func__); } else { /* * If we have a preinit mapping, re-use it. 
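 *
 * (Illustrative, not from the original source: a typical consumer of
 * this interface maps a device region once at attach time, e.g.
 *
 *	void *regs = pmap_mapdev_attr(pa, size, PAT_WRITE_COMBINING);
 *
 * and tears it down with pmap_unmapdev() on detach.  The variables and
 * the PAT_WRITE_COMBINING mode are only one possible combination.)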
*/ for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->pa == pa && ppim->sz == size && ppim->mode == mode) return ((void *)(ppim->va + offset)); } va = kva_alloc(size); if (va == 0) panic("%s: Couldn't allocate KVA", __func__); } for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode); pmap_invalidate_range(kernel_pmap, va, va + tmpsize); pmap_invalidate_cache_range(va, va + size); return ((void *)(va + offset)); } void * pmap_mapdev(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE)); } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK)); } void pmap_unmapdev(vm_offset_t va, vm_size_t size) { struct pmap_preinit_mapping *ppim; vm_offset_t offset; int i; if (va >= PMAP_MAP_LOW && va <= KERNBASE && va + size <= KERNBASE) return; offset = va & PAGE_MASK; size = round_page(offset + size); va = trunc_page(va); for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) { ppim = pmap_preinit_mapping + i; if (ppim->va == va && ppim->sz == size) { if (pmap_initialized) return; ppim->pa = 0; ppim->va = 0; ppim->sz = 0; ppim->mode = 0; if (va + size == virtual_avail) virtual_avail = va; return; } } if (pmap_initialized) kva_free(va, size); } /* * Sets the memory attribute for the specified page. */ void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) { m->md.pat_mode = ma; if ((m->flags & PG_FICTITIOUS) != 0) return; /* * If "m" is a normal page, flush it from the cache. * See pmap_invalidate_cache_range(). * * First, try to find an existing mapping of the page by sf * buffer. sf_buf_invalidate_cache() modifies mapping and * flushes the cache. */ if (sf_buf_invalidate_cache(m)) return; /* * If page is not mapped by sf buffer, but CPU does not * support self snoop, map the page transient and do * invalidation. In the worst case, whole cache is flushed by * pmap_invalidate_cache_range(). */ if ((cpu_feature & CPUID_SS) == 0) pmap_flush_page(m); } static void pmap_flush_page(vm_page_t m) { pt_entry_t *cmap_pte2; struct pcpu *pc; vm_offset_t sva, eva; bool useclflushopt; useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0; if (useclflushopt || (cpu_feature & CPUID_CLFSH) != 0) { sched_pin(); pc = get_pcpu(); cmap_pte2 = pc->pc_cmap_pte2; mtx_lock(&pc->pc_cmap_lock); if (*cmap_pte2) panic("pmap_flush_page: CMAP2 busy"); *cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); invlcaddr(pc->pc_cmap_addr2); sva = (vm_offset_t)pc->pc_cmap_addr2; eva = sva + PAGE_SIZE; /* * Use mfence or sfence despite the ordering implied by * mtx_{un,}lock() because clflush on non-Intel CPUs * and clflushopt are not guaranteed to be ordered by * any other instruction. */ if (useclflushopt) sfence(); else if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); for (; sva < eva; sva += cpu_clflush_line_size) { if (useclflushopt) clflushopt(sva); else clflush(sva); } if (useclflushopt) sfence(); else if (cpu_vendor_id != CPU_VENDOR_INTEL) mfence(); *cmap_pte2 = 0; sched_unpin(); mtx_unlock(&pc->pc_cmap_lock); } else pmap_invalidate_cache(); } /* * Changes the specified virtual address range's memory type to that given by * the parameter "mode". The specified virtual address range must be * completely contained within either the kernel map. * * Returns zero if the change completed successfully, and either EINVAL or * ENOMEM if the change failed. 
Specifically, EINVAL is returned if some part * of the virtual address range was not mapped, and ENOMEM is returned if * there was insufficient memory available to complete the change. */ int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode) { vm_offset_t base, offset, tmpva; pd_entry_t *pde; pt_entry_t *pte; int cache_bits_pte, cache_bits_pde; boolean_t changed; base = trunc_page(va); offset = va & PAGE_MASK; size = round_page(offset + size); /* * Only supported on kernel virtual addresses above the recursive map. */ if (base < VM_MIN_KERNEL_ADDRESS) return (EINVAL); cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); changed = FALSE; /* * Pages that aren't mapped aren't supported. Also break down * 2/4MB pages into 4KB pages if required. */ PMAP_LOCK(kernel_pmap); for (tmpva = base; tmpva < base + size; ) { pde = pmap_pde(kernel_pmap, tmpva); if (*pde == 0) { PMAP_UNLOCK(kernel_pmap); return (EINVAL); } if (*pde & PG_PS) { /* * If the current 2/4MB page already has * the required memory type, then we need not * demote this page. Just increment tmpva to * the next 2/4MB page frame. */ if ((*pde & PG_PDE_CACHE) == cache_bits_pde) { tmpva = trunc_4mpage(tmpva) + NBPDR; continue; } /* * If the current offset aligns with a 2/4MB * page frame and there is at least 2/4MB left * within the range, then we need not break * down this page into 4KB pages. */ if ((tmpva & PDRMASK) == 0 && tmpva + PDRMASK < base + size) { tmpva += NBPDR; continue; } if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) { PMAP_UNLOCK(kernel_pmap); return (ENOMEM); } } pte = vtopte(tmpva); if (*pte == 0) { PMAP_UNLOCK(kernel_pmap); return (EINVAL); } tmpva += PAGE_SIZE; } PMAP_UNLOCK(kernel_pmap); /* * Ok, all the pages exist, so run through them updating their * cache mode if required. */ for (tmpva = base; tmpva < base + size; ) { pde = pmap_pde(kernel_pmap, tmpva); if (*pde & PG_PS) { if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { pmap_pde_attr(pde, cache_bits_pde); changed = TRUE; } tmpva = trunc_4mpage(tmpva) + NBPDR; } else { pte = vtopte(tmpva); if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { pmap_pte_attr(pte, cache_bits_pte); changed = TRUE; } tmpva += PAGE_SIZE; } } /* * Flush CPU caches to make sure any data isn't cached that * shouldn't be, etc. */ if (changed) { pmap_invalidate_range(kernel_pmap, base, tmpva); pmap_invalidate_cache_range(base, tmpva); } return (0); } /* * perform the pmap work for mincore */ int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) { pd_entry_t pde; pt_entry_t pte; vm_paddr_t pa; int val; PMAP_LOCK(pmap); retry: pde = *pmap_pde(pmap, addr); if (pde != 0) { if ((pde & PG_PS) != 0) { pte = pde; /* Compute the physical address of the 4KB page. */ pa = ((pde & PG_PS_FRAME) | (addr & PDRMASK)) & PG_FRAME; val = MINCORE_SUPER; } else { pte = pmap_pte_ufast(pmap, addr, pde); pa = pte & PG_FRAME; val = 0; } } else { pte = 0; pa = 0; val = 0; } if ((pte & PG_V) != 0) { val |= MINCORE_INCORE; if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; if ((pte & PG_A) != 0) val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; } if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. 
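 * (Illustrative, not from the original source: the flags assembled in
 * "val" are what mincore(2) ultimately reports to userland, e.g.
 *
 *	char vec[npages];
 *	if (mincore(addr, len, vec) == 0 && (vec[0] & MINCORE_INCORE))
 *		printf("first page is resident\n");
 *
 * with MINCORE_SUPER additionally flagging pages backed by a 2/4MB
 * mapping; "addr", "len" and "npages" are assumed here.)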
*/ if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) goto retry; } else PA_UNLOCK_COND(*locked_pa); PMAP_UNLOCK(pmap); return (val); } void pmap_activate(struct thread *td) { pmap_t pmap, oldpmap; u_int cpuid; u_int32_t cr3; critical_enter(); pmap = vmspace_pmap(td->td_proc->p_vmspace); oldpmap = PCPU_GET(curpmap); cpuid = PCPU_GET(cpuid); #if defined(SMP) CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); CPU_SET_ATOMIC(cpuid, &pmap->pm_active); #else CPU_CLR(cpuid, &oldpmap->pm_active); CPU_SET(cpuid, &pmap->pm_active); #endif #if defined(PAE) || defined(PAE_TABLES) cr3 = vtophys(pmap->pm_pdpt); #else cr3 = vtophys(pmap->pm_pdir); #endif /* * pmap_activate is for the current thread on the current cpu */ td->td_pcb->pcb_cr3 = cr3; PCPU_SET(curpmap, pmap); critical_exit(); } void pmap_activate_boot(pmap_t pmap) { u_int cpuid; cpuid = PCPU_GET(cpuid); #if defined(SMP) CPU_SET_ATOMIC(cpuid, &pmap->pm_active); #else CPU_SET(cpuid, &pmap->pm_active); #endif PCPU_SET(curpmap, pmap); } void pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) { } /* * Increase the starting virtual address of the given mapping if a * different alignment might result in more superpage mappings. */ void pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size) { vm_offset_t superpage_offset; if (size < NBPDR) return; if (object != NULL && (object->flags & OBJ_COLORED) != 0) offset += ptoa(object->pg_color); superpage_offset = offset & PDRMASK; if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR || (*addr & PDRMASK) == superpage_offset) return; if ((*addr & PDRMASK) < superpage_offset) *addr = (*addr & ~PDRMASK) + superpage_offset; else *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset; } vm_offset_t pmap_quick_enter_page(vm_page_t m) { vm_offset_t qaddr; pt_entry_t *pte; critical_enter(); qaddr = PCPU_GET(qmap_addr); pte = vtopte(qaddr); KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy")); *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M | pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0); invlpg(qaddr); return (qaddr); } void pmap_quick_remove_page(vm_offset_t addr) { vm_offset_t qaddr; pt_entry_t *pte; qaddr = PCPU_GET(qmap_addr); pte = vtopte(qaddr); KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use")); KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address")); *pte = 0; critical_exit(); } static vmem_t *pmap_trm_arena; static vmem_addr_t pmap_trm_arena_last = PMAP_TRM_MIN_ADDRESS; static int trm_guard = PAGE_SIZE; static int pmap_trm_import(void *unused __unused, vmem_size_t size, int flags, vmem_addr_t *addrp) { vm_page_t m; vmem_addr_t af, addr, prev_addr; pt_entry_t *trm_pte; prev_addr = atomic_load_long(&pmap_trm_arena_last); size = round_page(size) + trm_guard; for (;;) { if (prev_addr + size < prev_addr || prev_addr + size < size || prev_addr + size > PMAP_TRM_MAX_ADDRESS) return (ENOMEM); addr = prev_addr + size; if (atomic_fcmpset_int(&pmap_trm_arena_last, &prev_addr, addr)) break; } prev_addr += trm_guard; trm_pte = PTmap + atop(prev_addr); for (af = prev_addr; af < addr; af += PAGE_SIZE) { m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK); pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) | PG_M | PG_A | PG_RW | PG_V | pgeflag | pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE)); } *addrp = prev_addr; return (0); } static void pmap_init_trm(void) { vm_page_t pd_m; TUNABLE_INT_FETCH("machdep.trm_guard", &trm_guard); if ((trm_guard & 
PAGE_MASK) != 0) trm_guard = 0; pmap_trm_arena = vmem_create("i386trampoline", 0, 0, 1, 0, M_WAITOK); vmem_set_import(pmap_trm_arena, pmap_trm_import, NULL, NULL, PAGE_SIZE); pd_m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY | VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO); if ((pd_m->flags & PG_ZERO) == 0) pmap_zero_page(pd_m); PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V | pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE); } void * pmap_trm_alloc(size_t size, int flags) { vmem_addr_t res; int error; MPASS((flags & ~(M_WAITOK | M_NOWAIT | M_ZERO)) == 0); error = vmem_xalloc(pmap_trm_arena, roundup2(size, 4), sizeof(int), 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags | M_FIRSTFIT, &res); if (error != 0) return (NULL); if ((flags & M_ZERO) != 0) bzero((void *)res, size); return ((void *)res); } void pmap_trm_free(void *addr, size_t size) { vmem_free(pmap_trm_arena, (uintptr_t)addr, roundup2(size, 4)); } #if defined(PMAP_DEBUG) pmap_pid_dump(int pid) { pmap_t pmap; struct proc *p; int npte = 0; int index; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_pid != pid) continue; if (p->p_vmspace) { int i,j; index = 0; pmap = vmspace_pmap(p->p_vmspace); for (i = 0; i < NPDEPTD; i++) { pd_entry_t *pde; pt_entry_t *pte; vm_offset_t base = i << PDRSHIFT; pde = &pmap->pm_pdir[i]; if (pde && pmap_pde_v(pde)) { for (j = 0; j < NPTEPG; j++) { vm_offset_t va = base + (j << PAGE_SHIFT); if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) { if (index) { index = 0; printf("\n"); } sx_sunlock(&allproc_lock); return (npte); } pte = pmap_pte(pmap, va); if (pte && pmap_pte_v(pte)) { pt_entry_t pa; vm_page_t m; pa = *pte; m = PHYS_TO_VM_PAGE(pa & PG_FRAME); printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags); npte++; index++; if (index >= 2) { index = 0; printf("\n"); } else { printf(" "); } } } } } } } sx_sunlock(&allproc_lock); return (npte); } #endif Index: head/sys/kern/kern_cpuset.c =================================================================== --- head/sys/kern/kern_cpuset.c (revision 339926) +++ head/sys/kern/kern_cpuset.c (revision 339927) @@ -1,2295 +1,2302 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, Jeffrey Roberson * All rights reserved. * * Copyright (c) 2008 Nokia Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif /* DDB */ /* * cpusets provide a mechanism for creating and manipulating sets of * processors for the purpose of constraining the scheduling of threads to * specific processors. * * Each process belongs to an identified set, by default this is set 1. Each * thread may further restrict the cpus it may run on to a subset of this * named set. This creates an anonymous set which other threads and processes * may not join by number. * * The named set is referred to herein as the 'base' set to avoid ambiguity. * This set is usually a child of a 'root' set while the anonymous set may * simply be referred to as a mask. In the syscall api these are referred to * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here. * * Threads inherit their set from their creator whether it be anonymous or * not. This means that anonymous sets are immutable because they may be * shared. To modify an anonymous set a new set is created with the desired * mask and the same parent as the existing anonymous set. This gives the * illusion of each thread having a private mask. * * Via the syscall apis a user may ask to retrieve or modify the root, base, * or mask that is discovered via a pid, tid, or setid. Modifying a set * modifies all numbered and anonymous child sets to comply with the new mask. * Modifying a pid or tid's mask applies only to that tid but must still * exist within the assigned parent set. * * A thread may not be assigned to a group separate from other threads in * the process. This is to remove ambiguity when the setid is queried with * a pid argument. There is no other technical limitation. * * This somewhat complex arrangement is intended to make it easy for * applications to query available processors and bind their threads to * specific processors while also allowing administrators to dynamically * reprovision by changing sets which apply to groups of processes. * * A simple application should not concern itself with sets at all and * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id * meaning 'curthread'. It may query available cpus for that tid with a * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...). 
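 *
 * Illustrative only, not part of the original source: the query
 * described above looks roughly like this from userland:
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	cpuset_t mask;
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask) == -1)
 *		err(1, "cpuset_getaffinity");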
*/ LIST_HEAD(domainlist, domainset); +struct domainset __read_mostly domainset_fixed[MAXMEMDOM]; struct domainset __read_mostly domainset_prefer[MAXMEMDOM]; struct domainset __read_mostly domainset_roundrobin; static uma_zone_t cpuset_zone; static uma_zone_t domainset_zone; static struct mtx cpuset_lock; static struct setlist cpuset_ids; static struct domainlist cpuset_domains; static struct unrhdr *cpuset_unr; static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel; static struct domainset domainset0, domainset2; /* Return the size of cpuset_t at the kernel level */ SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD, SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)"); cpuset_t *cpuset_root; cpuset_t cpuset_domain[MAXMEMDOM]; static int domainset_valid(const struct domainset *, const struct domainset *); /* * Find the first non-anonymous set starting from 'set'. */ static struct cpuset * cpuset_getbase(struct cpuset *set) { if (set->cs_id == CPUSET_INVALID) set = set->cs_parent; return (set); } /* * Walks up the tree from 'set' to find the root. */ static struct cpuset * cpuset_getroot(struct cpuset *set) { while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL) set = set->cs_parent; return (set); } /* * Acquire a reference to a cpuset, all pointers must be tracked with refs. */ struct cpuset * cpuset_ref(struct cpuset *set) { refcount_acquire(&set->cs_ref); return (set); } /* * Walks up the tree from 'set' to find the root. Returns the root * referenced. */ static struct cpuset * cpuset_refroot(struct cpuset *set) { return (cpuset_ref(cpuset_getroot(set))); } /* * Find the first non-anonymous set starting from 'set'. Returns this set * referenced. May return the passed in set with an extra ref if it is * not anonymous. */ static struct cpuset * cpuset_refbase(struct cpuset *set) { return (cpuset_ref(cpuset_getbase(set))); } /* * Release a reference in a context where it is safe to allocate. */ void cpuset_rel(struct cpuset *set) { cpusetid_t id; if (refcount_release(&set->cs_ref) == 0) return; mtx_lock_spin(&cpuset_lock); LIST_REMOVE(set, cs_siblings); id = set->cs_id; if (id != CPUSET_INVALID) LIST_REMOVE(set, cs_link); mtx_unlock_spin(&cpuset_lock); cpuset_rel(set->cs_parent); uma_zfree(cpuset_zone, set); if (id != CPUSET_INVALID) free_unr(cpuset_unr, id); } /* * Deferred release must be used when in a context that is not safe to * allocate/free. This places any unreferenced sets on the list 'head'. */ static void cpuset_rel_defer(struct setlist *head, struct cpuset *set) { if (refcount_release(&set->cs_ref) == 0) return; mtx_lock_spin(&cpuset_lock); LIST_REMOVE(set, cs_siblings); if (set->cs_id != CPUSET_INVALID) LIST_REMOVE(set, cs_link); LIST_INSERT_HEAD(head, set, cs_link); mtx_unlock_spin(&cpuset_lock); } /* * Complete a deferred release. Removes the set from the list provided to * cpuset_rel_defer. */ static void cpuset_rel_complete(struct cpuset *set) { LIST_REMOVE(set, cs_link); cpuset_rel(set->cs_parent); uma_zfree(cpuset_zone, set); } /* * Find a set based on an id. Returns it with a ref. 
*/ static struct cpuset * cpuset_lookup(cpusetid_t setid, struct thread *td) { struct cpuset *set; if (setid == CPUSET_INVALID) return (NULL); mtx_lock_spin(&cpuset_lock); LIST_FOREACH(set, &cpuset_ids, cs_link) if (set->cs_id == setid) break; if (set) cpuset_ref(set); mtx_unlock_spin(&cpuset_lock); KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__)); if (set != NULL && jailed(td->td_ucred)) { struct cpuset *jset, *tset; jset = td->td_ucred->cr_prison->pr_cpuset; for (tset = set; tset != NULL; tset = tset->cs_parent) if (tset == jset) break; if (tset == NULL) { cpuset_rel(set); set = NULL; } } return (set); } /* * Create a set in the space provided in 'set' with the provided parameters. * The set is returned with a single ref. May return EDEADLK if the set * will have no valid cpu based on restrictions from the parent. */ static int _cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask, struct domainset *domain, cpusetid_t id) { if (domain == NULL) domain = parent->cs_domain; if (mask == NULL) mask = &parent->cs_mask; if (!CPU_OVERLAP(&parent->cs_mask, mask)) return (EDEADLK); /* The domain must be prepared ahead of time. */ if (!domainset_valid(parent->cs_domain, domain)) return (EDEADLK); CPU_COPY(mask, &set->cs_mask); LIST_INIT(&set->cs_children); refcount_init(&set->cs_ref, 1); set->cs_flags = 0; mtx_lock_spin(&cpuset_lock); set->cs_domain = domain; CPU_AND(&set->cs_mask, &parent->cs_mask); set->cs_id = id; set->cs_parent = cpuset_ref(parent); LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings); if (set->cs_id != CPUSET_INVALID) LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); mtx_unlock_spin(&cpuset_lock); return (0); } /* * Create a new non-anonymous set with the requested parent and mask. May * return failures if the mask is invalid or a new number can not be * allocated. */ static int cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask) { struct cpuset *set; cpusetid_t id; int error; id = alloc_unr(cpuset_unr); if (id == -1) return (ENFILE); *setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); error = _cpuset_create(set, parent, mask, NULL, id); if (error == 0) return (0); free_unr(cpuset_unr, id); uma_zfree(cpuset_zone, set); return (error); } static void cpuset_freelist_add(struct setlist *list, int count) { struct cpuset *set; int i; for (i = 0; i < count; i++) { set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK); LIST_INSERT_HEAD(list, set, cs_link); } } static void cpuset_freelist_init(struct setlist *list, int count) { LIST_INIT(list); cpuset_freelist_add(list, count); } static void cpuset_freelist_free(struct setlist *list) { struct cpuset *set; while ((set = LIST_FIRST(list)) != NULL) { LIST_REMOVE(set, cs_link); uma_zfree(cpuset_zone, set); } } static void domainset_freelist_add(struct domainlist *list, int count) { struct domainset *set; int i; for (i = 0; i < count; i++) { set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK); LIST_INSERT_HEAD(list, set, ds_link); } } static void domainset_freelist_init(struct domainlist *list, int count) { LIST_INIT(list); domainset_freelist_add(list, count); } static void domainset_freelist_free(struct domainlist *list) { struct domainset *set; while ((set = LIST_FIRST(list)) != NULL) { LIST_REMOVE(set, ds_link); uma_zfree(domainset_zone, set); } } /* Copy a domainset preserving mask and policy. 
*/ static void domainset_copy(const struct domainset *from, struct domainset *to) { DOMAINSET_COPY(&from->ds_mask, &to->ds_mask); to->ds_policy = from->ds_policy; to->ds_prefer = from->ds_prefer; } /* Return 1 if mask and policy are equal, otherwise 0. */ static int domainset_equal(const struct domainset *one, const struct domainset *two) { return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 && one->ds_policy == two->ds_policy && one->ds_prefer == two->ds_prefer); } /* Return 1 if child is a valid subset of parent. */ static int domainset_valid(const struct domainset *parent, const struct domainset *child) { if (child->ds_policy != DOMAINSET_POLICY_PREFER) return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask)); return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask)); } static int domainset_restrict(const struct domainset *parent, const struct domainset *child) { if (child->ds_policy != DOMAINSET_POLICY_PREFER) return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask)); return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask)); } /* * Lookup or create a domainset. The key is provided in ds_mask and * ds_policy. If the domainset does not yet exist the storage in * 'domain' is used to insert. Otherwise this storage is freed to the * domainset_zone and the existing domainset is returned. */ static struct domainset * _domainset_create(struct domainset *domain, struct domainlist *freelist) { struct domainset *ndomain; int i, j, max; KASSERT(domain->ds_cnt <= vm_ndomains, ("invalid domain count in domainset %p", domain)); KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER || domain->ds_prefer < vm_ndomains, ("invalid preferred domain in domains %p", domain)); mtx_lock_spin(&cpuset_lock); LIST_FOREACH(ndomain, &cpuset_domains, ds_link) if (domainset_equal(ndomain, domain)) break; /* * If the domain does not yet exist we insert it and initialize * various iteration helpers which are not part of the key. */ if (ndomain == NULL) { LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link); domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); max = DOMAINSET_FLS(&domain->ds_mask) + 1; for (i = 0, j = 0; i < max; i++) if (DOMAINSET_ISSET(i, &domain->ds_mask)) domain->ds_order[j++] = i; } mtx_unlock_spin(&cpuset_lock); if (ndomain == NULL) return (domain); if (freelist != NULL) LIST_INSERT_HEAD(freelist, domain, ds_link); else uma_zfree(domainset_zone, domain); return (ndomain); } /* * Are any of the domains in the mask empty? If so, silently * remove them and update the domainset accordingly. If only empty * domains are present, we must return failure. */ static bool domainset_empty_vm(struct domainset *domain) { int i, j, max; max = DOMAINSET_FLS(&domain->ds_mask) + 1; for (i = 0; i < max; i++) if (DOMAINSET_ISSET(i, &domain->ds_mask) && VM_DOMAIN_EMPTY(i)) DOMAINSET_CLR(i, &domain->ds_mask); domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); max = DOMAINSET_FLS(&domain->ds_mask) + 1; for (i = j = 0; i < max; i++) { if (DOMAINSET_ISSET(i, &domain->ds_mask)) domain->ds_order[j++] = i; else if (domain->ds_policy == DOMAINSET_POLICY_PREFER && domain->ds_prefer == i && domain->ds_cnt > 1) { domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; domain->ds_prefer = -1; } } return (DOMAINSET_EMPTY(&domain->ds_mask)); } /* * Create or lookup a domainset based on the key held in 'domain'. */ struct domainset * domainset_create(const struct domainset *domain) { struct domainset *ndomain; /* * Validate the policy. It must specify a useable policy number with * only valid domains. 
Preferred must include the preferred domain * in the mask. */ if (domain->ds_policy <= DOMAINSET_POLICY_INVALID || domain->ds_policy > DOMAINSET_POLICY_MAX) return (NULL); if (domain->ds_policy == DOMAINSET_POLICY_PREFER && !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask)) return (NULL); if (!DOMAINSET_SUBSET(&domainset0.ds_mask, &domain->ds_mask)) return (NULL); ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO); domainset_copy(domain, ndomain); return _domainset_create(ndomain, NULL); } /* * Update thread domainset pointers. */ static void domainset_notify(void) { struct thread *td; struct proc *p; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); continue; } FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); td->td_domain.dr_policy = td->td_cpuset->cs_domain; thread_unlock(td); } PROC_UNLOCK(p); } sx_sunlock(&allproc_lock); kernel_object->domain.dr_policy = cpuset_kernel->cs_domain; } /* * Create a new set that is a subset of a parent. */ static struct domainset * domainset_shadow(const struct domainset *pdomain, const struct domainset *domain, struct domainlist *freelist) { struct domainset *ndomain; ndomain = LIST_FIRST(freelist); LIST_REMOVE(ndomain, ds_link); /* * Initialize the key from the request. */ domainset_copy(domain, ndomain); /* * Restrict the key by the parent. */ DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask); return _domainset_create(ndomain, freelist); } /* * Recursively check for errors that would occur from applying mask to * the tree of sets starting at 'set'. Checks for sets that would become * empty as well as RDONLY flags. */ static int cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask) { struct cpuset *nset; cpuset_t newmask; int error; mtx_assert(&cpuset_lock, MA_OWNED); if (set->cs_flags & CPU_SET_RDONLY) return (EPERM); if (check_mask) { if (!CPU_OVERLAP(&set->cs_mask, mask)) return (EDEADLK); CPU_COPY(&set->cs_mask, &newmask); CPU_AND(&newmask, mask); } else CPU_COPY(mask, &newmask); error = 0; LIST_FOREACH(nset, &set->cs_children, cs_siblings) if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0) break; return (error); } /* * Applies the mask 'mask' without checking for empty sets or permissions. */ static void cpuset_update(struct cpuset *set, cpuset_t *mask) { struct cpuset *nset; mtx_assert(&cpuset_lock, MA_OWNED); CPU_AND(&set->cs_mask, mask); LIST_FOREACH(nset, &set->cs_children, cs_siblings) cpuset_update(nset, &set->cs_mask); return; } /* * Modify the set 'set' to use a copy of the mask provided. Apply this new * mask to restrict all children in the tree. Checks for validity before * applying the changes. */ static int cpuset_modify(struct cpuset *set, cpuset_t *mask) { struct cpuset *root; int error; error = priv_check(curthread, PRIV_SCHED_CPUSET); if (error) return (error); /* * In case we are called from within the jail * we do not allow modifying the dedicated root * cpuset of the jail but may still allow to * change child sets. */ if (jailed(curthread->td_ucred) && set->cs_flags & CPU_SET_ROOT) return (EPERM); /* * Verify that we have access to this set of * cpus. 
*/ root = cpuset_getroot(set); mtx_lock_spin(&cpuset_lock); if (root && !CPU_SUBSET(&root->cs_mask, mask)) { error = EINVAL; goto out; } error = cpuset_testupdate(set, mask, 0); if (error) goto out; CPU_COPY(mask, &set->cs_mask); cpuset_update(set, mask); out: mtx_unlock_spin(&cpuset_lock); return (error); } /* * Recursively check for errors that would occur from applying mask to * the tree of sets starting at 'set'. Checks for sets that would become * empty as well as RDONLY flags. */ static int cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset, struct domainset *orig, int *count, int check_mask) { struct cpuset *nset; struct domainset *domain; struct domainset newset; int error; mtx_assert(&cpuset_lock, MA_OWNED); if (set->cs_flags & CPU_SET_RDONLY) return (EPERM); domain = set->cs_domain; domainset_copy(domain, &newset); if (!domainset_equal(domain, orig)) { if (!domainset_restrict(domain, dset)) return (EDEADLK); DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask); /* Count the number of domains that are changing. */ (*count)++; } error = 0; LIST_FOREACH(nset, &set->cs_children, cs_siblings) if ((error = cpuset_testupdate_domain(nset, &newset, domain, count, 1)) != 0) break; return (error); } /* * Applies the mask 'mask' without checking for empty sets or permissions. */ static void cpuset_update_domain(struct cpuset *set, struct domainset *domain, struct domainset *orig, struct domainlist *domains) { struct cpuset *nset; mtx_assert(&cpuset_lock, MA_OWNED); /* * If this domainset has changed from the parent we must calculate * a new set. Otherwise it simply inherits from the parent. When * we inherit from the parent we get a new mask and policy. If the * set is modified from the parent we keep the policy and only * update the mask. */ if (set->cs_domain != orig) { orig = set->cs_domain; set->cs_domain = domainset_shadow(domain, orig, domains); } else set->cs_domain = domain; LIST_FOREACH(nset, &set->cs_children, cs_siblings) cpuset_update_domain(nset, set->cs_domain, orig, domains); return; } /* * Modify the set 'set' to use a copy the domainset provided. Apply this new * mask to restrict all children in the tree. Checks for validity before * applying the changes. */ static int cpuset_modify_domain(struct cpuset *set, struct domainset *domain) { struct domainlist domains; struct domainset temp; struct domainset *dset; struct cpuset *root; int ndomains, needed; int error; error = priv_check(curthread, PRIV_SCHED_CPUSET); if (error) return (error); /* * In case we are called from within the jail * we do not allow modifying the dedicated root * cpuset of the jail but may still allow to * change child sets. */ if (jailed(curthread->td_ucred) && set->cs_flags & CPU_SET_ROOT) return (EPERM); domainset_freelist_init(&domains, 0); domain = domainset_create(domain); ndomains = needed = 0; do { if (ndomains < needed) { domainset_freelist_add(&domains, needed - ndomains); ndomains = needed; } root = cpuset_getroot(set); mtx_lock_spin(&cpuset_lock); dset = root->cs_domain; /* * Verify that we have access to this set of domains. */ if (root && !domainset_valid(dset, domain)) { error = EINVAL; goto out; } /* * If applying prefer we keep the current set as the fallback. */ if (domain->ds_policy == DOMAINSET_POLICY_PREFER) DOMAINSET_COPY(&set->cs_domain->ds_mask, &domain->ds_mask); /* * Determine whether we can apply this set of domains and * how many new domain structures it will require. 
*/ domainset_copy(domain, &temp); needed = 0; error = cpuset_testupdate_domain(set, &temp, set->cs_domain, &needed, 0); if (error) goto out; } while (ndomains < needed); dset = set->cs_domain; cpuset_update_domain(set, domain, dset, &domains); out: mtx_unlock_spin(&cpuset_lock); domainset_freelist_free(&domains); if (error == 0) domainset_notify(); return (error); } /* * Resolve the 'which' parameter of several cpuset apis. * * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also * checks for permission via p_cansched(). * * For WHICH_SET returns a valid set with a new reference. * * -1 may be supplied for any argument to mean the current proc/thread or * the base set of the current thread. May fail with ESRCH/EPERM. */ int cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp, struct cpuset **setp) { struct cpuset *set; struct thread *td; struct proc *p; int error; *pp = p = NULL; *tdp = td = NULL; *setp = set = NULL; switch (which) { case CPU_WHICH_PID: if (id == -1) { PROC_LOCK(curproc); p = curproc; break; } if ((p = pfind(id)) == NULL) return (ESRCH); break; case CPU_WHICH_TID: if (id == -1) { PROC_LOCK(curproc); p = curproc; td = curthread; break; } td = tdfind(id, -1); if (td == NULL) return (ESRCH); p = td->td_proc; break; case CPU_WHICH_CPUSET: if (id == -1) { thread_lock(curthread); set = cpuset_refbase(curthread->td_cpuset); thread_unlock(curthread); } else set = cpuset_lookup(id, curthread); if (set) { *setp = set; return (0); } return (ESRCH); case CPU_WHICH_JAIL: { /* Find `set' for prison with given id. */ struct prison *pr; sx_slock(&allprison_lock); pr = prison_find_child(curthread->td_ucred->cr_prison, id); sx_sunlock(&allprison_lock); if (pr == NULL) return (ESRCH); cpuset_ref(pr->pr_cpuset); *setp = pr->pr_cpuset; mtx_unlock(&pr->pr_mtx); return (0); } case CPU_WHICH_IRQ: case CPU_WHICH_DOMAIN: return (0); default: return (EINVAL); } error = p_cansched(curthread, p); if (error) { PROC_UNLOCK(p); return (error); } if (td == NULL) td = FIRST_THREAD_IN_PROC(p); *pp = p; *tdp = td; return (0); } static int cpuset_testshadow(struct cpuset *set, const cpuset_t *mask, const struct domainset *domain) { struct cpuset *parent; struct domainset *dset; parent = cpuset_getbase(set); /* * If we are restricting a cpu mask it must be a subset of the * parent or invalid CPUs have been specified. */ if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask)) return (EINVAL); /* * If we are restricting a domain mask it must be a subset of the * parent or invalid domains have been specified. */ dset = parent->cs_domain; if (domain != NULL && !domainset_valid(dset, domain)) return (EINVAL); return (0); } /* * Create an anonymous set with the provided mask in the space provided by * 'nset'. If the passed in set is anonymous we use its parent otherwise * the new set is a child of 'set'. 
*/ static int cpuset_shadow(struct cpuset *set, struct cpuset **nsetp, const cpuset_t *mask, const struct domainset *domain, struct setlist *cpusets, struct domainlist *domains) { struct cpuset *parent; struct cpuset *nset; struct domainset *dset; struct domainset *d; int error; error = cpuset_testshadow(set, mask, domain); if (error) return (error); parent = cpuset_getbase(set); dset = parent->cs_domain; if (mask == NULL) mask = &set->cs_mask; if (domain != NULL) d = domainset_shadow(dset, domain, domains); else d = set->cs_domain; nset = LIST_FIRST(cpusets); error = _cpuset_create(nset, parent, mask, d, CPUSET_INVALID); if (error == 0) { LIST_REMOVE(nset, cs_link); *nsetp = nset; } return (error); } static struct cpuset * cpuset_update_thread(struct thread *td, struct cpuset *nset) { struct cpuset *tdset; tdset = td->td_cpuset; td->td_cpuset = nset; td->td_domain.dr_policy = nset->cs_domain; sched_affinity(td); return (tdset); } static int cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask, struct domainset *domain) { struct cpuset *parent; parent = cpuset_getbase(tdset); if (mask == NULL) mask = &tdset->cs_mask; if (domain == NULL) domain = tdset->cs_domain; return cpuset_testshadow(parent, mask, domain); } static int cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask, struct domainset *domain, struct cpuset **nsetp, struct setlist *freelist, struct domainlist *domainlist) { struct cpuset *parent; parent = cpuset_getbase(tdset); if (mask == NULL) mask = &tdset->cs_mask; if (domain == NULL) domain = tdset->cs_domain; return cpuset_shadow(parent, nsetp, mask, domain, freelist, domainlist); } static int cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set, cpuset_t *mask, struct domainset *domain) { struct cpuset *parent; parent = cpuset_getbase(tdset); /* * If the thread restricted its mask then apply that same * restriction to the new set, otherwise take it wholesale. */ if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) { CPU_COPY(&tdset->cs_mask, mask); CPU_AND(mask, &set->cs_mask); } else CPU_COPY(&set->cs_mask, mask); /* * If the thread restricted the domain then we apply the * restriction to the new set but retain the policy. */ if (tdset->cs_domain != parent->cs_domain) { domainset_copy(tdset->cs_domain, domain); DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask); } else domainset_copy(set->cs_domain, domain); if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask)) return (EDEADLK); return (0); } static int cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set) { struct domainset domain; cpuset_t mask; if (tdset->cs_id != CPUSET_INVALID) return (0); return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); } static int cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set, struct cpuset **nsetp, struct setlist *freelist, struct domainlist *domainlist) { struct domainset domain; cpuset_t mask; int error; /* * If we're replacing on a thread that has not constrained the * original set we can simply accept the new set. */ if (tdset->cs_id != CPUSET_INVALID) { *nsetp = cpuset_ref(set); return (0); } error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); if (error) return (error); return cpuset_shadow(tdset, nsetp, &mask, &domain, freelist, domainlist); } /* * Handle three cases for updating an entire process. * * 1) Set is non-null. This reparents all anonymous sets to the provided * set and replaces all non-anonymous td_cpusets with the provided set. * 2) Mask is non-null. 
This replaces or creates anonymous sets for every * thread with the existing base as a parent. * 3) domain is non-null. This creates anonymous sets for every thread * and replaces the domain set. * * This is overly complicated because we can't allocate while holding a * spinlock and spinlocks must be held while changing and examining thread * state. */ static int cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask, struct domainset *domain) { struct setlist freelist; struct setlist droplist; struct domainlist domainlist; struct cpuset *nset; struct thread *td; struct proc *p; int threads; int nfree; int error; /* * The algorithm requires two passes due to locking considerations. * * 1) Lookup the process and acquire the locks in the required order. * 2) If enough cpusets have not been allocated release the locks and * allocate them. Loop. */ cpuset_freelist_init(&freelist, 1); domainset_freelist_init(&domainlist, 1); nfree = 1; LIST_INIT(&droplist); nfree = 0; for (;;) { error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset); if (error) goto out; if (nfree >= p->p_numthreads) break; threads = p->p_numthreads; PROC_UNLOCK(p); if (nfree < threads) { cpuset_freelist_add(&freelist, threads - nfree); domainset_freelist_add(&domainlist, threads - nfree); nfree = threads; } } PROC_LOCK_ASSERT(p, MA_OWNED); /* * Now that the appropriate locks are held and we have enough cpusets, * make sure the operation will succeed before applying changes. The * proc lock prevents td_cpuset from changing between calls. */ error = 0; FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (set != NULL) error = cpuset_setproc_test_setthread(td->td_cpuset, set); else error = cpuset_setproc_test_maskthread(td->td_cpuset, mask, domain); thread_unlock(td); if (error) goto unlock_out; } /* * Replace each thread's cpuset while using deferred release. We * must do this because the thread lock must be held while operating * on the thread and this limits the type of operations allowed. */ FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (set != NULL) error = cpuset_setproc_setthread(td->td_cpuset, set, &nset, &freelist, &domainlist); else error = cpuset_setproc_maskthread(td->td_cpuset, mask, domain, &nset, &freelist, &domainlist); if (error) { thread_unlock(td); break; } cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset)); thread_unlock(td); } unlock_out: PROC_UNLOCK(p); out: while ((nset = LIST_FIRST(&droplist)) != NULL) cpuset_rel_complete(nset); cpuset_freelist_free(&freelist); domainset_freelist_free(&domainlist); return (error); } static int bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen) { size_t bytes; int i, once; char *p; once = 0; p = buf; for (i = 0; i < __bitset_words(setlen); i++) { if (once != 0) { if (bufsiz < 1) return (0); *p = ','; p++; bufsiz--; } else once = 1; if (bufsiz < sizeof(__STRING(ULONG_MAX))) return (0); bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]); p += bytes; bufsiz -= bytes; } return (p - buf); } static int bitset_strscan(struct bitset *set, int setlen, const char *buf) { int i, ret; const char *p; BIT_ZERO(setlen, set); p = buf; for (i = 0; i < __bitset_words(setlen); i++) { if (*p == ',') { p++; continue; } ret = sscanf(p, "%lx", &set->__bits[i]); if (ret == 0 || ret == -1) break; while (isxdigit(*p)) p++; } return (p - buf); } /* * Return a string representing a valid layout for a cpuset_t object. * It expects an incoming buffer at least sized as CPUSETBUFSIZ. 
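 *
 * (Illustrative, not from the original source: the layout is the
 * comma-separated hexadecimal words of the set, word 0 first, so
 *
 *	char buf[CPUSETBUFSIZ];
 *	cpusetobj_strprint(buf, &set);
 *
 * yields something like "f,0,..." for a set containing CPUs 0-3, with
 * the number of words depending on CPU_SETSIZE and the word width.)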
*/ char * cpusetobj_strprint(char *buf, const cpuset_t *set) { bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set, CPU_SETSIZE); return (buf); } /* * Build a valid cpuset_t object from a string representation. * It expects an incoming buffer at least sized as CPUSETBUFSIZ. */ int cpusetobj_strscan(cpuset_t *set, const char *buf) { char p; if (strlen(buf) > CPUSETBUFSIZ - 1) return (-1); p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)]; if (p != '\0') return (-1); return (0); } /* * Handle a domainset specifier in the sysctl tree. A poiner to a pointer to * a domainset is in arg1. If the user specifies a valid domainset the * pointer is updated. * * Format is: * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred */ int sysctl_handle_domainset(SYSCTL_HANDLER_ARGS) { char buf[DOMAINSETBUFSIZ]; struct domainset *dset; struct domainset key; int policy, prefer, error; char *p; dset = *(struct domainset **)arg1; error = 0; if (dset != NULL) { p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ, (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE); sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer); } else sprintf(buf, ""); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); /* * Read in and validate the string. */ memset(&key, 0, sizeof(key)); p = &buf[bitset_strscan((struct bitset *)&key.ds_mask, DOMAINSET_SETSIZE, buf)]; if (p == buf) return (EINVAL); if (sscanf(p, ":%d:%d", &policy, &prefer) != 2) return (EINVAL); key.ds_policy = policy; key.ds_prefer = prefer; /* Domainset_create() validates the policy.*/ dset = domainset_create(&key); if (dset == NULL) return (EINVAL); *(struct domainset **)arg1 = dset; return (error); } /* * Apply an anonymous mask or a domain to a single thread. */ static int _cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain) { struct setlist cpusets; struct domainlist domainlist; struct cpuset *nset; struct cpuset *set; struct thread *td; struct proc *p; int error; cpuset_freelist_init(&cpusets, 1); domainset_freelist_init(&domainlist, domain != NULL); error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set); if (error) goto out; set = NULL; thread_lock(td); error = cpuset_shadow(td->td_cpuset, &nset, mask, domain, &cpusets, &domainlist); if (error == 0) set = cpuset_update_thread(td, nset); thread_unlock(td); PROC_UNLOCK(p); if (set) cpuset_rel(set); out: cpuset_freelist_free(&cpusets); domainset_freelist_free(&domainlist); return (error); } /* * Apply an anonymous mask to a single thread. */ int cpuset_setthread(lwpid_t id, cpuset_t *mask) { return _cpuset_setthread(id, mask, NULL); } /* * Apply new cpumask to the ithread. */ int cpuset_setithread(lwpid_t id, int cpu) { cpuset_t mask; CPU_ZERO(&mask); if (cpu == NOCPU) CPU_COPY(cpuset_root, &mask); else CPU_SET(cpu, &mask); return _cpuset_setthread(id, &mask, NULL); } /* * Initialize static domainsets after NUMA information is available. This is * called before memory allocators are initialized. 
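 *
 * (Illustrative sketch, not from the original source: once these static
 * policies exist they can be handed to NUMA-aware allocators.  Assuming
 * the malloc_domainset(9) KPI, a hypothetical caller pinning an
 * allocation to domain "dom" could do
 *
 *	buf = malloc_domainset(size, M_DEVBUF, &domainset_fixed[dom],
 *	    M_WAITOK);
 *
 * whereas &domainset_prefer[dom] only expresses a preference and may
 * fall back to other domains.)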
*/ void domainset_init(void) { struct domainset *dset; int i; dset = &domainset_roundrobin; DOMAINSET_COPY(&all_domains, &dset->ds_mask); dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; dset->ds_prefer = -1; _domainset_create(dset, NULL); for (i = 0; i < vm_ndomains; i++) { + dset = &domainset_fixed[i]; + DOMAINSET_ZERO(&dset->ds_mask); + DOMAINSET_SET(i, &dset->ds_mask); + dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; + _domainset_create(dset, NULL); + dset = &domainset_prefer[i]; DOMAINSET_COPY(&all_domains, &dset->ds_mask); dset->ds_policy = DOMAINSET_POLICY_PREFER; dset->ds_prefer = i; _domainset_create(dset, NULL); } } /* * Create the domainset for cpuset 0, 1 and cpuset 2. */ void domainset_zero(void) { struct domainset *dset, *tmp; mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE); dset = &domainset0; DOMAINSET_COPY(&all_domains, &dset->ds_mask); dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH; dset->ds_prefer = -1; curthread->td_domain.dr_policy = _domainset_create(dset, NULL); domainset_copy(dset, &domainset2); domainset2.ds_policy = DOMAINSET_POLICY_INTERLEAVE; kernel_object->domain.dr_policy = _domainset_create(&domainset2, NULL); /* Remove empty domains from the global policies. */ LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp) if (domainset_empty_vm(dset)) LIST_REMOVE(dset, ds_link); } /* * Creates system-wide cpusets and the cpuset for thread0 including three * sets: * * 0 - The root set which should represent all valid processors in the * system. It is initially created with a mask of all processors * because we don't know what processors are valid until cpuset_init() * runs. This set is immutable. * 1 - The default set which all processes are a member of until changed. * This allows an administrator to move all threads off of given cpus to * dedicate them to high priority tasks or save power etc. * 2 - The kernel set which allows restriction and policy to be applied only * to kernel threads and the kernel_object. */ struct cpuset * cpuset_thread0(void) { struct cpuset *set; int i; int error __unused; cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); domainset_zone = uma_zcreate("domainset", sizeof(struct domainset), NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); /* * Create the root system set (0) for the whole machine. Doesn't use * cpuset_create() due to NULL parent. */ set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); CPU_COPY(&all_cpus, &set->cs_mask); LIST_INIT(&set->cs_children); LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); set->cs_ref = 1; set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY; set->cs_domain = &domainset0; cpuset_zero = set; cpuset_root = &set->cs_mask; /* * Now derive a default (1), modifiable set from that to give out. */ set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); error = _cpuset_create(set, cpuset_zero, NULL, NULL, 1); KASSERT(error == 0, ("Error creating default set: %d\n", error)); cpuset_default = set; /* * Create the kernel set (2). */ set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); error = _cpuset_create(set, cpuset_zero, NULL, NULL, 2); KASSERT(error == 0, ("Error creating kernel set: %d\n", error)); set->cs_domain = &domainset2; cpuset_kernel = set; /* * Initialize the unit allocator. 0 and 1 are allocated above. */ cpuset_unr = new_unrhdr(2, INT_MAX, NULL); /* * If MD code has not initialized per-domain cpusets, place all * CPUs in domain 0. 
*/ for (i = 0; i < MAXMEMDOM; i++) if (!CPU_EMPTY(&cpuset_domain[i])) goto domains_set; CPU_COPY(&all_cpus, &cpuset_domain[0]); domains_set: return (cpuset_default); } void cpuset_kernthread(struct thread *td) { struct cpuset *set; thread_lock(td); set = td->td_cpuset; td->td_cpuset = cpuset_ref(cpuset_kernel); thread_unlock(td); cpuset_rel(set); } /* * Create a cpuset, which would be cpuset_create() but * mark the new 'set' as root. * * We are not going to reparent the td to it. Use cpuset_setproc_update_set() * for that. * * In case of no error, returns the set in *setp locked with a reference. */ int cpuset_create_root(struct prison *pr, struct cpuset **setp) { struct cpuset *set; int error; KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__)); KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__)); error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask); if (error) return (error); KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data", __func__, __LINE__)); /* Mark the set as root. */ set = *setp; set->cs_flags |= CPU_SET_ROOT; return (0); } int cpuset_setproc_update_set(struct proc *p, struct cpuset *set) { int error; KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__)); KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__)); cpuset_ref(set); error = cpuset_setproc(p->p_pid, set, NULL, NULL); if (error) return (error); cpuset_rel(set); return (0); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_args { cpusetid_t *setid; }; #endif int sys_cpuset(struct thread *td, struct cpuset_args *uap) { struct cpuset *root; struct cpuset *set; int error; thread_lock(td); root = cpuset_refroot(td->td_cpuset); thread_unlock(td); error = cpuset_create(&set, root, &root->cs_mask); cpuset_rel(root); if (error) return (error); error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id)); if (error == 0) error = cpuset_setproc(-1, set, NULL, NULL); cpuset_rel(set); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_setid_args { cpuwhich_t which; id_t id; cpusetid_t setid; }; #endif int sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap) { return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid)); } int kern_cpuset_setid(struct thread *td, cpuwhich_t which, id_t id, cpusetid_t setid) { struct cpuset *set; int error; /* * Presently we only support per-process sets. 
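 *
 * (Illustrative, not from the original source: the corresponding
 * userland call therefore always names a process, e.g.
 *
 *	if (cpuset_setid(CPU_WHICH_PID, getpid(), setid) == -1)
 *		err(1, "cpuset_setid");
 *
 * where "setid" is an id previously obtained from cpuset(2) or
 * cpuset_getid(2).)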
*/ if (which != CPU_WHICH_PID) return (EINVAL); set = cpuset_lookup(setid, td); if (set == NULL) return (ESRCH); error = cpuset_setproc(id, set, NULL, NULL); cpuset_rel(set); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_getid_args { cpulevel_t level; cpuwhich_t which; id_t id; cpusetid_t *setid; }; #endif int sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap) { return (kern_cpuset_getid(td, uap->level, uap->which, uap->id, uap->setid)); } int kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid) { struct cpuset *nset; struct cpuset *set; struct thread *ttd; struct proc *p; cpusetid_t tmpid; int error; if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET) return (EINVAL); error = cpuset_which(which, id, &p, &ttd, &set); if (error) return (error); switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_refbase(ttd->td_cpuset); thread_unlock(ttd); PROC_UNLOCK(p); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_DOMAIN: return (EINVAL); } switch (level) { case CPU_LEVEL_ROOT: nset = cpuset_refroot(set); cpuset_rel(set); set = nset; break; case CPU_LEVEL_CPUSET: break; case CPU_LEVEL_WHICH: break; } tmpid = set->cs_id; cpuset_rel(set); if (error == 0) error = copyout(&tmpid, setid, sizeof(tmpid)); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_getaffinity_args { cpulevel_t level; cpuwhich_t which; id_t id; size_t cpusetsize; cpuset_t *mask; }; #endif int sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap) { return (kern_cpuset_getaffinity(td, uap->level, uap->which, uap->id, uap->cpusetsize, uap->mask)); } int kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *maskp) { struct thread *ttd; struct cpuset *nset; struct cpuset *set; struct proc *p; cpuset_t *mask; int error; size_t size; if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY) return (ERANGE); /* In Capability mode, you can only get your own CPU set. 
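 *
 * (Illustrative, not from the original source: inside a capsicum
 * sandbox only a self-referential query passes these checks, e.g.
 *
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 *
 * while naming another process, another level, or an explicit id
 * fails with ECAPMODE.)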
*/ if (IN_CAPABILITY_MODE(td)) { if (level != CPU_LEVEL_WHICH) return (ECAPMODE); if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) return (ECAPMODE); if (id != -1) return (ECAPMODE); } size = cpusetsize; mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO); error = cpuset_which(which, id, &p, &ttd, &set); if (error) goto out; switch (level) { case CPU_LEVEL_ROOT: case CPU_LEVEL_CPUSET: switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_ref(ttd->td_cpuset); thread_unlock(ttd); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; goto out; } if (level == CPU_LEVEL_ROOT) nset = cpuset_refroot(set); else nset = cpuset_refbase(set); CPU_COPY(&nset->cs_mask, mask); cpuset_rel(nset); break; case CPU_LEVEL_WHICH: switch (which) { case CPU_WHICH_TID: thread_lock(ttd); CPU_COPY(&ttd->td_cpuset->cs_mask, mask); thread_unlock(ttd); break; case CPU_WHICH_PID: FOREACH_THREAD_IN_PROC(p, ttd) { thread_lock(ttd); CPU_OR(mask, &ttd->td_cpuset->cs_mask); thread_unlock(ttd); } break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: CPU_COPY(&set->cs_mask, mask); break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: error = intr_getaffinity(id, which, mask); break; case CPU_WHICH_DOMAIN: if (id < 0 || id >= MAXMEMDOM) error = ESRCH; else CPU_COPY(&cpuset_domain[id], mask); break; } break; default: error = EINVAL; break; } if (set) cpuset_rel(set); if (p) PROC_UNLOCK(p); if (error == 0) error = copyout(mask, maskp, size); out: free(mask, M_TEMP); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_setaffinity_args { cpulevel_t level; cpuwhich_t which; id_t id; size_t cpusetsize; const cpuset_t *mask; }; #endif int sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap) { return (kern_cpuset_setaffinity(td, uap->level, uap->which, uap->id, uap->cpusetsize, uap->mask)); } int kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *maskp) { struct cpuset *nset; struct cpuset *set; struct thread *ttd; struct proc *p; cpuset_t *mask; int error; if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY) return (ERANGE); /* In Capability mode, you can only set your own CPU set. */ if (IN_CAPABILITY_MODE(td)) { if (level != CPU_LEVEL_WHICH) return (ECAPMODE); if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) return (ECAPMODE); if (id != -1) return (ECAPMODE); } mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO); error = copyin(maskp, mask, cpusetsize); if (error) goto out; /* * Verify that no high bits are set. 
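 * In other words, a caller passing a cpusetsize larger than the kernel's
 * cpuset_t must zero the excess bytes; otherwise the request is rejected
 * with EINVAL (editor's note).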
*/ if (cpusetsize > sizeof(cpuset_t)) { char *end; char *cp; end = cp = (char *)&mask->__bits; end += cpusetsize; cp += sizeof(cpuset_t); while (cp != end) if (*cp++ != 0) { error = EINVAL; goto out; } } switch (level) { case CPU_LEVEL_ROOT: case CPU_LEVEL_CPUSET: error = cpuset_which(which, id, &p, &ttd, &set); if (error) break; switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_ref(ttd->td_cpuset); thread_unlock(ttd); PROC_UNLOCK(p); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; goto out; } if (level == CPU_LEVEL_ROOT) nset = cpuset_refroot(set); else nset = cpuset_refbase(set); error = cpuset_modify(nset, mask); cpuset_rel(nset); cpuset_rel(set); break; case CPU_LEVEL_WHICH: switch (which) { case CPU_WHICH_TID: error = cpuset_setthread(id, mask); break; case CPU_WHICH_PID: error = cpuset_setproc(id, NULL, mask, NULL); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: error = cpuset_which(which, id, &p, &ttd, &set); if (error == 0) { error = cpuset_modify(set, mask); cpuset_rel(set); } break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: error = intr_setaffinity(id, which, mask); break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } out: free(mask, M_TEMP); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_getdomain_args { cpulevel_t level; cpuwhich_t which; id_t id; size_t domainsetsize; domainset_t *mask; int *policy; }; #endif int sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap) { return (kern_cpuset_getdomain(td, uap->level, uap->which, uap->id, uap->domainsetsize, uap->mask, uap->policy)); } int kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp) { struct domainset outset; struct thread *ttd; struct cpuset *nset; struct cpuset *set; struct domainset *dset; struct proc *p; domainset_t *mask; int error; if (domainsetsize < sizeof(domainset_t) || domainsetsize > DOMAINSET_MAXSIZE / NBBY) return (ERANGE); /* In Capability mode, you can only get your own domain set. */ if (IN_CAPABILITY_MODE(td)) { if (level != CPU_LEVEL_WHICH) return (ECAPMODE); if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) return (ECAPMODE); if (id != -1) return (ECAPMODE); } mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO); bzero(&outset, sizeof(outset)); error = cpuset_which(which, id, &p, &ttd, &set); if (error) goto out; switch (level) { case CPU_LEVEL_ROOT: case CPU_LEVEL_CPUSET: switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_ref(ttd->td_cpuset); thread_unlock(ttd); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; goto out; } if (level == CPU_LEVEL_ROOT) nset = cpuset_refroot(set); else nset = cpuset_refbase(set); domainset_copy(nset->cs_domain, &outset); cpuset_rel(nset); break; case CPU_LEVEL_WHICH: switch (which) { case CPU_WHICH_TID: thread_lock(ttd); domainset_copy(ttd->td_cpuset->cs_domain, &outset); thread_unlock(ttd); break; case CPU_WHICH_PID: FOREACH_THREAD_IN_PROC(p, ttd) { thread_lock(ttd); dset = ttd->td_cpuset->cs_domain; /* Show all domains in the proc. */ DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask); /* Last policy wins. 
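 * (Editor's note: when the threads of a process carry different domain
 * policies, the reported mask is the union of all of them, but the
 * reported policy and preferred domain are simply those of the last
 * thread visited.)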
*/ outset.ds_policy = dset->ds_policy; outset.ds_prefer = dset->ds_prefer; thread_unlock(ttd); } break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: domainset_copy(set->cs_domain, &outset); break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; break; } break; default: error = EINVAL; break; } if (set) cpuset_rel(set); if (p) PROC_UNLOCK(p); /* * Translate prefer into a set containing only the preferred domain, * not the entire fallback set. */ if (outset.ds_policy == DOMAINSET_POLICY_PREFER) { DOMAINSET_ZERO(&outset.ds_mask); DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask); } DOMAINSET_COPY(&outset.ds_mask, mask); if (error == 0) error = copyout(mask, maskp, domainsetsize); if (error == 0) if (suword32(policyp, outset.ds_policy) != 0) error = EFAULT; out: free(mask, M_TEMP); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_setdomain_args { cpulevel_t level; cpuwhich_t which; id_t id; size_t domainsetsize; domainset_t *mask; int policy; }; #endif int sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap) { return (kern_cpuset_setdomain(td, uap->level, uap->which, uap->id, uap->domainsetsize, uap->mask, uap->policy)); } int kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, const domainset_t *maskp, int policy) { struct cpuset *nset; struct cpuset *set; struct thread *ttd; struct proc *p; struct domainset domain; domainset_t *mask; int error; if (domainsetsize < sizeof(domainset_t) || domainsetsize > DOMAINSET_MAXSIZE / NBBY) return (ERANGE); if (policy <= DOMAINSET_POLICY_INVALID || policy > DOMAINSET_POLICY_MAX) return (EINVAL); /* In Capability mode, you can only set your own CPU set. */ if (IN_CAPABILITY_MODE(td)) { if (level != CPU_LEVEL_WHICH) return (ECAPMODE); if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) return (ECAPMODE); if (id != -1) return (ECAPMODE); } memset(&domain, 0, sizeof(domain)); mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO); error = copyin(maskp, mask, domainsetsize); if (error) goto out; /* * Verify that no high bits are set. */ if (domainsetsize > sizeof(domainset_t)) { char *end; char *cp; end = cp = (char *)&mask->__bits; end += domainsetsize; cp += sizeof(domainset_t); while (cp != end) if (*cp++ != 0) { error = EINVAL; goto out; } } DOMAINSET_COPY(mask, &domain.ds_mask); domain.ds_policy = policy; /* Translate preferred policy into a mask and fallback. */ if (policy == DOMAINSET_POLICY_PREFER) { /* Only support a single preferred domain. */ if (DOMAINSET_COUNT(&domain.ds_mask) != 1) { error = EINVAL; goto out; } domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1; /* This will be constrained by domainset_shadow(). 
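 *
 * Illustrative userland sketch (editor's addition; error handling
 * omitted): prefer NUMA domain 1 for the calling process, falling back
 * to the rest of the parent set when that domain is exhausted:
 *
 *	domainset_t dmask;
 *
 *	DOMAINSET_ZERO(&dmask);
 *	DOMAINSET_SET(1, &dmask);
 *	(void)cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(dmask), &dmask, DOMAINSET_POLICY_PREFER);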
*/ DOMAINSET_FILL(&domain.ds_mask); } /* * When given an impossible policy, fall back to interleaving * across all domains */ if (domainset_empty_vm(&domain)) domainset_copy(&domainset2, &domain); switch (level) { case CPU_LEVEL_ROOT: case CPU_LEVEL_CPUSET: error = cpuset_which(which, id, &p, &ttd, &set); if (error) break; switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_ref(ttd->td_cpuset); thread_unlock(ttd); PROC_UNLOCK(p); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; goto out; } if (level == CPU_LEVEL_ROOT) nset = cpuset_refroot(set); else nset = cpuset_refbase(set); error = cpuset_modify_domain(nset, &domain); cpuset_rel(nset); cpuset_rel(set); break; case CPU_LEVEL_WHICH: switch (which) { case CPU_WHICH_TID: error = _cpuset_setthread(id, NULL, &domain); break; case CPU_WHICH_PID: error = cpuset_setproc(id, NULL, NULL, &domain); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: error = cpuset_which(which, id, &p, &ttd, &set); if (error == 0) { error = cpuset_modify_domain(set, &domain); cpuset_rel(set); } break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: default: error = EINVAL; break; } break; default: error = EINVAL; break; } out: free(mask, M_TEMP); return (error); } #ifdef DDB static void ddb_display_bitset(const struct bitset *set, int size) { int bit, once; for (once = 0, bit = 0; bit < size; bit++) { if (CPU_ISSET(bit, set)) { if (once == 0) { db_printf("%d", bit); once = 1; } else db_printf(",%d", bit); } } if (once == 0) db_printf(""); } void ddb_display_cpuset(const cpuset_t *set) { ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE); } static void ddb_display_domainset(const domainset_t *set) { ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE); } DB_SHOW_COMMAND(cpusets, db_show_cpusets) { struct cpuset *set; LIST_FOREACH(set, &cpuset_ids, cs_link) { db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n", set, set->cs_id, set->cs_ref, set->cs_flags, (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0); db_printf(" cpu mask="); ddb_display_cpuset(&set->cs_mask); db_printf("\n"); db_printf(" domain policy %d prefer %d mask=", set->cs_domain->ds_policy, set->cs_domain->ds_prefer); ddb_display_domainset(&set->cs_domain->ds_mask); db_printf("\n"); if (db_pager_quit) break; } } DB_SHOW_COMMAND(domainsets, db_show_domainsets) { struct domainset *set; LIST_FOREACH(set, &cpuset_domains, ds_link) { db_printf("set=%p policy %d prefer %d cnt %d\n", set, set->ds_policy, set->ds_prefer, set->ds_cnt); db_printf(" mask ="); ddb_display_domainset(&set->ds_mask); db_printf("\n"); } } #endif /* DDB */ Index: head/sys/kern/kern_malloc.c =================================================================== --- head/sys/kern/kern_malloc.c (revision 339926) +++ head/sys/kern/kern_malloc.c (revision 339927) @@ -1,1297 +1,1315 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1987, 1991, 1993 * The Regents of the University of California. * Copyright (c) 2005-2009 Robert N. M. Watson * Copyright (c) 2008 Otto Moerbeek (mallocarray) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 */ /* * Kernel malloc(9) implementation -- general purpose kernel memory allocator * based on memory types. Back end is implemented using the UMA(9) zone * allocator. A set of fixed-size buckets are used for smaller allocations, * and a special UMA allocation interface is used for larger allocations. * Callers declare memory types, and statistics are maintained independently * for each memory type. Statistics are maintained per-CPU for performance * reasons. See malloc(9) and comments in malloc.h for a detailed * description. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #ifdef DEBUG_MEMGUARD #include #endif #ifdef DEBUG_REDZONE #include #endif #if defined(INVARIANTS) && defined(__i386__) #include #endif #include #ifdef KDTRACE_HOOKS #include bool __read_frequently dtrace_malloc_enabled; dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe; #endif #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \ defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE) #define MALLOC_DEBUG 1 #endif /* * When realloc() is called, if the new size is sufficiently smaller than * the old size, realloc() will allocate a new, smaller block to avoid * wasting memory. 'Sufficiently smaller' is defined as: newsize <= * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. */ #ifndef REALLOC_FRACTION #define REALLOC_FRACTION 1 /* new block if <= half the size */ #endif /* * Centrally define some common malloc types. 
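 * Subsystems normally declare their own types instead of reusing these;
 * a minimal sketch (editor's illustration, names hypothetical):
 *
 *	MALLOC_DECLARE(M_FOODEV);		in a shared header
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo(4) driver buffers");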
*/ MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); static struct malloc_type *kmemstatistics; static int kmemcount; #define KMEM_ZSHIFT 4 #define KMEM_ZBASE 16 #define KMEM_ZMASK (KMEM_ZBASE - 1) #define KMEM_ZMAX 65536 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) static uint8_t kmemsize[KMEM_ZSIZE + 1]; #ifndef MALLOC_DEBUG_MAXZONES #define MALLOC_DEBUG_MAXZONES 1 #endif static int numzones = MALLOC_DEBUG_MAXZONES; /* * Small malloc(9) memory allocations are allocated from a set of UMA buckets * of various sizes. * * XXX: The comment here used to read "These won't be powers of two for * long." It's possible that a significant amount of wasted memory could be * recovered by tuning the sizes of these buckets. */ struct { int kz_size; char *kz_name; uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES]; } kmemzones[] = { {16, "16", }, {32, "32", }, {64, "64", }, {128, "128", }, {256, "256", }, {512, "512", }, {1024, "1024", }, {2048, "2048", }, {4096, "4096", }, {8192, "8192", }, {16384, "16384", }, {32768, "32768", }, {65536, "65536", }, {0, NULL}, }; /* * Zone to allocate malloc type descriptions from. For ABI reasons, memory * types are described by a data structure passed by the declaring code, but * the malloc(9) implementation has its own data structure describing the * type and statistics. This permits the malloc(9)-internal data structures * to be modified without breaking binary-compiled kernel modules that * declare malloc types. */ static uma_zone_t mt_zone; static uma_zone_t mt_stats_zone; u_long vm_kmem_size; SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0, "Size of kernel memory"); static u_long kmem_zmax = KMEM_ZMAX; SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0, "Maximum allocation size that malloc(9) would use UMA as backend"); static u_long vm_kmem_size_min; SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0, "Minimum size of kernel memory"); static u_long vm_kmem_size_max; SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0, "Maximum size of kernel memory"); static u_int vm_kmem_size_scale; SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0, "Scale factor for kernel memory size"); static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size, CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, sysctl_kmem_map_size, "LU", "Current kmem allocation size"); static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free, CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, sysctl_kmem_map_free, "LU", "Free space in kmem"); /* * The malloc_mtx protects the kmemstatistics linked list. */ struct mtx malloc_mtx; #ifdef MALLOC_PROFILE uint64_t krequests[KMEM_ZSIZE + 1]; static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS); #endif static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS); /* * time_uptime of the last malloc(9) failure (induced or real). */ static time_t t_malloc_fail; #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1) static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0, "Kernel malloc debugging options"); #endif /* * malloc(9) fault injection -- cause malloc failures every (n) mallocs when * the caller specifies M_NOWAIT. If set to 0, no failures are caused. 
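 * For example (editor's note), on a kernel built with MALLOC_MAKE_FAILURES,
 * setting the sysctl debug.malloc.failure_rate to 100 makes roughly every
 * 100th M_NOWAIT allocation fail, exercising callers' error paths.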
*/ #ifdef MALLOC_MAKE_FAILURES static int malloc_failure_rate; static int malloc_nowait_count; static int malloc_failure_count; SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN, &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail"); SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD, &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures"); #endif static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS) { u_long size; size = uma_size(); return (sysctl_handle_long(oidp, &size, 0, req)); } static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS) { u_long size, limit; /* The sysctl is unsigned, implement as a saturation value. */ size = uma_size(); limit = uma_limit(); if (size > limit) size = 0; else size = limit - size; return (sysctl_handle_long(oidp, &size, 0, req)); } /* * malloc(9) uma zone separation -- sub-page buffer overruns in one * malloc type will affect only a subset of other malloc types. */ #if MALLOC_DEBUG_MAXZONES > 1 static void tunable_set_numzones(void) { TUNABLE_INT_FETCH("debug.malloc.numzones", &numzones); /* Sanity check the number of malloc uma zones. */ if (numzones <= 0) numzones = 1; if (numzones > MALLOC_DEBUG_MAXZONES) numzones = MALLOC_DEBUG_MAXZONES; } SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL); SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &numzones, 0, "Number of malloc uma subzones"); /* * Any number that changes regularly is an okay choice for the * offset. Build numbers are pretty good if you have them. */ static u_int zone_offset = __FreeBSD_version; TUNABLE_INT("debug.malloc.zone_offset", &zone_offset); SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN, &zone_offset, 0, "Separate malloc types by examining the " "Nth character in the malloc type short description."); static void mtp_set_subzone(struct malloc_type *mtp) { struct malloc_type_internal *mtip; const char *desc; size_t len; u_int val; mtip = mtp->ks_handle; desc = mtp->ks_shortdesc; if (desc == NULL || (len = strlen(desc)) == 0) val = 0; else val = desc[zone_offset % len]; mtip->mti_zone = (val % numzones); } static inline u_int mtp_get_subzone(struct malloc_type *mtp) { struct malloc_type_internal *mtip; mtip = mtp->ks_handle; KASSERT(mtip->mti_zone < numzones, ("mti_zone %u out of range %d", mtip->mti_zone, numzones)); return (mtip->mti_zone); } #elif MALLOC_DEBUG_MAXZONES == 0 #error "MALLOC_DEBUG_MAXZONES must be positive." #else static void mtp_set_subzone(struct malloc_type *mtp) { struct malloc_type_internal *mtip; mtip = mtp->ks_handle; mtip->mti_zone = 0; } static inline u_int mtp_get_subzone(struct malloc_type *mtp) { return (0); } #endif /* MALLOC_DEBUG_MAXZONES > 1 */ int malloc_last_fail(void) { return (time_uptime - t_malloc_fail); } /* * An allocation has succeeded -- update malloc type statistics for the * amount of bucket size. Occurs within a critical section so that the * thread isn't preempted and doesn't migrate while updating per-CPU * statistics.
*/ static void malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size, int zindx) { struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp; critical_enter(); mtip = mtp->ks_handle; mtsp = zpcpu_get(mtip->mti_stats); if (size > 0) { mtsp->mts_memalloced += size; mtsp->mts_numallocs++; } if (zindx != -1) mtsp->mts_size |= 1 << zindx; #ifdef KDTRACE_HOOKS if (__predict_false(dtrace_malloc_enabled)) { uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC]; if (probe_id != 0) (dtrace_malloc_probe)(probe_id, (uintptr_t) mtp, (uintptr_t) mtip, (uintptr_t) mtsp, size, zindx); } #endif critical_exit(); } void malloc_type_allocated(struct malloc_type *mtp, unsigned long size) { if (size > 0) malloc_type_zone_allocated(mtp, size, -1); } /* * A free operation has occurred -- update malloc type statistics for the * amount of the bucket size. Occurs within a critical section so that the * thread isn't preempted and doesn't migrate while updating per-CPU * statistics. */ void malloc_type_freed(struct malloc_type *mtp, unsigned long size) { struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp; critical_enter(); mtip = mtp->ks_handle; mtsp = zpcpu_get(mtip->mti_stats); mtsp->mts_memfreed += size; mtsp->mts_numfrees++; #ifdef KDTRACE_HOOKS if (__predict_false(dtrace_malloc_enabled)) { uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE]; if (probe_id != 0) (dtrace_malloc_probe)(probe_id, (uintptr_t) mtp, (uintptr_t) mtip, (uintptr_t) mtsp, size, 0); } #endif critical_exit(); } /* * contigmalloc: * * Allocate a block of physically contiguous memory. * * If M_NOWAIT is set, this routine will not block and return NULL if * the allocation fails. */ void * contigmalloc(unsigned long size, struct malloc_type *type, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) { void *ret; ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment, boundary, VM_MEMATTR_DEFAULT); if (ret != NULL) malloc_type_allocated(type, round_page(size)); return (ret); } void * -contigmalloc_domain(unsigned long size, struct malloc_type *type, - int domain, int flags, vm_paddr_t low, vm_paddr_t high, +contigmalloc_domainset(unsigned long size, struct malloc_type *type, + struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) { void *ret; - ret = (void *)kmem_alloc_contig_domain(domain, size, flags, low, high, + ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high, alignment, boundary, VM_MEMATTR_DEFAULT); if (ret != NULL) malloc_type_allocated(type, round_page(size)); return (ret); } /* * contigfree: * * Free a block of memory allocated by contigmalloc. * * This routine may not block. */ void contigfree(void *addr, unsigned long size, struct malloc_type *type) { kmem_free((vm_offset_t)addr, size); malloc_type_freed(type, round_page(size)); } #ifdef MALLOC_DEBUG static int malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp, int flags) { #ifdef INVARIANTS int indx; KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic")); /* * Check that exactly one of M_WAITOK or M_NOWAIT is specified. 
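 * (Editor's note: a call passing neither flag, or both, is reported at
 * most once via printf() and kdb_backtrace() and is then treated as
 * M_WAITOK.)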
*/ indx = flags & (M_WAITOK | M_NOWAIT); if (indx != M_NOWAIT && indx != M_WAITOK) { static struct timeval lasterr; static int curerr, once; if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { printf("Bad malloc flags: %x\n", indx); kdb_backtrace(); flags |= M_WAITOK; once++; } } #endif #ifdef MALLOC_MAKE_FAILURES if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) { atomic_add_int(&malloc_nowait_count, 1); if ((malloc_nowait_count % malloc_failure_rate) == 0) { atomic_add_int(&malloc_failure_count, 1); t_malloc_fail = time_uptime; *vap = NULL; return (EJUSTRETURN); } } #endif if (flags & M_WAITOK) { KASSERT(curthread->td_intr_nesting_level == 0, ("malloc(M_WAITOK) in interrupt context")); KASSERT(curthread->td_epochnest == 0, ("malloc(M_WAITOK) in epoch context")); } KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("malloc: called with spinlock or critical section held")); #ifdef DEBUG_MEMGUARD if (memguard_cmp_mtp(mtp, *sizep)) { *vap = memguard_alloc(*sizep, flags); if (*vap != NULL) return (EJUSTRETURN); /* This is unfortunate but should not be fatal. */ } #endif #ifdef DEBUG_REDZONE *sizep = redzone_size_ntor(*sizep); #endif return (0); } #endif /* * malloc: * * Allocate a block of memory. * * If M_NOWAIT is set, this routine will not block and return NULL if * the allocation fails. */ void * (malloc)(size_t size, struct malloc_type *mtp, int flags) { int indx; caddr_t va; uma_zone_t zone; #if defined(DEBUG_REDZONE) unsigned long osize = size; #endif #ifdef MALLOC_DEBUG va = NULL; if (malloc_dbg(&va, &size, mtp, flags) != 0) return (va); #endif if (size <= kmem_zmax && (flags & M_EXEC) == 0) { if (size & KMEM_ZMASK) size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; indx = kmemsize[size >> KMEM_ZSHIFT]; zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)]; #ifdef MALLOC_PROFILE krequests[size >> KMEM_ZSHIFT]++; #endif va = uma_zalloc(zone, flags); if (va != NULL) size = zone->uz_size; malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); } else { size = roundup(size, PAGE_SIZE); zone = NULL; va = uma_large_malloc(size, flags); malloc_type_allocated(mtp, va == NULL ? 0 : size); } if (flags & M_WAITOK) KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); else if (va == NULL) t_malloc_fail = time_uptime; #ifdef DEBUG_REDZONE if (va != NULL) va = redzone_setup(va, osize); #endif return ((void *) va); } -void * -malloc_domain(size_t size, struct malloc_type *mtp, int domain, - int flags) +static void * +malloc_domain(size_t size, struct malloc_type *mtp, int domain, int flags) { int indx; caddr_t va; uma_zone_t zone; #if defined(DEBUG_REDZONE) unsigned long osize = size; #endif #ifdef MALLOC_DEBUG va = NULL; if (malloc_dbg(&va, &size, mtp, flags) != 0) return (va); #endif if (size <= kmem_zmax && (flags & M_EXEC) == 0) { if (size & KMEM_ZMASK) size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; indx = kmemsize[size >> KMEM_ZSHIFT]; zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)]; #ifdef MALLOC_PROFILE krequests[size >> KMEM_ZSHIFT]++; #endif va = uma_zalloc_domain(zone, NULL, domain, flags); if (va != NULL) size = zone->uz_size; malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); } else { size = roundup(size, PAGE_SIZE); zone = NULL; va = uma_large_malloc_domain(size, domain, flags); malloc_type_allocated(mtp, va == NULL ? 
0 : size); } if (flags & M_WAITOK) KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); else if (va == NULL) t_malloc_fail = time_uptime; #ifdef DEBUG_REDZONE if (va != NULL) va = redzone_setup(va, osize); #endif return ((void *) va); +} + +void * +malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds, + int flags) +{ + struct vm_domainset_iter di; + void *ret; + int domain; + + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); + do { + ret = malloc_domain(size, mtp, domain, flags); + if (ret != NULL) + break; + } while (vm_domainset_iter_policy(&di, &domain) == 0); + + return (ret); } void * mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags) { if (WOULD_OVERFLOW(nmemb, size)) panic("mallocarray: %zu * %zu overflowed", nmemb, size); return (malloc(size * nmemb, type, flags)); } #ifdef INVARIANTS static void free_save_type(void *addr, struct malloc_type *mtp, u_long size) { struct malloc_type **mtpp = addr; /* * Cache a pointer to the malloc_type that most recently freed * this memory here. This way we know who is most likely to * have stepped on it later. * * This code assumes that size is a multiple of 8 bytes for * 64 bit machines */ mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR); mtpp += (size - sizeof(struct malloc_type *)) / sizeof(struct malloc_type *); *mtpp = mtp; } #endif #ifdef MALLOC_DEBUG static int free_dbg(void **addrp, struct malloc_type *mtp) { void *addr; addr = *addrp; KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic")); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("free: called with spinlock or critical section held")); /* free(NULL, ...) does nothing */ if (addr == NULL) return (EJUSTRETURN); #ifdef DEBUG_MEMGUARD if (is_memguard_addr(addr)) { memguard_free(addr); return (EJUSTRETURN); } #endif #ifdef DEBUG_REDZONE redzone_check(addr); *addrp = redzone_addr_ntor(addr); #endif return (0); } #endif /* * free: * * Free a block of memory allocated by malloc. * * This routine may not block. */ void free(void *addr, struct malloc_type *mtp) { uma_slab_t slab; u_long size; #ifdef MALLOC_DEBUG if (free_dbg(&addr, mtp) != 0) return; #endif /* free(NULL, ...) does nothing */ if (addr == NULL) return; slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); if (slab == NULL) panic("free: address %p(%p) has not been allocated.\n", addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); if (!(slab->us_flags & UMA_SLAB_MALLOC)) { size = slab->us_keg->uk_size; #ifdef INVARIANTS free_save_type(addr, mtp, size); #endif uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); } else { size = slab->us_size; uma_large_free(slab); } malloc_type_freed(mtp, size); } void free_domain(void *addr, struct malloc_type *mtp) { uma_slab_t slab; u_long size; #ifdef MALLOC_DEBUG if (free_dbg(&addr, mtp) != 0) return; #endif /* free(NULL, ...) 
does nothing */ if (addr == NULL) return; slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); if (slab == NULL) panic("free_domain: address %p(%p) has not been allocated.\n", addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); if (!(slab->us_flags & UMA_SLAB_MALLOC)) { size = slab->us_keg->uk_size; #ifdef INVARIANTS free_save_type(addr, mtp, size); #endif uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); } else { size = slab->us_size; uma_large_free(slab); } malloc_type_freed(mtp, size); } /* * realloc: change the size of a memory block */ void * realloc(void *addr, size_t size, struct malloc_type *mtp, int flags) { uma_slab_t slab; unsigned long alloc; void *newaddr; KASSERT(mtp->ks_magic == M_MAGIC, ("realloc: bad malloc type magic")); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("realloc: called with spinlock or critical section held")); /* realloc(NULL, ...) is equivalent to malloc(...) */ if (addr == NULL) return (malloc(size, mtp, flags)); /* * XXX: Should report free of old memory and alloc of new memory to * per-CPU stats. */ #ifdef DEBUG_MEMGUARD if (is_memguard_addr(addr)) return (memguard_realloc(addr, size, mtp, flags)); #endif #ifdef DEBUG_REDZONE slab = NULL; alloc = redzone_get_size(addr); #else slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); /* Sanity check */ KASSERT(slab != NULL, ("realloc: address %p out of range", (void *)addr)); /* Get the size of the original block */ if (!(slab->us_flags & UMA_SLAB_MALLOC)) alloc = slab->us_keg->uk_size; else alloc = slab->us_size; /* Reuse the original block if appropriate */ if (size <= alloc && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) return (addr); #endif /* !DEBUG_REDZONE */ /* Allocate a new, bigger (or smaller) block */ if ((newaddr = malloc(size, mtp, flags)) == NULL) return (NULL); /* Copy over original contents */ bcopy(addr, newaddr, min(size, alloc)); free(addr, mtp); return (newaddr); } /* * reallocf: same as realloc() but free memory on failure. */ void * reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags) { void *mem; if ((mem = realloc(addr, size, mtp, flags)) == NULL) free(addr, mtp); return (mem); } #ifndef __sparc64__ CTASSERT(VM_KMEM_SIZE_SCALE >= 1); #endif /* * Initialize the kernel memory (kmem) arena. */ void kmeminit(void) { u_long mem_size; u_long tmp; #ifdef VM_KMEM_SIZE if (vm_kmem_size == 0) vm_kmem_size = VM_KMEM_SIZE; #endif #ifdef VM_KMEM_SIZE_MIN if (vm_kmem_size_min == 0) vm_kmem_size_min = VM_KMEM_SIZE_MIN; #endif #ifdef VM_KMEM_SIZE_MAX if (vm_kmem_size_max == 0) vm_kmem_size_max = VM_KMEM_SIZE_MAX; #endif /* * Calculate the amount of kernel virtual address (KVA) space that is * preallocated to the kmem arena. In order to support a wide range * of machines, it is a function of the physical memory size, * specifically, * * min(max(physical memory size / VM_KMEM_SIZE_SCALE, * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX) * * Every architecture must define an integral value for * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN * and VM_KMEM_SIZE_MAX, which represent respectively the floor and * ceiling on this preallocation, are optional. Typically, * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on * a given architecture. 
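 * As a worked example (editor's illustration with hypothetical numbers):
 * with 8 GB of physical memory and VM_KMEM_SIZE_SCALE == 2, the default
 * preallocation is 4 GB, which is then clamped to the optional
 * VM_KMEM_SIZE_MIN/VM_KMEM_SIZE_MAX bounds and, below, to at most twice
 * the physical memory size.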
*/ mem_size = vm_cnt.v_page_count; if (mem_size <= 32768) /* delphij XXX 128MB */ kmem_zmax = PAGE_SIZE; if (vm_kmem_size_scale < 1) vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; /* * Check if we should use defaults for the "vm_kmem_size" * variable: */ if (vm_kmem_size == 0) { vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE; if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) vm_kmem_size = vm_kmem_size_min; if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) vm_kmem_size = vm_kmem_size_max; } /* * The amount of KVA space that is preallocated to the * kmem arena can be set statically at compile-time or manually * through the kernel environment. However, it is still limited to * twice the physical memory size, which has been sufficient to handle * the most severe cases of external fragmentation in the kmem arena. */ if (vm_kmem_size / 2 / PAGE_SIZE > mem_size) vm_kmem_size = 2 * mem_size * PAGE_SIZE; vm_kmem_size = round_page(vm_kmem_size); #ifdef DEBUG_MEMGUARD tmp = memguard_fudge(vm_kmem_size, kernel_map); #else tmp = vm_kmem_size; #endif uma_set_limit(tmp); #ifdef DEBUG_MEMGUARD /* * Initialize MemGuard if support compiled in. MemGuard is a * replacement allocator used for detecting tamper-after-free * scenarios as they occur. It is only used for debugging. */ memguard_init(kernel_arena); #endif } /* * Initialize the kernel memory allocator */ /* ARGSUSED*/ static void mallocinit(void *dummy) { int i; uint8_t indx; mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); kmeminit(); if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX) kmem_zmax = KMEM_ZMAX; mt_stats_zone = uma_zcreate("mt_stats_zone", sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU); mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal), #ifdef INVARIANTS mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, #else NULL, NULL, NULL, NULL, #endif UMA_ALIGN_PTR, UMA_ZONE_MALLOC); for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { int size = kmemzones[indx].kz_size; char *name = kmemzones[indx].kz_name; int subzone; for (subzone = 0; subzone < numzones; subzone++) { kmemzones[indx].kz_zone[subzone] = uma_zcreate(name, size, #ifdef INVARIANTS mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, #else NULL, NULL, NULL, NULL, #endif UMA_ALIGN_PTR, UMA_ZONE_MALLOC); } for (;i <= size; i+= KMEM_ZBASE) kmemsize[i >> KMEM_ZSHIFT] = indx; } } SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL); void malloc_init(void *data) { struct malloc_type_internal *mtip; struct malloc_type *mtp; KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init")); mtp = data; if (mtp->ks_magic != M_MAGIC) panic("malloc_init: bad malloc type magic"); mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO); mtp->ks_handle = mtip; mtp_set_subzone(mtp); mtx_lock(&malloc_mtx); mtp->ks_next = kmemstatistics; kmemstatistics = mtp; kmemcount++; mtx_unlock(&malloc_mtx); } void malloc_uninit(void *data) { struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp; struct malloc_type *mtp, *temp; uma_slab_t slab; long temp_allocs, temp_bytes; int i; mtp = data; KASSERT(mtp->ks_magic == M_MAGIC, ("malloc_uninit: bad malloc type magic")); KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL")); mtx_lock(&malloc_mtx); mtip = mtp->ks_handle; mtp->ks_handle = NULL; if (mtp != kmemstatistics) { for (temp = kmemstatistics; temp != NULL; temp = temp->ks_next) { if (temp->ks_next == mtp) { 
temp->ks_next = mtp->ks_next; break; } } KASSERT(temp, ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc)); } else kmemstatistics = mtp->ks_next; kmemcount--; mtx_unlock(&malloc_mtx); /* * Look for memory leaks. */ temp_allocs = temp_bytes = 0; for (i = 0; i <= mp_maxid; i++) { mtsp = zpcpu_get_cpu(mtip->mti_stats, i); temp_allocs += mtsp->mts_numallocs; temp_allocs -= mtsp->mts_numfrees; temp_bytes += mtsp->mts_memalloced; temp_bytes -= mtsp->mts_memfreed; } if (temp_allocs > 0 || temp_bytes > 0) { printf("Warning: memory type %s leaked memory on destroy " "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc, temp_allocs, temp_bytes); } slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK)); uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats); uma_zfree_arg(mt_zone, mtip, slab); } struct malloc_type * malloc_desc2type(const char *desc) { struct malloc_type *mtp; mtx_assert(&malloc_mtx, MA_OWNED); for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { if (strcmp(mtp->ks_shortdesc, desc) == 0) return (mtp); } return (NULL); } static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS) { struct malloc_type_stream_header mtsh; struct malloc_type_internal *mtip; struct malloc_type_stats *mtsp, zeromts; struct malloc_type_header mth; struct malloc_type *mtp; int error, i; struct sbuf sbuf; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); mtx_lock(&malloc_mtx); bzero(&zeromts, sizeof(zeromts)); /* * Insert stream header. */ bzero(&mtsh, sizeof(mtsh)); mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION; mtsh.mtsh_maxcpus = MAXCPU; mtsh.mtsh_count = kmemcount; (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)); /* * Insert alternating sequence of type headers and type statistics. */ for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { mtip = (struct malloc_type_internal *)mtp->ks_handle; /* * Insert type header. */ bzero(&mth, sizeof(mth)); strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME); (void)sbuf_bcat(&sbuf, &mth, sizeof(mth)); /* * Insert type statistics for each CPU. */ for (i = 0; i <= mp_maxid; i++) { mtsp = zpcpu_get_cpu(mtip->mti_stats, i); (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp)); } /* * Fill in the missing CPUs. 
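 * Padding each record out to MAXCPU entries keeps the stream layout
 * fixed for userland consumers such as libmemstat(3) (editor's note).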
*/ for (; i < MAXCPU; i++) { (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts)); } } mtx_unlock(&malloc_mtx); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats", "Return malloc types"); SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0, "Count of kernel malloc types"); void malloc_type_list(malloc_type_list_func_t *func, void *arg) { struct malloc_type *mtp, **bufmtp; int count, i; size_t buflen; mtx_lock(&malloc_mtx); restart: mtx_assert(&malloc_mtx, MA_OWNED); count = kmemcount; mtx_unlock(&malloc_mtx); buflen = sizeof(struct malloc_type *) * count; bufmtp = malloc(buflen, M_TEMP, M_WAITOK); mtx_lock(&malloc_mtx); if (count < kmemcount) { free(bufmtp, M_TEMP); goto restart; } for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++) bufmtp[i] = mtp; mtx_unlock(&malloc_mtx); for (i = 0; i < count; i++) (func)(bufmtp[i], arg); free(bufmtp, M_TEMP); } #ifdef DDB DB_SHOW_COMMAND(malloc, db_show_malloc) { struct malloc_type_internal *mtip; struct malloc_type_internal *mtsp; struct malloc_type *mtp; uint64_t allocs, frees; uint64_t alloced, freed; int i; db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse", "Requests"); for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { mtip = (struct malloc_type_internal *)mtp->ks_handle; allocs = 0; frees = 0; alloced = 0; freed = 0; for (i = 0; i <= mp_maxid; i++) { mtsp = zpcpu_get_cpu(mtip->mti_stats, i); allocs += mtip->mti_stats[i].mts_numallocs; frees += mtip->mti_stats[i].mts_numfrees; alloced += mtip->mti_stats[i].mts_memalloced; freed += mtip->mti_stats[i].mts_memfreed; } db_printf("%18s %12ju %12juK %12ju\n", mtp->ks_shortdesc, allocs - frees, (alloced - freed + 1023) / 1024, allocs); if (db_pager_quit) break; } } #if MALLOC_DEBUG_MAXZONES > 1 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches) { struct malloc_type_internal *mtip; struct malloc_type *mtp; u_int subzone; if (!have_addr) { db_printf("Usage: show multizone_matches \n"); return; } mtp = (void *)addr; if (mtp->ks_magic != M_MAGIC) { db_printf("Magic %lx does not match expected %x\n", mtp->ks_magic, M_MAGIC); return; } mtip = mtp->ks_handle; subzone = mtip->mti_zone; for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { mtip = mtp->ks_handle; if (mtip->mti_zone != subzone) continue; db_printf("%s\n", mtp->ks_shortdesc); if (db_pager_quit) break; } } #endif /* MALLOC_DEBUG_MAXZONES > 1 */ #endif /* DDB */ #ifdef MALLOC_PROFILE static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; uint64_t count; uint64_t waste; uint64_t mem; int error; int rsize; int size; int i; waste = 0; mem = 0; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_printf(&sbuf, "\n Size Requests Real Size\n"); for (i = 0; i < KMEM_ZSIZE; i++) { size = i << KMEM_ZSHIFT; rsize = kmemzones[kmemsize[i]].kz_size; count = (long long unsigned)krequests[i]; sbuf_printf(&sbuf, "%6d%28llu%11d\n", size, (unsigned long long)count, rsize); if ((rsize * count) > (size * count)) waste += (rsize * count) - (size * count); mem += (rsize * count); } sbuf_printf(&sbuf, "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", (unsigned long long)mem, (unsigned long long)waste); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, NULL, 0, sysctl_kern_mprof, "A", "Malloc 
Profiling"); #endif /* MALLOC_PROFILE */ Index: head/sys/kern/kern_mbuf.c =================================================================== --- head/sys/kern/kern_mbuf.c (revision 339926) +++ head/sys/kern/kern_mbuf.c (revision 339927) @@ -1,1145 +1,1147 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004, 2005, * Bosko Milekic . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_param.h" #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA * Zones. * * Mbuf Clusters (2K, contiguous) are allocated from the Cluster * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the * administrator so desires. * * Mbufs are allocated from a UMA Master Zone called the Mbuf * Zone. * * Additionally, FreeBSD provides a Packet Zone, which it * configures as a Secondary Zone to the Mbuf Master Zone, * thus sharing backend Slab kegs with the Mbuf Master Zone. * * Thus common-case allocations and locking are simplified: * * m_clget() m_getcl() * | | * | .------------>[(Packet Cache)] m_get(), m_gethdr() * | | [ Packet ] | * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ] * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ] * | \________ | * [ Cluster Keg ] \ / * | [ Mbuf Keg ] * [ Cluster Slabs ] | * | [ Mbuf Slabs ] * \____________(VM)_________________/ * * * Whenever an object is allocated with uma_zalloc() out of * one of the Zones its _ctor_ function is executed. The same * for any deallocation through uma_zfree() the _dtor_ function * is executed. * * Caches are per-CPU and are filled from the Master Zone. * * Whenever an object is allocated from the underlying global * memory pool it gets pre-initialized with the _zinit_ functions. * When the Keg's are overfull objects get decommissioned with * _zfini_ functions and free'd back to the global memory pool. 
* */ int nmbufs; /* limits number of mbufs */ int nmbclusters; /* limits number of mbuf clusters */ int nmbjumbop; /* limits number of page size jumbo clusters */ int nmbjumbo9; /* limits number of 9k jumbo clusters */ int nmbjumbo16; /* limits number of 16k jumbo clusters */ static quad_t maxmbufmem; /* overall real memory limit for all mbufs */ SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxmbufmem, 0, "Maximum real memory allocatable to various mbuf types"); /* * tunable_mbinit() has to be run before any mbuf allocations are done. */ static void tunable_mbinit(void *dummy) { quad_t realmem; /* * The default limit for all mbuf related memory is 1/2 of all * available kernel memory (physical or kmem). * At most it can be 3/4 of available kernel memory. */ realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size); maxmbufmem = realmem / 2; TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem); if (maxmbufmem > realmem / 4 * 3) maxmbufmem = realmem / 4 * 3; TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters); if (nmbclusters == 0) nmbclusters = maxmbufmem / MCLBYTES / 4; TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop); if (nmbjumbop == 0) nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4; TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9); if (nmbjumbo9 == 0) nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6; TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16); if (nmbjumbo16 == 0) nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6; /* * We need at least as many mbufs as we have clusters of * the various types added together. */ TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs); if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) nmbufs = lmax(maxmbufmem / MSIZE / 5, nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16); } SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL); static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS) { int error, newnmbclusters; newnmbclusters = nmbclusters; error = sysctl_handle_int(oidp, &newnmbclusters, 0, req); if (error == 0 && req->newptr && newnmbclusters != nmbclusters) { if (newnmbclusters > nmbclusters && nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { nmbclusters = newnmbclusters; nmbclusters = uma_zone_set_max(zone_clust, nmbclusters); EVENTHANDLER_INVOKE(nmbclusters_change); } else error = EINVAL; } return (error); } SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW, &nmbclusters, 0, sysctl_nmbclusters, "IU", "Maximum number of mbuf clusters allowed"); static int sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS) { int error, newnmbjumbop; newnmbjumbop = nmbjumbop; error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req); if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) { if (newnmbjumbop > nmbjumbop && nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { nmbjumbop = newnmbjumbop; nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop); } else error = EINVAL; } return (error); } SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW, &nmbjumbop, 0, sysctl_nmbjumbop, "IU", "Maximum number of mbuf page size jumbo clusters allowed"); static int sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS) { int error, newnmbjumbo9; newnmbjumbo9 = nmbjumbo9; error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req); if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) { if (newnmbjumbo9 > nmbjumbo9 && nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { nmbjumbo9 = newnmbjumbo9; nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9); } else error = EINVAL; } return (error); } 
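/*
 * Editor's note: as with the other mbuf limit sysctls, the handler
 * registered below only permits raising the limit at run time; attempts
 * to lower it are rejected with EINVAL.
 */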
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW, &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU", "Maximum number of mbuf 9k jumbo clusters allowed"); static int sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS) { int error, newnmbjumbo16; newnmbjumbo16 = nmbjumbo16; error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req); if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) { if (newnmbjumbo16 > nmbjumbo16 && nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { nmbjumbo16 = newnmbjumbo16; nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16); } else error = EINVAL; } return (error); } SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW, &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU", "Maximum number of mbuf 16k jumbo clusters allowed"); static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS) { int error, newnmbufs; newnmbufs = nmbufs; error = sysctl_handle_int(oidp, &newnmbufs, 0, req); if (error == 0 && req->newptr && newnmbufs != nmbufs) { if (newnmbufs > nmbufs) { nmbufs = newnmbufs; nmbufs = uma_zone_set_max(zone_mbuf, nmbufs); EVENTHANDLER_INVOKE(nmbufs_change); } else error = EINVAL; } return (error); } SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW, &nmbufs, 0, sysctl_nmbufs, "IU", "Maximum number of mbufs allowed"); /* * Zones from which we allocate. */ uma_zone_t zone_mbuf; uma_zone_t zone_clust; uma_zone_t zone_pack; uma_zone_t zone_jumbop; uma_zone_t zone_jumbo9; uma_zone_t zone_jumbo16; /* * Local prototypes. */ static int mb_ctor_mbuf(void *, int, void *, int); static int mb_ctor_clust(void *, int, void *, int); static int mb_ctor_pack(void *, int, void *, int); static void mb_dtor_mbuf(void *, int, void *); static void mb_dtor_pack(void *, int, void *); static int mb_zinit_pack(void *, int, int); static void mb_zfini_pack(void *, int); static void mb_reclaim(uma_zone_t, int); static void *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); /* Ensure that MSIZE is a power of 2. */ CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE); /* * Initialize FreeBSD Network buffer allocation. */ static void mbuf_init(void *dummy) { /* * Configure UMA zones for Mbufs, Clusters, and Packets. */ zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, #ifdef INVARIANTS trash_init, trash_fini, #else NULL, NULL, #endif MSIZE - 1, UMA_ZONE_MAXBUCKET); if (nmbufs > 0) nmbufs = uma_zone_set_max(zone_mbuf, nmbufs); uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached"); uma_zone_set_maxaction(zone_mbuf, mb_reclaim); zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES, mb_ctor_clust, #ifdef INVARIANTS trash_dtor, trash_init, trash_fini, #else NULL, NULL, NULL, #endif UMA_ALIGN_PTR, 0); if (nmbclusters > 0) nmbclusters = uma_zone_set_max(zone_clust, nmbclusters); uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached"); uma_zone_set_maxaction(zone_clust, mb_reclaim); zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack, mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf); /* Make jumbo frame zone too. Page size, 9k and 16k. 
*/ zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE, mb_ctor_clust, #ifdef INVARIANTS trash_dtor, trash_init, trash_fini, #else NULL, NULL, NULL, #endif UMA_ALIGN_PTR, 0); if (nmbjumbop > 0) nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop); uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached"); uma_zone_set_maxaction(zone_jumbop, mb_reclaim); zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES, mb_ctor_clust, #ifdef INVARIANTS trash_dtor, trash_init, trash_fini, #else NULL, NULL, NULL, #endif UMA_ALIGN_PTR, 0); uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc); if (nmbjumbo9 > 0) nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9); uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached"); uma_zone_set_maxaction(zone_jumbo9, mb_reclaim); zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES, mb_ctor_clust, #ifdef INVARIANTS trash_dtor, trash_init, trash_fini, #else NULL, NULL, NULL, #endif UMA_ALIGN_PTR, 0); uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc); if (nmbjumbo16 > 0) nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16); uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached"); uma_zone_set_maxaction(zone_jumbo16, mb_reclaim); /* * Hook event handler for low-memory situation, used to * drain protocols and push data back to the caches (UMA * later pushes it back to VM). */ EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL, EVENTHANDLER_PRI_FIRST); } SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL); #ifdef NETDUMP /* * netdump makes use of a pre-allocated pool of mbufs and clusters. When * netdump is configured, we initialize a set of UMA cache zones which return * items from this pool. At panic-time, the regular UMA zone pointers are * overwritten with those of the cache zones so that drivers may allocate and * free mbufs and clusters without attempting to allocate physical memory. * * We keep mbufs and clusters in a pair of mbuf queues. In particular, for * the purpose of caching clusters, we treat them as mbufs. */ static struct mbufq nd_mbufq = { STAILQ_HEAD_INITIALIZER(nd_mbufq.mq_head), 0, INT_MAX }; static struct mbufq nd_clustq = { STAILQ_HEAD_INITIALIZER(nd_clustq.mq_head), 0, INT_MAX }; static int nd_clsize; static uma_zone_t nd_zone_mbuf; static uma_zone_t nd_zone_clust; static uma_zone_t nd_zone_pack; static int nd_buf_import(void *arg, void **store, int count, int domain __unused, int flags) { struct mbufq *q; struct mbuf *m; int i; KASSERT(!dumping, ("%s: ran out of pre-allocated mbufs", __func__)); q = arg; for (i = 0; i < count; i++) { m = mbufq_dequeue(q); if (m == NULL) break; trash_init(m, q == &nd_mbufq ? 
MSIZE : nd_clsize, flags); store[i] = m; } return (i); } static void nd_buf_release(void *arg, void **store, int count) { struct mbufq *q; struct mbuf *m; int i; q = arg; for (i = 0; i < count; i++) { m = store[i]; (void)mbufq_enqueue(q, m); } } static int nd_pack_import(void *arg __unused, void **store, int count, int domain __unused, int flags __unused) { struct mbuf *m; void *clust; int i; KASSERT(!dumping, ("%s: ran out of pre-allocated mbufs", __func__)); for (i = 0; i < count; i++) { m = m_get(MT_DATA, M_NOWAIT); if (m == NULL) break; clust = uma_zalloc(nd_zone_clust, M_NOWAIT); if (clust == NULL) { m_free(m); break; } mb_ctor_clust(clust, nd_clsize, m, 0); store[i] = m; } return (i); } static void nd_pack_release(void *arg __unused, void **store, int count) { struct mbuf *m; void *clust; int i; for (i = 0; i < count; i++) { m = store[i]; clust = m->m_ext.ext_buf; uma_zfree(nd_zone_clust, clust); uma_zfree(nd_zone_mbuf, m); } } /* * Free the pre-allocated mbufs and clusters reserved for netdump, and destroy * the corresponding UMA cache zones. */ void netdump_mbuf_drain(void) { struct mbuf *m; void *item; if (nd_zone_mbuf != NULL) { uma_zdestroy(nd_zone_mbuf); nd_zone_mbuf = NULL; } if (nd_zone_clust != NULL) { uma_zdestroy(nd_zone_clust); nd_zone_clust = NULL; } if (nd_zone_pack != NULL) { uma_zdestroy(nd_zone_pack); nd_zone_pack = NULL; } while ((m = mbufq_dequeue(&nd_mbufq)) != NULL) m_free(m); while ((item = mbufq_dequeue(&nd_clustq)) != NULL) uma_zfree(m_getzone(nd_clsize), item); } /* * Callback invoked immediately prior to starting a netdump. */ void netdump_mbuf_dump(void) { /* * All cluster zones return buffers of the size requested by the * drivers. It's up to the driver to reinitialize the zones if the * MTU of a netdump-enabled interface changes. */ printf("netdump: overwriting mbuf zone pointers\n"); zone_mbuf = nd_zone_mbuf; zone_clust = nd_zone_clust; zone_pack = nd_zone_pack; zone_jumbop = nd_zone_clust; zone_jumbo9 = nd_zone_clust; zone_jumbo16 = nd_zone_clust; } /* * Reinitialize the netdump mbuf+cluster pool and cache zones. */ void netdump_mbuf_reinit(int nmbuf, int nclust, int clsize) { struct mbuf *m; void *item; netdump_mbuf_drain(); nd_clsize = clsize; nd_zone_mbuf = uma_zcache_create("netdump_" MBUF_MEM_NAME, MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, #ifdef INVARIANTS trash_init, trash_fini, #else NULL, NULL, #endif nd_buf_import, nd_buf_release, &nd_mbufq, UMA_ZONE_NOBUCKET); nd_zone_clust = uma_zcache_create("netdump_" MBUF_CLUSTER_MEM_NAME, clsize, mb_ctor_clust, #ifdef INVARIANTS trash_dtor, trash_init, trash_fini, #else NULL, NULL, NULL, #endif nd_buf_import, nd_buf_release, &nd_clustq, UMA_ZONE_NOBUCKET); nd_zone_pack = uma_zcache_create("netdump_" MBUF_PACKET_MEM_NAME, MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL, nd_pack_import, nd_pack_release, NULL, UMA_ZONE_NOBUCKET); while (nmbuf-- > 0) { m = m_get(MT_DATA, M_WAITOK); uma_zfree(nd_zone_mbuf, m); } while (nclust-- > 0) { item = uma_zalloc(m_getzone(nd_clsize), M_WAITOK); uma_zfree(nd_zone_clust, item); } } #endif /* NETDUMP */ /* * UMA backend page allocator for the jumbo frame zones. * * Allocates kernel virtual memory that is backed by contiguous physical * pages. */ static void * mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, int wait) { /* Inform UMA that this allocator uses kernel_map/object. 
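 * Editor's note on the change below: the domain chosen by UMA is now
 * handed to the kmem layer as a DOMAINSET_FIXED() policy, so the
 * contiguous pages are taken from that single domain rather than from an
 * iterated set.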
*/ *flags = UMA_SLAB_KERNEL; - return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, - (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT)); + return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain), + bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, + VM_MEMATTR_DEFAULT)); } /* * Constructor for Mbuf master zone. * * The 'arg' pointer points to a mb_args structure which * contains call-specific information required to support the * mbuf allocation API. See mbuf.h. */ static int mb_ctor_mbuf(void *mem, int size, void *arg, int how) { struct mbuf *m; struct mb_args *args; int error; int flags; short type; #ifdef INVARIANTS trash_ctor(mem, size, arg, how); #endif args = (struct mb_args *)arg; type = args->type; /* * The mbuf is initialized later. The caller has the * responsibility to set up any MAC labels too. */ if (type == MT_NOINIT) return (0); m = (struct mbuf *)mem; flags = args->flags; MPASS((flags & M_NOFREE) == 0); error = m_init(m, how, type, flags); return (error); } /* * The Mbuf master zone destructor. */ static void mb_dtor_mbuf(void *mem, int size, void *arg) { struct mbuf *m; unsigned long flags; m = (struct mbuf *)mem; flags = (unsigned long)arg; KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__)); if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags)) m_tag_delete_chain(m, NULL); #ifdef INVARIANTS trash_dtor(mem, size, arg); #endif } /* * The Mbuf Packet zone destructor. */ static void mb_dtor_pack(void *mem, int size, void *arg) { struct mbuf *m; m = (struct mbuf *)mem; if ((m->m_flags & M_PKTHDR) != 0) m_tag_delete_chain(m, NULL); /* Make sure we've got a clean cluster back. */ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__)); KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__)); KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__)); KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__)); KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__)); KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__)); KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__)); #ifdef INVARIANTS trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg); #endif /* * If there are processes blocked on zone_clust, waiting for pages * to be freed up, * cause them to be woken up by draining the * packet zone. We are exposed to a race here * (in the check for * the UMA_ZFLAG_FULL) where we might miss the flag set, but that * is deliberate. We don't want to acquire the zone lock for every * mbuf free. */ if (uma_zone_exhausted_nolock(zone_clust)) zone_drain(zone_pack); } /* * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor. * * Here the 'arg' pointer points to the Mbuf which we * are configuring cluster storage for. If 'arg' is * empty we allocate just the cluster without setting * the mbuf to it. See mbuf.h. 
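 *
 * Illustrative only ('m' and 'buf' are whatever the caller uses): the two
 * cases are selected purely by the 'arg' handed to uma_zalloc_arg():
 *
 *	uma_zalloc_arg(zone_clust, m, how);		attach cluster to 'm'
 *	buf = uma_zalloc_arg(zone_jumbop, NULL, how);	bare page-sized cluster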
*/ static int mb_ctor_clust(void *mem, int size, void *arg, int how) { struct mbuf *m; #ifdef INVARIANTS trash_ctor(mem, size, arg, how); #endif m = (struct mbuf *)arg; if (m != NULL) { m->m_ext.ext_buf = (char *)mem; m->m_data = m->m_ext.ext_buf; m->m_flags |= M_EXT; m->m_ext.ext_free = NULL; m->m_ext.ext_arg1 = NULL; m->m_ext.ext_arg2 = NULL; m->m_ext.ext_size = size; m->m_ext.ext_type = m_gettype(size); m->m_ext.ext_flags = EXT_FLAG_EMBREF; m->m_ext.ext_count = 1; } return (0); } /* * The Packet secondary zone's init routine, executed on the * object's transition from mbuf keg slab to zone cache. */ static int mb_zinit_pack(void *mem, int size, int how) { struct mbuf *m; m = (struct mbuf *)mem; /* m is virgin. */ if (uma_zalloc_arg(zone_clust, m, how) == NULL || m->m_ext.ext_buf == NULL) return (ENOMEM); m->m_ext.ext_type = EXT_PACKET; /* Override. */ #ifdef INVARIANTS trash_init(m->m_ext.ext_buf, MCLBYTES, how); #endif return (0); } /* * The Packet secondary zone's fini routine, executed on the * object's transition from zone cache to keg slab. */ static void mb_zfini_pack(void *mem, int size) { struct mbuf *m; m = (struct mbuf *)mem; #ifdef INVARIANTS trash_fini(m->m_ext.ext_buf, MCLBYTES); #endif uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL); #ifdef INVARIANTS trash_dtor(mem, size, NULL); #endif } /* * The "packet" keg constructor. */ static int mb_ctor_pack(void *mem, int size, void *arg, int how) { struct mbuf *m; struct mb_args *args; int error, flags; short type; m = (struct mbuf *)mem; args = (struct mb_args *)arg; flags = args->flags; type = args->type; MPASS((flags & M_NOFREE) == 0); #ifdef INVARIANTS trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how); #endif error = m_init(m, how, type, flags); /* m_ext is already initialized. */ m->m_data = m->m_ext.ext_buf; m->m_flags = (flags | M_EXT); return (error); } /* * This is the protocol drain routine. Called by UMA whenever any of the * mbuf zones is closed to its limit. * * No locks should be held when this is called. The drain routines have to * presently acquire some locks which raises the possibility of lock order * reversal. */ static void mb_reclaim(uma_zone_t zone __unused, int pending __unused) { struct domain *dp; struct protosw *pr; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__); for (dp = domains; dp != NULL; dp = dp->dom_next) for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) if (pr->pr_drain != NULL) (*pr->pr_drain)(); } /* * Clean up after mbufs with M_EXT storage attached to them if the * reference count hits 1. */ void mb_free_ext(struct mbuf *m) { volatile u_int *refcnt; struct mbuf *mref; int freembuf; KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m)); /* See if this is the mbuf that holds the embedded refcount. */ if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { refcnt = &m->m_ext.ext_count; mref = m; } else { KASSERT(m->m_ext.ext_cnt != NULL, ("%s: no refcounting pointer on %p", __func__, m)); refcnt = m->m_ext.ext_cnt; mref = __containerof(refcnt, struct mbuf, m_ext.ext_count); } /* * Check if the header is embedded in the cluster. It is * important that we can't touch any of the mbuf fields * after we have freed the external storage, since mbuf * could have been embedded in it. For now, the mbufs * embedded into the cluster are always of type EXT_EXTREF, * and for this type we won't free the mref. 
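 *
 * Editorial note: the "*refcnt == 1" test in the code below is a fast
 * path that skips the atomic decrement entirely when the caller already
 * holds the last reference; only genuinely shared external storage pays
 * for atomic_fetchadd_int().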
*/ if (m->m_flags & M_NOFREE) { freembuf = 0; KASSERT(m->m_ext.ext_type == EXT_EXTREF, ("%s: no-free mbuf %p has wrong type", __func__, m)); } else freembuf = 1; /* Free attached storage if this mbuf is the only reference to it. */ if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) { switch (m->m_ext.ext_type) { case EXT_PACKET: /* The packet zone is special. */ if (*refcnt == 0) *refcnt = 1; uma_zfree(zone_pack, mref); break; case EXT_CLUSTER: uma_zfree(zone_clust, m->m_ext.ext_buf); uma_zfree(zone_mbuf, mref); break; case EXT_JUMBOP: uma_zfree(zone_jumbop, m->m_ext.ext_buf); uma_zfree(zone_mbuf, mref); break; case EXT_JUMBO9: uma_zfree(zone_jumbo9, m->m_ext.ext_buf); uma_zfree(zone_mbuf, mref); break; case EXT_JUMBO16: uma_zfree(zone_jumbo16, m->m_ext.ext_buf); uma_zfree(zone_mbuf, mref); break; case EXT_SFBUF: case EXT_NET_DRV: case EXT_MOD_TYPE: case EXT_DISPOSABLE: KASSERT(mref->m_ext.ext_free != NULL, ("%s: ext_free not set", __func__)); mref->m_ext.ext_free(mref); uma_zfree(zone_mbuf, mref); break; case EXT_EXTREF: KASSERT(m->m_ext.ext_free != NULL, ("%s: ext_free not set", __func__)); m->m_ext.ext_free(m); break; default: KASSERT(m->m_ext.ext_type == 0, ("%s: unknown ext_type", __func__)); } } if (freembuf && m != mref) uma_zfree(zone_mbuf, m); } /* * Official mbuf(9) allocation KPI for stack and drivers: * * m_get() - a single mbuf without any attachments, sys/mbuf.h. * m_gethdr() - a single mbuf initialized as M_PKTHDR, sys/mbuf.h. * m_getcl() - an mbuf + 2k cluster, sys/mbuf.h. * m_clget() - attach cluster to already allocated mbuf. * m_cljget() - attach jumbo cluster to already allocated mbuf. * m_get2() - allocate minimum mbuf that would fit size argument. * m_getm2() - allocate a chain of mbufs/clusters. * m_extadd() - attach external cluster to mbuf. * * m_free() - free single mbuf with its tags and ext, sys/mbuf.h. * m_freem() - free chain of mbufs. */ int m_clget(struct mbuf *m, int how) { KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT", __func__, m)); m->m_ext.ext_buf = (char *)NULL; uma_zalloc_arg(zone_clust, m, how); /* * On a cluster allocation failure, drain the packet zone and retry, * we might be able to loosen a few clusters up on the drain. */ if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) { zone_drain(zone_pack); uma_zalloc_arg(zone_clust, m, how); } MBUF_PROBE2(m__clget, m, how); return (m->m_flags & M_EXT); } /* * m_cljget() is different from m_clget() as it can allocate clusters without * attaching them to an mbuf. In that case the return value is the pointer * to the cluster of the requested size. If an mbuf was specified, it gets * the cluster attached to it and the return value can be safely ignored. * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES. */ void * m_cljget(struct mbuf *m, int how, int size) { uma_zone_t zone; void *retval; if (m != NULL) { KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT", __func__, m)); m->m_ext.ext_buf = NULL; } zone = m_getzone(size); retval = uma_zalloc_arg(zone, m, how); MBUF_PROBE4(m__cljget, m, how, size, retval); return (retval); } /* * m_get2() allocates minimum mbuf that would fit "size" argument. 
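 *
 * A usage sketch for illustration (the length is hypothetical):
 *
 *	m = m_get2(1800, M_NOWAIT, MT_DATA, M_PKTHDR);
 *
 * 1800 bytes exceeds MHLEN, so the mbuf comes from zone_pack (mbuf plus
 * 2k cluster); anything larger than MJUMPAGESIZE makes m_get2() return
 * NULL rather than build a chain.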
*/ struct mbuf * m_get2(int size, int how, short type, int flags) { struct mb_args args; struct mbuf *m, *n; args.flags = flags; args.type = type; if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0)) return (uma_zalloc_arg(zone_mbuf, &args, how)); if (size <= MCLBYTES) return (uma_zalloc_arg(zone_pack, &args, how)); if (size > MJUMPAGESIZE) return (NULL); m = uma_zalloc_arg(zone_mbuf, &args, how); if (m == NULL) return (NULL); n = uma_zalloc_arg(zone_jumbop, m, how); if (n == NULL) { uma_zfree(zone_mbuf, m); return (NULL); } return (m); } /* * m_getjcl() returns an mbuf with a cluster of the specified size attached. * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES. */ struct mbuf * m_getjcl(int how, short type, int flags, int size) { struct mb_args args; struct mbuf *m, *n; uma_zone_t zone; if (size == MCLBYTES) return m_getcl(how, type, flags); args.flags = flags; args.type = type; m = uma_zalloc_arg(zone_mbuf, &args, how); if (m == NULL) return (NULL); zone = m_getzone(size); n = uma_zalloc_arg(zone, m, how); if (n == NULL) { uma_zfree(zone_mbuf, m); return (NULL); } return (m); } /* * Allocate a given length worth of mbufs and/or clusters (whatever fits * best) and return a pointer to the top of the allocated chain. If an * existing mbuf chain is provided, then we will append the new chain * to the existing one but still return the top of the newly allocated * chain. */ struct mbuf * m_getm2(struct mbuf *m, int len, int how, short type, int flags) { struct mbuf *mb, *nm = NULL, *mtail = NULL; KASSERT(len >= 0, ("%s: len is < 0", __func__)); /* Validate flags. */ flags &= (M_PKTHDR | M_EOR); /* Packet header mbuf must be first in chain. */ if ((flags & M_PKTHDR) && m != NULL) flags &= ~M_PKTHDR; /* Loop and append maximum sized mbufs to the chain tail. */ while (len > 0) { if (len > MCLBYTES) mb = m_getjcl(how, type, (flags & M_PKTHDR), MJUMPAGESIZE); else if (len >= MINCLSIZE) mb = m_getcl(how, type, (flags & M_PKTHDR)); else if (flags & M_PKTHDR) mb = m_gethdr(how, type); else mb = m_get(how, type); /* Fail the whole operation if one mbuf can't be allocated. */ if (mb == NULL) { if (nm != NULL) m_freem(nm); return (NULL); } /* Book keeping. */ len -= M_SIZE(mb); if (mtail != NULL) mtail->m_next = mb; else nm = mb; mtail = mb; flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */ } if (flags & M_EOR) mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */ /* If mbuf was supplied, append new chain to the end of it. */ if (m != NULL) { for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next) ; mtail->m_next = nm; mtail->m_flags &= ~M_EOR; } else m = nm; return (m); } /*- * Configure a provided mbuf to refer to the provided external storage * buffer and setup a reference count for said buffer. * * Arguments: * mb The existing mbuf to which to attach the provided buffer. * buf The address of the provided external storage buffer. * size The size of the provided buffer. * freef A pointer to a routine that is responsible for freeing the * provided external storage buffer. * args A pointer to an argument structure (of any type) to be passed * to the provided freef routine (may be NULL). * flags Any other flags to be passed to the provided mbuf. * type The type that the external storage buffer should be * labeled with. * * Returns: * Nothing. 
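 *
 * Illustrative call (the buffer, its size and the free routine are
 * hypothetical driver-side names):
 *
 *	m_extadd(m, (char *)rxbuf, RXBUF_SIZE, my_ext_free, sc, NULL,
 *	    0, EXT_NET_DRV);
 *
 * my_ext_free() is then invoked to release rxbuf once the last reference
 * to the external storage is dropped.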
*/ void m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef, void *arg1, void *arg2, int flags, int type) { KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__)); mb->m_flags |= (M_EXT | flags); mb->m_ext.ext_buf = buf; mb->m_data = mb->m_ext.ext_buf; mb->m_ext.ext_size = size; mb->m_ext.ext_free = freef; mb->m_ext.ext_arg1 = arg1; mb->m_ext.ext_arg2 = arg2; mb->m_ext.ext_type = type; if (type != EXT_EXTREF) { mb->m_ext.ext_count = 1; mb->m_ext.ext_flags = EXT_FLAG_EMBREF; } else mb->m_ext.ext_flags = 0; } /* * Free an entire chain of mbufs and associated external buffers, if * applicable. */ void m_freem(struct mbuf *mb) { MBUF_PROBE1(m__freem, mb); while (mb != NULL) mb = m_free(mb); } Index: head/sys/kern/kern_pmc.c =================================================================== --- head/sys/kern/kern_pmc.c (revision 339926) +++ head/sys/kern/kern_pmc.c (revision 339927) @@ -1,380 +1,367 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2008 Joseph Koshy * Copyright (c) 2007 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by A. Joseph Koshy under * sponsorship from the FreeBSD Foundation and Google, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" -#include +#include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS FEATURE(hwpmc_hooks, "Kernel support for HW PMC"); #define PMC_KERNEL_VERSION PMC_VERSION #else #define PMC_KERNEL_VERSION 0 #endif MALLOC_DECLARE(M_PMCHOOKS); MALLOC_DEFINE(M_PMCHOOKS, "pmchooks", "Memory space for PMC hooks"); /* memory pool */ MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module"); const int pmc_kernel_version = PMC_KERNEL_VERSION; /* Hook variable. */ int __read_mostly (*pmc_hook)(struct thread *td, int function, void *arg) = NULL; /* Interrupt handler */ int __read_mostly (*pmc_intr)(struct trapframe *tf) = NULL; DPCPU_DEFINE(uint8_t, pmc_sampled); /* * A global count of SS mode PMCs. When non-zero, this means that * we have processes that are sampling the system as a whole. 
*/ volatile int pmc_ss_count; /* * Since PMC(4) may not be loaded in the current kernel, the * convention followed is that a non-NULL value of 'pmc_hook' implies * the presence of this kernel module. * * This requires us to protect 'pmc_hook' with a * shared (sx) lock -- thus making the process of calling into PMC(4) * somewhat more expensive than a simple 'if' check and indirect call. */ struct sx pmc_sx; SX_SYSINIT(pmcsx, &pmc_sx, "pmc-sx"); /* * PMC Soft per cpu trapframe. */ struct trapframe pmc_tf[MAXCPU]; /* * Per domain list of buffer headers */ __read_mostly struct pmc_domain_buffer_header *pmc_dom_hdrs[MAXMEMDOM]; /* * PMC Soft use a global table to store registered events. */ SYSCTL_NODE(_kern, OID_AUTO, hwpmc, CTLFLAG_RW, 0, "HWPMC parameters"); static int pmc_softevents = 16; SYSCTL_INT(_kern_hwpmc, OID_AUTO, softevents, CTLFLAG_RDTUN, &pmc_softevents, 0, "maximum number of soft events"); int pmc_softs_count; struct pmc_soft **pmc_softs; struct mtx pmc_softs_mtx; MTX_SYSINIT(pmc_soft_mtx, &pmc_softs_mtx, "pmc-softs", MTX_SPIN); /* * Helper functions. */ /* * A note on the CPU numbering scheme used by the hwpmc(4) driver. * * CPUs are denoted using numbers in the range 0..[pmc_cpu_max()-1]. * CPUs could be numbered "sparsely" in this range; the predicate * `pmc_cpu_is_present()' is used to test whether a given CPU is * physically present. * * Further, a CPU that is physically present may be administratively * disabled or otherwise unavailable for use by hwpmc(4). The * `pmc_cpu_is_active()' predicate tests for CPU usability. An * "active" CPU participates in thread scheduling and can field * interrupts raised by PMC hardware. * * On systems with hyperthreaded CPUs, multiple logical CPUs may share * PMC hardware resources. For such processors one logical CPU is * denoted as the primary owner of the in-CPU PMC resources. The * pmc_cpu_is_primary() predicate is used to distinguish this primary * CPU from the others. */ int pmc_cpu_is_active(int cpu) { #ifdef SMP return (pmc_cpu_is_present(cpu) && !CPU_ISSET(cpu, &hlt_cpus_mask)); #else return (1); #endif } /* Deprecated. */ int pmc_cpu_is_disabled(int cpu) { return (!pmc_cpu_is_active(cpu)); } int pmc_cpu_is_present(int cpu) { #ifdef SMP return (!CPU_ABSENT(cpu)); #else return (1); #endif } int pmc_cpu_is_primary(int cpu) { #ifdef SMP return (!CPU_ISSET(cpu, &logical_cpus_mask)); #else return (1); #endif } /* * Return the maximum CPU number supported by the system. The return * value is used for scaling internal data structures and for runtime * checks. */ unsigned int pmc_cpu_max(void) { #ifdef SMP return (mp_maxid+1); #else return (1); #endif } #ifdef INVARIANTS /* * Return the count of CPUs in the `active' state in the system. */ int pmc_cpu_max_active(void) { #ifdef SMP /* * When support for CPU hot-plugging is added to the kernel, * this function would change to return the current number * of "active" CPUs. 
*/ return (mp_ncpus); #else return (1); #endif } #endif /* * Cleanup event name: * - remove duplicate '_' * - all uppercase */ static void pmc_soft_namecleanup(char *name) { char *p, *q; p = q = name; for ( ; *p == '_' ; p++) ; for ( ; *p ; p++) { if (*p == '_' && (*(p + 1) == '_' || *(p + 1) == '\0')) continue; else *q++ = toupper(*p); } *q = '\0'; } void pmc_soft_ev_register(struct pmc_soft *ps) { static int warned = 0; int n; ps->ps_running = 0; ps->ps_ev.pm_ev_code = 0; /* invalid */ pmc_soft_namecleanup(ps->ps_ev.pm_ev_name); mtx_lock_spin(&pmc_softs_mtx); if (pmc_softs_count >= pmc_softevents) { /* * XXX Reusing events can enter a race condition where * new allocated event will be used as an old one. */ for (n = 0; n < pmc_softevents; n++) if (pmc_softs[n] == NULL) break; if (n == pmc_softevents) { mtx_unlock_spin(&pmc_softs_mtx); if (!warned) { printf("hwpmc: too many soft events, " "increase kern.hwpmc.softevents tunable\n"); warned = 1; } return; } ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + n; pmc_softs[n] = ps; } else { ps->ps_ev.pm_ev_code = PMC_EV_SOFT_FIRST + pmc_softs_count; pmc_softs[pmc_softs_count++] = ps; } mtx_unlock_spin(&pmc_softs_mtx); } void pmc_soft_ev_deregister(struct pmc_soft *ps) { KASSERT(ps != NULL, ("pmc_soft_deregister: called with NULL")); mtx_lock_spin(&pmc_softs_mtx); if (ps->ps_ev.pm_ev_code != 0 && (ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST) < pmc_softevents) { KASSERT((int)ps->ps_ev.pm_ev_code >= PMC_EV_SOFT_FIRST && (int)ps->ps_ev.pm_ev_code <= PMC_EV_SOFT_LAST, ("pmc_soft_deregister: invalid event value")); pmc_softs[ps->ps_ev.pm_ev_code - PMC_EV_SOFT_FIRST] = NULL; } mtx_unlock_spin(&pmc_softs_mtx); } struct pmc_soft * pmc_soft_ev_acquire(enum pmc_event ev) { struct pmc_soft *ps; if (ev == 0 || (ev - PMC_EV_SOFT_FIRST) >= pmc_softevents) return NULL; KASSERT((int)ev >= PMC_EV_SOFT_FIRST && (int)ev <= PMC_EV_SOFT_LAST, ("event out of range")); mtx_lock_spin(&pmc_softs_mtx); ps = pmc_softs[ev - PMC_EV_SOFT_FIRST]; if (ps == NULL) mtx_unlock_spin(&pmc_softs_mtx); return ps; } void pmc_soft_ev_release(struct pmc_soft *ps) { mtx_unlock_spin(&pmc_softs_mtx); } -#ifdef NUMA -#define NDOMAINS vm_ndomains - -static int -getdomain(int cpu) -{ - struct pcpu *pc; - - pc = pcpu_find(cpu); - return (pc->pc_domain); -} -#else -#define NDOMAINS 1 -#define malloc_domain(size, type, domain, flags) malloc((size), (type), (flags)) -#define getdomain(cpu) 0 -#endif /* * Initialise hwpmc. 
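 *
 * Editorial note on the change below: the per-domain buffer headers are
 * now allocated with malloc_domainset() under DOMAINSET_PREF(domain),
 * i.e. a preferred-domain policy rather than a hard binding, which
 * presumably keeps initialisation working even on a domain without local
 * memory.  Sketch of the call shape only:
 *
 *	pmc_dom_hdrs[domain] = malloc_domainset(
 *	    sizeof(struct pmc_domain_buffer_header), M_PMC,
 *	    DOMAINSET_PREF(domain), M_WAITOK | M_ZERO);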
*/ static void init_hwpmc(void *dummy __unused) { int domain, cpu; if (pmc_softevents <= 0 || pmc_softevents > PMC_EV_DYN_COUNT) { (void) printf("hwpmc: tunable \"softevents\"=%d out of " "range.\n", pmc_softevents); pmc_softevents = PMC_EV_DYN_COUNT; } pmc_softs = malloc(pmc_softevents * sizeof(*pmc_softs), M_PMCHOOKS, M_WAITOK | M_ZERO); - for (domain = 0; domain < NDOMAINS; domain++) { - pmc_dom_hdrs[domain] = malloc_domain(sizeof(struct pmc_domain_buffer_header), M_PMC, domain, - M_WAITOK|M_ZERO); + + for (domain = 0; domain < vm_ndomains; domain++) { + pmc_dom_hdrs[domain] = malloc_domainset( + sizeof(struct pmc_domain_buffer_header), M_PMC, + DOMAINSET_PREF(domain), M_WAITOK | M_ZERO); mtx_init(&pmc_dom_hdrs[domain]->pdbh_mtx, "pmc_bufferlist_mtx", "pmc-leaf", MTX_SPIN); TAILQ_INIT(&pmc_dom_hdrs[domain]->pdbh_head); } CPU_FOREACH(cpu) { - domain = getdomain(cpu); + domain = pcpu_find(cpu)->pc_domain; KASSERT(pmc_dom_hdrs[domain] != NULL, ("no mem allocated for domain: %d", domain)); pmc_dom_hdrs[domain]->pdbh_ncpus++; } } SYSINIT(hwpmc, SI_SUB_KDTRACE, SI_ORDER_FIRST, init_hwpmc, NULL); Index: head/sys/kern/subr_busdma_bufalloc.c =================================================================== --- head/sys/kern/subr_busdma_bufalloc.c (revision 339926) +++ head/sys/kern/subr_busdma_bufalloc.c (revision 339927) @@ -1,176 +1,174 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Buffer allocation support routines for bus_dmamem_alloc implementations. */ #include #include #include #include +#include #include #include #include #include #include /* * We manage buffer zones up to a page in size. Buffers larger than a page can * be managed by one of the kernel's page-oriented memory allocation routines as * efficiently as what we can do here. Also, a page is the largest size for * which we can g'tee contiguity when using uma, and contiguity is one of the * requirements we have to fulfill. */ #define MIN_ZONE_BUFSIZE 32 #define MAX_ZONE_BUFSIZE PAGE_SIZE /* * The static array of 12 bufzones is big enough to handle all the zones for the * smallest supported allocation size of 32 through the largest supported page * size of 64K. 
If you up the biggest page size number, up the array size too. * Basically the size of the array needs to be log2(maxsize)-log2(minsize)+1, * but I don't know of an easy way to express that as a compile-time constant. */ #if PAGE_SIZE > 65536 #error Unsupported page size #endif struct busdma_bufalloc { bus_size_t min_size; size_t num_zones; struct busdma_bufzone buf_zones[12]; }; busdma_bufalloc_t busdma_bufalloc_create(const char *name, bus_size_t minimum_alignment, uma_alloc alloc_func, uma_free free_func, u_int32_t zcreate_flags) { struct busdma_bufalloc *ba; struct busdma_bufzone *bz; int i; bus_size_t cursize; ba = malloc(sizeof(struct busdma_bufalloc), M_DEVBUF, M_ZERO | M_WAITOK); ba->min_size = MAX(MIN_ZONE_BUFSIZE, minimum_alignment); /* * Each uma zone is created with an alignment of size-1, meaning that * the alignment is equal to the size (I.E., 64 byte buffers are aligned * to 64 byte boundaries, etc). This allows for a fast efficient test * when deciding whether a pool buffer meets the constraints of a given * tag used for allocation: the buffer is usable if tag->alignment <= * bufzone->size. */ for (i = 0, bz = ba->buf_zones, cursize = ba->min_size; i < nitems(ba->buf_zones) && cursize <= MAX_ZONE_BUFSIZE; ++i, ++bz, cursize <<= 1) { snprintf(bz->name, sizeof(bz->name), "dma %.10s %ju", name, (uintmax_t)cursize); bz->size = cursize; bz->umazone = uma_zcreate(bz->name, bz->size, NULL, NULL, NULL, NULL, bz->size - 1, zcreate_flags); if (bz->umazone == NULL) { busdma_bufalloc_destroy(ba); return (NULL); } if (alloc_func != NULL) uma_zone_set_allocf(bz->umazone, alloc_func); if (free_func != NULL) uma_zone_set_freef(bz->umazone, free_func); ++ba->num_zones; } return (ba); } void busdma_bufalloc_destroy(busdma_bufalloc_t ba) { struct busdma_bufzone *bz; int i; if (ba == NULL) return; for (i = 0, bz = ba->buf_zones; i < ba->num_zones; ++i, ++bz) { uma_zdestroy(bz->umazone); } free(ba, M_DEVBUF); } struct busdma_bufzone * busdma_bufalloc_findzone(busdma_bufalloc_t ba, bus_size_t size) { struct busdma_bufzone *bz; int i; if (size > MAX_ZONE_BUFSIZE) return (NULL); for (i = 0, bz = ba->buf_zones; i < ba->num_zones; ++i, ++bz) { if (bz->size >= size) return (bz); } panic("Didn't find a buffer zone of the right size"); } void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size, int domain, uint8_t *pflag, int wait) { -#ifdef VM_MEMATTR_UNCACHEABLE +#ifdef VM_MEMATTR_UNCACHEABLE /* Inform UMA that this allocator uses kernel_arena/object. */ *pflag = UMA_SLAB_KERNEL; - return ((void *)kmem_alloc_attr_domain(domain, size, wait, 0, - BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE)); - + return ((void *)kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size, + wait, 0, BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE)); #else - panic("VM_MEMATTR_UNCACHEABLE unavailable"); - #endif /* VM_MEMATTR_UNCACHEABLE */ } void busdma_bufalloc_free_uncacheable(void *item, vm_size_t size, uint8_t pflag) { kmem_free((vm_offset_t)item, size); } Index: head/sys/sys/domainset.h =================================================================== --- head/sys/sys/domainset.h (revision 339926) +++ head/sys/sys/domainset.h (revision 339927) @@ -1,126 +1,127 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_DOMAINSET_H_ #define _SYS_DOMAINSET_H_ #include #include #include #define _NDOMAINSETBITS _BITSET_BITS #define _NDOMAINSETWORDS __bitset_words(DOMAINSET_SETSIZE) #define DOMAINSETBUFSIZ \ (((2 + sizeof(long) * 2) * _NDOMAINSETWORDS) + \ sizeof("::") + sizeof(__XSTRING(DOMAINSET_POLICY_MAX)) + \ sizeof(__XSTRING(MAXMEMDOM))) #define DOMAINSET_CLR(n, p) BIT_CLR(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_COPY(f, t) BIT_COPY(DOMAINSET_SETSIZE, f, t) #define DOMAINSET_ISSET(n, p) BIT_ISSET(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_SET(n, p) BIT_SET(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_ZERO(p) BIT_ZERO(DOMAINSET_SETSIZE, p) #define DOMAINSET_FILL(p) BIT_FILL(DOMAINSET_SETSIZE, p) #define DOMAINSET_SETOF(n, p) BIT_SETOF(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_EMPTY(p) BIT_EMPTY(DOMAINSET_SETSIZE, p) #define DOMAINSET_ISFULLSET(p) BIT_ISFULLSET(DOMAINSET_SETSIZE, p) #define DOMAINSET_SUBSET(p, c) BIT_SUBSET(DOMAINSET_SETSIZE, p, c) #define DOMAINSET_OVERLAP(p, c) BIT_OVERLAP(DOMAINSET_SETSIZE, p, c) #define DOMAINSET_CMP(p, c) BIT_CMP(DOMAINSET_SETSIZE, p, c) #define DOMAINSET_OR(d, s) BIT_OR(DOMAINSET_SETSIZE, d, s) #define DOMAINSET_AND(d, s) BIT_AND(DOMAINSET_SETSIZE, d, s) #define DOMAINSET_NAND(d, s) BIT_NAND(DOMAINSET_SETSIZE, d, s) #define DOMAINSET_CLR_ATOMIC(n, p) BIT_CLR_ATOMIC(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_SET_ATOMIC(n, p) BIT_SET_ATOMIC(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_SET_ATOMIC_ACQ(n, p) \ BIT_SET_ATOMIC_ACQ(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_AND_ATOMIC(n, p) BIT_AND_ATOMIC(DOMAINSET_SETSIZE, n, p) #define DOMAINSET_OR_ATOMIC(d, s) BIT_OR_ATOMIC(DOMAINSET_SETSIZE, d, s) #define DOMAINSET_COPY_STORE_REL(f, t) \ BIT_COPY_STORE_REL(DOMAINSET_SETSIZE, f, t) #define DOMAINSET_FFS(p) BIT_FFS(DOMAINSET_SETSIZE, p) #define DOMAINSET_FLS(p) BIT_FLS(DOMAINSET_SETSIZE, p) #define DOMAINSET_COUNT(p) BIT_COUNT(DOMAINSET_SETSIZE, p) #define DOMAINSET_FSET BITSET_FSET(_NDOMAINSETWORDS) #define DOMAINSET_T_INITIALIZER BITSET_T_INITIALIZER #define DOMAINSET_POLICY_INVALID 0 #define DOMAINSET_POLICY_ROUNDROBIN 1 #define DOMAINSET_POLICY_FIRSTTOUCH 2 #define DOMAINSET_POLICY_PREFER 3 #define DOMAINSET_POLICY_INTERLEAVE 4 #define DOMAINSET_POLICY_MAX DOMAINSET_POLICY_INTERLEAVE #ifdef _KERNEL #if MAXMEMDOM < 256 typedef uint8_t domainid_t; #else typedef uint16_t domainid_t; #endif struct domainset { LIST_ENTRY(domainset) ds_link; domainset_t ds_mask; /* allowed domains. 
*/ uint16_t ds_policy; /* Policy type. */ domainid_t ds_prefer; /* Preferred domain or -1. */ domainid_t ds_cnt; /* popcnt from above. */ domainid_t ds_order[MAXMEMDOM]; /* nth domain table. */ }; -extern struct domainset domainset_prefer[MAXMEMDOM]; +extern struct domainset domainset_fixed[MAXMEMDOM], domainset_prefer[MAXMEMDOM]; +#define DOMAINSET_FIXED(domain) (&domainset_fixed[(domain)]) #define DOMAINSET_PREF(domain) (&domainset_prefer[(domain)]) extern struct domainset domainset_roundrobin; #define DOMAINSET_RR() (&domainset_roundrobin) void domainset_init(void); void domainset_zero(void); /* * Add a domainset to the system based on a key initializing policy, prefer, * and mask. Do not create and directly use domainset structures. The * returned value will not match the key pointer. */ struct domainset *domainset_create(const struct domainset *); #ifdef _SYS_SYSCTL_H_ int sysctl_handle_domainset(SYSCTL_HANDLER_ARGS); #endif #else __BEGIN_DECLS int cpuset_getdomain(cpulevel_t, cpuwhich_t, id_t, size_t, domainset_t *, int *); int cpuset_setdomain(cpulevel_t, cpuwhich_t, id_t, size_t, const domainset_t *, int); __END_DECLS #endif #endif /* !_SYS_DOMAINSET_H_ */ Index: head/sys/sys/malloc.h =================================================================== --- head/sys/sys/malloc.h (revision 339926) +++ head/sys/sys/malloc.h (revision 339927) @@ -1,266 +1,268 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1987, 1993 * The Regents of the University of California. * Copyright (c) 2005, 2009 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)malloc.h 8.5 (Berkeley) 5/3/95 * $FreeBSD$ */ #ifndef _SYS_MALLOC_H_ #define _SYS_MALLOC_H_ #include #ifdef _KERNEL #include #endif #include #include #include #include #define MINALLOCSIZE UMA_SMALLEST_UNIT /* * Flags to memory allocation functions. 
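 *
 * For example (illustrative only; 'p' and 'len' are hypothetical):
 *
 *	p = malloc(len, M_TEMP, M_NOWAIT);
 *	if (p == NULL)
 *		return (ENOMEM);
 *
 * M_NOWAIT allocations can fail and must be checked, whereas M_WAITOK
 * allocations may sleep but always return memory.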
*/ #define M_NOWAIT 0x0001 /* do not block */ #define M_WAITOK 0x0002 /* ok to block */ #define M_ZERO 0x0100 /* bzero the allocation */ #define M_NOVM 0x0200 /* don't ask VM for pages */ #define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */ #define M_NODUMP 0x0800 /* don't dump pages in this allocation */ #define M_FIRSTFIT 0x1000 /* Only for vmem, fast fit. */ #define M_BESTFIT 0x2000 /* Only for vmem, low fragmentation. */ #define M_EXEC 0x4000 /* allocate executable space. */ #define M_MAGIC 877983977 /* time when first defined :-) */ /* * Two malloc type structures are present: malloc_type, which is used by a * type owner to declare the type, and malloc_type_internal, which holds * malloc-owned statistics and other ABI-sensitive fields, such as the set of * malloc statistics indexed by the compile-time MAXCPU constant. * Applications should avoid introducing dependence on the allocator private * data layout and size. * * The malloc_type ks_next field is protected by malloc_mtx. Other fields in * malloc_type are static after initialization so unsynchronized. * * Statistics in malloc_type_stats are written only when holding a critical * section and running on the CPU associated with the index into the stat * array, but read lock-free resulting in possible (minor) races, which the * monitoring app should take into account. */ struct malloc_type_stats { uint64_t mts_memalloced; /* Bytes allocated on CPU. */ uint64_t mts_memfreed; /* Bytes freed on CPU. */ uint64_t mts_numallocs; /* Number of allocates on CPU. */ uint64_t mts_numfrees; /* number of frees on CPU. */ uint64_t mts_size; /* Bitmask of sizes allocated on CPU. */ uint64_t _mts_reserved1; /* Reserved field. */ uint64_t _mts_reserved2; /* Reserved field. */ uint64_t _mts_reserved3; /* Reserved field. */ }; /* * Index definitions for the mti_probes[] array. */ #define DTMALLOC_PROBE_MALLOC 0 #define DTMALLOC_PROBE_FREE 1 #define DTMALLOC_PROBE_MAX 2 struct malloc_type_internal { uint32_t mti_probes[DTMALLOC_PROBE_MAX]; /* DTrace probe ID array. */ u_char mti_zone; struct malloc_type_stats *mti_stats; }; /* * Public data structure describing a malloc type. Private data is hung off * of ks_handle to avoid encoding internal malloc(9) data structures in * modules, which will statically allocate struct malloc_type. */ struct malloc_type { struct malloc_type *ks_next; /* Next in global chain. */ u_long ks_magic; /* Detect programmer error. */ const char *ks_shortdesc; /* Printable type name. */ void *ks_handle; /* Priv. data, was lo_class. */ }; /* * Statistics structure headers for user space. The kern.malloc sysctl * exposes a structure stream consisting of a stream header, then a series of * malloc type headers and statistics structures (quantity maxcpus). For * convenience, the kernel will provide the current value of maxcpus at the * head of the stream. */ #define MALLOC_TYPE_STREAM_VERSION 0x00000001 struct malloc_type_stream_header { uint32_t mtsh_version; /* Stream format version. */ uint32_t mtsh_maxcpus; /* Value of MAXCPU for stream. */ uint32_t mtsh_count; /* Number of records. */ uint32_t _mtsh_pad; /* Pad/reserved field. 
*/ }; #define MALLOC_MAX_NAME 32 struct malloc_type_header { char mth_name[MALLOC_MAX_NAME]; }; #ifdef _KERNEL #define MALLOC_DEFINE(type, shortdesc, longdesc) \ struct malloc_type type[1] = { \ { NULL, M_MAGIC, shortdesc, NULL } \ }; \ SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init, \ type); \ SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY, \ malloc_uninit, type) #define MALLOC_DECLARE(type) \ extern struct malloc_type type[1] MALLOC_DECLARE(M_CACHE); MALLOC_DECLARE(M_DEVBUF); MALLOC_DECLARE(M_TEMP); /* * XXX this should be declared in , but that tends to fail * because is included in a header before the source file * has a chance to include to get MALLOC_DECLARE() defined. */ MALLOC_DECLARE(M_IOV); +struct domainset; extern struct mtx malloc_mtx; /* * Function type used when iterating over the list of malloc types. */ typedef void malloc_type_list_func_t(struct malloc_type *, void *); void contigfree(void *addr, unsigned long size, struct malloc_type *type); void *contigmalloc(unsigned long size, struct malloc_type *type, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) __malloc_like __result_use_check __alloc_size(1) __alloc_align(6); -void *contigmalloc_domain(unsigned long size, struct malloc_type *type, - int domain, int flags, vm_paddr_t low, vm_paddr_t high, +void *contigmalloc_domainset(unsigned long size, struct malloc_type *type, + struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high, unsigned long alignment, vm_paddr_t boundary) __malloc_like __result_use_check __alloc_size(1) __alloc_align(6); void free(void *addr, struct malloc_type *type); void free_domain(void *addr, struct malloc_type *type); void *malloc(size_t size, struct malloc_type *type, int flags) __malloc_like __result_use_check __alloc_size(1); /* * Try to optimize malloc(..., ..., M_ZERO) allocations by doing zeroing in * place if the size is known at compilation time. * * Passing the flag down requires malloc to blindly zero the entire object. * In practice a lot of the zeroing can be avoided if most of the object * gets explicitly initialized after the allocation. Letting the compiler * zero in place gives it the opportunity to take advantage of this state. * * Note that the operation is only applicable if both flags and size are * known at compilation time. If M_ZERO is passed but M_WAITOK is not, the * allocation can fail and a NULL check is needed. However, if M_WAITOK is * passed we know the allocation must succeed and the check can be elided. * * _malloc_item = malloc(_size, type, (flags) &~ M_ZERO); * if (((flags) & M_WAITOK) != 0 || _malloc_item != NULL) * bzero(_malloc_item, _size); * * If the flag is set, the compiler knows the left side is always true, * therefore the entire statement is true and the callsite is: * * _malloc_item = malloc(_size, type, (flags) &~ M_ZERO); * bzero(_malloc_item, _size); * * If the flag is not set, the compiler knows the left size is always false * and the NULL check is needed, therefore the callsite is: * * _malloc_item = malloc(_size, type, (flags) &~ M_ZERO); * if (_malloc_item != NULL) * bzero(_malloc_item, _size); * * The implementation is a macro because of what appears to be a clang 6 bug: * an inline function variant ended up being compiled to a mere malloc call * regardless of argument. gcc generates expected code (like the above). 
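 *
 * A callsite that benefits (illustrative; 'struct foo' is hypothetical):
 *
 *	fp = malloc(sizeof(struct foo), M_TEMP, M_WAITOK | M_ZERO);
 *
 * Both the size and the flags are compile-time constants, so the macro
 * below expands to an inline bzero() that the compiler can merge with the
 * caller's own field initialization; with a runtime size or flags the
 * plain malloc() path is taken unchanged.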
*/ #define malloc(size, type, flags) ({ \ void *_malloc_item; \ size_t _size = (size); \ if (__builtin_constant_p(size) && __builtin_constant_p(flags) &&\ ((flags) & M_ZERO) != 0) { \ _malloc_item = malloc(_size, type, (flags) &~ M_ZERO); \ if (((flags) & M_WAITOK) != 0 || \ __predict_true(_malloc_item != NULL)) \ bzero(_malloc_item, _size); \ } else { \ _malloc_item = malloc(_size, type, flags); \ } \ _malloc_item; \ }) -void *malloc_domain(size_t size, struct malloc_type *type, int domain, - int flags) __malloc_like __result_use_check __alloc_size(1); +void *malloc_domainset(size_t size, struct malloc_type *type, + struct domainset *ds, int flags) __malloc_like __result_use_check + __alloc_size(1); void *mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags) __malloc_like __result_use_check __alloc_size2(1, 2); void malloc_init(void *); int malloc_last_fail(void); void malloc_type_allocated(struct malloc_type *type, unsigned long size); void malloc_type_freed(struct malloc_type *type, unsigned long size); void malloc_type_list(malloc_type_list_func_t *, void *); void malloc_uninit(void *); void *realloc(void *addr, size_t size, struct malloc_type *type, int flags) __result_use_check __alloc_size(2); void *reallocf(void *addr, size_t size, struct malloc_type *type, int flags) __result_use_check __alloc_size(2); struct malloc_type *malloc_desc2type(const char *desc); /* * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW */ #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 8 / 2)) static inline bool WOULD_OVERFLOW(size_t nmemb, size_t size) { return ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && nmemb > 0 && __SIZE_T_MAX / nmemb < size); } #undef MUL_NO_OVERFLOW #endif /* _KERNEL */ #endif /* !_SYS_MALLOC_H_ */ Index: head/sys/vm/uma_core.c =================================================================== --- head/sys/vm/uma_core.c (revision 339926) +++ head/sys/vm/uma_core.c (revision 339927) @@ -1,4265 +1,4265 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson * Copyright (c) 2004, 2005 Bosko Milekic * Copyright (c) 2004-2006 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * uma_core.c Implementation of the Universal Memory allocator * * This allocator is intended to replace the multitude of similar object caches * in the standard FreeBSD kernel. The intent is to be flexible as well as * efficient. A primary design goal is to return unused memory to the rest of * the system. This will make the system as a whole more flexible due to the * ability to move memory to subsystems which most need it instead of leaving * pools of reserved memory unused. * * The basic ideas stem from similar slab/zone based allocators whose algorithms * are well known. * */ /* * TODO: * - Improve memory usage for large allocations * - Investigate cache size adjustments */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_param.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG_MEMGUARD #include #endif /* * This is the zone and keg from which all zones are spawned. */ static uma_zone_t kegs; static uma_zone_t zones; /* This is the zone from which all offpage uma_slab_ts are allocated. */ static uma_zone_t slabzone; /* * The initial hash tables come out of this zone so they can be allocated * prior to malloc coming up. */ static uma_zone_t hashzone; /* The boot-time adjusted value for cache line alignment. */ int uma_align_cache = 64 - 1; static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); /* * Are we allowed to allocate buckets? */ static int bucketdisable = 1; /* Linked list of all kegs in the system */ static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs); /* Linked list of all cache-only zones in the system */ static LIST_HEAD(,uma_zone) uma_cachezones = LIST_HEAD_INITIALIZER(uma_cachezones); /* This RW lock protects the keg list */ static struct rwlock_padalign __exclusive_cache_line uma_rwlock; /* * Pointer and counter to pool of pages, that is preallocated at * startup to bootstrap UMA. */ static char *bootmem; static int boot_pages; static struct sx uma_drain_lock; /* kmem soft limit. */ static unsigned long uma_kmem_limit = LONG_MAX; static volatile unsigned long uma_kmem_total; /* Is the VM done starting up? */ static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS, BOOT_RUNNING } booted = BOOT_COLD; /* * This is the handle used to schedule events that need to happen * outside of the allocation fast path. */ static struct callout uma_callout; #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ /* * This structure is passed as the zone ctor arg so that I don't have to create * a special allocation function just for zones. */ struct uma_zctor_args { const char *name; size_t size; uma_ctor ctor; uma_dtor dtor; uma_init uminit; uma_fini fini; uma_import import; uma_release release; void *arg; uma_keg_t keg; int align; uint32_t flags; }; struct uma_kctor_args { uma_zone_t zone; size_t size; uma_init uminit; uma_fini fini; int align; uint32_t flags; }; struct uma_bucket_zone { uma_zone_t ubz_zone; char *ubz_name; int ubz_entries; /* Number of items it can hold. */ int ubz_maxsize; /* Maximum allocation size per-item. */ }; /* * Compute the actual number of bucket entries to pack them in power * of two sizes for more efficient space utilization. 
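 *
 * Rough arithmetic, for illustration: with 8-byte pointers and a bucket
 * header of H bytes (H depends on struct uma_bucket and is not assumed
 * here), BUCKET_SIZE(n) works out to (8 * n - H) / 8, i.e. n minus the
 * few pointer slots consumed by the header, so the whole bucket still
 * fits a power-of-two sized allocation.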
*/ #define BUCKET_SIZE(n) \ (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *)) #define BUCKET_MAX BUCKET_SIZE(256) struct uma_bucket_zone bucket_zones[] = { { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 }, { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 }, { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 }, { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 }, { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 }, { NULL, "32 Bucket", BUCKET_SIZE(32), 512 }, { NULL, "64 Bucket", BUCKET_SIZE(64), 256 }, { NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, { NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, { NULL, NULL, 0} }; /* * Flags and enumerations to be passed to internal functions. */ enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI }; #define UMA_ANYDOMAIN -1 /* Special value for domain search. */ /* Prototypes.. */ int uma_startup_count(int); void uma_startup(void *, int); void uma_startup1(void); void uma_startup2(void); static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void page_free(void *, vm_size_t, uint8_t); static void pcpu_page_free(void *, vm_size_t, uint8_t); static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int); static void cache_drain(uma_zone_t); static void bucket_drain(uma_zone_t, uma_bucket_t); static void bucket_cache_drain(uma_zone_t zone); static int keg_ctor(void *, int, void *, int); static void keg_dtor(void *, int, void *); static int zone_ctor(void *, int, void *, int); static void zone_dtor(void *, int, void *); static int zero_init(void *, int, int); static void keg_small_init(uma_keg_t keg); static void keg_large_init(uma_keg_t keg); static void zone_foreach(void (*zfunc)(uma_zone_t)); static void zone_timeout(uma_zone_t zone); static int hash_alloc(struct uma_hash *); static int hash_expand(struct uma_hash *, struct uma_hash *); static void hash_free(struct uma_hash *hash); static void uma_timeout(void *); static void uma_startup3(void); static void *zone_alloc_item(uma_zone_t, void *, int, int); static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); static void bucket_enable(void); static void bucket_init(void); static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); static void bucket_zone_drain(void); static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int); static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int); static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int); static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item); static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, int align, uint32_t flags); static int zone_import(uma_zone_t, void **, int, int, int); static void zone_release(uma_zone_t, void **, int); static void uma_zero_item(void *, uma_zone_t); void uma_print_zone(uma_zone_t); void uma_print_stats(void); static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS); static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS); #ifdef INVARIANTS static bool uma_dbg_kskip(uma_keg_t keg, void *mem); static bool uma_dbg_zskip(uma_zone_t zone, void *mem); static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item); static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void 
*item); static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0, "Memory allocation debugging"); static u_int dbg_divisor = 1; SYSCTL_UINT(_vm_debug, OID_AUTO, divisor, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0, "Debug & thrash every this item in memory allocator"); static counter_u64_t uma_dbg_cnt = EARLY_COUNTER; static counter_u64_t uma_skip_cnt = EARLY_COUNTER; SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD, &uma_dbg_cnt, "memory items debugged"); SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD, &uma_skip_cnt, "memory items skipped, not debugged"); #endif SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT, 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones"); SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats"); static int zone_warnings = 1; SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0, "Warn when UMA zones becomes full"); /* Adjust bytes under management by UMA. */ static inline void uma_total_dec(unsigned long size) { atomic_subtract_long(&uma_kmem_total, size); } static inline void uma_total_inc(unsigned long size) { if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit) uma_reclaim_wakeup(); } /* * This routine checks to see whether or not it's safe to enable buckets. */ static void bucket_enable(void) { bucketdisable = vm_page_count_min(); } /* * Initialize bucket_zones, the array of zones of buckets of various sizes. * * For each zone, calculate the memory required for each bucket, consisting * of the header and an array of pointers. */ static void bucket_init(void) { struct uma_bucket_zone *ubz; int size; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) { size = roundup(sizeof(struct uma_bucket), sizeof(void *)); size += sizeof(void *) * ubz->ubz_entries; ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA); } } /* * Given a desired number of entries for a bucket, return the zone from which * to allocate the bucket. */ static struct uma_bucket_zone * bucket_zone_lookup(int entries) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) if (ubz->ubz_entries >= entries) return (ubz); ubz--; return (ubz); } static int bucket_select(int size) { struct uma_bucket_zone *ubz; ubz = &bucket_zones[0]; if (size > ubz->ubz_maxsize) return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1); for (; ubz->ubz_entries != 0; ubz++) if (ubz->ubz_maxsize < size) break; ubz--; return (ubz->ubz_entries); } static uma_bucket_t bucket_alloc(uma_zone_t zone, void *udata, int flags) { struct uma_bucket_zone *ubz; uma_bucket_t bucket; /* * This is to stop us from allocating per cpu buckets while we're * running out of vm.boot_pages. Otherwise, we would exhaust the * boot pages. This also prevents us from allocating buckets in * low memory situations. */ if (bucketdisable) return (NULL); /* * To limit bucket recursion we store the original zone flags * in a cookie passed via zalloc_arg/zfree_arg. This allows the * NOVM flag to persist even through deep recursions. We also * store ZFLAG_BUCKET once we have recursed attempting to allocate * a bucket for a bucket zone so we do not allow infinite bucket * recursion. 
This cookie will even persist to frees of unused * buckets via the allocation path or bucket allocations in the * free path. */ if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) udata = (void *)(uintptr_t)zone->uz_flags; else { if ((uintptr_t)udata & UMA_ZFLAG_BUCKET) return (NULL); udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET); } if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY) flags |= M_NOVM; ubz = bucket_zone_lookup(zone->uz_count); if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0) ubz++; bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags); if (bucket) { #ifdef INVARIANTS bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); #endif bucket->ub_cnt = 0; bucket->ub_entries = ubz->ubz_entries; } return (bucket); } static void bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) { struct uma_bucket_zone *ubz; KASSERT(bucket->ub_cnt == 0, ("bucket_free: Freeing a non free bucket.")); if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) udata = (void *)(uintptr_t)zone->uz_flags; ubz = bucket_zone_lookup(bucket->ub_entries); uma_zfree_arg(ubz->ubz_zone, bucket, udata); } static void bucket_zone_drain(void) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) zone_drain(ubz->ubz_zone); } static void zone_log_warning(uma_zone_t zone) { static const struct timeval warninterval = { 300, 0 }; if (!zone_warnings || zone->uz_warning == NULL) return; if (ratecheck(&zone->uz_ratecheck, &warninterval)) printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); } static inline void zone_maxaction(uma_zone_t zone) { if (zone->uz_maxaction.ta_func != NULL) taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); } static void zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) { uma_klink_t klink; LIST_FOREACH(klink, &zone->uz_kegs, kl_link) kegfn(klink->kl_keg); } /* * Routine called by timeout which is used to fire off some time interval * based calculations. (stats, hash size, etc.) * * Arguments: * arg Unused * * Returns: * Nothing */ static void uma_timeout(void *unused) { bucket_enable(); zone_foreach(zone_timeout); /* Reschedule this event */ callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); } /* * Routine to perform timeout driven calculations. This expands the * hashes and does per cpu statistics aggregation. * * Returns nothing. */ static void keg_timeout(uma_keg_t keg) { KEG_LOCK(keg); /* * Expand the keg hash table. * * This is done if the number of slabs is larger than the hash size. * What I'm trying to do here is completely reduce collisions. This * may be a little aggressive. Should I allow for two collisions max? */ if (keg->uk_flags & UMA_ZONE_HASH && keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { struct uma_hash newhash; struct uma_hash oldhash; int ret; /* * This is so involved because allocating and freeing * while the keg lock is held will lead to deadlock. * I have to do everything in stages and check for * races. */ newhash = keg->uk_hash; KEG_UNLOCK(keg); ret = hash_alloc(&newhash); KEG_LOCK(keg); if (ret) { if (hash_expand(&keg->uk_hash, &newhash)) { oldhash = keg->uk_hash; keg->uk_hash = newhash; } else oldhash = newhash; KEG_UNLOCK(keg); hash_free(&oldhash); return; } } KEG_UNLOCK(keg); } static void zone_timeout(uma_zone_t zone) { zone_foreach_keg(zone, &keg_timeout); } /* * Allocate and zero fill the next sized hash table from the appropriate * backing store. * * Arguments: * hash A new hash structure with the old hash size in uh_hashsize * * Returns: * 1 on success and 0 on failure. 
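 *
 * Editorial note: on a grow the new table is simply double the old size
 * and comes from malloc(9); the very first table instead comes from
 * hashzone since, as noted where that zone is declared, it may be needed
 * before malloc(9) is available.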
*/ static int hash_alloc(struct uma_hash *hash) { int oldsize; int alloc; oldsize = hash->uh_hashsize; /* We're just going to go to a power of two greater */ if (oldsize) { hash->uh_hashsize = oldsize * 2; alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; hash->uh_slab_hash = (struct slabhead *)malloc(alloc, M_UMAHASH, M_NOWAIT); } else { alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; hash->uh_slab_hash = zone_alloc_item(hashzone, NULL, UMA_ANYDOMAIN, M_WAITOK); hash->uh_hashsize = UMA_HASH_SIZE_INIT; } if (hash->uh_slab_hash) { bzero(hash->uh_slab_hash, alloc); hash->uh_hashmask = hash->uh_hashsize - 1; return (1); } return (0); } /* * Expands the hash table for HASH zones. This is done from zone_timeout * to reduce collisions. This must not be done in the regular allocation * path, otherwise, we can recurse on the vm while allocating pages. * * Arguments: * oldhash The hash you want to expand * newhash The hash structure for the new table * * Returns: * Nothing * * Discussion: */ static int hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) { uma_slab_t slab; int hval; int i; if (!newhash->uh_slab_hash) return (0); if (oldhash->uh_hashsize >= newhash->uh_hashsize) return (0); /* * I need to investigate hash algorithms for resizing without a * full rehash. */ for (i = 0; i < oldhash->uh_hashsize; i++) while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); hval = UMA_HASH(newhash, slab->us_data); SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], slab, us_hlink); } return (1); } /* * Free the hash bucket to the appropriate backing store. * * Arguments: * slab_hash The hash bucket we're freeing * hashsize The number of entries in that hash bucket * * Returns: * Nothing */ static void hash_free(struct uma_hash *hash) { if (hash->uh_slab_hash == NULL) return; if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE); else free(hash->uh_slab_hash, M_UMAHASH); } /* * Frees all outstanding items in a bucket * * Arguments: * zone The zone to free to, must be unlocked. * bucket The free/alloc bucket with items, cpu queue must be locked. * * Returns: * Nothing */ static void bucket_drain(uma_zone_t zone, uma_bucket_t bucket) { int i; if (bucket == NULL) return; if (zone->uz_fini) for (i = 0; i < bucket->ub_cnt; i++) zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); bucket->ub_cnt = 0; } /* * Drains the per cpu caches for a zone. * * NOTE: This may only be called while the zone is being turn down, and not * during normal operation. This is necessary in order that we do not have * to migrate CPUs to drain the per-CPU caches. * * Arguments: * zone The zone to drain, must be unlocked. * * Returns: * Nothing */ static void cache_drain(uma_zone_t zone) { uma_cache_t cache; int cpu; /* * XXX: It is safe to not lock the per-CPU caches, because we're * tearing down the zone anyway. I.e., there will be no further use * of the caches at this point. * * XXX: It would good to be able to assert that the zone is being * torn down to prevent improper use of cache_drain(). * * XXX: We lock the zone before passing into bucket_cache_drain() as * it is used elsewhere. Should the tear-down path be made special * there in some form? 
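 *
 * Editorial note, not part of the original source: the expected call
 * path is teardown only, roughly
 *
 *	uma_zdestroy(zone)
 *	    zone_free_item(zones, zone, NULL, SKIP_NONE)
 *	        zone_dtor(zone, ...)
 *	            cache_drain(zone)
 *
 * so no other thread can still be filling the per-CPU buckets while
 * they are walked here.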
*/ CPU_FOREACH(cpu) { cache = &zone->uz_cpu[cpu]; bucket_drain(zone, cache->uc_allocbucket); bucket_drain(zone, cache->uc_freebucket); if (cache->uc_allocbucket != NULL) bucket_free(zone, cache->uc_allocbucket, NULL); if (cache->uc_freebucket != NULL) bucket_free(zone, cache->uc_freebucket, NULL); cache->uc_allocbucket = cache->uc_freebucket = NULL; } ZONE_LOCK(zone); bucket_cache_drain(zone); ZONE_UNLOCK(zone); } static void cache_shrink(uma_zone_t zone) { if (zone->uz_flags & UMA_ZFLAG_INTERNAL) return; ZONE_LOCK(zone); zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2; ZONE_UNLOCK(zone); } static void cache_drain_safe_cpu(uma_zone_t zone) { uma_cache_t cache; uma_bucket_t b1, b2; int domain; if (zone->uz_flags & UMA_ZFLAG_INTERNAL) return; b1 = b2 = NULL; ZONE_LOCK(zone); critical_enter(); if (zone->uz_flags & UMA_ZONE_NUMA) domain = PCPU_GET(domain); else domain = 0; cache = &zone->uz_cpu[curcpu]; if (cache->uc_allocbucket) { if (cache->uc_allocbucket->ub_cnt != 0) LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, cache->uc_allocbucket, ub_link); else b1 = cache->uc_allocbucket; cache->uc_allocbucket = NULL; } if (cache->uc_freebucket) { if (cache->uc_freebucket->ub_cnt != 0) LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, cache->uc_freebucket, ub_link); else b2 = cache->uc_freebucket; cache->uc_freebucket = NULL; } critical_exit(); ZONE_UNLOCK(zone); if (b1) bucket_free(zone, b1, NULL); if (b2) bucket_free(zone, b2, NULL); } /* * Safely drain per-CPU caches of a zone(s) to alloc bucket. * This is an expensive call because it needs to bind to all CPUs * one by one and enter a critical section on each of them in order * to safely access their cache buckets. * Zone lock must not be held on call this function. */ static void cache_drain_safe(uma_zone_t zone) { int cpu; /* * Polite bucket sizes shrinking was not enouth, shrink aggressively. */ if (zone) cache_shrink(zone); else zone_foreach(cache_shrink); CPU_FOREACH(cpu) { thread_lock(curthread); sched_bind(curthread, cpu); thread_unlock(curthread); if (zone) cache_drain_safe_cpu(zone); else zone_foreach(cache_drain_safe_cpu); } thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } /* * Drain the cached buckets from a zone. Expects a locked zone on entry. */ static void bucket_cache_drain(uma_zone_t zone) { uma_zone_domain_t zdom; uma_bucket_t bucket; int i; /* * Drain the bucket queues and free the buckets. */ for (i = 0; i < vm_ndomains; i++) { zdom = &zone->uz_domain[i]; while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { LIST_REMOVE(bucket, ub_link); ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, NULL); ZONE_LOCK(zone); } } /* * Shrink further bucket sizes. Price of single zone lock collision * is probably lower then price of global cache drain. */ if (zone->uz_count > zone->uz_count_min) zone->uz_count--; } static void keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) { uint8_t *mem; int i; uint8_t flags; CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes", keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera); mem = slab->us_data; flags = slab->us_flags; i = start; if (keg->uk_fini != NULL) { for (i--; i > -1; i--) #ifdef INVARIANTS /* * trash_fini implies that dtor was trash_dtor. trash_fini * would check that memory hasn't been modified since free, * which executed trash_dtor. * That's why we need to run uma_dbg_kskip() check here, * albeit we don't make skip check for other init/fini * invocations. 
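 *
 * Editorial illustration, not part of the original source: only items
 * that were not skipped by the sampling check at free time were filled
 * by trash_dtor(), so the guard below amounts to, in effect,
 *
 *	if (item_was_trashed || keg->uk_fini != trash_fini)
 *		keg->uk_fini(item, keg->uk_size);
 *
 * where item_was_trashed is a hypothetical stand-in for the negated
 * uma_dbg_kskip() test; running trash_fini() on an unsampled item
 * would report a spurious modification after free.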
*/ if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) || keg->uk_fini != trash_fini) #endif keg->uk_fini(slab->us_data + (keg->uk_rsize * i), keg->uk_size); } if (keg->uk_flags & UMA_ZONE_OFFPAGE) zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags); uma_total_dec(PAGE_SIZE * keg->uk_ppera); } /* * Frees pages from a keg back to the system. This is done on demand from * the pageout daemon. * * Returns nothing. */ static void keg_drain(uma_keg_t keg) { struct slabhead freeslabs = { 0 }; uma_domain_t dom; uma_slab_t slab, tmp; int i; /* * We don't want to take pages from statically allocated kegs at this * time */ if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) return; CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u", keg->uk_name, keg, keg->uk_free); KEG_LOCK(keg); if (keg->uk_free == 0) goto finished; for (i = 0; i < vm_ndomains; i++) { dom = &keg->uk_domain[i]; LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) { /* We have nowhere to free these to. */ if (slab->us_flags & UMA_SLAB_BOOT) continue; LIST_REMOVE(slab, us_link); keg->uk_pages -= keg->uk_ppera; keg->uk_free -= keg->uk_ipers; if (keg->uk_flags & UMA_ZONE_HASH) UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); } } finished: KEG_UNLOCK(keg); while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); keg_free_slab(keg, slab, keg->uk_ipers); } } static void zone_drain_wait(uma_zone_t zone, int waitok) { /* * Set draining to interlock with zone_dtor() so we can release our * locks as we go. Only dtor() should do a WAITOK call since it * is the only call that knows the structure will still be available * when it wakes up. */ ZONE_LOCK(zone); while (zone->uz_flags & UMA_ZFLAG_DRAINING) { if (waitok == M_NOWAIT) goto out; msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); } zone->uz_flags |= UMA_ZFLAG_DRAINING; bucket_cache_drain(zone); ZONE_UNLOCK(zone); /* * The DRAINING flag protects us from being freed while * we're running. Normally the uma_rwlock would protect us but we * must be able to release and acquire the right lock for each keg. */ zone_foreach_keg(zone, &keg_drain); ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_DRAINING; wakeup(zone); out: ZONE_UNLOCK(zone); } void zone_drain(uma_zone_t zone) { zone_drain_wait(zone, M_NOWAIT); } /* * Allocate a new slab for a keg. This does not insert the slab onto a list. * If the allocation was successful, the keg lock will be held upon return, * otherwise the keg will be left unlocked. * * Arguments: * wait Shall we wait? * * Returns: * The slab that was allocated or NULL if there is no memory and the * caller specified M_NOWAIT. */ static uma_slab_t keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait) { uma_alloc allocf; uma_slab_t slab; unsigned long size; uint8_t *mem; uint8_t flags; int i; KASSERT(domain >= 0 && domain < vm_ndomains, ("keg_alloc_slab: domain %d out of range", domain)); mtx_assert(&keg->uk_lock, MA_OWNED); allocf = keg->uk_allocf; KEG_UNLOCK(keg); slab = NULL; mem = NULL; if (keg->uk_flags & UMA_ZONE_OFFPAGE) { slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait); if (slab == NULL) goto out; } /* * This reproduces the old vm_zone behavior of zero filling pages the * first time they are added to a zone. * * Malloced items are zeroed in uma_zalloc. 
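 *
 * Editorial example, not part of the original source (names are
 * illustrative): for a malloc-backed keg the visible zeroing is
 * requested per allocation, e.g.
 *
 *	p = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
 *
 * so pre-zeroing the slab here would be redundant, whereas for other
 * kegs the pages are zeroed once, when first added to the zone,
 * reproducing the old vm_zone behavior noted above.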
*/ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) wait |= M_ZERO; else wait &= ~M_ZERO; if (keg->uk_flags & UMA_ZONE_NODUMP) wait |= M_NODUMP; /* zone is passed for legacy reasons. */ size = keg->uk_ppera * PAGE_SIZE; mem = allocf(zone, size, domain, &flags, wait); if (mem == NULL) { if (keg->uk_flags & UMA_ZONE_OFFPAGE) zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); slab = NULL; goto out; } uma_total_inc(size); /* Point the slab into the allocated memory */ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) slab = (uma_slab_t )(mem + keg->uk_pgoff); if (keg->uk_flags & UMA_ZONE_VTOSLAB) for (i = 0; i < keg->uk_ppera; i++) vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); slab->us_keg = keg; slab->us_data = mem; slab->us_freecount = keg->uk_ipers; slab->us_flags = flags; slab->us_domain = domain; BIT_FILL(SLAB_SETSIZE, &slab->us_free); #ifdef INVARIANTS BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree); #endif if (keg->uk_init != NULL) { for (i = 0; i < keg->uk_ipers; i++) if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), keg->uk_size, wait) != 0) break; if (i != keg->uk_ipers) { keg_free_slab(keg, slab, i); slab = NULL; goto out; } } KEG_LOCK(keg); CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)", slab, keg->uk_name, keg); if (keg->uk_flags & UMA_ZONE_HASH) UMA_HASH_INSERT(&keg->uk_hash, slab, mem); keg->uk_pages += keg->uk_ppera; keg->uk_free += keg->uk_ipers; out: return (slab); } /* * This function is intended to be used early on in place of page_alloc() so * that we may use the boot time page cache to satisfy allocations before * the VM is ready. */ static void * startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { uma_keg_t keg; void *mem; int pages; keg = zone_first_keg(zone); /* * If we are in BOOT_BUCKETS or higher, than switch to real * allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC. */ switch (booted) { case BOOT_COLD: case BOOT_STRAPPED: break; case BOOT_PAGEALLOC: if (keg->uk_ppera > 1) break; case BOOT_BUCKETS: case BOOT_RUNNING: #ifdef UMA_MD_SMALL_ALLOC keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc; #else keg->uk_allocf = page_alloc; #endif return keg->uk_allocf(zone, bytes, domain, pflag, wait); } /* * Check our small startup cache to see if it has pages remaining. */ pages = howmany(bytes, PAGE_SIZE); KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__)); if (pages > boot_pages) panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name); #ifdef DIAGNOSTIC printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name, boot_pages); #endif mem = bootmem; boot_pages -= pages; bootmem += pages * PAGE_SIZE; *pflag = UMA_SLAB_BOOT; return (mem); } /* * Allocates a number of pages from the system * * Arguments: * bytes The number of bytes requested * wait Shall we wait? * * Returns: * A pointer to the alloced memory or possibly * NULL if M_NOWAIT is set. 
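 *
 * Editorial note, not part of the original source: once the switch in
 * startup_alloc() above fires, the keg is rewired permanently, e.g.
 *
 *	keg->uk_allocf = page_alloc;
 *	return keg->uk_allocf(zone, bytes, domain, pflag, wait);
 *
 * so the boot-page pool is no longer consulted and page_alloc() below
 * (or uma_small_alloc() where available) serves all further slab
 * allocations.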
*/ static void * page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { void *p; /* Returned page */ *pflag = UMA_SLAB_KERNEL; - p = (void *) kmem_malloc_domain(domain, bytes, wait); + p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait); return (p); } static void * pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { struct pglist alloctail; vm_offset_t addr, zkva; int cpu, flags; vm_page_t p, p_next; #ifdef NUMA struct pcpu *pc; #endif MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE); TAILQ_INIT(&alloctail); flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | malloc2vm_flags(wait); *pflag = UMA_SLAB_KERNEL; for (cpu = 0; cpu <= mp_maxid; cpu++) { if (CPU_ABSENT(cpu)) { p = vm_page_alloc(NULL, 0, flags); } else { #ifndef NUMA p = vm_page_alloc(NULL, 0, flags); #else pc = pcpu_find(cpu); p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags); if (__predict_false(p == NULL)) p = vm_page_alloc(NULL, 0, flags); #endif } if (__predict_false(p == NULL)) goto fail; TAILQ_INSERT_TAIL(&alloctail, p, listq); } if ((addr = kva_alloc(bytes)) == 0) goto fail; zkva = addr; TAILQ_FOREACH(p, &alloctail, listq) { pmap_qenter(zkva, &p, 1); zkva += PAGE_SIZE; } return ((void*)addr); fail: TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { vm_page_unwire(p, PQ_NONE); vm_page_free(p); } return (NULL); } /* * Allocates a number of pages from within an object * * Arguments: * bytes The number of bytes requested * wait Shall we wait? * * Returns: * A pointer to the alloced memory or possibly * NULL if M_NOWAIT is set. */ static void * noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, int wait) { TAILQ_HEAD(, vm_page) alloctail; u_long npages; vm_offset_t retkva, zkva; vm_page_t p, p_next; uma_keg_t keg; TAILQ_INIT(&alloctail); keg = zone_first_keg(zone); npages = howmany(bytes, PAGE_SIZE); while (npages > 0) { p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : VM_ALLOC_NOWAIT)); if (p != NULL) { /* * Since the page does not belong to an object, its * listq is unused. */ TAILQ_INSERT_TAIL(&alloctail, p, listq); npages--; continue; } /* * Page allocation failed, free intermediate pages and * exit. 
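 *
 * Editorial note, not part of the original source: the
 * kmem_malloc_domainset() call now used by page_alloc() above pins the
 * request to one domain via DOMAINSET_FIXED(domain); a policy-driven
 * caller could instead pass another domainset, for example
 *
 *	p = (void *)kmem_malloc_domainset(DOMAINSET_RR(), bytes, wait);
 *
 * DOMAINSET_RR() being the same default policy keg_ctor() installs for
 * kegs further down in this file.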
*/ TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { vm_page_unwire(p, PQ_NONE); vm_page_free(p); } return (NULL); } *flags = UMA_SLAB_PRIV; zkva = keg->uk_kva + atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); retkva = zkva; TAILQ_FOREACH(p, &alloctail, listq) { pmap_qenter(zkva, &p, 1); zkva += PAGE_SIZE; } return ((void *)retkva); } /* * Frees a number of pages to the system * * Arguments: * mem A pointer to the memory to be freed * size The size of the memory being freed * flags The original p->us_flags field * * Returns: * Nothing */ static void page_free(void *mem, vm_size_t size, uint8_t flags) { if ((flags & UMA_SLAB_KERNEL) == 0) panic("UMA: page_free used with invalid flags %x", flags); kmem_free((vm_offset_t)mem, size); } /* * Frees pcpu zone allocations * * Arguments: * mem A pointer to the memory to be freed * size The size of the memory being freed * flags The original p->us_flags field * * Returns: * Nothing */ static void pcpu_page_free(void *mem, vm_size_t size, uint8_t flags) { vm_offset_t sva, curva; vm_paddr_t paddr; vm_page_t m; MPASS(size == (mp_maxid+1)*PAGE_SIZE); sva = (vm_offset_t)mem; for (curva = sva; curva < sva + size; curva += PAGE_SIZE) { paddr = pmap_kextract(curva); m = PHYS_TO_VM_PAGE(paddr); vm_page_unwire(m, PQ_NONE); vm_page_free(m); } pmap_qremove(sva, size >> PAGE_SHIFT); kva_free(sva, size); } /* * Zero fill initializer * * Arguments/Returns follow uma_init specifications */ static int zero_init(void *mem, int size, int flags) { bzero(mem, size); return (0); } /* * Finish creating a small uma keg. This calculates ipers, and the keg size. * * Arguments * keg The zone we should initialize * * Returns * Nothing */ static void keg_small_init(uma_keg_t keg) { u_int rsize; u_int memused; u_int wastedspace; u_int shsize; u_int slabsize; if (keg->uk_flags & UMA_ZONE_PCPU) { u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU; slabsize = UMA_PCPU_ALLOC_SIZE; keg->uk_ppera = ncpus; } else { slabsize = UMA_SLAB_SIZE; keg->uk_ppera = 1; } /* * Calculate the size of each allocation (rsize) according to * alignment. If the requested size is smaller than we have * allocation bits for we round it up. */ rsize = keg->uk_size; if (rsize < slabsize / SLAB_SETSIZE) rsize = slabsize / SLAB_SETSIZE; if (rsize & keg->uk_align) rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); keg->uk_rsize = rsize; KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || keg->uk_rsize < UMA_PCPU_ALLOC_SIZE, ("%s: size %u too large", __func__, keg->uk_rsize)); if (keg->uk_flags & UMA_ZONE_OFFPAGE) shsize = 0; else shsize = sizeof(struct uma_slab); if (rsize <= slabsize - shsize) keg->uk_ipers = (slabsize - shsize) / rsize; else { /* Handle special case when we have 1 item per slab, so * alignment requirement can be relaxed. */ KASSERT(keg->uk_size <= slabsize - shsize, ("%s: size %u greater than slab", __func__, keg->uk_size)); keg->uk_ipers = 1; } KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); memused = keg->uk_ipers * rsize + shsize; wastedspace = slabsize - memused; /* * We can't do OFFPAGE if we're internal or if we've been * asked to not go to the VM for buckets. If we do this we * may end up going to the VM for slabs which we do not * want to do if we're UMA_ZFLAG_CACHEONLY as a result * of UMA_ZONE_VM, which clearly forbids it. */ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) return; /* * See if using an OFFPAGE slab will limit our waste. 
Only do * this if it permits more items per-slab. * * XXX We could try growing slabsize to limit max waste as well. * Historically this was not done because the VM could not * efficiently handle contiguous allocations. */ if ((wastedspace >= slabsize / UMA_MAX_WASTE) && (keg->uk_ipers < (slabsize / keg->uk_rsize))) { keg->uk_ipers = slabsize / keg->uk_rsize; KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); CTR6(KTR_UMA, "UMA decided we need offpage slab headers for " "keg: %s(%p), calculated wastedspace = %d, " "maximum wasted space allowed = %d, " "calculated ipers = %d, " "new wasted space = %d\n", keg->uk_name, keg, wastedspace, slabsize / UMA_MAX_WASTE, keg->uk_ipers, slabsize - keg->uk_ipers * keg->uk_rsize); keg->uk_flags |= UMA_ZONE_OFFPAGE; } if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) keg->uk_flags |= UMA_ZONE_HASH; } /* * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be * more complicated. * * Arguments * keg The keg we should initialize * * Returns * Nothing */ static void keg_large_init(uma_keg_t keg) { u_int shsize; KASSERT(keg != NULL, ("Keg is null in keg_large_init")); KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); keg->uk_ipers = 1; keg->uk_rsize = keg->uk_size; /* Check whether we have enough space to not do OFFPAGE. */ if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { shsize = sizeof(struct uma_slab); if (shsize & UMA_ALIGN_PTR) shsize = (shsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1); if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) { /* * We can't do OFFPAGE if we're internal, in which case * we need an extra page per allocation to contain the * slab header. */ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0) keg->uk_flags |= UMA_ZONE_OFFPAGE; else keg->uk_ppera++; } } if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) keg->uk_flags |= UMA_ZONE_HASH; } static void keg_cachespread_init(uma_keg_t keg) { int alignsize; int trailer; int pages; int rsize; KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); alignsize = keg->uk_align + 1; rsize = keg->uk_size; /* * We want one item to start on every align boundary in a page. To * do this we will span pages. We will also extend the item by the * size of align if it is an even multiple of align. Otherwise, it * would fall on the same boundary every time. */ if (rsize & keg->uk_align) rsize = (rsize & ~keg->uk_align) + alignsize; if ((rsize & alignsize) == 0) rsize += alignsize; trailer = rsize - keg->uk_size; pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; pages = MIN(pages, (128 * 1024) / PAGE_SIZE); keg->uk_rsize = rsize; keg->uk_ppera = pages; keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; KASSERT(keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, keg->uk_ipers)); } /* * Keg header ctor. This initializes all fields, locks, etc. And inserts * the keg onto the global keg list. 
* * Arguments/Returns follow uma_ctor specifications * udata Actually uma_kctor_args */ static int keg_ctor(void *mem, int size, void *udata, int flags) { struct uma_kctor_args *arg = udata; uma_keg_t keg = mem; uma_zone_t zone; bzero(keg, size); keg->uk_size = arg->size; keg->uk_init = arg->uminit; keg->uk_fini = arg->fini; keg->uk_align = arg->align; keg->uk_free = 0; keg->uk_reserve = 0; keg->uk_pages = 0; keg->uk_flags = arg->flags; keg->uk_slabzone = NULL; /* * We use a global round-robin policy by default. Zones with * UMA_ZONE_NUMA set will use first-touch instead, in which case the * iterator is never run. */ keg->uk_dr.dr_policy = DOMAINSET_RR(); keg->uk_dr.dr_iter = 0; /* * The master zone is passed to us at keg-creation time. */ zone = arg->zone; keg->uk_name = zone->uz_name; if (arg->flags & UMA_ZONE_VM) keg->uk_flags |= UMA_ZFLAG_CACHEONLY; if (arg->flags & UMA_ZONE_ZINIT) keg->uk_init = zero_init; if (arg->flags & UMA_ZONE_MALLOC) keg->uk_flags |= UMA_ZONE_VTOSLAB; if (arg->flags & UMA_ZONE_PCPU) #ifdef SMP keg->uk_flags |= UMA_ZONE_OFFPAGE; #else keg->uk_flags &= ~UMA_ZONE_PCPU; #endif if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { keg_cachespread_init(keg); } else { if (keg->uk_size > UMA_SLAB_SPACE) keg_large_init(keg); else keg_small_init(keg); } if (keg->uk_flags & UMA_ZONE_OFFPAGE) keg->uk_slabzone = slabzone; /* * If we haven't booted yet we need allocations to go through the * startup cache until the vm is ready. */ if (booted < BOOT_PAGEALLOC) keg->uk_allocf = startup_alloc; #ifdef UMA_MD_SMALL_ALLOC else if (keg->uk_ppera == 1) keg->uk_allocf = uma_small_alloc; #endif else if (keg->uk_flags & UMA_ZONE_PCPU) keg->uk_allocf = pcpu_page_alloc; else keg->uk_allocf = page_alloc; #ifdef UMA_MD_SMALL_ALLOC if (keg->uk_ppera == 1) keg->uk_freef = uma_small_free; else #endif if (keg->uk_flags & UMA_ZONE_PCPU) keg->uk_freef = pcpu_page_free; else keg->uk_freef = page_free; /* * Initialize keg's lock */ KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); /* * If we're putting the slab header in the actual page we need to * figure out where in each page it goes. This calculates a right * justified offset into the memory on an ALIGN_PTR boundary. */ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { u_int totsize; /* Size of the slab struct and free list */ totsize = sizeof(struct uma_slab); if (totsize & UMA_ALIGN_PTR) totsize = (totsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1); keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; /* * The only way the following is possible is if with our * UMA_ALIGN_PTR adjustments we are now bigger than * UMA_SLAB_SIZE. I haven't checked whether this is * mathematically possible for all cases, so we make * sure here anyway. */ totsize = keg->uk_pgoff + sizeof(struct uma_slab); if (totsize > PAGE_SIZE * keg->uk_ppera) { printf("zone %s ipers %d rsize %d size %d\n", zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size); panic("UMA slab won't fit."); } } if (keg->uk_flags & UMA_ZONE_HASH) hash_alloc(&keg->uk_hash); CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", keg, zone->uz_name, zone, (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, keg->uk_free); LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); rw_wlock(&uma_rwlock); LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); rw_wunlock(&uma_rwlock); return (0); } /* * Zone header ctor. This initializes all fields, locks, etc. 
* * Arguments/Returns follow uma_ctor specifications * udata Actually uma_zctor_args */ static int zone_ctor(void *mem, int size, void *udata, int flags) { struct uma_zctor_args *arg = udata; uma_zone_t zone = mem; uma_zone_t z; uma_keg_t keg; bzero(zone, size); zone->uz_name = arg->name; zone->uz_ctor = arg->ctor; zone->uz_dtor = arg->dtor; zone->uz_slab = zone_fetch_slab; zone->uz_init = NULL; zone->uz_fini = NULL; zone->uz_allocs = 0; zone->uz_frees = 0; zone->uz_fails = 0; zone->uz_sleeps = 0; zone->uz_count = 0; zone->uz_count_min = 0; zone->uz_flags = 0; zone->uz_warning = NULL; /* The domain structures follow the cpu structures. */ zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; timevalclear(&zone->uz_ratecheck); keg = arg->keg; ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); /* * This is a pure cache zone, no kegs. */ if (arg->import) { if (arg->flags & UMA_ZONE_VM) arg->flags |= UMA_ZFLAG_CACHEONLY; zone->uz_flags = arg->flags; zone->uz_size = arg->size; zone->uz_import = arg->import; zone->uz_release = arg->release; zone->uz_arg = arg->arg; zone->uz_lockptr = &zone->uz_lock; rw_wlock(&uma_rwlock); LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); rw_wunlock(&uma_rwlock); goto out; } /* * Use the regular zone/keg/slab allocator. */ zone->uz_import = (uma_import)zone_import; zone->uz_release = (uma_release)zone_release; zone->uz_arg = zone; if (arg->flags & UMA_ZONE_SECONDARY) { KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); zone->uz_init = arg->uminit; zone->uz_fini = arg->fini; zone->uz_lockptr = &keg->uk_lock; zone->uz_flags |= UMA_ZONE_SECONDARY; rw_wlock(&uma_rwlock); ZONE_LOCK(zone); LIST_FOREACH(z, &keg->uk_zones, uz_link) { if (LIST_NEXT(z, uz_link) == NULL) { LIST_INSERT_AFTER(z, zone, uz_link); break; } } ZONE_UNLOCK(zone); rw_wunlock(&uma_rwlock); } else if (keg == NULL) { if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, arg->align, arg->flags)) == NULL) return (ENOMEM); } else { struct uma_kctor_args karg; int error; /* We should only be here from uma_startup() */ karg.size = arg->size; karg.uminit = arg->uminit; karg.fini = arg->fini; karg.align = arg->align; karg.flags = arg->flags; karg.zone = zone; error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, flags); if (error) return (error); } /* * Link in the first keg. */ zone->uz_klink.kl_keg = keg; LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); zone->uz_lockptr = &keg->uk_lock; zone->uz_size = keg->uk_size; zone->uz_flags |= (keg->uk_flags & (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); /* * Some internal zones don't have room allocated for the per cpu * caches. If we're internal, bail out here. */ if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, ("Secondary zone requested UMA_ZFLAG_INTERNAL")); return (0); } out: KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) != (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET), ("Invalid zone flag combination")); if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) zone->uz_count = BUCKET_MAX; else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0) zone->uz_count = 0; else zone->uz_count = bucket_select(zone->uz_size); zone->uz_count_min = zone->uz_count; return (0); } /* * Keg header dtor. This frees all data, destroys locks, frees the hash * table and removes the keg from the global list. 
* * Arguments/Returns follow uma_dtor specifications * udata unused */ static void keg_dtor(void *arg, int size, void *udata) { uma_keg_t keg; keg = (uma_keg_t)arg; KEG_LOCK(keg); if (keg->uk_free != 0) { printf("Freed UMA keg (%s) was not empty (%d items). " " Lost %d pages of memory.\n", keg->uk_name ? keg->uk_name : "", keg->uk_free, keg->uk_pages); } KEG_UNLOCK(keg); hash_free(&keg->uk_hash); KEG_LOCK_FINI(keg); } /* * Zone header dtor. * * Arguments/Returns follow uma_dtor specifications * udata unused */ static void zone_dtor(void *arg, int size, void *udata) { uma_klink_t klink; uma_zone_t zone; uma_keg_t keg; zone = (uma_zone_t)arg; keg = zone_first_keg(zone); if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) cache_drain(zone); rw_wlock(&uma_rwlock); LIST_REMOVE(zone, uz_link); rw_wunlock(&uma_rwlock); /* * XXX there are some races here where * the zone can be drained but zone lock * released and then refilled before we * remove it... we dont care for now */ zone_drain_wait(zone, M_WAITOK); /* * Unlink all of our kegs. */ while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { klink->kl_keg = NULL; LIST_REMOVE(klink, kl_link); if (klink == &zone->uz_klink) continue; free(klink, M_TEMP); } /* * We only destroy kegs from non secondary zones. */ if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { rw_wlock(&uma_rwlock); LIST_REMOVE(keg, uk_link); rw_wunlock(&uma_rwlock); zone_free_item(kegs, keg, NULL, SKIP_NONE); } ZONE_LOCK_FINI(zone); } /* * Traverses every zone in the system and calls a callback * * Arguments: * zfunc A pointer to a function which accepts a zone * as an argument. * * Returns: * Nothing */ static void zone_foreach(void (*zfunc)(uma_zone_t)) { uma_keg_t keg; uma_zone_t zone; rw_rlock(&uma_rwlock); LIST_FOREACH(keg, &uma_kegs, uk_link) { LIST_FOREACH(zone, &keg->uk_zones, uz_link) zfunc(zone); } rw_runlock(&uma_rwlock); } /* * Count how many pages do we need to bootstrap. VM supplies * its need in early zones in the argument, we add up our zones, * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The * zone of zones and zone of kegs are accounted separately. */ #define UMA_BOOT_ZONES 11 /* Zone of zones and zone of kegs have arbitrary alignment. */ #define UMA_BOOT_ALIGN 32 static int zsize, ksize; int uma_startup_count(int vm_zones) { int zones, pages; ksize = sizeof(struct uma_keg) + (sizeof(struct uma_domain) * vm_ndomains); zsize = sizeof(struct uma_zone) + (sizeof(struct uma_cache) * (mp_maxid + 1)) + (sizeof(struct uma_zone_domain) * vm_ndomains); /* * Memory for the zone of kegs and its keg, * and for zone of zones. */ pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 + roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE); #ifdef UMA_MD_SMALL_ALLOC zones = UMA_BOOT_ZONES; #else zones = UMA_BOOT_ZONES + vm_zones; vm_zones = 0; #endif /* Memory for the rest of startup zones, UMA and VM, ... */ if (zsize > UMA_SLAB_SPACE) pages += (zones + vm_zones) * howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE); else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE) pages += zones; else pages += howmany(zones, UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN)); /* ... and their kegs. Note that zone of zones allocates a keg! */ pages += howmany(zones + 1, UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN)); /* * Most of startup zones are not going to be offpages, that's * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all * calculations. Some large bucket zones will be offpage, and * thus will allocate hashes. 
We take conservative approach * and assume that all zones may allocate hash. This may give * us some positive inaccuracy, usually an extra single page. */ pages += howmany(zones, UMA_SLAB_SPACE / (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT)); return (pages); } void uma_startup(void *mem, int npages) { struct uma_zctor_args args; uma_keg_t masterkeg; uintptr_t m; #ifdef DIAGNOSTIC printf("Entering %s with %d boot pages configured\n", __func__, npages); #endif rw_init(&uma_rwlock, "UMA lock"); /* Use bootpages memory for the zone of zones and zone of kegs. */ m = (uintptr_t)mem; zones = (uma_zone_t)m; m += roundup(zsize, CACHE_LINE_SIZE); kegs = (uma_zone_t)m; m += roundup(zsize, CACHE_LINE_SIZE); masterkeg = (uma_keg_t)m; m += roundup(ksize, CACHE_LINE_SIZE); m = roundup(m, PAGE_SIZE); npages -= (m - (uintptr_t)mem) / PAGE_SIZE; mem = (void *)m; /* "manually" create the initial zone */ memset(&args, 0, sizeof(args)); args.name = "UMA Kegs"; args.size = ksize; args.ctor = keg_ctor; args.dtor = keg_dtor; args.uminit = zero_init; args.fini = NULL; args.keg = masterkeg; args.align = UMA_BOOT_ALIGN - 1; args.flags = UMA_ZFLAG_INTERNAL; zone_ctor(kegs, zsize, &args, M_WAITOK); bootmem = mem; boot_pages = npages; args.name = "UMA Zones"; args.size = zsize; args.ctor = zone_ctor; args.dtor = zone_dtor; args.uminit = zero_init; args.fini = NULL; args.keg = NULL; args.align = UMA_BOOT_ALIGN - 1; args.flags = UMA_ZFLAG_INTERNAL; zone_ctor(zones, zsize, &args, M_WAITOK); /* Now make a zone for slab headers */ slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_slab), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); hashzone = uma_zcreate("UMA Hash", sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); bucket_init(); booted = BOOT_STRAPPED; } void uma_startup1(void) { #ifdef DIAGNOSTIC printf("Entering %s with %d boot pages left\n", __func__, boot_pages); #endif booted = BOOT_PAGEALLOC; } void uma_startup2(void) { #ifdef DIAGNOSTIC printf("Entering %s with %d boot pages left\n", __func__, boot_pages); #endif booted = BOOT_BUCKETS; sx_init(&uma_drain_lock, "umadrain"); bucket_enable(); } /* * Initialize our callout handle * */ static void uma_startup3(void) { #ifdef INVARIANTS TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor); uma_dbg_cnt = counter_u64_alloc(M_WAITOK); uma_skip_cnt = counter_u64_alloc(M_WAITOK); #endif callout_init(&uma_callout, 1); callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); booted = BOOT_RUNNING; } static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, int align, uint32_t flags) { struct uma_kctor_args args; args.size = size; args.uminit = uminit; args.fini = fini; args.align = (align == UMA_ALIGN_CACHE) ? 
uma_align_cache : align; args.flags = flags; args.zone = zone; return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK)); } /* Public functions */ /* See uma.h */ void uma_set_align(int align) { if (align != UMA_ALIGN_CACHE) uma_align_cache = align; } /* See uma.h */ uma_zone_t uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, uma_init uminit, uma_fini fini, int align, uint32_t flags) { struct uma_zctor_args args; uma_zone_t res; bool locked; KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", align, name)); /* This stuff is essential for the zone ctor */ memset(&args, 0, sizeof(args)); args.name = name; args.size = size; args.ctor = ctor; args.dtor = dtor; args.uminit = uminit; args.fini = fini; #ifdef INVARIANTS /* * If a zone is being created with an empty constructor and * destructor, pass UMA constructor/destructor which checks for * memory use after free. */ if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { args.ctor = trash_ctor; args.dtor = trash_dtor; args.uminit = trash_init; args.fini = trash_fini; } #endif args.align = align; args.flags = flags; args.keg = NULL; if (booted < BOOT_BUCKETS) { locked = false; } else { sx_slock(&uma_drain_lock); locked = true; } res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); if (locked) sx_sunlock(&uma_drain_lock); return (res); } /* See uma.h */ uma_zone_t uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, uma_init zinit, uma_fini zfini, uma_zone_t master) { struct uma_zctor_args args; uma_keg_t keg; uma_zone_t res; bool locked; keg = zone_first_keg(master); memset(&args, 0, sizeof(args)); args.name = name; args.size = keg->uk_size; args.ctor = ctor; args.dtor = dtor; args.uminit = zinit; args.fini = zfini; args.align = keg->uk_align; args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; args.keg = keg; if (booted < BOOT_BUCKETS) { locked = false; } else { sx_slock(&uma_drain_lock); locked = true; } /* XXX Attaches only one keg of potentially many. */ res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); if (locked) sx_sunlock(&uma_drain_lock); return (res); } /* See uma.h */ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease, void *arg, int flags) { struct uma_zctor_args args; memset(&args, 0, sizeof(args)); args.name = name; args.size = size; args.ctor = ctor; args.dtor = dtor; args.uminit = zinit; args.fini = zfini; args.import = zimport; args.release = zrelease; args.arg = arg; args.align = 0; args.flags = flags; return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); } static void zone_lock_pair(uma_zone_t a, uma_zone_t b) { if (a < b) { ZONE_LOCK(a); mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); } else { ZONE_LOCK(b); mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); } } static void zone_unlock_pair(uma_zone_t a, uma_zone_t b) { ZONE_UNLOCK(a); ZONE_UNLOCK(b); } int uma_zsecond_add(uma_zone_t zone, uma_zone_t master) { uma_klink_t klink; uma_klink_t kl; int error; error = 0; klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); zone_lock_pair(zone, master); /* * zone must use vtoslab() to resolve objects and must already be * a secondary. */ if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { error = EINVAL; goto out; } /* * The new master must also use vtoslab(). 
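 *
 * Editorial example, not part of the original source (zone and
 * function names are illustrative): a typical consumer creates a
 * master zone and layers a secondary zone over the same keg, e.g.
 *
 *	zone_foo = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	zone_foo_big = uma_zsecond_create("foo big",
 *	    foo_big_ctor, foo_big_dtor, NULL, NULL, zone_foo);
 *
 * uma_zsecond_add() here can then attach a further keg to such a
 * secondary zone, provided both sides resolve items with vtoslab() as
 * checked above and below.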
*/ if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { error = EINVAL; goto out; } /* * The underlying object must be the same size. rsize * may be different. */ if (master->uz_size != zone->uz_size) { error = E2BIG; goto out; } /* * Put it at the end of the list. */ klink->kl_keg = zone_first_keg(master); LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { if (LIST_NEXT(kl, kl_link) == NULL) { LIST_INSERT_AFTER(kl, klink, kl_link); break; } } klink = NULL; zone->uz_flags |= UMA_ZFLAG_MULTI; zone->uz_slab = zone_fetch_slab_multi; out: zone_unlock_pair(zone, master); if (klink != NULL) free(klink, M_TEMP); return (error); } /* See uma.h */ void uma_zdestroy(uma_zone_t zone) { sx_slock(&uma_drain_lock); zone_free_item(zones, zone, NULL, SKIP_NONE); sx_sunlock(&uma_drain_lock); } void uma_zwait(uma_zone_t zone) { void *item; item = uma_zalloc_arg(zone, NULL, M_WAITOK); uma_zfree(zone, item); } void * uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags) { void *item; #ifdef SMP int i; MPASS(zone->uz_flags & UMA_ZONE_PCPU); #endif item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO); if (item != NULL && (flags & M_ZERO)) { #ifdef SMP for (i = 0; i <= mp_maxid; i++) bzero(zpcpu_get_cpu(item, i), zone->uz_size); #else bzero(item, zone->uz_size); #endif } return (item); } /* * A stub while both regular and pcpu cases are identical. */ void uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata) { #ifdef SMP MPASS(zone->uz_flags & UMA_ZONE_PCPU); #endif uma_zfree_arg(zone, item, udata); } /* See uma.h */ void * uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) { uma_zone_domain_t zdom; uma_bucket_t bucket; uma_cache_t cache; void *item; int cpu, domain, lockfail; #ifdef INVARIANTS bool skipdbg; #endif /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); /* This is the fast path allocation */ CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d", curthread, zone->uz_name, zone, flags); if (flags & M_WAITOK) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "uma_zalloc_arg: zone \"%s\"", zone->uz_name); } KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC")); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zalloc_arg: called with spinlock or critical section held")); if (zone->uz_flags & UMA_ZONE_PCPU) KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone " "with M_ZERO passed")); #ifdef DEBUG_MEMGUARD if (memguard_cmp_zone(zone)) { item = memguard_alloc(zone->uz_size, flags); if (item != NULL) { if (zone->uz_init != NULL && zone->uz_init(item, zone->uz_size, flags) != 0) return (NULL); if (zone->uz_ctor != NULL && zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { zone->uz_fini(item, zone->uz_size); return (NULL); } return (item); } /* This is unfortunate but should not be fatal. */ } #endif /* * If possible, allocate from the per-CPU cache. There are two * requirements for safe access to the per-CPU cache: (1) the thread * accessing the cache must not be preempted or yield during access, * and (2) the thread must not migrate CPUs without switching which * cache it accesses. We rely on a critical section to prevent * preemption and migration. We release the critical section in * order to acquire the zone mutex if we are unable to allocate from * the current cache; when we re-acquire the critical section, we * must detect and handle migration if it has occurred. 
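 *
 * Editorial sketch, not part of the original source: the discipline
 * described above reduces to the pattern
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	(use the cache; no sleeping, no zone lock)
 *	critical_exit();
 *
 * and any path that leaves the critical section must re-read curcpu
 * and the cache pointer afterwards, as the restart labels below do.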
*/ zalloc_restart: critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; zalloc_start: bucket = cache->uc_allocbucket; if (bucket != NULL && bucket->ub_cnt > 0) { bucket->ub_cnt--; item = bucket->ub_bucket[bucket->ub_cnt]; #ifdef INVARIANTS bucket->ub_bucket[bucket->ub_cnt] = NULL; #endif KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); cache->uc_allocs++; critical_exit(); #ifdef INVARIANTS skipdbg = uma_dbg_zskip(zone, item); #endif if (zone->uz_ctor != NULL && #ifdef INVARIANTS (!skipdbg || zone->uz_ctor != trash_ctor || zone->uz_dtor != trash_dtor) && #endif zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { atomic_add_long(&zone->uz_fails, 1); zone_free_item(zone, item, udata, SKIP_DTOR); return (NULL); } #ifdef INVARIANTS if (!skipdbg) uma_dbg_alloc(zone, NULL, item); #endif if (flags & M_ZERO) uma_zero_item(item, zone); return (item); } /* * We have run out of items in our alloc bucket. * See if we can switch with our free bucket. */ bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt > 0) { CTR2(KTR_UMA, "uma_zalloc: zone %s(%p) swapping empty with alloc", zone->uz_name, zone); cache->uc_freebucket = cache->uc_allocbucket; cache->uc_allocbucket = bucket; goto zalloc_start; } /* * Discard any empty allocation bucket while we hold no locks. */ bucket = cache->uc_allocbucket; cache->uc_allocbucket = NULL; critical_exit(); if (bucket != NULL) bucket_free(zone, bucket, udata); if (zone->uz_flags & UMA_ZONE_NUMA) { domain = PCPU_GET(domain); if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } else domain = UMA_ANYDOMAIN; /* Short-circuit for zones without buckets and low memory. */ if (zone->uz_count == 0 || bucketdisable) goto zalloc_item; /* * Attempt to retrieve the item from the per-CPU cache has failed, so * we must go back to the zone. This requires the zone lock, so we * must drop the critical section, then re-acquire it when we go back * to the cache. Since the critical section is released, we may be * preempted or migrate. As such, make sure not to maintain any * thread-local state specific to the cache from prior to releasing * the critical section. */ lockfail = 0; if (ZONE_TRYLOCK(zone) == 0) { /* Record contention to size the buckets. */ ZONE_LOCK(zone); lockfail = 1; } critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; /* See if we lost the race to fill the cache. */ if (cache->uc_allocbucket != NULL) { ZONE_UNLOCK(zone); goto zalloc_start; } /* * Check the zone's cache of buckets. */ if (domain == UMA_ANYDOMAIN) zdom = &zone->uz_domain[0]; else zdom = &zone->uz_domain[domain]; if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { KASSERT(bucket->ub_cnt != 0, ("uma_zalloc_arg: Returning an empty bucket.")); LIST_REMOVE(bucket, ub_link); cache->uc_allocbucket = bucket; ZONE_UNLOCK(zone); goto zalloc_start; } /* We are no longer associated with this CPU. */ critical_exit(); /* * We bump the uz count when the cache size is insufficient to * handle the working set. */ if (lockfail && zone->uz_count < BUCKET_MAX) zone->uz_count++; ZONE_UNLOCK(zone); /* * Now lets just fill a bucket and put it on the free list. If that * works we'll restart the allocation from the beginning and it * will use the just filled bucket. */ bucket = zone_alloc_bucket(zone, udata, domain, flags); CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", zone->uz_name, zone, bucket); if (bucket != NULL) { ZONE_LOCK(zone); critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; /* * See if we lost the race or were migrated. 
Cache the * initialized bucket to make this less likely or claim * the memory directly. */ if (cache->uc_allocbucket == NULL && ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || domain == PCPU_GET(domain))) { cache->uc_allocbucket = bucket; } else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { critical_exit(); ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, udata); goto zalloc_restart; } else LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); ZONE_UNLOCK(zone); goto zalloc_start; } /* * We may not be able to get a bucket so return an actual item. */ zalloc_item: item = zone_alloc_item(zone, udata, domain, flags); return (item); } void * uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) { /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); /* This is the fast path allocation */ CTR5(KTR_UMA, "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d", curthread, zone->uz_name, zone, domain, flags); if (flags & M_WAITOK) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "uma_zalloc_domain: zone \"%s\"", zone->uz_name); } KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zalloc_domain: called with spinlock or critical section held")); return (zone_alloc_item(zone, udata, domain, flags)); } /* * Find a slab with some space. Prefer slabs that are partially used over those * that are totally full. This helps to reduce fragmentation. * * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check * only 'domain'. */ static uma_slab_t keg_first_slab(uma_keg_t keg, int domain, bool rr) { uma_domain_t dom; uma_slab_t slab; int start; KASSERT(domain >= 0 && domain < vm_ndomains, ("keg_first_slab: domain %d out of range", domain)); slab = NULL; start = domain; do { dom = &keg->uk_domain[domain]; if (!LIST_EMPTY(&dom->ud_part_slab)) return (LIST_FIRST(&dom->ud_part_slab)); if (!LIST_EMPTY(&dom->ud_free_slab)) { slab = LIST_FIRST(&dom->ud_free_slab); LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); return (slab); } if (rr) domain = (domain + 1) % vm_ndomains; } while (domain != start); return (NULL); } static uma_slab_t keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) { uint32_t reserve; mtx_assert(&keg->uk_lock, MA_OWNED); reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; if (keg->uk_free <= reserve) return (NULL); return (keg_first_slab(keg, domain, rr)); } static uma_slab_t keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) { struct vm_domainset_iter di; uma_domain_t dom; uma_slab_t slab; int aflags, domain; bool rr; restart: mtx_assert(&keg->uk_lock, MA_OWNED); /* * Use the keg's policy if upper layers haven't already specified a * domain (as happens with first-touch zones). * * To avoid races we run the iterator with the keg lock held, but that * means that we cannot allow the vm_domainset layer to sleep. Thus, * clear M_WAITOK and handle low memory conditions locally. */ rr = rdomain == UMA_ANYDOMAIN; if (rr) { aflags = (flags & ~M_WAITOK) | M_NOWAIT; vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &aflags); } else { aflags = flags; domain = rdomain; } for (;;) { slab = keg_fetch_free_slab(keg, domain, rr, flags); if (slab != NULL) { MPASS(slab->us_keg == keg); return (slab); } /* * M_NOVM means don't ask at all! 
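 *
 * Editorial note, not part of the original source: M_NOVM is how
 * internal callers keep a keg from growing here; bucket_alloc() above
 * sets it for cache-only zones, i.e.
 *
 *	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
 *		flags |= M_NOVM;
 *
 * so those requests are satisfied only from slabs the keg already
 * owns, or fail.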
*/ if (flags & M_NOVM) break; if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { keg->uk_flags |= UMA_ZFLAG_FULL; /* * If this is not a multi-zone, set the FULL bit. * Otherwise slab_multi() takes care of it. */ if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { zone->uz_flags |= UMA_ZFLAG_FULL; zone_log_warning(zone); zone_maxaction(zone); } if (flags & M_NOWAIT) return (NULL); zone->uz_sleeps++; msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); continue; } slab = keg_alloc_slab(keg, zone, domain, aflags); /* * If we got a slab here it's safe to mark it partially used * and return. We assume that the caller is going to remove * at least one item. */ if (slab) { MPASS(slab->us_keg == keg); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); return (slab); } KEG_LOCK(keg); if (rr && vm_domainset_iter_policy(&di, &domain) != 0) { if ((flags & M_WAITOK) != 0) { KEG_UNLOCK(keg); vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask); KEG_LOCK(keg); goto restart; } break; } } /* * We might not have been able to get a slab but another cpu * could have while we were unlocked. Check again before we * fail. */ if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) { MPASS(slab->us_keg == keg); return (slab); } return (NULL); } static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) { uma_slab_t slab; if (keg == NULL) { keg = zone_first_keg(zone); KEG_LOCK(keg); } for (;;) { slab = keg_fetch_slab(keg, zone, domain, flags); if (slab) return (slab); if (flags & (M_NOWAIT | M_NOVM)) break; } KEG_UNLOCK(keg); return (NULL); } /* * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns * with the keg locked. On NULL no lock is held. * * The last pointer is used to seed the search. It is not required. */ static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags) { uma_klink_t klink; uma_slab_t slab; uma_keg_t keg; int flags; int empty; int full; /* * Don't wait on the first pass. This will skip limit tests * as well. We don't want to block if we can find a provider * without blocking. */ flags = (rflags & ~M_WAITOK) | M_NOWAIT; /* * Use the last slab allocated as a hint for where to start * the search. */ if (last != NULL) { slab = keg_fetch_slab(last, zone, domain, flags); if (slab) return (slab); KEG_UNLOCK(last); } /* * Loop until we have a slab incase of transient failures * while M_WAITOK is specified. I'm not sure this is 100% * required but we've done it for so long now. */ for (;;) { empty = 0; full = 0; /* * Search the available kegs for slabs. Be careful to hold the * correct lock while calling into the keg layer. */ LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { keg = klink->kl_keg; KEG_LOCK(keg); if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { slab = keg_fetch_slab(keg, zone, domain, flags); if (slab) return (slab); } if (keg->uk_flags & UMA_ZFLAG_FULL) full++; else empty++; KEG_UNLOCK(keg); } if (rflags & (M_NOWAIT | M_NOVM)) break; flags = rflags; /* * All kegs are full. XXX We can't atomically check all kegs * and sleep so just sleep for a short period and retry. 
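 *
 * Editorial example, not part of the original source (values are
 * illustrative): the limit and warning machinery exercised here is
 * configured through the public setters, e.g.
 *
 *	uma_zone_set_max(zone, 1024);
 *	uma_zone_set_warning(zone, "out of foo objects");
 *
 * after which allocations beyond the limit sleep in "keglimit" or
 * "zonelimit" (or fail with M_NOWAIT) and the warning is rate-limited
 * by zone_log_warning().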
*/ if (full && !empty) { ZONE_LOCK(zone); zone->uz_flags |= UMA_ZFLAG_FULL; zone->uz_sleeps++; zone_log_warning(zone); zone_maxaction(zone); msleep(zone, zone->uz_lockptr, PVM, "zonelimit", hz/100); zone->uz_flags &= ~UMA_ZFLAG_FULL; ZONE_UNLOCK(zone); continue; } } return (NULL); } static void * slab_alloc_item(uma_keg_t keg, uma_slab_t slab) { uma_domain_t dom; void *item; uint8_t freei; MPASS(keg == slab->us_keg); mtx_assert(&keg->uk_lock, MA_OWNED); freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); item = slab->us_data + (keg->uk_rsize * freei); slab->us_freecount--; keg->uk_free--; /* Move this slab to the full list */ if (slab->us_freecount == 0) { LIST_REMOVE(slab, us_link); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); } return (item); } static int zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags) { uma_slab_t slab; uma_keg_t keg; #ifdef NUMA int stripe; #endif int i; slab = NULL; keg = NULL; /* Try to keep the buckets totally full */ for (i = 0; i < max; ) { if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL) break; keg = slab->us_keg; #ifdef NUMA stripe = howmany(max, vm_ndomains); #endif while (slab->us_freecount && i < max) { bucket[i++] = slab_alloc_item(keg, slab); if (keg->uk_free <= keg->uk_reserve) break; #ifdef NUMA /* * If the zone is striped we pick a new slab for every * N allocations. Eliminating this conditional will * instead pick a new domain for each bucket rather * than stripe within each bucket. The current option * produces more fragmentation and requires more cpu * time but yields better distribution. */ if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 && vm_ndomains > 1 && --stripe == 0) break; #endif } /* Don't block if we allocated any successfully. */ flags &= ~M_WAITOK; flags |= M_NOWAIT; } if (slab != NULL) KEG_UNLOCK(keg); return i; } static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) { uma_bucket_t bucket; int max; CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain); /* Don't wait for buckets, preserve caller's NOVM setting. */ bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); if (bucket == NULL) return (NULL); max = MIN(bucket->ub_entries, zone->uz_count); bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, max, domain, flags); /* * Initialize the memory if necessary. */ if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { int i; for (i = 0; i < bucket->ub_cnt; i++) if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, flags) != 0) break; /* * If we couldn't initialize the whole bucket, put the * rest back onto the freelist. */ if (i != bucket->ub_cnt) { zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], bucket->ub_cnt - i); #ifdef INVARIANTS bzero(&bucket->ub_bucket[i], sizeof(void *) * (bucket->ub_cnt - i)); #endif bucket->ub_cnt = i; } } if (bucket->ub_cnt == 0) { bucket_free(zone, bucket, udata); atomic_add_long(&zone->uz_fails, 1); return (NULL); } return (bucket); } /* * Allocates a single item from a zone. * * Arguments * zone The zone to alloc for. * udata The data to be passed to the constructor. * domain The domain to allocate from or UMA_ANYDOMAIN. * flags M_WAITOK, M_NOWAIT, M_ZERO. 
* * Returns * NULL if there is no memory and M_NOWAIT is set * An item if successful */ static void * zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) { void *item; #ifdef INVARIANTS bool skipdbg; #endif item = NULL; if (domain != UMA_ANYDOMAIN) { /* avoid allocs targeting empty domains */ if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) goto fail; atomic_add_long(&zone->uz_allocs, 1); #ifdef INVARIANTS skipdbg = uma_dbg_zskip(zone, item); #endif /* * We have to call both the zone's init (not the keg's init) * and the zone's ctor. This is because the item is going from * a keg slab directly to the user, and the user is expecting it * to be both zone-init'd as well as zone-ctor'd. */ if (zone->uz_init != NULL) { if (zone->uz_init(item, zone->uz_size, flags) != 0) { zone_free_item(zone, item, udata, SKIP_FINI); goto fail; } } if (zone->uz_ctor != NULL && #ifdef INVARIANTS (!skipdbg || zone->uz_ctor != trash_ctor || zone->uz_dtor != trash_dtor) && #endif zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { zone_free_item(zone, item, udata, SKIP_DTOR); goto fail; } #ifdef INVARIANTS if (!skipdbg) uma_dbg_alloc(zone, NULL, item); #endif if (flags & M_ZERO) uma_zero_item(item, zone); CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, zone->uz_name, zone); return (item); fail: CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", zone->uz_name, zone); atomic_add_long(&zone->uz_fails, 1); return (NULL); } /* See uma.h */ void uma_zfree_arg(uma_zone_t zone, void *item, void *udata) { uma_cache_t cache; uma_bucket_t bucket; uma_zone_domain_t zdom; int cpu, domain, lockfail; #ifdef INVARIANTS bool skipdbg; #endif /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, zone->uz_name); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zfree_arg: called with spinlock or critical section held")); /* uma_zfree(..., NULL) does nothing, to match free(9). */ if (item == NULL) return; #ifdef DEBUG_MEMGUARD if (is_memguard_addr(item)) { if (zone->uz_dtor != NULL) zone->uz_dtor(item, zone->uz_size, udata); if (zone->uz_fini != NULL) zone->uz_fini(item, zone->uz_size); memguard_free(item); return; } #endif #ifdef INVARIANTS skipdbg = uma_dbg_zskip(zone, item); if (skipdbg == false) { if (zone->uz_flags & UMA_ZONE_MALLOC) uma_dbg_free(zone, udata, item); else uma_dbg_free(zone, NULL, item); } if (zone->uz_dtor != NULL && (!skipdbg || zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor)) #else if (zone->uz_dtor != NULL) #endif zone->uz_dtor(item, zone->uz_size, udata); /* * The race here is acceptable. If we miss it we'll just have to wait * a little longer for the limits to be reset. */ if (zone->uz_flags & UMA_ZFLAG_FULL) goto zfree_item; /* * If possible, free to the per-CPU cache. There are two * requirements for safe access to the per-CPU cache: (1) the thread * accessing the cache must not be preempted or yield during access, * and (2) the thread must not migrate CPUs without switching which * cache it accesses. We rely on a critical section to prevent * preemption and migration. We release the critical section in * order to acquire the zone mutex if we are unable to free to the * current cache; when we re-acquire the critical section, we must * detect and handle migration if it has occurred. 
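 *
 * Editorial note, not part of the original source: the common entry
 * point is the uma.h wrapper, which supplies a NULL udata, so
 *
 *	uma_zfree(zone, item);
 *
 * behaves as uma_zfree_arg(zone, item, NULL), mirroring uma_zalloc()
 * and uma_zalloc_arg() on the allocation side.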
*/ zfree_restart: critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; zfree_start: /* * Try to free into the allocbucket first to give LIFO ordering * for cache-hot datastructures. Spill over into the freebucket * if necessary. Alloc will swap them if one runs dry. */ bucket = cache->uc_allocbucket; if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, ("uma_zfree: Freeing to non free bucket index.")); bucket->ub_bucket[bucket->ub_cnt] = item; bucket->ub_cnt++; cache->uc_frees++; critical_exit(); return; } /* * We must go back the zone, which requires acquiring the zone lock, * which in turn means we must release and re-acquire the critical * section. Since the critical section is released, we may be * preempted or migrate. As such, make sure not to maintain any * thread-local state specific to the cache from prior to releasing * the critical section. */ critical_exit(); if (zone->uz_count == 0 || bucketdisable) goto zfree_item; lockfail = 0; if (ZONE_TRYLOCK(zone) == 0) { /* Record contention to size the buckets. */ ZONE_LOCK(zone); lockfail = 1; } critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { ZONE_UNLOCK(zone); goto zfree_start; } cache->uc_freebucket = NULL; /* We are no longer associated with this CPU. */ critical_exit(); if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) { domain = PCPU_GET(domain); if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } else domain = 0; zdom = &zone->uz_domain[0]; /* Can we throw this on the zone full list? */ if (bucket != NULL) { CTR3(KTR_UMA, "uma_zfree: zone %s(%p) putting bucket %p on free list", zone->uz_name, zone, bucket); /* ub_cnt is pointing to the last free item */ KASSERT(bucket->ub_cnt != 0, ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, udata); goto zfree_restart; } else LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); } /* * We bump the uz count when the cache size is insufficient to * handle the working set. */ if (lockfail && zone->uz_count < BUCKET_MAX) zone->uz_count++; ZONE_UNLOCK(zone); bucket = bucket_alloc(zone, udata, M_NOWAIT); CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", zone->uz_name, zone, bucket); if (bucket) { critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; if (cache->uc_freebucket == NULL && ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || domain == PCPU_GET(domain))) { cache->uc_freebucket = bucket; goto zfree_start; } /* * We lost the race, start over. We have to drop our * critical section to free the bucket. */ critical_exit(); bucket_free(zone, bucket, udata); goto zfree_restart; } /* * If nothing else caught this, we'll just do an internal free. */ zfree_item: zone_free_item(zone, item, udata, SKIP_DTOR); return; } void uma_zfree_domain(uma_zone_t zone, void *item, void *udata) { /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, zone->uz_name); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zfree_domain: called with spinlock or critical section held")); /* uma_zfree(..., NULL) does nothing, to match free(9). 
*/ if (item == NULL) return; zone_free_item(zone, item, udata, SKIP_NONE); } static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) { uma_domain_t dom; uint8_t freei; mtx_assert(&keg->uk_lock, MA_OWNED); MPASS(keg == slab->us_keg); dom = &keg->uk_domain[slab->us_domain]; /* Do we need to remove from any lists? */ if (slab->us_freecount+1 == keg->uk_ipers) { LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); } else if (slab->us_freecount == 0) { LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); } /* Slab management. */ freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); slab->us_freecount++; /* Keg statistics. */ keg->uk_free++; } static void zone_release(uma_zone_t zone, void **bucket, int cnt) { void *item; uma_slab_t slab; uma_keg_t keg; uint8_t *mem; int clearfull; int i; clearfull = 0; keg = zone_first_keg(zone); KEG_LOCK(keg); for (i = 0; i < cnt; i++) { item = bucket[i]; if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); if (zone->uz_flags & UMA_ZONE_HASH) { slab = hash_sfind(&keg->uk_hash, mem); } else { mem += keg->uk_pgoff; slab = (uma_slab_t)mem; } } else { slab = vtoslab((vm_offset_t)item); if (slab->us_keg != keg) { KEG_UNLOCK(keg); keg = slab->us_keg; KEG_LOCK(keg); } } slab_free_item(keg, slab, item); if (keg->uk_flags & UMA_ZFLAG_FULL) { if (keg->uk_pages < keg->uk_maxpages) { keg->uk_flags &= ~UMA_ZFLAG_FULL; clearfull = 1; } /* * We can handle one more allocation. Since we're * clearing ZFLAG_FULL, wake up all procs blocked * on pages. This should be uncommon, so keeping this * simple for now (rather than adding count of blocked * threads etc). */ wakeup(keg); } } KEG_UNLOCK(keg); if (clearfull) { ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_FULL; wakeup(zone); ZONE_UNLOCK(zone); } } /* * Frees a single item to any zone. 
* * Arguments: * zone The zone to free to * item The item we're freeing * udata User supplied data for the dtor * skip Skip dtors and finis */ static void zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) { #ifdef INVARIANTS bool skipdbg; skipdbg = uma_dbg_zskip(zone, item); if (skip == SKIP_NONE && !skipdbg) { if (zone->uz_flags & UMA_ZONE_MALLOC) uma_dbg_free(zone, udata, item); else uma_dbg_free(zone, NULL, item); } if (skip < SKIP_DTOR && zone->uz_dtor != NULL && (!skipdbg || zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor)) #else if (skip < SKIP_DTOR && zone->uz_dtor != NULL) #endif zone->uz_dtor(item, zone->uz_size, udata); if (skip < SKIP_FINI && zone->uz_fini) zone->uz_fini(item, zone->uz_size); atomic_add_long(&zone->uz_frees, 1); zone->uz_release(zone->uz_arg, &item, 1); } /* See uma.h */ int uma_zone_set_max(uma_zone_t zone, int nitems) { uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return (0); KEG_LOCK(keg); keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; if (keg->uk_maxpages * keg->uk_ipers < nitems) keg->uk_maxpages += keg->uk_ppera; nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; KEG_UNLOCK(keg); return (nitems); } /* See uma.h */ int uma_zone_get_max(uma_zone_t zone) { int nitems; uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return (0); KEG_LOCK(keg); nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; KEG_UNLOCK(keg); return (nitems); } /* See uma.h */ void uma_zone_set_warning(uma_zone_t zone, const char *warning) { ZONE_LOCK(zone); zone->uz_warning = warning; ZONE_UNLOCK(zone); } /* See uma.h */ void uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) { ZONE_LOCK(zone); TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); ZONE_UNLOCK(zone); } /* See uma.h */ int uma_zone_get_cur(uma_zone_t zone) { int64_t nitems; u_int i; ZONE_LOCK(zone); nitems = zone->uz_allocs - zone->uz_frees; CPU_FOREACH(i) { /* * See the comment in sysctl_vm_zone_stats() regarding the * safety of accessing the per-cpu caches. With the zone lock * held, it is safe, but can potentially result in stale data. */ nitems += zone->uz_cpu[i].uc_allocs - zone->uz_cpu[i].uc_frees; } ZONE_UNLOCK(zone); return (nitems < 0 ? 
0 : nitems); } /* See uma.h */ void uma_zone_set_init(uma_zone_t zone, uma_init uminit) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); KEG_LOCK(keg); KASSERT(keg->uk_pages == 0, ("uma_zone_set_init on non-empty keg")); keg->uk_init = uminit; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_set_fini(uma_zone_t zone, uma_fini fini) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); KEG_LOCK(keg); KASSERT(keg->uk_pages == 0, ("uma_zone_set_fini on non-empty keg")); keg->uk_fini = fini; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) { ZONE_LOCK(zone); KASSERT(zone_first_keg(zone)->uk_pages == 0, ("uma_zone_set_zinit on non-empty keg")); zone->uz_init = zinit; ZONE_UNLOCK(zone); } /* See uma.h */ void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) { ZONE_LOCK(zone); KASSERT(zone_first_keg(zone)->uk_pages == 0, ("uma_zone_set_zfini on non-empty keg")); zone->uz_fini = zfini; ZONE_UNLOCK(zone); } /* See uma.h */ /* XXX uk_freef is not actually used with the zone locked */ void uma_zone_set_freef(uma_zone_t zone, uma_free freef) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); KEG_LOCK(keg); keg->uk_freef = freef; KEG_UNLOCK(keg); } /* See uma.h */ /* XXX uk_allocf is not actually used with the zone locked */ void uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) { uma_keg_t keg; keg = zone_first_keg(zone); KEG_LOCK(keg); keg->uk_allocf = allocf; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_reserve(uma_zone_t zone, int items) { uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return; KEG_LOCK(keg); keg->uk_reserve = items; KEG_UNLOCK(keg); return; } /* See uma.h */ int uma_zone_reserve_kva(uma_zone_t zone, int count) { uma_keg_t keg; vm_offset_t kva; u_int pages; keg = zone_first_keg(zone); if (keg == NULL) return (0); pages = count / keg->uk_ipers; if (pages * keg->uk_ipers < count) pages++; pages *= keg->uk_ppera; #ifdef UMA_MD_SMALL_ALLOC if (keg->uk_ppera > 1) { #else if (1) { #endif kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); if (kva == 0) return (0); } else kva = 0; KEG_LOCK(keg); keg->uk_kva = kva; keg->uk_offset = 0; keg->uk_maxpages = pages; #ifdef UMA_MD_SMALL_ALLOC keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; #else keg->uk_allocf = noobj_alloc; #endif keg->uk_flags |= UMA_ZONE_NOFREE; KEG_UNLOCK(keg); return (1); } /* See uma.h */ void uma_prealloc(uma_zone_t zone, int items) { struct vm_domainset_iter di; uma_domain_t dom; uma_slab_t slab; uma_keg_t keg; int domain, flags, slabs; keg = zone_first_keg(zone); if (keg == NULL) return; KEG_LOCK(keg); slabs = items / keg->uk_ipers; if (slabs * keg->uk_ipers < items) slabs++; flags = M_WAITOK; vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags); while (slabs-- > 0) { slab = keg_alloc_slab(keg, zone, domain, flags); if (slab == NULL) return; MPASS(slab->us_keg == keg); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); if (vm_domainset_iter_policy(&di, &domain) != 0) break; } KEG_UNLOCK(keg); } /* See uma.h */ static void uma_reclaim_locked(bool kmem_danger) { CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); sx_assert(&uma_drain_lock, SA_XLOCKED); bucket_enable(); zone_foreach(zone_drain); if (vm_page_count_min() || kmem_danger) { cache_drain_safe(NULL); zone_foreach(zone_drain); } /* * Some slabs may have been freed but this zone will be visited early * we visit again so that we can free pages that are empty once other * zones are drained. We have to do the same for buckets. */ zone_drain(slabzone); bucket_zone_drain(); } void uma_reclaim(void) { sx_xlock(&uma_drain_lock); uma_reclaim_locked(false); sx_xunlock(&uma_drain_lock); } static volatile int uma_reclaim_needed; void uma_reclaim_wakeup(void) { if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) wakeup(uma_reclaim); } void uma_reclaim_worker(void *arg __unused) { for (;;) { sx_xlock(&uma_drain_lock); while (atomic_load_int(&uma_reclaim_needed) == 0) sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", hz); sx_xunlock(&uma_drain_lock); EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); sx_xlock(&uma_drain_lock); uma_reclaim_locked(true); atomic_store_int(&uma_reclaim_needed, 0); sx_xunlock(&uma_drain_lock); /* Don't fire more than once per-second. */ pause("umarclslp", hz); } } /* See uma.h */ int uma_zone_exhausted(uma_zone_t zone) { int full; ZONE_LOCK(zone); full = (zone->uz_flags & UMA_ZFLAG_FULL); ZONE_UNLOCK(zone); return (full); } int uma_zone_exhausted_nolock(uma_zone_t zone) { return (zone->uz_flags & UMA_ZFLAG_FULL); } void * uma_large_malloc_domain(vm_size_t size, int domain, int wait) { + struct domainset *policy; vm_offset_t addr; uma_slab_t slab; if (domain != UMA_ANYDOMAIN) { /* avoid allocs targeting empty domains */ if (VM_DOMAIN_EMPTY(domain)) domain = UMA_ANYDOMAIN; } slab = zone_alloc_item(slabzone, NULL, domain, wait); if (slab == NULL) return (NULL); - if (domain == UMA_ANYDOMAIN) - addr = kmem_malloc(size, wait); - else - addr = kmem_malloc_domain(domain, size, wait); + policy = (domain == UMA_ANYDOMAIN) ? 
DOMAINSET_RR() : + DOMAINSET_FIXED(domain); + addr = kmem_malloc_domainset(policy, size, wait); if (addr != 0) { vsetslab(addr, slab); slab->us_data = (void *)addr; slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC; slab->us_size = size; slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE( pmap_kextract(addr))); uma_total_inc(size); } else { zone_free_item(slabzone, slab, NULL, SKIP_NONE); } return ((void *)addr); } void * uma_large_malloc(vm_size_t size, int wait) { return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait); } void uma_large_free(uma_slab_t slab) { KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0, ("uma_large_free: Memory not allocated with uma_large_malloc.")); kmem_free((vm_offset_t)slab->us_data, slab->us_size); uma_total_dec(slab->us_size); zone_free_item(slabzone, slab, NULL, SKIP_NONE); } static void uma_zero_item(void *item, uma_zone_t zone) { bzero(item, zone->uz_size); } unsigned long uma_limit(void) { return (uma_kmem_limit); } void uma_set_limit(unsigned long limit) { uma_kmem_limit = limit; } unsigned long uma_size(void) { return (uma_kmem_total); } long uma_avail(void) { return (uma_kmem_limit - uma_kmem_total); } void uma_print_stats(void) { zone_foreach(uma_print_zone); } static void slab_print(uma_slab_t slab) { printf("slab: keg %p, data %p, freecount %d\n", slab->us_keg, slab->us_data, slab->us_freecount); } static void cache_print(uma_cache_t cache) { printf("alloc: %p(%d), free: %p(%d)\n", cache->uc_allocbucket, cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, cache->uc_freebucket, cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); } static void uma_print_keg(uma_keg_t keg) { uma_domain_t dom; uma_slab_t slab; int i; printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " "out %d free %d limit %d\n", keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, keg->uk_ipers, keg->uk_ppera, (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); for (i = 0; i < vm_ndomains; i++) { dom = &keg->uk_domain[i]; printf("Part slabs:\n"); LIST_FOREACH(slab, &dom->ud_part_slab, us_link) slab_print(slab); printf("Free slabs:\n"); LIST_FOREACH(slab, &dom->ud_free_slab, us_link) slab_print(slab); printf("Full slabs:\n"); LIST_FOREACH(slab, &dom->ud_full_slab, us_link) slab_print(slab); } } void uma_print_zone(uma_zone_t zone) { uma_cache_t cache; uma_klink_t kl; int i; printf("zone: %s(%p) size %d flags %#x\n", zone->uz_name, zone, zone->uz_size, zone->uz_flags); LIST_FOREACH(kl, &zone->uz_kegs, kl_link) uma_print_keg(kl->kl_keg); CPU_FOREACH(i) { cache = &zone->uz_cpu[i]; printf("CPU %d Cache:\n", i); cache_print(cache); } } #ifdef DDB /* * Generate statistics across both the zone and its per-cpu cache's. Return * desired statistics if the pointer is non-NULL for that statistic. * * Note: does not update the zone statistics, as it can't safely clear the * per-CPU cache statistic. * * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't * safe from off-CPU; we should modify the caches to track this information * directly so that we don't have to. 
*/ static void uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, uint64_t *freesp, uint64_t *sleepsp) { uma_cache_t cache; uint64_t allocs, frees, sleeps; int cachefree, cpu; allocs = frees = sleeps = 0; cachefree = 0; CPU_FOREACH(cpu) { cache = &z->uz_cpu[cpu]; if (cache->uc_allocbucket != NULL) cachefree += cache->uc_allocbucket->ub_cnt; if (cache->uc_freebucket != NULL) cachefree += cache->uc_freebucket->ub_cnt; allocs += cache->uc_allocs; frees += cache->uc_frees; } allocs += z->uz_allocs; frees += z->uz_frees; sleeps += z->uz_sleeps; if (cachefreep != NULL) *cachefreep = cachefree; if (allocsp != NULL) *allocsp = allocs; if (freesp != NULL) *freesp = frees; if (sleepsp != NULL) *sleepsp = sleeps; } #endif /* DDB */ static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) { uma_keg_t kz; uma_zone_t z; int count; count = 0; rw_rlock(&uma_rwlock); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) count++; } rw_runlock(&uma_rwlock); return (sysctl_handle_int(oidp, &count, 0, req)); } static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) { struct uma_stream_header ush; struct uma_type_header uth; struct uma_percpu_stat *ups; uma_bucket_t bucket; uma_zone_domain_t zdom; struct sbuf sbuf; uma_cache_t cache; uma_klink_t kl; uma_keg_t kz; uma_zone_t z; uma_keg_t k; int count, error, i; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); count = 0; rw_rlock(&uma_rwlock); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) count++; } /* * Insert stream header. */ bzero(&ush, sizeof(ush)); ush.ush_version = UMA_STREAM_VERSION; ush.ush_maxcpus = (mp_maxid + 1); ush.ush_count = count; (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) { bzero(&uth, sizeof(uth)); ZONE_LOCK(z); strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); uth.uth_align = kz->uk_align; uth.uth_size = kz->uk_size; uth.uth_rsize = kz->uk_rsize; LIST_FOREACH(kl, &z->uz_kegs, kl_link) { k = kl->kl_keg; uth.uth_maxpages += k->uk_maxpages; uth.uth_pages += k->uk_pages; uth.uth_keg_free += k->uk_free; uth.uth_limit = (k->uk_maxpages / k->uk_ppera) * k->uk_ipers; } /* * A zone is secondary is it is not the first entry * on the keg's zone list. */ if ((z->uz_flags & UMA_ZONE_SECONDARY) && (LIST_FIRST(&kz->uk_zones) != z)) uth.uth_zone_flags = UTH_ZONE_SECONDARY; for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) uth.uth_zone_free += bucket->ub_cnt; } uth.uth_allocs = z->uz_allocs; uth.uth_frees = z->uz_frees; uth.uth_fails = z->uz_fails; uth.uth_sleeps = z->uz_sleeps; /* * While it is not normally safe to access the cache * bucket pointers while not on the CPU that owns the * cache, we only allow the pointers to be exchanged * without the zone lock held, not invalidated, so * accept the possible race associated with bucket * exchange during monitoring. 
*/ for (i = 0; i < mp_maxid + 1; i++) { bzero(&ups[i], sizeof(*ups)); if (kz->uk_flags & UMA_ZFLAG_INTERNAL || CPU_ABSENT(i)) continue; cache = &z->uz_cpu[i]; if (cache->uc_allocbucket != NULL) ups[i].ups_cache_free += cache->uc_allocbucket->ub_cnt; if (cache->uc_freebucket != NULL) ups[i].ups_cache_free += cache->uc_freebucket->ub_cnt; ups[i].ups_allocs = cache->uc_allocs; ups[i].ups_frees = cache->uc_frees; } ZONE_UNLOCK(z); (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); for (i = 0; i < mp_maxid + 1; i++) (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); } } rw_runlock(&uma_rwlock); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); free(ups, M_TEMP); return (error); } int sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) { uma_zone_t zone = *(uma_zone_t *)arg1; int error, max; max = uma_zone_get_max(zone); error = sysctl_handle_int(oidp, &max, 0, req); if (error || !req->newptr) return (error); uma_zone_set_max(zone, max); return (0); } int sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) { uma_zone_t zone = *(uma_zone_t *)arg1; int cur; cur = uma_zone_get_cur(zone); return (sysctl_handle_int(oidp, &cur, 0, req)); } #ifdef INVARIANTS static uma_slab_t uma_dbg_getslab(uma_zone_t zone, void *item) { uma_slab_t slab; uma_keg_t keg; uint8_t *mem; mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); if (zone->uz_flags & UMA_ZONE_VTOSLAB) { slab = vtoslab((vm_offset_t)mem); } else { /* * It is safe to return the slab here even though the * zone is unlocked because the item's allocation state * essentially holds a reference. */ ZONE_LOCK(zone); keg = LIST_FIRST(&zone->uz_kegs)->kl_keg; if (keg->uk_flags & UMA_ZONE_HASH) slab = hash_sfind(&keg->uk_hash, mem); else slab = (uma_slab_t)(mem + keg->uk_pgoff); ZONE_UNLOCK(zone); } return (slab); } static bool uma_dbg_zskip(uma_zone_t zone, void *mem) { uma_keg_t keg; if ((keg = zone_first_keg(zone)) == NULL) return (true); return (uma_dbg_kskip(keg, mem)); } static bool uma_dbg_kskip(uma_keg_t keg, void *mem) { uintptr_t idx; if (dbg_divisor == 0) return (true); if (dbg_divisor == 1) return (false); idx = (uintptr_t)mem >> PAGE_SHIFT; if (keg->uk_ipers > 1) { idx *= keg->uk_ipers; idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize; } if ((idx / dbg_divisor) * dbg_divisor != idx) { counter_u64_add(uma_skip_cnt, 1); return (true); } counter_u64_add(uma_dbg_cnt, 1); return (false); } /* * Set up the slab's freei data such that uma_dbg_free can function. * */ static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item) { uma_keg_t keg; int freei; if (slab == NULL) { slab = uma_dbg_getslab(zone, item); if (slab == NULL) panic("uma: item %p did not belong to zone %s\n", item, zone->uz_name); } keg = slab->us_keg; freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree)) panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree); return; } /* * Verifies freed addresses. Checks for alignment, valid slab membership * and duplicate frees. 
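 *
 * For example, the item index within the slab is recovered as
 *
 *	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 *
 * a misaligned pointer then fails the us_data + freei * uk_rsize == item
 * check, and a duplicate free finds the us_debugfree bit already clear.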
* */ static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item) { uma_keg_t keg; int freei; if (slab == NULL) { slab = uma_dbg_getslab(zone, item); if (slab == NULL) panic("uma: Freed item %p did not belong to zone %s\n", item, zone->uz_name); } keg = slab->us_keg; freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; if (freei >= keg->uk_ipers) panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); if (((freei * keg->uk_rsize) + slab->us_data) != item) panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree)) panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree); } #endif /* INVARIANTS */ #ifdef DDB DB_SHOW_COMMAND(uma, db_show_uma) { uma_bucket_t bucket; uma_keg_t kz; uma_zone_t z; uma_zone_domain_t zdom; uint64_t allocs, frees, sleeps; int cachefree, i; db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used", "Free", "Requests", "Sleeps", "Bucket"); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) { if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { allocs = z->uz_allocs; frees = z->uz_frees; sleeps = z->uz_sleeps; cachefree = 0; } else uma_zone_sumstat(z, &cachefree, &allocs, &frees, &sleeps); if (!((z->uz_flags & UMA_ZONE_SECONDARY) && (LIST_FIRST(&kz->uk_zones) != z))) cachefree += kz->uk_free; for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) cachefree += bucket->ub_cnt; } db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n", z->uz_name, (uintmax_t)kz->uk_size, (intmax_t)(allocs - frees), cachefree, (uintmax_t)allocs, sleeps, z->uz_count); if (db_pager_quit) return; } } } DB_SHOW_COMMAND(umacache, db_show_umacache) { uma_bucket_t bucket; uma_zone_t z; uma_zone_domain_t zdom; uint64_t allocs, frees; int cachefree, i; db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", "Requests", "Bucket"); LIST_FOREACH(z, &uma_cachezones, uz_link) { uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL); for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) cachefree += bucket->ub_cnt; } db_printf("%18s %8ju %8jd %8d %12ju %8u\n", z->uz_name, (uintmax_t)z->uz_size, (intmax_t)(allocs - frees), cachefree, (uintmax_t)allocs, z->uz_count); if (db_pager_quit) return; } } #endif /* DDB */ Index: head/sys/vm/vm_extern.h =================================================================== --- head/sys/vm/vm_extern.h (revision 339926) +++ head/sys/vm/vm_extern.h (revision 339927) @@ -1,131 +1,133 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)vm_extern.h 8.2 (Berkeley) 1/12/94 * $FreeBSD$ */ #ifndef _VM_EXTERN_H_ #define _VM_EXTERN_H_ struct pmap; struct proc; struct vmspace; struct vnode; struct vmem; #ifdef _KERNEL struct cdev; struct cdevsw; +struct domainset; /* These operate on kernel virtual addresses only. */ vm_offset_t kva_alloc(vm_size_t); void kva_free(vm_offset_t, vm_size_t); /* These operate on pageable virtual addresses. */ vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t); void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t); /* These operate on virtual addresses backed by memory. */ vm_offset_t kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr); -vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, - vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr); +vm_offset_t kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, + int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr); vm_offset_t kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr); -vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, - vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, - vm_memattr_t memattr); +vm_offset_t kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, + int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, + vm_paddr_t boundary, vm_memattr_t memattr); vm_offset_t kmem_malloc(vm_size_t size, int flags); -vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags); +vm_offset_t kmem_malloc_domainset(struct domainset *ds, vm_size_t size, + int flags); void kmem_free(vm_offset_t addr, vm_size_t size); /* This provides memory for previously allocated address space. */ int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int); int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int); void kmem_unback(vm_object_t, vm_offset_t, vm_size_t); /* Bootstrapping. 
*/ void kmem_bootstrap_free(vm_offset_t, vm_size_t); vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, boolean_t); void kmem_init(vm_offset_t, vm_offset_t); void kmem_init_zero_region(void); void kmeminit(void); int kernacc(void *, int, int); int useracc(void *, int, int); int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int); void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t, vm_ooffset_t *); int vm_fault_disable_pagefaults(void); void vm_fault_enable_pagefaults(int save); int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags, vm_page_t *m_hold); int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, vm_prot_t prot, vm_page_t *ma, int max_count); int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int); void vm_waitproc(struct proc *); int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t); int vm_mmap_object(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, vm_object_t, vm_ooffset_t, boolean_t, struct thread *); int vm_mmap_to_errno(int rv); int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, int *, struct cdev *, struct cdevsw *, vm_ooffset_t *, vm_object_t *); int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, int *, struct vnode *, vm_ooffset_t *, vm_object_t *, boolean_t *); void vm_set_page_size(void); void vm_sync_icache(vm_map_t, vm_offset_t, vm_size_t); typedef int (*pmap_pinit_t)(struct pmap *pmap); struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t, pmap_pinit_t); struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *); int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t); int vmspace_unshare(struct proc *); void vmspace_exit(struct thread *); struct vmspace *vmspace_acquire_ref(struct proc *); void vmspace_free(struct vmspace *); void vmspace_exitfree(struct proc *); void vmspace_switch_aio(struct vmspace *); void vnode_pager_setsize(struct vnode *, vm_ooffset_t); int vslock(void *, size_t); void vsunlock(void *, size_t); struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset); void vm_imgact_unmap_page(struct sf_buf *sf); void vm_thread_dispose(struct thread *td); int vm_thread_new(struct thread *td, int pages); u_int vm_active_count(void); u_int vm_inactive_count(void); u_int vm_laundry_count(void); u_int vm_wait_count(void); #endif /* _KERNEL */ #endif /* !_VM_EXTERN_H_ */ Index: head/sys/vm/vm_kern.c =================================================================== --- head/sys/vm/vm_kern.c (revision 339926) +++ head/sys/vm/vm_kern.c (revision 339927) @@ -1,836 +1,862 @@ /*- * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Kernel memory management. */ #include __FBSDID("$FreeBSD$"); #include "opt_vm.h" #include #include #include /* for ticks and hz */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include vm_map_t kernel_map; vm_map_t exec_map; vm_map_t pipe_map; const void *zero_region; CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0); /* NB: Used by kernel debuggers. */ const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS; u_int exec_map_entry_size; u_int exec_map_entries; SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD, SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address"); SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD, #if defined(__arm__) || defined(__sparc64__) &vm_max_kernel_address, 0, #else SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS, #endif "Max kernel address"); #if VM_NRESERVLEVEL > 0 #define KVA_QUANTUM_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT) #else /* On non-superpage architectures want large import sizes. */ #define KVA_QUANTUM_SHIFT (10 + PAGE_SHIFT) #endif #define KVA_QUANTUM (1 << KVA_QUANTUM_SHIFT) /* * kva_alloc: * * Allocate a virtual address range with no underlying object and * no initial mapping to physical memory. Any mapping from this * range to physical memory must be explicitly created prior to * its use, typically with pmap_qenter(). 
Any attempt to create * a mapping on demand through vm_fault() will result in a panic. */ vm_offset_t kva_alloc(vm_size_t size) { vm_offset_t addr; size = round_page(size); if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr)) return (0); return (addr); } /* * kva_free: * * Release a region of kernel virtual memory allocated * with kva_alloc, and return the physical pages * associated with that region. * * This routine may not block on kernel maps. */ void kva_free(vm_offset_t addr, vm_size_t size) { size = round_page(size); vmem_free(kernel_arena, addr, size); } /* * Allocates a region from the kernel address map and physical pages * within the specified address range to the kernel object. Creates a * wired mapping from this region to these pages, and returns the * region's starting virtual address. The allocated pages are not * necessarily physically contiguous. If M_ZERO is specified through the * given flags, then the pages are zeroed before they are mapped. */ -vm_offset_t +static vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) { vmem_t *vmem; vm_object_t object = kernel_object; vm_offset_t addr, i, offset; vm_page_t m; int pflags, tries; size = round_page(size); vmem = vm_dom[domain].vmd_kernel_arena; if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr)) return (0); offset = addr - VM_MIN_KERNEL_ADDRESS; pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); pflags |= VM_ALLOC_NOWAIT; VM_OBJECT_WLOCK(object); for (i = 0; i < size; i += PAGE_SIZE) { tries = 0; retry: m = vm_page_alloc_contig_domain(object, atop(offset + i), domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr); if (m == NULL) { VM_OBJECT_WUNLOCK(object); if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) { if (!vm_page_reclaim_contig_domain(domain, pflags, 1, low, high, PAGE_SIZE, 0) && (flags & M_WAITOK) != 0) vm_wait_domain(domain); VM_OBJECT_WLOCK(object); tries++; goto retry; } kmem_unback(object, addr, i); vmem_free(vmem, addr, size); return (0); } KASSERT(vm_phys_domain(m) == domain, ("kmem_alloc_attr_domain: Domain mismatch %d != %d", vm_phys_domain(m), domain)); if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; pmap_enter(kernel_pmap, addr + i, m, VM_PROT_RW, VM_PROT_RW | PMAP_ENTER_WIRED, 0); } VM_OBJECT_WUNLOCK(object); return (addr); } vm_offset_t kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) { + + return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low, + high, memattr)); +} + +vm_offset_t +kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags, + vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) +{ struct vm_domainset_iter di; vm_offset_t addr; int domain; - vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags); + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); do { addr = kmem_alloc_attr_domain(domain, size, flags, low, high, memattr); if (addr != 0) break; } while (vm_domainset_iter_policy(&di, &domain) == 0); return (addr); } /* * Allocates a region from the kernel address map and physically * contiguous pages within the specified address range to the kernel * object. Creates a wired mapping from this region to these pages, and * returns the region's starting virtual address. If M_ZERO is specified * through the given flags, then the pages are zeroed before they are * mapped. 
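 *
 * With the domainset KPI a caller can express a NUMA policy instead of
 * a single domain.  An illustrative request for contiguous DMA-able
 * memory preferring one domain (sketch only, not taken from existing
 * callers):
 *
 *	addr = kmem_alloc_contig_domainset(DOMAINSET_PREF(domain), size,
 *	    M_WAITOK | M_ZERO, 0, BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0,
 *	    VM_MEMATTR_DEFAULT);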
*/ -vm_offset_t +static vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { vmem_t *vmem; vm_object_t object = kernel_object; vm_offset_t addr, offset, tmp; vm_page_t end_m, m; u_long npages; int pflags, tries; size = round_page(size); vmem = vm_dom[domain].vmd_kernel_arena; if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr)) return (0); offset = addr - VM_MIN_KERNEL_ADDRESS; pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); pflags |= VM_ALLOC_NOWAIT; npages = atop(size); VM_OBJECT_WLOCK(object); tries = 0; retry: m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags, npages, low, high, alignment, boundary, memattr); if (m == NULL) { VM_OBJECT_WUNLOCK(object); if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) { if (!vm_page_reclaim_contig_domain(domain, pflags, npages, low, high, alignment, boundary) && (flags & M_WAITOK) != 0) vm_wait_domain(domain); VM_OBJECT_WLOCK(object); tries++; goto retry; } vmem_free(vmem, addr, size); return (0); } KASSERT(vm_phys_domain(m) == domain, ("kmem_alloc_contig_domain: Domain mismatch %d != %d", vm_phys_domain(m), domain)); end_m = m + npages; tmp = addr; for (; m < end_m; m++) { if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW, VM_PROT_RW | PMAP_ENTER_WIRED, 0); tmp += PAGE_SIZE; } VM_OBJECT_WUNLOCK(object); return (addr); } vm_offset_t kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { + + return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low, + high, alignment, boundary, memattr)); +} + +vm_offset_t +kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags, + vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, + vm_memattr_t memattr) +{ struct vm_domainset_iter di; vm_offset_t addr; int domain; - vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags); + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); do { addr = kmem_alloc_contig_domain(domain, size, flags, low, high, alignment, boundary, memattr); if (addr != 0) break; } while (vm_domainset_iter_policy(&di, &domain) == 0); return (addr); } /* * kmem_suballoc: * * Allocates a map to manage a subrange * of the kernel virtual address space. * * Arguments are as follows: * * parent Map to take range from * min, max Returned endpoints of map * size Size of range to find * superpage_align Request that min is superpage aligned */ vm_map_t kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max, vm_size_t size, boolean_t superpage_align) { int ret; vm_map_t result; size = round_page(size); *min = vm_map_min(parent); ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ? VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE); if (ret != KERN_SUCCESS) panic("kmem_suballoc: bad status return of %d", ret); *max = *min + size; result = vm_map_create(vm_map_pmap(parent), *min, *max); if (result == NULL) panic("kmem_suballoc: cannot create submap"); if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS) panic("kmem_suballoc: unable to change range to submap"); return (result); } /* - * kmem_malloc: + * kmem_malloc_domain: * * Allocate wired-down pages in the kernel's address space. 
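 *
 * This per-domain helper is now static; external callers use
 * kmem_malloc() for the default round-robin policy or the new
 * kmem_malloc_domainset() to supply an explicit policy, e.g.
 * (illustrative sketch only):
 *
 *	addr = kmem_malloc_domainset(DOMAINSET_FIXED(domain), size,
 *	    M_WAITOK | M_ZERO);
 *	...
 *	kmem_free(addr, size);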
*/ -vm_offset_t +static vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags) { vmem_t *arena; vm_offset_t addr; int rv; #if VM_NRESERVLEVEL > 0 if (__predict_true((flags & M_EXEC) == 0)) arena = vm_dom[domain].vmd_kernel_arena; else arena = vm_dom[domain].vmd_kernel_rwx_arena; #else arena = vm_dom[domain].vmd_kernel_arena; #endif size = round_page(size); if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr)) return (0); rv = kmem_back_domain(domain, kernel_object, addr, size, flags); if (rv != KERN_SUCCESS) { vmem_free(arena, addr, size); return (0); } return (addr); } vm_offset_t kmem_malloc(vm_size_t size, int flags) { + + return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags)); +} + +vm_offset_t +kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags) +{ struct vm_domainset_iter di; vm_offset_t addr; int domain; - vm_domainset_iter_policy_init(&di, DOMAINSET_RR(), &domain, &flags); + vm_domainset_iter_policy_init(&di, ds, &domain, &flags); do { addr = kmem_malloc_domain(domain, size, flags); if (addr != 0) break; } while (vm_domainset_iter_policy(&di, &domain) == 0); return (addr); } /* * kmem_back_domain: * * Allocate physical pages from the specified domain for the specified * virtual address range. */ int kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr, vm_size_t size, int flags) { vm_offset_t offset, i; vm_page_t m, mpred; vm_prot_t prot; int pflags; KASSERT(object == kernel_object, ("kmem_back_domain: only supports kernel object.")); offset = addr - VM_MIN_KERNEL_ADDRESS; pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); if (flags & M_WAITOK) pflags |= VM_ALLOC_WAITFAIL; prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW; i = 0; VM_OBJECT_WLOCK(object); retry: mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i)); for (; i < size; i += PAGE_SIZE, mpred = m) { m = vm_page_alloc_domain_after(object, atop(offset + i), domain, pflags, mpred); /* * Ran out of space, free everything up and return. Don't need * to lock page queues here as we know that the pages we got * aren't on any queues. */ if (m == NULL) { if ((flags & M_NOWAIT) == 0) goto retry; VM_OBJECT_WUNLOCK(object); kmem_unback(object, addr, i); return (KERN_NO_SPACE); } KASSERT(vm_phys_domain(m) == domain, ("kmem_back_domain: Domain mismatch %d != %d", vm_phys_domain(m), domain)); if (flags & M_ZERO && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("kmem_malloc: page %p is managed", m)); m->valid = VM_PAGE_BITS_ALL; pmap_enter(kernel_pmap, addr + i, m, prot, prot | PMAP_ENTER_WIRED, 0); #if VM_NRESERVLEVEL > 0 if (__predict_false((prot & VM_PROT_EXECUTE) != 0)) m->oflags |= VPO_KMEM_EXEC; #endif } VM_OBJECT_WUNLOCK(object); return (KERN_SUCCESS); } /* * kmem_back: * * Allocate physical pages for the specified virtual address range. */ int kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags) { vm_offset_t end, next, start; int domain, rv; KASSERT(object == kernel_object, ("kmem_back: only supports kernel object.")); for (start = addr, end = addr + size; addr < end; addr = next) { /* * We must ensure that pages backing a given large virtual page * all come from the same physical domain. 
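 *
 * For example, with KVA_QUANTUM equal to the superpage size (2 MB on
 * architectures with 512 x 4 KB base pages per reservation), the
 * backing domain is selected as
 *
 *	domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
 *
 * (empty domains are then skipped), so each KVA_QUANTUM-aligned chunk
 * is filled from one domain and successive chunks rotate round-robin
 * across the domains.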
*/ if (vm_ndomains > 1) { domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains; while (VM_DOMAIN_EMPTY(domain)) domain++; next = roundup2(addr + 1, KVA_QUANTUM); if (next > end || next < start) next = end; } else { domain = 0; next = end; } rv = kmem_back_domain(domain, object, addr, next - addr, flags); if (rv != KERN_SUCCESS) { kmem_unback(object, start, addr - start); break; } } return (rv); } /* * kmem_unback: * * Unmap and free the physical pages underlying the specified virtual * address range. * * A physical page must exist within the specified object at each index * that is being unmapped. */ static struct vmem * _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size) { struct vmem *arena; vm_page_t m, next; vm_offset_t end, offset; int domain; KASSERT(object == kernel_object, ("kmem_unback: only supports kernel object.")); if (size == 0) return (NULL); pmap_remove(kernel_pmap, addr, addr + size); offset = addr - VM_MIN_KERNEL_ADDRESS; end = offset + size; VM_OBJECT_WLOCK(object); m = vm_page_lookup(object, atop(offset)); domain = vm_phys_domain(m); #if VM_NRESERVLEVEL > 0 if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0)) arena = vm_dom[domain].vmd_kernel_arena; else arena = vm_dom[domain].vmd_kernel_rwx_arena; #else arena = vm_dom[domain].vmd_kernel_arena; #endif for (; offset < end; offset += PAGE_SIZE, m = next) { next = vm_page_next(m); vm_page_unwire(m, PQ_NONE); vm_page_free(m); } VM_OBJECT_WUNLOCK(object); return (arena); } void kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size) { (void)_kmem_unback(object, addr, size); } /* * kmem_free: * * Free memory allocated with kmem_malloc. The size must match the * original allocation. */ void kmem_free(vm_offset_t addr, vm_size_t size) { struct vmem *arena; size = round_page(size); arena = _kmem_unback(kernel_object, addr, size); if (arena != NULL) vmem_free(arena, addr, size); } /* * kmap_alloc_wait: * * Allocates pageable memory from a sub-map of the kernel. If the submap * has no room, the caller sleeps waiting for more memory in the submap. * * This routine may block. */ vm_offset_t kmap_alloc_wait(vm_map_t map, vm_size_t size) { vm_offset_t addr; size = round_page(size); if (!swap_reserve(size)) return (0); for (;;) { /* * To make this work for more than one map, use the map's lock * to lock out sleepers/wakers. */ vm_map_lock(map); if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0) break; /* no space now; see if we can ever get space */ if (vm_map_max(map) - vm_map_min(map) < size) { vm_map_unlock(map); swap_release(size); return (0); } map->needs_wakeup = TRUE; vm_map_unlock_and_wait(map, 0); } vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_CHARGED); vm_map_unlock(map); return (addr); } /* * kmap_free_wakeup: * * Returns memory to a submap of the kernel, and wakes up any processes * waiting for memory in that map. */ void kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size) { vm_map_lock(map); (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size)); if (map->needs_wakeup) { map->needs_wakeup = FALSE; vm_map_wakeup(map); } vm_map_unlock(map); } void kmem_init_zero_region(void) { vm_offset_t addr, i; vm_page_t m; /* * Map a single physical page of zeros to a larger virtual range. * This requires less looping in places that want large amounts of * zeros, while not using much more physical resources. 
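 *
 * In other words, ZERO_REGION_SIZE bytes of KVA are backed by a single
 * physical page mapped repeatedly, and the trailing pmap_protect() call
 * leaves the region read-only so that stray writes fault rather than
 * silently corrupting the shared zero page.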
*/ addr = kva_alloc(ZERO_REGION_SIZE); m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE) pmap_qenter(addr + i, &m, 1); pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ); zero_region = (const void *)addr; } /* * Import KVA from the kernel map into the kernel arena. */ static int kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp) { vm_offset_t addr; int result; KASSERT((size % KVA_QUANTUM) == 0, ("kva_import: Size %jd is not a multiple of %d", (intmax_t)size, (int)KVA_QUANTUM)); addr = vm_map_min(kernel_map); result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0, VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); if (result != KERN_SUCCESS) return (ENOMEM); *addrp = addr; return (0); } /* * Import KVA from a parent arena into a per-domain arena. Imports must be * KVA_QUANTUM-aligned and a multiple of KVA_QUANTUM in size. */ static int kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp) { KASSERT((size % KVA_QUANTUM) == 0, ("kva_import_domain: Size %jd is not a multiple of %d", (intmax_t)size, (int)KVA_QUANTUM)); return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, addrp)); } /* * kmem_init: * * Create the kernel map; insert a mapping covering kernel text, * data, bss, and all space allocated thus far (`boostrap' data). The * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and * `start' as allocated, and the range between `start' and `end' as free. * Create the kernel vmem arena and its per-domain children. */ void kmem_init(vm_offset_t start, vm_offset_t end) { vm_map_t m; int domain; m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end); m->system_map = 1; vm_map_lock(m); /* N.B.: cannot use kgdb to debug, starting with this assignment ... */ kernel_map = m; (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0, #ifdef __amd64__ KERNBASE, #else VM_MIN_KERNEL_ADDRESS, #endif start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); /* ... and ending with the completion of the above `insert' */ vm_map_unlock(m); /* * Initialize the kernel_arena. This can grow on demand. */ vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0); vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM); for (domain = 0; domain < vm_ndomains; domain++) { /* * Initialize the per-domain arenas. These are used to color * the KVA space in a way that ensures that virtual large pages * are backed by memory from the same physical domain, * maximizing the potential for superpage promotion. */ vm_dom[domain].vmd_kernel_arena = vmem_create( "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK); vmem_set_import(vm_dom[domain].vmd_kernel_arena, kva_import_domain, NULL, kernel_arena, KVA_QUANTUM); /* * In architectures with superpages, maintain separate arenas * for allocations with permissions that differ from the * "standard" read/write permissions used for kernel memory, * so as not to inhibit superpage promotion. */ #if VM_NRESERVLEVEL > 0 vm_dom[domain].vmd_kernel_rwx_arena = vmem_create( "kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK); vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena, kva_import_domain, (vmem_release_t *)vmem_xfree, kernel_arena, KVA_QUANTUM); #endif } } /* * kmem_bootstrap_free: * * Free pages backing preloaded data (e.g., kernel modules) to the * system. 
Currently only supported on platforms that create a * vm_phys segment for preloaded data. */ void kmem_bootstrap_free(vm_offset_t start, vm_size_t size) { #if defined(__i386__) || defined(__amd64__) struct vm_domain *vmd; vm_offset_t end, va; vm_paddr_t pa; vm_page_t m; end = trunc_page(start + size); start = round_page(start); for (va = start; va < end; va += PAGE_SIZE) { pa = pmap_kextract(va); m = PHYS_TO_VM_PAGE(pa); vmd = vm_pagequeue_domain(m); vm_domain_free_lock(vmd); vm_phys_free_pages(m, 0); vm_domain_free_unlock(vmd); vm_domain_freecnt_inc(vmd, 1); vm_cnt.v_page_count++; } pmap_remove(kernel_pmap, start, end); (void)vmem_add(kernel_arena, start, end - start, M_WAITOK); #endif } #ifdef DIAGNOSTIC /* * Allow userspace to directly trigger the VM drain routine for testing * purposes. */ static int debug_vm_lowmem(SYSCTL_HANDLER_ARGS) { int error, i; i = 0; error = sysctl_handle_int(oidp, &i, 0, req); if (error) return (error); if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0) return (EINVAL); if (i != 0) EVENTHANDLER_INVOKE(vm_lowmem, i); return (0); } SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0, debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags"); #endif Index: head/sys/x86/iommu/busdma_dmar.c =================================================================== --- head/sys/x86/iommu/busdma_dmar.c (revision 339926) +++ head/sys/x86/iommu/busdma_dmar.c (revision 339927) @@ -1,925 +1,926 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * busdma_dmar.c, the implementation of the busdma(9) interface using * DMAR units from Intel VT-d. 
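 *
 * NUMA note: per-device state in this file is allocated with the
 * domainset KPI so that memory preferentially comes from the device's
 * local domain, as in dmar_bus_dmamap_create() below:
 *
 *	map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP,
 *	    DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
 *
 * DOMAINSET_PREF() prefers the given domain but still allows the
 * allocation to fall back to another domain if necessary.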
*/ static bool dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func) { char str[128], *env; int default_bounce; bool ret; static const char bounce_str[] = "bounce"; static const char dmar_str[] = "dmar"; default_bounce = 0; env = kern_getenv("hw.busdma.default"); if (env != NULL) { if (strcmp(env, bounce_str) == 0) default_bounce = 1; else if (strcmp(env, dmar_str) == 0) default_bounce = 0; freeenv(env); } snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d", domain, bus, slot, func); env = kern_getenv(str); if (env == NULL) return (default_bounce != 0); if (strcmp(env, bounce_str) == 0) ret = true; else if (strcmp(env, dmar_str) == 0) ret = false; else ret = default_bounce != 0; freeenv(env); return (ret); } /* * Given original device, find the requester ID that will be seen by * the DMAR unit and used for page table lookup. PCI bridges may take * ownership of transactions from downstream devices, so it may not be * the same as the BSF of the target device. In those cases, all * devices downstream of the bridge must share a single mapping * domain, and must collectively be assigned to use either DMAR or * bounce mapping. */ device_t dmar_get_requester(device_t dev, uint16_t *rid) { devclass_t pci_class; device_t l, pci, pcib, pcip, pcibp, requester; int cap_offset; uint16_t pcie_flags; bool bridge_is_pcie; pci_class = devclass_find("pci"); l = requester = dev; *rid = pci_get_rid(dev); /* * Walk the bridge hierarchy from the target device to the * host port to find the translating bridge nearest the DMAR * unit. */ for (;;) { pci = device_get_parent(l); KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent " "for %s", device_get_name(dev), device_get_name(l))); KASSERT(device_get_devclass(pci) == pci_class, ("dmar_get_requester(%s): non-pci parent %s for %s", device_get_name(dev), device_get_name(pci), device_get_name(l))); pcib = device_get_parent(pci); KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge " "for %s", device_get_name(dev), device_get_name(pci))); /* * The parent of our "bridge" isn't another PCI bus, * so pcib isn't a PCI->PCI bridge but rather a host * port, and the requester ID won't be translated * further. */ pcip = device_get_parent(pcib); if (device_get_devclass(pcip) != pci_class) break; pcibp = device_get_parent(pcip); if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) { /* * Do not stop the loop even if the target * device is PCIe, because it is possible (but * unlikely) to have a PCI->PCIe bridge * somewhere in the hierarchy. */ l = pcib; } else { /* * Device is not PCIe, it cannot be seen as a * requester by DMAR unit. Check whether the * bridge is PCIe. */ bridge_is_pcie = pci_find_cap(pcib, PCIY_EXPRESS, &cap_offset) == 0; requester = pcib; /* * Check for a buggy PCIe/PCI bridge that * doesn't report the express capability. If * the bridge above it is express but isn't a * PCI bridge, then we know pcib is actually a * PCIe/PCI bridge. */ if (!bridge_is_pcie && pci_find_cap(pcibp, PCIY_EXPRESS, &cap_offset) == 0) { pcie_flags = pci_read_config(pcibp, cap_offset + PCIER_FLAGS, 2); if ((pcie_flags & PCIEM_FLAGS_TYPE) != PCIEM_TYPE_PCI_BRIDGE) bridge_is_pcie = true; } if (bridge_is_pcie) { /* * The current device is not PCIe, but * the bridge above it is. This is a * PCIe->PCI bridge. Assume that the * requester ID will be the secondary * bus number with slot and function * set to zero. * * XXX: Doesn't handle the case where * the bridge is PCIe->PCI-X, and the * bridge will only take ownership of * requests in some cases. 
We should * provide context entries with the * same page tables for taken and * non-taken transactions. */ *rid = PCI_RID(pci_get_bus(l), 0, 0); l = pcibp; } else { /* * Neither the device nor the bridge * above it are PCIe. This is a * conventional PCI->PCI bridge, which * will use the bridge's BSF as the * requester ID. */ *rid = pci_get_rid(pcib); l = pcib; } } } return (requester); } struct dmar_ctx * dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr) { device_t requester; struct dmar_ctx *ctx; bool disabled; uint16_t rid; requester = dmar_get_requester(dev, &rid); /* * If the user requested the IOMMU disabled for the device, we * cannot disable the DMAR, due to possibility of other * devices on the same DMAR still requiring translation. * Instead provide the identity mapping for the device * context. */ disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester), pci_get_bus(requester), pci_get_slot(requester), pci_get_function(requester)); ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr); if (ctx == NULL) return (NULL); if (disabled) { /* * Keep the first reference on context, release the * later refs. */ DMAR_LOCK(dmar); if ((ctx->flags & DMAR_CTX_DISABLED) == 0) { ctx->flags |= DMAR_CTX_DISABLED; DMAR_UNLOCK(dmar); } else { dmar_free_ctx_locked(dmar, ctx); } ctx = NULL; } return (ctx); } bus_dma_tag_t dmar_get_dma_tag(device_t dev, device_t child) { struct dmar_unit *dmar; struct dmar_ctx *ctx; bus_dma_tag_t res; dmar = dmar_find(child); /* Not in scope of any DMAR ? */ if (dmar == NULL) return (NULL); if (!dmar->dma_enabled) return (NULL); dmar_quirks_pre_use(dmar); dmar_instantiate_rmrr_ctxs(dmar); ctx = dmar_instantiate_ctx(dmar, child, false); res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag; return (res); } static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map"); static void dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map); static int dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { struct bus_dma_tag_dmar *newtag, *oldtag; int error; *dmat = NULL; error = common_bus_dma_tag_create(parent != NULL ? &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, sizeof(struct bus_dma_tag_dmar), (void **)&newtag); if (error != 0) goto out; oldtag = (struct bus_dma_tag_dmar *)parent; newtag->common.impl = &bus_dma_dmar_impl; newtag->ctx = oldtag->ctx; newtag->owner = oldtag->owner; *dmat = (bus_dma_tag_t)newtag; out: CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? 
newtag->common.flags : 0), error); return (error); } static int dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat) { return (0); } static int dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1) { struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent; int error; error = 0; dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { parent = (struct bus_dma_tag_dmar *)dmat->common.parent; if (atomic_fetchadd_int(&dmat->common.ref_count, -1) == 1) { if (dmat == &dmat->ctx->ctx_tag) dmar_free_ctx(dmat->ctx); free_domain(dmat->segments, M_DMAR_DMAMAP); free(dmat, M_DEVBUF); dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } static int dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__); tag = (struct bus_dma_tag_dmar *)dmat; - map = malloc_domain(sizeof(*map), M_DMAR_DMAMAP, - tag->common.domain, M_NOWAIT | M_ZERO); + map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP, + DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO); if (map == NULL) { *mapp = NULL; return (ENOMEM); } if (tag->segments == NULL) { - tag->segments = malloc_domain(sizeof(bus_dma_segment_t) * + tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) * tag->common.nsegments, M_DMAR_DMAMAP, - tag->common.domain, M_NOWAIT); + DOMAINSET_PREF(tag->common.domain), M_NOWAIT); if (tag->segments == NULL) { free_domain(map, M_DMAR_DMAMAP); *mapp = NULL; return (ENOMEM); } } TAILQ_INIT(&map->map_entries); map->tag = tag; map->locked = true; map->cansleep = false; tag->map_count++; *mapp = (bus_dmamap_t)map; return (0); } static int dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; struct dmar_domain *domain; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; if (map != NULL) { domain = tag->ctx->domain; DMAR_DOMAIN_LOCK(domain); if (!TAILQ_EMPTY(&map->map_entries)) { DMAR_DOMAIN_UNLOCK(domain); return (EBUSY); } DMAR_DOMAIN_UNLOCK(domain); free_domain(map, M_DMAR_DMAMAP); } tag->map_count--; return (0); } static int dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; int error, mflags; vm_memattr_t attr; error = dmar_bus_dmamap_create(dmat, flags, mapp); if (error != 0) return (error); mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK; mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0; attr = (flags & BUS_DMA_NOCACHE) != 0 ? 
VM_MEMATTR_UNCACHEABLE : VM_MEMATTR_DEFAULT; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)*mapp; if (tag->common.maxsize < PAGE_SIZE && tag->common.alignment <= tag->common.maxsize && attr == VM_MEMATTR_DEFAULT) { - *vaddr = malloc_domain(tag->common.maxsize, M_DEVBUF, - tag->common.domain, mflags); + *vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF, + DOMAINSET_PREF(tag->common.domain), mflags); map->flags |= BUS_DMAMAP_DMAR_MALLOC; } else { - *vaddr = (void *)kmem_alloc_attr_domain(tag->common.domain, - tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR, - attr); + *vaddr = (void *)kmem_alloc_attr_domainset( + DOMAINSET_PREF(tag->common.domain), tag->common.maxsize, + mflags, 0ul, BUS_SPACE_MAXADDR, attr); map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC; } if (*vaddr == NULL) { dmar_bus_dmamap_destroy(dmat, *mapp); *mapp = NULL; return (ENOMEM); } return (0); } static void dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) { free_domain(vaddr, M_DEVBUF); map->flags &= ~BUS_DMAMAP_DMAR_MALLOC; } else { KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0, ("dmar_bus_dmamem_free for non alloced map %p", map)); kmem_free((vm_offset_t)vaddr, tag->common.maxsize); map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC; } dmar_bus_dmamap_destroy(dmat, map1); } static int dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag, struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp, struct dmar_map_entries_tailq *unroll_list) { struct dmar_ctx *ctx; struct dmar_domain *domain; struct dmar_map_entry *entry; dmar_gaddr_t size; bus_size_t buflen1; int error, idx, gas_flags, seg; KASSERT(offset < DMAR_PAGE_SIZE, ("offset %d", offset)); if (segs == NULL) segs = tag->segments; ctx = tag->ctx; domain = ctx->domain; seg = *segp; error = 0; idx = 0; while (buflen > 0) { seg++; if (seg >= tag->common.nsegments) { error = EFBIG; break; } buflen1 = buflen > tag->common.maxsegsz ? tag->common.maxsegsz : buflen; size = round_page(offset + buflen1); /* * (Too) optimistically allow split if there are more * then one segments left. */ gas_flags = map->cansleep ? 
DMAR_GM_CANWAIT : 0; if (seg + 1 < tag->common.nsegments) gas_flags |= DMAR_GM_CANSPLIT; error = dmar_gas_map(domain, &tag->common, size, offset, DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE, gas_flags, ma + idx, &entry); if (error != 0) break; if ((gas_flags & DMAR_GM_CANSPLIT) != 0) { KASSERT(size >= entry->end - entry->start, ("split increased entry size %jx %jx %jx", (uintmax_t)size, (uintmax_t)entry->start, (uintmax_t)entry->end)); size = entry->end - entry->start; if (buflen1 > size) buflen1 = size; } else { KASSERT(entry->end - entry->start == size, ("no split allowed %jx %jx %jx", (uintmax_t)size, (uintmax_t)entry->start, (uintmax_t)entry->end)); } if (offset + buflen1 > size) buflen1 = size - offset; if (buflen1 > tag->common.maxsegsz) buflen1 = tag->common.maxsegsz; KASSERT(((entry->start + offset) & (tag->common.alignment - 1)) == 0, ("alignment failed: ctx %p start 0x%jx offset %x " "align 0x%jx", ctx, (uintmax_t)entry->start, offset, (uintmax_t)tag->common.alignment)); KASSERT(entry->end <= tag->common.lowaddr || entry->start >= tag->common.highaddr, ("entry placement failed: ctx %p start 0x%jx end 0x%jx " "lowaddr 0x%jx highaddr 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)tag->common.lowaddr, (uintmax_t)tag->common.highaddr)); KASSERT(dmar_test_boundary(entry->start + offset, buflen1, tag->common.boundary), ("boundary failed: ctx %p start 0x%jx end 0x%jx " "boundary 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)tag->common.boundary)); KASSERT(buflen1 <= tag->common.maxsegsz, ("segment too large: ctx %p start 0x%jx end 0x%jx " "buflen1 0x%jx maxsegsz 0x%jx", ctx, (uintmax_t)entry->start, (uintmax_t)entry->end, (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz)); DMAR_DOMAIN_LOCK(domain); TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link); entry->flags |= DMAR_MAP_ENTRY_MAP; DMAR_DOMAIN_UNLOCK(domain); TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link); segs[seg].ds_addr = entry->start + offset; segs[seg].ds_len = buflen1; idx += OFF_TO_IDX(trunc_page(offset + buflen1)); offset += buflen1; offset &= DMAR_PAGE_MASK; buflen -= buflen1; } if (error == 0) *segp = seg; return (error); } static int dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag, struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct dmar_ctx *ctx; struct dmar_domain *domain; struct dmar_map_entry *entry, *entry1; struct dmar_map_entries_tailq unroll_list; int error; ctx = tag->ctx; domain = ctx->domain; atomic_add_long(&ctx->loads, 1); TAILQ_INIT(&unroll_list); error = dmar_bus_dmamap_load_something1(tag, map, ma, offset, buflen, flags, segs, segp, &unroll_list); if (error != 0) { /* * The busdma interface does not allow us to report * partial buffer load, so unfortunately we have to * revert all work done. */ DMAR_DOMAIN_LOCK(domain); TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link, entry1) { /* * No entries other than what we have created * during the failed run might have been * inserted there in between, since we own ctx * pglock. 
*/ TAILQ_REMOVE(&map->map_entries, entry, dmamap_link); TAILQ_REMOVE(&unroll_list, entry, unroll_link); TAILQ_INSERT_TAIL(&domain->unload_entries, entry, dmamap_link); } DMAR_DOMAIN_UNLOCK(domain); taskqueue_enqueue(domain->dmar->delayed_taskqueue, &domain->unload_task); } if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 && !map->cansleep) error = EINPROGRESS; if (error == EINPROGRESS) dmar_bus_schedule_dmamap(domain->dmar, map); return (error); } static int dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen, flags, segs, segp)); } static int dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; vm_page_t *ma; vm_paddr_t pstart, pend; int error, i, ma_cnt, offset; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; pstart = trunc_page(buf); pend = round_page(buf + buflen); offset = buf & PAGE_MASK; ma_cnt = OFF_TO_IDX(pend - pstart); ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ? M_WAITOK : M_NOWAIT); if (ma == NULL) return (ENOMEM); for (i = 0; i < ma_cnt; i++) ma[i] = PHYS_TO_VM_PAGE(pstart + i * PAGE_SIZE); error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen, flags, segs, segp); free(ma, M_DEVBUF); return (error); } static int dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; vm_page_t *ma, fma; vm_paddr_t pstart, pend, paddr; int error, i, ma_cnt, offset; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; pstart = trunc_page((vm_offset_t)buf); pend = round_page((vm_offset_t)buf + buflen); offset = (vm_offset_t)buf & PAGE_MASK; ma_cnt = OFF_TO_IDX(pend - pstart); ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ? M_WAITOK : M_NOWAIT); if (ma == NULL) return (ENOMEM); if (dumping) { /* * If dumping, do not attempt to call * PHYS_TO_VM_PAGE() at all. It may return non-NULL * but the vm_page returned might be not initialized, * e.g. for the kernel itself. */ KASSERT(pmap == kernel_pmap, ("non-kernel address write")); fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF, M_ZERO | (map->cansleep ? 
M_WAITOK : M_NOWAIT)); if (fma == NULL) { free(ma, M_DEVBUF); return (ENOMEM); } for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) { paddr = pmap_kextract(pstart); vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT); ma[i] = &fma[i]; } } else { fma = NULL; for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) { if (pmap == kernel_pmap) paddr = pmap_kextract(pstart); else paddr = pmap_extract(pmap, pstart); ma[i] = PHYS_TO_VM_PAGE(paddr); KASSERT(VM_PAGE_TO_PHYS(ma[i]) == paddr, ("PHYS_TO_VM_PAGE failed %jx %jx m %p", (uintmax_t)paddr, (uintmax_t)VM_PAGE_TO_PHYS(ma[i]), ma[i])); } } error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen, flags, segs, segp); free(ma, M_DEVBUF); free(fma, M_DEVBUF); return (error); } static void dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { struct bus_dmamap_dmar *map; if (map1 == NULL) return; map = (struct bus_dmamap_dmar *)map1; map->mem = *mem; map->tag = (struct bus_dma_tag_dmar *)dmat; map->callback = callback; map->callback_arg = callback_arg; } static bus_dma_segment_t * dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1, bus_dma_segment_t *segs, int nsegs, int error) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; if (!map->locked) { KASSERT(map->cansleep, ("map not locked and not sleepable context %p", map)); /* * We are called from the delayed context. Relock the * driver. */ (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK); map->locked = true; } if (segs == NULL) segs = tag->segments; return (segs); } /* * The limitations of busdma KPI forces the dmar to perform the actual * unload, consisting of the unmapping of the map entries page tables, * from the delayed context on i386, since page table page mapping * might require a sleep to be successfull. The unfortunate * consequence is that the DMA requests can be served some time after * the bus_dmamap_unload() call returned. * * On amd64, we assume that sf allocation cannot fail. 
*/ static void dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; struct dmar_ctx *ctx; struct dmar_domain *domain; #if defined(__amd64__) struct dmar_map_entries_tailq entries; #endif tag = (struct bus_dma_tag_dmar *)dmat; map = (struct bus_dmamap_dmar *)map1; ctx = tag->ctx; domain = ctx->domain; atomic_add_long(&ctx->unloads, 1); #if defined(__i386__) DMAR_DOMAIN_LOCK(domain); TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link); DMAR_DOMAIN_UNLOCK(domain); taskqueue_enqueue(domain->dmar->delayed_taskqueue, &domain->unload_task); #else /* defined(__amd64__) */ TAILQ_INIT(&entries); DMAR_DOMAIN_LOCK(domain); TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link); DMAR_DOMAIN_UNLOCK(domain); THREAD_NO_SLEEPING(); dmar_domain_unload(domain, &entries, false); THREAD_SLEEPING_OK(); KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx)); #endif } static void dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { } struct bus_dma_impl bus_dma_dmar_impl = { .tag_create = dmar_bus_dma_tag_create, .tag_destroy = dmar_bus_dma_tag_destroy, .tag_set_domain = dmar_bus_dma_tag_set_domain, .map_create = dmar_bus_dmamap_create, .map_destroy = dmar_bus_dmamap_destroy, .mem_alloc = dmar_bus_dmamem_alloc, .mem_free = dmar_bus_dmamem_free, .load_phys = dmar_bus_dmamap_load_phys, .load_buffer = dmar_bus_dmamap_load_buffer, .load_ma = dmar_bus_dmamap_load_ma, .map_waitok = dmar_bus_dmamap_waitok, .map_complete = dmar_bus_dmamap_complete, .map_unload = dmar_bus_dmamap_unload, .map_sync = dmar_bus_dmamap_sync, }; static void dmar_bus_task_dmamap(void *arg, int pending) { struct bus_dma_tag_dmar *tag; struct bus_dmamap_dmar *map; struct dmar_unit *unit; unit = arg; DMAR_LOCK(unit); while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) { TAILQ_REMOVE(&unit->delayed_maps, map, delay_link); DMAR_UNLOCK(unit); tag = map->tag; map->cansleep = true; map->locked = false; bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map, &map->mem, map->callback, map->callback_arg, BUS_DMA_WAITOK); map->cansleep = false; if (map->locked) { (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_UNLOCK); } else map->locked = true; map->cansleep = false; DMAR_LOCK(unit); } DMAR_UNLOCK(unit); } static void dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map) { map->locked = false; DMAR_LOCK(unit); TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link); DMAR_UNLOCK(unit); taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task); } int dmar_init_busdma(struct dmar_unit *unit) { unit->dma_enabled = 1; TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled); TAILQ_INIT(&unit->delayed_maps); TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit); unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK, taskqueue_thread_enqueue, &unit->delayed_taskqueue); taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK, "dmar%d busdma taskq", unit->unit); return (0); } void dmar_fini_busdma(struct dmar_unit *unit) { if (unit->delayed_taskqueue == NULL) return; taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task); taskqueue_free(unit->delayed_taskqueue); unit->delayed_taskqueue = NULL; } Index: head/sys/x86/x86/busdma_bounce.c =================================================================== --- head/sys/x86/x86/busdma_bounce.c (revision 339926) +++ head/sys/x86/x86/busdma_bounce.c (revision 339927) @@ -1,1315 +1,1317 @@ /*- * SPDX-License-Identifier: 
BSD-2-Clause-FreeBSD * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #define MAX_BPAGES 512 #else #define MAX_BPAGES 8192 #endif enum { BUS_DMA_COULD_BOUNCE = 0x01, BUS_DMA_MIN_ALLOC_COMP = 0x02, BUS_DMA_KMEM_ALLOC = 0x04, }; struct bounce_zone; struct bus_dma_tag { struct bus_dma_tag_common common; int map_count; int bounce_flags; bus_dma_segment_t *segments; struct bounce_zone *bounce_zone; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ vm_offset_t dataoffs; /* page offset of client data */ vm_page_t datapage[2]; /* physical page(s) of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; int busdma_swi_pending; struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; int total_bpages; int free_bpages; int reserved_bpages; int active_bpages; int total_bounced; int total_deferred; int map_count; int domain; bus_size_t alignment; bus_addr_t lowaddr; char zoneid[8]; char lowaddrid[20]; struct sysctl_ctx_list sysctl_tree; struct sysctl_oid *sysctl_tree_top; }; static struct mtx bounce_lock; static int total_bpages; static int busdma_zonecount; static STAILQ_HEAD(, bounce_zone) bounce_zone_list; static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, "Total bounce pages"); struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; struct memdesc mem; bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static struct bus_dmamap nobounce_dmamap; static void init_bounce_pages(void *dummy); static int alloc_bounce_zone(bus_dma_tag_t dmat); static int alloc_bounce_pages(bus_dma_tag_t dmat, 
u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_addr_t addr1, bus_addr_t addr2, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags); static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags); static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags); static int bounce_bus_dma_zone_setup(bus_dma_tag_t dmat) { struct bounce_zone *bz; int error; /* Must bounce */ if ((error = alloc_bounce_zone(dmat)) != 0) return (error); bz = dmat->bounce_zone; if (ptoa(bz->total_bpages) < dmat->common.maxsize) { int pages; pages = atop(dmat->common.maxsize) - bz->total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(dmat, pages) < pages) return (ENOMEM); } /* Performed initial allocation */ dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP; return (0); } /* * Allocate a device specific dma_tag. */ static int bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error; *dmat = NULL; error = common_bus_dma_tag_create(parent != NULL ? &parent->common : NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, sizeof (struct bus_dma_tag), (void **)&newtag); if (error != 0) return (error); newtag->common.impl = &bus_dma_bounce_impl; newtag->map_count = 0; newtag->segments = NULL; if (parent != NULL && ((newtag->common.filter != NULL) || ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))) newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE; if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) || newtag->common.alignment > 1) newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE; if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && (flags & BUS_DMA_ALLOCNOW) != 0) error = bounce_bus_dma_zone_setup(newtag); else error = 0; if (error != 0) free(newtag, M_DEVBUF); else *dmat = newtag; CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), error); return (error); } /* * Update the domain for the tag. We may need to reallocate the zone and * bounce pages. 
*/ static int bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat) { KASSERT(dmat->map_count == 0, ("bounce_bus_dma_tag_set_domain: Domain set after use.\n")); if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 || dmat->bounce_zone == NULL) return (0); dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP; return (bounce_bus_dma_zone_setup(dmat)); } static int bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat) { bus_dma_tag_t dmat_copy, parent; int error; error = 0; dmat_copy = dmat; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { parent = (bus_dma_tag_t)dmat->common.parent; atomic_subtract_int(&dmat->common.ref_count, 1); if (dmat->common.ref_count == 0) { if (dmat->segments != NULL) free_domain(dmat->segments, M_DEVBUF); free(dmat, M_DEVBUF); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ static int bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { struct bounce_zone *bz; int error, maxpages, pages; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__); error = 0; if (dmat->segments == NULL) { - dmat->segments = (bus_dma_segment_t *)malloc_domain( + dmat->segments = (bus_dma_segment_t *)malloc_domainset( sizeof(bus_dma_segment_t) * dmat->common.nsegments, - M_DEVBUF, dmat->common.domain, M_NOWAIT); + M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT); if (dmat->segments == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } } /* * Bouncing might be required if the driver asks for an active * exclusion region, a data alignment that is stricter than 1, and/or * an active address boundary. */ if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) { /* Must bounce */ if (dmat->bounce_zone == NULL) { if ((error = alloc_bounce_zone(dmat)) != 0) return (error); } bz = dmat->bounce_zone; - *mapp = (bus_dmamap_t)malloc_domain(sizeof(**mapp), M_DEVBUF, - dmat->common.domain, M_NOWAIT | M_ZERO); + *mapp = (bus_dmamap_t)malloc_domainset(sizeof(**mapp), M_DEVBUF, + DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO); if (*mapp == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit. */ if (dmat->common.alignment > 1) maxpages = MAX_BPAGES; else maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->common.lowaddr)); if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { pages = MAX(atop(dmat->common.maxsize), 1); pages = MIN(maxpages - bz->total_bpages, pages); pages = MAX(pages, 1); if (alloc_bounce_pages(dmat, pages) < pages) error = ENOMEM; if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) { dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP; } } else error = 0; } bz->map_count++; } else { *mapp = NULL; } if (error == 0) dmat->map_count++; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, error); return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. 
*/ static int bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (map != NULL && map != &nobounce_dmamap) { if (STAILQ_FIRST(&map->bpages) != NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } if (dmat->bounce_zone) dmat->bounce_zone->map_count--; free_domain(map, M_DEVBUF); } dmat->map_count--; CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints lited in the dma tag. * A dmamap to for use with dmamap_load is also allocated. */ static int bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { vm_memattr_t attr; int mflags; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__); if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; else mflags = M_WAITOK; /* If we succeed, no mapping/bouncing will be required */ *mapp = NULL; if (dmat->segments == NULL) { - dmat->segments = (bus_dma_segment_t *)malloc_domain( + dmat->segments = (bus_dma_segment_t *)malloc_domainset( sizeof(bus_dma_segment_t) * dmat->common.nsegments, - M_DEVBUF, dmat->common.domain, mflags); + M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags); if (dmat->segments == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, ENOMEM); return (ENOMEM); } } if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; if (flags & BUS_DMA_NOCACHE) attr = VM_MEMATTR_UNCACHEABLE; else attr = VM_MEMATTR_DEFAULT; /* * Allocate the buffer from the malloc(9) allocator if... * - It's small enough to fit into a single power of two sized bucket. * - The alignment is less than or equal to the maximum size * - The low address requirement is fulfilled. * else allocate non-contiguous pages if... * - The page count that could get allocated doesn't exceed * nsegments also when the maximum segment size is less * than PAGE_SIZE. * - The alignment constraint isn't larger than a page boundary. * - There are no boundary-crossing constraints. * else allocate a block of contiguous pages because one or more of the * constraints is something that only the contig allocator can fulfill. * * NOTE: The (dmat->common.alignment <= dmat->maxsize) check * below is just a quick hack. The exact alignment guarantees * of malloc(9) need to be nailed down, and the code below * should be rewritten to take that into account. * * In the meantime warn the user if malloc gets it wrong. 
*/ if ((dmat->common.maxsize <= PAGE_SIZE) && (dmat->common.alignment <= dmat->common.maxsize) && dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && attr == VM_MEMATTR_DEFAULT) { - *vaddr = malloc_domain(dmat->common.maxsize, M_DEVBUF, - dmat->common.domain, mflags); + *vaddr = malloc_domainset(dmat->common.maxsize, M_DEVBUF, + DOMAINSET_PREF(dmat->common.domain), mflags); } else if (dmat->common.nsegments >= howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && dmat->common.alignment <= PAGE_SIZE && (dmat->common.boundary % PAGE_SIZE) == 0) { /* Page-based multi-segment allocations allowed */ - *vaddr = (void *)kmem_alloc_attr_domain(dmat->common.domain, - dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr, - attr); + *vaddr = (void *)kmem_alloc_attr_domainset( + DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize, + mflags, 0ul, dmat->common.lowaddr, attr); dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC; } else { - *vaddr = (void *)kmem_alloc_contig_domain(dmat->common.domain, - dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr, + *vaddr = (void *)kmem_alloc_contig_domainset( + DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize, + mflags, 0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ? dmat->common.alignment : 1ul, dmat->common.boundary, attr); dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC; } if (*vaddr == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, ENOMEM); return (ENOMEM); } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) { printf("bus_dmamem_alloc failed to align memory properly.\n"); } CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->common.flags, 0); return (0); } /* * Free a piece of memory and it's allociated dmamap, that was allocated * via bus_dmamem_alloc. Make the same choice for free/contigfree. */ static void bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { /* * dmamem does not need to be bounced, so the map should be * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc() * was used and set if kmem_alloc_contig() was used. 
*/ if (map != NULL) panic("bus_dmamem_free: Invalid map freed\n"); if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0) free_domain(vaddr, M_DEVBUF); else kmem_free((vm_offset_t)vaddr, dmat->common.maxsize); CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->bounce_flags); } static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags) { bus_addr_t curaddr; bus_size_t sgsize; if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { /* * Count the number of bounce pages * needed in order to complete this transfer */ curaddr = buf; while (buflen != 0) { sgsize = MIN(buflen, dmat->common.maxsegsz); if (bus_dma_run_filter(&dmat->common, curaddr)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); map->pagesneeded++; } curaddr += sgsize; buflen -= sgsize; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { vm_offset_t vaddr; vm_offset_t vendaddr; bus_addr_t paddr; bus_size_t sg_len; if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->common.lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->common.boundary, dmat->common.alignment); CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", map, &nobounce_dmamap, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = (vm_offset_t)buf; vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr); else paddr = pmap_extract(pmap, vaddr); if (bus_dma_run_filter(&dmat->common, paddr) != 0) { sg_len = roundup2(sg_len, dmat->common.alignment); map->pagesneeded++; } vaddr += sg_len; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static void _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, int ma_offs, bus_size_t buflen, int flags) { bus_size_t sg_len, max_sgsize; int page_index; vm_paddr_t paddr; if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->common.lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->common.boundary, dmat->common.alignment); CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", map, &nobounce_dmamap, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ page_index = 0; while (buflen > 0) { paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs; sg_len = PAGE_SIZE - ma_offs; max_sgsize = MIN(buflen, dmat->common.maxsegsz); sg_len = MIN(sg_len, max_sgsize); if (bus_dma_run_filter(&dmat->common, paddr) != 0) { sg_len = roundup2(sg_len, dmat->common.alignment); sg_len = MIN(sg_len, max_sgsize); KASSERT((sg_len & (dmat->common.alignment - 1)) == 0, ("Segment size is not aligned")); map->pagesneeded++; } if (((ma_offs + sg_len) & ~PAGE_MASK) != 0) page_index++; ma_offs = (ma_offs + sg_len) & PAGE_MASK; KASSERT(buflen >= sg_len, ("Segment length overruns original buffer")); buflen -= sg_len; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) { /* Reserve Necessary Bounce Pages */ mtx_lock(&bounce_lock); if (flags & BUS_DMA_NOWAIT) { if (reserve_bounce_pages(dmat, map, 0) != 0) { mtx_unlock(&bounce_lock); 
return (ENOMEM); } } else { if (reserve_bounce_pages(dmat, map, 1) != 0) { /* Queue us for resources */ STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); mtx_unlock(&bounce_lock); return (EINPROGRESS); } } mtx_unlock(&bounce_lock); return (0); } /* * Add a single contiguous physical range to the segment list. */ static int _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { bus_addr_t baddr, bmask; int seg; /* * Make sure we don't cross any boundaries. */ bmask = ~(dmat->common.boundary - 1); if (dmat->common.boundary > 0) { baddr = (curaddr + dmat->common.boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ seg = *segp; if (seg == -1) { seg = 0; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } else { if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz && (dmat->common.boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->common.nsegments) return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } *segp = seg; return (sgsize); } /* * Utility function to load a physical buffer. segp contains * the starting segment on entrace, and the ending segment on exit. */ static int bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_size_t sgsize; bus_addr_t curaddr; int error; if (map == NULL) map = &nobounce_dmamap; if (segs == NULL) segs = dmat->segments; if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } while (buflen > 0) { curaddr = buf; sgsize = MIN(buflen, dmat->common.maxsegsz); if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && map->pagesneeded != 0 && bus_dma_run_filter(&dmat->common, curaddr)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); curaddr = add_bounce_page(dmat, map, 0, curaddr, 0, sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; buf += sgsize; buflen -= sgsize; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } /* * Utility function to load a linear buffer. segp contains * the starting segment on entrace, and the ending segment on exit. */ static int bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { bus_size_t sgsize, max_sgsize; bus_addr_t curaddr; vm_offset_t kvaddr, vaddr; int error; if (map == NULL) map = &nobounce_dmamap; if (segs == NULL) segs = dmat->segments; if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } vaddr = (vm_offset_t)buf; while (buflen > 0) { /* * Get the physical address for this segment. */ if (pmap == kernel_pmap) { curaddr = pmap_kextract(vaddr); kvaddr = vaddr; } else { curaddr = pmap_extract(pmap, vaddr); kvaddr = 0; } /* * Compute the segment size, and adjust counts. 
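 *
 * For example (illustrative numbers, no bouncing): with buflen 10240,
 * maxsegsz 8192 and curaddr 0x100 bytes into a page, this pass yields
 * sgsize = PAGE_SIZE - 0x100 = 3840, so the segment stops at the page
 * boundary and the remaining 6400 bytes are consumed on later
 * iterations.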
*/ max_sgsize = MIN(buflen, dmat->common.maxsegsz); sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && map->pagesneeded != 0 && bus_dma_run_filter(&dmat->common, curaddr)) { sgsize = roundup2(sgsize, dmat->common.alignment); sgsize = MIN(sgsize, max_sgsize); curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0, sgsize); } else { sgsize = MIN(sgsize, max_sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; vaddr += sgsize; buflen -= sgsize; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } static int bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { vm_paddr_t paddr, next_paddr; int error, page_index; bus_size_t sgsize, max_sgsize; if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { /* * If we have to keep the offset of each page this function * is not suitable, switch back to bus_dmamap_load_ma_triv * which is going to do the right thing in this case. */ error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs, flags, segs, segp); return (error); } if (map == NULL) map = &nobounce_dmamap; if (segs == NULL) segs = dmat->segments; if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } page_index = 0; while (buflen > 0) { /* * Compute the segment size, and adjust counts. */ paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs; max_sgsize = MIN(buflen, dmat->common.maxsegsz); sgsize = PAGE_SIZE - ma_offs; if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && map->pagesneeded != 0 && bus_dma_run_filter(&dmat->common, paddr)) { sgsize = roundup2(sgsize, dmat->common.alignment); sgsize = MIN(sgsize, max_sgsize); KASSERT((sgsize & (dmat->common.alignment - 1)) == 0, ("Segment size is not aligned")); /* * Check if two pages of the user provided buffer * are used. */ if ((ma_offs + sgsize) > PAGE_SIZE) next_paddr = VM_PAGE_TO_PHYS(ma[page_index + 1]); else next_paddr = 0; paddr = add_bounce_page(dmat, map, 0, paddr, next_paddr, sgsize); } else { sgsize = MIN(sgsize, max_sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs, segp); if (sgsize == 0) break; KASSERT(buflen >= sgsize, ("Segment length overruns original buffer")); buflen -= sgsize; if (((ma_offs + sgsize) & ~PAGE_MASK) != 0) page_index++; ma_offs = (ma_offs + sgsize) & PAGE_MASK; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } static void bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { if (map == NULL) return; map->mem = *mem; map->dmat = dmat; map->callback = callback; map->callback_arg = callback_arg; } static bus_dma_segment_t * bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { if (segs == NULL) segs = dmat->segments; return (segs); } /* * Release the mapping held by map. 
*/ static void bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; if (map == NULL) return; while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } static void bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; vm_offset_t datavaddr, tempvaddr; bus_size_t datacount1, datacount2; if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL) return; /* * Handle data bouncing. We might also want to add support for * invalidating the caches on broken hardware. */ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing bounce", __func__, dmat, dmat->common.flags, op); if ((op & BUS_DMASYNC_PREWRITE) != 0) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; datacount1 = bpage->datacount; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page(bpage->datapage[0]); datavaddr = tempvaddr | bpage->dataoffs; datacount1 = min(PAGE_SIZE - bpage->dataoffs, datacount1); } bcopy((void *)datavaddr, (void *)bpage->vaddr, datacount1); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); if (bpage->datapage[1] == 0) { KASSERT(datacount1 == bpage->datacount, ("Mismatch between data size and provided memory space")); goto next_w; } /* * We are dealing with an unmapped buffer that expands * over two pages. */ datavaddr = pmap_quick_enter_page(bpage->datapage[1]); datacount2 = bpage->datacount - datacount1; bcopy((void *)datavaddr, (void *)(bpage->vaddr + datacount1), datacount2); pmap_quick_remove_page(datavaddr); next_w: bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } if ((op & BUS_DMASYNC_POSTREAD) != 0) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; datacount1 = bpage->datacount; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page(bpage->datapage[0]); datavaddr = tempvaddr | bpage->dataoffs; datacount1 = min(PAGE_SIZE - bpage->dataoffs, datacount1); } bcopy((void *)bpage->vaddr, (void *)datavaddr, datacount1); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); if (bpage->datapage[1] == 0) { KASSERT(datacount1 == bpage->datacount, ("Mismatch between data size and provided memory space")); goto next_r; } /* * We are dealing with an unmapped buffer that expands * over two pages. 
*/ datavaddr = pmap_quick_enter_page(bpage->datapage[1]); datacount2 = bpage->datacount - datacount1; bcopy((void *)(bpage->vaddr + datacount1), (void *)datavaddr, datacount2); pmap_quick_remove_page(datavaddr); next_r: bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } } static void init_bounce_pages(void *dummy __unused) { total_bpages = 0; STAILQ_INIT(&bounce_zone_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); } SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); static struct sysctl_ctx_list * busdma_sysctl_tree(struct bounce_zone *bz) { return (&bz->sysctl_tree); } static struct sysctl_oid * busdma_sysctl_tree_top(struct bounce_zone *bz) { return (bz->sysctl_tree_top); } static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz; /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if ((dmat->common.alignment <= bz->alignment) && (dmat->common.lowaddr >= bz->lowaddr) && (dmat->common.domain == bz->domain)) { dmat->bounce_zone = bz; return (0); } } if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) return (ENOMEM); STAILQ_INIT(&bz->bounce_page_list); bz->free_bpages = 0; bz->reserved_bpages = 0; bz->active_bpages = 0; bz->lowaddr = dmat->common.lowaddr; bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE); bz->map_count = 0; bz->domain = dmat->common.domain; snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); busdma_zonecount++; snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); dmat->bounce_zone = bz; sysctl_ctx_init(&bz->sysctl_tree); bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, CTLFLAG_RD, 0, ""); if (bz->sysctl_tree_top == NULL) { sysctl_ctx_free(&bz->sysctl_tree); return (0); /* XXX error code? 
*/ } SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, "Total bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, "Free bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, "Reserved bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, "Active bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, "Total bounce requests"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, "Total bounce requests that were deferred"); SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "alignment", CTLFLAG_RD, &bz->alignment, ""); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "domain", CTLFLAG_RD, &bz->domain, 0, "memory domain"); return (0); } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { struct bounce_zone *bz; int count; bz = dmat->bounce_zone; count = 0; while (numpages > 0) { struct bounce_page *bpage; - bpage = (struct bounce_page *)malloc_domain(sizeof(*bpage), - M_DEVBUF, dmat->common.domain, M_NOWAIT | M_ZERO); + bpage = malloc_domainset(sizeof(*bpage), M_DEVBUF, + DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO); if (bpage == NULL) break; - bpage->vaddr = (vm_offset_t)contigmalloc_domain(PAGE_SIZE, - M_DEVBUF, dmat->common.domain, M_NOWAIT, 0ul, - bz->lowaddr, PAGE_SIZE, 0); + bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE, + M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT, + 0ul, bz->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == 0) { free_domain(bpage, M_DEVBUF); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); mtx_lock(&bounce_lock); STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); total_bpages++; bz->total_bpages++; bz->free_bpages++; mtx_unlock(&bounce_lock); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) { struct bounce_zone *bz; int pages; mtx_assert(&bounce_lock, MA_OWNED); bz = dmat->bounce_zone; pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) return (map->pagesneeded - (map->pagesreserved + pages)); bz->free_bpages -= pages; bz->reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_addr_t addr1, bus_addr_t addr2, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); KASSERT(map != NULL && map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map)); bz = dmat->bounce_zone; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if 
(map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; mtx_lock(&bounce_lock); bpage = STAILQ_FIRST(&bz->bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); bz->reserved_bpages--; bz->active_bpages++; mtx_unlock(&bounce_lock); if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { /* Page offset needs to be preserved. */ bpage->vaddr |= addr1 & PAGE_MASK; bpage->busaddr |= addr1 & PAGE_MASK; KASSERT(addr2 == 0, ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET")); } bpage->datavaddr = vaddr; bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1); KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned")); bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2); bpage->dataoffs = addr1 & PAGE_MASK; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { struct bus_dmamap *map; struct bounce_zone *bz; bz = dmat->bounce_zone; bpage->datavaddr = 0; bpage->datacount = 0; if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { /* * Reset the bounce page to start at offset 0. Other uses * of this bounce page may need to store a full page of * data and/or assume it starts on a page boundary. */ bpage->vaddr &= ~PAGE_MASK; bpage->busaddr &= ~PAGE_MASK; } mtx_lock(&bounce_lock); STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); bz->free_bpages++; bz->active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map, 1) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; bz->total_deferred++; swi_sched(vm_ih, 0); } } mtx_unlock(&bounce_lock); } void busdma_swi(void) { bus_dma_tag_t dmat; struct bus_dmamap *map; mtx_lock(&bounce_lock); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK); bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, map->callback_arg, BUS_DMA_WAITOK); (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } mtx_unlock(&bounce_lock); } struct bus_dma_impl bus_dma_bounce_impl = { .tag_create = bounce_bus_dma_tag_create, .tag_destroy = bounce_bus_dma_tag_destroy, .tag_set_domain = bounce_bus_dma_tag_set_domain, .map_create = bounce_bus_dmamap_create, .map_destroy = bounce_bus_dmamap_destroy, .mem_alloc = bounce_bus_dmamem_alloc, .mem_free = bounce_bus_dmamem_free, .load_phys = bounce_bus_dmamap_load_phys, .load_buffer = bounce_bus_dmamap_load_buffer, .load_ma = bounce_bus_dmamap_load_ma, .map_waitok = bounce_bus_dmamap_waitok, .map_complete = bounce_bus_dmamap_complete, .map_unload = bounce_bus_dmamap_unload, .map_sync = bounce_bus_dmamap_sync, };
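
Taken together, the hunks above are a mechanical conversion: each call to malloc_domain(), contigmalloc_domain(), kmem_alloc_attr_domain() and kmem_alloc_contig_domain() becomes a call to the corresponding *_domainset() function, with the bare domain number wrapped in a DOMAINSET_PREF() policy, which prefers that domain rather than naming it outright. Below is a minimal sketch of the new allocation pattern, assuming only what the diff itself shows; M_EXAMPLE, example_alloc() and example_free() are invented names for illustration and are not part of the change.

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_EXAMPLE, "example", "domainset KPI usage sketch");

/*
 * Allocate a zeroed object, preferring the given NUMA domain.  The
 * DOMAINSET_PREF() policy replaces the bare domain argument that the
 * old malloc_domain() calls in this diff used to take.
 */
static void *
example_alloc(size_t size, int domain)
{

	return (malloc_domainset(size, M_EXAMPLE, DOMAINSET_PREF(domain),
	    M_WAITOK | M_ZERO));
}

/* Domain-allocated memory is still released with free_domain(). */
static void
example_free(void *p)
{

	free_domain(p, M_EXAMPLE);
}

The kmem_alloc_attr_domainset(), kmem_alloc_contig_domainset() and contigmalloc_domainset() calls in the diff follow the same pattern: the only change from their *_domain() predecessors is that the domain argument becomes a DOMAINSET_PREF() policy, while the remaining size, flag, address-range, alignment and boundary arguments are carried over unchanged.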