Index: head/ObsoleteFiles.inc
===================================================================
--- head/ObsoleteFiles.inc	(revision 296271)
+++ head/ObsoleteFiles.inc	(revision 296272)
@@ -1,8079 +1,8081 @@
#
# $FreeBSD$
#
# This file lists old files (OLD_FILES), libraries (OLD_LIBS) and
# directories (OLD_DIRS) which should get removed at an update. Recently
# removed entries first (with the date as a comment). Dynamic libraries are
# special cased (OLD_LIBS). Static libraries or the generic links to
# the dynamic libraries (lib*.so) should (if you don't know why to make an
# exception, make this a "must") be viewed as normal files (OLD_FILES).
#
# In case of a complete directory hierarchy the sorting is in depth first
# order.
#
# The file is partitioned: OLD_FILES first, then OLD_LIBS and OLD_DIRS last.
#
# Before you commit changes to this file please check if any entries in
# tools/build/mk/OptionalObsoleteFiles.inc can be removed. The following
# command tells which files are listed more than once regardless of some
# architecture-specific conditionals, so you cannot blindly trust the
# output:
# ( grep '+=' /usr/src/ObsoleteFiles.inc | sort -u ; \
#   grep '+=' /usr/src/tools/build/mk/OptionalObsoleteFiles.inc | sort -u) | \
#   sort | uniq -d
#
# To find regular duplicates not dependent on optional components, you can
# also use something that will not give you false positives, e.g.:
# for t in `make -V TARGETS universe`; do
#   __MAKE_CONF=/dev/null make -f Makefile.inc1 TARGET=$t \
#     -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \
#     xargs -n1 | sort | uniq -d;
# done
#
# For optional components, you can use the following to see if some entries
# in OptionalObsoleteFiles.inc have been obsoleted by ObsoleteFiles.inc:
# for o in tools/build/options/WITH*; do
#   __MAKE_CONF=/dev/null make -f Makefile.inc1 -D${o##*/} \
#     -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \
#     xargs -n1 | sort | uniq -d;
# done
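#
# Once an entry lands in this list, it is acted on after installworld
# through the check-old/delete-old targets. A minimal sketch of that
# workflow (see build(7) for the authoritative procedure):
# cd /usr/src
# make check-old        # list obsolete files, libraries and directories
# make delete-old       # remove obsolete files and directories
# make delete-old-libs  # remove obsolete libraries once nothing uses them
#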
+# 20160301: Remove taskqueue_enqueue_fast
+OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz
# 20160225: Remove casperd and libcapsicum.
OLD_FILES+=sbin/casperd
OLD_FILES+=etc/rc.d/casperd
OLD_FILES+=usr/share/man/man8/casperd.8.gz
OLD_FILES+=usr/include/libcapsicum.h
OLD_FILES+=usr/include/libcapsicum_service.h
OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz
OLD_FILES+=usr/include/libcapsicum_dns.h
OLD_FILES+=usr/include/libcapsicum_grp.h
OLD_FILES+=usr/include/libcapsicum_impl.h
OLD_FILES+=usr/include/libcapsicum_pwd.h
OLD_FILES+=usr/include/libcapsicum_random.h
OLD_FILES+=usr/include/libcapsicum_sysctl.h
OLD_FILES+=libexec/casper/dns
OLD_FILES+=libexec/casper/grp
OLD_FILES+=libexec/casper/pwd
OLD_FILES+=libexec/casper/random
OLD_FILES+=libexec/casper/sysctl
# 20160223: functionality from mkulzma(1) merged into mkuzip(1)
OLD_FILES+=usr/bin/mkulzma
# 20160211: Remove obsolete unbound-control-setup
OLD_FILES+=usr/sbin/unbound-control-setup
# 20160116: Update mandoc to cvs snapshot 20160116
OLD_FILES+=usr/share/mdocml/example.style.css
OLD_FILES+=usr/share/mdocml/style.css
OLD_DIRS+=usr/share/mdocml
# 20151225: new clang import which bumps version from 3.7.0 to 3.7.1.
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.0/include
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.0/lib
OLD_DIRS+=usr/lib/clang/3.7.0
# 20151130: libelf moved from /usr/lib to /lib (libkvm dependency in r291406)
OLD_LIBS+=usr/lib/libelf.so.2
# 20151115: Fix bad upgrade scheme
OLD_FILES+=usr/share/locale/zh_CN.GB18030/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/zh_CN.GB2312/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_CN.GBK/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_CN.eucCN/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_TW.Big5/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/zh_Hant_TW.UTF-8
# 20151107: String collation improvements
OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE
OLD_DIRS+=usr/share/locale/UTF-8
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.PT154
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.UTF-8
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-1
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-13
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-15
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-2
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-4
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.US-ASCII
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_TIME
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_NUMERIC
OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-4
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/mn_MN.UTF-8
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-1
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-15
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.UTF-8
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MONETARY
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-2
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MESSAGES
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-5
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MESSAGES
OLD_DIRS+=usr/share/locale/sr_YU.UTF-8
# 20151101: added missing _test suffix on multiple tests in lib/libc
OLD_FILES+=usr/tests/lib/libc/c063/faccessat
OLD_FILES+=usr/tests/lib/libc/c063/fchmodat
OLD_FILES+=usr/tests/lib/libc/c063/fchownat
OLD_FILES+=usr/tests/lib/libc/c063/fexecve
OLD_FILES+=usr/tests/lib/libc/c063/fstatat
OLD_FILES+=usr/tests/lib/libc/c063/linkat
OLD_FILES+=usr/tests/lib/libc/c063/mkdirat
OLD_FILES+=usr/tests/lib/libc/c063/mkfifoat
OLD_FILES+=usr/tests/lib/libc/c063/mknodat
OLD_FILES+=usr/tests/lib/libc/c063/openat
OLD_FILES+=usr/tests/lib/libc/c063/readlinkat
OLD_FILES+=usr/tests/lib/libc/c063/renameat
OLD_FILES+=usr/tests/lib/libc/c063/symlinkat
OLD_FILES+=usr/tests/lib/libc/c063/unlinkat
OLD_FILES+=usr/tests/lib/libc/c063/utimensat
OLD_FILES+=usr/tests/lib/libc/string/memchr
OLD_FILES+=usr/tests/lib/libc/string/memcpy
OLD_FILES+=usr/tests/lib/libc/string/memmem
OLD_FILES+=usr/tests/lib/libc/string/memset
OLD_FILES+=usr/tests/lib/libc/string/strcat
OLD_FILES+=usr/tests/lib/libc/string/strchr
OLD_FILES+=usr/tests/lib/libc/string/strcmp
OLD_FILES+=usr/tests/lib/libc/string/strcpy
OLD_FILES+=usr/tests/lib/libc/string/strcspn
OLD_FILES+=usr/tests/lib/libc/string/strerror
OLD_FILES+=usr/tests/lib/libc/string/strlen
OLD_FILES+=usr/tests/lib/libc/string/strpbrk
OLD_FILES+=usr/tests/lib/libc/string/strrchr
OLD_FILES+=usr/tests/lib/libc/string/strspn
OLD_FILES+=usr/tests/lib/libc/string/swab
# 20151101: 430.status-rwho was renamed to 430.status-uptime
OLD_FILES+=etc/periodic/daily/430.status-rwho
# 20151030: OpenSSL 1.0.2d import
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_certs.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl_str.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_509_CRL_fp.3.gz
OLD_LIBS+=lib/libcrypto.so.7
OLD_LIBS+=usr/lib/libssl.so.7
OLD_LIBS+=usr/lib32/libcrypto.so.7
OLD_LIBS+=usr/lib32/libssl.so.7
# 20151015: test symbols moved to /usr/lib/debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/atf_c++_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/tests_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/application_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/exceptions_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/atf_c_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/error_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tc_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tp_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/dynstr_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/list_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/map_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_helpers.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/user_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/test-programs/.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/c_helpers.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/cpp_helpers.debug
OLD_DIRS+=usr/tests/lib/libc/c063/.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/faccessat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchmodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchownat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fexecve.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fstatat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/linkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkdirat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkfifoat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mknodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/openat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/readlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/renameat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/symlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/unlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/utimensat.debug
OLD_DIRS+=usr/tests/lib/libc/db/.debug
OLD_FILES+=usr/tests/lib/libc/db/.debug/h_db.debug
OLD_DIRS+=usr/tests/lib/libc/gen/.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/alarm_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/arc4random_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/assert_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/basedirname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/dir_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/floatunditf_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fnmatch_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify2_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetmask_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetround_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ftok_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getcwd_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getgrent_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/glob_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/isnan_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/nice_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/pause_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/raise_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/realpath_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/setdomainname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sethostname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/syslog_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/time_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ttyname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/vis_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/execve/.debug
OLD_FILES+=usr/tests/lib/libc/gen/execve/.debug/execve_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/posix_spawn/.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/fileactions_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_fileactions.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawn.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawnattr.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawn_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawnattr_test.debug
OLD_DIRS+=usr/tests/lib/libc/hash/.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/h_hash.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/sha2_test.debug
OLD_DIRS+=usr/tests/lib/libc/inet/.debug
OLD_FILES+=usr/tests/lib/libc/inet/.debug/inet_network_test.debug
OLD_DIRS+=usr/tests/lib/libc/locale/.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/io_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbrtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbsnrtowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbstowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcscspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcspbrk_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcsspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcstod_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wctomb_test.debug
OLD_DIRS+=usr/tests/lib/libc/net/.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/ether_aton_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/getprotoent_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_dns_server.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_nsd_recurse.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_protoent.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_servent.debug
OLD_DIRS+=usr/tests/lib/libc/regex/.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/exhaust_test.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/h_regex.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/regex_att_test.debug
OLD_DIRS+=usr/tests/lib/libc/ssp/.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_fgets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_getcwd.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_gets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memmove.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memset.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_raw.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_read.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_readlink.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_snprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_sprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsnprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsprintf.debug
OLD_DIRS+=usr/tests/lib/libc/stdio/.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/clearerr_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fflush_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen2_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fputc_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/mktemp_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/popen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/printf_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/scanf_test.debug
OLD_DIRS+=usr/tests/lib/libc/stdlib/.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/abs_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/atoi_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/div_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/exit_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/getenv_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt_long.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/hsearch_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/posix_memalign_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/random_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtod_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtol_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/system_test.debug
OLD_DIRS+=usr/tests/lib/libc/string/.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memmem.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memset.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcat.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcmp.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strerror.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strlen.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strpbrk.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strrchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/swab.debug
OLD_DIRS+=usr/tests/lib/libc/sys/.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/access_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/chroot_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/clock_gettime_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/connect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/dup_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/fsync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getcontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getgroups_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getitimer_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getlogin_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getpid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getrusage_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getsid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/gettimeofday_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/issetugid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kevent_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/link_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/listen_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mincore_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkdir_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkfifo_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mknod_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mlock_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mmap_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mprotect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgctl_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgget_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgrcv_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgsnd_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/nanosleep_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe2_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/poll_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/revoke_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/select_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setrlimit_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setuid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigaction_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigqueue_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigtimedwait_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/socketpair_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/stat_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/timer_create_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/truncate_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/ucontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/umask_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/unlink_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/write_test.debug
OLD_DIRS+=usr/tests/lib/libc/termios/.debug
OLD_FILES+=usr/tests/lib/libc/termios/.debug/tcsetpgrp_test.debug
OLD_DIRS+=usr/tests/lib/libc/tls/.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/h_tls_dlopen.so.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/libh_tls_dynamic.so.1.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dlopen_test.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dynamic_test.debug
OLD_DIRS+=usr/tests/lib/libc/ttyio/.debug
OLD_FILES+=usr/tests/lib/libc/ttyio/.debug/ttyio_test.debug
OLD_DIRS+=usr/tests/lib/libcrypt/.debug
OLD_FILES+=usr/tests/lib/libcrypt/.debug/crypt_tests.debug
OLD_DIRS+=usr/tests/lib/libmp/.debug
OLD_FILES+=usr/tests/lib/libmp/.debug/legacy_test.debug
OLD_DIRS+=usr/tests/lib/libnv/.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/dnv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_array_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_add_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_exists_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_free_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_get_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_move_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_send_recv_test.debug
OLD_DIRS+=usr/tests/lib/libpam/.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_ctype.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readlinev.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readword.debug
OLD_DIRS+=usr/tests/lib/libproc/.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/proc_test.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/target_prog.debug
OLD_DIRS+=usr/tests/lib/librt/.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sched_test.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sem_test.debug
OLD_DIRS+=usr/tests/lib/libthr/.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/barrier_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/cond_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/condwait_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/detach_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/equal_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fork_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fpu_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_atexit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_cancel.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_exit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_resolv.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/join_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/mutex_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/once_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/preempt_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/rwlock_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sem_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/siglongjmp_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigmask_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigsuspend_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/swapcontext_test.debug
OLD_DIRS+=usr/tests/lib/libthr/dlopen/.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/dlopen_test.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/h_pthread_dlopen.so.1.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/main_pthread_create_test.debug
OLD_DIRS+=usr/tests/lib/libutil/.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/flopen_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/grp_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/pidfile_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain-nodomain_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain_test.debug
OLD_DIRS+=usr/tests/lib/libxo/.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/libenc_test.so.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_01.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_02.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_03.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_04.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_05.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_06.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_07.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_08.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_09.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_10.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_11.debug
OLD_DIRS+=usr/tests/lib/msun/.debug
OLD_FILES+=usr/tests/lib/msun/.debug/acos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/asin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/atan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cbrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ceil_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cosh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/erf_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/exp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/fmod_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/infinity_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ldexp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/log_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/pow_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/precision_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/round_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/scalbn_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sinh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sqrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tanh_test.debug
OLD_DIRS+=usr/tests/libexec/rtld-elf/.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/ld_library_pathfds.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/libpythagoras.so.0.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/target.debug
OLD_DIRS+=usr/tests/sbin/devd/.debug
OLD_FILES+=usr/tests/sbin/devd/.debug/client_test.debug
OLD_DIRS+=usr/tests/sbin/dhclient/.debug
OLD_FILES+=usr/tests/sbin/dhclient/.debug/option-domain-search_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/atf/.debug
OLD_FILES+=usr/tests/share/examples/tests/atf/.debug/printf_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/plain/.debug
OLD_FILES+=usr/tests/share/examples/tests/plain/.debug/printf_test.debug
OLD_DIRS+=usr/tests/sys/aio/.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_kqueue_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/lio_kqueue_test.debug
OLD_DIRS+=usr/tests/sys/fifo/.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_create.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_io.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_misc.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_open.debug
OLD_DIRS+=usr/tests/sys/file/.debug
OLD_FILES+=usr/tests/sys/file/.debug/closefrom_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/dup_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/fcntlflags_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/flock_helper.debug
OLD_FILES+=usr/tests/sys/file/.debug/ftruncate_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/newfileops_on_fork_test.debug
OLD_DIRS+=usr/tests/sys/kern/.debug
OLD_FILES+=usr/tests/sys/kern/.debug/kern_descrip_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/ptrace_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/unix_seqpacket_test.debug
OLD_DIRS+=usr/tests/sys/kern/execve/.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/execve_helper.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/good_aout.debug
OLD_DIRS+=usr/tests/sys/kqueue/.debug
OLD_FILES+=usr/tests/sys/kqueue/.debug/kqtest.debug
OLD_DIRS+=usr/tests/sys/mqueue/.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest1.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest2.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest3.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest4.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest5.debug
OLD_DIRS+=usr/tests/sys/netinet/.debug
OLD_FILES+=usr/tests/sys/netinet/.debug/udp_dontroute.debug
OLD_DIRS+=usr/tests/sys/pjdfstest/.debug
OLD_FILES+=usr/tests/sys/pjdfstest/.debug/pjdfstest.debug
OLD_DIRS+=usr/tests/sys/vm/.debug
OLD_FILES+=usr/tests/sys/vm/.debug/mmap_test.debug
# 20151015: Rename files due to file-installed-as-dir bug
OLD_FILES+=usr/share/doc/legal/realtek
OLD_FILES+=usr/share/doc/legal/realtek/LICENSE
OLD_DIRS+=usr/share/doc/legal/realtek
OLD_DIRS+=usr/share/doc/legal/intel_ipw
OLD_FILES+=usr/share/doc/legal/intel_ipw/LICENSE
OLD_FILES+=usr/share/doc/legal/intel_iwn
OLD_FILES+=usr/share/doc/legal/intel_iwn/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_iwn
OLD_DIRS+=usr/share/doc/legal/intel_iwi
OLD_FILES+=usr/share/doc/legal/intel_iwi/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_wpi
OLD_FILES+=usr/share/doc/legal/intel_wpi/LICENSE
# 20151006: new libc++ import
OLD_FILES+=usr/include/c++/__tuple_03
OLD_FILES+=usr/include/c++/v1/__tuple_03
OLD_FILES+=usr/include/c++/v1/tr1/__tuple_03
# 20151006: new clang import which bumps version from 3.6.1 to 3.7.0.
OLD_FILES+=usr/lib/clang/3.6.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.1/include
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.1/lib
OLD_DIRS+=usr/lib/clang/3.6.1
# 20150928: unused sgsmsg utility is removed
OLD_FILES+=usr/bin/sgsmsg
# 20150926: remove links to removed/unimplemented mbuf(9) macros
OLD_FILES+=usr/share/man/man9/MEXT_ADD_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXTFREE.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_IS_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_REM_REF.9.gz
OLD_FILES+=usr/share/man/man9/MFREE.9.gz
# 20150818: *allocm() are gone in jemalloc 4.0.0
OLD_FILES+=usr/share/man/man3/allocm.3.gz
OLD_FILES+=usr/share/man/man3/dallocm.3.gz
OLD_FILES+=usr/share/man/man3/nallocm.3.gz
OLD_FILES+=usr/share/man/man3/rallocm.3.gz
OLD_FILES+=usr/share/man/man3/sallocm.3.gz
# 20150802: Remove NetBSD's test on pw(8)
OLD_FILES+=usr/tests/usr.sbin/pw/pw_test
# 20150719: Remove libarchive.pc
OLD_FILES+=usr/libdata/pkgconfig/libarchive.pc
# 20150705: Rename DTrace provider man pages.
OLD_FILES+=usr/share/man/man4/dtrace-io.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-ip.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-proc.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-sched.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-tcp.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-udp.4.gz
# 20150704: nvlist private headers no longer installed
OLD_FILES+=usr/include/sys/nv_impl.h
OLD_FILES+=usr/include/sys/nvlist_impl.h
OLD_FILES+=usr/include/sys/nvpair_impl.h
# 20150624
OLD_LIBS+=usr/lib/libugidfw.so.4
OLD_LIBS+=usr/lib32/libugidfw.so.4
# 20150604: Move nvlist man pages to section 9.
OLD_FILES+=usr/share/man/man3/libnv.3.gz
OLD_FILES+=usr/share/man/man3/nv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_clone.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_create.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_destroy.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_dump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_empty.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_fdump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_flags.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_parent.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_next.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_pack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_recv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_send.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_set_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_size.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_unpack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_xfer.3.gz
# 20150702: Remove duplicated nvlist includes.
OLD_FILES+=usr/include/dnv.h
OLD_FILES+=usr/include/nv.h
# 20150528: PCI IOV device driver methods moved to a separate kobj interface.
OLD_FILES+=usr/share/man/man9/PCI_ADD_VF.9.gz
OLD_FILES+=usr/share/man/man9/PCI_INIT_IOV.9.gz
OLD_FILES+=usr/share/man/man9/PCI_UNINIT_IOV.9.gz
# 20150525: new clang import which bumps version from 3.6.0 to 3.6.1.
OLD_FILES+=usr/lib/clang/3.6.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.0/include
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.0/lib
OLD_DIRS+=usr/lib/clang/3.6.0
# 20150521
OLD_FILES+=usr/bin/demandoc
OLD_FILES+=usr/share/man/man1/demandoc.1.gz
OLD_FILES+=usr/share/man/man3/mandoc.3.gz
OLD_FILES+=usr/share/man/man3/mandoc_headers.3.gz
# 20150520
OLD_FILES+=usr/lib/libheimsqlite.a
OLD_FILES+=usr/lib/libheimsqlite.so
OLD_LIBS+=usr/lib/libheimsqlite.so.11
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib32/libheimsqlite.a
OLD_FILES+=usr/lib32/libheimsqlite.so
OLD_LIBS+=usr/lib32/libheimsqlite.so.11
OLD_FILES+=usr/lib32/libheimsqlite_p.a
# 20150518: tzdata2015c update
OLD_FILES+=usr/share/zoneinfo/America/Montreal
# 20150506
OLD_FILES+=usr/share/man/man9/NDHASGIANT.9.gz
# 20150504
OLD_FILES+=usr/share/examples/etc/libmap32.conf
OLD_FILES+=usr/include/bsdstat.h
OLD_LIBS+=usr/lib32/private/libatf-c++.so.2
OLD_LIBS+=usr/lib32/private/libbsdstat.so.1
OLD_LIBS+=usr/lib32/private/libheimipcs.so.11
OLD_LIBS+=usr/lib32/private/libsqlite3.so.0
OLD_LIBS+=usr/lib32/private/libunbound.so.5
OLD_LIBS+=usr/lib32/private/libatf-c.so.1
OLD_LIBS+=usr/lib32/private/libheimipcc.so.11
OLD_LIBS+=usr/lib32/private/libldns.so.5
OLD_LIBS+=usr/lib32/private/libssh.so.5
OLD_LIBS+=usr/lib32/private/libucl.so.1
OLD_DIRS+=usr/lib32/private
OLD_LIBS+=usr/lib/private/libatf-c++.so.2
OLD_LIBS+=usr/lib/private/libbsdstat.so.1
OLD_LIBS+=usr/lib/private/libheimipcs.so.11
OLD_LIBS+=usr/lib/private/libsqlite3.so.0
OLD_LIBS+=usr/lib/private/libunbound.so.5
OLD_LIBS+=usr/lib/private/libatf-c.so.1
OLD_LIBS+=usr/lib/private/libheimipcc.so.11
OLD_LIBS+=usr/lib/private/libldns.so.5
OLD_LIBS+=usr/lib/private/libssh.so.5
OLD_LIBS+=usr/lib/private/libucl.so.1
OLD_DIRS+=usr/lib/private
# 20150501
OLD_FILES+=usr/bin/soeliminate
OLD_FILES+=usr/share/man/man1/soeliminate.1.gz
# 20150501: Remove the nvlist_.*[vf] function man pages.
OLD_FILES+=usr/share/man/man3/nvlist_addf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_string.3.gz
# 20150429: remove never-written documentation
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
# 20150427: tests/sys/kern/mmap_test moved to tests/sys/vm/mmap_test
OLD_FILES+=usr/tests/sys/kern/mmap_test
# 20150422: zlib.c moved from net to libkern
OLD_FILES+=usr/include/net/zlib.h
OLD_FILES+=usr/include/net/zutil.h
# 20150418
OLD_FILES+=sbin/mount_oldnfs
OLD_FILES+=usr/share/man/man8/mount_oldnfs.8.gz
# 20150416: ALTQ moved to net/altq
OLD_FILES+=usr/include/altq/altq_rmclass_debug.h
OLD_FILES+=usr/include/altq/altq.h
OLD_FILES+=usr/include/altq/altq_cdnr.h
OLD_FILES+=usr/include/altq/altq_hfsc.h
OLD_FILES+=usr/include/altq/altq_priq.h
OLD_FILES+=usr/include/altq/altqconf.h
OLD_FILES+=usr/include/altq/altq_classq.h
OLD_FILES+=usr/include/altq/altq_red.h
OLD_FILES+=usr/include/altq/if_altq.h
OLD_FILES+=usr/include/altq/altq_var.h
OLD_FILES+=usr/include/altq/altq_rmclass.h
OLD_FILES+=usr/include/altq/altq_cbq.h
OLD_FILES+=usr/include/altq/altq_rio.h
OLD_DIRS+=usr/include/altq
# 20150330: ntp 4.2.8p1
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/man/man1/sntp.1.gz
# 20150329
.if ${TARGET_ARCH} == "arm"
# 20150326
OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz
# 20150315: new clang import which bumps version from 3.5.1 to 3.6.0.
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.1/altivec.h
OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.1/cpuid.h
OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.1/immintrin.h
OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/module.modulemap
OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.1
OLD_DIRS+=usr/include/clang
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.1/lib
OLD_DIRS+=usr/lib/clang/3.5.1
# 20150302: binutils documentation distributed as a manpage
OLD_FILES+=usr/share/doc/binutils/as.txt
OLD_FILES+=usr/share/doc/binutils/ld.txt
OLD_DIRS+=usr/share/doc/binutils
# 20150222: Removed bcd(6) and ppt(6)
OLD_FILES+=usr/bin/bcd
OLD_FILES+=usr/bin/ppt
OLD_FILES+=usr/share/man/man6/bcd.6.gz
OLD_FILES+=usr/share/man/man6/ppt.6.gz
# 20150217: Removed remnants of ar(4) driver
OLD_FILES+=usr/include/dev/ic/hd64570.h
# 20150212: /usr/games moving into /usr/bin
OLD_FILES+=usr/games/bcd
OLD_FILES+=usr/games/caesar
OLD_FILES+=usr/games/factor
OLD_FILES+=usr/games/fortune
OLD_FILES+=usr/games/grdc
OLD_FILES+=usr/games/morse
OLD_FILES+=usr/games/number
OLD_FILES+=usr/games/pom
OLD_FILES+=usr/games/ppt
OLD_FILES+=usr/games/primes
OLD_FILES+=usr/games/random
OLD_FILES+=usr/games/rot13
OLD_FILES+=usr/games/strfile
OLD_FILES+=usr/games/unstr
OLD_DIRS+=usr/games
# 20150209: liblzma header
OLD_FILES+=usr/include/lzma/lzma.h
# 20150124: spl.9 and friends
OLD_FILES+=usr/share/man/man9/spl.9.gz
OLD_FILES+=usr/share/man/man9/spl0.9.gz
OLD_FILES+=usr/share/man/man9/splbio.9.gz
OLD_FILES+=usr/share/man/man9/splclock.9.gz
OLD_FILES+=usr/share/man/man9/splhigh.9.gz
OLD_FILES+=usr/share/man/man9/splimp.9.gz
OLD_FILES+=usr/share/man/man9/splnet.9.gz
OLD_FILES+=usr/share/man/man9/splsoftclock.9.gz
OLD_FILES+=usr/share/man/man9/splsofttty.9.gz
OLD_FILES+=usr/share/man/man9/splstatclock.9.gz
OLD_FILES+=usr/share/man/man9/spltty.9.gz
OLD_FILES+=usr/share/man/man9/splvm.9.gz
OLD_FILES+=usr/share/man/man9/splx.9.gz
# 20150118: toeplitz.c moved from netinet to net
OLD_FILES+=usr/include/netinet/toeplitz.h
# 20150118: new clang import which bumps version from 3.5.0 to 3.5.1.
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.0/altivec.h
OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.0/cpuid.h
OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.0/immintrin.h
OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/module.modulemap
OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.0
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.0/lib
OLD_DIRS+=usr/lib/clang/3.5.0
# 20150102: removal of asr(4)
OLD_FILES+=usr/share/man/man4/asr.4.gz
# 20150102: removal of texinfo
OLD_FILES+=usr/bin/info
OLD_FILES+=usr/bin/infokey
OLD_FILES+=usr/bin/install-info
OLD_FILES+=usr/bin/makeinfo
OLD_FILES+=usr/bin/texindex
OLD_FILES+=usr/share/info/am-utils.info.gz
OLD_FILES+=usr/share/info/as.info.gz
OLD_FILES+=usr/share/info/binutils.info.gz
OLD_FILES+=usr/share/info/com_err.info.gz
OLD_FILES+=usr/share/info/diff.info.gz
OLD_FILES+=usr/share/info/gdb.info.gz
OLD_FILES+=usr/share/info/gdbint.info.gz
OLD_FILES+=usr/share/info/gperf.info.gz
OLD_FILES+=usr/share/info/grep.info.gz
OLD_FILES+=usr/share/info/groff.info.gz
OLD_FILES+=usr/share/info/heimdal.info.gz
OLD_FILES+=usr/share/info/history.info.gz
OLD_FILES+=usr/share/info/info-stnd.info.gz
OLD_FILES+=usr/share/info/info.info.gz
OLD_FILES+=usr/share/info/ld.info.gz
OLD_FILES+=usr/share/info/regex.info.gz
OLD_FILES+=usr/share/info/rluserman.info.gz
OLD_FILES+=usr/share/info/stabs.info.gz
OLD_FILES+=usr/share/info/texinfo.info.gz
OLD_FILES+=usr/share/man/man1/info.1.gz
OLD_FILES+=usr/share/man/man1/infokey.1.gz
OLD_FILES+=usr/share/man/man1/install-info.1.gz
OLD_FILES+=usr/share/man/man1/makeinfo.1.gz
OLD_FILES+=usr/share/man/man1/texindex.1.gz
OLD_FILES+=usr/share/man/man5/info.5.gz
OLD_FILES+=usr/share/man/man5/texinfo.5.gz
# 20141231: new clang import which bumps version from 3.4.1 to 3.5.0.
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4.1/altivec.h
OLD_FILES+=usr/include/clang/3.4.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.4.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.4.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4.1/cpuid.h
OLD_FILES+=usr/include/clang/3.4.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/immintrin.h
OLD_FILES+=usr/include/clang/3.4.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/module.map
OLD_FILES+=usr/include/clang/3.4.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.4.1
# 20141225: Remove gpib/ieee488
OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h
OLD_FILES+=usr/include/dev/ieee488/tnt4882.h
OLD_FILES+=usr/include/dev/ieee488/ugpib.h
OLD_FILES+=usr/include/dev/ieee488/upd7210.h
OLD_DIRS+=usr/include/dev/ieee488
OLD_FILES+=usr/include/gpib/gpib.h
OLD_DIRS+=usr/include/gpib
OLD_FILES+=usr/lib/libgpib.a
OLD_FILES+=usr/lib/libgpib_p.a
OLD_FILES+=usr/lib/libgpib.so
OLD_LIBS+=usr/lib/libgpib.so.3
OLD_FILES+=usr/lib32/libgpib.a
OLD_FILES+=usr/lib32/libgpib_p.a
OLD_FILES+=usr/lib32/libgpib.so
OLD_LIBS+=usr/lib32/libgpib.so.3
OLD_FILES+=usr/share/man/man3/gpib.3.gz
OLD_FILES+=usr/share/man/man3/ibclr.3.gz
OLD_FILES+=usr/share/man/man3/ibdev.3.gz
OLD_FILES+=usr/share/man/man3/ibdma.3.gz
OLD_FILES+=usr/share/man/man3/ibeos.3.gz
OLD_FILES+=usr/share/man/man3/ibeot.3.gz
OLD_FILES+=usr/share/man/man3/ibloc.3.gz
OLD_FILES+=usr/share/man/man3/ibonl.3.gz
OLD_FILES+=usr/share/man/man3/ibpad.3.gz
OLD_FILES+=usr/share/man/man3/ibrd.3.gz
OLD_FILES+=usr/share/man/man3/ibsad.3.gz
OLD_FILES+=usr/share/man/man3/ibsic.3.gz
OLD_FILES+=usr/share/man/man3/ibtmo.3.gz
OLD_FILES+=usr/share/man/man3/ibtrg.3.gz
OLD_FILES+=usr/share/man/man3/ibwrt.3.gz
OLD_FILES+=usr/share/man/man4/gpib.4.gz
OLD_FILES+=usr/share/man/man4/pcii.4.gz
OLD_FILES+=usr/share/man/man4/tnt4882.4.gz
# 20141224: libxo moved to /lib
OLD_LIBS+=usr/lib/libxo.so.0
# 20141223: remove in6_gif.h, in_gif.h and if_stf.h
OLD_FILES+=usr/include/net/if_stf.h
OLD_FILES+=usr/include/netinet/in_gif.h
OLD_FILES+=usr/include/netinet6/in6_gif.h
# 20141209: pw tests broken into a file per command
OLD_FILES+=usr/tests/usr.sbin/pw/pw_delete
OLD_FILES+=usr/tests/usr.sbin/pw/pw_modify
# 20141202: update to mandoc CVS 20141201
OLD_FILES+=usr/bin/preconv
OLD_FILES+=usr/share/man/man1/preconv.1.gz
# 20141129: mrouted rc.d scripts removed from base
OLD_FILES+=etc/rc.d/mrouted
# 20141126: convert sbin/mdconfig/tests to ATF format tests
OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test
OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test
OLD_FILES+=usr/tests/sbin/mdconfig/run.pl
# 20141126: remove xform_ipip decapsulation fallback
OLD_FILES+=usr/include/netipsec/ipip_var.h
# 20141122: mandoc updated to 1.13.1
OLD_FILES+=usr/share/mdocml/external.png
# 20141111: SF_KQUEUE code removed
OLD_FILES+=usr/include/sys/sf_base.h
OLD_FILES+=usr/include/sys/sf_sync.h
# 20141109: faith/faithd removal
OLD_FILES+=etc/rc.d/faith
OLD_FILES+=usr/share/man/man4/faith.4.gz
OLD_FILES+=usr/share/man/man4/if_faith.4.gz
OLD_FILES+=usr/sbin/faithd
OLD_FILES+=usr/share/man/man8/faithd.8.gz
# 20141107: overhaul if_gre(4)
OLD_FILES+=usr/include/netinet/ip_gre.h
# 20141102: postrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/postrandom
# 20141031: initrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/initrandom
# 20141030: atf 0.21 import
OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz
# 20141028: debug files accidentally installed as directory name
OLD_FILES+=usr/lib/debug/usr/lib/i18n
OLD_FILES+=usr/lib/debug/usr/lib/private
OLD_FILES+=usr/lib/debug/usr/lib32/i18n
OLD_FILES+=usr/lib/debug/usr/lib32/private
# 20141015: OpenSSL 1.0.1j import
OLD_FILES+=usr/share/openssl/man/man3/CMS_sign_add1_signer.3.gz
# 20141003: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.2
OLD_LIBS+=usr/lib32/libproc.so.2
# 20140922: sleepq_calc_signal_retval.9 and sleepq_catch_signals.9 removed
OLD_FILES+=usr/share/man/man9/sleepq_calc_signal_retval.9.gz
OLD_FILES+=usr/share/man/man9/sleepq_catch_signals.9.gz
# 20140917: hv_kvpd rc.d script removed in favor of devd configuration
OLD_FILES+=etc/rc.d/hv_kvpd
# 20140917: libnv was accidentally being installed to /usr/lib instead of /lib
OLD_LIBS+=usr/lib/libnv.so.0
# 20140829: rc.d/kerberos removed
OLD_FILES+=etc/rc.d/kerberos
# 20140827: tzdata2014f import
OLD_FILES+=usr/share/zoneinfo/Asia/Chongqing
OLD_FILES+=usr/share/zoneinfo/Asia/Harbin
OLD_FILES+=usr/share/zoneinfo/Asia/Kashgar
# 20140814: libopie version bump
OLD_LIBS+=usr/lib/libopie.so.7
OLD_LIBS+=usr/lib32/libopie.so.7
# 20140811: otp-sha renamed to otp-sha1
OLD_FILES+=usr/bin/otp-sha
OLD_FILES+=usr/share/man/man1/otp-sha.1.gz
# 20140807: Remove private lib files that should not be installed.
OLD_FILES+=usr/lib32/private/libatf-c.a
OLD_FILES+=usr/lib32/private/libatf-c.so
OLD_FILES+=usr/lib32/private/libatf-c_p.a
OLD_FILES+=usr/lib32/private/libatf-c++.a
OLD_FILES+=usr/lib32/private/libatf-c++.so
OLD_FILES+=usr/lib32/private/libatf-c++_p.a
OLD_FILES+=usr/lib32/private/libbsdstat.a
OLD_FILES+=usr/lib32/private/libbsdstat.so
OLD_FILES+=usr/lib32/private/libbsdstat_p.a
OLD_FILES+=usr/lib32/private/libheimipcc.a
OLD_FILES+=usr/lib32/private/libheimipcc.so
OLD_FILES+=usr/lib32/private/libheimipcc_p.a
OLD_FILES+=usr/lib32/private/libheimipcs.a
OLD_FILES+=usr/lib32/private/libheimipcs.so
OLD_FILES+=usr/lib32/private/libheimipcs_p.a
OLD_FILES+=usr/lib32/private/libldns.a
OLD_FILES+=usr/lib32/private/libldns.so
OLD_FILES+=usr/lib32/private/libldns_p.a
OLD_FILES+=usr/lib32/private/libssh.a
OLD_FILES+=usr/lib32/private/libssh.so
OLD_FILES+=usr/lib32/private/libssh_p.a
OLD_FILES+=usr/lib32/private/libunbound.a
OLD_FILES+=usr/lib32/private/libunbound.so
OLD_FILES+=usr/lib32/private/libunbound_p.a
OLD_FILES+=usr/lib32/private/libucl.a
OLD_FILES+=usr/lib32/private/libucl.so
OLD_FILES+=usr/lib32/private/libucl_p.a
OLD_FILES+=usr/lib/private/libatf-c.a
OLD_FILES+=usr/lib/private/libatf-c.so
OLD_FILES+=usr/lib/private/libatf-c_p.a
OLD_FILES+=usr/lib/private/libatf-c++.a
OLD_FILES+=usr/lib/private/libatf-c++.so
OLD_FILES+=usr/lib/private/libatf-c++_p.a
OLD_FILES+=usr/lib/private/libbsdstat.a
OLD_FILES+=usr/lib/private/libbsdstat.so
OLD_FILES+=usr/lib/private/libbsdstat_p.a
OLD_FILES+=usr/lib/private/libheimipcc.a
OLD_FILES+=usr/lib/private/libheimipcc.so
OLD_FILES+=usr/lib/private/libheimipcc_p.a
OLD_FILES+=usr/lib/private/libheimipcs.a
OLD_FILES+=usr/lib/private/libheimipcs.so
OLD_FILES+=usr/lib/private/libheimipcs_p.a
OLD_FILES+=usr/lib/private/libldns.a
OLD_FILES+=usr/lib/private/libldns.so
OLD_FILES+=usr/lib/private/libldns_p.a
OLD_FILES+=usr/lib/private/libssh.a
OLD_FILES+=usr/lib/private/libssh.so
OLD_FILES+=usr/lib/private/libssh_p.a
OLD_FILES+=usr/lib/private/libunbound.a
OLD_FILES+=usr/lib/private/libunbound.so
OLD_FILES+=usr/lib/private/libunbound_p.a
OLD_FILES+=usr/lib/private/libucl.a
OLD_FILES+=usr/lib/private/libucl.so
OLD_FILES+=usr/lib/private/libucl_p.a
# 20140803: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz
# 20140731
OLD_FILES+=usr/share/man/man9/SYSCTL_ADD_OID.9.gz
# 20140728: libsbuf restored to old version.
OLD_LIBS+=lib/libsbuf.so.7
OLD_LIBS+=usr/lib32/libsbuf.so.7
# 20140728: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_DESTROYVOBJECT.9.gz
# 20140723: renamed to PCBGROUP.9
OLD_FILES+=usr/share/man/man9/PCBGROUPS.9.gz
# 20140722: browse_packages_ftp.sh removed
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_ftp.sh
# 20140718: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/zero_copy.9.gz
OLD_FILES+=usr/share/man/man9/zero_copy_sockets.9.gz
# 20140718: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_page_protect.9.gz
# 20140717: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_clear_reference.9.gz
# 20140716: Remove an incorrectly named man page
OLD_FILES+=usr/share/man/man9/pmap_ts_modified.9.gz
# 20140712: Removal of bsd.dtrace.mk
OLD_FILES+=usr/share/mk/bsd.dtrace.mk
# 20140705: turn libreadline into an internal lib
OLD_LIBS+=lib/libreadline.so.8
OLD_FILES+=usr/lib/libreadline.a
OLD_FILES+=usr/lib/libreadline_p.a
OLD_FILES+=usr/lib/libreadline.so
OLD_FILES+=usr/lib/libhistory.a
OLD_FILES+=usr/lib/libhistory_p.a
OLD_FILES+=usr/lib/libhistory.so
OLD_LIBS+=usr/lib/libhistory.so.8
OLD_FILES+=usr/lib32/libhistory.a
OLD_FILES+=usr/lib32/libhistory.so
OLD_LIBS+=usr/lib32/libhistory.so.8
OLD_FILES+=usr/lib32/libhistory_p.a
OLD_FILES+=usr/lib32/libreadline.a
OLD_FILES+=usr/lib32/libreadline.so
OLD_LIBS+=usr/lib32/libreadline.so.8
OLD_FILES+=usr/lib32/libreadline_p.a
OLD_FILES+=usr/include/readline/chardefs.h
OLD_FILES+=usr/include/readline/history.h
OLD_FILES+=usr/include/readline/keymaps.h
OLD_FILES+=usr/include/readline/readline.h
OLD_FILES+=usr/include/readline/tilde.h
OLD_FILES+=usr/include/readline/rlconf.h
OLD_FILES+=usr/include/readline/rlstdc.h
OLD_FILES+=usr/include/readline/rltypedefs.h
OLD_DIRS+=usr/include/readline
OLD_FILES+=usr/share/info/readline.info.gz
OLD_FILES+=usr/share/man/man3/readline.3.gz
OLD_FILES+=usr/share/man/man3/rlhistory.3.gz
# 20140625: csup removal
OLD_FILES+=usr/bin/csup
OLD_FILES+=usr/bin/cpasswd
OLD_FILES+=usr/share/man/man1/csup.1.gz
OLD_FILES+=usr/share/man/man1/cpasswd.1.gz
OLD_FILES+=usr/share/examples/cvsup/README
OLD_FILES+=usr/share/examples/cvsup/cvs-supfile
OLD_FILES+=usr/share/examples/cvsup/stable-supfile
OLD_FILES+=usr/share/examples/cvsup/standard-supfile
OLD_DIRS+=usr/share/examples/cvsup
# 20140614: send-pr removal
OLD_FILES+=usr/bin/sendbug
OLD_FILES+=usr/share/info/send-pr.info.gz
OLD_FILES+=usr/share/man/man1/send-pr.1.gz
OLD_FILES+=usr/share/man/man1/sendbug.1.gz
OLD_FILES+=etc/gnats/freefall
OLD_DIRS+=etc/gnats
# 20140512: new clang import which bumps version from 3.4 to 3.4.1.
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4/altivec.h
OLD_FILES+=usr/include/clang/3.4/ammintrin.h
OLD_FILES+=usr/include/clang/3.4/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4/avxintrin.h
OLD_FILES+=usr/include/clang/3.4/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4/cpuid.h
OLD_FILES+=usr/include/clang/3.4/emmintrin.h
OLD_FILES+=usr/include/clang/3.4/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4/immintrin.h
OLD_FILES+=usr/include/clang/3.4/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4/mmintrin.h
OLD_FILES+=usr/include/clang/3.4/module.map
OLD_FILES+=usr/include/clang/3.4/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4/shaintrin.h
OLD_FILES+=usr/include/clang/3.4/smmintrin.h
OLD_FILES+=usr/include/clang/3.4/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4/x86intrin.h
OLD_FILES+=usr/include/clang/3.4/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4/xopintrin.h
OLD_FILES+=usr/include/clang/3.4/arm_neon.h
OLD_DIRS+=usr/include/clang/3.4
# 20140505: Bogusly installing src.opts.mk
OLD_FILES+=usr/share/mk/src.opts.mk
# 20140505: Reject PR kern/187551
OLD_FILES+=usr/tests/sbin/ifconfig/fibs_test
# 20140502: Removal of lindev(4)
OLD_FILES+=usr/share/man/man4/lindev.4.gz
# 20140425
OLD_FILES+=usr/lib/libssp_p.a
OLD_FILES+=usr/lib/libstand_p.a
OLD_FILES+=usr/lib32/libc_pic.a
OLD_FILES+=usr/lib32/libssp_p.a
OLD_FILES+=usr/lib32/libstand_p.a
# 20140314: AppleTalk
OLD_DIRS+=usr/include/netatalk
OLD_FILES+=usr/include/netatalk/aarp.h
OLD_FILES+=usr/include/netatalk/at.h
OLD_FILES+=usr/include/netatalk/at_extern.h
OLD_FILES+=usr/include/netatalk/at_var.h
OLD_FILES+=usr/include/netatalk/ddp.h
OLD_FILES+=usr/include/netatalk/ddp_pcb.h
OLD_FILES+=usr/include/netatalk/ddp_var.h
OLD_FILES+=usr/include/netatalk/endian.h
OLD_FILES+=usr/include/netatalk/phase2.h
# 20140314: Remove IPX/SPX
OLD_LIBS+=lib/libipx.so.5
OLD_FILES+=usr/include/netipx/ipx.h
OLD_FILES+=usr/include/netipx/ipx_if.h
OLD_FILES+=usr/include/netipx/ipx_pcb.h
OLD_FILES+=usr/include/netipx/ipx_var.h
OLD_FILES+=usr/include/netipx/spx.h
OLD_FILES+=usr/include/netipx/spx_debug.h
OLD_FILES+=usr/include/netipx/spx_timer.h
OLD_FILES+=usr/include/netipx/spx_var.h
OLD_DIRS+=usr/include/netipx
OLD_FILES+=usr/lib/libipx.a
OLD_FILES+=usr/lib/libipx.so
OLD_FILES+=usr/lib/libipx_p.a
OLD_FILES+=usr/lib32/libipx.a
OLD_FILES+=usr/lib32/libipx.so
OLD_LIBS+=usr/lib32/libipx.so.5
OLD_FILES+=usr/lib32/libipx_p.a
OLD_FILES+=usr/sbin/IPXrouted
OLD_FILES+=usr/share/man/man3/ipx.3.gz
OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz
OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz
OLD_FILES+=usr/share/man/man4/ef.4.gz
OLD_FILES+=usr/share/man/man4/if_ef.4.gz
OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz
# 20140314: bsdconfig usermgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/userinput
# 20140307: bsdconfig groupmgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/groupinput
# 20140223: Remove libyaml
OLD_FILES+=usr/lib/private/libyaml.a
OLD_FILES+=usr/lib/private/libyaml.so
OLD_LIBS+=usr/lib/private/libyaml.so.1
OLD_FILES+=usr/lib/private/libyaml_p.a
OLD_FILES+=usr/lib32/private/libyaml.a
OLD_FILES+=usr/lib32/private/libyaml.so
OLD_LIBS+=usr/lib32/private/libyaml.so.1
OLD_FILES+=usr/lib32/private/libyaml_p.a
# 20140216: new clang import which bumps version from 3.3 to 3.4.
OLD_FILES+=usr/bin/llvm-prof
OLD_FILES+=usr/bin/llvm-ranlib
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.3/altivec.h
OLD_FILES+=usr/include/clang/3.3/ammintrin.h
OLD_FILES+=usr/include/clang/3.3/avx2intrin.h
OLD_FILES+=usr/include/clang/3.3/avxintrin.h
OLD_FILES+=usr/include/clang/3.3/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.3/bmiintrin.h
OLD_FILES+=usr/include/clang/3.3/cpuid.h
OLD_FILES+=usr/include/clang/3.3/emmintrin.h
OLD_FILES+=usr/include/clang/3.3/f16cintrin.h
OLD_FILES+=usr/include/clang/3.3/fma4intrin.h
OLD_FILES+=usr/include/clang/3.3/fmaintrin.h
OLD_FILES+=usr/include/clang/3.3/immintrin.h
OLD_FILES+=usr/include/clang/3.3/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.3/mm3dnow.h
OLD_FILES+=usr/include/clang/3.3/mm_malloc.h
OLD_FILES+=usr/include/clang/3.3/mmintrin.h
OLD_FILES+=usr/include/clang/3.3/module.map
OLD_FILES+=usr/include/clang/3.3/nmmintrin.h
OLD_FILES+=usr/include/clang/3.3/pmmintrin.h
OLD_FILES+=usr/include/clang/3.3/popcntintrin.h
OLD_FILES+=usr/include/clang/3.3/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.3/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.3/rtmintrin.h
OLD_FILES+=usr/include/clang/3.3/smmintrin.h
OLD_FILES+=usr/include/clang/3.3/tmmintrin.h
OLD_FILES+=usr/include/clang/3.3/wmmintrin.h
OLD_FILES+=usr/include/clang/3.3/x86intrin.h
OLD_FILES+=usr/include/clang/3.3/xmmintrin.h
OLD_FILES+=usr/include/clang/3.3/xopintrin.h
OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz
OLD_FILES+=usr/share/man/man1/llvm-ranlib.1.gz
OLD_DIRS+=usr/include/clang/3.3
# 20140216: nve(4) removed
OLD_FILES+=usr/share/man/man4/if_nve.4.gz
OLD_FILES+=usr/share/man/man4/nve.4.gz
# 20140205: Open Firmware device moved
OLD_FILES+=usr/include/dev/ofw/ofw_nexus.h
# 20140128: libelf and libdwarf import
OLD_LIBS+=usr/lib/libelf.so.1
OLD_LIBS+=usr/lib32/libelf.so.1
OLD_LIBS+=usr/lib/libdwarf.so.3
OLD_LIBS+=usr/lib32/libdwarf.so.3
# 20140123: apicvar header moved to x86
OLD_FILES+=usr/include/machine/apicvar.h
# 20131215: libcam version bumped
OLD_LIBS+=lib/libcam.so.6 usr/lib32/libcam.so.6
# 20131202: libcapsicum and libcasper moved to /lib/
OLD_LIBS+=usr/lib/libcapsicum.so.0
OLD_LIBS+=usr/lib/libcasper.so.0
# 20131109: extattr(2) mlinks fixed
OLD_FILES+=usr/share/man/man2/extattr_delete_list.2.gz
OLD_FILES+=usr/share/man/man2/extattr_get_list.2.gz
# 20131107: example files removed
OLD_FILES+=usr/share/examples/libusb20/aux.c
OLD_FILES+=usr/share/examples/libusb20/aux.h
# 20131105: tzdata 2013h import
OLD_FILES+=usr/share/zoneinfo/America/Shiprock
OLD_FILES+=usr/share/zoneinfo/Antarctica/South_Pole
# 20131103: WITH_LIBICONV_COMPAT removal
OLD_FILES+=usr/include/_libiconv_compat.h
OLD_FILES+=usr/lib/libiconv.a
OLD_FILES+=usr/lib/libiconv.so
OLD_FILES+=usr/lib/libiconv.so.3
OLD_FILES+=usr/lib/libiconv_p.a
OLD_FILES+=usr/lib32/libiconv.a
OLD_FILES+=usr/lib32/libiconv.so
OLD_FILES+=usr/lib32/libiconv.so.3
OLD_FILES+=usr/lib32/libiconv_p.a
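# Note: make(1) accepts several space-separated values in a single append,
# so the libcam line above is equivalent to two separate appends:
# OLD_LIBS+=lib/libcam.so.6
# OLD_LIBS+=usr/lib32/libcam.so.6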
# 20131103: removal of utxrm(8), use 'utx rm' instead.
OLD_FILES+=usr/sbin/utxrm
OLD_FILES+=usr/share/man/man8/utxrm.8.gz
# 20131031: pkg_install has been removed
OLD_FILES+=etc/periodic/daily/220.backup-pkgdb
OLD_FILES+=etc/periodic/daily/490.status-pkg-changes
OLD_FILES+=etc/periodic/security/460.chkportsum
OLD_FILES+=etc/periodic/weekly/400.status-pkg
OLD_FILES+=usr/sbin/pkg_add
OLD_FILES+=usr/sbin/pkg_create
OLD_FILES+=usr/sbin/pkg_delete
OLD_FILES+=usr/sbin/pkg_info
OLD_FILES+=usr/sbin/pkg_updating
OLD_FILES+=usr/sbin/pkg_version
OLD_FILES+=usr/share/man/man1/pkg_add.1.gz
OLD_FILES+=usr/share/man/man1/pkg_create.1.gz
OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz
OLD_FILES+=usr/share/man/man1/pkg_info.1.gz
OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz
OLD_FILES+=usr/share/man/man1/pkg_version.1.gz
# 20131030: /etc/keys moved to /usr/share/keys
OLD_DIRS+=etc/keys
OLD_DIRS+=etc/keys/pkg
OLD_DIRS+=etc/keys/pkg/revoked
OLD_DIRS+=etc/keys/pkg/trusted
OLD_FILES+=etc/keys/pkg/trusted/pkg.freebsd.org.2013102301
# 20131028: ng_fec(4) removed
OLD_FILES+=usr/include/netgraph/ng_fec.h
OLD_FILES+=usr/share/man/man4/ng_fec.4.gz
# 20131027: header moved
OLD_FILES+=usr/include/net/pf_mtag.h
# 20131023: remove never-used iscsi directory
OLD_DIRS+=usr/share/examples/iscsi
# 20131021: isf(4) removed
OLD_FILES+=usr/sbin/isfctl
OLD_FILES+=usr/share/man/man4/isf.4.gz
OLD_FILES+=usr/share/man/man8/isfctl.8.gz
# 20131014: libbsdyml becomes private
OLD_FILES+=usr/lib/libbsdyml.a
OLD_FILES+=usr/lib/libbsdyml.so
OLD_LIBS+=usr/lib/libbsdyml.so.0
OLD_FILES+=usr/lib/libbsdyml_p.a
OLD_FILES+=usr/lib32/libbsdyml.a
OLD_FILES+=usr/lib32/libbsdyml.so
OLD_LIBS+=usr/lib32/libbsdyml.so.0
OLD_FILES+=usr/lib32/libbsdyml_p.a
OLD_FILES+=usr/share/man/man3/libbsdyml.3.gz
OLD_FILES+=usr/include/bsdyml.h
# 20131013: Removal of the ATF tools
OLD_FILES+=etc/atf/FreeBSD.conf
OLD_FILES+=etc/atf/atf-run.hooks
OLD_FILES+=etc/atf/common.conf
OLD_FILES+=usr/bin/atf-config
OLD_FILES+=usr/bin/atf-report
OLD_FILES+=usr/bin/atf-run
OLD_FILES+=usr/bin/atf-version
OLD_FILES+=usr/share/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/tests-results.css
OLD_FILES+=usr/share/man/man1/atf-config.1.gz
OLD_FILES+=usr/share/man/man1/atf-report.1.gz
OLD_FILES+=usr/share/man/man1/atf-run.1.gz
OLD_FILES+=usr/share/man/man1/atf-version.1.gz
OLD_FILES+=usr/share/man/man5/atf-formats.5.gz
OLD_FILES+=usr/share/xml/atf/tests-results.dtd
OLD_FILES+=usr/share/xsl/atf/tests-results.xsl
# 20131009: freebsd-version moved from /libexec to /bin
OLD_FILES+=libexec/freebsd-version
# 20131001: ar and ranlib from binutils not used
OLD_FILES+=usr/bin/gnu-ar
OLD_FILES+=usr/bin/gnu-ranlib
OLD_FILES+=usr/share/man/man1/gnu-ar.1.gz
OLD_FILES+=usr/share/man/man1/gnu-ranlib.1.gz
# 20130930: BIND removed from base
OLD_FILES+=etc/mtree/BIND.chroot.dist
OLD_FILES+=etc/namedb
OLD_FILES+=etc/periodic/daily/470.status-named
OLD_FILES+=usr/bin/dig
OLD_FILES+=usr/bin/nslookup
OLD_FILES+=usr/bin/nsupdate
OLD_DIRS+=usr/include/lwres
OLD_FILES+=usr/include/lwres/context.h
OLD_FILES+=usr/include/lwres/int.h
OLD_FILES+=usr/include/lwres/ipv6.h
OLD_FILES+=usr/include/lwres/lang.h
OLD_FILES+=usr/include/lwres/list.h
OLD_FILES+=usr/include/lwres/lwbuffer.h
OLD_FILES+=usr/include/lwres/lwpacket.h
OLD_FILES+=usr/include/lwres/lwres.h
OLD_FILES+=usr/include/lwres/net.h
OLD_FILES+=usr/include/lwres/netdb.h
OLD_FILES+=usr/include/lwres/platform.h
OLD_FILES+=usr/include/lwres/result.h
OLD_FILES+=usr/include/lwres/string.h
OLD_FILES+=usr/include/lwres/version.h
OLD_FILES+=usr/lib/liblwres.a
OLD_FILES+=usr/lib/liblwres.so
OLD_LIBS+=usr/lib/liblwres.so.90
OLD_FILES+=usr/lib/liblwres_p.a
OLD_FILES+=usr/sbin/arpaname
OLD_FILES+=usr/sbin/ddns-confgen
OLD_FILES+=usr/sbin/dnssec-dsfromkey
OLD_FILES+=usr/sbin/dnssec-keyfromlabel
OLD_FILES+=usr/sbin/dnssec-keygen
OLD_FILES+=usr/sbin/dnssec-revoke
OLD_FILES+=usr/sbin/dnssec-settime
OLD_FILES+=usr/sbin/dnssec-signzone
OLD_FILES+=usr/sbin/dnssec-verify
OLD_FILES+=usr/sbin/genrandom
OLD_FILES+=usr/sbin/isc-hmac-fixup
OLD_FILES+=usr/sbin/lwresd
OLD_FILES+=usr/sbin/named
OLD_FILES+=usr/sbin/named-checkconf
OLD_FILES+=usr/sbin/named-checkzone
OLD_FILES+=usr/sbin/named-compilezone
OLD_FILES+=usr/sbin/named-journalprint
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/sbin/nsec3hash
OLD_FILES+=usr/sbin/rndc
OLD_FILES+=usr/sbin/rndc-confgen
OLD_DIRS+=usr/share/doc/bind9
OLD_FILES+=usr/share/doc/bind9/CHANGES
OLD_FILES+=usr/share/doc/bind9/COPYRIGHT
OLD_FILES+=usr/share/doc/bind9/FAQ
OLD_FILES+=usr/share/doc/bind9/HISTORY
OLD_FILES+=usr/share/doc/bind9/README
OLD_DIRS+=usr/share/doc/bind9/arm
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch10.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.pdf
OLD_FILES+=usr/share/doc/bind9/arm/man.arpaname.html
OLD_FILES+=usr/share/doc/bind9/arm/man.ddns-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dig.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-dsfromkey.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keyfromlabel.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keygen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-revoke.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-settime.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-signzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-verify.html
OLD_FILES+=usr/share/doc/bind9/arm/man.genrandom.html
OLD_FILES+=usr/share/doc/bind9/arm/man.host.html
OLD_FILES+=usr/share/doc/bind9/arm/man.isc-hmac-fixup.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkconf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-journalprint.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsec3hash.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsupdate.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.conf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.html
OLD_DIRS+=usr/share/doc/bind9/misc
OLD_FILES+=usr/share/doc/bind9/misc/dnssec
OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl
OLD_FILES+=usr/share/doc/bind9/misc/ipv6
OLD_FILES+=usr/share/doc/bind9/misc/migration
OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9
OLD_FILES+=usr/share/doc/bind9/misc/options
OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance
OLD_FILES+=usr/share/doc/bind9/misc/roadmap
OLD_FILES+=usr/share/doc/bind9/misc/sdb
OLD_FILES+=usr/share/doc/bind9/misc/sort-options.pl
OLD_FILES+=usr/share/man/man1/arpaname.1.gz
OLD_FILES+=usr/share/man/man1/dig.1.gz
OLD_FILES+=usr/share/man/man1/nslookup.1.gz
OLD_FILES+=usr/share/man/man1/nsupdate.1.gz
OLD_FILES+=usr/share/man/man3/lwres.3.gz
OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz
OLD_FILES+=usr/share/man/man3/lwres_config.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz
OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz
OLD_FILES+=usr/share/man/man5/named.conf.5.gz
OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz
OLD_FILES+=usr/share/man/man8/ddns-confgen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-dsfromkey.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keyfromlabel.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-revoke.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-settime.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-verify.8.gz
OLD_FILES+=usr/share/man/man8/genrandom.8.gz
OLD_FILES+=usr/share/man/man8/isc-hmac-fixup.8.gz
OLD_FILES+=usr/share/man/man8/lwresd.8.gz
OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz
OLD_FILES+=usr/share/man/man8/named-compilezone.8.gz
OLD_FILES+=usr/share/man/man8/named-journalprint.8.gz
OLD_FILES+=usr/share/man/man8/named.8.gz
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
OLD_FILES+=usr/share/man/man8/nsec3hash.8.gz
OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz
OLD_FILES+=usr/share/man/man8/rndc.8.gz
OLD_DIRS+=var/named/dev
OLD_DIRS+=var/named/etc
OLD_DIRS+=var/named/etc/namedb
OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev
OLD_DIRS+=var/named/etc/namedb/dynamic
OLD_FILES+=var/named/etc/namedb/make-localhost
OLD_DIRS+=var/named/etc/namedb/master
OLD_FILES+=var/named/etc/namedb/master/empty.db
OLD_FILES+=var/named/etc/namedb/master/localhost-forward.db
OLD_FILES+=var/named/etc/namedb/master/localhost-reverse.db
#OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out
OLD_FILES+=var/named/etc/namedb/named.root
OLD_DIRS+=var/named/etc/namedb/working
OLD_DIRS+=var/named/etc/namedb/slave
OLD_DIRS+=var/named/var
OLD_DIRS+=var/named/var/dump
OLD_DIRS+=var/named/var/log
OLD_DIRS+=var/named/var/run
OLD_DIRS+=var/named/var/run/named
OLD_DIRS+=var/named/var/stats
OLD_DIRS+=var/run/named
# 20130923: example moved
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages.sh
# 20130908: libssh becomes private
OLD_FILES+=usr/lib/libssh.a
OLD_FILES+=usr/lib/libssh.so
OLD_LIBS+=usr/lib/libssh.so.5
OLD_FILES+=usr/lib/libssh_p.a
OLD_FILES+=usr/lib32/libssh.a
OLD_FILES+=usr/lib32/libssh.so
OLD_LIBS+=usr/lib32/libssh.so.5
OLD_FILES+=usr/lib32/libssh_p.a
# 20130903: gnupatch is no more
OLD_FILES+=usr/bin/gnupatch
OLD_FILES+=usr/share/man/man1/gnupatch.1.gz
# 20130829: bsdpatch is patch unconditionally
OLD_FILES+=usr/bin/bsdpatch
OLD_FILES+=usr/share/man/man1/bsdpatch.1.gz
# 20130822: bind 9.9.3-P2 import
OLD_LIBS+=usr/lib/liblwres.so.80
# 20130814: vm_page_busy(9)
OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz
# 20130710: libkvm version bump
OLD_LIBS+=lib/libkvm.so.5
OLD_LIBS+=usr/lib32/libkvm.so.5
# 20130623: dialog update from 1.1 to 1.2
OLD_LIBS+=usr/lib/libdialog.so.7
OLD_LIBS+=usr/lib32/libdialog.so.7
# 20130616: vfs_mount.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mount.9.gz
# 20130614: remove CVS from base
OLD_FILES+=usr/bin/cvs
OLD_FILES+=usr/bin/cvsbug
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ps.gz
OLD_DIRS+=usr/share/doc/psd/28.cvs
OLD_FILES+=usr/share/examples/cvs/contrib/README
OLD_FILES+=usr/share/examples/cvs/contrib/clmerge
OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist
OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep
OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor
OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man
OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man
OLD_FILES+=usr/share/examples/cvs/contrib/descend.man
OLD_FILES+=usr/share/examples/cvs/contrib/easy-import
OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc
OLD_FILES+=usr/share/examples/cvs/contrib/log
OLD_FILES+=usr/share/examples/cvs/contrib/log_accum
OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe
OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs
OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log
OLD_FILES+=usr/share/examples/cvs/contrib/rcslock
OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs
OLD_DIRS+=usr/share/examples/cvs/contrib
OLD_DIRS+=usr/share/examples/cvs
OLD_FILES+=usr/share/info/cvs.info.gz
OLD_FILES+=usr/share/info/cvsclient.info.gz
OLD_FILES+=usr/share/man/man1/cvs.1.gz
OLD_FILES+=usr/share/man/man5/cvs.5.gz
OLD_FILES+=usr/share/man/man8/cvsbug.8.gz
# 20130607: WITH_DEBUG_FILES added
OLD_FILES+=lib/libufs.so.6.symbols
OLD_FILES+=usr/lib32/libufs.so.6.symbols
# 20130417: nfs fha moved from nfsserver to nfs
OLD_FILES+=usr/include/nfsserver/nfs_fha.h
# 20130411: new clang import which bumps version from 3.2 to 3.3.
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.2/altivec.h
OLD_FILES+=usr/include/clang/3.2/ammintrin.h
OLD_FILES+=usr/include/clang/3.2/avx2intrin.h
OLD_FILES+=usr/include/clang/3.2/avxintrin.h
OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.2/bmiintrin.h
OLD_FILES+=usr/include/clang/3.2/cpuid.h
OLD_FILES+=usr/include/clang/3.2/emmintrin.h
OLD_FILES+=usr/include/clang/3.2/f16cintrin.h
OLD_FILES+=usr/include/clang/3.2/fma4intrin.h
OLD_FILES+=usr/include/clang/3.2/fmaintrin.h
OLD_FILES+=usr/include/clang/3.2/immintrin.h
OLD_FILES+=usr/include/clang/3.2/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.2/mm3dnow.h
OLD_FILES+=usr/include/clang/3.2/mm_malloc.h
OLD_FILES+=usr/include/clang/3.2/mmintrin.h
OLD_FILES+=usr/include/clang/3.2/module.map
OLD_FILES+=usr/include/clang/3.2/nmmintrin.h
OLD_FILES+=usr/include/clang/3.2/pmmintrin.h
OLD_FILES+=usr/include/clang/3.2/popcntintrin.h
OLD_FILES+=usr/include/clang/3.2/rtmintrin.h
OLD_FILES+=usr/include/clang/3.2/smmintrin.h
OLD_FILES+=usr/include/clang/3.2/tmmintrin.h
OLD_FILES+=usr/include/clang/3.2/wmmintrin.h
OLD_FILES+=usr/include/clang/3.2/x86intrin.h
OLD_FILES+=usr/include/clang/3.2/xmmintrin.h
OLD_FILES+=usr/include/clang/3.2/xopintrin.h
OLD_DIRS+=usr/include/clang/3.2
# 20130404: legacy ATA stack removed
OLD_FILES+=etc/periodic/daily/405.status-ata-raid
OLD_FILES+=rescue/atacontrol
OLD_FILES+=sbin/atacontrol
OLD_FILES+=usr/share/man/man8/atacontrol.8.gz
OLD_FILES+=usr/share/man/man4/atapicam.4.gz
OLD_FILES+=usr/share/man/man4/ataraid.4.gz
OLD_FILES+=usr/sbin/burncd
OLD_FILES+=usr/share/man/man8/burncd.8.gz
# 20130316: vinum.4 removed
OLD_FILES+=usr/share/man/man4/vinum.4.gz
# 20130312: fortunes-o removed
OLD_FILES+=usr/share/games/fortune/fortunes-o
OLD_FILES+=usr/share/games/fortune/fortunes-o.dat
# 20130311: Ports are no longer available via cvsup
OLD_FILES+=usr/share/examples/cvsup/ports-supfile
OLD_FILES+=usr/share/examples/cvsup/refuse
OLD_FILES+=usr/share/examples/cvsup/refuse.README
# 20130309: NWFS and NCP support removed
OLD_FILES+=usr/bin/ncplist
OLD_FILES+=usr/bin/ncplogin
OLD_FILES+=usr/bin/ncplogout
OLD_FILES+=usr/include/fs/nwfs/nwfs.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h
OLD_DIRS+=usr/include/fs/nwfs
OLD_FILES+=usr/include/netncp/ncp.h
OLD_FILES+=usr/include/netncp/ncp_cfg.h
OLD_FILES+=usr/include/netncp/ncp_conn.h
OLD_FILES+=usr/include/netncp/ncp_file.h
OLD_FILES+=usr/include/netncp/ncp_lib.h
OLD_FILES+=usr/include/netncp/ncp_ncp.h
OLD_FILES+=usr/include/netncp/ncp_nls.h
OLD_FILES+=usr/include/netncp/ncp_rcfile.h
OLD_FILES+=usr/include/netncp/ncp_rq.h
OLD_FILES+=usr/include/netncp/ncp_sock.h
OLD_FILES+=usr/include/netncp/ncp_subr.h
OLD_FILES+=usr/include/netncp/ncp_user.h
OLD_FILES+=usr/include/netncp/ncpio.h
OLD_FILES+=usr/include/netncp/nwerror.h
OLD_DIRS+=usr/include/netncp
OLD_FILES+=usr/lib/libncp.a
OLD_FILES+=usr/lib/libncp.so
OLD_LIBS+=usr/lib/libncp.so.4
OLD_FILES+=usr/lib/libncp_p.a
OLD_FILES+=usr/lib32/libncp.a
OLD_FILES+=usr/lib32/libncp.so
OLD_LIBS+=usr/lib32/libncp.so.4
OLD_FILES+=usr/lib32/libncp_p.a
OLD_FILES+=usr/sbin/mount_nwfs
OLD_FILES+=usr/share/examples/nwclient/dot.nwfsrc
OLD_FILES+=usr/share/examples/nwclient/nwfs.sh.sample
OLD_DIRS+=usr/share/examples/nwclient
OLD_FILES+=usr/share/man/man1/ncplist.1.gz
OLD_FILES+=usr/share/man/man1/ncplogin.1.gz
OLD_FILES+=usr/share/man/man1/ncplogout.1.gz
OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz
# 20130302: NTFS support removed
OLD_FILES+=rescue/mount_ntfs
OLD_FILES+=sbin/mount_ntfs
OLD_FILES+=usr/include/fs/ntfs/ntfs.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/fs/ntfs/ntfsmount.h
OLD_DIRS+=usr/include/fs/ntfs
OLD_FILES+=usr/share/man/man8/mount_ntfs.8.gz
# 20130302: PORTALFS support removed
OLD_FILES+=usr/include/fs/portalfs/portal.h
OLD_DIRS+=usr/include/fs/portalfs
OLD_FILES+=usr/sbin/mount_portalfs
OLD_FILES+=usr/share/examples/portal/README
OLD_FILES+=usr/share/examples/portal/portal.conf
OLD_DIRS+=usr/share/examples/portal
OLD_FILES+=usr/share/man/man8/mount_portalfs.8.gz
# 20130302: CODAFS support removed
OLD_FILES+=usr/share/man/man4/coda.4.gz
# 20130302: XFS support removed
OLD_FILES+=usr/share/man/man5/xfs.5.gz
# 20130302: Capsicum overhaul
OLD_FILES+=usr/share/man/man2/cap_getrights.2.gz
OLD_FILES+=usr/share/man/man2/cap_new.2.gz
# 20130213: OpenSSL 1.0.1e import
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover_init.3.gz
# 20130116: removed long unused directories for .1aout section manpages
OLD_FILES+=usr/share/man/en.ISO8859-1/man1aout
OLD_FILES+=usr/share/man/en.UTF-8/man1aout
OLD_DIRS+=usr/share/man/man1aout
OLD_DIRS+=usr/share/man/cat1aout
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1aout
OLD_DIRS+=usr/share/man/en.UTF-8/cat1aout
# 20130110: bsd.compat.mk removed
OLD_FILES+=usr/share/mk/bsd.compat.mk
# 20130103: gnats-supfile removed
OLD_FILES+=usr/share/examples/cvsup/gnats-supfile
# 20121230: libdisk removed
OLD_FILES+=usr/share/man/man3/libdisk.3.gz usr/include/libdisk.h
OLD_FILES+=usr/lib/libdisk.a usr/lib32/libdisk.a
# 20121230: remove wrongly created directories for auditdistd
OLD_DIRS+=var/dist
OLD_DIRS+=var/remote
# 20121114: zpool-features manual page moved from section 5 to 7
OLD_FILES+=usr/share/man/man5/zpool-features.5.gz
# 20121022: remove harp, hfa and idt man page
OLD_FILES+=usr/share/man/man4/harp.4.gz
OLD_FILES+=usr/share/man/man4/hfa.4.gz
OLD_FILES+=usr/share/man/man4/idt.4.gz
OLD_FILES+=usr/share/man/man4/if_idt.4.gz
# 20121022: VFS_LOCK_GIANT elimination
OLD_FILES+=usr/share/man/man9/VFS_LOCK_GIANT.9.gz
OLD_FILES+=usr/share/man/man9/VFS_UNLOCK_GIANT.9.gz
# 20121004: remove incomplete unwind.h
OLD_FILES+=usr/include/clang/3.2/unwind.h
# 20120910: NetBSD compat shims removed
OLD_FILES+=usr/include/cam/scsi/scsi_low_pisa.h
OLD_FILES+=usr/include/sys/device_port.h
# 20120909: doc and www supfiles removed
OLD_FILES+=usr/share/examples/cvsup/doc-supfile
OLD_FILES+=usr/share/examples/cvsup/www-supfile
# 20120908: pf cleanup
OLD_FILES+=usr/include/net/if_pflow.h
# 20120816: new clang import which bumps version from 3.1 to 3.2
OLD_FILES+=usr/bin/llvm-ld
OLD_FILES+=usr/bin/llvm-stub
OLD_FILES+=usr/include/clang/3.1/altivec.h
OLD_FILES+=usr/include/clang/3.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.1/cpuid.h
OLD_FILES+=usr/include/clang/3.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.1/immintrin.h
OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.1/module.map
OLD_FILES+=usr/include/clang/3.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.1/unwind.h
OLD_FILES+=usr/include/clang/3.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.1/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.1
OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz
# 20120712: OpenSSL 1.0.1c import
OLD_LIBS+=lib/libcrypto.so.6
OLD_LIBS+=usr/lib/libssl.so.6
OLD_LIBS+=usr/lib32/libcrypto.so.6
OLD_LIBS+=usr/lib32/libssl.so.6
OLD_FILES+=usr/include/openssl/aes_locl.h
OLD_FILES+=usr/include/openssl/bio_lcl.h
OLD_FILES+=usr/include/openssl/e_os.h
OLD_FILES+=usr/include/openssl/fips.h
OLD_FILES+=usr/include/openssl/fips_rand.h
OLD_FILES+=usr/include/openssl/md2.h
OLD_FILES+=usr/include/openssl/pq_compat.h
OLD_FILES+=usr/include/openssl/store.h
OLD_FILES+=usr/include/openssl/tmdiff.h
OLD_FILES+=usr/include/openssl/ui_locl.h
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_id_callback.3.gz
# 20120621: remove old man page
OLD_FILES+=usr/share/man/man8/vnconfig.8.gz
# 20120619: TOE support updated
OLD_FILES+=usr/include/netinet/toedev.h
# 20120613: auth.conf removed
OLD_FILES+=etc/auth.conf
OLD_FILES+=usr/share/examples/etc/auth.conf
OLD_FILES+=usr/share/man/man3/auth.3.gz
OLD_FILES+=usr/share/man/man3/auth_getval.3.gz
OLD_FILES+=usr/share/man/man5/auth.conf.5.gz
# 20120530: kde pam now lives in ports
OLD_FILES+=etc/pam.d/kde
# 20120521: byacc import
OLD_FILES+=usr/bin/yyfix
OLD_FILES+=usr/share/man/man1/yyfix.1.gz
# 20120505: new clang import installed a redundant internal header
OLD_FILES+=usr/include/clang/3.1/stdalign.h
# 20120428: MD2 removed from libmd
OLD_LIBS+=lib/libmd.so.5
OLD_FILES+=usr/include/md2.h
OLD_LIBS+=usr/lib32/libmd.so.5
OLD_FILES+=usr/share/man/man3/MD2Data.3.gz
OLD_FILES+=usr/share/man/man3/MD2End.3.gz
OLD_FILES+=usr/share/man/man3/MD2File.3.gz
OLD_FILES+=usr/share/man/man3/MD2FileChunk.3.gz
OLD_FILES+=usr/share/man/man3/MD2Final.3.gz
OLD_FILES+=usr/share/man/man3/MD2Init.3.gz
OLD_FILES+=usr/share/man/man3/MD2Update.3.gz
OLD_FILES+=usr/share/man/man3/md2.3.gz
# 20120425: libusb version bump (r234684)
OLD_LIBS+=usr/lib/libusb.so.2
OLD_LIBS+=usr/lib32/libusb.so.2
OLD_FILES+=usr/share/man/man3/libsub_get_active_config_descriptor.3.gz
# 20120415: new clang import which bumps version from 3.0 to 3.1
OLD_FILES+=usr/include/clang/3.0/altivec.h
OLD_FILES+=usr/include/clang/3.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.0/immintrin.h
OLD_FILES+=usr/include/clang/3.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.0
# 20120412: BIND 9.8.1 release notes removed
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.pdf
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.txt
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.html
OLD_FILES+=usr/share/doc/bind9/release-notes.css
# 20120330: legacy(4) moved to x86
OLD_FILES+=usr/include/machine/legacyvar.h
# 20120324: MPI headers updated
OLD_FILES+=usr/include/dev/mpt/mpilib/mpi_inb.h
# 20120322: hwpmc_mips24k.h removed
OLD_FILES+=usr/include/dev/hwpmc/hwpmc_mips24k.h
# 20120322: Update heimdal to 1.5.1.
OLD_FILES+=usr/include/krb5-v4compat.h \
	usr/include/krb_err.h \
	usr/include/hdb-private.h \
	usr/share/man/man3/krb5_addresses.3.gz \
	usr/share/man/man3/krb5_cc_cursor.3.gz \
	usr/share/man/man3/krb5_cc_ops.3.gz \
	usr/share/man/man3/krb5_config.3.gz \
	usr/share/man/man3/krb5_config_get_int_default.3.gz \
	usr/share/man/man3/krb5_context.3.gz \
	usr/share/man/man3/krb5_data.3.gz \
	usr/share/man/man3/krb5_err.3.gz \
	usr/share/man/man3/krb5_errx.3.gz \
	usr/share/man/man3/krb5_keyblock.3.gz \
	usr/share/man/man3/krb5_keytab_entry.3.gz \
	usr/share/man/man3/krb5_kt_cursor.3.gz \
	usr/share/man/man3/krb5_kt_ops.3.gz \
	usr/share/man/man3/krb5_set_warn_dest.3.gz \
	usr/share/man/man3/krb5_verr.3.gz \
	usr/share/man/man3/krb5_verrx.3.gz \
	usr/share/man/man3/krb5_vwarnx.3.gz \
	usr/share/man/man3/krb5_warn.3.gz \
	usr/share/man/man3/krb5_warnx.3.gz
OLD_LIBS+=usr/lib/libasn1.so.10 \
	usr/lib/libhdb.so.10 \
	usr/lib/libheimntlm.so.10 \
	usr/lib/libhx509.so.10 \
	usr/lib/libkadm5clnt.so.10 \
	usr/lib/libkadm5srv.so.10 \
	usr/lib/libkafs5.so.10 \
	usr/lib/libkrb5.so.10 \
	usr/lib/libroken.so.10 \
	usr/lib32/libasn1.so.10 \
	usr/lib32/libhdb.so.10 \
	usr/lib32/libheimntlm.so.10 \
	usr/lib32/libhx509.so.10 \
	usr/lib32/libkadm5clnt.so.10 \
	usr/lib32/libkadm5srv.so.10 \
	usr/lib32/libkafs5.so.10 \
	usr/lib32/libkrb5.so.10 \
	usr/lib32/libroken.so.10
# 20120309: Remove fifofs header files.
OLD_FILES+=usr/include/fs/fifofs/fifo.h
OLD_DIRS+=usr/include/fs/fifofs
# 20120304: xlocale cleanup
OLD_FILES+=usr/include/_xlocale_ctype.h
# 20120225: libarchive 3.0.3
OLD_FILES+=usr/share/man/man3/archive_read_data_into_buffer.3.gz \
	usr/share/man/man3/archive_read_support_compression_all.3.gz \
	usr/share/man/man3/archive_read_support_compression_bzip2.3.gz \
	usr/share/man/man3/archive_read_support_compression_compress.3.gz \
	usr/share/man/man3/archive_read_support_compression_gzip.3.gz \
	usr/share/man/man3/archive_read_support_compression_lzma.3.gz \
	usr/share/man/man3/archive_read_support_compression_none.3.gz \
	usr/share/man/man3/archive_read_support_compression_program.3.gz \
	usr/share/man/man3/archive_read_support_compression_program_signature.3.gz \
	usr/share/man/man3/archive_read_support_compression_xz.3.gz \
	usr/share/man/man3/archive_write_set_callbacks.3.gz \
	usr/share/man/man3/archive_write_set_compression_bzip2.3.gz \
	usr/share/man/man3/archive_write_set_compression_compress.3.gz \
	usr/share/man/man3/archive_write_set_compression_gzip.3.gz \
	usr/share/man/man3/archive_write_set_compression_none.3.gz \
	usr/share/man/man3/archive_write_set_compression_program.3.gz
OLD_LIBS+=usr/lib/libarchive.so.5
OLD_LIBS+=usr/lib32/libarchive.so.5
# 20120113: removal of wtmpcvt(1)
OLD_FILES+=usr/bin/wtmpcvt
OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz
# 20111214: eventtimers(7) moved to eventtimers(4)
OLD_FILES+=usr/share/man/man7/eventtimers.7.gz
# 20111125: amd(4) removed
OLD_FILES+=usr/share/man/man4/amd.4.gz
# 20111125: libodialog removed
OLD_FILES+=usr/lib/libodialog.a
OLD_FILES+=usr/lib/libodialog.so
OLD_LIBS+=usr/lib/libodialog.so.7
OLD_FILES+=usr/lib/libodialog_p.a
OLD_FILES+=usr/lib32/libodialog.a
OLD_FILES+=usr/lib32/libodialog.so
OLD_LIBS+=usr/lib32/libodialog.so.7
OLD_FILES+=usr/lib32/libodialog_p.a
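# Note: the Heimdal and libarchive blocks above use backslash continuation
# lines; make(1) joins these into one logical line, so the form is
# equivalent to one += append per path, e.g.:
# OLD_FILES+=usr/include/krb5-v4compat.h \
#	usr/include/krb_err.h
# is the same as:
# OLD_FILES+=usr/include/krb5-v4compat.h
# OLD_FILES+=usr/include/krb_err.h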
OLD_FILES+=usr/share/man/man8/sysinstall.8.gz OLD_FILES+=usr/lib/libftpio.a OLD_FILES+=usr/lib/libftpio.so OLD_LIBS+=usr/lib/libftpio.so.8 OLD_FILES+=usr/lib/libftpio_p.a OLD_FILES+=usr/lib32/libftpio.a OLD_FILES+=usr/lib32/libftpio.so OLD_LIBS+=usr/lib32/libftpio.so.8 OLD_FILES+=usr/lib32/libftpio_p.a OLD_FILES+=usr/include/ftpio.h OLD_FILES+=usr/share/man/man3/ftpio.3.gz # 20110915: rename congestion control manpages OLD_FILES+=usr/share/man/man4/cc.4.gz OLD_FILES+=usr/share/man/man9/cc.9.gz # 20110831: atomic page flags operations OLD_FILES+=usr/share/man/man9/vm_page_flag.9.gz OLD_FILES+=usr/share/man/man9/vm_page_flag_clear.9.gz OLD_FILES+=usr/share/man/man9/vm_page_flag_set.9.gz # 20110828: library version bump for 9.0 OLD_LIBS+=lib/libcam.so.5 OLD_LIBS+=lib/libpcap.so.7 OLD_LIBS+=lib/libufs.so.5 OLD_LIBS+=usr/lib/libbsnmp.so.5 OLD_LIBS+=usr/lib/libdwarf.so.2 OLD_LIBS+=usr/lib/libopie.so.6 OLD_LIBS+=usr/lib/librtld_db.so.1 OLD_LIBS+=usr/lib/libtacplus.so.4 OLD_LIBS+=usr/lib32/libcam.so.5 OLD_LIBS+=usr/lib32/libpcap.so.7 OLD_LIBS+=usr/lib32/libufs.so.5 OLD_LIBS+=usr/lib32/libbsnmp.so.5 OLD_LIBS+=usr/lib32/libdwarf.so.2 OLD_LIBS+=usr/lib32/libopie.so.6 OLD_LIBS+=usr/lib32/librtld_db.so.1 OLD_LIBS+=usr/lib32/libtacplus.so.4 # 20110817: no more acd.4, ad.4, afd.4 and ast.4 OLD_FILES+=usr/share/man/man4/acd.4.gz OLD_FILES+=usr/share/man/man4/ad.4.gz OLD_FILES+=usr/share/man/man4/afd.4.gz OLD_FILES+=usr/share/man/man4/ast.4.gz # 20110718: no longer useful in the age of rc.d OLD_FILES+=usr/sbin/named.reconfig OLD_FILES+=usr/sbin/named.reload OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz OLD_FILES+=usr/share/man/man8/named.reload.8.gz # 20110716: bind 9.8.0 import OLD_LIBS+=usr/lib/liblwres.so.50 OLD_FILES+=usr/share/doc/bind9/KNOWN-DEFECTS OLD_FILES+=usr/share/doc/bind9/NSEC3-NOTES OLD_FILES+=usr/share/doc/bind9/README.idnkit OLD_FILES+=usr/share/doc/bind9/README.pkcs11 # 20110709: vm_map_clean.9 -> vm_map_sync.9 OLD_FILES+=usr/share/man/man9/vm_map_clean.9.gz # 20110709: Catch up with removal of these functions. 
OLD_FILES+=usr/share/man/man9/vm_page_copy.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_protect.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_zero_fill.9.gz
# 20110707: script no longer needed by /etc/rc.d/nfsd
OLD_FILES+=etc/rc.d/nfsserver
# 20110705: files moved so both NFS clients can share them
OLD_FILES+=usr/include/nfsclient/krpc.h
OLD_FILES+=usr/include/nfsclient/nfsdiskless.h
# 20110705: default NFS client switched to the new implementation
OLD_FILES+=sbin/mount_newnfs
OLD_FILES+=usr/share/man/man8/mount_newnfs.8.gz
OLD_FILES+=usr/include/nfsclient/nfs_kdtrace.h
# 20110628: calendar.msk removed
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk
# 20110517: libpkg removed
OLD_FILES+=usr/include/pkg.h
OLD_FILES+=usr/lib/libpkg.a
OLD_FILES+=usr/lib/libpkg.so
OLD_LIBS+=usr/lib/libpkg.so.0
OLD_FILES+=usr/lib/libpkg_p.a
OLD_FILES+=usr/lib32/libpkg.a
OLD_FILES+=usr/lib32/libpkg.so
OLD_LIBS+=usr/lib32/libpkg.so.0
OLD_FILES+=usr/lib32/libpkg_p.a
# 20110517: libsbuf version bump
OLD_LIBS+=lib/libsbuf.so.5
OLD_LIBS+=usr/lib32/libsbuf.so.5
# 20110502: new clang import which bumps version from 2.9 to 3.0
OLD_FILES+=usr/include/clang/2.9/emmintrin.h
OLD_FILES+=usr/include/clang/2.9/mm_malloc.h
OLD_FILES+=usr/include/clang/2.9/mmintrin.h
OLD_FILES+=usr/include/clang/2.9/pmmintrin.h
OLD_FILES+=usr/include/clang/2.9/tmmintrin.h
OLD_FILES+=usr/include/clang/2.9/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.9
# 20110417: removal of Objective-C support
OLD_FILES+=usr/include/objc/encoding.h
OLD_FILES+=usr/include/objc/hash.h
OLD_FILES+=usr/include/objc/NXConstStr.h
OLD_FILES+=usr/include/objc/objc-api.h
OLD_FILES+=usr/include/objc/objc-decls.h
OLD_FILES+=usr/include/objc/objc-list.h
OLD_FILES+=usr/include/objc/objc.h
OLD_FILES+=usr/include/objc/Object.h
OLD_FILES+=usr/include/objc/Protocol.h
OLD_FILES+=usr/include/objc/runtime.h
OLD_FILES+=usr/include/objc/sarray.h
OLD_FILES+=usr/include/objc/thr.h
OLD_FILES+=usr/include/objc/typedstream.h
OLD_FILES+=usr/lib/libobjc.a
OLD_FILES+=usr/lib/libobjc.so
OLD_FILES+=usr/lib/libobjc_p.a
OLD_FILES+=usr/libexec/cc1obj
OLD_LIBS+=usr/lib/libobjc.so.4
OLD_DIRS+=usr/include/objc
OLD_FILES+=usr/lib32/libobjc.a
OLD_FILES+=usr/lib32/libobjc.so
OLD_FILES+=usr/lib32/libobjc_p.a
OLD_LIBS+=usr/lib32/libobjc.so.4
# 20110331: firmware.img created at build time
OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img
# 20110224: sticky.8 -> sticky.7
OLD_FILES+=usr/share/man/man8/sticky.8.gz
# 20110220: new clang import which bumps version from 2.8 to 2.9
OLD_FILES+=usr/include/clang/2.8/emmintrin.h
OLD_FILES+=usr/include/clang/2.8/mm_malloc.h
OLD_FILES+=usr/include/clang/2.8/mmintrin.h
OLD_FILES+=usr/include/clang/2.8/pmmintrin.h
OLD_FILES+=usr/include/clang/2.8/tmmintrin.h
OLD_FILES+=usr/include/clang/2.8/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.8
# 20110119: netinet/sctp_cc_functions.h removed
OLD_FILES+=usr/include/netinet/sctp_cc_functions.h
# 20110119: Remove SYSCTL_*X* sysctl additions.
OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \
	usr/share/man/man9/SYSCTL_XLONG.9.gz
# 20110112: Update dialog to new version, rename old libdialog to libodialog,
# removing associated man pages and header files.
OLD_FILES+=usr/share/man/man3/draw_shadow.3.gz \
	usr/share/man/man3/draw_box.3.gz usr/share/man/man3/line_edit.3.gz \
	usr/share/man/man3/strheight.3.gz usr/share/man/man3/strwidth.3.gz \
	usr/share/man/man3/dialog_create_rc.3.gz \
	usr/share/man/man3/dialog_yesno.3.gz usr/share/man/man3/dialog_noyes.3.gz \
	usr/share/man/man3/dialog_prgbox.3.gz \
	usr/share/man/man3/dialog_textbox.3.gz usr/share/man/man3/dialog_menu.3.gz \
	usr/share/man/man3/dialog_checklist.3.gz \
	usr/share/man/man3/dialog_radiolist.3.gz \
	usr/share/man/man3/dialog_inputbox.3.gz \
	usr/share/man/man3/dialog_clear_norefresh.3.gz \
	usr/share/man/man3/dialog_clear.3.gz usr/share/man/man3/dialog_update.3.gz \
	usr/share/man/man3/dialog_fselect.3.gz \
	usr/share/man/man3/dialog_notify.3.gz \
	usr/share/man/man3/dialog_mesgbox.3.gz \
	usr/share/man/man3/dialog_gauge.3.gz usr/share/man/man3/init_dialog.3.gz \
	usr/share/man/man3/end_dialog.3.gz usr/share/man/man3/use_helpfile.3.gz \
	usr/share/man/man3/use_helpline.3.gz usr/share/man/man3/get_helpline.3.gz \
	usr/share/man/man3/restore_helpline.3.gz \
	usr/share/man/man3/dialog_msgbox.3.gz \
	usr/share/man/man3/dialog_ftree.3.gz usr/share/man/man3/dialog_tree.3.gz \
	usr/share/examples/dialog/README usr/share/examples/dialog/checklist \
	usr/share/examples/dialog/ftreebox usr/share/examples/dialog/infobox \
	usr/share/examples/dialog/inputbox usr/share/examples/dialog/menubox \
	usr/share/examples/dialog/msgbox usr/share/examples/dialog/prgbox \
	usr/share/examples/dialog/radiolist usr/share/examples/dialog/textbox \
	usr/share/examples/dialog/treebox usr/share/examples/dialog/yesno \
	usr/share/examples/libdialog/Makefile usr/share/examples/libdialog/check1.c\
	usr/share/examples/libdialog/check2.c usr/share/examples/libdialog/check3.c\
	usr/share/examples/libdialog/dselect.c \
	usr/share/examples/libdialog/fselect.c \
	usr/share/examples/libdialog/ftree1.c \
	usr/share/examples/libdialog/ftree1.test \
	usr/share/examples/libdialog/ftree2.c \
	usr/share/examples/libdialog/ftree2.test \
	usr/share/examples/libdialog/gauge.c usr/share/examples/libdialog/input1.c \
	usr/share/examples/libdialog/input2.c usr/share/examples/libdialog/menu1.c \
	usr/share/examples/libdialog/menu2.c usr/share/examples/libdialog/menu3.c \
	usr/share/examples/libdialog/msg.c usr/share/examples/libdialog/prgbox.c \
	usr/share/examples/libdialog/radio1.c usr/share/examples/libdialog/radio2.c\
	usr/share/examples/libdialog/radio3.c usr/share/examples/libdialog/text.c \
	usr/share/examples/libdialog/tree.c usr/share/examples/libdialog/yesno.c
OLD_DIRS+=usr/share/examples/libdialog usr/share/examples/dialog
# 20101114: Remove long-obsolete MAKEDEV.8
OLD_FILES+=usr/share/man/man8/MAKEDEV.8.gz
# 20101112: vgonel(9) moved to the private API a while ago
OLD_FILES+=usr/share/man/man9/vgonel.9.gz
# 20101112: removed gasp.info
OLD_FILES+=usr/share/info/gasp.info.gz
# 20101109: machine/mutex.h removed
OLD_FILES+=usr/include/machine/mutex.h
# 20101109: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/mptable.h
.endif
# 20101101: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/apicreg.h
OLD_FILES+=usr/include/machine/mca.h
.endif
# 20101020: catch up with vm_page_sleep_if_busy rename
OLD_FILES+=usr/share/man/man9/vm_page_sleep_busy.9.gz
# 20101018: taskqueue(9) updates
OLD_FILES+=usr/share/man/man9/taskqueue_find.9.gz
# 20101011: removed subblock.h from liblzma
OLD_FILES+=usr/include/lzma/subblock.h
# 20101002: removed manpath.config
OLD_FILES+=etc/manpath.config
OLD_FILES+=usr/share/examples/etc/manpath.config
# 20100910: renamed sbuf_overflowed to sbuf_error
OLD_FILES+=usr/share/man/man9/sbuf_overflowed.9.gz
# 20100815: retired last traces of chooseproc(9)
OLD_FILES+=usr/share/man/man9/chooseproc.9.gz
# 20100806: removal of unused libcompat routines
OLD_FILES+=usr/share/man/man3/ascftime.3.gz
OLD_FILES+=usr/share/man/man3/cfree.3.gz
OLD_FILES+=usr/share/man/man3/cftime.3.gz
OLD_FILES+=usr/share/man/man3/getpw.3.gz
# 20100801: tzdata2010k import
OLD_FILES+=usr/share/zoneinfo/Pacific/Ponape
OLD_FILES+=usr/share/zoneinfo/Pacific/Truk
# 20100725: acpi_aiboost(4) removal.
OLD_FILES+=usr/share/man/man4/acpi_aiboost.4.gz
# 20100724: nfsclient/nfs_lock.h moved to nfs/nfs_lock.h
OLD_FILES+=usr/include/nfsclient/nfs_lock.h
# 20100720: new clang import which bumps version from 2.0 to 2.8
OLD_FILES+=usr/include/clang/2.0/emmintrin.h
OLD_FILES+=usr/include/clang/2.0/mm_malloc.h
OLD_FILES+=usr/include/clang/2.0/mmintrin.h
OLD_FILES+=usr/include/clang/2.0/pmmintrin.h
OLD_FILES+=usr/include/clang/2.0/tmmintrin.h
OLD_FILES+=usr/include/clang/2.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.0
# 20100706: removed pc-sysinstall's detect-vmware.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh
# 20100701: [powerpc] removed
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/intr.h
.endif
# 20100514: library version bump for versioned symbols for liblzma
OLD_LIBS+=usr/lib/liblzma.so.0
OLD_LIBS+=usr/lib32/liblzma.so.0
# 20100511: move GCC-specific headers to /usr/include/gcc
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/emmintrin.h
OLD_FILES+=usr/include/mm_malloc.h
OLD_FILES+=usr/include/pmmintrin.h
OLD_FILES+=usr/include/xmmintrin.h
.endif
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" || ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/mmintrin.h
.endif
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/altivec.h
OLD_FILES+=usr/include/ppc-asm.h
OLD_FILES+=usr/include/spe.h
.endif
# 20100416: [mips] removed
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/psl.h
.endif
# 20100415: [mips] removed unused headers
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/archtype.h
OLD_FILES+=usr/include/machine/segments.h
OLD_FILES+=usr/include/machine/rm7000.h
OLD_FILES+=usr/include/machine/defs.h
OLD_FILES+=usr/include/machine/queue.h
.endif
# 20100326: gcpio removal
OLD_FILES+=usr/bin/gcpio
OLD_FILES+=usr/share/info/cpio.info.gz
OLD_FILES+=usr/share/man/man1/gcpio.1.gz
# 20100322: libz update
OLD_LIBS+=lib/libz.so.5
OLD_LIBS+=usr/lib32/libz.so.5
# 20100314: removal of regexp.h
OLD_FILES+=usr/include/regexp.h
OLD_FILES+=usr/share/man/man3/regexp.3.gz
OLD_FILES+=usr/share/man/man3/regsub.3.gz
# 20100303: actual removal of utmp.h
OLD_FILES+=usr/include/utmp.h
# 20100208: man pages moved
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/alpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/amdpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/mcd.4.gz
OLD_FILES+=usr/share/man/man4/i386/padlock.4.gz
OLD_FILES+=usr/share/man/man4/i386/pcf.4.gz
OLD_FILES+=usr/share/man/man4/i386/scd.4.gz
OLD_FILES+=usr/share/man/man4/i386/viapm.4.gz
.endif
# 20100122: move BSDL bc/dc USD documents to /usr/share/doc/usd
OLD_FILES+=usr/share/doc/papers/bc.ascii.gz
OLD_FILES+=usr/share/doc/papers/dc.ascii.gz
# 20100120: replacing GNU bc/dc with BSDL versions
OLD_FILES+=usr/share/examples/bc/ckbook.b
OLD_FILES+=usr/share/examples/bc/pi.b
OLD_FILES+=usr/share/examples/bc/primes.b OLD_FILES+=usr/share/examples/bc/twins.b OLD_FILES+=usr/share/info/dc.info.gz OLD_DIRS+=usr/share/examples/bc # 20100114: removal of ttyslot(3) OLD_FILES+=usr/share/man/man3/ttyslot.3.gz # 20100113: remove utmp.h, replace it by utmpx.h OLD_FILES+=usr/share/man/man3/login.3.gz OLD_FILES+=usr/share/man/man3/logout.3.gz OLD_FILES+=usr/share/man/man3/logwtmp.3.gz OLD_FILES+=usr/share/man/man3/ulog_endutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxline.3.gz OLD_FILES+=usr/share/man/man3/ulog_getutxuser.3.gz OLD_FILES+=usr/share/man/man3/ulog_pututxline.3.gz OLD_FILES+=usr/share/man/man3/ulog_setutxent.3.gz OLD_FILES+=usr/share/man/man3/ulog_setutxfile.3.gz OLD_FILES+=usr/share/man/man5/lastlog.5.gz OLD_FILES+=usr/share/man/man5/utmp.5.gz OLD_FILES+=usr/share/man/man5/wtmp.5.gz OLD_LIBS+=lib/libutil.so.8 OLD_LIBS+=usr/lib32/libutil.so.8 # 20100105: new userland semaphore implementation OLD_FILES+=usr/include/sys/semaphore.h # 20100103: ntptrace(8) removed OLD_FILES+=usr/sbin/ntptrace OLD_FILES+=usr/share/man/man8/ntptrace.8.gz # 20091229: remove no longer relevant examples OLD_FILES+=usr/share/examples/pppd/auth-down.sample OLD_FILES+=usr/share/examples/pppd/auth-up.sample OLD_FILES+=usr/share/examples/pppd/chap-secrets.sample OLD_FILES+=usr/share/examples/pppd/chat.sh.sample OLD_FILES+=usr/share/examples/pppd/ip-down.sample OLD_FILES+=usr/share/examples/pppd/ip-up.sample OLD_FILES+=usr/share/examples/pppd/options.sample OLD_FILES+=usr/share/examples/pppd/pap-secrets.sample OLD_FILES+=usr/share/examples/pppd/ppp.deny.sample OLD_FILES+=usr/share/examples/pppd/ppp.shells.sample OLD_DIRS+=usr/share/examples/pppd OLD_FILES+=usr/share/examples/slattach/unit-command.sh OLD_DIRS+=usr/share/examples/slattach OLD_FILES+=usr/share/examples/sliplogin/slip.hosts OLD_FILES+=usr/share/examples/sliplogin/slip.login OLD_FILES+=usr/share/examples/sliplogin/slip.logout OLD_FILES+=usr/share/examples/sliplogin/slip.slparms OLD_DIRS+=usr/share/examples/sliplogin OLD_FILES+=usr/share/examples/startslip/sldown.sh OLD_FILES+=usr/share/examples/startslip/slip.sh OLD_FILES+=usr/share/examples/startslip/slup.sh OLD_DIRS+=usr/share/examples/startslip # 20091202: unify rc.firewall and rc.firewall6. 
OLD_FILES+=etc/rc.d/ip6fw OLD_FILES+=etc/rc.firewall6 OLD_FILES+=usr/share/examples/etc/rc.firewall6 # 20091117: removal of rc.early(8) link OLD_FILES+=usr/share/man/man8/rc.early.8.gz # 20091117: usr/share/zoneinfo/GMT link removed OLD_FILES+=usr/share/zoneinfo/GMT # 20091027: pselect.3 implemented as syscall OLD_FILES+=usr/share/man/man3/pselect.3.gz # 20091005: fusword.9 and susword.9 removed OLD_FILES+=usr/share/man/man9/fusword.9.gz OLD_FILES+=usr/share/man/man9/susword.9.gz # 20090909: vesa and dpms promoted to be i386/amd64 common OLD_FILES+=usr/include/machine/pc/vesa.h OLD_FILES+=usr/share/man/man4/i386/dpms.4.gz # 20090904: remove lukemftpd OLD_FILES+=usr/libexec/lukemftpd OLD_FILES+=usr/share/man/man5/ftpd.conf.5.gz OLD_FILES+=usr/share/man/man5/ftpusers.5.gz OLD_FILES+=usr/share/man/man8/lukemftpd.8.gz # 20090902: BSD.{x11,x11-4}.dist are dead and BSD.local.dist lives in ports/ OLD_FILES+=etc/mtree/BSD.local.dist OLD_FILES+=etc/mtree/BSD.x11.dist OLD_FILES+=etc/mtree/BSD.x11-4.dist # 20090812: net80211 documentation overhaul OLD_FILES+=usr/share/man/man9/ieee80211_add_rates.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_add_xrates.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_alloc_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_begin_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_cfgget.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_cfgset.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_chan2ieee.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_chan2mode.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_create_ibss.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_crypto_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_crypto_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_decap.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_dump_pkt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_dup_bss.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_encap.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_end_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_find_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_fix_rate.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_free_allnodes.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_ieee2mhz.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_ioctl.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_lookup_node.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media2rate.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_change.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_init.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_media_status.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_mhz2ieee.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_next_scan.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_node_lateattach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_print_essid.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_proto_attach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_proto_detach.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_rate2media.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_recv_mgmt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_send_mgmt.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_setmode.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_timeout_nodes.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_watchdog.9.gz OLD_FILES+=usr/share/man/man9/ieee80211_wep_crypt.9.gz # 20090801: vimage.h removed in favour of vnet.h OLD_FILES+=usr/include/sys/vimage.h # 20101208: libbsnmp was moved to usr/lib OLD_LIBS+=lib/libbsnmp.so.5 # 20090719: 
library version bump for 8.0 OLD_LIBS+=lib/libalias.so.6 OLD_LIBS+=lib/libavl.so.1 OLD_LIBS+=lib/libbegemot.so.3 OLD_LIBS+=lib/libbsdxml.so.3 OLD_LIBS+=lib/libbsnmp.so.4 OLD_LIBS+=lib/libcam.so.4 OLD_LIBS+=lib/libcrypt.so.4 OLD_LIBS+=lib/libcrypto.so.5 OLD_LIBS+=lib/libctf.so.1 OLD_LIBS+=lib/libdevstat.so.6 OLD_LIBS+=lib/libdtrace.so.1 OLD_LIBS+=lib/libedit.so.6 OLD_LIBS+=lib/libgeom.so.4 OLD_LIBS+=lib/libipsec.so.3 OLD_LIBS+=lib/libipx.so.4 OLD_LIBS+=lib/libkiconv.so.3 OLD_LIBS+=lib/libkvm.so.4 OLD_LIBS+=lib/libmd.so.4 OLD_LIBS+=lib/libncurses.so.7 OLD_LIBS+=lib/libncursesw.so.7 OLD_LIBS+=lib/libnvpair.so.1 OLD_LIBS+=lib/libpcap.so.6 OLD_LIBS+=lib/libreadline.so.7 OLD_LIBS+=lib/libsbuf.so.4 OLD_LIBS+=lib/libufs.so.4 OLD_LIBS+=lib/libumem.so.1 OLD_LIBS+=lib/libutil.so.7 OLD_LIBS+=lib/libuutil.so.1 OLD_LIBS+=lib/libz.so.4 OLD_LIBS+=lib/libzfs.so.1 OLD_LIBS+=lib/libzpool.so.1 OLD_LIBS+=usr/lib/libarchive.so.4 OLD_LIBS+=usr/lib/libauditd.so.4 OLD_LIBS+=usr/lib/libbluetooth.so.3 OLD_LIBS+=usr/lib/libbsm.so.2 OLD_LIBS+=usr/lib/libbz2.so.3 OLD_LIBS+=usr/lib/libcalendar.so.4 OLD_LIBS+=usr/lib/libcom_err.so.4 OLD_LIBS+=usr/lib/libdevinfo.so.4 OLD_LIBS+=usr/lib/libdialog.so.6 OLD_LIBS+=usr/lib/libdwarf.so.1 OLD_LIBS+=usr/lib/libfetch.so.5 OLD_LIBS+=usr/lib/libform.so.4 OLD_LIBS+=usr/lib/libformw.so.4 OLD_LIBS+=usr/lib/libftpio.so.7 OLD_LIBS+=usr/lib/libgnuregex.so.4 OLD_LIBS+=usr/lib/libgpib.so.2 OLD_LIBS+=usr/lib/libhistory.so.7 OLD_LIBS+=usr/lib/libmagic.so.3 OLD_LIBS+=usr/lib/libmemstat.so.2 OLD_LIBS+=usr/lib/libmenu.so.4 OLD_LIBS+=usr/lib/libmenuw.so.4 OLD_LIBS+=usr/lib/libmilter.so.4 OLD_LIBS+=usr/lib/libncp.so.3 OLD_LIBS+=usr/lib/libnetgraph.so.3 OLD_LIBS+=usr/lib/libngatm.so.3 OLD_LIBS+=usr/lib/libobjc.so.3 OLD_LIBS+=usr/lib/libopie.so.5 OLD_LIBS+=usr/lib/libpam.so.4 OLD_LIBS+=usr/lib/libpanel.so.4 OLD_LIBS+=usr/lib/libpanelw.so.4 OLD_LIBS+=usr/lib/libpmc.so.4 OLD_LIBS+=usr/lib/libproc.so.1 OLD_LIBS+=usr/lib/libradius.so.3 OLD_LIBS+=usr/lib/librpcsvc.so.4 OLD_LIBS+=usr/lib/libsdp.so.3 OLD_LIBS+=usr/lib/libsmb.so.3 OLD_LIBS+=usr/lib/libssh.so.4 OLD_LIBS+=usr/lib/libssl.so.5 OLD_LIBS+=usr/lib/libtacplus.so.3 OLD_LIBS+=usr/lib/libugidfw.so.3 OLD_LIBS+=usr/lib/libusb.so.1 OLD_LIBS+=usr/lib/libusbhid.so.3 OLD_LIBS+=usr/lib/libvgl.so.5 OLD_LIBS+=usr/lib/libwrap.so.5 OLD_LIBS+=usr/lib/libypclnt.so.3 OLD_LIBS+=usr/lib/pam_chroot.so.4 OLD_LIBS+=usr/lib/pam_deny.so.4 OLD_LIBS+=usr/lib/pam_echo.so.4 OLD_LIBS+=usr/lib/pam_exec.so.4 OLD_LIBS+=usr/lib/pam_ftpusers.so.4 OLD_LIBS+=usr/lib/pam_group.so.4 OLD_LIBS+=usr/lib/pam_guest.so.4 OLD_LIBS+=usr/lib/pam_krb5.so.4 OLD_LIBS+=usr/lib/pam_ksu.so.4 OLD_LIBS+=usr/lib/pam_lastlog.so.4 OLD_LIBS+=usr/lib/pam_login_access.so.4 OLD_LIBS+=usr/lib/pam_nologin.so.4 OLD_LIBS+=usr/lib/pam_opie.so.4 OLD_LIBS+=usr/lib/pam_opieaccess.so.4 OLD_LIBS+=usr/lib/pam_passwdqc.so.4 OLD_LIBS+=usr/lib/pam_permit.so.4 OLD_LIBS+=usr/lib/pam_radius.so.4 OLD_LIBS+=usr/lib/pam_rhosts.so.4 OLD_LIBS+=usr/lib/pam_rootok.so.4 OLD_LIBS+=usr/lib/pam_securetty.so.4 OLD_LIBS+=usr/lib/pam_self.so.4 OLD_LIBS+=usr/lib/pam_ssh.so.4 OLD_LIBS+=usr/lib/pam_tacplus.so.4 OLD_LIBS+=usr/lib/pam_unix.so.4 OLD_LIBS+=usr/lib/snmp_atm.so.5 OLD_LIBS+=usr/lib/snmp_bridge.so.5 OLD_LIBS+=usr/lib/snmp_hostres.so.5 OLD_LIBS+=usr/lib/snmp_mibII.so.5 OLD_LIBS+=usr/lib/snmp_netgraph.so.5 OLD_LIBS+=usr/lib/snmp_pf.so.5 OLD_LIBS+=usr/lib32/libalias.so.6 OLD_LIBS+=usr/lib32/libarchive.so.4 OLD_LIBS+=usr/lib32/libauditd.so.4 OLD_LIBS+=usr/lib32/libavl.so.1 OLD_LIBS+=usr/lib32/libbegemot.so.3 
OLD_LIBS+=usr/lib32/libbluetooth.so.3 OLD_LIBS+=usr/lib32/libbsdxml.so.3 OLD_LIBS+=usr/lib32/libbsm.so.2 OLD_LIBS+=usr/lib32/libbsnmp.so.4 OLD_LIBS+=usr/lib32/libbz2.so.3 OLD_LIBS+=usr/lib32/libcalendar.so.4 OLD_LIBS+=usr/lib32/libcam.so.4 OLD_LIBS+=usr/lib32/libcom_err.so.4 OLD_LIBS+=usr/lib32/libcrypt.so.4 OLD_LIBS+=usr/lib32/libcrypto.so.5 OLD_LIBS+=usr/lib32/libctf.so.1 OLD_LIBS+=usr/lib32/libdevinfo.so.4 OLD_LIBS+=usr/lib32/libdevstat.so.6 OLD_LIBS+=usr/lib32/libdialog.so.6 OLD_LIBS+=usr/lib32/libdtrace.so.1 OLD_LIBS+=usr/lib32/libdwarf.so.1 OLD_LIBS+=usr/lib32/libedit.so.6 OLD_LIBS+=usr/lib32/libfetch.so.5 OLD_LIBS+=usr/lib32/libform.so.4 OLD_LIBS+=usr/lib32/libformw.so.4 OLD_LIBS+=usr/lib32/libftpio.so.7 OLD_LIBS+=usr/lib32/libgeom.so.4 OLD_LIBS+=usr/lib32/libgnuregex.so.4 OLD_LIBS+=usr/lib32/libgpib.so.2 OLD_LIBS+=usr/lib32/libhistory.so.7 OLD_LIBS+=usr/lib32/libipsec.so.3 OLD_LIBS+=usr/lib32/libipx.so.4 OLD_LIBS+=usr/lib32/libkiconv.so.3 OLD_LIBS+=usr/lib32/libkvm.so.4 OLD_LIBS+=usr/lib32/libmagic.so.3 OLD_LIBS+=usr/lib32/libmd.so.4 OLD_LIBS+=usr/lib32/libmemstat.so.2 OLD_LIBS+=usr/lib32/libmenu.so.4 OLD_LIBS+=usr/lib32/libmenuw.so.4 OLD_LIBS+=usr/lib32/libmilter.so.4 OLD_LIBS+=usr/lib32/libncp.so.3 OLD_LIBS+=usr/lib32/libncurses.so.7 OLD_LIBS+=usr/lib32/libncursesw.so.7 OLD_LIBS+=usr/lib32/libnetgraph.so.3 OLD_LIBS+=usr/lib32/libngatm.so.3 OLD_LIBS+=usr/lib32/libnvpair.so.1 OLD_LIBS+=usr/lib32/libobjc.so.3 OLD_LIBS+=usr/lib32/libopie.so.5 OLD_LIBS+=usr/lib32/libpam.so.4 OLD_LIBS+=usr/lib32/libpanel.so.4 OLD_LIBS+=usr/lib32/libpanelw.so.4 OLD_LIBS+=usr/lib32/libpcap.so.6 OLD_LIBS+=usr/lib32/libpmc.so.4 OLD_LIBS+=usr/lib32/libproc.so.1 OLD_LIBS+=usr/lib32/libradius.so.3 OLD_LIBS+=usr/lib32/libreadline.so.7 OLD_LIBS+=usr/lib32/librpcsvc.so.4 OLD_LIBS+=usr/lib32/libsbuf.so.4 OLD_LIBS+=usr/lib32/libsdp.so.3 OLD_LIBS+=usr/lib32/libsmb.so.3 OLD_LIBS+=usr/lib32/libssh.so.4 OLD_LIBS+=usr/lib32/libssl.so.5 OLD_LIBS+=usr/lib32/libtacplus.so.3 OLD_LIBS+=usr/lib32/libufs.so.4 OLD_LIBS+=usr/lib32/libugidfw.so.3 OLD_LIBS+=usr/lib32/libumem.so.1 OLD_LIBS+=usr/lib32/libusb.so.1 OLD_LIBS+=usr/lib32/libusbhid.so.3 OLD_LIBS+=usr/lib32/libutil.so.7 OLD_LIBS+=usr/lib32/libuutil.so.1 OLD_LIBS+=usr/lib32/libvgl.so.5 OLD_LIBS+=usr/lib32/libwrap.so.5 OLD_LIBS+=usr/lib32/libypclnt.so.3 OLD_LIBS+=usr/lib32/libz.so.4 OLD_LIBS+=usr/lib32/libzfs.so.1 OLD_LIBS+=usr/lib32/libzpool.so.1 OLD_LIBS+=usr/lib32/pam_chroot.so.4 OLD_LIBS+=usr/lib32/pam_deny.so.4 OLD_LIBS+=usr/lib32/pam_echo.so.4 OLD_LIBS+=usr/lib32/pam_exec.so.4 OLD_LIBS+=usr/lib32/pam_ftpusers.so.4 OLD_LIBS+=usr/lib32/pam_group.so.4 OLD_LIBS+=usr/lib32/pam_guest.so.4 OLD_LIBS+=usr/lib32/pam_krb5.so.4 OLD_LIBS+=usr/lib32/pam_ksu.so.4 OLD_LIBS+=usr/lib32/pam_lastlog.so.4 OLD_LIBS+=usr/lib32/pam_login_access.so.4 OLD_LIBS+=usr/lib32/pam_nologin.so.4 OLD_LIBS+=usr/lib32/pam_opie.so.4 OLD_LIBS+=usr/lib32/pam_opieaccess.so.4 OLD_LIBS+=usr/lib32/pam_passwdqc.so.4 OLD_LIBS+=usr/lib32/pam_permit.so.4 OLD_LIBS+=usr/lib32/pam_radius.so.4 OLD_LIBS+=usr/lib32/pam_rhosts.so.4 OLD_LIBS+=usr/lib32/pam_rootok.so.4 OLD_LIBS+=usr/lib32/pam_securetty.so.4 OLD_LIBS+=usr/lib32/pam_self.so.4 OLD_LIBS+=usr/lib32/pam_ssh.so.4 OLD_LIBS+=usr/lib32/pam_tacplus.so.4 OLD_LIBS+=usr/lib32/pam_unix.so.4 # 20090718: the gdm pam.d file is no longer required. OLD_FILES+=etc/pam.d/gdm # 20090714: net_add_domain(9) renamed to domain_add(9) OLD_FILES+=usr/share/man/man9/net_add_domain.9.gz # 20090713: vimage container structs removed. 
OLD_FILES+=usr/include/netinet/vinet.h OLD_FILES+=usr/include/netinet6/vinet6.h OLD_FILES+=usr/include/netipsec/vipsec.h # 20090712: ieee80211.4 -> net80211.4 OLD_FILES+=usr/share/man/man4/ieee80211.4.gz # 20090711: typo fixed, kproc_resume,.9 -> kproc_resume.9 OLD_FILES+=usr/share/man/man9/kproc_resume,.9.gz # 20090709: msgctl.3 msgget.3 msgrcv.3 msgsnd.3 manual pages moved OLD_FILES+=usr/share/man/man3/msgctl.3.gz OLD_FILES+=usr/share/man/man3/msgget.3.gz OLD_FILES+=usr/share/man/man3/msgrcv.3.gz OLD_FILES+=usr/share/man/man3/msgsnd.3.gz # 20090630: old kernel RPC implementation removal OLD_FILES+=usr/include/nfs/rpcv2.h # 20090624: update usbdi(9) OLD_FILES+=usr/share/man/man9/usbd_abort_default_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_abort_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_alloc_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_alloc_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall_async.9.gz OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_toggle.9.gz OLD_FILES+=usr/share/man/man9/usbd_close_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_device2interface_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_do_request_async.9.gz OLD_FILES+=usr/share/man/man9/usbd_do_request_flags_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_endpoint_count.9.gz OLD_FILES+=usr/share/man/man9/usbd_find_edesc.9.gz OLD_FILES+=usr/share/man/man9/usbd_find_idesc.9.gz OLD_FILES+=usr/share/man/man9/usbd_free_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_free_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_buffer.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_desc.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_desc_full.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_config_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_device_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_endpoint_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_interface_altindex.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_interface_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_no_alts.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_quirks.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_speed.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_string.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_string_desc.9.gz OLD_FILES+=usr/share/man/man9/usbd_get_xfer_status.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface2device_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface2endpoint_descriptor.9.gz OLD_FILES+=usr/share/man/man9/usbd_interface_count.9.gz OLD_FILES+=usr/share/man/man9/usbd_open_pipe.9.gz OLD_FILES+=usr/share/man/man9/usbd_open_pipe_intr.9.gz OLD_FILES+=usr/share/man/man9/usbd_pipe2device_handle.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_config_index.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_config_no.9.gz OLD_FILES+=usr/share/man/man9/usbd_set_interface.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_default_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_isoc_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_setup_xfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_sync_transfer.9.gz OLD_FILES+=usr/share/man/man9/usbd_transfer.9.gz OLD_FILES+=usr/share/man/man9/usb_find_desc.9.gz # 20090623: number of headers needed for a usb driver reduced OLD_FILES+=usr/include/dev/usb/usb_defs.h OLD_FILES+=usr/include/dev/usb/usb_error.h OLD_FILES+=usr/include/dev/usb/usb_handle_request.h OLD_FILES+=usr/include/dev/usb/usb_hid.h OLD_FILES+=usr/include/dev/usb/usb_lookup.h 
OLD_FILES+=usr/include/dev/usb/usb_mfunc.h OLD_FILES+=usr/include/dev/usb/usb_parse.h OLD_FILES+=usr/include/dev/usb/usb_revision.h # 20090609: devclass_add_driver is no longer public OLD_FILES+=usr/share/man/man9/devclass_add_driver.9.gz OLD_FILES+=usr/share/man/man9/devclass_delete_driver.9.gz OLD_FILES+=usr/share/man/man9/devclass_find_driver.9.gz # 20090605: removal of clists OLD_FILES+=usr/include/sys/clist.h # 20090602: removal of window(1) OLD_FILES+=usr/bin/window OLD_FILES+=usr/share/man/man1/window.1.gz # 20090531: bind 9.6.1rc1 import OLD_LIBS+=usr/lib/liblwres.so.30 # 20090530: removal of early.sh OLD_FILES+=etc/rc.d/early.sh # 20090527: renaming of S{LIST,TAILQ}_REMOVE_NEXT() to _REMOVE_AFTER() OLD_FILES+=usr/share/man/man3/SLIST_REMOVE_NEXT.3.gz OLD_FILES+=usr/share/man/man3/STAILQ_REMOVE_NEXT.3.gz # 20090527: removal of legacy USB stack OLD_FILES+=usr/include/legacy/dev/usb/dsbr100io.h OLD_FILES+=usr/include/legacy/dev/usb/ehcireg.h OLD_FILES+=usr/include/legacy/dev/usb/ehcivar.h OLD_FILES+=usr/include/legacy/dev/usb/hid.h OLD_FILES+=usr/include/legacy/dev/usb/if_urtwreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_urtwvar.h OLD_FILES+=usr/include/legacy/dev/usb/ohcireg.h OLD_FILES+=usr/include/legacy/dev/usb/ohcivar.h OLD_FILES+=usr/include/legacy/dev/usb/rio500_usb.h OLD_FILES+=usr/include/legacy/dev/usb/rt2573_ucode.h OLD_FILES+=usr/include/legacy/dev/usb/sl811hsreg.h OLD_FILES+=usr/include/legacy/dev/usb/sl811hsvar.h OLD_FILES+=usr/include/legacy/dev/usb/ubser.h OLD_FILES+=usr/include/legacy/dev/usb/ucomvar.h OLD_FILES+=usr/include/legacy/dev/usb/udbp.h OLD_FILES+=usr/include/legacy/dev/usb/uftdireg.h OLD_FILES+=usr/include/legacy/dev/usb/ugraphire_rdesc.h OLD_FILES+=usr/include/legacy/dev/usb/uhcireg.h OLD_FILES+=usr/include/legacy/dev/usb/uhcivar.h OLD_FILES+=usr/include/legacy/dev/usb/usb.h OLD_FILES+=usr/include/legacy/dev/usb/usb_mem.h OLD_FILES+=usr/include/legacy/dev/usb/usb_port.h OLD_FILES+=usr/include/legacy/dev/usb/usb_quirks.h OLD_FILES+=usr/include/legacy/dev/usb/usbcdc.h OLD_FILES+=usr/include/legacy/dev/usb/usbdi.h OLD_FILES+=usr/include/legacy/dev/usb/usbdi_util.h OLD_FILES+=usr/include/legacy/dev/usb/usbdivar.h OLD_FILES+=usr/include/legacy/dev/usb/usbhid.h OLD_FILES+=usr/include/legacy/dev/usb/uxb360gp_rdesc.h OLD_DIRS+=usr/include/legacy/dev/usb OLD_DIRS+=usr/include/legacy/dev OLD_DIRS+=usr/include/legacy # 20090526: removal of makekey(8) OLD_FILES+=usr/libexec/makekey OLD_FILES+=usr/share/man/man8/makekey.8.gz # 20090522: removal of University of Michigan NFSv4 client OLD_FILES+=etc/rc.d/idmapd OLD_FILES+=sbin/idmapd OLD_FILES+=sbin/mount_nfs4 OLD_FILES+=usr/share/man/man8/idmapd.8.gz OLD_FILES+=usr/share/man/man8/mount_nfs4.8.gz # 20090513: removal of legacy versions of USB network interface drivers OLD_FILES+=usr/include/legacy/dev/usb/if_upgtvar.h OLD_FILES+=usr/include/legacy/dev/usb/usb_ethersubr.h # 20090417: removal of legacy versions of USB network interface drivers OLD_FILES+=usr/include/legacy/dev/usb/if_auereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_axereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_cdcereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_cuereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_kuereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_ruereg.h OLD_FILES+=usr/include/legacy/dev/usb/if_rumreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_rumvar.h OLD_FILES+=usr/include/legacy/dev/usb/if_udavreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_uralreg.h OLD_FILES+=usr/include/legacy/dev/usb/if_uralvar.h 
OLD_FILES+=usr/include/legacy/dev/usb/if_zydfw.h OLD_FILES+=usr/include/legacy/dev/usb/if_zydreg.h OLD_FILES+=usr/include/legacy/dev/usb/kue_fw.h # 20090416: removal of ar(4), ray(4), sr(4), raycontrol(8) OLD_FILES+=usr/sbin/raycontrol OLD_FILES+=usr/share/man/man4/i386/ar.4.gz OLD_FILES+=usr/share/man/man4/i386/ray.4.gz OLD_FILES+=usr/share/man/man4/i386/sr.4.gz OLD_FILES+=usr/share/man/man8/raycontrol.8.gz # 20090410: VOP_LEASE.9 removed OLD_FILES+=usr/share/man/man9/VOP_LEASE.9.gz # 20090406: usb_sw_transfer.h removed OLD_FILES+=usr/include/dev/usb/usb_sw_transfer.h # 20090405: removal of if_ppp(4) and if_sl(4) OLD_FILES+=sbin/slattach rescue/slattach OLD_FILES+=sbin/startslip rescue/startslip OLD_FILES+=usr/include/net/if_ppp.h OLD_FILES+=usr/include/net/if_pppvar.h OLD_FILES+=usr/include/net/if_slvar.h OLD_FILES+=usr/include/net/ppp_comp.h OLD_FILES+=usr/include/net/slip.h OLD_FILES+=usr/sbin/sliplogin OLD_FILES+=usr/sbin/slstat OLD_FILES+=usr/sbin/pppd OLD_FILES+=usr/sbin/pppstats OLD_FILES+=usr/share/man/man1/startslip.1.gz OLD_FILES+=usr/share/man/man4/if_ppp.4.gz OLD_FILES+=usr/share/man/man4/if_sl.4.gz OLD_FILES+=usr/share/man/man4/ppp.4.gz OLD_FILES+=usr/share/man/man4/sl.4.gz OLD_FILES+=usr/share/man/man8/pppd.8.gz OLD_FILES+=usr/share/man/man8/pppstats.8.gz OLD_FILES+=usr/share/man/man8/slattach.8.gz OLD_FILES+=usr/share/man/man8/slip.8.gz OLD_FILES+=usr/share/man/man8/sliplogin.8.gz OLD_FILES+=usr/share/man/man8/slstat.8.gz # 20090321: libpcap upgraded to 1.0.0 OLD_LIBS+=lib/libpcap.so.5 OLD_LIBS+=usr/lib32/libpcap.so.5 # 20090319: uscanner(4) has been removed OLD_FILES+=usr/share/man/man4/uscanner.4.gz # 20090313: k8temp(4) renamed to amdtemp(4) OLD_FILES+=usr/share/man/man4/k8temp.4.gz # 20090308: libusb.so.1 renamed OLD_LIBS+=usr/lib/libusb20.so.1 OLD_FILES+=usr/lib/libusb20.a OLD_FILES+=usr/lib/libusb20.so OLD_FILES+=usr/lib/libusb20_p.a OLD_FILES+=usr/include/libusb20_compat01.h OLD_FILES+=usr/include/libusb20_compat10.h OLD_LIBS+=usr/lib32/libusb20.so.1 OLD_FILES+=usr/lib32/libusb20.a OLD_FILES+=usr/lib32/libusb20.so OLD_FILES+=usr/lib32/libusb20_p.a # 20090226: libmp(3) functions renamed OLD_LIBS+=usr/lib/libmp.so.6 OLD_LIBS+=usr/lib32/libmp.so.6 # 20090223: changeover of USB stacks OLD_FILES+=usr/include/dev/usb2/include/ufm2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/urio2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/usb2_cdc.h OLD_FILES+=usr/include/dev/usb2/include/usb2_defs.h OLD_FILES+=usr/include/dev/usb2/include/usb2_devid.h OLD_FILES+=usr/include/dev/usb2/include/usb2_devtable.h OLD_FILES+=usr/include/dev/usb2/include/usb2_endian.h OLD_FILES+=usr/include/dev/usb2/include/usb2_error.h OLD_FILES+=usr/include/dev/usb2/include/usb2_hid.h OLD_FILES+=usr/include/dev/usb2/include/usb2_ioctl.h OLD_FILES+=usr/include/dev/usb2/include/usb2_mfunc.h OLD_FILES+=usr/include/dev/usb2/include/usb2_revision.h OLD_FILES+=usr/include/dev/usb2/include/usb2_standard.h OLD_DIRS+=usr/include/dev/usb2/include OLD_DIRS+=usr/include/dev/usb2 OLD_FILES+=usr/include/dev/usb/dsbr100io.h OLD_FILES+=usr/include/dev/usb/ehcireg.h OLD_FILES+=usr/include/dev/usb/ehcivar.h OLD_FILES+=usr/include/dev/usb/hid.h OLD_FILES+=usr/include/dev/usb/if_auereg.h OLD_FILES+=usr/include/dev/usb/if_axereg.h OLD_FILES+=usr/include/dev/usb/if_cdcereg.h OLD_FILES+=usr/include/dev/usb/if_cuereg.h OLD_FILES+=usr/include/dev/usb/if_kuereg.h OLD_FILES+=usr/include/dev/usb/if_ruereg.h OLD_FILES+=usr/include/dev/usb/if_rumreg.h OLD_FILES+=usr/include/dev/usb/if_rumvar.h 
OLD_FILES+=usr/include/dev/usb/if_udavreg.h OLD_FILES+=usr/include/dev/usb/if_upgtvar.h OLD_FILES+=usr/include/dev/usb/if_uralreg.h OLD_FILES+=usr/include/dev/usb/if_uralvar.h OLD_FILES+=usr/include/dev/usb/if_urtwreg.h OLD_FILES+=usr/include/dev/usb/if_urtwvar.h OLD_FILES+=usr/include/dev/usb/if_zydfw.h OLD_FILES+=usr/include/dev/usb/if_zydreg.h OLD_FILES+=usr/include/dev/usb/kue_fw.h OLD_FILES+=usr/include/dev/usb/ohcireg.h OLD_FILES+=usr/include/dev/usb/ohcivar.h OLD_FILES+=usr/include/dev/usb/rio500_usb.h OLD_FILES+=usr/include/dev/usb/rt2573_ucode.h OLD_FILES+=usr/include/dev/usb/sl811hsreg.h OLD_FILES+=usr/include/dev/usb/sl811hsvar.h OLD_FILES+=usr/include/dev/usb/ubser.h OLD_FILES+=usr/include/dev/usb/ucomvar.h OLD_FILES+=usr/include/dev/usb/udbp.h OLD_FILES+=usr/include/dev/usb/uftdireg.h OLD_FILES+=usr/include/dev/usb/ugraphire_rdesc.h OLD_FILES+=usr/include/dev/usb/uhcireg.h OLD_FILES+=usr/include/dev/usb/uhcivar.h OLD_FILES+=usr/include/dev/usb/usb_ethersubr.h OLD_FILES+=usr/include/dev/usb/usb_mem.h OLD_FILES+=usr/include/dev/usb/usb_port.h OLD_FILES+=usr/include/dev/usb/usb_quirks.h OLD_FILES+=usr/include/dev/usb/usbcdc.h OLD_FILES+=usr/include/dev/usb/usbdivar.h OLD_FILES+=usr/include/dev/usb/uxb360gp_rdesc.h OLD_FILES+=usr/sbin/usbdevs OLD_FILES+=usr/share/man/man8/usbdevs.8.gz # 20090203: removal of pccard header files OLD_FILES+=usr/include/pccard/cardinfo.h OLD_FILES+=usr/include/pccard/cis.h OLD_DIRS+=usr/include/pccard # 20090203: adding_user.8 moved to adding_user.7 OLD_FILES+=usr/share/man/man8/adding_user.8.gz # 20090122: tzdata2009a import OLD_FILES+=usr/share/zoneinfo/Asia/Katmandu # 20090102: file 4.26 import OLD_FILES+=usr/share/misc/magic.mime OLD_FILES+=usr/share/misc/magic.mime.mgc # 20081223: bind 9.4.3 import, nsupdate.8 moved to nsupdate.1 OLD_FILES+=usr/share/man/man8/nsupdate.8.gz # 20081223: ipprotosw.h removed OLD_FILES+=usr/include/netinet/ipprotosw.h # 20081123: vfs_mountedon.9 removed OLD_FILES+=usr/share/man/man9/vfs_mountedon.9.gz # 20081023: FREE.9 and MALLOC.9 removed OLD_FILES+=usr/share/man/man9/FREE.9.gz OLD_FILES+=usr/share/man/man9/MALLOC.9.gz # 20080928: removal of inaccurate device_ids(9) manual page OLD_FILES+=usr/share/man/man9/device_ids.9.gz OLD_FILES+=usr/share/man/man9/major.9.gz OLD_FILES+=usr/share/man/man9/minor.9.gz OLD_FILES+=usr/share/man/man9/umajor.9.gz OLD_FILES+=usr/share/man/man9/uminor.9.gz # 20080917: removal of manpage for axed kernel primitive suser(9) OLD_FILES+=usr/share/man/man9/suser.9.gz OLD_FILES+=usr/share/man/man9/suser_cred.9.gz # 20080913: pax removed from rescue OLD_FILES+=rescue/pax # 20080823: removal of unneeded pt_chown, to implement grantpt(3) OLD_FILES+=usr/libexec/pt_chown # 20080822: ntp 4.2.4p5 import OLD_FILES+=usr/share/doc/ntp/driver23.html OLD_FILES+=usr/share/doc/ntp/driver24.html # 20080821: several man pages moved from man4.i386 to man4 .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/acpi_aiboost.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_asus.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_fujitsu.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_ibm.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_panasonic.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_sony.4.gz OLD_FILES+=usr/share/man/man4/i386/acpi_toshiba.4.gz OLD_FILES+=usr/share/man/man4/i386/ichwd.4.gz OLD_FILES+=usr/share/man/man4/i386/if_ndis.4.gz OLD_FILES+=usr/share/man/man4/i386/io.4.gz OLD_FILES+=usr/share/man/man4/i386/linux.4.gz OLD_FILES+=usr/share/man/man4/i386/ndis.4.gz .endif # 20080820: MPSAFE TTY layer integrated 
OLD_FILES+=usr/include/sys/linedisc.h OLD_FILES+=usr/share/man/man3/posix_openpt.3.gz # 20080725: sgtty.h removed OLD_FILES+=usr/include/sgtty.h # 20080706: bsdlabel(8) removed on powerpc .if ${TARGET_ARCH} == "powerpc" OLD_FILES+=sbin/bsdlabel OLD_FILES+=usr/share/man/man8/bsdlabel.8.gz .endif # 20080704: sbsh(4) removed OLD_FILES+=usr/share/man/man4/if_sbsh.4.gz OLD_FILES+=usr/share/man/man4/sbsh.4.gz # 20080704: cnw(4) removed OLD_FILES+=usr/share/man/man4/if_cnw.4.gz OLD_FILES+=usr/share/man/man4/cnw.4.gz # 20080704: oltr(4) removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/share/man/man4/i386/if_oltr.4.gz OLD_FILES+=usr/share/man/man4/i386/oltr.4.gz .endif # 20080704: arl(4) removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/sbin/arlcontrol OLD_FILES+=usr/share/man/man4/i386/arl.4.gz OLD_FILES+=usr/share/man/man8/arlcontrol.8.gz .endif # 20080703: sunlabel only for sparc64 .if ${TARGET_ARCH} != "sparc64" OLD_FILES+=sbin/sunlabel OLD_FILES+=usr/share/man/man8/sunlabel.8.gz .endif # 20080701: wpa_supplicant.conf moved to share/examples/etc/ OLD_FILES+=usr/share/examples/wpa_supplicant/wpa_supplicant.conf OLD_DIRS+=usr/share/examples/wpa_supplicant # 20080614: pecoff image activator removed .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/pecoff_machdep.h .endif # 20080614: sgtty removed OLD_FILES+=usr/include/sys/ttychars.h OLD_FILES+=usr/include/sys/ttydev.h OLD_FILES+=usr/share/man/man3/gtty.3.gz OLD_FILES+=usr/share/man/man3/stty.3.gz # 20080609: gpt(8) removed OLD_FILES+=sbin/gpt OLD_FILES+=usr/share/man/man8/gpt.8.gz # 20080525: I4B removed OLD_FILES+=etc/isdn/answer OLD_FILES+=etc/isdn/isdntel OLD_FILES+=etc/isdn/record OLD_FILES+=etc/isdn/tell OLD_FILES+=etc/isdn/tell-record OLD_FILES+=etc/isdn/unknown_incoming OLD_FILES+=etc/isdn/holidays.D OLD_FILES+=etc/isdn/isdnd.rates.A OLD_FILES+=etc/isdn/isdnd.rates.D OLD_FILES+=etc/isdn/isdnd.rates.F OLD_FILES+=etc/isdn/isdnd.rates.L OLD_FILES+=etc/isdn/isdnd.rates.UK.BT OLD_FILES+=etc/isdn/isdnd.rc.sample OLD_FILES+=etc/isdn/isdntel.alias.sample OLD_DIRS+=etc/isdn OLD_FILES+=etc/rc.d/isdnd OLD_FILES+=usr/include/i4b/i4b_cause.h OLD_FILES+=usr/include/i4b/i4b_debug.h OLD_FILES+=usr/include/i4b/i4b_ioctl.h OLD_FILES+=usr/include/i4b/i4b_rbch_ioctl.h OLD_FILES+=usr/include/i4b/i4b_tel_ioctl.h OLD_FILES+=usr/include/i4b/i4b_trace.h OLD_DIRS+=usr/include/i4b OLD_FILES+=usr/sbin/dtmfdecode OLD_FILES+=usr/sbin/g711conv OLD_FILES+=usr/sbin/isdnd OLD_FILES+=usr/sbin/isdndebug OLD_FILES+=usr/sbin/isdndecode OLD_FILES+=usr/sbin/isdnmonitor OLD_FILES+=usr/sbin/isdnphone OLD_FILES+=usr/sbin/isdntel OLD_FILES+=usr/sbin/isdntelctl OLD_FILES+=usr/sbin/isdntrace OLD_FILES+=usr/share/isdn/0.al OLD_FILES+=usr/share/isdn/1.al OLD_FILES+=usr/share/isdn/2.al OLD_FILES+=usr/share/isdn/3.al OLD_FILES+=usr/share/isdn/4.al OLD_FILES+=usr/share/isdn/5.al OLD_FILES+=usr/share/isdn/6.al OLD_FILES+=usr/share/isdn/7.al OLD_FILES+=usr/share/isdn/8.al OLD_FILES+=usr/share/isdn/9.al OLD_FILES+=usr/share/isdn/beep.al OLD_FILES+=usr/share/isdn/msg.al OLD_DIRS+=usr/share/isdn OLD_FILES+=usr/share/man/man1/dtmfdecode.1.gz OLD_FILES+=usr/share/man/man1/g711conv.1.gz OLD_FILES+=usr/share/man/man4/i4b.4.gz OLD_FILES+=usr/share/man/man4/i4bcapi.4.gz OLD_FILES+=usr/share/man/man4/i4bctl.4.gz OLD_FILES+=usr/share/man/man4/i4bing.4.gz OLD_FILES+=usr/share/man/man4/i4bipr.4.gz OLD_FILES+=usr/share/man/man4/i4bisppp.4.gz OLD_FILES+=usr/share/man/man4/i4bq921.4.gz OLD_FILES+=usr/share/man/man4/i4bq931.4.gz OLD_FILES+=usr/share/man/man4/i4brbch.4.gz 
OLD_FILES+=usr/share/man/man4/i4btel.4.gz OLD_FILES+=usr/share/man/man4/i4btrc.4.gz OLD_FILES+=usr/share/man/man4/iavc.4.gz OLD_FILES+=usr/share/man/man4/isic.4.gz OLD_FILES+=usr/share/man/man4/ifpi.4.gz OLD_FILES+=usr/share/man/man4/ifpi2.4.gz OLD_FILES+=usr/share/man/man4/ifpnp.4.gz OLD_FILES+=usr/share/man/man4/ihfc.4.gz OLD_FILES+=usr/share/man/man4/itjc.4.gz OLD_FILES+=usr/share/man/man4/iwic.4.gz OLD_FILES+=usr/share/man/man5/isdnd.rc.5.gz OLD_FILES+=usr/share/man/man5/isdnd.rates.5.gz OLD_FILES+=usr/share/man/man5/isdnd.acct.5.gz OLD_FILES+=usr/share/man/man8/isdnd.8.gz OLD_FILES+=usr/share/man/man8/isdndebug.8.gz OLD_FILES+=usr/share/man/man8/isdndecode.8.gz OLD_FILES+=usr/share/man/man8/isdnmonitor.8.gz OLD_FILES+=usr/share/man/man8/isdnphone.8.gz OLD_FILES+=usr/share/man/man8/isdntel.8.gz OLD_FILES+=usr/share/man/man8/isdntelctl.8.gz OLD_FILES+=usr/share/man/man8/isdntrace.8.gz OLD_FILES+=usr/share/examples/isdn/contrib/README OLD_FILES+=usr/share/examples/isdn/contrib/anleitung.ppp OLD_FILES+=usr/share/examples/isdn/contrib/answer.c OLD_FILES+=usr/share/examples/isdn/contrib/answer.sh OLD_FILES+=usr/share/examples/isdn/contrib/convert.sh OLD_FILES+=usr/share/examples/isdn/contrib/hplay.c OLD_FILES+=usr/share/examples/isdn/contrib/i4b-ppp-newbie.txt OLD_FILES+=usr/share/examples/isdn/contrib/isdnctl OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct.pl OLD_FILES+=usr/share/examples/isdn/contrib/isdntelmux.c OLD_FILES+=usr/share/examples/isdn/contrib/mrtg-isp0.sh OLD_FILES+=usr/share/examples/isdn/i4brunppp/Makefile OLD_FILES+=usr/share/examples/isdn/i4brunppp/README OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp-isdnd.rc OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.8 OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.c OLD_FILES+=usr/share/examples/isdn/v21/Makefile OLD_FILES+=usr/share/examples/isdn/v21/README OLD_FILES+=usr/share/examples/isdn/v21/v21modem.c OLD_FILES+=usr/share/examples/isdn/FAQ OLD_FILES+=usr/share/examples/isdn/KERNEL OLD_FILES+=usr/share/examples/isdn/Overview OLD_FILES+=usr/share/examples/isdn/README OLD_FILES+=usr/share/examples/isdn/ROADMAP OLD_FILES+=usr/share/examples/isdn/ReleaseNotes OLD_FILES+=usr/share/examples/isdn/Resources OLD_FILES+=usr/share/examples/isdn/SupportedCards OLD_FILES+=usr/share/examples/isdn/ThankYou OLD_DIRS+=usr/share/examples/isdn/contrib OLD_DIRS+=usr/share/examples/isdn/i4brunppp OLD_DIRS+=usr/share/examples/isdn/v21 OLD_DIRS+=usr/share/examples/isdn OLD_FILES+=usr/share/examples/ppp/isdnd.rc OLD_FILES+=usr/share/examples/ppp/ppp.conf.isdn # 20080525: ng_atmpif removed OLD_FILES+=usr/include/netgraph/atm/ng_atmpif.h OLD_FILES+=usr/share/man/man4/ng_atmpif.4.gz # 20080522: pmap_addr_hint removed OLD_FILES+=usr/share/man/man9/pmap_addr_hint.9.gz # 20080517: ipsec_osdep.h removed OLD_FILES+=usr/include/netipsec/ipsec_osdep.h # 20080507: heimdal 1.1 import OLD_LIBS+=usr/lib/libasn1.so.9 OLD_LIBS+=usr/lib/libgssapi.so.9 OLD_LIBS+=usr/lib/libgssapi_krb5.so.9 OLD_LIBS+=usr/lib/libhdb.so.9 OLD_LIBS+=usr/lib/libkadm5clnt.so.9 OLD_LIBS+=usr/lib/libkadm5srv.so.9 OLD_LIBS+=usr/lib/libkafs5.so.9 OLD_LIBS+=usr/lib/libkrb5.so.9 OLD_LIBS+=usr/lib/libroken.so.9 OLD_LIBS+=usr/lib32/libgssapi.so.9 # 20080420: Symbol card support dropped OLD_FILES+=usr/include/dev/wi/spectrum24t_cf.h # 20080420: awi removal OLD_FILES+=usr/share/man/man4/awi.4.gz OLD_FILES+=usr/share/man/man4/if_awi.4.gz # 20080331: pkg_sign has been removed OLD_FILES+=usr/sbin/pkg_check 
OLD_FILES+=usr/sbin/pkg_sign
OLD_FILES+=usr/share/man/man1/pkg_check.1.gz
OLD_FILES+=usr/share/man/man1/pkg_sign.1.gz
# 20080325: tzdata2008b import
OLD_FILES+=usr/share/zoneinfo/Asia/Calcutta
OLD_FILES+=usr/share/zoneinfo/Asia/Saigon
# 20080314: stack_print(9) mlink fixed
OLD_FILES+=usr/share/man/man9/stack_printf.9.gz
# 20080312: libkse removal
OLD_FILES+=usr/include/sys/kse.h
OLD_FILES+=usr/lib/libkse.so
OLD_LIBS+=usr/lib/libkse.so.3
OLD_FILES+=usr/share/man/man2/kse.2.gz
OLD_FILES+=usr/share/man/man2/kse_create.2.gz
OLD_FILES+=usr/share/man/man2/kse_exit.2.gz
OLD_FILES+=usr/share/man/man2/kse_release.2.gz
OLD_FILES+=usr/share/man/man2/kse_switchin.2.gz
OLD_FILES+=usr/share/man/man2/kse_thr_interrupt.2.gz
OLD_FILES+=usr/share/man/man2/kse_wakeup.2.gz
OLD_FILES+=usr/lib32/libkse.so
OLD_LIBS+=usr/lib32/libkse.so.3
# 20080225: bsdar/bsdranlib rename to ar/ranlib
OLD_FILES+=usr/bin/bsdar
OLD_FILES+=usr/bin/bsdranlib
OLD_FILES+=usr/share/man/man1/bsdar.1.gz
OLD_FILES+=usr/share/man/man1/bsdranlib.1.gz
# 20080220: geom_lvm rename to geom_linux_lvm
OLD_FILES+=usr/share/man/man4/geom_lvm.4.gz
# 20080126: oldcard.4 removal
OLD_FILES+=usr/share/man/man4/card.4.gz
OLD_FILES+=usr/share/man/man4/oldcard.4.gz
# 20080122: Removed from the tree
OLD_FILES+=usr/share/man/man9/BUF_REFCNT.9.gz
# 20080108: Moved to section 2
OLD_FILES+=usr/share/man/man3/shm_open.3.gz
OLD_FILES+=usr/share/man/man3/shm_unlink.3.gz
# 20071207: Merged with fortunes-o.real
OLD_FILES+=usr/share/games/fortune/fortunes2-o
OLD_FILES+=usr/share/games/fortune/fortunes2-o.dat
# 20071201: Removal of XRPU driver
OLD_FILES+=usr/include/sys/xrpuio.h
# 20071129: Disabled static versions of libkse by default
OLD_FILES+=usr/lib/libkse.a
OLD_FILES+=usr/lib/libkse_p.a
OLD_FILES+=usr/lib/libkse_pic.a
OLD_FILES+=usr/lib32/libkse.a
OLD_FILES+=usr/lib32/libkse_p.a
OLD_FILES+=usr/lib32/libkse_pic.a
# 20071129: Removed a Solaris compatibility header
OLD_FILES+=usr/include/sys/_elf_solaris.h
# 20071125: Renamed to pmc_get_msr()
OLD_FILES+=usr/share/man/man3/pmc_x86_get_msr.3.gz
# 20071108: Removed very crufty OLDCARD support file
OLD_FILES+=etc/defaults/pccard.conf
# 20071025: rc.d/nfslocking superseded by rc.d/lockd and rc.d/statd
OLD_FILES+=etc/rc.d/nfslocking
# 20070930: rename of cached to nscd
OLD_FILES+=etc/cached.conf
OLD_FILES+=etc/rc.d/cached
OLD_FILES+=usr/sbin/cached
OLD_FILES+=usr/share/man/man5/cached.conf.5.gz
OLD_FILES+=usr/share/man/man8/cached.8.gz
# 20070807: removal of PowerPC-specific header file.
.if ${TARGET_ARCH} == "powerpc" OLD_FILES+=usr/include/machine/interruptvar.h .endif # 20070801: fast_ipsec.4 gone OLD_FILES+=usr/share/man/man4/fast_ipsec.4.gz # 20070715: netatm temporarily disconnected (removed 20080525) OLD_FILES+=rescue/atm OLD_FILES+=rescue/fore_dnld OLD_FILES+=rescue/ilmid OLD_FILES+=sbin/atm OLD_FILES+=sbin/fore_dnld OLD_FILES+=sbin/ilmid OLD_FILES+=usr/include/libatm.h OLD_FILES+=usr/include/netatm/atm.h OLD_FILES+=usr/include/netatm/atm_cm.h OLD_FILES+=usr/include/netatm/atm_if.h OLD_FILES+=usr/include/netatm/atm_ioctl.h OLD_FILES+=usr/include/netatm/atm_pcb.h OLD_FILES+=usr/include/netatm/atm_sap.h OLD_FILES+=usr/include/netatm/atm_sigmgr.h OLD_FILES+=usr/include/netatm/atm_stack.h OLD_FILES+=usr/include/netatm/atm_sys.h OLD_FILES+=usr/include/netatm/atm_var.h OLD_FILES+=usr/include/netatm/atm_vc.h OLD_FILES+=usr/include/netatm/ipatm/ipatm.h OLD_FILES+=usr/include/netatm/ipatm/ipatm_serv.h OLD_FILES+=usr/include/netatm/ipatm/ipatm_var.h OLD_FILES+=usr/include/netatm/port.h OLD_FILES+=usr/include/netatm/queue.h OLD_FILES+=usr/include/netatm/sigpvc/sigpvc_var.h OLD_FILES+=usr/include/netatm/spans/spans_cls.h OLD_FILES+=usr/include/netatm/spans/spans_kxdr.h OLD_FILES+=usr/include/netatm/spans/spans_var.h OLD_FILES+=usr/include/netatm/uni/sscf_uni.h OLD_FILES+=usr/include/netatm/uni/sscf_uni_var.h OLD_FILES+=usr/include/netatm/uni/sscop.h OLD_FILES+=usr/include/netatm/uni/sscop_misc.h OLD_FILES+=usr/include/netatm/uni/sscop_pdu.h OLD_FILES+=usr/include/netatm/uni/sscop_var.h OLD_FILES+=usr/include/netatm/uni/uni.h OLD_FILES+=usr/include/netatm/uni/uniip_var.h OLD_FILES+=usr/include/netatm/uni/unisig.h OLD_FILES+=usr/include/netatm/uni/unisig_decode.h OLD_FILES+=usr/include/netatm/uni/unisig_mbuf.h OLD_FILES+=usr/include/netatm/uni/unisig_msg.h OLD_FILES+=usr/include/netatm/uni/unisig_print.h OLD_FILES+=usr/include/netatm/uni/unisig_var.h OLD_FILES+=usr/lib/libatm.a OLD_FILES+=usr/lib/libatm_p.a OLD_FILES+=usr/sbin/atmarpd OLD_FILES+=usr/sbin/scspd OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atm.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atmarpd.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/fore_dnld.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/ilmid.8.gz OLD_FILES+=usr/share/man/en.ISO8859-1/man8/scspd.8.gz OLD_FILES+=usr/share/man/man8/atm.8.gz OLD_FILES+=usr/share/man/man8/atmarpd.8.gz OLD_FILES+=usr/share/man/man8/fore_dnld.8.gz OLD_FILES+=usr/share/man/man8/ilmid.8.gz OLD_FILES+=usr/share/man/man8/scspd.8.gz OLD_FILES+=usr/share/examples/atm/NOTES OLD_FILES+=usr/share/examples/atm/README OLD_FILES+=usr/share/examples/atm/Startup OLD_FILES+=usr/share/examples/atm/atm-config.sh OLD_FILES+=usr/share/examples/atm/atm-sockets.txt OLD_FILES+=usr/share/examples/atm/cpcs-design.txt OLD_FILES+=usr/share/examples/atm/fore-microcode.txt OLD_FILES+=usr/share/examples/atm/sscf-design.txt OLD_FILES+=usr/share/examples/atm/sscop-design.txt OLD_LIBS+=lib/libatm.so.5 OLD_LIBS+=usr/lib/libatm.so OLD_DIRS+=usr/include/netatm/sigpvc OLD_DIRS+=usr/include/netatm/spans OLD_DIRS+=usr/include/netatm/ipatm OLD_DIRS+=usr/include/netatm/uni OLD_DIRS+=usr/include/netatm OLD_DIRS+=usr/share/examples/atm OLD_FILES+=usr/lib32/libatm.a OLD_FILES+=usr/lib32/libatm.so OLD_LIBS+=usr/lib32/libatm.so.5 OLD_FILES+=usr/lib32/libatm_p.a # 20070705: I4B headers repo-copied to include/i4b/ .if ${TARGET_ARCH} == "i386" OLD_FILES+=usr/include/machine/i4b_cause.h OLD_FILES+=usr/include/machine/i4b_debug.h OLD_FILES+=usr/include/machine/i4b_ioctl.h OLD_FILES+=usr/include/machine/i4b_rbch_ioctl.h 
OLD_FILES+=usr/include/machine/i4b_tel_ioctl.h OLD_FILES+=usr/include/machine/i4b_trace.h .endif # 20070703: pf 4.1 import OLD_FILES+=usr/libexec/ftp-proxy # 20070701: KAME IPSec removal OLD_FILES+=usr/include/netinet6/ah.h OLD_FILES+=usr/include/netinet6/ah6.h OLD_FILES+=usr/include/netinet6/ah_aesxcbcmac.h OLD_FILES+=usr/include/netinet6/esp.h OLD_FILES+=usr/include/netinet6/esp6.h OLD_FILES+=usr/include/netinet6/esp_aesctr.h OLD_FILES+=usr/include/netinet6/esp_camellia.h OLD_FILES+=usr/include/netinet6/esp_rijndael.h OLD_FILES+=usr/include/netinet6/ipsec.h OLD_FILES+=usr/include/netinet6/ipsec6.h OLD_FILES+=usr/include/netinet6/ipcomp.h OLD_FILES+=usr/include/netinet6/ipcomp6.h OLD_FILES+=usr/include/netkey/key.h OLD_FILES+=usr/include/netkey/key_debug.h OLD_FILES+=usr/include/netkey/key_var.h OLD_FILES+=usr/include/netkey/keydb.h OLD_FILES+=usr/include/netkey/keysock.h OLD_DIRS+=usr/include/netkey # 20070701: remove wicontrol OLD_FILES+=usr/sbin/wicontrol OLD_FILES+=usr/share/man/man8/wicontrol.8.gz # 20070625: umapfs removal OLD_FILES+=rescue/mount_umapfs OLD_FILES+=sbin/mount_umapfs OLD_FILES+=usr/include/fs/umapfs/umap.h OLD_FILES+=usr/share/man/man8/mount_umapfs.8.gz OLD_DIRS+=usr/include/fs/umapfs # 20070618: Removal of the PROTO.localhost* files OLD_FILES+=etc/namedb/PROTO.localhost-v6.rev OLD_FILES+=etc/namedb/PROTO.localhost.rev OLD_FILES+=etc/namedb/make-localhost # 20070618: shared library version bump OLD_LIBS+=lib/libalias.so.5 OLD_LIBS+=lib/libbsnmp.so.3 OLD_LIBS+=lib/libncurses.so.6 OLD_LIBS+=lib/libncursesw.so.6 OLD_LIBS+=lib/libreadline.so.6 OLD_LIBS+=usr/lib/libdialog.so.5 OLD_LIBS+=usr/lib/libgnuregex.so.3 OLD_LIBS+=usr/lib/libhistory.so.6 OLD_LIBS+=usr/lib/libpam.so.3 OLD_LIBS+=usr/lib/libssh.so.3 OLD_LIBS+=usr/lib/pam_chroot.so.3 OLD_LIBS+=usr/lib/pam_deny.so.3 OLD_LIBS+=usr/lib/pam_echo.so.3 OLD_LIBS+=usr/lib/pam_exec.so.3 OLD_LIBS+=usr/lib/pam_ftpusers.so.3 OLD_LIBS+=usr/lib/pam_group.so.3 OLD_LIBS+=usr/lib/pam_guest.so.3 OLD_LIBS+=usr/lib/pam_krb5.so.3 OLD_LIBS+=usr/lib/pam_ksu.so.3 OLD_LIBS+=usr/lib/pam_lastlog.so.3 OLD_LIBS+=usr/lib/pam_login_access.so.3 OLD_LIBS+=usr/lib/pam_nologin.so.3 OLD_LIBS+=usr/lib/pam_opie.so.3 OLD_LIBS+=usr/lib/pam_opieaccess.so.3 OLD_LIBS+=usr/lib/pam_passwdqc.so.3 OLD_LIBS+=usr/lib/pam_permit.so.3 OLD_LIBS+=usr/lib/pam_radius.so.3 OLD_LIBS+=usr/lib/pam_rhosts.so.3 OLD_LIBS+=usr/lib/pam_rootok.so.3 OLD_LIBS+=usr/lib/pam_securetty.so.3 OLD_LIBS+=usr/lib/pam_self.so.3 OLD_LIBS+=usr/lib/pam_ssh.so.3 OLD_LIBS+=usr/lib/pam_tacplus.so.3 OLD_LIBS+=usr/lib/pam_unix.so.3 OLD_LIBS+=usr/lib/snmp_atm.so.4 OLD_LIBS+=usr/lib/snmp_bridge.so.4 OLD_LIBS+=usr/lib/snmp_hostres.so.4 OLD_LIBS+=usr/lib/snmp_mibII.so.4 OLD_LIBS+=usr/lib/snmp_netgraph.so.4 OLD_LIBS+=usr/lib/snmp_pf.so.4 OLD_LIBS+=usr/lib32/libalias.so.5 OLD_LIBS+=usr/lib32/libbsnmp.so.3 OLD_LIBS+=usr/lib32/libdialog.so.5 OLD_LIBS+=usr/lib32/libgnuregex.so.3 OLD_LIBS+=usr/lib32/libhistory.so.6 OLD_LIBS+=usr/lib32/libncurses.so.6 OLD_LIBS+=usr/lib32/libncursesw.so.6 OLD_LIBS+=usr/lib32/libpam.so.3 OLD_LIBS+=usr/lib32/libreadline.so.6 OLD_LIBS+=usr/lib32/libssh.so.3 OLD_LIBS+=usr/lib32/pam_chroot.so.3 OLD_LIBS+=usr/lib32/pam_deny.so.3 OLD_LIBS+=usr/lib32/pam_echo.so.3 OLD_LIBS+=usr/lib32/pam_exec.so.3 OLD_LIBS+=usr/lib32/pam_ftpusers.so.3 OLD_LIBS+=usr/lib32/pam_group.so.3 OLD_LIBS+=usr/lib32/pam_guest.so.3 OLD_LIBS+=usr/lib32/pam_krb5.so.3 OLD_LIBS+=usr/lib32/pam_ksu.so.3 OLD_LIBS+=usr/lib32/pam_lastlog.so.3 OLD_LIBS+=usr/lib32/pam_login_access.so.3 OLD_LIBS+=usr/lib32/pam_nologin.so.3 
OLD_LIBS+=usr/lib32/pam_opie.so.3 OLD_LIBS+=usr/lib32/pam_opieaccess.so.3 OLD_LIBS+=usr/lib32/pam_passwdqc.so.3 OLD_LIBS+=usr/lib32/pam_permit.so.3 OLD_LIBS+=usr/lib32/pam_radius.so.3 OLD_LIBS+=usr/lib32/pam_rhosts.so.3 OLD_LIBS+=usr/lib32/pam_rootok.so.3 OLD_LIBS+=usr/lib32/pam_securetty.so.3 OLD_LIBS+=usr/lib32/pam_self.so.3 OLD_LIBS+=usr/lib32/pam_ssh.so.3 OLD_LIBS+=usr/lib32/pam_tacplus.so.3 OLD_LIBS+=usr/lib32/pam_unix.so.3 # 20070613: IPX over IP tunnel removal OLD_FILES+=usr/include/netipx/ipx_ip.h # 20070605: sched_core removal OLD_FILES+=usr/share/man/man4/sched_core.4.gz # 20070603: BIND 9.4.1 import OLD_LIBS+=usr/lib/liblwres.so.10 # 20070521: shared library version bump OLD_LIBS+=lib/libatm.so.4 OLD_LIBS+=lib/libbegemot.so.2 OLD_LIBS+=lib/libbsdxml.so.2 OLD_LIBS+=lib/libcam.so.3 OLD_LIBS+=lib/libcrypt.so.3 OLD_LIBS+=lib/libdevstat.so.5 OLD_LIBS+=lib/libedit.so.5 OLD_LIBS+=lib/libgeom.so.3 OLD_LIBS+=lib/libipsec.so.2 OLD_LIBS+=lib/libipx.so.3 OLD_LIBS+=lib/libkiconv.so.2 OLD_LIBS+=lib/libkse.so.2 OLD_LIBS+=lib/libkvm.so.3 OLD_LIBS+=lib/libm.so.4 OLD_LIBS+=lib/libmd.so.3 OLD_LIBS+=lib/libpcap.so.4 OLD_LIBS+=lib/libpthread.so.2 OLD_LIBS+=lib/libsbuf.so.3 OLD_LIBS+=lib/libthr.so.2 OLD_LIBS+=lib/libufs.so.3 OLD_LIBS+=lib/libutil.so.6 OLD_LIBS+=lib/libz.so.3 OLD_LIBS+=usr/lib/libbluetooth.so.2 OLD_LIBS+=usr/lib/libbsm.so.1 OLD_LIBS+=usr/lib/libbz2.so.2 OLD_LIBS+=usr/lib/libcalendar.so.3 OLD_LIBS+=usr/lib/libcom_err.so.3 OLD_LIBS+=usr/lib/libdevinfo.so.3 OLD_LIBS+=usr/lib/libfetch.so.4 OLD_LIBS+=usr/lib/libform.so.3 OLD_LIBS+=usr/lib/libformw.so.3 OLD_LIBS+=usr/lib/libftpio.so.6 OLD_LIBS+=usr/lib/libgpib.so.1 OLD_LIBS+=usr/lib/libkse.so.2 OLD_LIBS+=usr/lib/libmagic.so.2 OLD_LIBS+=usr/lib/libmemstat.so.1 OLD_LIBS+=usr/lib/libmenu.so.3 OLD_LIBS+=usr/lib/libmenuw.so.3 OLD_LIBS+=usr/lib/libmilter.so.3 OLD_LIBS+=usr/lib/libmp.so.5 OLD_LIBS+=usr/lib/libncp.so.2 OLD_LIBS+=usr/lib/libnetgraph.so.2 OLD_LIBS+=usr/lib/libngatm.so.2 OLD_LIBS+=usr/lib/libopie.so.4 OLD_LIBS+=usr/lib/libpanel.so.3 OLD_LIBS+=usr/lib/libpanelw.so.3 OLD_LIBS+=usr/lib/libpmc.so.3 OLD_LIBS+=usr/lib/libradius.so.2 OLD_LIBS+=usr/lib/librpcsvc.so.3 OLD_LIBS+=usr/lib/libsdp.so.2 OLD_LIBS+=usr/lib/libsmb.so.2 OLD_LIBS+=usr/lib/libstdc++.so.5 OLD_LIBS+=usr/lib/libtacplus.so.2 OLD_LIBS+=usr/lib/libthr.so.2 OLD_LIBS+=usr/lib/libthread_db.so.2 OLD_LIBS+=usr/lib/libugidfw.so.2 OLD_LIBS+=usr/lib/libusbhid.so.2 OLD_LIBS+=usr/lib/libvgl.so.4 OLD_LIBS+=usr/lib/libwrap.so.4 OLD_LIBS+=usr/lib/libypclnt.so.2 OLD_LIBS+=usr/lib/snmp_bridge.so.3 OLD_LIBS+=usr/lib/snmp_hostres.so.3 OLD_LIBS+=usr/lib32/libatm.so.4 OLD_LIBS+=usr/lib32/libbegemot.so.2 OLD_LIBS+=usr/lib32/libbluetooth.so.2 OLD_LIBS+=usr/lib32/libbsdxml.so.2 OLD_LIBS+=usr/lib32/libbsm.so.1 OLD_LIBS+=usr/lib32/libbz2.so.2 OLD_LIBS+=usr/lib32/libcalendar.so.3 OLD_LIBS+=usr/lib32/libcam.so.3 OLD_LIBS+=usr/lib32/libcom_err.so.3 OLD_LIBS+=usr/lib32/libcrypt.so.3 OLD_LIBS+=usr/lib32/libdevinfo.so.3 OLD_LIBS+=usr/lib32/libdevstat.so.5 OLD_LIBS+=usr/lib32/libedit.so.5 OLD_LIBS+=usr/lib32/libfetch.so.4 OLD_LIBS+=usr/lib32/libform.so.3 OLD_LIBS+=usr/lib32/libformw.so.3 OLD_LIBS+=usr/lib32/libftpio.so.6 OLD_LIBS+=usr/lib32/libgeom.so.3 OLD_LIBS+=usr/lib32/libgpib.so.1 OLD_LIBS+=usr/lib32/libipsec.so.2 OLD_LIBS+=usr/lib32/libipx.so.3 OLD_LIBS+=usr/lib32/libkiconv.so.2 OLD_LIBS+=usr/lib32/libkse.so.2 OLD_LIBS+=usr/lib32/libkvm.so.3 OLD_LIBS+=usr/lib32/libm.so.4 OLD_LIBS+=usr/lib32/libmagic.so.2 OLD_LIBS+=usr/lib32/libmd.so.3 OLD_LIBS+=usr/lib32/libmemstat.so.1 OLD_LIBS+=usr/lib32/libmenu.so.3 
OLD_LIBS+=usr/lib32/libmenuw.so.3 OLD_LIBS+=usr/lib32/libmilter.so.3 OLD_LIBS+=usr/lib32/libmp.so.5 OLD_LIBS+=usr/lib32/libncp.so.2 OLD_LIBS+=usr/lib32/libnetgraph.so.2 OLD_LIBS+=usr/lib32/libngatm.so.2 OLD_LIBS+=usr/lib32/libopie.so.4 OLD_LIBS+=usr/lib32/libpanel.so.3 OLD_LIBS+=usr/lib32/libpanelw.so.3 OLD_LIBS+=usr/lib32/libpcap.so.4 OLD_LIBS+=usr/lib32/libpmc.so.3 OLD_LIBS+=usr/lib32/libpthread.so.2 OLD_LIBS+=usr/lib32/libradius.so.2 OLD_LIBS+=usr/lib32/librpcsvc.so.3 OLD_LIBS+=usr/lib32/libsbuf.so.3 OLD_LIBS+=usr/lib32/libsdp.so.2 OLD_LIBS+=usr/lib32/libsmb.so.2 OLD_LIBS+=usr/lib32/libstdc++.so.5 OLD_LIBS+=usr/lib32/libtacplus.so.2 OLD_LIBS+=usr/lib32/libthr.so.2 OLD_LIBS+=usr/lib32/libthread_db.so.2 OLD_LIBS+=usr/lib32/libufs.so.3 OLD_LIBS+=usr/lib32/libugidfw.so.2 OLD_LIBS+=usr/lib32/libusbhid.so.2 OLD_LIBS+=usr/lib32/libutil.so.6 OLD_LIBS+=usr/lib32/libvgl.so.4 OLD_LIBS+=usr/lib32/libwrap.so.4 OLD_LIBS+=usr/lib32/libypclnt.so.2 OLD_LIBS+=usr/lib32/libz.so.3 # 20070519: GCC 4.2 OLD_FILES+=usr/bin/f77 OLD_FILES+=usr/bin/protoize OLD_FILES+=usr/include/g2c.h OLD_FILES+=usr/libexec/f771 OLD_FILES+=usr/share/info/g77.info.gz OLD_FILES+=usr/share/man/man1/f77.1.gz OLD_FILES+=usr/include/c++/3.4/algorithm OLD_FILES+=usr/include/c++/3.4/backward/algo.h OLD_FILES+=usr/include/c++/3.4/backward/algobase.h OLD_FILES+=usr/include/c++/3.4/backward/alloc.h OLD_FILES+=usr/include/c++/3.4/backward/backward_warning.h OLD_FILES+=usr/include/c++/3.4/backward/bvector.h OLD_FILES+=usr/include/c++/3.4/backward/complex.h OLD_FILES+=usr/include/c++/3.4/backward/defalloc.h OLD_FILES+=usr/include/c++/3.4/backward/deque.h OLD_FILES+=usr/include/c++/3.4/backward/fstream.h OLD_FILES+=usr/include/c++/3.4/backward/function.h OLD_FILES+=usr/include/c++/3.4/backward/hash_map.h OLD_FILES+=usr/include/c++/3.4/backward/hash_set.h OLD_FILES+=usr/include/c++/3.4/backward/hashtable.h OLD_FILES+=usr/include/c++/3.4/backward/heap.h OLD_FILES+=usr/include/c++/3.4/backward/iomanip.h OLD_FILES+=usr/include/c++/3.4/backward/iostream.h OLD_FILES+=usr/include/c++/3.4/backward/istream.h OLD_FILES+=usr/include/c++/3.4/backward/iterator.h OLD_FILES+=usr/include/c++/3.4/backward/list.h OLD_FILES+=usr/include/c++/3.4/backward/map.h OLD_FILES+=usr/include/c++/3.4/backward/multimap.h OLD_FILES+=usr/include/c++/3.4/backward/multiset.h OLD_FILES+=usr/include/c++/3.4/backward/new.h OLD_FILES+=usr/include/c++/3.4/backward/ostream.h OLD_FILES+=usr/include/c++/3.4/backward/pair.h OLD_FILES+=usr/include/c++/3.4/backward/queue.h OLD_FILES+=usr/include/c++/3.4/backward/rope.h OLD_FILES+=usr/include/c++/3.4/backward/set.h OLD_FILES+=usr/include/c++/3.4/backward/slist.h OLD_FILES+=usr/include/c++/3.4/backward/stack.h OLD_FILES+=usr/include/c++/3.4/backward/stream.h OLD_FILES+=usr/include/c++/3.4/backward/streambuf.h OLD_FILES+=usr/include/c++/3.4/backward/strstream OLD_FILES+=usr/include/c++/3.4/backward/tempbuf.h OLD_FILES+=usr/include/c++/3.4/backward/tree.h OLD_FILES+=usr/include/c++/3.4/backward/vector.h OLD_FILES+=usr/include/c++/3.4/bits/allocator.h OLD_FILES+=usr/include/c++/3.4/bits/atomic_word.h OLD_FILES+=usr/include/c++/3.4/bits/atomicity.h OLD_FILES+=usr/include/c++/3.4/bits/basic_file.h OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.h OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/3.4/bits/basic_string.h OLD_FILES+=usr/include/c++/3.4/bits/basic_string.tcc OLD_FILES+=usr/include/c++/3.4/bits/boost_concept_check.h OLD_FILES+=usr/include/c++/3.4/bits/c++allocator.h 
OLD_FILES+=usr/include/c++/3.4/bits/c++config.h OLD_FILES+=usr/include/c++/3.4/bits/c++io.h OLD_FILES+=usr/include/c++/3.4/bits/c++locale.h OLD_FILES+=usr/include/c++/3.4/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/3.4/bits/char_traits.h OLD_FILES+=usr/include/c++/3.4/bits/cmath.tcc OLD_FILES+=usr/include/c++/3.4/bits/codecvt.h OLD_FILES+=usr/include/c++/3.4/bits/codecvt_specializations.h OLD_FILES+=usr/include/c++/3.4/bits/concept_check.h OLD_FILES+=usr/include/c++/3.4/bits/concurrence.h OLD_FILES+=usr/include/c++/3.4/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_base.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_inline.h OLD_FILES+=usr/include/c++/3.4/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/3.4/bits/deque.tcc OLD_FILES+=usr/include/c++/3.4/bits/fstream.tcc OLD_FILES+=usr/include/c++/3.4/bits/functexcept.h OLD_FILES+=usr/include/c++/3.4/bits/gslice.h OLD_FILES+=usr/include/c++/3.4/bits/gslice_array.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-default.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-posix.h OLD_FILES+=usr/include/c++/3.4/bits/gthr-single.h OLD_FILES+=usr/include/c++/3.4/bits/gthr.h OLD_FILES+=usr/include/c++/3.4/bits/indirect_array.h OLD_FILES+=usr/include/c++/3.4/bits/ios_base.h OLD_FILES+=usr/include/c++/3.4/bits/istream.tcc OLD_FILES+=usr/include/c++/3.4/bits/list.tcc OLD_FILES+=usr/include/c++/3.4/bits/locale_classes.h OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.h OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/3.4/bits/localefwd.h OLD_FILES+=usr/include/c++/3.4/bits/mask_array.h OLD_FILES+=usr/include/c++/3.4/bits/messages_members.h OLD_FILES+=usr/include/c++/3.4/bits/os_defines.h OLD_FILES+=usr/include/c++/3.4/bits/ostream.tcc OLD_FILES+=usr/include/c++/3.4/bits/postypes.h OLD_FILES+=usr/include/c++/3.4/bits/slice_array.h OLD_FILES+=usr/include/c++/3.4/bits/sstream.tcc OLD_FILES+=usr/include/c++/3.4/bits/stl_algo.h OLD_FILES+=usr/include/c++/3.4/bits/stl_algobase.h OLD_FILES+=usr/include/c++/3.4/bits/stl_bvector.h OLD_FILES+=usr/include/c++/3.4/bits/stl_construct.h OLD_FILES+=usr/include/c++/3.4/bits/stl_deque.h OLD_FILES+=usr/include/c++/3.4/bits/stl_function.h OLD_FILES+=usr/include/c++/3.4/bits/stl_heap.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/3.4/bits/stl_list.h OLD_FILES+=usr/include/c++/3.4/bits/stl_map.h OLD_FILES+=usr/include/c++/3.4/bits/stl_multimap.h OLD_FILES+=usr/include/c++/3.4/bits/stl_multiset.h OLD_FILES+=usr/include/c++/3.4/bits/stl_numeric.h OLD_FILES+=usr/include/c++/3.4/bits/stl_pair.h OLD_FILES+=usr/include/c++/3.4/bits/stl_queue.h OLD_FILES+=usr/include/c++/3.4/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/3.4/bits/stl_relops.h OLD_FILES+=usr/include/c++/3.4/bits/stl_set.h OLD_FILES+=usr/include/c++/3.4/bits/stl_stack.h OLD_FILES+=usr/include/c++/3.4/bits/stl_tempbuf.h OLD_FILES+=usr/include/c++/3.4/bits/stl_threads.h OLD_FILES+=usr/include/c++/3.4/bits/stl_tree.h OLD_FILES+=usr/include/c++/3.4/bits/stl_uninitialized.h OLD_FILES+=usr/include/c++/3.4/bits/stl_vector.h OLD_FILES+=usr/include/c++/3.4/bits/stream_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/streambuf.tcc OLD_FILES+=usr/include/c++/3.4/bits/streambuf_iterator.h OLD_FILES+=usr/include/c++/3.4/bits/stringfwd.h OLD_FILES+=usr/include/c++/3.4/bits/time_members.h OLD_FILES+=usr/include/c++/3.4/bits/type_traits.h 
OLD_FILES+=usr/include/c++/3.4/bits/valarray_after.h OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.h OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.tcc OLD_FILES+=usr/include/c++/3.4/bits/valarray_before.h OLD_FILES+=usr/include/c++/3.4/bits/vector.tcc OLD_FILES+=usr/include/c++/3.4/bitset OLD_FILES+=usr/include/c++/3.4/cassert OLD_FILES+=usr/include/c++/3.4/cctype OLD_FILES+=usr/include/c++/3.4/cerrno OLD_FILES+=usr/include/c++/3.4/cfloat OLD_FILES+=usr/include/c++/3.4/ciso646 OLD_FILES+=usr/include/c++/3.4/climits OLD_FILES+=usr/include/c++/3.4/clocale OLD_FILES+=usr/include/c++/3.4/cmath OLD_FILES+=usr/include/c++/3.4/complex OLD_FILES+=usr/include/c++/3.4/csetjmp OLD_FILES+=usr/include/c++/3.4/csignal OLD_FILES+=usr/include/c++/3.4/cstdarg OLD_FILES+=usr/include/c++/3.4/cstddef OLD_FILES+=usr/include/c++/3.4/cstdio OLD_FILES+=usr/include/c++/3.4/cstdlib OLD_FILES+=usr/include/c++/3.4/cstring OLD_FILES+=usr/include/c++/3.4/ctime OLD_FILES+=usr/include/c++/3.4/cwchar OLD_FILES+=usr/include/c++/3.4/cwctype OLD_FILES+=usr/include/c++/3.4/cxxabi.h OLD_FILES+=usr/include/c++/3.4/debug/bitset OLD_FILES+=usr/include/c++/3.4/debug/debug.h OLD_FILES+=usr/include/c++/3.4/debug/deque OLD_FILES+=usr/include/c++/3.4/debug/formatter.h OLD_FILES+=usr/include/c++/3.4/debug/hash_map OLD_FILES+=usr/include/c++/3.4/debug/hash_map.h OLD_FILES+=usr/include/c++/3.4/debug/hash_multimap.h OLD_FILES+=usr/include/c++/3.4/debug/hash_multiset.h OLD_FILES+=usr/include/c++/3.4/debug/hash_set OLD_FILES+=usr/include/c++/3.4/debug/hash_set.h OLD_FILES+=usr/include/c++/3.4/debug/list OLD_FILES+=usr/include/c++/3.4/debug/map OLD_FILES+=usr/include/c++/3.4/debug/map.h OLD_FILES+=usr/include/c++/3.4/debug/multimap.h OLD_FILES+=usr/include/c++/3.4/debug/multiset.h OLD_FILES+=usr/include/c++/3.4/debug/safe_base.h OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.h OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.tcc OLD_FILES+=usr/include/c++/3.4/debug/safe_sequence.h OLD_FILES+=usr/include/c++/3.4/debug/set OLD_FILES+=usr/include/c++/3.4/debug/set.h OLD_FILES+=usr/include/c++/3.4/debug/string OLD_FILES+=usr/include/c++/3.4/debug/vector OLD_FILES+=usr/include/c++/3.4/deque OLD_FILES+=usr/include/c++/3.4/exception OLD_FILES+=usr/include/c++/3.4/exception_defines.h OLD_FILES+=usr/include/c++/3.4/ext/algorithm OLD_FILES+=usr/include/c++/3.4/ext/bitmap_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/debug_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/enc_filebuf.h OLD_FILES+=usr/include/c++/3.4/ext/functional OLD_FILES+=usr/include/c++/3.4/ext/hash_fun.h OLD_FILES+=usr/include/c++/3.4/ext/hash_map OLD_FILES+=usr/include/c++/3.4/ext/hash_set OLD_FILES+=usr/include/c++/3.4/ext/hashtable.h OLD_FILES+=usr/include/c++/3.4/ext/iterator OLD_FILES+=usr/include/c++/3.4/ext/malloc_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/memory OLD_FILES+=usr/include/c++/3.4/ext/mt_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/new_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/numeric OLD_FILES+=usr/include/c++/3.4/ext/pod_char_traits.h OLD_FILES+=usr/include/c++/3.4/ext/pool_allocator.h OLD_FILES+=usr/include/c++/3.4/ext/rb_tree OLD_FILES+=usr/include/c++/3.4/ext/rope OLD_FILES+=usr/include/c++/3.4/ext/ropeimpl.h OLD_FILES+=usr/include/c++/3.4/ext/slist OLD_FILES+=usr/include/c++/3.4/ext/stdio_filebuf.h OLD_FILES+=usr/include/c++/3.4/ext/stdio_sync_filebuf.h OLD_FILES+=usr/include/c++/3.4/fstream OLD_FILES+=usr/include/c++/3.4/functional OLD_FILES+=usr/include/c++/3.4/iomanip OLD_FILES+=usr/include/c++/3.4/ios 
OLD_FILES+=usr/include/c++/3.4/iosfwd OLD_FILES+=usr/include/c++/3.4/iostream OLD_FILES+=usr/include/c++/3.4/istream OLD_FILES+=usr/include/c++/3.4/iterator OLD_FILES+=usr/include/c++/3.4/limits OLD_FILES+=usr/include/c++/3.4/list OLD_FILES+=usr/include/c++/3.4/locale OLD_FILES+=usr/include/c++/3.4/map OLD_FILES+=usr/include/c++/3.4/memory OLD_FILES+=usr/include/c++/3.4/new OLD_FILES+=usr/include/c++/3.4/numeric OLD_FILES+=usr/include/c++/3.4/ostream OLD_FILES+=usr/include/c++/3.4/queue OLD_FILES+=usr/include/c++/3.4/set OLD_FILES+=usr/include/c++/3.4/sstream OLD_FILES+=usr/include/c++/3.4/stack OLD_FILES+=usr/include/c++/3.4/stdexcept OLD_FILES+=usr/include/c++/3.4/streambuf OLD_FILES+=usr/include/c++/3.4/string OLD_FILES+=usr/include/c++/3.4/typeinfo OLD_FILES+=usr/include/c++/3.4/utility OLD_FILES+=usr/include/c++/3.4/valarray OLD_FILES+=usr/include/c++/3.4/vector OLD_DIRS+=usr/include/c++/3.4/backward OLD_DIRS+=usr/include/c++/3.4/bits OLD_DIRS+=usr/include/c++/3.4/debug OLD_DIRS+=usr/include/c++/3.4/ext OLD_DIRS+=usr/include/c++/3.4 # 20070510: zpool/zfs moved to /sbin OLD_FILES+=usr/sbin/zfs OLD_FILES+=usr/sbin/zpool # 20070423: rc.bluetooth (examples) removed OLD_FILES+=usr/share/examples/netgraph/bluetooth/rc.bluetooth OLD_DIRS+=usr/share/examples/netgraph/bluetooth # 20070421: worm.4 removed OLD_FILES+=usr/share/man/man4/worm.4.gz # 20070417: trunk(4) renamed to lagg(4) OLD_FILES+=usr/include/net/if_trunk.h # 20070409: uuidgen moved to /bin/ OLD_FILES+=usr/bin/uuidgen # 20070328: bzip2 1.0.4 OLD_FILES+=usr/share/info/bzip2.info.gz # 20070303: libarchive 2.0 OLD_LIBS+=usr/lib/libarchive.so.3 OLD_LIBS+=usr/lib32/libarchive.so.3 # 20070301: remove addr2ascii and ascii2addr OLD_FILES+=usr/share/man/man3/addr2ascii.3.gz OLD_FILES+=usr/share/man/man3/ascii2addr.3.gz # 20070225: vm_page_unmanage() removed OLD_FILES+=usr/share/man/man9/vm_page_unmanage.9.gz # 20070216: VFS_VPTOFH(9) -> VOP_VPTOFH(9) OLD_FILES+=usr/share/man/man9/VFS_VPTOFH.9.gz # 20070212: kame.4 removed OLD_FILES+=usr/share/man/man4/kame.4.gz # 20070201: remove libmytinfo link OLD_FILES+=usr/lib/libmytinfo.a OLD_FILES+=usr/lib/libmytinfo.so OLD_FILES+=usr/lib/libmytinfo_p.a OLD_FILES+=usr/lib/libmytinfow.a OLD_FILES+=usr/lib/libmytinfow.so OLD_FILES+=usr/lib/libmytinfow_p.a OLD_FILES+=usr/lib32/libmytinfo.a OLD_FILES+=usr/lib32/libmytinfo.so OLD_FILES+=usr/lib32/libmytinfo_p.a OLD_FILES+=usr/lib32/libmytinfow.a OLD_FILES+=usr/lib32/libmytinfow.so OLD_FILES+=usr/lib32/libmytinfow_p.a # 20070128: remove vnconfig OLD_FILES+=usr/sbin/vnconfig # 20070127: remove bpf_compat.h OLD_FILES+=usr/include/net/bpf_compat.h # 20070125: objformat bites the dust OLD_FILES+=usr/bin/objformat OLD_FILES+=usr/share/man/man1/objformat.1.gz OLD_FILES+=usr/include/objformat.h OLD_FILES+=usr/share/man/man3/getobjformat.3.gz # 20061201: remove symlink to *.so.4 libalias modules OLD_FILES+=usr/lib/libalias_cuseeme.so OLD_FILES+=usr/lib/libalias_dummy.so OLD_FILES+=usr/lib/libalias_ftp.so OLD_FILES+=usr/lib/libalias_irc.so OLD_FILES+=usr/lib/libalias_nbt.so OLD_FILES+=usr/lib/libalias_pptp.so OLD_FILES+=usr/lib/libalias_skinny.so OLD_FILES+=usr/lib/libalias_smedia.so # 20061201: remove old *.so.4 libalias modules OLD_FILES+=lib/libalias_cuseeme.so.4 OLD_FILES+=lib/libalias_dummy.so.4 OLD_FILES+=lib/libalias_ftp.so.4 OLD_FILES+=lib/libalias_irc.so.4 OLD_FILES+=lib/libalias_nbt.so.4 OLD_FILES+=lib/libalias_pptp.so.4 OLD_FILES+=lib/libalias_skinny.so.4 OLD_FILES+=lib/libalias_smedia.so.4 # 20061126: remove old man page 
OLD_FILES+=usr/share/man/man3/archive_read_set_bytes_per_block.3.gz # 20061125: remove old man page OLD_FILES+=usr/share/man/man9/devsw.9.gz # 20061122: remove obsolete mount programs OLD_FILES+=sbin/mount_devfs OLD_FILES+=sbin/mount_ext2fs OLD_FILES+=sbin/mount_fdescfs OLD_FILES+=sbin/mount_linprocfs OLD_FILES+=sbin/mount_procfs OLD_FILES+=sbin/mount_std OLD_FILES+=rescue/mount_devfs OLD_FILES+=rescue/mount_ext2fs OLD_FILES+=rescue/mount_fdescfs OLD_FILES+=rescue/mount_linprocfs OLD_FILES+=rescue/mount_procfs OLD_FILES+=rescue/mount_std OLD_FILES+=usr/share/man/man8/mount_devfs.8.gz OLD_FILES+=usr/share/man/man8/mount_ext2fs.8.gz OLD_FILES+=usr/share/man/man8/mount_fdescfs.8.gz OLD_FILES+=usr/share/man/man8/mount_linprocfs.8.gz OLD_FILES+=usr/share/man/man8/mount_procfs.8.gz OLD_FILES+=usr/share/man/man8/mount_std.8.gz # 20061116: uhidev.4 removed OLD_FILES+=usr/share/man/man4/uhidev.4.gz # 20061106: archive_write_prepare.3 removed OLD_FILES+=usr/share/man/man3/archive_write_prepare.3.gz # 20061018: pccardc removed OLD_FILES+=usr/sbin/pccardc usr/share/man/man8/pccardc.8.gz # 20060930: demangle.h from contrib/libstdc++/include/ext/ OLD_FILES+=usr/include/c++/3.4/ext/demangle.h # 20060929: mrouted removed OLD_FILES+=usr/sbin/map-mbone OLD_FILES+=usr/sbin/mrinfo OLD_FILES+=usr/sbin/mrouted OLD_FILES+=usr/sbin/mtrace OLD_FILES+=usr/share/man/man8/map-mbone.8.gz OLD_FILES+=usr/share/man/man8/mrinfo.8.gz OLD_FILES+=usr/share/man/man8/mrouted.8.gz OLD_FILES+=usr/share/man/man8/mtrace.8.gz # 20060924: tcpslice removed OLD_FILES+=usr/sbin/tcpslice OLD_FILES+=usr/share/man/man1/tcpslice.1.gz # 20060829: kvmdb cleanup script removed OLD_FILES+=etc/periodic/weekly/120.clean-kvmdb # 20060822: ramdisk{,-own} have been replaced by mdconfig{,2} OLD_FILES+=etc/rc.d/ramdisk OLD_FILES+=etc/rc.d/ramdisk-own # 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade OLD_FILES+=usr/include/openssl/eng_int.h OLD_FILES+=usr/include/openssl/hw_4758_cca_err.h OLD_FILES+=usr/include/openssl/hw_aep_err.h OLD_FILES+=usr/include/openssl/hw_atalla_err.h OLD_FILES+=usr/include/openssl/hw_cswift_err.h OLD_FILES+=usr/include/openssl/hw_ncipher_err.h OLD_FILES+=usr/include/openssl/hw_nuron_err.h OLD_FILES+=usr/include/openssl/hw_sureware_err.h OLD_FILES+=usr/include/openssl/hw_ubsec_err.h # 20060713: mount_linsysfs(8) never existed in 7.x OLD_FILES+=sbin/mount_linsysfs OLD_FILES+=usr/share/man/man8/mount_linsysfs.8.gz # 20060704: KAME compat file net_osdep.h removed OLD_FILES+=usr/include/net/net_osdep.h # 20060605: man page links removed by OpenBSM 1.0 alpha 6 import OLD_FILES+=usr/share/man/man3/au_to_socket.3.gz OLD_FILES+=usr/share/man/man3/au_to_socket_ex_128.3.gz OLD_FILES+=usr/share/man/man3/au_to_socket_ex_32.3.gz # 20060517: pcvt removed OLD_FILES+=usr/share/pcvt/README.FIRST OLD_FILES+=usr/share/pcvt/Etc/xmodmap-german OLD_FILES+=usr/share/pcvt/Etc/pcvt.sh OLD_FILES+=usr/share/pcvt/Etc/pcvt.el OLD_FILES+=usr/share/pcvt/Etc/Terminfo OLD_FILES+=usr/share/pcvt/Etc/Termcap OLD_DIRS+=usr/share/pcvt/Etc OLD_FILES+=usr/share/pcvt/Doc/NotesAndHints OLD_FILES+=usr/share/pcvt/Doc/Keyboard.VT OLD_FILES+=usr/share/pcvt/Doc/Keyboard.HP OLD_FILES+=usr/share/pcvt/Doc/EscapeSequences OLD_FILES+=usr/share/pcvt/Doc/Charsets OLD_FILES+=usr/share/pcvt/Doc/CharGen OLD_FILES+=usr/share/pcvt/Doc/Bibliography OLD_FILES+=usr/share/pcvt/Doc/Acknowledgements OLD_DIRS+=usr/share/pcvt/Doc OLD_DIRS+=usr/share/pcvt OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.816 OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.814 OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.810 
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.808 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.816 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.814 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.810 OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.808 OLD_DIRS+=usr/share/misc/pcvtfonts OLD_FILES+=usr/share/misc/keycap.pcvt OLD_FILES+=usr/share/man/man8/ispcvt.8.gz OLD_FILES+=usr/share/man/man5/keycap.5.gz OLD_FILES+=usr/share/man/man4/pcvt.4.gz OLD_FILES+=usr/share/man/man3/kgetstr.3.gz OLD_FILES+=usr/share/man/man3/kgetnum.3.gz OLD_FILES+=usr/share/man/man3/kgetflag.3.gz OLD_FILES+=usr/share/man/man3/kgetent.3.gz OLD_FILES+=usr/share/man/man3/keycap.3.gz OLD_FILES+=usr/share/man/man1/vt220keys.1.gz OLD_FILES+=usr/share/man/man1/scon.1.gz OLD_FILES+=usr/share/man/man1/loadfont.1.gz OLD_FILES+=usr/share/man/man1/kcon.1.gz OLD_FILES+=usr/share/man/man1/fontedit.1.gz OLD_FILES+=usr/share/man/man1/cursor.1.gz OLD_FILES+=usr/sbin/vt220keys OLD_FILES+=usr/sbin/scon OLD_FILES+=usr/sbin/loadfont OLD_FILES+=usr/sbin/kcon OLD_FILES+=usr/sbin/ispcvt OLD_FILES+=usr/sbin/fontedit OLD_FILES+=usr/sbin/cursor OLD_FILES+=usr/lib/libkeycap_p.a OLD_FILES+=usr/lib/libkeycap.a OLD_FILES+=usr/include/machine/pcvt_ioctl.h # 20060514: lnc(4) replaced by le(4) OLD_FILES+=usr/share/man/man4/i386/lnc.4.gz # 20060512: remove ip6fw OLD_FILES+=etc/periodic/security/600.ip6fwdenied OLD_FILES+=etc/periodic/security/650.ip6fwlimit OLD_FILES+=sbin/ip6fw OLD_FILES+=usr/include/netinet6/ip6_fw.h OLD_FILES+=usr/share/man/man8/ip6fw.8.gz # 20060424: sab(4) removed OLD_FILES+=usr/share/man/man4/sab.4.gz # 20060328: remove redundant rc.d script OLD_FILES+=etc/rc.d/ike # 20060127: revert libdisk to static-only OLD_FILES+=usr/lib/libdisk.so # 20060115: sys/pccard includes cleanup OLD_FILES+=usr/include/pccard/driver.h OLD_FILES+=usr/include/pccard/i82365.h OLD_FILES+=usr/include/pccard/meciareg.h OLD_FILES+=usr/include/pccard/pccard_nbk.h OLD_FILES+=usr/include/pccard/pcic_pci.h OLD_FILES+=usr/include/pccard/pcicvar.h OLD_FILES+=usr/include/pccard/slot.h # 20051215: rescue/nextboot.sh renamed to rescue/nextboot OLD_FILES+=rescue/nextboot.sh # 20051214: usbd(8) removed OLD_FILES+=etc/rc.d/usbd OLD_FILES+=etc/usbd.conf OLD_FILES+=usr/sbin/usbd OLD_FILES+=usr/share/man/man8/usbd.8.gz # 20051029: rc.d/ppp-user renamed to rc.d/ppp for convenience OLD_FILES+=etc/rc.d/ppp-user # 20051012: setkey(8) moved to /sbin/ OLD_FILES+=usr/sbin/setkey # 20050930: pccardd(8) removed OLD_FILES+=usr/sbin/pccardd OLD_FILES+=usr/share/man/man5/pccard.conf.5.gz OLD_FILES+=usr/share/man/man8/pccardd.8.gz # 20050927: bridge(4) replaced by if_bridge(4) OLD_FILES+=usr/include/net/bridge.h # 20050831: not implemented OLD_FILES+=usr/share/man/man3/getino.3.gz OLD_FILES+=usr/share/man/man3/putino.3.gz # 20050825: T/TCP retired several months ago OLD_FILES+=usr/share/man/man4/ttcp.4.gz # 20050805 tn3270 retired long ago OLD_FILES+=usr/share/misc/map3270 # 20050801: too old to be interesting here OLD_FILES+=usr/share/doc/papers/px.ps.gz # 20050721: moved to ports OLD_FILES+=usr/sbin/vttest OLD_FILES+=usr/share/man/man1/vttest.1.gz # 20050617: wpa man pages moved to section 8 OLD_FILES+=usr/share/man/man1/hostapd.1.gz OLD_FILES+=usr/share/man/man1/hostapd_cli.1.gz OLD_FILES+=usr/share/man/man1/wpa_cli.1.gz OLD_FILES+=usr/share/man/man1/wpa_supplicant.1.gz # 20050610: rexecd (insecure by design) OLD_FILES+=etc/pam.d/rexecd OLD_FILES+=usr/share/man/man8/rexecd.8.gz OLD_FILES+=usr/libexec/rexecd # 20050606: OpenBSD dhclient replaces ISC one OLD_FILES+=bin/omshell 
OLD_FILES+=sbin/omshell OLD_FILES+=usr/share/man/man1/omshell.1.gz OLD_FILES+=usr/share/man/man5/dhcp-eval.5.gz # 200504XX: ipf tools moved from /usr to / OLD_FILES+=rescue/ipfs OLD_FILES+=rescue/ipfstat OLD_FILES+=rescue/ipmon OLD_FILES+=rescue/ipnat OLD_FILES+=usr/sbin/ipftest OLD_FILES+=usr/sbin/ipresend OLD_FILES+=usr/sbin/ipsend OLD_FILES+=usr/sbin/iptest OLD_FILES+=usr/share/man/man1/ipnat.1.gz OLD_FILES+=usr/share/man/man1/ipsend.1.gz OLD_FILES+=usr/share/man/man1/iptest.1.gz OLD_FILES+=usr/share/man/man5/ipsend.5.gz # 200503XX: bsdtar takes over gtar OLD_FILES+=usr/bin/gtar OLD_FILES+=usr/share/man/man1/gtar.1.gz # 200503XX OLD_FILES+=usr/share/man/man3/exp10.3.gz OLD_FILES+=usr/share/man/man3/exp10f.3.gz OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz # 20050324: updated release infrastructure OLD_FILES+=usr/share/man/man5/drivers.conf.5.gz # 20050317: removed from BIND 9 distribution OLD_FILES+=usr/share/doc/bind9/KNOWN_DEFECTS # 2005XXXX: OLD_FILES+=sbin/mount_autofs OLD_FILES+=usr/lib/libautofs.a OLD_FILES+=usr/lib/libautofs.so OLD_FILES+=usr/share/man/man8/mount_autofs.8.gz # 20050203: Merged with fortunes OLD_FILES+=usr/share/games/fortune/fortunes2 OLD_FILES+=usr/share/games/fortune/fortunes2.dat # 200501XX: OLD_FILES+=usr/libexec/getNAME # 200411XX: gvinum replaces vinum OLD_FILES+=bin/vinum OLD_FILES+=rescue/vinum OLD_FILES+=sbin/vinum OLD_FILES+=usr/share/man/man8/vinum.8.gz # 200411XX: libxpg4 removal OLD_FILES+=usr/lib/libxpg4.a OLD_FILES+=usr/lib/libxpg4.so OLD_FILES+=usr/lib/libxpg4_p.a # 20041109: replaced by em(4) OLD_FILES+=usr/share/man/man4/gx.4.gz OLD_FILES+=usr/share/man/man4/if_gx.4.gz # 20041017: rune interface removed OLD_FILES+=usr/include/rune.h OLD_FILES+=usr/share/man/man3/fgetrune.3.gz OLD_FILES+=usr/share/man/man3/fputrune.3.gz OLD_FILES+=usr/share/man/man3/fungetrune.3.gz OLD_FILES+=usr/share/man/man3/mbrrune.3.gz OLD_FILES+=usr/share/man/man3/mbrune.3.gz OLD_FILES+=usr/share/man/man3/rune.3.gz OLD_FILES+=usr/share/man/man3/setinvalidrune.3.gz OLD_FILES+=usr/share/man/man3/sgetrune.3.gz OLD_FILES+=usr/share/man/man3/sputrune.3.gz # 20040925: bind9 import OLD_FILES+=usr/bin/dnskeygen OLD_FILES+=usr/bin/dnsquery OLD_FILES+=usr/lib/libisc.a OLD_FILES+=usr/lib/libisc.so OLD_FILES+=usr/lib/libisc_p.a OLD_FILES+=usr/libexec/named-xfer OLD_FILES+=usr/sbin/named.restart OLD_FILES+=usr/sbin/ndc OLD_FILES+=usr/sbin/nslookup OLD_FILES+=usr/sbin/nsupdate OLD_FILES+=usr/share/doc/bind/html/acl.html OLD_FILES+=usr/share/doc/bind/html/address_list.html OLD_FILES+=usr/share/doc/bind/html/comments.html OLD_FILES+=usr/share/doc/bind/html/config.html OLD_FILES+=usr/share/doc/bind/html/controls.html OLD_FILES+=usr/share/doc/bind/html/docdef.html OLD_FILES+=usr/share/doc/bind/html/example.html OLD_FILES+=usr/share/doc/bind/html/include.html OLD_FILES+=usr/share/doc/bind/html/index.html OLD_FILES+=usr/share/doc/bind/html/key.html OLD_FILES+=usr/share/doc/bind/html/logging.html OLD_FILES+=usr/share/doc/bind/html/master.html OLD_FILES+=usr/share/doc/bind/html/options.html OLD_FILES+=usr/share/doc/bind/html/server.html OLD_FILES+=usr/share/doc/bind/html/trusted-keys.html OLD_FILES+=usr/share/doc/bind/html/zone.html OLD_FILES+=usr/share/doc/bind/misc/DynamicUpdate OLD_FILES+=usr/share/doc/bind/misc/FAQ.1of2 OLD_FILES+=usr/share/doc/bind/misc/FAQ.2of2 OLD_FILES+=usr/share/doc/bind/misc/rfc2317-notes.txt OLD_FILES+=usr/share/doc/bind/misc/style.txt OLD_FILES+=usr/share/man/man1/dnskeygen.1.gz OLD_FILES+=usr/share/man/man1/dnsquery.1.gz 
OLD_FILES+=usr/share/man/man8/named-bootconf.8.gz OLD_FILES+=usr/share/man/man8/named-xfer.8.gz OLD_FILES+=usr/share/man/man8/named.restart.8.gz OLD_FILES+=usr/share/man/man8/ndc.8.gz OLD_FILES+=usr/share/man/man8/nslookup.8.gz # 200409XX OLD_FILES+=usr/share/man/man3/ENSURE.3.gz OLD_FILES+=usr/share/man/man3/ENSURE_ERR.3.gz OLD_FILES+=usr/share/man/man3/INSIST.3.gz OLD_FILES+=usr/share/man/man3/INSIST_ERR.3.gz OLD_FILES+=usr/share/man/man3/INVARIANT.3.gz OLD_FILES+=usr/share/man/man3/INVARIANT_ERR.3.gz OLD_FILES+=usr/share/man/man3/REQUIRE.3.gz OLD_FILES+=usr/share/man/man3/REQUIRE_ERR.3.gz OLD_FILES+=usr/share/man/man3/assertion_type_to_text.3.gz OLD_FILES+=usr/share/man/man3/assertions.3.gz OLD_FILES+=usr/share/man/man3/bitncmp.3.gz OLD_FILES+=usr/share/man/man3/evAddTime.3.gz OLD_FILES+=usr/share/man/man3/evCancelConn.3.gz OLD_FILES+=usr/share/man/man3/evCancelRW.3.gz OLD_FILES+=usr/share/man/man3/evClearIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evClearTimer.3.gz OLD_FILES+=usr/share/man/man3/evCmpTime.3.gz OLD_FILES+=usr/share/man/man3/evConnFunc.3.gz OLD_FILES+=usr/share/man/man3/evConnect.3.gz OLD_FILES+=usr/share/man/man3/evConsIovec.3.gz OLD_FILES+=usr/share/man/man3/evConsTime.3.gz OLD_FILES+=usr/share/man/man3/evCreate.3.gz OLD_FILES+=usr/share/man/man3/evDefer.3.gz OLD_FILES+=usr/share/man/man3/evDeselectFD.3.gz OLD_FILES+=usr/share/man/man3/evDestroy.3.gz OLD_FILES+=usr/share/man/man3/evDispatch.3.gz OLD_FILES+=usr/share/man/man3/evDo.3.gz OLD_FILES+=usr/share/man/man3/evDrop.3.gz OLD_FILES+=usr/share/man/man3/evFileFunc.3.gz OLD_FILES+=usr/share/man/man3/evGetNext.3.gz OLD_FILES+=usr/share/man/man3/evHold.3.gz OLD_FILES+=usr/share/man/man3/evInitID.3.gz OLD_FILES+=usr/share/man/man3/evLastEventTime.3.gz OLD_FILES+=usr/share/man/man3/evListen.3.gz OLD_FILES+=usr/share/man/man3/evMainLoop.3.gz OLD_FILES+=usr/share/man/man3/evNowTime.3.gz OLD_FILES+=usr/share/man/man3/evPrintf.3.gz OLD_FILES+=usr/share/man/man3/evRead.3.gz OLD_FILES+=usr/share/man/man3/evResetTimer.3.gz OLD_FILES+=usr/share/man/man3/evSelectFD.3.gz OLD_FILES+=usr/share/man/man3/evSetDebug.3.gz OLD_FILES+=usr/share/man/man3/evSetIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evSetTimer.3.gz OLD_FILES+=usr/share/man/man3/evStreamFunc.3.gz OLD_FILES+=usr/share/man/man3/evSubTime.3.gz OLD_FILES+=usr/share/man/man3/evTestID.3.gz OLD_FILES+=usr/share/man/man3/evTimeRW.3.gz OLD_FILES+=usr/share/man/man3/evTimeSpec.3.gz OLD_FILES+=usr/share/man/man3/evTimeVal.3.gz OLD_FILES+=usr/share/man/man3/evTimerFunc.3.gz OLD_FILES+=usr/share/man/man3/evTouchIdleTimer.3.gz OLD_FILES+=usr/share/man/man3/evTryAccept.3.gz OLD_FILES+=usr/share/man/man3/evUnhold.3.gz OLD_FILES+=usr/share/man/man3/evUntimeRW.3.gz OLD_FILES+=usr/share/man/man3/evUnwait.3.gz OLD_FILES+=usr/share/man/man3/evWaitFor.3.gz OLD_FILES+=usr/share/man/man3/evWaitFunc.3.gz OLD_FILES+=usr/share/man/man3/evWrite.3.gz OLD_FILES+=usr/share/man/man3/eventlib.3.gz OLD_FILES+=usr/share/man/man3/heap.3.gz OLD_FILES+=usr/share/man/man3/heap_decreased.3.gz OLD_FILES+=usr/share/man/man3/heap_delete.3.gz OLD_FILES+=usr/share/man/man3/heap_element.3.gz OLD_FILES+=usr/share/man/man3/heap_for_each.3.gz OLD_FILES+=usr/share/man/man3/heap_free.3.gz OLD_FILES+=usr/share/man/man3/heap_increased.3.gz OLD_FILES+=usr/share/man/man3/heap_insert.3.gz OLD_FILES+=usr/share/man/man3/heap_new.3.gz OLD_FILES+=usr/share/man/man3/log_add_channel.3.gz OLD_FILES+=usr/share/man/man3/log_category_is_active.3.gz OLD_FILES+=usr/share/man/man3/log_close_stream.3.gz 
OLD_FILES+=usr/share/man/man3/log_dec_references.3.gz OLD_FILES+=usr/share/man/man3/log_free_channel.3.gz OLD_FILES+=usr/share/man/man3/log_free_context.3.gz OLD_FILES+=usr/share/man/man3/log_get_filename.3.gz OLD_FILES+=usr/share/man/man3/log_get_stream.3.gz OLD_FILES+=usr/share/man/man3/log_inc_references.3.gz OLD_FILES+=usr/share/man/man3/log_new_context.3.gz OLD_FILES+=usr/share/man/man3/log_new_file_channel.3.gz OLD_FILES+=usr/share/man/man3/log_new_null_channel.3.gz OLD_FILES+=usr/share/man/man3/log_new_syslog_channel.3.gz OLD_FILES+=usr/share/man/man3/log_open_stream.3.gz OLD_FILES+=usr/share/man/man3/log_option.3.gz OLD_FILES+=usr/share/man/man3/log_remove_channel.3.gz OLD_FILES+=usr/share/man/man3/log_set_file_owner.3.gz OLD_FILES+=usr/share/man/man3/log_vwrite.3.gz OLD_FILES+=usr/share/man/man3/log_write.3.gz OLD_FILES+=usr/share/man/man3/logging.3.gz OLD_FILES+=usr/share/man/man3/memcluster.3.gz OLD_FILES+=usr/share/man/man3/memget.3.gz OLD_FILES+=usr/share/man/man3/memput.3.gz OLD_FILES+=usr/share/man/man3/memstats.3.gz OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz OLD_FILES+=usr/share/man/man3/sigwait.3.gz OLD_FILES+=usr/share/man/man3/tree_add.3.gz OLD_FILES+=usr/share/man/man3/tree_delete.3.gz OLD_FILES+=usr/share/man/man3/tree_init.3.gz OLD_FILES+=usr/share/man/man3/tree_mung.3.gz OLD_FILES+=usr/share/man/man3/tree_srch.3.gz OLD_FILES+=usr/share/man/man3/tree_trav.3.gz # 2004XXYY: OS internal libs, no ports use them, no need to use OLD_LIBS OLD_FILES+=lib/geom/geom_concat.so.1 OLD_FILES+=lib/geom/geom_label.so.1 OLD_FILES+=lib/geom/geom_nop.so.1 OLD_FILES+=lib/geom/geom_stripe.so.1 # 20040728: GCC 3.4.2 OLD_DIRS+=usr/include/c++/3.3 OLD_FILES+=usr/include/c++/3.3/FlexLexer.h OLD_FILES+=usr/include/c++/3.3/algorithm OLD_FILES+=usr/include/c++/3.3/backward/algo.h OLD_FILES+=usr/include/c++/3.3/backward/algobase.h OLD_FILES+=usr/include/c++/3.3/backward/alloc.h OLD_FILES+=usr/include/c++/3.3/backward/backward_warning.h OLD_FILES+=usr/include/c++/3.3/backward/bvector.h OLD_FILES+=usr/include/c++/3.3/backward/complex.h OLD_FILES+=usr/include/c++/3.3/backward/defalloc.h OLD_FILES+=usr/include/c++/3.3/backward/deque.h OLD_FILES+=usr/include/c++/3.3/backward/fstream.h OLD_FILES+=usr/include/c++/3.3/backward/function.h OLD_FILES+=usr/include/c++/3.3/backward/hash_map.h OLD_FILES+=usr/include/c++/3.3/backward/hash_set.h OLD_FILES+=usr/include/c++/3.3/backward/hashtable.h OLD_FILES+=usr/include/c++/3.3/backward/heap.h OLD_FILES+=usr/include/c++/3.3/backward/iomanip.h OLD_FILES+=usr/include/c++/3.3/backward/iostream.h OLD_FILES+=usr/include/c++/3.3/backward/istream.h OLD_FILES+=usr/include/c++/3.3/backward/iterator.h OLD_FILES+=usr/include/c++/3.3/backward/list.h OLD_FILES+=usr/include/c++/3.3/backward/map.h OLD_FILES+=usr/include/c++/3.3/backward/multimap.h OLD_FILES+=usr/include/c++/3.3/backward/multiset.h OLD_FILES+=usr/include/c++/3.3/backward/new.h OLD_FILES+=usr/include/c++/3.3/backward/ostream.h OLD_FILES+=usr/include/c++/3.3/backward/pair.h OLD_FILES+=usr/include/c++/3.3/backward/queue.h OLD_FILES+=usr/include/c++/3.3/backward/rope.h OLD_FILES+=usr/include/c++/3.3/backward/set.h OLD_FILES+=usr/include/c++/3.3/backward/slist.h OLD_FILES+=usr/include/c++/3.3/backward/stack.h OLD_FILES+=usr/include/c++/3.3/backward/stream.h OLD_FILES+=usr/include/c++/3.3/backward/streambuf.h OLD_FILES+=usr/include/c++/3.3/backward/strstream OLD_FILES+=usr/include/c++/3.3/backward/strstream.h OLD_FILES+=usr/include/c++/3.3/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.3/backward/tree.h OLD_FILES+=usr/include/c++/3.3/backward/vector.h OLD_DIRS+=usr/include/c++/3.3/backward OLD_FILES+=usr/include/c++/3.3/bits/atomicity.h OLD_FILES+=usr/include/c++/3.3/bits/basic_file.h OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.h OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.tcc OLD_FILES+=usr/include/c++/3.3/bits/basic_string.h OLD_FILES+=usr/include/c++/3.3/bits/basic_string.tcc OLD_FILES+=usr/include/c++/3.3/bits/boost_concept_check.h OLD_FILES+=usr/include/c++/3.3/bits/c++config.h OLD_FILES+=usr/include/c++/3.3/bits/c++io.h OLD_FILES+=usr/include/c++/3.3/bits/c++locale.h OLD_FILES+=usr/include/c++/3.3/bits/c++locale_internal.h OLD_FILES+=usr/include/c++/3.3/bits/char_traits.h OLD_FILES+=usr/include/c++/3.3/bits/cmath.tcc OLD_FILES+=usr/include/c++/3.3/bits/codecvt.h OLD_FILES+=usr/include/c++/3.3/bits/codecvt_specializations.h OLD_FILES+=usr/include/c++/3.3/bits/concept_check.h OLD_FILES+=usr/include/c++/3.3/bits/cpp_type_traits.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_base.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_inline.h OLD_FILES+=usr/include/c++/3.3/bits/ctype_noninline.h OLD_FILES+=usr/include/c++/3.3/bits/deque.tcc OLD_FILES+=usr/include/c++/3.3/bits/fpos.h OLD_FILES+=usr/include/c++/3.3/bits/fstream.tcc OLD_FILES+=usr/include/c++/3.3/bits/functexcept.h OLD_FILES+=usr/include/c++/3.3/bits/generic_shadow.h OLD_FILES+=usr/include/c++/3.3/bits/gslice.h OLD_FILES+=usr/include/c++/3.3/bits/gslice_array.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-default.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-posix.h OLD_FILES+=usr/include/c++/3.3/bits/gthr-single.h OLD_FILES+=usr/include/c++/3.3/bits/gthr.h OLD_FILES+=usr/include/c++/3.3/bits/indirect_array.h OLD_FILES+=usr/include/c++/3.3/bits/ios_base.h OLD_FILES+=usr/include/c++/3.3/bits/istream.tcc OLD_FILES+=usr/include/c++/3.3/bits/list.tcc OLD_FILES+=usr/include/c++/3.3/bits/locale_classes.h OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.h OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.tcc OLD_FILES+=usr/include/c++/3.3/bits/localefwd.h OLD_FILES+=usr/include/c++/3.3/bits/mask_array.h OLD_FILES+=usr/include/c++/3.3/bits/messages_members.h OLD_FILES+=usr/include/c++/3.3/bits/os_defines.h OLD_FILES+=usr/include/c++/3.3/bits/ostream.tcc OLD_FILES+=usr/include/c++/3.3/bits/pthread_allocimpl.h OLD_FILES+=usr/include/c++/3.3/bits/slice.h OLD_FILES+=usr/include/c++/3.3/bits/slice_array.h OLD_FILES+=usr/include/c++/3.3/bits/sstream.tcc OLD_FILES+=usr/include/c++/3.3/bits/stl_algo.h OLD_FILES+=usr/include/c++/3.3/bits/stl_algobase.h OLD_FILES+=usr/include/c++/3.3/bits/stl_alloc.h OLD_FILES+=usr/include/c++/3.3/bits/stl_bvector.h OLD_FILES+=usr/include/c++/3.3/bits/stl_construct.h OLD_FILES+=usr/include/c++/3.3/bits/stl_deque.h OLD_FILES+=usr/include/c++/3.3/bits/stl_function.h OLD_FILES+=usr/include/c++/3.3/bits/stl_heap.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_funcs.h OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_types.h OLD_FILES+=usr/include/c++/3.3/bits/stl_list.h OLD_FILES+=usr/include/c++/3.3/bits/stl_map.h OLD_FILES+=usr/include/c++/3.3/bits/stl_multimap.h OLD_FILES+=usr/include/c++/3.3/bits/stl_multiset.h OLD_FILES+=usr/include/c++/3.3/bits/stl_numeric.h OLD_FILES+=usr/include/c++/3.3/bits/stl_pair.h OLD_FILES+=usr/include/c++/3.3/bits/stl_pthread_alloc.h OLD_FILES+=usr/include/c++/3.3/bits/stl_queue.h OLD_FILES+=usr/include/c++/3.3/bits/stl_raw_storage_iter.h OLD_FILES+=usr/include/c++/3.3/bits/stl_relops.h 
OLD_FILES+=usr/include/c++/3.3/bits/stl_set.h OLD_FILES+=usr/include/c++/3.3/bits/stl_stack.h OLD_FILES+=usr/include/c++/3.3/bits/stl_tempbuf.h OLD_FILES+=usr/include/c++/3.3/bits/stl_threads.h OLD_FILES+=usr/include/c++/3.3/bits/stl_tree.h OLD_FILES+=usr/include/c++/3.3/bits/stl_uninitialized.h OLD_FILES+=usr/include/c++/3.3/bits/stl_vector.h OLD_FILES+=usr/include/c++/3.3/bits/stream_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/streambuf.tcc OLD_FILES+=usr/include/c++/3.3/bits/streambuf_iterator.h OLD_FILES+=usr/include/c++/3.3/bits/stringfwd.h OLD_FILES+=usr/include/c++/3.3/bits/time_members.h OLD_FILES+=usr/include/c++/3.3/bits/type_traits.h OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.h OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.tcc OLD_FILES+=usr/include/c++/3.3/bits/valarray_meta.h OLD_FILES+=usr/include/c++/3.3/bits/vector.tcc OLD_DIRS+=usr/include/c++/3.3/bits OLD_FILES+=usr/include/c++/3.3/bitset OLD_FILES+=usr/include/c++/3.3/cassert OLD_FILES+=usr/include/c++/3.3/cctype OLD_FILES+=usr/include/c++/3.3/cerrno OLD_FILES+=usr/include/c++/3.3/cfloat OLD_FILES+=usr/include/c++/3.3/ciso646 OLD_FILES+=usr/include/c++/3.3/climits OLD_FILES+=usr/include/c++/3.3/clocale OLD_FILES+=usr/include/c++/3.3/cmath OLD_FILES+=usr/include/c++/3.3/complex OLD_FILES+=usr/include/c++/3.3/csetjmp OLD_FILES+=usr/include/c++/3.3/csignal OLD_FILES+=usr/include/c++/3.3/cstdarg OLD_FILES+=usr/include/c++/3.3/cstddef OLD_FILES+=usr/include/c++/3.3/cstdio OLD_FILES+=usr/include/c++/3.3/cstdlib OLD_FILES+=usr/include/c++/3.3/cstring OLD_FILES+=usr/include/c++/3.3/ctime OLD_FILES+=usr/include/c++/3.3/cwchar OLD_FILES+=usr/include/c++/3.3/cwctype OLD_FILES+=usr/include/c++/3.3/cxxabi.h OLD_FILES+=usr/include/c++/3.3/deque OLD_FILES+=usr/include/c++/3.3/exception OLD_FILES+=usr/include/c++/3.3/exception_defines.h OLD_FILES+=usr/include/c++/3.3/ext/algorithm OLD_FILES+=usr/include/c++/3.3/ext/enc_filebuf.h OLD_FILES+=usr/include/c++/3.3/ext/functional OLD_FILES+=usr/include/c++/3.3/ext/hash_map OLD_FILES+=usr/include/c++/3.3/ext/hash_set OLD_FILES+=usr/include/c++/3.3/ext/iterator OLD_FILES+=usr/include/c++/3.3/ext/memory OLD_FILES+=usr/include/c++/3.3/ext/numeric OLD_FILES+=usr/include/c++/3.3/ext/rb_tree OLD_FILES+=usr/include/c++/3.3/ext/rope OLD_FILES+=usr/include/c++/3.3/ext/ropeimpl.h OLD_FILES+=usr/include/c++/3.3/ext/slist OLD_FILES+=usr/include/c++/3.3/ext/stdio_filebuf.h OLD_FILES+=usr/include/c++/3.3/ext/stl_hash_fun.h OLD_FILES+=usr/include/c++/3.3/ext/stl_hashtable.h OLD_FILES+=usr/include/c++/3.3/ext/stl_rope.h OLD_DIRS+=usr/include/c++/3.3/ext OLD_FILES+=usr/include/c++/3.3/fstream OLD_FILES+=usr/include/c++/3.3/functional OLD_FILES+=usr/include/c++/3.3/iomanip OLD_FILES+=usr/include/c++/3.3/ios OLD_FILES+=usr/include/c++/3.3/iosfwd OLD_FILES+=usr/include/c++/3.3/iostream OLD_FILES+=usr/include/c++/3.3/istream OLD_FILES+=usr/include/c++/3.3/iterator OLD_FILES+=usr/include/c++/3.3/limits OLD_FILES+=usr/include/c++/3.3/list OLD_FILES+=usr/include/c++/3.3/locale OLD_FILES+=usr/include/c++/3.3/map OLD_FILES+=usr/include/c++/3.3/memory OLD_FILES+=usr/include/c++/3.3/new OLD_FILES+=usr/include/c++/3.3/numeric OLD_FILES+=usr/include/c++/3.3/ostream OLD_FILES+=usr/include/c++/3.3/queue OLD_FILES+=usr/include/c++/3.3/set OLD_FILES+=usr/include/c++/3.3/sstream OLD_FILES+=usr/include/c++/3.3/stack OLD_FILES+=usr/include/c++/3.3/stdexcept OLD_FILES+=usr/include/c++/3.3/streambuf OLD_FILES+=usr/include/c++/3.3/string OLD_FILES+=usr/include/c++/3.3/typeinfo OLD_FILES+=usr/include/c++/3.3/utility 
OLD_FILES+=usr/include/c++/3.3/valarray OLD_FILES+=usr/include/c++/3.3/vector # 20040713: fla(4) removed. OLD_FILES+=usr/share/man/man4/fla.4.gz # 200407XX OLD_FILES+=usr/sbin/kernbb OLD_FILES+=usr/sbin/ntp-genkeys OLD_FILES+=usr/sbin/ntptimeset OLD_FILES+=usr/share/man/man8/kernbb.8.gz OLD_FILES+=usr/share/man/man8/ntp-genkeys.8.gz # 20040627: usbdevs.h and usbdevs_data.h removal OLD_FILES+=usr/include/dev/usb/usbdevs.h OLD_FILES+=usr/include/dev/usb/usbdevs_data.h # 200406XX OLD_FILES+=usr/bin/gasp OLD_FILES+=usr/bin/gdbreplay OLD_FILES+=usr/share/man/man1/gasp.1.gz OLD_FILES+=sbin/mountd OLD_FILES+=sbin/mount_fdesc OLD_FILES+=sbin/mount_umap OLD_FILES+=sbin/mount_union OLD_FILES+=sbin/mount_msdos OLD_FILES+=sbin/mount_null OLD_FILES+=sbin/mount_kernfs # 200405XX: arl OLD_FILES+=usr/sbin/arlconfig OLD_FILES+=usr/share/man/man8/arlconfig.8.gz # 200403XX OLD_FILES+=bin/raidctl OLD_FILES+=sbin/raidctl OLD_FILES+=usr/bin/sasc OLD_FILES+=usr/sbin/sgsc OLD_FILES+=usr/sbin/stlload OLD_FILES+=usr/sbin/stlstats OLD_FILES+=usr/share/man/man1/sasc.1.gz OLD_FILES+=usr/share/man/man1/sgsc.1.gz OLD_FILES+=usr/share/man/man4/i386/stl.4.gz OLD_FILES+=usr/share/man/man8/raidctl.8.gz # 20040229: clean_environment() was removed after 3 days OLD_FILES+=usr/share/man/man3/clean_environment.3.gz # 20040119: installed as `isdntel' in newer systems OLD_FILES+=etc/isdn/isdntel.sh # 200XYYZZ: /lib transition glitches OLD_FILES+=lib/libalias.so OLD_FILES+=lib/libatm.so OLD_FILES+=lib/libbsdxml.so OLD_FILES+=lib/libc.so OLD_FILES+=lib/libcam.so OLD_FILES+=lib/libcrypt.so OLD_FILES+=lib/libcrypto.so OLD_FILES+=lib/libdevstat.so OLD_FILES+=lib/libedit.so OLD_FILES+=lib/libgeom.so OLD_FILES+=lib/libipsec.so OLD_FILES+=lib/libipx.so OLD_FILES+=lib/libkvm.so OLD_FILES+=lib/libm.so OLD_FILES+=lib/libmd.so OLD_FILES+=lib/libncurses.so OLD_FILES+=lib/libreadline.so OLD_FILES+=lib/libsbuf.so OLD_FILES+=lib/libufs.so OLD_FILES+=lib/libz.so # 200312XX OLD_FILES+=bin/cxconfig OLD_FILES+=sbin/cxconfig OLD_FILES+=usr/share/man/man8/cxconfig.8.gz # 20031016: MULTI_DRIVER_MODULE macro removed OLD_FILES+=usr/share/man/man9/MULTI_DRIVER_MODULE.9.gz # 200309XX OLD_FILES+=usr/bin/symorder OLD_FILES+=usr/share/man/man1/symorder.1.gz # 200308XX OLD_FILES+=usr/sbin/amldb OLD_FILES+=usr/share/man/man8/amldb.8.gz # 200307XX OLD_FILES+=sbin/mount_nwfs OLD_FILES+=sbin/mount_portalfs OLD_FILES+=sbin/mount_smbfs # 200306XX OLD_FILES+=usr/sbin/dev_mkdb OLD_FILES+=usr/share/man/man8/dev_mkdb.8.gz # 200304XX OLD_FILES+=usr/lib/libcipher.a OLD_FILES+=usr/lib/libcipher.so OLD_FILES+=usr/lib/libcipher_p.a OLD_FILES+=usr/lib/libgmp.a OLD_FILES+=usr/lib/libgmp.so OLD_FILES+=usr/lib/libgmp_p.a OLD_FILES+=usr/lib/libperl.a OLD_FILES+=usr/lib/libperl.so OLD_FILES+=usr/lib/libperl_p.a OLD_FILES+=usr/lib/libposix1e.a OLD_FILES+=usr/lib/libposix1e.so OLD_FILES+=usr/lib/libposix1e_p.a OLD_FILES+=usr/lib/libskey.a OLD_FILES+=usr/lib/libskey.so OLD_FILES+=usr/lib/libskey_p.a OLD_FILES+=usr/libexec/tradcpp0 OLD_FILES+=usr/libexec/cpp0 # 200304XX: removal of xten OLD_FILES+=usr/libexec/xtend OLD_FILES+=usr/sbin/xten OLD_FILES+=usr/share/man/man1/xten.1.gz OLD_FILES+=usr/share/man/man8/xtend.8.gz # 200303XX OLD_FILES+=usr/lib/libacl.so OLD_FILES+=usr/lib/libdescrypt.so OLD_FILES+=usr/lib/libf2c.so OLD_FILES+=usr/lib/libg++.so OLD_FILES+=usr/lib/libkdb.so OLD_FILES+=usr/lib/librsaINTL.so OLD_FILES+=usr/lib/libscrypt.so OLD_FILES+=usr/lib/libss.so # 200302XX OLD_FILES+=usr/lib/libacl.a OLD_FILES+=usr/lib/libacl_p.a OLD_FILES+=usr/lib/libkadm.a
OLD_FILES+=usr/lib/libkadm.so OLD_FILES+=usr/lib/libkadm_p.a OLD_FILES+=usr/lib/libkafs.a OLD_FILES+=usr/lib/libkafs.so OLD_FILES+=usr/lib/libkafs_p.a OLD_FILES+=usr/lib/libkdb.a OLD_FILES+=usr/lib/libkdb_p.a OLD_FILES+=usr/lib/libkrb.a OLD_FILES+=usr/lib/libkrb.so OLD_FILES+=usr/lib/libkrb_p.a OLD_FILES+=usr/share/man/man3/SSL_CIPHER_get_name.3.gz OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_add_extra_chain_cert.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_add_session.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_ctrl.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_flush_sessions.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_free.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_get_verify_mode.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_load_verify_locations.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_new.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_number.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_cache_size.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_get_cb.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_sessions.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_store.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_verify_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cipher_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_CA_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_cert_cb.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_default_passwd_cb.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_generate_session_id.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_info_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_max_cert_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_mode.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_msg_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_options.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_quiet_shutdown.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_cache_mode.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_id_context.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_ssl_version.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_timeout.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_dh_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_set_verify.3.gz OLD_FILES+=usr/share/man/man3/SSL_CTX_use_certificate.3.gz OLD_FILES+=usr/share/man/man3/SSL_SESSION_free.3.gz OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_ex_new_index.3.gz OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_time.3.gz OLD_FILES+=usr/share/man/man3/SSL_accept.3.gz OLD_FILES+=usr/share/man/man3/SSL_alert_type_string.3.gz OLD_FILES+=usr/share/man/man3/SSL_clear.3.gz OLD_FILES+=usr/share/man/man3/SSL_connect.3.gz OLD_FILES+=usr/share/man/man3/SSL_do_handshake.3.gz OLD_FILES+=usr/share/man/man3/SSL_free.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_SSL_CTX.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_ciphers.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_client_CA_list.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_current_cipher.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_default_timeout.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_error.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_ex_data_X509_STORE_CTX_idx.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_ex_new_index.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_fd.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_peer_cert_chain.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_peer_certificate.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_rbio.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_verify_result.3.gz OLD_FILES+=usr/share/man/man3/SSL_get_version.3.gz OLD_FILES+=usr/share/man/man3/SSL_library_init.3.gz OLD_FILES+=usr/share/man/man3/SSL_load_client_CA_file.3.gz OLD_FILES+=usr/share/man/man3/SSL_new.3.gz OLD_FILES+=usr/share/man/man3/SSL_pending.3.gz OLD_FILES+=usr/share/man/man3/SSL_read.3.gz OLD_FILES+=usr/share/man/man3/SSL_rstate_string.3.gz OLD_FILES+=usr/share/man/man3/SSL_session_reused.3.gz OLD_FILES+=usr/share/man/man3/SSL_set_bio.3.gz OLD_FILES+=usr/share/man/man3/SSL_set_connect_state.3.gz OLD_FILES+=usr/share/man/man3/SSL_set_fd.3.gz OLD_FILES+=usr/share/man/man3/SSL_set_session.3.gz OLD_FILES+=usr/share/man/man3/SSL_set_shutdown.3.gz OLD_FILES+=usr/share/man/man3/SSL_set_verify_result.3.gz OLD_FILES+=usr/share/man/man3/SSL_shutdown.3.gz OLD_FILES+=usr/share/man/man3/SSL_state_string.3.gz OLD_FILES+=usr/share/man/man3/SSL_want.3.gz OLD_FILES+=usr/share/man/man3/SSL_write.3.gz OLD_FILES+=usr/share/man/man3/d2i_SSL_SESSION.3.gz # 200301XX OLD_FILES+=usr/share/man/man3/des_3cbc_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_3ecb_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_cbc_cksum.3.gz OLD_FILES+=usr/share/man/man3/des_cbc_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_cfb_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_ecb_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_enc_read.3.gz OLD_FILES+=usr/share/man/man3/des_enc_write.3.gz OLD_FILES+=usr/share/man/man3/des_is_weak_key.3.gz OLD_FILES+=usr/share/man/man3/des_key_sched.3.gz OLD_FILES+=usr/share/man/man3/des_ofb_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_pcbc_encrypt.3.gz OLD_FILES+=usr/share/man/man3/des_quad_cksum.3.gz OLD_FILES+=usr/share/man/man3/des_random_key.3.gz OLD_FILES+=usr/share/man/man3/des_read_2password.3.gz OLD_FILES+=usr/share/man/man3/des_read_password.3.gz OLD_FILES+=usr/share/man/man3/des_read_pw_string.3.gz OLD_FILES+=usr/share/man/man3/des_set_key.3.gz OLD_FILES+=usr/share/man/man3/des_set_odd_parity.3.gz OLD_FILES+=usr/share/man/man3/des_string_to_2key.3.gz OLD_FILES+=usr/share/man/man3/des_string_to_key.3.gz # 200212XX OLD_FILES+=usr/sbin/kenv OLD_FILES+=usr/bin/kenv OLD_FILES+=usr/sbin/elf2aout # 200210XX OLD_FILES+=usr/include/libusbhid.h OLD_FILES+=usr/share/man/man3/All_FreeBSD.3.gz OLD_FILES+=usr/share/man/man3/CheckRules.3.gz OLD_FILES+=usr/share/man/man3/ChunkCanBeRoot.3.gz OLD_FILES+=usr/share/man/man3/Clone_Disk.3.gz OLD_FILES+=usr/share/man/man3/Collapse_Chunk.3.gz OLD_FILES+=usr/share/man/man3/Collapse_Disk.3.gz OLD_FILES+=usr/share/man/man3/Create_Chunk.3.gz OLD_FILES+=usr/share/man/man3/Create_Chunk_DWIM.3.gz OLD_FILES+=usr/share/man/man3/Cyl_Aligned.3.gz OLD_FILES+=usr/share/man/man3/Debug_Disk.3.gz OLD_FILES+=usr/share/man/man3/Delete_Chunk.3.gz OLD_FILES+=usr/share/man/man3/Disk_Names.3.gz OLD_FILES+=usr/share/man/man3/Free_Disk.3.gz OLD_FILES+=usr/share/man/man3/MakeDev.3.gz OLD_FILES+=usr/share/man/man3/MakeDevDisk.3.gz OLD_FILES+=usr/share/man/man3/Next_Cyl_Aligned.3.gz OLD_FILES+=usr/share/man/man3/Next_Track_Aligned.3.gz OLD_FILES+=usr/share/man/man3/Open_Disk.3.gz OLD_FILES+=usr/share/man/man3/Prev_Cyl_Aligned.3.gz OLD_FILES+=usr/share/man/man3/Prev_Track_Aligned.3.gz OLD_FILES+=usr/share/man/man3/Set_Bios_Geom.3.gz OLD_FILES+=usr/share/man/man3/Set_Boot_Blocks.3.gz OLD_FILES+=usr/share/man/man3/Set_Boot_Mgr.3.gz OLD_FILES+=usr/share/man/man3/ShowChunkFlags.3.gz OLD_FILES+=usr/share/man/man3/Track_Aligned.3.gz OLD_FILES+=usr/share/man/man3/Write_Disk.3.gz OLD_FILES+=usr/share/man/man3/slice_type_name.3.gz # 200210XX: 
# most games moved to ports OLD_FILES+=usr/share/man/man6/adventure.6.gz OLD_FILES+=usr/share/man/man6/arithmetic.6.gz OLD_FILES+=usr/share/man/man6/atc.6.gz OLD_FILES+=usr/share/man/man6/backgammon.6.gz OLD_FILES+=usr/share/man/man6/battlestar.6.gz OLD_FILES+=usr/share/man/man6/bs.6.gz OLD_FILES+=usr/share/man/man6/canfield.6.gz OLD_FILES+=usr/share/man/man6/cfscores.6.gz OLD_FILES+=usr/share/man/man6/cribbage.6.gz OLD_FILES+=usr/share/man/man6/fish.6.gz OLD_FILES+=usr/share/man/man6/hack.6.gz OLD_FILES+=usr/share/man/man6/hangman.6.gz OLD_FILES+=usr/share/man/man6/larn.6.gz OLD_FILES+=usr/share/man/man6/mille.6.gz OLD_FILES+=usr/share/man/man6/phantasia.6.gz OLD_FILES+=usr/share/man/man6/piano.6.gz OLD_FILES+=usr/share/man/man6/pig.6.gz OLD_FILES+=usr/share/man/man6/quiz.6.gz OLD_FILES+=usr/share/man/man6/rain.6.gz OLD_FILES+=usr/share/man/man6/robots.6.gz OLD_FILES+=usr/share/man/man6/rogue.6.gz OLD_FILES+=usr/share/man/man6/sail.6.gz OLD_FILES+=usr/share/man/man6/snake.6.gz OLD_FILES+=usr/share/man/man6/snscore.6.gz OLD_FILES+=usr/share/man/man6/trek.6.gz OLD_FILES+=usr/share/man/man6/wargames.6.gz OLD_FILES+=usr/share/man/man6/worm.6.gz OLD_FILES+=usr/share/man/man6/worms.6.gz OLD_FILES+=usr/share/man/man6/wump.6.gz # 200207XX OLD_FILES+=usr/share/man/man1aout/ar.1aout.gz OLD_FILES+=usr/share/man/man1aout/as.1aout.gz OLD_FILES+=usr/share/man/man1aout/ld.1aout.gz OLD_FILES+=usr/share/man/man1aout/nm.1aout.gz OLD_FILES+=usr/share/man/man1aout/ranlib.1aout.gz OLD_FILES+=usr/share/man/man1aout/size.1aout.gz OLD_FILES+=usr/share/man/man1aout/strings.1aout.gz OLD_FILES+=usr/share/man/man1aout/strip.1aout.gz OLD_FILES+=bin/mountd OLD_FILES+=bin/nfsd # 20020707 sbin/nfsd -> usr.sbin/nfsd OLD_FILES+=sbin/nfsd # 200206XX OLD_FILES+=usr/lib/libpam_ssh.a OLD_FILES+=usr/lib/libpam_ssh_p.a OLD_FILES+=usr/bin/help OLD_FILES+=usr/bin/sccs .if ${TARGET_ARCH} != "amd64" && ${TARGET} != "arm" && ${TARGET_ARCH} != "i386" && ${TARGET} != "powerpc" OLD_FILES+=usr/bin/gdbserver .endif OLD_FILES+=usr/bin/ssh-keysign OLD_FILES+=usr/sbin/gifconfig OLD_FILES+=usr/sbin/prefix # 200205XX OLD_FILES+=usr/bin/doscmd # 200204XX OLD_FILES+=usr/bin/a2p OLD_FILES+=usr/bin/ptx OLD_FILES+=usr/bin/pod2text OLD_FILES+=usr/bin/pod2man OLD_FILES+=usr/bin/pod2latex OLD_FILES+=usr/bin/pod2html OLD_FILES+=usr/bin/h2ph OLD_FILES+=usr/bin/dprofpp OLD_FILES+=usr/bin/c2ph OLD_FILES+=usr/bin/h2xs OLD_FILES+=usr/bin/pl2pm OLD_FILES+=usr/bin/splain OLD_FILES+=usr/bin/s2p OLD_FILES+=usr/bin/find2perl OLD_FILES+=usr/sbin/pkg_update OLD_FILES+=usr/sbin/scriptdump # 20020409 GC kget(1), userconfig is long dead.
OLD_FILES+=sbin/kget OLD_FILES+=usr/share/man/man8/kget.8.gz # 200203XX OLD_FILES+=usr/lib/libss.a OLD_FILES+=usr/lib/libss_p.a OLD_FILES+=usr/lib/libtelnet.a OLD_FILES+=usr/lib/libtelnet_p.a OLD_FILES+=usr/sbin/diskpart # 200202XX OLD_FILES+=usr/bin/gprof4 # 200201XX OLD_FILES+=usr/sbin/linux # 2001XXXX OLD_FILES+=usr/bin/joy OLD_FILES+=usr/sbin/ibcs2 OLD_FILES+=usr/sbin/svr4 OLD_FILES+=usr/bin/chflags OLD_FILES+=usr/sbin/uuconv OLD_FILES+=usr/sbin/uuchk OLD_FILES+=usr/sbin/portmap OLD_FILES+=usr/sbin/pmap_set OLD_FILES+=usr/sbin/pmap_dump OLD_FILES+=usr/sbin/mcon OLD_FILES+=usr/sbin/stlstty OLD_FILES+=usr/sbin/ispppcontrol OLD_FILES+=usr/sbin/rndcontrol # 20011001: UUCP migration to ports OLD_FILES+=usr/bin/uucp OLD_FILES+=usr/bin/uulog OLD_FILES+=usr/bin/uuname OLD_FILES+=usr/bin/uupick OLD_FILES+=usr/bin/uusched OLD_FILES+=usr/bin/uustat OLD_FILES+=usr/bin/uuto OLD_FILES+=usr/bin/uux OLD_FILES+=usr/libexec/uucp/uucico OLD_FILES+=usr/libexec/uucp/uuxqt OLD_FILES+=usr/libexec/uucpd OLD_FILES+=usr/share/man/man1/uuconv.1.gz OLD_FILES+=usr/share/man/man1/uucp.1.gz OLD_FILES+=usr/share/man/man1/uulog.1.gz OLD_FILES+=usr/share/man/man1/uuname.1.gz OLD_FILES+=usr/share/man/man1/uupick.1.gz OLD_FILES+=usr/share/man/man1/uustat.1.gz OLD_FILES+=usr/share/man/man1/uuto.1.gz OLD_FILES+=usr/share/man/man1/uux.1.gz OLD_FILES+=usr/share/man/man8/uuchk.8.gz OLD_FILES+=usr/share/man/man8/uucico.8.gz OLD_FILES+=usr/share/man/man8/uucpd.8.gz OLD_FILES+=usr/share/man/man8/uusched.8.gz OLD_FILES+=usr/share/man/man8/uuxqt.8.gz # 20010523 mount_portal -> mount_portalfs OLD_FILES+=sbin/mount_portal OLD_FILES+=usr/share/man/man8/mount_portal.8.gz # 200104XX OLD_FILES+=usr/lib/libdescrypt.a OLD_FILES+=usr/lib/libscrypt.a OLD_FILES+=usr/lib/libscrypt_p.a OLD_FILES+=usr/sbin/pim6stat OLD_FILES+=usr/sbin/pim6sd OLD_FILES+=usr/sbin/pim6dd # 20010217 OLD_FILES+=usr/share/doc/bind/misc/dns-setup # 20001200 OLD_FILES+=usr/lib/libgcc_r_pic.a # 200009XX OLD_FILES+=usr/lib/libRSAglue.a OLD_FILES+=usr/lib/libRSAglue.so OLD_FILES+=usr/lib/librsaINTL.a OLD_FILES+=usr/lib/librsaUSA.a OLD_FILES+=usr/lib/librsaUSA.so # 200002XX ? OLD_FILES+=usr/lib/libf2c.a OLD_FILES+=usr/lib/libf2c_p.a OLD_FILES+=usr/lib/libg++.a OLD_FILES+=usr/lib/libg++_p.a # 20001006 OLD_FILES+=usr/bin/miniperl # 20000810 OLD_FILES+=usr/bin/sperl # 200001XX OLD_FILES+=usr/sbin/apmconf # 199911XX OLD_FILES+=usr/sbin/ipfstat OLD_FILES+=usr/sbin/ipmon OLD_FILES+=usr/sbin/ipnat OLD_FILES+=usr/sbin/bad144 OLD_FILES+=usr/sbin/wormcontrol OLD_FILES+=usr/sbin/named-bootconf OLD_FILES+=usr/sbin/kvm_mkdb OLD_FILES+=usr/sbin/keyadmin # 199909XX OLD_FILES+=usr/lib/libdesrypt_p.a OLD_FILES+=sbin/ft # 199903XX OLD_FILES+=sbin/modload OLD_FILES+=sbin/modunload OLD_FILES+=usr/sbin/natd # 199812XX OLD_FILES+=sbin/dset # 199809XX OLD_FILES+=sbin/scsi OLD_FILES+=sbin/scsiformat OLD_FILES+=usr/sbin/ncrcontrol OLD_FILES+=usr/sbin/tickadj # 199806XX OLD_FILES+=usr/sbin/mkdosfs # 199801XX OLD_FILES+=sbin/mount_lfs OLD_FILES+=sbin/newlfs OLD_FILES+=sbin/dumplfs OLD_FILES+=usr/sbin/qcamcontrol OLD_FILES+=usr/sbin/supscan # 1997XXXX OLD_FILES+=usr/sbin/sysctl OLD_FILES+=usr/sbin/ctm_scan OLD_FILES+=usr/sbin/addgroup OLD_FILES+=usr/sbin/rmgroup # 1996XXXX OLD_FILES+=sbin/rdisc OLD_FILES+=usr/sbin/cdplay OLD_FILES+=usr/sbin/supfilesrv OLD_FILES+=usr/sbin/routed OLD_FILES+=usr/sbin/lsdev OLD_FILES+=usr/sbin/yppasswdd ## unsorted # do we still support aout builds? 
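# (A sketch of one way to answer that on a live system: the commented-out
# entries below all name files under usr/lib/aout, so e.g.
#   ls /usr/lib/aout/ 2>/dev/null
# shows whether any of the old a.out run-time objects are still installed;
# if nothing turns up, the entries could be re-enabled.)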
#OLD_FILES+=usr/lib/aout/c++rt0.o #OLD_FILES+=usr/lib/aout/crt0.o #OLD_FILES+=usr/lib/aout/gcrt0.o #OLD_FILES+=usr/lib/aout/scrt0.o #OLD_FILES+=usr/lib/aout/sgcrt0.o OLD_FILES+=usr/bin/sperl5 OLD_FILES+=usr/bin/perl5.6.0 OLD_FILES+=usr/bin/sperl5.6.0 OLD_FILES+=usr/bin/perlbc OLD_FILES+=usr/bin/perl5.00503 OLD_FILES+=usr/bin/sperl5.00503 OLD_FILES+=usr/bin/perlbug OLD_FILES+=usr/bin/perlcc OLD_FILES+=usr/bin/perldoc OLD_FILES+=usr/bin/suidperl OLD_FILES+=usr/lib/pam_ftp.so OLD_FILES+=usr/libdata/perl/5.00503/CGI/Apache.pm OLD_FILES+=usr/libdata/perl/5.00503/CGI/Carp.pm OLD_FILES+=usr/libdata/perl/5.00503/CGI/Cookie.pm OLD_FILES+=usr/libdata/perl/5.00503/CGI/Fast.pm OLD_FILES+=usr/libdata/perl/5.00503/CGI/Push.pm OLD_FILES+=usr/libdata/perl/5.00503/CGI/Switch.pm OLD_FILES+=usr/libdata/perl/5.00503/CPAN/FirstTime.pm OLD_FILES+=usr/libdata/perl/5.00503/CPAN/Nox.pm OLD_FILES+=usr/libdata/perl/5.00503/Class/Struct.pm OLD_FILES+=usr/libdata/perl/5.00503/Devel/SelfStubber.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Command.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Embed.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Install.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Installed.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Liblist.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_OS2.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Unix.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_VMS.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Win32.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MakeMaker.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Manifest.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mkbootstrap.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mksymlists.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Packlist.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/inst OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/testlib.pm OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/typemap OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/xsubpp OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Mac.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/OS2.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Unix.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/VMS.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Win32.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Basename.pm OLD_FILES+=usr/libdata/perl/5.00503/File/CheckTree.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Compare.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Copy.pm OLD_FILES+=usr/libdata/perl/5.00503/File/DosGlob.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Find.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Path.pm OLD_FILES+=usr/libdata/perl/5.00503/File/Spec.pm OLD_FILES+=usr/libdata/perl/5.00503/File/stat.pm OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Long.pm OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Std.pm OLD_FILES+=usr/libdata/perl/5.00503/I18N/Collate.pm OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open2.pm OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open3.pm OLD_FILES+=usr/libdata/perl/5.00503/Math/BigFloat.pm OLD_FILES+=usr/libdata/perl/5.00503/Math/BigInt.pm OLD_FILES+=usr/libdata/perl/5.00503/Math/Complex.pm OLD_FILES+=usr/libdata/perl/5.00503/Math/Trig.pm OLD_FILES+=usr/libdata/perl/5.00503/Net/Ping.pm OLD_FILES+=usr/libdata/perl/5.00503/Net/hostent.pm OLD_FILES+=usr/libdata/perl/5.00503/Net/netent.pm OLD_FILES+=usr/libdata/perl/5.00503/Net/protoent.pm OLD_FILES+=usr/libdata/perl/5.00503/Net/servent.pm OLD_FILES+=usr/libdata/perl/5.00503/Pod/Functions.pm OLD_FILES+=usr/libdata/perl/5.00503/Pod/Html.pm 
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Text.pm
OLD_FILES+=usr/libdata/perl/5.00503/Search/Dict.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Hostname.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Syslog.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Cap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Complete.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/ReadLine.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test/Harness.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Abbrev.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/ParseWords.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Soundex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Tabs.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Wrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Array.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Hash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/RefHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Scalar.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/SubstrHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/Local.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/gmtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/localtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/tm.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/grent.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/pwent.pm
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/GetOptions.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/FindOption.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Configure.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/config.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Croak.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Deparse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/CC.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Debug.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Showlex.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/makeliblinks
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bblock.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/cc_harness
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bytecode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Stackobj.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Xref.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Lint.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Asmdata.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Assembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Disassembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/disassemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/assemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Terse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/C.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/EXTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/INTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSlock.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/av.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/bytecode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/byterun.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cc_runtime.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/config.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cop.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/dosish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embed.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embedvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/fakethr.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/form.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/gv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/handy.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/hv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/intrpvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/iperlsys.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/keywords.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/mg.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/nostdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objXSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objpp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/op.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/opcode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/patchlevel.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perl.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsfio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlvars.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perly.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp_proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regcomp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regexp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regnodes.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/scope.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/sv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thrdvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thread.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/unixish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/util.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/Data/Dumper.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Select.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Seekable.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Pipe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/SysV.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Msg.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Semaphore.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/.exists
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_findfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_expandspec.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_find_symbol_anywhere.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/DynaLoader.a
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/assert.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tolower.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/toupper.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/closedir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/opendir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/readdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewinddir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/errno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/creat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fcntl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atan2.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/cos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fabs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/log.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/pow.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sqrt.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/longjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/kill.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/feof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/siglongjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sigsetjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/raise.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/offsetof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/clearerr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fclose.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fdopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fileno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fread.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/freopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fseek.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ferror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fflush.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fsetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ftell.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fwrite.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/perror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/printf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/puts.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/remove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rename.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewind.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/scanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tmpfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ungetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vfprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vsprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/abs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atexit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atoi.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atol.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/bsearch.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/calloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/div.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/free.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getenv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/labs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ldiv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/malloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/qsort.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/realloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/srand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/system.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memmove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memset.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strerror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strlen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strpbrk.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strrchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strstr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strtok.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chmod.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fstat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/mkdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/stat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/umask.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/wait.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/waitpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gmtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/localtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/time.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/alarm.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chown.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execle.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execlp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execve.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execvp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fork.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getcwd.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getegid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/geteuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgroups.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getlogin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpgrp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getppid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/isatty.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/link.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rmdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setvbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sleep.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/unlink.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/utime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/sdbm/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Errno/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/O.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/perllocal.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/DB_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Errno.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Fcntl.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/NDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Safe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Opcode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/ops.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/SDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/attrs.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/_h2ph_pre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/a.out.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_ccb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_extend.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/alias.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/assert.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bitstring.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/calendar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/camlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_right.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/curses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/devstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dialog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/disktab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dlfcn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/eti.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fetch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fnmatch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/form.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fstab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ftpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fts.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/glob.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gnuregex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/grp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/histedit.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ifaddrs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/iso646.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/kvm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libatm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libdisk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libgen.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libutil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/link.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/locale.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/login_cap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/FlexLexer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/PlotFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/SFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/_G_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/builtinbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/defalloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/editbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/floatio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/fstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/indstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iolibio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iomanip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostreamP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/istream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libioP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/new.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/parsestream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pfstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/procbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pthread_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ropeimpl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stdiostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_construct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_fun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_numeric.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_raw_storage_iter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_relops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_uninitialized.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/streambuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/type_traits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/math.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/memory.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/menu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mpool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ncurses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ndbm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netdb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nl_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objformat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/opie.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/osreldate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/panel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/paths.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-namedb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread_np.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pwd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ranlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regexp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/resolv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rune.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/runetype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/search.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sgtty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/skey.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stddef.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/string.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stringlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strings.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/struct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sysexits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/taclib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tcpd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/term.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ttyent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unctrl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/values.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vgl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/ftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/inet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/tftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/assertions.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/ctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/dst.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/eventlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/irpmarshall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/logging.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/memcluster.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ansi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_bios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asc_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asmacros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asnames.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/atomic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bootinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_at386.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_memio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pc98.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio_ind.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cdk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/clock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/comstats.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/console.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpufunc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cputypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cronyx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/db_machdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/dvcfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/frame.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globaldata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globals.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/gsc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_cause.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_rbch_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_tel_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_trace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wavelan_ieee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wl_wavelan.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/iic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/in_cksum.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_bt848.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_ctx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_fd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_meteor.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/md_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mouse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mpapic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mtpr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_dma.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/npx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcaudioio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb_ext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcvt_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/perfmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/physio_proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/profile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/psl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sigframe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smptests.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/speaker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/specialreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/spigot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sysarch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/trap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/tss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/uc_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ultrasound.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vm86.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vmparam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/wtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_isppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pci_cfgreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bootsect.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bpb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/denode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/direntry.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/fat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/msdosfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpfdesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ethernet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_dl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ieee80211.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_llc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_media.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_mib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_pppvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_slvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_sppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_stf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tapvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tunvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_vlan_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/intrq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/iso88025.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/net_osdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/netisr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/pfkeyv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_defs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/radix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/raw_cb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/route.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slcompress.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_faith.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/krpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsdiskless.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsm_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrtt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrvcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nqnfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/rpcv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/xdr_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/aarp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/phase2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_cm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sigmgr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sys.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_vc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/kern_include.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_UI.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_async.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_cisco.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_echo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_frame_relay.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_hole.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_iface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ksocket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_lmi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_message.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_mppc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_one2many.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_parse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pppoe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pptpgre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_rfc1490.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_sample.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_vjc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_eiface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_etf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_l2tp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_fec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_fddi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_dummynet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_encap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_flow.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_frag.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_icmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_nat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_proxy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_state.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipprotosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_fsm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_seq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcpip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_ifattach.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_prefix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/mld6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/nd6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/scope6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/tcp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/udp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp_rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/raw_ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netnatm/natm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_cfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_lib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_nls.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rcfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_sock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/nwerror.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_error.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/sp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spidp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_compr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_ihash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_inode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_vfsops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_node.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/NXConstStr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Protocol.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/encoding.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/runtime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/sarray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/thr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/typedstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/blowfish.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/buffer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf_api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/crypto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dh.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dso.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ebcdic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/evp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hmac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/lhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/mdc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/obj_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/objects.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs12.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs7.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/safestack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl23.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/symhacks.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tls1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tmdiff.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/txt_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509_vfy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509v3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/idea.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1t.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cryptlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des_old.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/engine.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/krb5_asn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/kssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ocsp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ossl_typ.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/eng_int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_4758_cca_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_aep_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_atalla_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_cswift_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ncipher_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_nuron_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_sureware_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ubsec_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cardinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/driver.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/i82365.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pccard_nbk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/slot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/meciareg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcic_pci.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcicvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/posix4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/dumprestore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/routed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/rwhod.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/talkd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/timed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/chardefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/history.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/keymaps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/readline.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlstdc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/tilde.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_unix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des_crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_rmt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_com.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/xdr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/bootparam_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/klm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nfs_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_cache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_callback.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_tags.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nislib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nlm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rnusers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rquota.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rwall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/sm_inter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/spray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypclnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yppasswd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypupdate_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypxfrd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_macros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_appl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_mod_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_modules.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/mit-sipb-copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/_posix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/agpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/assym.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/blist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/buf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus_private.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/callout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ccdvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdrio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/chio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/clist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cons.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/consio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dir.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dataacq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/device_port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/devicestat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disklabel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/diskslice.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dkstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/domain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dvdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf32.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf64.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_common.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_generic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/event.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventhandler.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/extattr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filedesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/gmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/interrupt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioccom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ipc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/jail.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kernel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ktrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/libkern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lockf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/memrange.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/module.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msgbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/namei.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pciio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pipe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/procfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/random.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/reboot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resourcevar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rtprio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/select.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/shm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signalvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/snoop.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sockio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/stat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall-hide.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslimits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/taskqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timeb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timepps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/times.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tprintf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttychars.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttycom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydefaults.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucred.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/uio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/un.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unpcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/utsname.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vmmeter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wait.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wormio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/xrpuio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kobj.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/nlist_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mchain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fnv_hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/iconv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/swap_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_kern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_page.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pageout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_zone.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vnode_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdbool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/langinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/netbios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_dev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_tran.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_trantcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g2c.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf-hints.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusbhid.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib_vs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readpassphrase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wchar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/castsb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptodev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptosoft.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/deflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rmd160.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/skipjack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bzlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl5004delta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlapio.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbook.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlcall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldata.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldebug.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldelta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldiag.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldsc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlembed.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq1.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq2.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq3.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq4.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq5.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq6.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq7.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlipc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq8.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq9.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlform.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfunc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlguts.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlhist.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllocale.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllol.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodinstall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodlib.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlobj.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlop.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlopentut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlpod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlport.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlre.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlref.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlreftut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlrun.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsec.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlstyle.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsub.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsyn.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlthrtut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltie.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltrap.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlvar.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxs.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxstut.pod
OLD_FILES+=usr/libdata/perl/5.00503/AnyDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoSplit.pm
OLD_FILES+=usr/libdata/perl/5.00503/Benchmark.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN.pm
OLD_FILES+=usr/libdata/perl/5.00503/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/Cwd.pm
OLD_FILES+=usr/libdata/perl/5.00503/DirHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Dumpvalue.pm
OLD_FILES+=usr/libdata/perl/5.00503/English.pm
OLD_FILES+=usr/libdata/perl/5.00503/Env.pm
OLD_FILES+=usr/libdata/perl/5.00503/Exporter.pm
OLD_FILES+=usr/libdata/perl/5.00503/Fatal.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileCache.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/FindBin.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelectSaver.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelfLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/Shell.pm
OLD_FILES+=usr/libdata/perl/5.00503/Symbol.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test.pm
OLD_FILES+=usr/libdata/perl/5.00503/abbrev.pl
OLD_FILES+=usr/libdata/perl/5.00503/UNIVERSAL.pm
OLD_FILES+=usr/libdata/perl/5.00503/assert.pl
OLD_FILES+=usr/libdata/perl/5.00503/autouse.pm
OLD_FILES+=usr/libdata/perl/5.00503/base.pm
OLD_FILES+=usr/libdata/perl/5.00503/bigfloat.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigint.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigrat.pl
OLD_FILES+=usr/libdata/perl/5.00503/blib.pm
OLD_FILES+=usr/libdata/perl/5.00503/cacheout.pl
OLD_FILES+=usr/libdata/perl/5.00503/chat2.pl
OLD_FILES+=usr/libdata/perl/5.00503/complete.pl
OLD_FILES+=usr/libdata/perl/5.00503/constant.pm
OLD_FILES+=usr/libdata/perl/5.00503/ctime.pl
OLD_FILES+=usr/libdata/perl/5.00503/diagnostics.pm
OLD_FILES+=usr/libdata/perl/5.00503/dotsh.pl
OLD_FILES+=usr/libdata/perl/5.00503/dumpvar.pl
OLD_FILES+=usr/libdata/perl/5.00503/exceptions.pl
OLD_FILES+=usr/libdata/perl/5.00503/fastcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/fields.pm
OLD_FILES+=usr/libdata/perl/5.00503/find.pl
OLD_FILES+=usr/libdata/perl/5.00503/finddepth.pl
OLD_FILES+=usr/libdata/perl/5.00503/flush.pl
OLD_FILES+=usr/libdata/perl/5.00503/ftp.pl
OLD_FILES+=usr/libdata/perl/5.00503/getcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopts.pl
OLD_FILES+=usr/libdata/perl/5.00503/hostname.pl
OLD_FILES+=usr/libdata/perl/5.00503/importenv.pl
OLD_FILES+=usr/libdata/perl/5.00503/integer.pm
OLD_FILES+=usr/libdata/perl/5.00503/less.pm
OLD_FILES+=usr/libdata/perl/5.00503/lib.pm
OLD_FILES+=usr/libdata/perl/5.00503/locale.pm
OLD_FILES+=usr/libdata/perl/5.00503/look.pl
OLD_FILES+=usr/libdata/perl/5.00503/newgetopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/open2.pl
OLD_FILES+=usr/libdata/perl/5.00503/open3.pl
OLD_FILES+=usr/libdata/perl/5.00503/overload.pm
OLD_FILES+=usr/libdata/perl/5.00503/perl5db.pl
OLD_FILES+=usr/libdata/perl/5.00503/pwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/shellwords.pl
OLD_FILES+=usr/libdata/perl/5.00503/sigtrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/stat.pl
OLD_FILES+=usr/libdata/perl/5.00503/strict.pm
OLD_FILES+=usr/libdata/perl/5.00503/subs.pm
OLD_FILES+=usr/libdata/perl/5.00503/syslog.pl
OLD_FILES+=usr/libdata/perl/5.00503/tainted.pl
OLD_FILES+=usr/libdata/perl/5.00503/termcap.pl
OLD_FILES+=usr/libdata/perl/5.00503/timelocal.pl
OLD_FILES+=usr/libdata/perl/5.00503/validate.pl
OLD_FILES+=usr/libdata/perl/5.00503/vars.pm
OLD_FILES+=usr/libdata/perl/5.00503/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/.exists
OLD_FILES+=usr/libdata/perl/5.00503/DynaLoader.pm
OLD_FILES+=usr/share/perl/man/man3/AnyDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoSplit.3.gz
OLD_FILES+=usr/share/perl/man/man3/B.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Asmdata.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Assembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bblock.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bytecode.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::C.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::CC.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Debug.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Deparse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Disassembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Lint.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Showlex.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stackobj.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Terse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Xref.3.gz
OLD_FILES+=usr/share/perl/man/man3/Benchmark.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Apache.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Cookie.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Fast.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Push.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Switch.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::FirstTime.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::Nox.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/Class::Struct.3.gz
OLD_FILES+=usr/share/perl/man/man3/Config.3.gz
OLD_FILES+=usr/share/perl/man/man3/Cwd.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Data::Dumper.3.gz
OLD_FILES+=usr/share/perl/man/man3/Devel::SelfStubber.3.gz
OLD_FILES+=usr/share/perl/man/man3/DirHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Dumpvalue.3.gz
OLD_FILES+=usr/share/perl/man/man3/DynaLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/English.3.gz
OLD_FILES+=usr/share/perl/man/man3/Env.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Command.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Embed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Install.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Installed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Liblist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MakeMaker.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Manifest.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mkbootstrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mksymlists.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Packlist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::testlib.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fatal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fcntl.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Basename.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::CheckTree.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Compare.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Copy.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::DosGlob.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Path.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Mac.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::stat.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileCache.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/FindBin.3.gz
OLD_FILES+=usr/share/perl/man/man3/GDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Long.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Std.3.gz
OLD_FILES+=usr/share/perl/man/man3/I18N::Collate.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::File.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Pipe.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Seekable.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Msg.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open2.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open3.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::SysV.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigFloat.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigInt.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Complex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Trig.3.gz
OLD_FILES+=usr/share/perl/man/man3/NDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::Ping.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::hostent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::netent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::protoent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::servent.3.gz
OLD_FILES+=usr/share/perl/man/man3/O.3.gz
OLD_FILES+=usr/share/perl/man/man3/ODBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Opcode.3.gz
OLD_FILES+=usr/share/perl/man/man3/POSIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Html.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text.3.gz
OLD_FILES+=usr/share/perl/man/man3/SDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Safe.3.gz
OLD_FILES+=usr/share/perl/man/man3/Search::Dict.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelectSaver.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelfLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/Shell.3.gz
OLD_FILES+=usr/share/perl/man/man3/Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/Symbol.3.gz
OLD_FILES+=usr/share/perl/man/man3/re.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Cap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Complete.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ReadLine.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test::Harness.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Abbrev.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::ParseWords.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Soundex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Tabs.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Wrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Queue.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Signal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Specific.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Array.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Hash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::RefHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Scalar.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::SubstrHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::Local.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::gmtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::localtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::tm.3.gz
OLD_FILES+=usr/share/perl/man/man3/UNIVERSAL.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::grent.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::pwent.3.gz
OLD_FILES+=usr/share/perl/man/man3/attrs.3.gz
OLD_FILES+=usr/share/perl/man/man3/autouse.3.gz
OLD_FILES+=usr/share/perl/man/man3/base.3.gz
OLD_FILES+=usr/share/perl/man/man3/blib.3.gz
OLD_FILES+=usr/share/perl/man/man3/constant.3.gz
OLD_FILES+=usr/share/perl/man/man3/diagnostics.3.gz
OLD_FILES+=usr/share/perl/man/man3/fields.3.gz
OLD_FILES+=usr/share/perl/man/man3/integer.3.gz
OLD_FILES+=usr/share/perl/man/man3/less.3.gz
OLD_FILES+=usr/share/perl/man/man3/lib.3.gz
OLD_FILES+=usr/share/perl/man/man3/locale.3.gz
OLD_FILES+=usr/share/perl/man/man3/ops.3.gz
OLD_FILES+=usr/share/perl/man/man3/overload.3.gz
OLD_FILES+=usr/share/perl/man/man3/sigtrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/strict.3.gz
OLD_FILES+=usr/share/perl/man/man3/subs.3.gz
OLD_FILES+=usr/share/perl/man/man3/vars.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stash.3.gz
OLD_FILES+=usr/share/perl/man/man3/ByteLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Pretty.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB.3.gz
OLD_FILES+=usr/share/perl/man/man3/DProf::DProf.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Cygwin.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Glob::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Hostname::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Dir.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Poll.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::INET.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::UNIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Peek::Peek.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Checker.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::InputObjects.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Man.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::ParseUtils.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Parser.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Plainer.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Color.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Termcap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Usage.3.gz
OLD_FILES+=usr/share/perl/man/man3/Syslog::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ANSIColor.3.gz
OLD_FILES+=usr/share/perl/man/man3/XSLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/attributes.3.gz
OLD_FILES+=usr/share/perl/man/man3/bytes.3.gz
OLD_FILES+=usr/share/perl/man/man3/charnames.3.gz
OLD_FILES+=usr/share/perl/man/man3/filetest.3.gz
OLD_FILES+=usr/share/perl/man/man3/open.3.gz
OLD_FILES+=usr/share/perl/man/man3/utf8.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings::register.3.gz
OLD_FILES+=usr/share/perl/man/whatis
OLD_FILES+=usr/share/man/man1/CA.pl.1.gz
OLD_FILES+=usr/share/man/man1/asn1parse.1.gz
OLD_FILES+=usr/share/man/man1/ca.1.gz
OLD_FILES+=usr/share/man/man1/ciphers.1.gz
OLD_FILES+=usr/share/man/man1/config.1.gz
OLD_FILES+=usr/share/man/man1/crl.1.gz
OLD_FILES+=usr/share/man/man1/crl2pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/dgst.1.gz
OLD_FILES+=usr/share/man/man1/dhparam.1.gz
OLD_FILES+=usr/share/man/man1/doscmd.1.gz
OLD_FILES+=usr/share/man/man1/dsa.1.gz
OLD_FILES+=usr/share/man/man1/dsaparam.1.gz
OLD_FILES+=usr/share/man/man1/enc.1.gz
OLD_FILES+=usr/share/man/man1/gendsa.1.gz
OLD_FILES+=usr/share/man/man1/genrsa.1.gz
OLD_FILES+=usr/share/man/man1/getNAME.1.gz
OLD_FILES+=usr/share/man/man1/nseq.1.gz
OLD_FILES+=usr/share/man/man1/ocsp.1.gz
OLD_FILES+=usr/share/man/man1/openssl.1.gz
OLD_FILES+=usr/share/man/man1/perl.1.gz
OLD_FILES+=usr/share/man/man1/perl5004delta.1.gz
OLD_FILES+=usr/share/man/man1/perlapio.1.gz
OLD_FILES+=usr/share/man/man1/perlbook.1.gz
OLD_FILES+=usr/share/man/man1/perlbot.1.gz
OLD_FILES+=usr/share/man/man1/perlcall.1.gz
OLD_FILES+=usr/share/man/man1/perldata.1.gz
OLD_FILES+=usr/share/man/man1/perldebug.1.gz
OLD_FILES+=usr/share/man/man1/perldelta.1.gz
OLD_FILES+=usr/share/man/man1/perldiag.1.gz
OLD_FILES+=usr/share/man/man1/perldsc.1.gz
OLD_FILES+=usr/share/man/man1/perlembed.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq1.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq2.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq3.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq4.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq5.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq6.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq7.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq8.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq9.1.gz
OLD_FILES+=usr/share/man/man1/perlform.1.gz
OLD_FILES+=usr/share/man/man1/perlfunc.1.gz
OLD_FILES+=usr/share/man/man1/perlguts.1.gz
OLD_FILES+=usr/share/man/man1/perlhist.1.gz
OLD_FILES+=usr/share/man/man1/perlipc.1.gz
OLD_FILES+=usr/share/man/man1/perllocale.1.gz
OLD_FILES+=usr/share/man/man1/perllol.1.gz
OLD_FILES+=usr/share/man/man1/perlmod.1.gz
OLD_FILES+=usr/share/man/man1/perlmodinstall.1.gz
OLD_FILES+=usr/share/man/man1/perlmodlib.1.gz
OLD_FILES+=usr/share/man/man1/perlobj.1.gz
OLD_FILES+=usr/share/man/man1/perlop.1.gz
OLD_FILES+=usr/share/man/man1/perlopentut.1.gz
OLD_FILES+=usr/share/man/man1/perlpod.1.gz
OLD_FILES+=usr/share/man/man1/perlport.1.gz
OLD_FILES+=usr/share/man/man1/perlre.1.gz
OLD_FILES+=usr/share/man/man1/perlref.1.gz
OLD_FILES+=usr/share/man/man1/perlreftut.1.gz
OLD_FILES+=usr/share/man/man1/perlrun.1.gz
OLD_FILES+=usr/share/man/man1/perlsec.1.gz
OLD_FILES+=usr/share/man/man1/perlstyle.1.gz
OLD_FILES+=usr/share/man/man1/perlsub.1.gz
OLD_FILES+=usr/share/man/man1/perlsyn.1.gz
OLD_FILES+=usr/share/man/man1/perlthrtut.1.gz
OLD_FILES+=usr/share/man/man1/perltie.1.gz
OLD_FILES+=usr/share/man/man1/perltoc.1.gz
OLD_FILES+=usr/share/man/man1/perltoot.1.gz
OLD_FILES+=usr/share/man/man1/perltrap.1.gz
OLD_FILES+=usr/share/man/man1/perlvar.1.gz
OLD_FILES+=usr/share/man/man1/perlxs.1.gz
OLD_FILES+=usr/share/man/man1/perlxstut.1.gz
OLD_FILES+=usr/share/man/man1/perlbug.1.gz
OLD_FILES+=usr/share/man/man1/perlcc.1.gz
OLD_FILES+=usr/share/man/man1/perldoc.1.gz
OLD_FILES+=usr/share/man/man1/perl5005delta.1.gz
OLD_FILES+=usr/share/man/man1/perlfork.1.gz
OLD_FILES+=usr/share/man/man1/perlboot.1.gz
OLD_FILES+=usr/share/man/man1/perltootc.1.gz
OLD_FILES+=usr/share/man/man1/perldbmfilter.1.gz
OLD_FILES+=usr/share/man/man1/perldebguts.1.gz
OLD_FILES+=usr/share/man/man1/perlnumber.1.gz
OLD_FILES+=usr/share/man/man1/perlcompile.1.gz
OLD_FILES+=usr/share/man/man1/perltodo.1.gz
OLD_FILES+=usr/share/man/man1/perlapi.1.gz
OLD_FILES+=usr/share/man/man1/perlintern.1.gz
OLD_FILES+=usr/share/man/man1/perlhack.1.gz
OLD_FILES+=usr/share/man/man1/perlbc.1.gz
OLD_FILES+=usr/share/man/man1/pkcs12.1.gz
OLD_FILES+=usr/share/man/man1/pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/pkcs8.1.gz
OLD_FILES+=usr/share/man/man1/rand.1.gz
OLD_FILES+=usr/share/man/man1/req.1.gz
OLD_FILES+=usr/share/man/man1/rsa.1.gz
OLD_FILES+=usr/share/man/man1/rsautl.1.gz
OLD_FILES+=usr/share/man/man1/s_client.1.gz
OLD_FILES+=usr/share/man/man1/s_server.1.gz
OLD_FILES+=usr/share/man/man1/sess_id.1.gz
OLD_FILES+=usr/share/man/man1/smime.1.gz
OLD_FILES+=usr/share/man/man1/speed.1.gz
OLD_FILES+=usr/share/man/man1/spkac.1.gz
OLD_FILES+=usr/share/man/man1/verify.1.gz
OLD_FILES+=usr/share/man/man1/version.1.gz
OLD_FILES+=usr/share/man/man1/x509.1.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_dup.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_set_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_data_into_file.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_open_tar.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_support_format_gnutar.3.gz
OLD_FILES+=usr/share/man/man3/cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_setkey.3.gz
OLD_FILES+=usr/share/man/man3/encrypt.3.gz
OLD_FILES+=usr/share/man/man3/endvfsent.3.gz
OLD_FILES+=usr/share/man/man3/getvfsbytype.3.gz
OLD_FILES+=usr/share/man/man3/getvfsent.3.gz
OLD_FILES+=usr/share/man/man3/isnanf.3.gz
OLD_FILES+=usr/share/man/man3/libautofs.3.gz
OLD_FILES+=usr/share/man/man3/pthread_attr_setsstack.3.gz
OLD_FILES+=usr/share/man/man3/pthread_getcancelstate.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_getpshared.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_setpshared.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/setkey.3.gz
OLD_FILES+=usr/share/man/man3/setvfsent.3.gz
OLD_FILES+=usr/share/man/man3/ssl.3.gz
OLD_FILES+=usr/share/man/man3/vfsisloadable.3.gz
OLD_FILES+=usr/share/man/man3/vfsload.3.gz
OLD_FILES+=usr/share/man/man4/als4000.4.gz
OLD_FILES+=usr/share/man/man4/csa.4.gz
OLD_FILES+=usr/share/man/man4/emu10k1.4.gz
OLD_FILES+=usr/share/man/man4/euc.4.gz
OLD_FILES+=usr/share/man/man4/gusc.4.gz
OLD_FILES+=usr/share/man/man4/if_fwp.4.gz
OLD_FILES+=usr/share/man/man4/lomac.4.gz
OLD_FILES+=usr/share/man/man4/maestro3.4.gz
OLD_FILES+=usr/share/man/man4/raid.4.gz
OLD_FILES+=usr/share/man/man4/sbc.4.gz
OLD_FILES+=usr/share/man/man4/sd.4.gz
OLD_FILES+=usr/share/man/man4/snc.4.gz
OLD_FILES+=usr/share/man/man4/st.4.gz
OLD_FILES+=usr/share/man/man4/uaudio.4.gz
OLD_FILES+=usr/share/man/man4/utf2.4.gz
OLD_FILES+=usr/share/man/man4/vinumdebug.4.gz
OLD_FILES+=usr/share/man/man5/disklabel.5.gz
OLD_FILES+=usr/share/man/man5/dm.conf.5.gz
OLD_FILES+=usr/share/man/man5/ranlib.5.gz
OLD_FILES+=usr/share/man/man5/utf2.5.gz
OLD_FILES+=usr/share/man/man7/groff_mwww.7.gz
OLD_FILES+=usr/share/man/man7/mmroff.7.gz
OLD_FILES+=usr/share/man/man7/mwww.7.gz
OLD_FILES+=usr/share/man/man7/style.perl.7.gz
OLD_FILES+=usr/share/man/man8/apm.8.gz
OLD_FILES+=usr/share/man/man8/apmconf.8.gz
OLD_FILES+=usr/share/man/man8/apmd.8.gz
OLD_FILES+=usr/share/man/man8/dm.8.gz
OLD_FILES+=usr/share/man/man8/pam_ftp.8.gz
OLD_FILES+=usr/share/man/man8/pam_wheel.8.gz
OLD_FILES+=usr/share/man/man8/sconfig.8.gz
OLD_FILES+=usr/share/man/man8/ssl.8.gz
OLD_FILES+=usr/share/man/man8/wlconfig.8.gz
OLD_FILES+=usr/share/man/man9/CURSIG.9.gz
OLD_FILES+=usr/share/man/man9/VFS_INIT.9.gz
OLD_FILES+=usr/share/man/man9/at_exit.9.gz
OLD_FILES+=usr/share/man/man9/at_fork.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_add.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_remove.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_empty.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_remove.9.gz
OLD_FILES+=usr/share/man/man9/endtsleep.9.gz
OLD_FILES+=usr/share/man/man9/jumbo.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_freem.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_alloc.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_free.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_steal.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_phys_to_kva.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_vm_init.9.gz
OLD_FILES+=usr/share/man/man9/mac_biba.9.gz
OLD_FILES+=usr/share/man/man9/mac_bsdextended.9.gz
OLD_FILES+=usr/share/man/man9/mono_time.9.gz
OLD_FILES+=usr/share/man/man9/p1003_1b.9.gz
OLD_FILES+=usr/share/man/man9/pmap_prefault.9.gz
OLD_FILES+=usr/share/man/man9/posix4.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_name.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_string.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_unit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_exit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_fork.9.gz
OLD_FILES+=usr/share/man/man9/runtime.9.gz
OLD_FILES+=usr/share/man/man9/sleepinit.9.gz
OLD_FILES+=usr/share/man/man9/unsleep.9.gz
OLD_FILES+=usr/share/man/ja/man1/perl.1.gz
OLD_FILES+=usr/share/games/atc/Game_List
OLD_FILES+=usr/share/games/atc/Killer
OLD_FILES+=usr/share/games/atc/crossover
OLD_FILES+=usr/share/games/atc/default
OLD_FILES+=usr/share/games/atc/easy
OLD_FILES+=usr/share/games/atc/game_2
OLD_FILES+=usr/share/games/larn/larnmaze
OLD_FILES+=usr/share/games/larn/larnopts
OLD_FILES+=usr/share/games/larn/larn.help
OLD_FILES+=usr/share/games/quiz.db/africa
OLD_FILES+=usr/share/games/quiz.db/america
OLD_FILES+=usr/share/games/quiz.db/areas
OLD_FILES+=usr/share/games/quiz.db/arith
OLD_FILES+=usr/share/games/quiz.db/asia
OLD_FILES+=usr/share/games/quiz.db/babies
OLD_FILES+=usr/share/games/quiz.db/bard
OLD_FILES+=usr/share/games/quiz.db/chinese
OLD_FILES+=usr/share/games/quiz.db/collectives
OLD_FILES+=usr/share/games/quiz.db/ed
OLD_FILES+=usr/share/games/quiz.db/elements
OLD_FILES+=usr/share/games/quiz.db/europe
OLD_FILES+=usr/share/games/quiz.db/flowers
OLD_FILES+=usr/share/games/quiz.db/greek
OLD_FILES+=usr/share/games/quiz.db/inca
OLD_FILES+=usr/share/games/quiz.db/index
OLD_FILES+=usr/share/games/quiz.db/latin
OLD_FILES+=usr/share/games/quiz.db/locomotive
OLD_FILES+=usr/share/games/quiz.db/midearth
OLD_FILES+=usr/share/games/quiz.db/morse
OLD_FILES+=usr/share/games/quiz.db/murders
OLD_FILES+=usr/share/games/quiz.db/poetry
OLD_FILES+=usr/share/games/quiz.db/posneg
OLD_FILES+=usr/share/games/quiz.db/pres
OLD_FILES+=usr/share/games/quiz.db/province
OLD_FILES+=usr/share/games/quiz.db/seq-easy
OLD_FILES+=usr/share/games/quiz.db/seq-hard
OLD_FILES+=usr/share/games/quiz.db/sexes
OLD_FILES+=usr/share/games/quiz.db/sov
OLD_FILES+=usr/share/games/quiz.db/spell
OLD_FILES+=usr/share/games/quiz.db/state
OLD_FILES+=usr/share/games/quiz.db/trek
OLD_FILES+=usr/share/games/quiz.db/ucc
OLD_FILES+=usr/share/games/cribbage.instr
OLD_FILES+=usr/share/games/fish.instr
OLD_FILES+=usr/share/games/wump.info
OLD_FILES+=usr/games/hide/adventure
OLD_FILES+=usr/games/hide/arithmetic
OLD_FILES+=usr/games/hide/atc
OLD_FILES+=usr/games/hide/backgammon
OLD_FILES+=usr/games/hide/teachgammon
OLD_FILES+=usr/games/hide/battlestar
OLD_FILES+=usr/games/hide/bs
OLD_FILES+=usr/games/hide/canfield
OLD_FILES+=usr/games/hide/cribbage
OLD_FILES+=usr/games/hide/fish
OLD_FILES+=usr/games/hide/hack
OLD_FILES+=usr/games/hide/hangman
OLD_FILES+=usr/games/hide/larn
OLD_FILES+=usr/games/hide/mille
OLD_FILES+=usr/games/hide/phantasia
OLD_FILES+=usr/games/hide/quiz
OLD_FILES+=usr/games/hide/robots
OLD_FILES+=usr/games/hide/rogue
OLD_FILES+=usr/games/hide/sail
OLD_FILES+=usr/games/hide/snake
OLD_FILES+=usr/games/hide/trek
OLD_FILES+=usr/games/hide/worm
OLD_FILES+=usr/games/hide/wump
OLD_FILES+=usr/games/adventure
OLD_FILES+=usr/games/arithmetic
OLD_FILES+=usr/games/atc
OLD_FILES+=usr/games/backgammon
OLD_FILES+=usr/games/teachgammon
OLD_FILES+=usr/games/battlestar
OLD_FILES+=usr/games/bs
OLD_FILES+=usr/games/canfield
OLD_FILES+=usr/games/cfscores
OLD_FILES+=usr/games/cribbage
OLD_FILES+=usr/games/dm
OLD_FILES+=usr/games/fish
OLD_FILES+=usr/games/hack
OLD_FILES+=usr/games/hangman
OLD_FILES+=usr/games/larn
OLD_FILES+=usr/games/mille
OLD_FILES+=usr/games/phantasia
OLD_FILES+=usr/games/piano
OLD_FILES+=usr/games/pig
OLD_FILES+=usr/games/quiz
OLD_FILES+=usr/games/rain
OLD_FILES+=usr/games/robots
OLD_FILES+=usr/games/rogue
OLD_FILES+=usr/games/sail
OLD_FILES+=usr/games/snake
OLD_FILES+=usr/games/snscore
OLD_FILES+=usr/games/trek
OLD_FILES+=usr/games/wargames
OLD_FILES+=usr/games/worm
OLD_FILES+=usr/games/worms
OLD_FILES+=usr/games/wump
OLD_FILES+=sbin/mount_reiserfs
OLD_FILES+=usr/include/cam/cam_extend.h
OLD_FILES+=usr/include/dev/wi/wi_hostap.h
OLD_FILES+=usr/include/disktab.h
OLD_FILES+=usr/include/g++/FlexLexer.h
OLD_FILES+=usr/include/g++/PlotFile.h
OLD_FILES+=usr/include/g++/SFile.h
OLD_FILES+=usr/include/g++/_G_config.h
OLD_FILES+=usr/include/g++/algo.h
OLD_FILES+=usr/include/g++/algobase.h
OLD_FILES+=usr/include/g++/algorithm
OLD_FILES+=usr/include/g++/alloc.h
OLD_FILES+=usr/include/g++/bitset
OLD_FILES+=usr/include/g++/builtinbuf.h
OLD_FILES+=usr/include/g++/bvector.h
OLD_FILES+=usr/include/g++/cassert
OLD_FILES+=usr/include/g++/cctype
OLD_FILES+=usr/include/g++/cerrno
OLD_FILES+=usr/include/g++/cfloat
OLD_FILES+=usr/include/g++/ciso646
OLD_FILES+=usr/include/g++/climits
OLD_FILES+=usr/include/g++/clocale
OLD_FILES+=usr/include/g++/cmath
OLD_FILES+=usr/include/g++/complex
OLD_FILES+=usr/include/g++/complex.h
OLD_FILES+=usr/include/g++/csetjmp
OLD_FILES+=usr/include/g++/csignal
OLD_FILES+=usr/include/g++/cstdarg
OLD_FILES+=usr/include/g++/cstddef
OLD_FILES+=usr/include/g++/cstdio
OLD_FILES+=usr/include/g++/cstdlib
OLD_FILES+=usr/include/g++/cstring
OLD_FILES+=usr/include/g++/ctime
OLD_FILES+=usr/include/g++/cwchar
OLD_FILES+=usr/include/g++/cwctype
OLD_FILES+=usr/include/g++/defalloc.h
OLD_FILES+=usr/include/g++/deque
OLD_FILES+=usr/include/g++/deque.h
OLD_FILES+=usr/include/g++/editbuf.h
OLD_FILES+=usr/include/g++/exception
OLD_FILES+=usr/include/g++/floatio.h
OLD_FILES+=usr/include/g++/fstream
OLD_FILES+=usr/include/g++/fstream.h
OLD_FILES+=usr/include/g++/function.h
OLD_FILES+=usr/include/g++/functional
OLD_FILES+=usr/include/g++/hash_map
OLD_FILES+=usr/include/g++/hash_map.h
OLD_FILES+=usr/include/g++/hash_set
OLD_FILES+=usr/include/g++/hash_set.h
OLD_FILES+=usr/include/g++/hashtable.h
OLD_FILES+=usr/include/g++/heap.h
OLD_FILES+=usr/include/g++/indstream.h
OLD_FILES+=usr/include/g++/iolibio.h
OLD_FILES+=usr/include/g++/iomanip
OLD_FILES+=usr/include/g++/iomanip.h
OLD_FILES+=usr/include/g++/iosfwd
OLD_FILES+=usr/include/g++/iostdio.h
OLD_FILES+=usr/include/g++/iostream
OLD_FILES+=usr/include/g++/iostream.h
OLD_FILES+=usr/include/g++/iostreamP.h
OLD_FILES+=usr/include/g++/istream.h
OLD_FILES+=usr/include/g++/iterator
OLD_FILES+=usr/include/g++/iterator.h
OLD_FILES+=usr/include/g++/libio.h
OLD_FILES+=usr/include/g++/libioP.h
OLD_FILES+=usr/include/g++/list
OLD_FILES+=usr/include/g++/list.h
OLD_FILES+=usr/include/g++/map
OLD_FILES+=usr/include/g++/map.h
OLD_FILES+=usr/include/g++/memory
OLD_FILES+=usr/include/g++/multimap.h
OLD_FILES+=usr/include/g++/multiset.h
OLD_FILES+=usr/include/g++/new
OLD_FILES+=usr/include/g++/new.h
OLD_FILES+=usr/include/g++/numeric
OLD_FILES+=usr/include/g++/ostream.h
OLD_FILES+=usr/include/g++/pair.h
OLD_FILES+=usr/include/g++/parsestream.h
OLD_FILES+=usr/include/g++/pfstream.h
OLD_FILES+=usr/include/g++/procbuf.h
OLD_FILES+=usr/include/g++/pthread_alloc
OLD_FILES+=usr/include/g++/pthread_alloc.h
OLD_FILES+=usr/include/g++/queue
OLD_FILES+=usr/include/g++/rope
OLD_FILES+=usr/include/g++/rope.h
OLD_FILES+=usr/include/g++/ropeimpl.h
OLD_FILES+=usr/include/g++/set
OLD_FILES+=usr/include/g++/set.h
OLD_FILES+=usr/include/g++/slist
OLD_FILES+=usr/include/g++/slist.h
OLD_FILES+=usr/include/g++/sstream
OLD_FILES+=usr/include/g++/stack
OLD_FILES+=usr/include/g++/stack.h
OLD_FILES+=usr/include/g++/std/bastring.cc
OLD_FILES+=usr/include/g++/std/bastring.h
OLD_FILES+=usr/include/g++/std/complext.cc
OLD_FILES+=usr/include/g++/std/complext.h
OLD_FILES+=usr/include/g++/std/dcomplex.h
OLD_FILES+=usr/include/g++/std/fcomplex.h
OLD_FILES+=usr/include/g++/std/gslice.h
OLD_FILES+=usr/include/g++/std/gslice_array.h
OLD_FILES+=usr/include/g++/std/indirect_array.h
OLD_FILES+=usr/include/g++/std/ldcomplex.h
OLD_FILES+=usr/include/g++/std/mask_array.h
OLD_FILES+=usr/include/g++/std/slice.h
OLD_FILES+=usr/include/g++/std/slice_array.h
OLD_FILES+=usr/include/g++/std/std_valarray.h
OLD_FILES+=usr/include/g++/std/straits.h
OLD_FILES+=usr/include/g++/std/valarray_array.h
OLD_FILES+=usr/include/g++/std/valarray_array.tcc
OLD_FILES+=usr/include/g++/std/valarray_meta.h
OLD_FILES+=usr/include/g++/stdexcept
OLD_FILES+=usr/include/g++/stdiostream.h
OLD_FILES+=usr/include/g++/stl.h
OLD_FILES+=usr/include/g++/stl_algo.h
OLD_FILES+=usr/include/g++/stl_algobase.h
OLD_FILES+=usr/include/g++/stl_alloc.h
OLD_FILES+=usr/include/g++/stl_bvector.h
OLD_FILES+=usr/include/g++/stl_config.h
OLD_FILES+=usr/include/g++/stl_construct.h
OLD_FILES+=usr/include/g++/stl_deque.h
OLD_FILES+=usr/include/g++/stl_function.h
OLD_FILES+=usr/include/g++/stl_hash_fun.h
OLD_FILES+=usr/include/g++/stl_hash_map.h
OLD_FILES+=usr/include/g++/stl_hash_set.h
OLD_FILES+=usr/include/g++/stl_hashtable.h
OLD_FILES+=usr/include/g++/stl_heap.h
OLD_FILES+=usr/include/g++/stl_iterator.h
OLD_FILES+=usr/include/g++/stl_list.h
OLD_FILES+=usr/include/g++/stl_map.h
OLD_FILES+=usr/include/g++/stl_multimap.h
OLD_FILES+=usr/include/g++/stl_multiset.h
OLD_FILES+=usr/include/g++/stl_numeric.h
OLD_FILES+=usr/include/g++/stl_pair.h
OLD_FILES+=usr/include/g++/stl_queue.h
OLD_FILES+=usr/include/g++/stl_raw_storage_iter.h
OLD_FILES+=usr/include/g++/stl_relops.h
OLD_FILES+=usr/include/g++/stl_rope.h
OLD_FILES+=usr/include/g++/stl_set.h
OLD_FILES+=usr/include/g++/stl_slist.h
OLD_FILES+=usr/include/g++/stl_stack.h
OLD_FILES+=usr/include/g++/stl_tempbuf.h
OLD_FILES+=usr/include/g++/stl_tree.h
OLD_FILES+=usr/include/g++/stl_uninitialized.h
OLD_FILES+=usr/include/g++/stl_vector.h
OLD_FILES+=usr/include/g++/stream.h
OLD_FILES+=usr/include/g++/streambuf.h
OLD_FILES+=usr/include/g++/strfile.h
OLD_FILES+=usr/include/g++/string
OLD_FILES+=usr/include/g++/strstream
OLD_FILES+=usr/include/g++/strstream.h
OLD_FILES+=usr/include/g++/tempbuf.h
OLD_FILES+=usr/include/g++/tree.h
OLD_FILES+=usr/include/g++/type_traits.h
OLD_FILES+=usr/include/g++/typeinfo
OLD_FILES+=usr/include/g++/utility
OLD_FILES+=usr/include/g++/valarray
OLD_FILES+=usr/include/g++/vector
OLD_FILES+=usr/include/g++/vector.h
OLD_FILES+=usr/include/gmp.h
OLD_FILES+=usr/include/isc/assertions.h
OLD_FILES+=usr/include/isc/ctl.h
OLD_FILES+=usr/include/isc/dst.h
OLD_FILES+=usr/include/isc/eventlib.h
OLD_FILES+=usr/include/isc/heap.h
OLD_FILES+=usr/include/isc/irpmarshall.h
OLD_FILES+=usr/include/isc/list.h
OLD_FILES+=usr/include/isc/logging.h
OLD_FILES+=usr/include/isc/memcluster.h
OLD_FILES+=usr/include/isc/misc.h
OLD_FILES+=usr/include/isc/tree.h
OLD_FILES+=usr/include/machine/ansi.h
OLD_FILES+=usr/include/machine/apic.h
OLD_FILES+=usr/include/machine/asc_ioctl.h
OLD_FILES+=usr/include/machine/asnames.h
OLD_FILES+=usr/include/machine/bus_at386.h
OLD_FILES+=usr/include/machine/bus_memio.h
OLD_FILES+=usr/include/machine/bus_pc98.h
OLD_FILES+=usr/include/machine/bus_pio.h
OLD_FILES+=usr/include/machine/cdk.h
OLD_FILES+=usr/include/machine/comstats.h
OLD_FILES+=usr/include/machine/console.h
OLD_FILES+=usr/include/machine/critical.h
OLD_FILES+=usr/include/machine/cronyx.h
OLD_FILES+=usr/include/machine/dvcfg.h
OLD_FILES+=usr/include/machine/globaldata.h
OLD_FILES+=usr/include/machine/globals.h
OLD_FILES+=usr/include/machine/gsc.h
OLD_FILES+=usr/include/machine/i4b_isppp.h
OLD_FILES+=usr/include/machine/if_wavelan_ieee.h
OLD_FILES+=usr/include/machine/iic.h
OLD_FILES+=usr/include/machine/ioctl_ctx.h
OLD_FILES+=usr/include/machine/ioctl_fd.h
OLD_FILES+=usr/include/machine/ipl.h
OLD_FILES+=usr/include/machine/lock.h
OLD_FILES+=usr/include/machine/mouse.h
OLD_FILES+=usr/include/machine/mpapic.h
OLD_FILES+=usr/include/machine/mtpr.h
OLD_FILES+=usr/include/machine/pc/msdos.h
OLD_FILES+=usr/include/machine/physio_proc.h
OLD_FILES+=usr/include/machine/smb.h
OLD_FILES+=usr/include/machine/spigot.h
OLD_FILES+=usr/include/machine/types.h
OLD_FILES+=usr/include/machine/uc_device.h
OLD_FILES+=usr/include/machine/ultrasound.h
OLD_FILES+=usr/include/machine/wtio.h
OLD_FILES+=usr/include/msdosfs/bootsect.h
OLD_FILES+=usr/include/msdosfs/bpb.h
OLD_FILES+=usr/include/msdosfs/denode.h
OLD_FILES+=usr/include/msdosfs/direntry.h
OLD_FILES+=usr/include/msdosfs/fat.h
OLD_FILES+=usr/include/msdosfs/msdosfsmount.h
OLD_FILES+=usr/include/net/hostcache.h
OLD_FILES+=usr/include/net/if_faith.h
OLD_FILES+=usr/include/net/if_ieee80211.h
OLD_FILES+=usr/include/net/if_tunvar.h
OLD_FILES+=usr/include/net/intrq.h
OLD_FILES+=usr/include/netatm/kern_include.h
OLD_FILES+=usr/include/netinet/if_fddi.h
OLD_FILES+=usr/include/netinet/in_hostcache.h
OLD_FILES+=usr/include/netinet/ip_flow.h
OLD_FILES+=usr/include/netinet/ip_fw2.h
OLD_FILES+=usr/include/netinet6/in6_prefix.h
OLD_FILES+=usr/include/netns/idp.h
OLD_FILES+=usr/include/netns/idp_var.h
OLD_FILES+=usr/include/netns/ns.h
OLD_FILES+=usr/include/netns/ns_error.h
OLD_FILES+=usr/include/netns/ns_if.h
OLD_FILES+=usr/include/netns/ns_pcb.h
OLD_FILES+=usr/include/netns/sp.h
OLD_FILES+=usr/include/netns/spidp.h
OLD_FILES+=usr/include/netns/spp_debug.h
OLD_FILES+=usr/include/netns/spp_timer.h
OLD_FILES+=usr/include/netns/spp_var.h
OLD_FILES+=usr/include/nfs/nfs.h
OLD_FILES+=usr/include/nfs/nfsm_subs.h
OLD_FILES+=usr/include/nfs/nfsmount.h
OLD_FILES+=usr/include/nfs/nfsnode.h
OLD_FILES+=usr/include/nfs/nfsrtt.h
OLD_FILES+=usr/include/nfs/nfsrvcache.h
OLD_FILES+=usr/include/nfs/nfsv2.h
OLD_FILES+=usr/include/nfs/nqnfs.h
OLD_FILES+=usr/include/ntfs/ntfs.h
OLD_FILES+=usr/include/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/ntfs/ntfsmount.h
OLD_FILES+=usr/include/nwfs/nwfs.h
OLD_FILES+=usr/include/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/nwfs/nwfs_node.h
OLD_FILES+=usr/include/nwfs/nwfs_subr.h
OLD_FILES+=usr/include/posix4/_semaphore.h
OLD_FILES+=usr/include/posix4/aio.h
OLD_FILES+=usr/include/posix4/ksem.h
OLD_FILES+=usr/include/posix4/mqueue.h
OLD_FILES+=usr/include/posix4/posix4.h
OLD_FILES+=usr/include/posix4/sched.h
OLD_FILES+=usr/include/posix4/semaphore.h
OLD_DIRS+=usr/include/posix4
OLD_FILES+=usr/include/security/_pam_compat.h
OLD_FILES+=usr/include/security/_pam_macros.h
OLD_FILES+=usr/include/security/_pam_types.h
OLD_FILES+=usr/include/security/pam_malloc.h
OLD_FILES+=usr/include/security/pam_misc.h
OLD_FILES+=usr/include/skey.h
OLD_FILES+=usr/include/strhash.h
OLD_FILES+=usr/include/struct.h
OLD_FILES+=usr/include/sys/_label.h
OLD_FILES+=usr/include/sys/_posix.h
OLD_FILES+=usr/include/sys/bus_private.h
OLD_FILES+=usr/include/sys/ccdvar.h
OLD_FILES+=usr/include/sys/diskslice.h
OLD_FILES+=usr/include/sys/dmap.h
OLD_FILES+=usr/include/sys/inttypes.h
OLD_FILES+=usr/include/sys/jumbo.h
OLD_FILES+=usr/include/sys/mac_policy.h
OLD_FILES+=usr/include/sys/pbioio.h
OLD_FILES+=usr/include/sys/syscall-hide.h
OLD_FILES+=usr/include/sys/tprintf.h
OLD_FILES+=usr/include/sys/vnioctl.h
OLD_FILES+=usr/include/sys/wormio.h
OLD_FILES+=usr/include/telnet.h
OLD_FILES+=usr/include/ufs/mfs/mfs_extern.h
OLD_FILES+=usr/include/ufs/mfs/mfsnode.h
OLD_FILES+=usr/include/values.h
OLD_FILES+=usr/include/vm/vm_zone.h
OLD_FILES+=usr/share/examples/etc/usbd.conf
OLD_FILES+=usr/share/examples/meteor/README
OLD_FILES+=usr/share/examples/meteor/rgb16.c
OLD_FILES+=usr/share/examples/meteor/rgb24.c
OLD_FILES+=usr/share/examples/meteor/test-n.c
OLD_FILES+=usr/share/examples/meteor/yuvpk.c
OLD_FILES+=usr/share/examples/meteor/yuvpl.c
OLD_FILES+=usr/share/examples/worm/README
OLD_FILES+=usr/share/examples/worm/makecdfs.sh
OLD_FILES+=usr/share/groff_font/devlj4/Makefile
OLD_FILES+=usr/share/groff_font/devlj4/text.map
OLD_FILES+=usr/share/groff_font/devlj4/special.map
OLD_FILES+=usr/share/misc/nslookup.help
OLD_FILES+=usr/share/sendmail/cf/feature/nodns.m4
OLD_FILES+=usr/share/syscons/keymaps/lat-amer.kbd
OLD_FILES+=usr/share/vi/catalog/ru_SU.KOI8-R
OLD_FILES+=usr/share/zoneinfo/Africa/Timbuktu
OLD_FILES+=usr/share/zoneinfo/Africa/Asmera
OLD_FILES+=usr/share/zoneinfo/America/Buenos_Aires
OLD_FILES+=usr/share/zoneinfo/America/Cordoba
OLD_FILES+=usr/share/zoneinfo/America/Jujuy
OLD_FILES+=usr/share/zoneinfo/America/Catamarca
OLD_FILES+=usr/share/zoneinfo/America/Mendoza
OLD_FILES+=usr/share/zoneinfo/America/Indianapolis
OLD_FILES+=usr/share/zoneinfo/America/Louisville
OLD_FILES+=usr/share/zoneinfo/America/Argentina/ComodRivadavia
OLD_FILES+=usr/share/zoneinfo/Atlantic/Faeroe
OLD_FILES+=usr/share/zoneinfo/Europe/Belfast
OLD_FILES+=usr/share/zoneinfo/Pacific/Yap
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5EDT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6CDT
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7MDT
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8PDT
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9YDT
OLD_FILES+=usr/share/zoneinfo/SystemV/HST10
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4ADT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4
OLD_FILES+=usr/share/doc/ntp/accopt.htm
OLD_FILES+=usr/share/doc/ntp/assoc.htm
OLD_FILES+=usr/share/doc/ntp/audio.htm
OLD_FILES+=usr/share/doc/ntp/authopt.htm
OLD_FILES+=usr/share/doc/ntp/biblio.htm
OLD_FILES+=usr/share/doc/ntp/build.htm
OLD_FILES+=usr/share/doc/ntp/clockopt.htm
OLD_FILES+=usr/share/doc/ntp/config.htm
OLD_FILES+=usr/share/doc/ntp/confopt.htm
OLD_FILES+=usr/share/doc/ntp/copyright.htm
OLD_FILES+=usr/share/doc/ntp/debug.htm
OLD_FILES+=usr/share/doc/ntp/driver1.htm
OLD_FILES+=usr/share/doc/ntp/driver10.htm
OLD_FILES+=usr/share/doc/ntp/driver11.htm
OLD_FILES+=usr/share/doc/ntp/driver12.htm
OLD_FILES+=usr/share/doc/ntp/driver16.htm
OLD_FILES+=usr/share/doc/ntp/driver18.htm
OLD_FILES+=usr/share/doc/ntp/driver19.htm
OLD_FILES+=usr/share/doc/ntp/driver2.htm
OLD_FILES+=usr/share/doc/ntp/driver20.htm
OLD_FILES+=usr/share/doc/ntp/driver22.htm
OLD_FILES+=usr/share/doc/ntp/driver23.htm
OLD_FILES+=usr/share/doc/ntp/driver24.htm
OLD_FILES+=usr/share/doc/ntp/driver26.htm
OLD_FILES+=usr/share/doc/ntp/driver27.htm
OLD_FILES+=usr/share/doc/ntp/driver28.htm
OLD_FILES+=usr/share/doc/ntp/driver29.htm
OLD_FILES+=usr/share/doc/ntp/driver3.htm
OLD_FILES+=usr/share/doc/ntp/driver30.htm
OLD_FILES+=usr/share/doc/ntp/driver32.htm
OLD_FILES+=usr/share/doc/ntp/driver33.htm
OLD_FILES+=usr/share/doc/ntp/driver34.htm
OLD_FILES+=usr/share/doc/ntp/driver35.htm
OLD_FILES+=usr/share/doc/ntp/driver36.htm
OLD_FILES+=usr/share/doc/ntp/driver37.htm
OLD_FILES+=usr/share/doc/ntp/driver4.htm
OLD_FILES+=usr/share/doc/ntp/driver5.htm
OLD_FILES+=usr/share/doc/ntp/driver6.htm
OLD_FILES+=usr/share/doc/ntp/driver7.htm
OLD_FILES+=usr/share/doc/ntp/driver8.htm
OLD_FILES+=usr/share/doc/ntp/driver9.htm
OLD_FILES+=usr/share/doc/ntp/exec.htm
OLD_FILES+=usr/share/doc/ntp/extern.htm
OLD_FILES+=usr/share/doc/ntp/gadget.htm
OLD_FILES+=usr/share/doc/ntp/hints.htm
OLD_FILES+=usr/share/doc/ntp/howto.htm
OLD_FILES+=usr/share/doc/ntp/htmlprimer.htm
OLD_FILES+=usr/share/doc/ntp/index.htm
OLD_FILES+=usr/share/doc/ntp/kern.htm
OLD_FILES+=usr/share/doc/ntp/kernpps.htm
OLD_FILES+=usr/share/doc/ntp/ldisc.htm
OLD_FILES+=usr/share/doc/ntp/measure.htm
OLD_FILES+=usr/share/doc/ntp/miscopt.htm
OLD_FILES+=usr/share/doc/ntp/monopt.htm
OLD_FILES+=usr/share/doc/ntp/mx4200data.htm
OLD_FILES+=usr/share/doc/ntp/notes.htm
OLD_FILES+=usr/share/doc/ntp/ntpd.htm
OLD_FILES+=usr/share/doc/ntp/ntpdate.htm
OLD_FILES+=usr/share/doc/ntp/ntpdc.htm
OLD_FILES+=usr/share/doc/ntp/ntpq.htm
OLD_FILES+=usr/share/doc/ntp/ntptime.htm
OLD_FILES+=usr/share/doc/ntp/ntptrace.htm
OLD_FILES+=usr/share/doc/ntp/parsedata.htm
OLD_FILES+=usr/share/doc/ntp/parsenew.htm
OLD_FILES+=usr/share/doc/ntp/patches.htm
OLD_FILES+=usr/share/doc/ntp/porting.htm
OLD_FILES+=usr/share/doc/ntp/pps.htm
OLD_FILES+=usr/share/doc/ntp/prefer.htm
OLD_FILES+=usr/share/doc/ntp/qth.htm
OLD_FILES+=usr/share/doc/ntp/quick.htm
OLD_FILES+=usr/share/doc/ntp/rdebug.htm
OLD_FILES+=usr/share/doc/ntp/refclock.htm
OLD_FILES+=usr/share/doc/ntp/release.htm
OLD_FILES+=usr/share/doc/ntp/tickadj.htm
OLD_FILES+=usr/share/doc/papers/nqnfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/px.ascii.gz
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_compat_des3_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_copy_ccache.3.gz
OLD_FILES+=usr/share/man/man3/mac_is_present_np.3.gz
OLD_FILES+=usr/share/man/man3/mbmb.3.gz
OLD_FILES+=usr/share/man/man3/setrunelocale.3.gz
OLD_FILES+=usr/share/man/man5/usbd.conf.5.gz
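# The two conditional blocks below read in the other direction from the
# rest of this file: boot_i386(8) remains installed on i386 and amd64, and
# ofwdump(8) remains on the Open Firmware platforms (powerpc, powerpc64 and
# sparc64); the manpages are only treated as obsolete on the architectures
# the .if expressions match.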
OLD_FILES+=usr/share/man/man9/cpu_critical_exit.9.gz OLD_FILES+=usr/share/man/man9/cpu_critical_enter.9.gz OLD_FILES+=usr/share/info/annotate.info.gz OLD_FILES+=usr/share/info/tar.info.gz OLD_FILES+=usr/share/bsnmp/defs/tree.def OLD_FILES+=usr/share/bsnmp/defs/mibII_tree.def OLD_FILES+=usr/share/bsnmp/defs/netgraph_tree.def OLD_FILES+=usr/share/bsnmp/mibs/FOKUS-MIB.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-MIB.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-SNMPD.txt OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-NETGRAPH.txt OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.x OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xbn OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xn OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xr OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xs OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xu OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xc OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsc OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.x OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xbn OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xn OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xr OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xs OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xu OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xc OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsc OLD_FILES+=usr/libdata/msdosfs/iso22dos OLD_FILES+=usr/libdata/msdosfs/iso72dos OLD_FILES+=usr/libdata/msdosfs/koi2dos OLD_FILES+=usr/libdata/msdosfs/koi8u2dos # The following files are *not* obsolete, they just don't get touched at # install, so don't add them: # - boot/loader.rc # - usr/share/tmac/man.local # - usr/share/tmac/mm/locale # - usr/share/tmac/mm/se_locale # - var/yp/Makefile # 20071120: shared library version bump OLD_LIBS+=usr/lib/libasn1.so.8 OLD_LIBS+=usr/lib/libgssapi.so.8 OLD_LIBS+=usr/lib/libgssapi_krb5.so.8 OLD_LIBS+=usr/lib/libhdb.so.8 OLD_LIBS+=usr/lib/libkadm5clnt.so.8 OLD_LIBS+=usr/lib/libkadm5srv.so.8 OLD_LIBS+=usr/lib/libkafs5.so.8 OLD_LIBS+=usr/lib/libkrb5.so.8 OLD_LIBS+=usr/lib/libobjc.so.2 OLD_LIBS+=usr/lib32/libgssapi.so.8 OLD_LIBS+=usr/lib32/libobjc.so.2 # 20070519: GCC 4.2 OLD_LIBS+=usr/lib/libg2c.a OLD_LIBS+=usr/lib/libg2c.so OLD_LIBS+=usr/lib/libg2c.so.2 OLD_LIBS+=usr/lib/libg2c_p.a OLD_LIBS+=usr/lib/libgcc_pic.a OLD_LIBS+=usr/lib32/libg2c.a OLD_LIBS+=usr/lib32/libg2c.so OLD_LIBS+=usr/lib32/libg2c.so.2 OLD_LIBS+=usr/lib32/libg2c_p.a OLD_LIBS+=usr/lib32/libgcc_pic.a # 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade OLD_LIBS+=lib/libcrypto.so.4 OLD_LIBS+=usr/lib/libssl.so.4 OLD_LIBS+=usr/lib32/libcrypto.so.4 OLD_LIBS+=usr/lib32/libssl.so.4 # 20060521: gethostbyaddr(3) ABI change OLD_LIBS+=usr/lib/libroken.so.8 OLD_LIBS+=lib/libatm.so.3 OLD_LIBS+=lib/libc.so.6 OLD_LIBS+=lib/libutil.so.5 OLD_LIBS+=usr/lib32/libatm.so.3 OLD_LIBS+=usr/lib32/libc.so.6 OLD_LIBS+=usr/lib32/libutil.so.5 # 20060413: shared library moved to /usr/lib OLD_LIBS+=lib/libgpib.so.1 # 20060413: libpcap.so.4 moved to /lib/ OLD_LIBS+=usr/lib/libpcap.so.4 # 20060412: libpthread.so.2 moved to /lib/ OLD_LIBS+=usr/lib/libpthread.so.2 # 20060127: revert libdisk to static-only OLD_LIBS+=usr/lib/libdisk.so.3 # 20051027: libc_r discontinued (removed 20101113) OLD_LIBS+=usr/lib/libc_r.a OLD_LIBS+=usr/lib/libc_r.so OLD_LIBS+=usr/lib/libc_r.so.7 OLD_LIBS+=usr/lib/libc_r_p.a OLD_LIBS+=usr/lib32/libc_r.a OLD_LIBS+=usr/lib32/libc_r.so OLD_LIBS+=usr/lib32/libc_r.so.7 OLD_LIBS+=usr/lib32/libc_r_p.a # 20050722: bump for 6.0-RELEASE OLD_LIBS+=lib/libalias.so.4 OLD_LIBS+=lib/libatm.so.2 OLD_LIBS+=lib/libbegemot.so.1 OLD_LIBS+=lib/libbsdxml.so.1 
OLD_LIBS+=lib/libbsnmp.so.2 OLD_LIBS+=lib/libc.so.5 OLD_LIBS+=lib/libcam.so.2 OLD_LIBS+=lib/libcrypt.so.2 OLD_LIBS+=lib/libcrypto.so.3 OLD_LIBS+=lib/libdevstat.so.4 OLD_LIBS+=lib/libedit.so.4 OLD_LIBS+=lib/libgeom.so.2 OLD_LIBS+=lib/libgpib.so.0 OLD_LIBS+=lib/libipsec.so.1 OLD_LIBS+=lib/libipx.so.2 OLD_LIBS+=lib/libkiconv.so.1 OLD_LIBS+=lib/libkvm.so.2 OLD_LIBS+=lib/libm.so.3 OLD_LIBS+=lib/libmd.so.2 OLD_LIBS+=lib/libncurses.so.5 OLD_LIBS+=lib/libreadline.so.5 OLD_LIBS+=lib/libsbuf.so.2 OLD_LIBS+=lib/libufs.so.2 OLD_LIBS+=lib/libutil.so.4 OLD_LIBS+=lib/libz.so.2 OLD_LIBS+=usr/lib/libarchive.so.1 OLD_LIBS+=usr/lib/libasn1.so.7 OLD_LIBS+=usr/lib/libbluetooth.so.1 OLD_LIBS+=usr/lib/libbz2.so.1 OLD_LIBS+=usr/lib/libc_r.so.5 OLD_LIBS+=usr/lib/libcalendar.so.2 OLD_LIBS+=usr/lib/libcom_err.so.2 OLD_LIBS+=usr/lib/libdevinfo.so.2 OLD_LIBS+=usr/lib/libdialog.so.4 OLD_LIBS+=usr/lib/libfetch.so.3 OLD_LIBS+=usr/lib/libform.so.2 OLD_LIBS+=usr/lib/libftpio.so.5 OLD_LIBS+=usr/lib/libg2c.so.1 OLD_LIBS+=usr/lib/libgnuregex.so.2 OLD_LIBS+=usr/lib/libgssapi.so.7 OLD_LIBS+=usr/lib/libhdb.so.7 OLD_LIBS+=usr/lib/libhistory.so.5 OLD_LIBS+=usr/lib/libkadm5clnt.so.7 OLD_LIBS+=usr/lib/libkadm5srv.so.7 OLD_LIBS+=usr/lib/libkafs5.so.7 OLD_LIBS+=usr/lib/libkrb5.so.7 OLD_LIBS+=usr/lib/libmagic.so.1 OLD_LIBS+=usr/lib/libmenu.so.2 OLD_LIBS+=usr/lib/libmilter.so.2 OLD_LIBS+=usr/lib/libmp.so.4 OLD_LIBS+=usr/lib/libncp.so.1 OLD_LIBS+=usr/lib/libnetgraph.so.1 OLD_LIBS+=usr/lib/libngatm.so.1 OLD_LIBS+=usr/lib/libobjc.so.1 OLD_LIBS+=usr/lib/libopie.so.3 OLD_LIBS+=usr/lib/libpam.so.2 OLD_LIBS+=usr/lib/libpanel.so.2 OLD_LIBS+=usr/lib/libpcap.so.3 OLD_LIBS+=usr/lib/libpmc.so.2 OLD_LIBS+=usr/lib/libpthread.so.1 OLD_LIBS+=usr/lib/libradius.so.1 OLD_LIBS+=usr/lib/libroken.so.7 OLD_LIBS+=usr/lib/librpcsvc.so.2 OLD_LIBS+=usr/lib/libsdp.so.1 OLD_LIBS+=usr/lib/libsmb.so.1 OLD_LIBS+=usr/lib/libssh.so.2 OLD_LIBS+=usr/lib/libssl.so.3 OLD_LIBS+=usr/lib/libstdc++.so.4 OLD_LIBS+=usr/lib/libtacplus.so.1 OLD_LIBS+=usr/lib/libthr.so.1 OLD_LIBS+=usr/lib/libthread_db.so.1 OLD_LIBS+=usr/lib/libugidfw.so.1 OLD_LIBS+=usr/lib/libusbhid.so.1 OLD_LIBS+=usr/lib/libvgl.so.3 OLD_LIBS+=usr/lib/libwrap.so.3 OLD_LIBS+=usr/lib/libypclnt.so.1 OLD_LIBS+=usr/lib/pam_chroot.so.2 OLD_LIBS+=usr/lib/pam_deny.so.2 OLD_LIBS+=usr/lib/pam_echo.so.2 OLD_LIBS+=usr/lib/pam_exec.so.2 OLD_LIBS+=usr/lib/pam_ftpusers.so.2 OLD_LIBS+=usr/lib/pam_group.so.2 OLD_LIBS+=usr/lib/pam_guest.so.2 OLD_LIBS+=usr/lib/pam_krb5.so.2 OLD_LIBS+=usr/lib/pam_ksu.so.2 OLD_LIBS+=usr/lib/pam_lastlog.so.2 OLD_LIBS+=usr/lib/pam_login_access.so.2 OLD_LIBS+=usr/lib/pam_nologin.so.2 OLD_LIBS+=usr/lib/pam_opie.so.2 OLD_LIBS+=usr/lib/pam_opieaccess.so.2 OLD_LIBS+=usr/lib/pam_passwdqc.so.2 OLD_LIBS+=usr/lib/pam_permit.so.2 OLD_LIBS+=usr/lib/pam_radius.so.2 OLD_LIBS+=usr/lib/pam_rhosts.so.2 OLD_LIBS+=usr/lib/pam_rootok.so.2 OLD_LIBS+=usr/lib/pam_securetty.so.2 OLD_LIBS+=usr/lib/pam_self.so.2 OLD_LIBS+=usr/lib/pam_ssh.so.2 OLD_LIBS+=usr/lib/pam_tacplus.so.2 OLD_LIBS+=usr/lib/pam_unix.so.2 OLD_LIBS+=usr/lib/snmp_atm.so.3 OLD_LIBS+=usr/lib/snmp_mibII.so.3 OLD_LIBS+=usr/lib/snmp_netgraph.so.3 OLD_LIBS+=usr/lib/snmp_pf.so.3 # 200505XX: ? 
OLD_LIBS+=usr/lib/snmp_atm.so.2 OLD_LIBS+=usr/lib/snmp_mibII.so.2 OLD_LIBS+=usr/lib/snmp_netgraph.so.2 OLD_LIBS+=usr/lib/snmp_pf.so.2 # 2005XXXX: not ready for primetime yet OLD_LIBS+=usr/lib/libautofs.so.1 # 200411XX: libxpg4 removal OLD_LIBS+=usr/lib/libxpg4.so.3 # 200410XX: libm compatibility fix OLD_LIBS+=lib/libm.so.2 # 20041001: version bump OLD_LIBS+=lib/libreadline.so.4 OLD_LIBS+=usr/lib/libhistory.so.4 OLD_LIBS+=usr/lib/libopie.so.2 OLD_LIBS+=usr/lib/libpcap.so.2 # 20040925: bind9 import OLD_LIBS+=usr/lib/libisc.so.1 # 200408XX OLD_LIBS+=usr/lib/snmp_netgraph.so.1 # 200404XX OLD_LIBS+=usr/lib/libsnmp.so.1 OLD_LIBS+=usr/lib/snmp_mibII.so.1 # 200309XX OLD_LIBS+=usr/lib/libasn1.so.6 OLD_LIBS+=usr/lib/libhdb.so.6 OLD_LIBS+=usr/lib/libkadm5clnt.so.6 OLD_LIBS+=usr/lib/libkadm5srv.so.6 OLD_LIBS+=usr/lib/libkrb5.so.6 OLD_LIBS+=usr/lib/libroken.so.6 # 200304XX OLD_LIBS+=usr/lib/libc.so.4 OLD_LIBS+=usr/lib/libc_r.so.4 OLD_LIBS+=usr/lib/libdevstat.so.2 OLD_LIBS+=usr/lib/libedit.so.3 OLD_LIBS+=usr/lib/libgmp.so.3 OLD_LIBS+=usr/lib/libmp.so.3 OLD_LIBS+=usr/lib/libpam.so.1 OLD_LIBS+=usr/lib/libposix1e.so.2 OLD_LIBS+=usr/lib/libskey.so.2 OLD_LIBS+=usr/lib/libusbhid.so.0 OLD_LIBS+=usr/lib/libvgl.so.2 # 20030218: OpenSSL 0.9.7 import OLD_FILES+=usr/include/des.h OLD_FILES+=usr/lib/libdes.a OLD_FILES+=usr/lib/libdes.so OLD_LIBS+=usr/lib/libdes.so.3 OLD_FILES+=usr/lib/libdes_p.a # 200302XX OLD_LIBS+=usr/lib/libacl.so.3 OLD_LIBS+=usr/lib/libasn1.so.5 OLD_LIBS+=usr/lib/libcrypto.so.2 OLD_LIBS+=usr/lib/libgssapi.so.5 OLD_LIBS+=usr/lib/libhdb.so.5 OLD_LIBS+=usr/lib/libkadm.so.3 OLD_LIBS+=usr/lib/libkadm5clnt.so.5 OLD_LIBS+=usr/lib/libkadm5srv.so.5 OLD_LIBS+=usr/lib/libkafs.so.3 OLD_LIBS+=usr/lib/libkafs5.so.5 OLD_LIBS+=usr/lib/libkdb.so.3 OLD_LIBS+=usr/lib/libkrb.so.3 OLD_LIBS+=usr/lib/libroken.so. OLD_LIBS+=usr/lib/libssl.so.2 OLD_LIBS+=usr/lib/pam_kerberosIV.so # 200208XX OLD_LIBS+=usr/lib/libgssapi.so.4 # 200203XX OLD_LIBS+=usr/lib/libss.so.3 OLD_LIBS+=usr/lib/libusb.so.0 # 200112XX OLD_LIBS+=usr/lib/libfetch.so.2 # 200110XX OLD_LIBS+=usr/lib/libgssapi.so.3 # 200104XX OLD_LIBS+=usr/lib/libdescrypt.so.2 OLD_LIBS+=usr/lib/libscrypt.so.2 # 200102XX OLD_LIBS+=usr/lib/libcrypto.so.1 OLD_LIBS+=usr/lib/libssl.so.1 # 200009XX OLD_LIBS+=usr/lib/libRSAglue.so.1 OLD_LIBS+=usr/lib/librsaINTL.so.1 OLD_LIBS+=usr/lib/librsaUSA.so.1 # 200006XX OLD_LIBS+=usr/lib/libalias.so.3 OLD_LIBS+=usr/lib/libfetch.so.1 OLD_LIBS+=usr/lib/libipsec.so.0 # 200005XX OLD_LIBS+=usr/lib/libxpg4.so.2 # 200002XX OLD_LIBS+=usr/lib/libc.so.3 OLD_LIBS+=usr/lib/libcurses.so.2 OLD_LIBS+=usr/lib/libdialog.so.3 OLD_LIBS+=usr/lib/libedit.so.2 OLD_LIBS+=usr/lib/libf2c.so.2 OLD_LIBS+=usr/lib/libftpio.so.4 OLD_LIBS+=usr/lib/libg++.so.4 OLD_LIBS+=usr/lib/libhistory.so.3 OLD_LIBS+=usr/lib/libmytinfo.so.2 OLD_LIBS+=usr/lib/libncurses.so.3 OLD_LIBS+=usr/lib/libreadline.so.3 OLD_LIBS+=usr/lib/libss.so.2 OLD_LIBS+=usr/lib/libtermcap.so.2 OLD_LIBS+=usr/lib/libutil.so.2 OLD_LIBS+=usr/lib/libvgl.so.1 OLD_LIBS+=usr/lib/libwrap.so.2 # 19991216 OLD_FILES+=usr/sbin/xntpdc # 199909XX OLD_LIBS+=usr/lib/libc_r.so.3 # ??? 
OLD_LIBS+=usr/lib/libarchive.so.2 OLD_LIBS+=usr/lib/libbsnmp.so.1 OLD_LIBS+=usr/lib/libc_r.so.6 OLD_LIBS+=usr/lib32/libarchive.so.2 OLD_LIBS+=usr/lib32/libc_r.so.6 OLD_LIBS+=usr/lib/libcipher.so.2 OLD_LIBS+=usr/lib/libgssapi.so.6 OLD_LIBS+=usr/lib/libkse.so.1 OLD_LIBS+=usr/lib/liblwres.so.3 OLD_LIBS+=usr/lib/pam_ftp.so.2 # 20131013: Removal of the ATF tools OLD_DIRS+=etc/atf OLD_DIRS+=usr/share/examples/atf OLD_DIRS+=usr/share/xml/atf OLD_DIRS+=usr/share/xml OLD_DIRS+=usr/share/xsl/atf OLD_DIRS+=usr/share/xsl # 20040925: bind9 import OLD_DIRS+=usr/share/doc/bind/html OLD_DIRS+=usr/share/doc/bind/misc OLD_DIRS+=usr/share/doc/bind/ # ??? OLD_DIRS+=usr/include/g++/std OLD_DIRS+=usr/include/msdosfs OLD_DIRS+=usr/include/ntfs OLD_DIRS+=usr/include/nwfs OLD_DIRS+=usr/include/ufs/mfs # 20011001: UUCP migration to ports OLD_DIRS+=usr/libexec/uucp .include "tools/build/mk/OptionalObsoleteFiles.inc" Index: head/share/man/man9/Makefile =================================================================== --- head/share/man/man9/Makefile (revision 296271) +++ head/share/man/man9/Makefile (revision 296272) @@ -1,1920 +1,1919 @@ # $FreeBSD$ .include MAN= accept_filter.9 \ accf_data.9 \ accf_dns.9 \ accf_http.9 \ acl.9 \ alq.9 \ altq.9 \ atomic.9 \ bios.9 \ bitset.9 \ boot.9 \ bpf.9 \ buf.9 \ buf_ring.9 \ BUF_ISLOCKED.9 \ BUF_LOCK.9 \ BUF_LOCKFREE.9 \ BUF_LOCKINIT.9 \ BUF_RECURSED.9 \ BUF_TIMELOCK.9 \ BUF_UNLOCK.9 \ bus_activate_resource.9 \ BUS_ADD_CHILD.9 \ bus_adjust_resource.9 \ bus_alloc_resource.9 \ BUS_BIND_INTR.9 \ bus_child_present.9 \ BUS_CHILD_DELETED.9 \ BUS_CHILD_DETACHED.9 \ BUS_CONFIG_INTR.9 \ BUS_DESCRIBE_INTR.9 \ bus_dma.9 \ bus_generic_attach.9 \ bus_generic_detach.9 \ bus_generic_new_pass.9 \ bus_generic_print_child.9 \ bus_generic_read_ivar.9 \ bus_generic_shutdown.9 \ bus_get_resource.9 \ BUS_NEW_PASS.9 \ BUS_PRINT_CHILD.9 \ BUS_READ_IVAR.9 \ bus_release_resource.9 \ bus_set_pass.9 \ bus_set_resource.9 \ BUS_SETUP_INTR.9 \ bus_space.9 \ byteorder.9 \ casuword.9 \ cd.9 \ condvar.9 \ config_intrhook.9 \ contigmalloc.9 \ copy.9 \ counter.9 \ cpuset.9 \ cr_cansee.9 \ critical_enter.9 \ cr_seeothergids.9 \ cr_seeotheruids.9 \ crypto.9 \ CTASSERT.9 \ DB_COMMAND.9 \ DECLARE_GEOM_CLASS.9 \ DECLARE_MODULE.9 \ DELAY.9 \ devclass.9 \ devclass_find.9 \ devclass_get_device.9 \ devclass_get_devices.9 \ devclass_get_drivers.9 \ devclass_get_maxunit.9 \ devclass_get_name.9 \ devclass_get_softc.9 \ dev_clone.9 \ devfs_set_cdevpriv.9 \ device.9 \ device_add_child.9 \ DEVICE_ATTACH.9 \ device_delete_child.9 \ DEVICE_DETACH.9 \ device_enable.9 \ device_find_child.9 \ device_get_children.9 \ device_get_devclass.9 \ device_get_driver.9 \ device_get_ivars.9 \ device_get_name.9 \ device_get_parent.9 \ device_get_softc.9 \ device_get_state.9 \ device_get_sysctl.9 \ device_get_unit.9 \ DEVICE_IDENTIFY.9 \ device_printf.9 \ DEVICE_PROBE.9 \ device_probe_and_attach.9 \ device_quiet.9 \ device_set_desc.9 \ device_set_driver.9 \ device_set_flags.9 \ DEVICE_SHUTDOWN.9 \ DEV_MODULE.9 \ devstat.9 \ devtoname.9 \ disk.9 \ domain.9 \ drbr.9 \ driver.9 \ DRIVER_MODULE.9 \ EVENTHANDLER.9 \ eventtimers.9 \ extattr.9 \ fail.9 \ fetch.9 \ firmware.9 \ fpu_kern.9 \ g_access.9 \ g_attach.9 \ g_bio.9 \ g_consumer.9 \ g_data.9 \ get_cyclecount.9 \ getenv.9 \ getnewvnode.9 \ g_event.9 \ g_geom.9 \ g_provider.9 \ g_provider_by_name.9 \ groupmember.9 \ g_wither_geom.9 \ hash.9 \ hashinit.9 \ hexdump.9 \ hhook.9 \ ieee80211.9 \ ieee80211_amrr.9 \ ieee80211_beacon.9 \ ieee80211_bmiss.9 \ ieee80211_crypto.9 \ ieee80211_ddb.9 \ 
ieee80211_input.9 \ ieee80211_node.9 \ ieee80211_output.9 \ ieee80211_proto.9 \ ieee80211_radiotap.9 \ ieee80211_regdomain.9 \ ieee80211_scan.9 \ ieee80211_vap.9 \ ifnet.9 \ inittodr.9 \ insmntque.9 \ intro.9 \ ithread.9 \ KASSERT.9 \ kern_testfrwk.9 \ kernacc.9 \ kernel_mount.9 \ khelp.9 \ kobj.9 \ kproc.9 \ kqueue.9 \ kthread.9 \ ktr.9 \ lock.9 \ locking.9 \ LOCK_PROFILING.9 \ mac.9 \ make_dev.9 \ malloc.9 \ mbchain.9 \ mbpool.9 \ mbuf.9 \ mbuf_tags.9 \ MD5.9 \ mdchain.9 \ memcchr.9 \ memguard.9 \ microseq.9 \ microtime.9 \ microuptime.9 \ mi_switch.9 \ mod_cc.9 \ module.9 \ MODULE_DEPEND.9 \ MODULE_VERSION.9 \ mtx_pool.9 \ mutex.9 \ namei.9 \ netisr.9 \ nv.9 \ osd.9 \ owll.9 \ own.9 \ panic.9 \ pbuf.9 \ PCBGROUP.9 \ p_candebug.9 \ p_cansee.9 \ pci.9 \ PCI_IOV_ADD_VF.9 \ PCI_IOV_INIT.9 \ pci_iov_schema.9 \ PCI_IOV_UNINIT.9 \ pfil.9 \ pfind.9 \ pget.9 \ pgfind.9 \ PHOLD.9 \ physio.9 \ pmap.9 \ pmap_activate.9 \ pmap_clear_modify.9 \ pmap_copy.9 \ pmap_enter.9 \ pmap_extract.9 \ pmap_growkernel.9 \ pmap_init.9 \ pmap_is_modified.9 \ pmap_is_prefaultable.9 \ pmap_map.9 \ pmap_mincore.9 \ pmap_object_init_pt.9 \ pmap_page_exists_quick.9 \ pmap_page_init.9 \ pmap_pinit.9 \ pmap_protect.9 \ pmap_qenter.9 \ pmap_quick_enter_page.9 \ pmap_release.9 \ pmap_remove.9 \ pmap_resident_count.9 \ pmap_unwire.9 \ pmap_zero_page.9 \ printf.9 \ prison_check.9 \ priv.9 \ proc_rwmem.9 \ pseudofs.9 \ psignal.9 \ random.9 \ random_harvest.9 \ redzone.9 \ refcount.9 \ resettodr.9 \ resource_int_value.9 \ rijndael.9 \ rman.9 \ rmlock.9 \ rtalloc.9 \ rtentry.9 \ runqueue.9 \ rwlock.9 \ sbuf.9 \ scheduler.9 \ SDT.9 \ securelevel_gt.9 \ selrecord.9 \ sema.9 \ sf_buf.9 \ sglist.9 \ shm_map.9 \ signal.9 \ sleep.9 \ sleepqueue.9 \ socket.9 \ stack.9 \ store.9 \ style.9 \ swi.9 \ sx.9 \ SYSCALL_MODULE.9 \ sysctl.9 \ sysctl_add_oid.9 \ sysctl_ctx_init.9 \ SYSINIT.9 \ taskqueue.9 \ thread_exit.9 \ time.9 \ timeout.9 \ tvtohz.9 \ ucred.9 \ uidinfo.9 \ uio.9 \ unr.9 \ utopia.9 \ vaccess.9 \ vaccess_acl_nfs4.9 \ vaccess_acl_posix1e.9 \ vcount.9 \ vflush.9 \ VFS.9 \ vfs_busy.9 \ VFS_CHECKEXP.9 \ vfsconf.9 \ VFS_FHTOVP.9 \ vfs_getnewfsid.9 \ vfs_getopt.9 \ vfs_getvfs.9 \ VFS_MOUNT.9 \ vfs_mountedfrom.9 \ VFS_QUOTACTL.9 \ VFS_ROOT.9 \ vfs_rootmountalloc.9 \ VFS_SET.9 \ VFS_STATFS.9 \ vfs_suser.9 \ VFS_SYNC.9 \ vfs_timestamp.9 \ vfs_unbusy.9 \ VFS_UNMOUNT.9 \ vfs_unmountall.9 \ VFS_VGET.9 \ vget.9 \ vgone.9 \ vhold.9 \ vinvalbuf.9 \ vm_fault_prefault.9 \ vm_map.9 \ vm_map_check_protection.9 \ vm_map_create.9 \ vm_map_delete.9 \ vm_map_entry_resize_free.9 \ vm_map_find.9 \ vm_map_findspace.9 \ vm_map_inherit.9 \ vm_map_init.9 \ vm_map_insert.9 \ vm_map_lock.9 \ vm_map_lookup.9 \ vm_map_madvise.9 \ vm_map_max.9 \ vm_map_protect.9 \ vm_map_remove.9 \ vm_map_simplify_entry.9 \ vm_map_stack.9 \ vm_map_submap.9 \ vm_map_sync.9 \ vm_map_wire.9 \ vm_page_alloc.9 \ vm_page_bits.9 \ vm_page_busy.9 \ vm_page_cache.9 \ vm_page_deactivate.9 \ vm_page_dontneed.9 \ vm_page_aflag.9 \ vm_page_free.9 \ vm_page_grab.9 \ vm_page_hold.9 \ vm_page_insert.9 \ vm_page_lookup.9 \ vm_page_rename.9 \ vm_page_wire.9 \ vm_set_page_size.9 \ vmem.9 \ vn_fullpath.9 \ vn_isdisk.9 \ vnet.9 \ vnode.9 \ VOP_ACCESS.9 \ VOP_ACLCHECK.9 \ VOP_ADVISE.9 \ VOP_ADVLOCK.9 \ VOP_ALLOCATE.9 \ VOP_ATTRIB.9 \ VOP_BWRITE.9 \ VOP_CREATE.9 \ VOP_FSYNC.9 \ VOP_GETACL.9 \ VOP_GETEXTATTR.9 \ VOP_GETPAGES.9 \ VOP_INACTIVE.9 \ VOP_IOCTL.9 \ VOP_LINK.9 \ VOP_LISTEXTATTR.9 \ VOP_LOCK.9 \ VOP_LOOKUP.9 \ VOP_OPENCLOSE.9 \ VOP_PATHCONF.9 \ VOP_PRINT.9 \ VOP_RDWR.9 \ VOP_READDIR.9 \ 
VOP_READLINK.9 \ VOP_REALLOCBLKS.9 \ VOP_REMOVE.9 \ VOP_RENAME.9 \ VOP_REVOKE.9 \ VOP_SETACL.9 \ VOP_SETEXTATTR.9 \ VOP_STRATEGY.9 \ VOP_VPTOCNP.9 \ VOP_VPTOFH.9 \ vref.9 \ vrefcnt.9 \ vrele.9 \ vslock.9 \ watchdog.9 \ zone.9 MLINKS= unr.9 alloc_unr.9 \ unr.9 alloc_unrl.9 \ unr.9 alloc_unr_specific.9 \ unr.9 delete_unrhdr.9 \ unr.9 free_unr.9 \ unr.9 new_unrhdr.9 MLINKS+=accept_filter.9 accept_filt_add.9 \ accept_filter.9 accept_filt_del.9 \ accept_filter.9 accept_filt_generic_mod_event.9 \ accept_filter.9 accept_filt_get.9 MLINKS+=alq.9 ALQ.9 \ alq.9 alq_close.9 \ alq.9 alq_flush.9 \ alq.9 alq_get.9 \ alq.9 alq_getn.9 \ alq.9 alq_open.9 \ alq.9 alq_open_flags.9 \ alq.9 alq_post.9 \ alq.9 alq_post_flags.9 \ alq.9 alq_write.9 \ alq.9 alq_writen.9 MLINKS+=altq.9 ALTQ.9 MLINKS+=atomic.9 atomic_add.9 \ atomic.9 atomic_clear.9 \ atomic.9 atomic_cmpset.9 \ atomic.9 atomic_fetchadd.9 \ atomic.9 atomic_load.9 \ atomic.9 atomic_readandclear.9 \ atomic.9 atomic_set.9 \ atomic.9 atomic_store.9 \ atomic.9 atomic_subtract.9 \ atomic.9 atomic_swap.9 \ atomic.9 atomic_testandset.9 MLINKS+=bitset.9 BITSET_DEFINE.9 \ bitset.9 BITSET_T_INITIALIZER.9 \ bitset.9 BITSET_FSET.9 \ bitset.9 BIT_CLR.9 \ bitset.9 BIT_COPY.9 \ bitset.9 BIT_ISSET.9 \ bitset.9 BIT_SET.9 \ bitset.9 BIT_ZERO.9 \ bitset.9 BIT_FILL.9 \ bitset.9 BIT_SETOF.9 \ bitset.9 BIT_EMPTY.9 \ bitset.9 BIT_ISFULLSET.9 \ bitset.9 BIT_FFS.9 \ bitset.9 BIT_COUNT.9 \ bitset.9 BIT_SUBSET.9 \ bitset.9 BIT_OVERLAP.9 \ bitset.9 BIT_CMP.9 \ bitset.9 BIT_OR.9 \ bitset.9 BIT_AND.9 \ bitset.9 BIT_NAND.9 \ bitset.9 BIT_CLR_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC_ACQ.9 \ bitset.9 BIT_AND_ATOMIC.9 \ bitset.9 BIT_OR_ATOMIC.9 \ bitset.9 BIT_COPY_STORE_REL.9 MLINKS+=bpf.9 bpfattach.9 \ bpf.9 bpfattach2.9 \ bpf.9 bpfdetach.9 \ bpf.9 bpf_filter.9 \ bpf.9 bpf_mtap.9 \ bpf.9 bpf_mtap2.9 \ bpf.9 bpf_tap.9 \ bpf.9 bpf_validate.9 MLINKS+=buf.9 bp.9 MLINKS+=buf_ring.9 buf_ring_alloc.9 \ buf_ring.9 buf_ring_free.9 \ buf_ring.9 buf_ring_enqueue.9 \ buf_ring.9 buf_ring_enqueue_bytes.9 \ buf_ring.9 buf_ring_dequeue_mc.9 \ buf_ring.9 buf_ring_dequeue_sc.9 \ buf_ring.9 buf_ring_count.9 \ buf_ring.9 buf_ring_empty.9 \ buf_ring.9 buf_ring_full.9 \ buf_ring.9 buf_ring_peek.9 MLINKS+=bus_activate_resource.9 bus_deactivate_resource.9 MLINKS+=bus_alloc_resource.9 bus_alloc_resource_any.9 MLINKS+=BUS_BIND_INTR.9 bus_bind_intr.9 MLINKS+=BUS_DESCRIBE_INTR.9 bus_describe_intr.9 MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 bus_dmamap_create.9 \ bus_dma.9 bus_dmamap_destroy.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_bio.9 \ bus_dma.9 bus_dmamap_load_ccb.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ bus_dma.9 bus_dmamap_load_uio.9 \ bus_dma.9 bus_dmamap_sync.9 \ bus_dma.9 bus_dmamap_unload.9 \ bus_dma.9 bus_dmamem_alloc.9 \ bus_dma.9 bus_dmamem_free.9 \ bus_dma.9 bus_dma_tag_create.9 \ bus_dma.9 bus_dma_tag_destroy.9 MLINKS+=bus_generic_read_ivar.9 bus_generic_write_ivar.9 MLINKS+=BUS_READ_IVAR.9 BUS_WRITE_IVAR.9 MLINKS+=BUS_SETUP_INTR.9 bus_setup_intr.9 \ BUS_SETUP_INTR.9 BUS_TEARDOWN_INTR.9 \ BUS_SETUP_INTR.9 bus_teardown_intr.9 MLINKS+=bus_space.9 bus_space_alloc.9 \ bus_space.9 bus_space_barrier.9 \ bus_space.9 bus_space_copy_region_1.9 \ bus_space.9 bus_space_copy_region_2.9 \ bus_space.9 bus_space_copy_region_4.9 \ bus_space.9 bus_space_copy_region_8.9 \ bus_space.9 bus_space_copy_region_stream_1.9 \ bus_space.9 bus_space_copy_region_stream_2.9 \ bus_space.9 bus_space_copy_region_stream_4.9 \ bus_space.9 
bus_space_copy_region_stream_8.9 \ bus_space.9 bus_space_free.9 \ bus_space.9 bus_space_map.9 \ bus_space.9 bus_space_read_1.9 \ bus_space.9 bus_space_read_2.9 \ bus_space.9 bus_space_read_4.9 \ bus_space.9 bus_space_read_8.9 \ bus_space.9 bus_space_read_multi_1.9 \ bus_space.9 bus_space_read_multi_2.9 \ bus_space.9 bus_space_read_multi_4.9 \ bus_space.9 bus_space_read_multi_8.9 \ bus_space.9 bus_space_read_multi_stream_1.9 \ bus_space.9 bus_space_read_multi_stream_2.9 \ bus_space.9 bus_space_read_multi_stream_4.9 \ bus_space.9 bus_space_read_multi_stream_8.9 \ bus_space.9 bus_space_read_region_1.9 \ bus_space.9 bus_space_read_region_2.9 \ bus_space.9 bus_space_read_region_4.9 \ bus_space.9 bus_space_read_region_8.9 \ bus_space.9 bus_space_read_region_stream_1.9 \ bus_space.9 bus_space_read_region_stream_2.9 \ bus_space.9 bus_space_read_region_stream_4.9 \ bus_space.9 bus_space_read_region_stream_8.9 \ bus_space.9 bus_space_read_stream_1.9 \ bus_space.9 bus_space_read_stream_2.9 \ bus_space.9 bus_space_read_stream_4.9 \ bus_space.9 bus_space_read_stream_8.9 \ bus_space.9 bus_space_set_multi_1.9 \ bus_space.9 bus_space_set_multi_2.9 \ bus_space.9 bus_space_set_multi_4.9 \ bus_space.9 bus_space_set_multi_8.9 \ bus_space.9 bus_space_set_multi_stream_1.9 \ bus_space.9 bus_space_set_multi_stream_2.9 \ bus_space.9 bus_space_set_multi_stream_4.9 \ bus_space.9 bus_space_set_multi_stream_8.9 \ bus_space.9 bus_space_set_region_1.9 \ bus_space.9 bus_space_set_region_2.9 \ bus_space.9 bus_space_set_region_4.9 \ bus_space.9 bus_space_set_region_8.9 \ bus_space.9 bus_space_set_region_stream_1.9 \ bus_space.9 bus_space_set_region_stream_2.9 \ bus_space.9 bus_space_set_region_stream_4.9 \ bus_space.9 bus_space_set_region_stream_8.9 \ bus_space.9 bus_space_subregion.9 \ bus_space.9 bus_space_unmap.9 \ bus_space.9 bus_space_write_1.9 \ bus_space.9 bus_space_write_2.9 \ bus_space.9 bus_space_write_4.9 \ bus_space.9 bus_space_write_8.9 \ bus_space.9 bus_space_write_multi_1.9 \ bus_space.9 bus_space_write_multi_2.9 \ bus_space.9 bus_space_write_multi_4.9 \ bus_space.9 bus_space_write_multi_8.9 \ bus_space.9 bus_space_write_multi_stream_1.9 \ bus_space.9 bus_space_write_multi_stream_2.9 \ bus_space.9 bus_space_write_multi_stream_4.9 \ bus_space.9 bus_space_write_multi_stream_8.9 \ bus_space.9 bus_space_write_region_1.9 \ bus_space.9 bus_space_write_region_2.9 \ bus_space.9 bus_space_write_region_4.9 \ bus_space.9 bus_space_write_region_8.9 \ bus_space.9 bus_space_write_region_stream_1.9 \ bus_space.9 bus_space_write_region_stream_2.9 \ bus_space.9 bus_space_write_region_stream_4.9 \ bus_space.9 bus_space_write_region_stream_8.9 \ bus_space.9 bus_space_write_stream_1.9 \ bus_space.9 bus_space_write_stream_2.9 \ bus_space.9 bus_space_write_stream_4.9 \ bus_space.9 bus_space_write_stream_8.9 MLINKS+=byteorder.9 be16dec.9 \ byteorder.9 be16enc.9 \ byteorder.9 be16toh.9 \ byteorder.9 be32dec.9 \ byteorder.9 be32enc.9 \ byteorder.9 be32toh.9 \ byteorder.9 be64dec.9 \ byteorder.9 be64enc.9 \ byteorder.9 be64toh.9 \ byteorder.9 bswap16.9 \ byteorder.9 bswap32.9 \ byteorder.9 bswap64.9 \ byteorder.9 htobe16.9 \ byteorder.9 htobe32.9 \ byteorder.9 htobe64.9 \ byteorder.9 htole16.9 \ byteorder.9 htole32.9 \ byteorder.9 htole64.9 \ byteorder.9 le16dec.9 \ byteorder.9 le16enc.9 \ byteorder.9 le16toh.9 \ byteorder.9 le32dec.9 \ byteorder.9 le32enc.9 \ byteorder.9 le32toh.9 \ byteorder.9 le64dec.9 \ byteorder.9 le64enc.9 \ byteorder.9 le64toh.9 MLINKS+=condvar.9 cv_broadcast.9 \ condvar.9 cv_broadcastpri.9 \ condvar.9 
cv_destroy.9 \ condvar.9 cv_init.9 \ condvar.9 cv_signal.9 \ condvar.9 cv_timedwait.9 \ condvar.9 cv_timedwait_sig.9 \ condvar.9 cv_timedwait_sig_sbt.9 \ condvar.9 cv_wait.9 \ condvar.9 cv_wait_sig.9 \ condvar.9 cv_wait_unlock.9 \ condvar.9 cv_wmesg.9 MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_establish.9 MLINKS+=contigmalloc.9 contigfree.9 MLINKS+=casuword.9 casueword.9 \ casuword.9 casueword32.9 \ casuword.9 casuword32.9 MLINKS+=copy.9 copyin.9 \ copy.9 copyin_nofault.9 \ copy.9 copyinstr.9 \ copy.9 copyout.9 \ copy.9 copyout_nofault.9 \ copy.9 copystr.9 MLINKS+=counter.9 counter_u64_alloc.9 \ counter.9 counter_u64_free.9 \ counter.9 counter_u64_add.9 \ counter.9 counter_enter.9 \ counter.9 counter_exit.9 \ counter.9 counter_u64_add_protected.9 \ counter.9 counter_u64_fetch.9 \ counter.9 counter_u64_zero.9 MLINKS+=cpuset.9 CPUSET_T_INITIALIZER.9 \ cpuset.9 CPUSET_FSET.9 \ cpuset.9 CPU_CLR.9 \ cpuset.9 CPU_COPY.9 \ cpuset.9 CPU_ISSET.9 \ cpuset.9 CPU_SET.9 \ cpuset.9 CPU_ZERO.9 \ cpuset.9 CPU_FILL.9 \ cpuset.9 CPU_SETOF.9 \ cpuset.9 CPU_EMPTY.9 \ cpuset.9 CPU_ISFULLSET.9 \ cpuset.9 CPU_FFS.9 \ cpuset.9 CPU_COUNT.9 \ cpuset.9 CPU_SUBSET.9 \ cpuset.9 CPU_OVERLAP.9 \ cpuset.9 CPU_CMP.9 \ cpuset.9 CPU_OR.9 \ cpuset.9 CPU_AND.9 \ cpuset.9 CPU_NAND.9 \ cpuset.9 CPU_CLR_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC_ACQ.9 \ cpuset.9 CPU_AND_ATOMIC.9 \ cpuset.9 CPU_OR_ATOMIC.9 \ cpuset.9 CPU_COPY_STORE_REL.9 MLINKS+=critical_enter.9 critical.9 \ critical_enter.9 critical_exit.9 MLINKS+=crypto.9 crypto_dispatch.9 \ crypto.9 crypto_done.9 \ crypto.9 crypto_freereq.9 \ crypto.9 crypto_freesession.9 \ crypto.9 crypto_get_driverid.9 \ crypto.9 crypto_getreq.9 \ crypto.9 crypto_kdispatch.9 \ crypto.9 crypto_kdone.9 \ crypto.9 crypto_kregister.9 \ crypto.9 crypto_newsession.9 \ crypto.9 crypto_register.9 \ crypto.9 crypto_unblock.9 \ crypto.9 crypto_unregister.9 \ crypto.9 crypto_unregister_all.9 MLINKS+=DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \ DB_COMMAND.9 DB_SHOW_COMMAND.9 MLINKS+=dev_clone.9 drain_dev_clone_events.9 MLINKS+=devfs_set_cdevpriv.9 devfs_clear_cdevpriv.9 \ devfs_set_cdevpriv.9 devfs_get_cdevpriv.9 MLINKS+=device_add_child.9 device_add_child_ordered.9 MLINKS+=device_enable.9 device_disable.9 \ device_enable.9 device_is_enabled.9 MLINKS+=device_get_ivars.9 device_set_ivars.9 MLINKS+=device_get_name.9 device_get_nameunit.9 MLINKS+=device_get_state.9 device_busy.9 \ device_get_state.9 device_is_alive.9 \ device_get_state.9 device_is_attached.9 \ device_get_state.9 device_unbusy.9 MLINKS+=device_get_sysctl.9 device_get_sysctl_ctx.9 \ device_get_sysctl.9 device_get_sysctl_tree.9 MLINKS+=device_quiet.9 device_is_quiet.9 \ device_quiet.9 device_verbose.9 MLINKS+=device_set_desc.9 device_get_desc.9 \ device_set_desc.9 device_set_desc_copy.9 MLINKS+=device_set_flags.9 device_get_flags.9 MLINKS+=devstat.9 devicestat.9 \ devstat.9 devstat_add_entry.9 \ devstat.9 devstat_end_transaction.9 \ devstat.9 devstat_remove_entry.9 \ devstat.9 devstat_start_transaction.9 MLINKS+=disk.9 disk_alloc.9 \ disk.9 disk_create.9 \ disk.9 disk_destroy.9 \ disk.9 disk_gone.9 \ disk.9 disk_resize.9 MLINKS+=domain.9 DOMAIN_SET.9 \ domain.9 domain_add.9 \ domain.9 pfctlinput.9 \ domain.9 pfctlinput2.9 \ domain.9 pffinddomain.9 \ domain.9 pffindproto.9 \ domain.9 pffindtype.9 MLINKS+=drbr.9 drbr_free.9 \ drbr.9 drbr_enqueue.9 \ drbr.9 drbr_dequeue.9 \ drbr.9 drbr_dequeue_cond.9 \ drbr.9 drbr_flush.9 \ drbr.9 drbr_empty.9 \ drbr.9 drbr_inuse.9 \ drbr.9 
drbr_stats_update.9 MLINKS+=DRIVER_MODULE.9 DRIVER_MODULE_ORDERED.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE_ORDERED.9 MLINKS+=EVENTHANDLER.9 EVENTHANDLER_DECLARE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEREGISTER.9 \ EVENTHANDLER.9 eventhandler_deregister.9 \ EVENTHANDLER.9 eventhandler_find_list.9 \ EVENTHANDLER.9 EVENTHANDLER_INVOKE.9 \ EVENTHANDLER.9 eventhandler_prune_list.9 \ EVENTHANDLER.9 EVENTHANDLER_REGISTER.9 \ EVENTHANDLER.9 eventhandler_register.9 MLINKS+=eventtimers.9 et_register.9 \ eventtimers.9 et_deregister.9 \ eventtimers.9 et_ban.9 \ eventtimers.9 et_find.9 \ eventtimers.9 et_free.9 \ eventtimers.9 et_init.9 \ eventtimers.9 ET_LOCK.9 \ eventtimers.9 ET_UNLOCK.9 \ eventtimers.9 et_start.9 \ eventtimers.9 et_stop.9 MLINKS+=fail.9 KFAIL_POINT_CODE.9 \ fail.9 KFAIL_POINT_ERROR.9 \ fail.9 KFAIL_POINT_GOTO.9 \ fail.9 KFAIL_POINT_RETURN.9 \ fail.9 KFAIL_POINT_RETURN_VOID.9 MLINKS+=fetch.9 fubyte.9 \ fetch.9 fuswintr.9 \ fetch.9 fuword.9 \ fetch.9 fuword16.9 \ fetch.9 fuword32.9 \ fetch.9 fuword64.9 \ fetch.9 fueword.9 \ fetch.9 fueword32.9 \ fetch.9 fueword64.9 MLINKS+=firmware.9 firmware_get.9 \ firmware.9 firmware_put.9 \ firmware.9 firmware_register.9 \ firmware.9 firmware_unregister.9 MLINKS+=fpu_kern.9 fpu_kern_alloc_ctx.9 \ fpu_kern.9 fpu_kern_free_ctx.9 \ fpu_kern.9 fpu_kern_enter.9 \ fpu_kern.9 fpu_kern_leave.9 \ fpu_kern.9 fpu_kern_thread.9 \ fpu_kern.9 is_fpu_kern_thread.9 MLINKS+=g_attach.9 g_detach.9 MLINKS+=g_bio.9 g_alloc_bio.9 \ g_bio.9 g_clone_bio.9 \ g_bio.9 g_destroy_bio.9 \ g_bio.9 g_duplicate_bio.9 \ g_bio.9 g_new_bio.9 \ g_bio.9 g_print_bio.9 MLINKS+=g_consumer.9 g_destroy_consumer.9 \ g_consumer.9 g_new_consumer.9 MLINKS+=g_data.9 g_read_data.9 \ g_data.9 g_write_data.9 MLINKS+=getenv.9 freeenv.9 \ getenv.9 getenv_int.9 \ getenv.9 getenv_long.9 \ getenv.9 getenv_string.9 \ getenv.9 getenv_quad.9 \ getenv.9 getenv_uint.9 \ getenv.9 getenv_ulong.9 \ getenv.9 setenv.9 \ getenv.9 testenv.9 \ getenv.9 unsetenv.9 MLINKS+=g_event.9 g_cancel_event.9 \ g_event.9 g_post_event.9 \ g_event.9 g_waitfor_event.9 MLINKS+=g_geom.9 g_destroy_geom.9 \ g_geom.9 g_new_geomf.9 MLINKS+=g_provider.9 g_destroy_provider.9 \ g_provider.9 g_error_provider.9 \ g_provider.9 g_new_providerf.9 MLINKS+=hash.9 hash32.9 \ hash.9 hash32_buf.9 \ hash.9 hash32_str.9 \ hash.9 hash32_stre.9 \ hash.9 hash32_strn.9 \ hash.9 hash32_strne.9 \ hash.9 jenkins_hash.9 \ hash.9 jenkins_hash32.9 MLINKS+=hashinit.9 hashdestroy.9 \ hashinit.9 hashinit_flags.9 \ hashinit.9 phashinit.9 MLINKS+=hhook.9 hhook_head_register.9 \ hhook.9 hhook_head_deregister.9 \ hhook.9 hhook_head_deregister_lookup.9 \ hhook.9 hhook_run_hooks.9 \ hhook.9 HHOOKS_RUN_IF.9 \ hhook.9 HHOOKS_RUN_LOOKUP_IF.9 MLINKS+=ieee80211.9 ieee80211_ifattach.9 \ ieee80211.9 ieee80211_ifdetach.9 MLINKS+=ieee80211_amrr.9 ieee80211_amrr_choose.9 \ ieee80211_amrr.9 ieee80211_amrr_cleanup.9 \ ieee80211_amrr.9 ieee80211_amrr_init.9 \ ieee80211_amrr.9 ieee80211_amrr_node_init.9 \ ieee80211_amrr.9 ieee80211_amrr_setinterval.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_complete.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_update.9 MLINKS+=ieee80211_beacon.9 ieee80211_beacon_alloc.9 \ ieee80211_beacon.9 ieee80211_beacon_notify.9 \ ieee80211_beacon.9 ieee80211_beacon_update.9 MLINKS+=ieee80211_bmiss.9 ieee80211_beacon_miss.9 MLINKS+=ieee80211_crypto.9 ieee80211_crypto_available.9 \ ieee80211_crypto.9 ieee80211_crypto_decap.9 \ ieee80211_crypto.9 ieee80211_crypto_delglobalkeys.9 \ ieee80211_crypto.9 ieee80211_crypto_delkey.9 \ 
ieee80211_crypto.9 ieee80211_crypto_demic.9 \ ieee80211_crypto.9 ieee80211_crypto_encap.9 \ ieee80211_crypto.9 ieee80211_crypto_enmic.9 \ ieee80211_crypto.9 ieee80211_crypto_newkey.9 \ ieee80211_crypto.9 ieee80211_crypto_register.9 \ ieee80211_crypto.9 ieee80211_crypto_reload_keys.9 \ ieee80211_crypto.9 ieee80211_crypto_setkey.9 \ ieee80211_crypto.9 ieee80211_crypto_unregister.9 \ ieee80211_crypto.9 ieee80211_key_update_begin.9 \ ieee80211_crypto.9 ieee80211_key_update_end.9 \ ieee80211_crypto.9 ieee80211_notify_michael_failure.9 \ ieee80211_crypto.9 ieee80211_notify_replay_failure.9 MLINKS+=ieee80211_input.9 ieee80211_input_all.9 MLINKS+=ieee80211_node.9 ieee80211_dump_node.9 \ ieee80211_node.9 ieee80211_dump_nodes.9 \ ieee80211_node.9 ieee80211_find_rxnode.9 \ ieee80211_node.9 ieee80211_find_rxnode_withkey.9 \ ieee80211_node.9 ieee80211_free_node.9 \ ieee80211_node.9 ieee80211_iterate_nodes.9 \ ieee80211_node.9 ieee80211_ref_node.9 \ ieee80211_node.9 ieee80211_unref_node.9 MLINKS+=ieee80211_output.9 ieee80211_process_callback.9 \ ieee80211_output.9 M_SEQNO_GET.9 \ ieee80211_output.9 M_WME_GETAC.9 MLINKS+=ieee80211_proto.9 ieee80211_new_state.9 \ ieee80211_proto.9 ieee80211_resume_all.9 \ ieee80211_proto.9 ieee80211_start_all.9 \ ieee80211_proto.9 ieee80211_stop_all.9 \ ieee80211_proto.9 ieee80211_suspend_all.9 \ ieee80211_proto.9 ieee80211_waitfor_parent.9 MLINKS+=ieee80211_radiotap.9 ieee80211_radiotap_active.9 \ ieee80211_radiotap.9 ieee80211_radiotap_active_vap.9 \ ieee80211_radiotap.9 ieee80211_radiotap_attach.9 \ ieee80211_radiotap.9 ieee80211_radiotap_tx.9 \ ieee80211_radiotap.9 radiotap.9 MLINKS+=ieee80211_regdomain.9 ieee80211_alloc_countryie.9 \ ieee80211_regdomain.9 ieee80211_init_channels.9 \ ieee80211_regdomain.9 ieee80211_sort_channels.9 MLINKS+=ieee80211_scan.9 ieee80211_add_scan.9 \ ieee80211_scan.9 ieee80211_bg_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan_any.9 \ ieee80211_scan.9 ieee80211_check_scan.9 \ ieee80211_scan.9 ieee80211_check_scan_current.9 \ ieee80211_scan.9 ieee80211_flush.9 \ ieee80211_scan.9 ieee80211_probe_curchan.9 \ ieee80211_scan.9 ieee80211_scan_assoc_fail.9 \ ieee80211_scan.9 ieee80211_scan_done.9 \ ieee80211_scan.9 ieee80211_scan_dump_channels.9 \ ieee80211_scan.9 ieee80211_scan_flush.9 \ ieee80211_scan.9 ieee80211_scan_iterate.9 \ ieee80211_scan.9 ieee80211_scan_next.9 \ ieee80211_scan.9 ieee80211_scan_timeout.9 \ ieee80211_scan.9 ieee80211_scanner_get.9 \ ieee80211_scan.9 ieee80211_scanner_register.9 \ ieee80211_scan.9 ieee80211_scanner_unregister.9 \ ieee80211_scan.9 ieee80211_scanner_unregister_all.9 \ ieee80211_scan.9 ieee80211_start_scan.9 MLINKS+=ieee80211_vap.9 ieee80211_vap_attach.9 \ ieee80211_vap.9 ieee80211_vap_detach.9 \ ieee80211_vap.9 ieee80211_vap_setup.9 MLINKS+=ifnet.9 if_addmulti.9 \ ifnet.9 if_alloc.9 \ ifnet.9 if_allmulti.9 \ ifnet.9 if_attach.9 \ ifnet.9 if_data.9 \ ifnet.9 IF_DEQUEUE.9 \ ifnet.9 if_delmulti.9 \ ifnet.9 if_detach.9 \ ifnet.9 if_down.9 \ ifnet.9 if_findmulti.9 \ ifnet.9 if_free.9 \ ifnet.9 if_free_type.9 \ ifnet.9 if_up.9 \ ifnet.9 ifa_free.9 \ ifnet.9 ifa_ifwithaddr.9 \ ifnet.9 ifa_ifwithdstaddr.9 \ ifnet.9 ifa_ifwithnet.9 \ ifnet.9 ifa_ref.9 \ ifnet.9 ifaddr.9 \ ifnet.9 ifaddr_byindex.9 \ ifnet.9 ifaof_ifpforaddr.9 \ ifnet.9 ifioctl.9 \ ifnet.9 ifpromisc.9 \ ifnet.9 ifqueue.9 \ ifnet.9 ifunit.9 \ ifnet.9 ifunit_ref.9 MLINKS+=insmntque.9 insmntque1.9 MLINKS+=ithread.9 ithread_add_handler.9 \ ithread.9 ithread_create.9 \ ithread.9 ithread_destroy.9 \ ithread.9 
ithread_priority.9 \ ithread.9 ithread_remove_handler.9 \ ithread.9 ithread_schedule.9 MLINKS+=kernacc.9 useracc.9 MLINKS+=kernel_mount.9 free_mntarg.9 \ kernel_mount.9 kernel_vmount.9 \ kernel_mount.9 mount_arg.9 \ kernel_mount.9 mount_argb.9 \ kernel_mount.9 mount_argf.9 \ kernel_mount.9 mount_argsu.9 MLINKS+=khelp.9 khelp_add_hhook.9 \ khelp.9 KHELP_DECLARE_MOD.9 \ khelp.9 KHELP_DECLARE_MOD_UMA.9 \ khelp.9 khelp_destroy_osd.9 \ khelp.9 khelp_get_id.9 \ khelp.9 khelp_get_osd.9 \ khelp.9 khelp_init_osd.9 \ khelp.9 khelp_remove_hhook.9 MLINKS+=kobj.9 DEFINE_CLASS.9 \ kobj.9 kobj_class_compile.9 \ kobj.9 kobj_class_compile_static.9 \ kobj.9 kobj_class_free.9 \ kobj.9 kobj_create.9 \ kobj.9 kobj_delete.9 \ kobj.9 kobj_init.9 \ kobj.9 kobj_init_static.9 MLINKS+=kproc.9 kproc_create.9 \ kproc.9 kproc_exit.9 \ kproc.9 kproc_kthread_add.9 \ kproc.9 kproc_resume.9 \ kproc.9 kproc_shutdown.9 \ kproc.9 kproc_start.9 \ kproc.9 kproc_suspend.9 \ kproc.9 kproc_suspend_check.9 \ kproc.9 kthread_create.9 MLINKS+=kqueue.9 knlist_add.9 \ kqueue.9 knlist_clear.9 \ kqueue.9 knlist_delete.9 \ kqueue.9 knlist_destroy.9 \ kqueue.9 knlist_empty.9 \ kqueue.9 knlist_init.9 \ kqueue.9 knlist_init_mtx.9 \ kqueue.9 knlist_init_rw_reader.9 \ kqueue.9 knlist_remove.9 \ kqueue.9 knlist_remove_inevent.9 \ kqueue.9 knote_fdclose.9 \ kqueue.9 KNOTE_LOCKED.9 \ kqueue.9 KNOTE_UNLOCKED.9 \ kqueue.9 kqfd_register.9 \ kqueue.9 kqueue_add_filteropts.9 \ kqueue.9 kqueue_del_filteropts.9 MLINKS+=kthread.9 kthread_add.9 \ kthread.9 kthread_exit.9 \ kthread.9 kthread_resume.9 \ kthread.9 kthread_shutdown.9 \ kthread.9 kthread_start.9 \ kthread.9 kthread_suspend.9 \ kthread.9 kthread_suspend_check.9 MLINKS+=ktr.9 CTR0.9 \ ktr.9 CTR1.9 \ ktr.9 CTR2.9 \ ktr.9 CTR3.9 \ ktr.9 CTR4.9 \ ktr.9 CTR5.9 \ ktr.9 CTR6.9 MLINKS+=lock.9 lockdestroy.9 \ lock.9 lockinit.9 \ lock.9 lockmgr.9 \ lock.9 lockmgr_args.9 \ lock.9 lockmgr_args_rw.9 \ lock.9 lockmgr_assert.9 \ lock.9 lockmgr_disown.9 \ lock.9 lockmgr_printinfo.9 \ lock.9 lockmgr_recursed.9 \ lock.9 lockmgr_rw.9 \ lock.9 lockmgr_waiters.9 \ lock.9 lockstatus.9 MLINKS+=LOCK_PROFILING.9 MUTEX_PROFILING.9 MLINKS+=make_dev.9 destroy_dev.9 \ make_dev.9 destroy_dev_drain.9 \ make_dev.9 destroy_dev_sched.9 \ make_dev.9 destroy_dev_sched_cb.9 \ make_dev.9 dev_depends.9 \ make_dev.9 make_dev_alias.9 \ make_dev.9 make_dev_alias_p.9 \ make_dev.9 make_dev_cred.9 \ make_dev.9 make_dev_credf.9 \ make_dev.9 make_dev_p.9 \ make_dev.9 make_dev_s.9 MLINKS+=malloc.9 free.9 \ malloc.9 MALLOC_DECLARE.9 \ malloc.9 MALLOC_DEFINE.9 \ malloc.9 realloc.9 \ malloc.9 reallocf.9 MLINKS+=mbchain.9 mb_detach.9 \ mbchain.9 mb_done.9 \ mbchain.9 mb_fixhdr.9 \ mbchain.9 mb_init.9 \ mbchain.9 mb_initm.9 \ mbchain.9 mb_put_int64be.9 \ mbchain.9 mb_put_int64le.9 \ mbchain.9 mb_put_mbuf.9 \ mbchain.9 mb_put_mem.9 \ mbchain.9 mb_put_uint16be.9 \ mbchain.9 mb_put_uint16le.9 \ mbchain.9 mb_put_uint32be.9 \ mbchain.9 mb_put_uint32le.9 \ mbchain.9 mb_put_uint8.9 \ mbchain.9 mb_put_uio.9 \ mbchain.9 mb_reserve.9 MLINKS+=mbpool.9 mbp_alloc.9 \ mbpool.9 mbp_card_free.9 \ mbpool.9 mbp_count.9 \ mbpool.9 mbp_create.9 \ mbpool.9 mbp_destroy.9 \ mbpool.9 mbp_ext_free.9 \ mbpool.9 mbp_free.9 \ mbpool.9 mbp_get.9 \ mbpool.9 mbp_get_keep.9 \ mbpool.9 mbp_sync.9 MLINKS+=\ mbuf.9 m_adj.9 \ mbuf.9 m_align.9 \ mbuf.9 M_ALIGN.9 \ mbuf.9 m_append.9 \ mbuf.9 m_apply.9 \ mbuf.9 m_cat.9 \ mbuf.9 MCHTYPE.9 \ mbuf.9 MCLGET.9 \ mbuf.9 m_collapse.9 \ mbuf.9 m_copyback.9 \ mbuf.9 m_copydata.9 \ mbuf.9 m_copym.9 \ mbuf.9 m_copypacket.9 \ mbuf.9 m_copyup.9 \ 
mbuf.9 m_defrag.9 \ mbuf.9 m_devget.9 \ mbuf.9 m_dup.9 \ mbuf.9 m_dup_pkthdr.9 \ mbuf.9 MEXTADD.9 \ mbuf.9 m_fixhdr.9 \ mbuf.9 m_free.9 \ mbuf.9 m_freem.9 \ mbuf.9 MGET.9 \ mbuf.9 m_get.9 \ mbuf.9 m_get2.9 \ mbuf.9 m_getjcl.9 \ mbuf.9 m_getcl.9 \ mbuf.9 m_getclr.9 \ mbuf.9 MGETHDR.9 \ mbuf.9 m_gethdr.9 \ mbuf.9 m_getm.9 \ mbuf.9 m_getptr.9 \ mbuf.9 MH_ALIGN.9 \ mbuf.9 M_LEADINGSPACE.9 \ mbuf.9 m_length.9 \ mbuf.9 M_MOVE_PKTHDR.9 \ mbuf.9 m_move_pkthdr.9 \ mbuf.9 M_PREPEND.9 \ mbuf.9 m_prepend.9 \ mbuf.9 m_pulldown.9 \ mbuf.9 m_pullup.9 \ mbuf.9 m_split.9 \ mbuf.9 mtod.9 \ mbuf.9 M_TRAILINGSPACE.9 \ mbuf.9 m_unshare.9 \ mbuf.9 M_WRITABLE.9 MLINKS+=\ mbuf_tags.9 m_tag_alloc.9 \ mbuf_tags.9 m_tag_copy.9 \ mbuf_tags.9 m_tag_copy_chain.9 \ mbuf_tags.9 m_tag_delete.9 \ mbuf_tags.9 m_tag_delete_chain.9 \ mbuf_tags.9 m_tag_delete_nonpersistent.9 \ mbuf_tags.9 m_tag_find.9 \ mbuf_tags.9 m_tag_first.9 \ mbuf_tags.9 m_tag_free.9 \ mbuf_tags.9 m_tag_get.9 \ mbuf_tags.9 m_tag_init.9 \ mbuf_tags.9 m_tag_locate.9 \ mbuf_tags.9 m_tag_next.9 \ mbuf_tags.9 m_tag_prepend.9 \ mbuf_tags.9 m_tag_unlink.9 MLINKS+=MD5.9 MD5Init.9 \ MD5.9 MD5Transform.9 MLINKS+=mdchain.9 md_append_record.9 \ mdchain.9 md_done.9 \ mdchain.9 md_get_int64.9 \ mdchain.9 md_get_int64be.9 \ mdchain.9 md_get_int64le.9 \ mdchain.9 md_get_mbuf.9 \ mdchain.9 md_get_mem.9 \ mdchain.9 md_get_uint16.9 \ mdchain.9 md_get_uint16be.9 \ mdchain.9 md_get_uint16le.9 \ mdchain.9 md_get_uint32.9 \ mdchain.9 md_get_uint32be.9 \ mdchain.9 md_get_uint32le.9 \ mdchain.9 md_get_uint8.9 \ mdchain.9 md_get_uio.9 \ mdchain.9 md_initm.9 \ mdchain.9 md_next_record.9 MLINKS+=microtime.9 bintime.9 \ microtime.9 getbintime.9 \ microtime.9 getmicrotime.9 \ microtime.9 getnanotime.9 \ microtime.9 nanotime.9 MLINKS+=microuptime.9 binuptime.9 \ microuptime.9 getbinuptime.9 \ microuptime.9 getmicrouptime.9 \ microuptime.9 getnanouptime.9 \ microuptime.9 getsbinuptime.9 \ microuptime.9 nanouptime.9 \ microuptime.9 sbinuptime.9 MLINKS+=mi_switch.9 cpu_switch.9 \ mi_switch.9 cpu_throw.9 MLINKS+=mod_cc.9 CCV.9 \ mod_cc.9 DECLARE_CC_MODULE.9 MLINKS+=mtx_pool.9 mtx_pool_alloc.9 \ mtx_pool.9 mtx_pool_create.9 \ mtx_pool.9 mtx_pool_destroy.9 \ mtx_pool.9 mtx_pool_find.9 \ mtx_pool.9 mtx_pool_lock.9 \ mtx_pool.9 mtx_pool_lock_spin.9 \ mtx_pool.9 mtx_pool_unlock.9 \ mtx_pool.9 mtx_pool_unlock_spin.9 MLINKS+=mutex.9 mtx_assert.9 \ mutex.9 mtx_destroy.9 \ mutex.9 mtx_init.9 \ mutex.9 mtx_initialized.9 \ mutex.9 mtx_lock.9 \ mutex.9 mtx_lock_flags.9 \ mutex.9 mtx_lock_spin.9 \ mutex.9 mtx_lock_spin_flags.9 \ mutex.9 mtx_owned.9 \ mutex.9 mtx_recursed.9 \ mutex.9 mtx_sleep.9 \ mutex.9 MTX_SYSINIT.9 \ mutex.9 mtx_trylock.9 \ mutex.9 mtx_trylock_flags.9 \ mutex.9 mtx_unlock.9 \ mutex.9 mtx_unlock_flags.9 \ mutex.9 mtx_unlock_spin.9 \ mutex.9 mtx_unlock_spin_flags.9 MLINKS+=namei.9 NDFREE.9 \ namei.9 NDINIT.9 MLINKS+=netisr.9 netisr_clearqdrops.9 \ netisr.9 netisr_default_flow2cpu.9 \ netisr.9 netisr_dispatch.9 \ netisr.9 netisr_dispatch_src.9 \ netisr.9 netisr_get_cpucount.9 \ netisr.9 netisr_get_cpuid.9 \ netisr.9 netisr_getqdrops.9 \ netisr.9 netisr_getqlimit.9 \ netisr.9 netisr_queue.9 \ netisr.9 netisr_queue_src.9 \ netisr.9 netisr_register.9 \ netisr.9 netisr_setqlimit.9 \ netisr.9 netisr_unregister.9 MLINKS+=nv.9 libnv.9 \ nv.9 nvlist.9 \ nv.9 nvlist_add_binary.9 \ nv.9 nvlist_add_bool.9 \ nv.9 nvlist_add_descriptor.9 \ nv.9 nvlist_add_null.9 \ nv.9 nvlist_add_number.9 \ nv.9 nvlist_add_nvlist.9 \ nv.9 nvlist_add_string.9 \ nv.9 nvlist_add_stringf.9 \ nv.9 nvlist_add_stringv.9 \ 
nv.9 nvlist_clone.9 \ nv.9 nvlist_create.9 \ nv.9 nvlist_destroy.9 \ nv.9 nvlist_dump.9 \ nv.9 nvlist_empty.9 \ nv.9 nvlist_error.9 \ nv.9 nvlist_exists.9 \ nv.9 nvlist_exists_binary.9 \ nv.9 nvlist_exists_bool.9 \ nv.9 nvlist_exists_descriptor.9 \ nv.9 nvlist_exists_null.9 \ nv.9 nvlist_exists_number.9 \ nv.9 nvlist_exists_nvlist.9 \ nv.9 nvlist_exists_string.9 \ nv.9 nvlist_exists_type.9 \ nv.9 nvlist_fdump.9 \ nv.9 nvlist_flags.9 \ nv.9 nvlist_free.9 \ nv.9 nvlist_free_binary.9 \ nv.9 nvlist_free_bool.9 \ nv.9 nvlist_free_descriptor.9 \ nv.9 nvlist_free_null.9 \ nv.9 nvlist_free_number.9 \ nv.9 nvlist_free_nvlist.9 \ nv.9 nvlist_free_string.9 \ nv.9 nvlist_free_type.9 \ nv.9 nvlist_get_binary.9 \ nv.9 nvlist_get_bool.9 \ nv.9 nvlist_get_descriptor.9 \ nv.9 nvlist_get_number.9 \ nv.9 nvlist_get_nvlist.9 \ nv.9 nvlist_get_parent.9 \ nv.9 nvlist_get_string.9 \ nv.9 nvlist_move_binary.9 \ nv.9 nvlist_move_descriptor.9 \ nv.9 nvlist_move_nvlist.9 \ nv.9 nvlist_move_string.9 \ nv.9 nvlist_next.9 \ nv.9 nvlist_pack.9 \ nv.9 nvlist_recv.9 \ nv.9 nvlist_send.9 \ nv.9 nvlist_set_error.9 \ nv.9 nvlist_size.9 \ nv.9 nvlist_take_binary.9 \ nv.9 nvlist_take_bool.9 \ nv.9 nvlist_take_descriptor.9 \ nv.9 nvlist_take_number.9 \ nv.9 nvlist_take_nvlist.9 \ nv.9 nvlist_take_string.9 \ nv.9 nvlist_unpack.9 \ nv.9 nvlist_xfer.9 MLINKS+=osd.9 osd_call.9 \ osd.9 osd_del.9 \ osd.9 osd_deregister.9 \ osd.9 osd_exit.9 \ osd.9 osd_get.9 \ osd.9 osd_register.9 \ osd.9 osd_set.9 MLINKS+=panic.9 vpanic.9 MLINKS+=pbuf.9 getpbuf.9 \ pbuf.9 relpbuf.9 \ pbuf.9 trypbuf.9 MLINKS+=PCBGROUP.9 in_pcbgroup_byhash.9 \ PCBGROUP.9 in_pcbgroup_byinpcb.9 \ PCBGROUP.9 in_pcbgroup_destroy.9 \ PCBGROUP.9 in_pcbgroup_enabled.9 \ PCBGROUP.9 in_pcbgroup_init.9 \ PCBGROUP.9 in_pcbgroup_remove.9 \ PCBGROUP.9 in_pcbgroup_update.9 \ PCBGROUP.9 in_pcbgroup_update_mbuf.9 \ PCBGROUP.9 in6_pcbgroup_byhash.9 MLINKS+=pci.9 pci_alloc_msi.9 \ pci.9 pci_alloc_msix.9 \ pci.9 pci_disable_busmaster.9 \ pci.9 pci_disable_io.9 \ pci.9 pci_enable_busmaster.9 \ pci.9 pci_enable_io.9 \ pci.9 pci_find_bsf.9 \ pci.9 pci_find_cap.9 \ pci.9 pci_find_dbsf.9 \ pci.9 pci_find_device.9 \ pci.9 pci_find_extcap.9 \ pci.9 pci_find_htcap.9 \ pci.9 pci_find_pcie_root_port.9 \ pci.9 pci_get_max_read_req.9 \ pci.9 pci_get_powerstate.9 \ pci.9 pci_get_vpd_ident.9 \ pci.9 pci_get_vpd_readonly.9 \ pci.9 pci_iov_attach.9 \ pci.9 pci_iov_detach.9 \ pci.9 pci_msi_count.9 \ pci.9 pci_msix_count.9 \ pci.9 pci_msix_pba_bar.9 \ pci.9 pci_msix_table_bar.9 \ pci.9 pci_pending_msix.9 \ pci.9 pci_read_config.9 \ pci.9 pci_release_msi.9 \ pci.9 pci_remap_msix.9 \ pci.9 pci_restore_state.9 \ pci.9 pci_save_state.9 \ pci.9 pci_set_powerstate.9 \ pci.9 pci_set_max_read_req.9 \ pci.9 pci_write_config.9 \ pci.9 pcie_adjust_config.9 \ pci.9 pcie_read_config.9 \ pci.9 pcie_write_config.9 MLINKS+=pci_iov_schema.9 pci_iov_schema_alloc_node.9 \ pci_iov_schema.9 pci_iov_schema_add_bool.9 \ pci_iov_schema.9 pci_iov_schema_add_string.9 \ pci_iov_schema.9 pci_iov_schema_add_uint8.9 \ pci_iov_schema.9 pci_iov_schema_add_uint16.9 \ pci_iov_schema.9 pci_iov_schema_add_uint32.9 \ pci_iov_schema.9 pci_iov_schema_add_uint64.9 \ pci_iov_schema.9 pci_iov_schema_add_unicast_mac.9 MLINKS+=pfil.9 pfil_add_hook.9 \ pfil.9 pfil_head_register.9 \ pfil.9 pfil_head_unregister.9 \ pfil.9 pfil_hook_get.9 \ pfil.9 pfil_remove_hook.9 \ pfil.9 pfil_rlock.9 \ pfil.9 pfil_run_hooks.9 \ pfil.9 pfil_runlock.9 \ pfil.9 pfil_wlock.9 \ pfil.9 pfil_wunlock.9 MLINKS+=pfind.9 zpfind.9 MLINKS+=PHOLD.9 PRELE.9 \ PHOLD.9 _PHOLD.9 \ 
PHOLD.9 _PRELE.9 \ PHOLD.9 PROC_ASSERT_HELD.9 \ PHOLD.9 PROC_ASSERT_NOT_HELD.9 MLINKS+=pmap_copy.9 pmap_copy_page.9 MLINKS+=pmap_extract.9 pmap_extract_and_hold.9 MLINKS+=pmap_init.9 pmap_init2.9 MLINKS+=pmap_is_modified.9 pmap_ts_referenced.9 MLINKS+=pmap_pinit.9 pmap_pinit0.9 \ pmap_pinit.9 pmap_pinit2.9 MLINKS+=pmap_qenter.9 pmap_qremove.9 MLINKS+=pmap_quick_enter_page.9 pmap_quick_remove_page.9 MLINKS+=pmap_remove.9 pmap_remove_all.9 \ pmap_remove.9 pmap_remove_pages.9 MLINKS+=pmap_resident_count.9 pmap_wired_count.9 MLINKS+=pmap_zero_page.9 pmap_zero_area.9 \ pmap_zero_page.9 pmap_zero_idle.9 MLINKS+=printf.9 log.9 \ printf.9 tprintf.9 \ printf.9 uprintf.9 MLINKS+=priv.9 priv_check.9 \ priv.9 priv_check_cred.9 MLINKS+=proc_rwmem.9 proc_readmem.9 \ proc_rwmem.9 proc_writemem.9 MLINKS+=psignal.9 gsignal.9 \ psignal.9 pgsignal.9 \ psignal.9 tdsignal.9 MLINKS+=random.9 arc4rand.9 \ random.9 arc4random.9 \ random.9 read_random.9 \ random.9 read_random_uio.9 \ random.9 srandom.9 MLINKS+=refcount.9 refcount_acquire.9 \ refcount.9 refcount_init.9 \ refcount.9 refcount_release.9 MLINKS+=resource_int_value.9 resource_long_value.9 \ resource_int_value.9 resource_string_value.9 MLINKS+=rman.9 rman_activate_resource.9 \ rman.9 rman_adjust_resource.9 \ rman.9 rman_await_resource.9 \ rman.9 rman_deactivate_resource.9 \ rman.9 rman_fini.9 \ rman.9 rman_first_free_region.9 \ rman.9 rman_get_bushandle.9 \ rman.9 rman_get_bustag.9 \ rman.9 rman_get_device.9 \ rman.9 rman_get_end.9 \ rman.9 rman_get_flags.9 \ rman.9 rman_get_rid.9 \ rman.9 rman_get_size.9 \ rman.9 rman_get_start.9 \ rman.9 rman_get_virtual.9 \ rman.9 rman_init.9 \ rman.9 rman_init_from_resource.9 \ rman.9 rman_is_region_manager.9 \ rman.9 rman_last_free_region.9 \ rman.9 rman_make_alignment_flags.9 \ rman.9 rman_manage_region.9 \ rman.9 rman_release_resource.9 \ rman.9 rman_reserve_resource.9 \ rman.9 rman_reserve_resource_bound.9 \ rman.9 rman_set_bushandle.9 \ rman.9 rman_set_bustag.9 \ rman.9 rman_set_rid.9 \ rman.9 rman_set_virtual.9 MLINKS+=rmlock.9 rm_assert.9 \ rmlock.9 rm_destroy.9 \ rmlock.9 rm_init.9 \ rmlock.9 rm_init_flags.9 \ rmlock.9 rm_rlock.9 \ rmlock.9 rm_runlock.9 \ rmlock.9 rm_sleep.9 \ rmlock.9 RM_SYSINIT.9 \ rmlock.9 rm_try_rlock.9 \ rmlock.9 rm_wlock.9 \ rmlock.9 rm_wowned.9 \ rmlock.9 rm_wunlock.9 MLINKS+=rtalloc.9 rtalloc1.9 \ rtalloc.9 rtalloc_ign.9 \ rtalloc.9 RT_ADDREF.9 \ rtalloc.9 RT_LOCK.9 \ rtalloc.9 RT_REMREF.9 \ rtalloc.9 RT_RTFREE.9 \ rtalloc.9 RT_UNLOCK.9 \ rtalloc.9 RTFREE_LOCKED.9 \ rtalloc.9 RTFREE.9 \ rtalloc.9 rtfree.9 \ rtalloc.9 rtalloc1_fib.9 \ rtalloc.9 rtalloc_ign_fib.9 \ rtalloc.9 rtalloc_fib.9 MLINKS+=runqueue.9 choosethread.9 \ runqueue.9 procrunnable.9 \ runqueue.9 remrunqueue.9 \ runqueue.9 setrunqueue.9 MLINKS+=rwlock.9 rw_assert.9 \ rwlock.9 rw_destroy.9 \ rwlock.9 rw_downgrade.9 \ rwlock.9 rw_init.9 \ rwlock.9 rw_init_flags.9 \ rwlock.9 rw_initialized.9 \ rwlock.9 rw_rlock.9 \ rwlock.9 rw_runlock.9 \ rwlock.9 rw_unlock.9 \ rwlock.9 rw_sleep.9 \ rwlock.9 RW_SYSINIT.9 \ rwlock.9 rw_try_rlock.9 \ rwlock.9 rw_try_upgrade.9 \ rwlock.9 rw_try_wlock.9 \ rwlock.9 rw_wlock.9 \ rwlock.9 rw_wowned.9 \ rwlock.9 rw_wunlock.9 MLINKS+=sbuf.9 sbuf_bcat.9 \ sbuf.9 sbuf_bcopyin.9 \ sbuf.9 sbuf_bcpy.9 \ sbuf.9 sbuf_cat.9 \ sbuf.9 sbuf_clear.9 \ sbuf.9 sbuf_copyin.9 \ sbuf.9 sbuf_cpy.9 \ sbuf.9 sbuf_data.9 \ sbuf.9 sbuf_delete.9 \ sbuf.9 sbuf_done.9 \ sbuf.9 sbuf_error.9 \ sbuf.9 sbuf_finish.9 \ sbuf.9 sbuf_len.9 \ sbuf.9 sbuf_new.9 \ sbuf.9 sbuf_new_auto.9 \ sbuf.9 sbuf_new_for_sysctl.9 \ sbuf.9 
sbuf_printf.9 \ sbuf.9 sbuf_putc.9 \ sbuf.9 sbuf_set_drain.9 \ sbuf.9 sbuf_setpos.9 \ sbuf.9 sbuf_start_section.9 \ sbuf.9 sbuf_end_section.9 \ sbuf.9 sbuf_trim.9 \ sbuf.9 sbuf_vprintf.9 MLINKS+=scheduler.9 curpriority_cmp.9 \ scheduler.9 maybe_resched.9 \ scheduler.9 propagate_priority.9 \ scheduler.9 resetpriority.9 \ scheduler.9 roundrobin.9 \ scheduler.9 roundrobin_interval.9 \ scheduler.9 schedclock.9 \ scheduler.9 schedcpu.9 \ scheduler.9 sched_setup.9 \ scheduler.9 setrunnable.9 \ scheduler.9 updatepri.9 MLINKS+=SDT.9 SDT_PROVIDER_DECLARE.9 \ SDT.9 SDT_PROVIDER_DEFINE.9 \ SDT.9 SDT_PROBE_DECLARE.9 \ SDT.9 SDT_PROBE_DEFINE.9 \ SDT.9 SDT_PROBE.9 MLINKS+=securelevel_gt.9 securelevel_ge.9 MLINKS+=selrecord.9 seldrain.9 \ selrecord.9 selwakeup.9 MLINKS+=sema.9 sema_destroy.9 \ sema.9 sema_init.9 \ sema.9 sema_post.9 \ sema.9 sema_timedwait.9 \ sema.9 sema_trywait.9 \ sema.9 sema_value.9 \ sema.9 sema_wait.9 MLINKS+=sf_buf.9 sf_buf_alloc.9 \ sf_buf.9 sf_buf_free.9 \ sf_buf.9 sf_buf_kva.9 \ sf_buf.9 sf_buf_page.9 MLINKS+=sglist.9 sglist_alloc.9 \ sglist.9 sglist_append.9 \ sglist.9 sglist_append_bio.9 \ sglist.9 sglist_append_mbuf.9 \ sglist.9 sglist_append_phys.9 \ sglist.9 sglist_append_uio.9 \ sglist.9 sglist_append_user.9 \ sglist.9 sglist_build.9 \ sglist.9 sglist_clone.9 \ sglist.9 sglist_consume_uio.9 \ sglist.9 sglist_count.9 \ sglist.9 sglist_free.9 \ sglist.9 sglist_hold.9 \ sglist.9 sglist_init.9 \ sglist.9 sglist_join.9 \ sglist.9 sglist_length.9 \ sglist.9 sglist_reset.9 \ sglist.9 sglist_slice.9 \ sglist.9 sglist_split.9 MLINKS+=shm_map.9 shm_unmap.9 MLINKS+=signal.9 cursig.9 \ signal.9 execsigs.9 \ signal.9 issignal.9 \ signal.9 killproc.9 \ signal.9 pgsigio.9 \ signal.9 postsig.9 \ signal.9 SETSETNEQ.9 \ signal.9 SETSETOR.9 \ signal.9 SIGADDSET.9 \ signal.9 SIG_CONTSIGMASK.9 \ signal.9 SIGDELSET.9 \ signal.9 SIGEMPTYSET.9 \ signal.9 sigexit.9 \ signal.9 SIGFILLSET.9 \ signal.9 siginit.9 \ signal.9 SIGISEMPTY.9 \ signal.9 SIGISMEMBER.9 \ signal.9 SIGNOTEMPTY.9 \ signal.9 signotify.9 \ signal.9 SIGPENDING.9 \ signal.9 SIGSETAND.9 \ signal.9 SIGSETCANTMASK.9 \ signal.9 SIGSETEQ.9 \ signal.9 SIGSETNAND.9 \ signal.9 SIG_STOPSIGMASK.9 \ signal.9 trapsignal.9 MLINKS+=sleep.9 msleep.9 \ sleep.9 msleep_sbt.9 \ sleep.9 msleep_spin.9 \ sleep.9 msleep_spin_sbt.9 \ sleep.9 pause.9 \ sleep.9 pause_sbt.9 \ sleep.9 tsleep.9 \ sleep.9 tsleep_sbt.9 \ sleep.9 wakeup.9 \ sleep.9 wakeup_one.9 MLINKS+=sleepqueue.9 init_sleepqueues.9 \ sleepqueue.9 sleepq_abort.9 \ sleepqueue.9 sleepq_add.9 \ sleepqueue.9 sleepq_alloc.9 \ sleepqueue.9 sleepq_broadcast.9 \ sleepqueue.9 sleepq_free.9 \ sleepqueue.9 sleepq_lookup.9 \ sleepqueue.9 sleepq_lock.9 \ sleepqueue.9 sleepq_release.9 \ sleepqueue.9 sleepq_remove.9 \ sleepqueue.9 sleepq_set_timeout.9 \ sleepqueue.9 sleepq_set_timeout_sbt.9 \ sleepqueue.9 sleepq_signal.9 \ sleepqueue.9 sleepq_sleepcnt.9 \ sleepqueue.9 sleepq_timedwait.9 \ sleepqueue.9 sleepq_timedwait_sig.9 \ sleepqueue.9 sleepq_type.9 \ sleepqueue.9 sleepq_wait.9 \ sleepqueue.9 sleepq_wait_sig.9 MLINKS+=socket.9 soabort.9 \ socket.9 soaccept.9 \ socket.9 sobind.9 \ socket.9 socheckuid.9 \ socket.9 soclose.9 \ socket.9 soconnect.9 \ socket.9 socreate.9 \ socket.9 sodisconnect.9 \ socket.9 sodupsockaddr.9 \ socket.9 sofree.9 \ socket.9 sogetopt.9 \ socket.9 sohasoutofband.9 \ socket.9 solisten.9 \ socket.9 solisten_proto.9 \ socket.9 solisten_proto_check.9 \ socket.9 sonewconn.9 \ socket.9 sooptcopyin.9 \ socket.9 sooptcopyout.9 \ socket.9 sopoll.9 \ socket.9 sopoll_generic.9 \ socket.9 
soreceive.9 \ socket.9 soreceive_dgram.9 \ socket.9 soreceive_generic.9 \ socket.9 soreceive_stream.9 \ socket.9 soreserve.9 \ socket.9 sorflush.9 \ socket.9 sosend.9 \ socket.9 sosend_dgram.9 \ socket.9 sosend_generic.9 \ socket.9 sosetopt.9 \ socket.9 soshutdown.9 \ socket.9 sotoxsocket.9 \ socket.9 soupcall_clear.9 \ socket.9 soupcall_set.9 \ socket.9 sowakeup.9 MLINKS+=stack.9 stack_copy.9 \ stack.9 stack_create.9 \ stack.9 stack_destroy.9 \ stack.9 stack_print.9 \ stack.9 stack_print_ddb.9 \ stack.9 stack_print_short.9 \ stack.9 stack_print_short_ddb.9 \ stack.9 stack_put.9 \ stack.9 stack_save.9 \ stack.9 stack_sbuf_print.9 \ stack.9 stack_sbuf_print_ddb.9 \ stack.9 stack_zero.9 MLINKS+=store.9 subyte.9 \ store.9 suswintr.9 \ store.9 suword.9 \ store.9 suword16.9 \ store.9 suword32.9 \ store.9 suword64.9 MLINKS+=swi.9 swi_add.9 \ swi.9 swi_remove.9 \ swi.9 swi_sched.9 MLINKS+=sx.9 sx_assert.9 \ sx.9 sx_destroy.9 \ sx.9 sx_downgrade.9 \ sx.9 sx_init.9 \ sx.9 sx_init_flags.9 \ sx.9 sx_sleep.9 \ sx.9 sx_slock.9 \ sx.9 sx_slock_sig.9 \ sx.9 sx_sunlock.9 \ sx.9 SX_SYSINIT.9 \ sx.9 sx_try_slock.9 \ sx.9 sx_try_upgrade.9 \ sx.9 sx_try_xlock.9 \ sx.9 sx_unlock.9 \ sx.9 sx_xholder.9 \ sx.9 sx_xlock.9 \ sx.9 sx_xlock_sig.9 \ sx.9 sx_xlocked.9 \ sx.9 sx_xunlock.9 MLINKS+=sysctl.9 SYSCTL_DECL.9 \ sysctl.9 SYSCTL_ADD_INT.9 \ sysctl.9 SYSCTL_ADD_LONG.9 \ sysctl.9 SYSCTL_ADD_NODE.9 \ sysctl.9 SYSCTL_ADD_OPAQUE.9 \ sysctl.9 SYSCTL_ADD_PROC.9 \ sysctl.9 SYSCTL_ADD_QUAD.9 \ sysctl.9 SYSCTL_ADD_ROOT_NODE.9 \ sysctl.9 SYSCTL_ADD_S8.9 \ sysctl.9 SYSCTL_ADD_S16.9 \ sysctl.9 SYSCTL_ADD_S32.9 \ sysctl.9 SYSCTL_ADD_S64.9 \ sysctl.9 SYSCTL_ADD_STRING.9 \ sysctl.9 SYSCTL_ADD_STRUCT.9 \ sysctl.9 SYSCTL_ADD_U8.9 \ sysctl.9 SYSCTL_ADD_U16.9 \ sysctl.9 SYSCTL_ADD_U32.9 \ sysctl.9 SYSCTL_ADD_U64.9 \ sysctl.9 SYSCTL_ADD_UAUTO.9 \ sysctl.9 SYSCTL_ADD_UINT.9 \ sysctl.9 SYSCTL_ADD_ULONG.9 \ sysctl.9 SYSCTL_ADD_UQUAD.9 \ sysctl.9 SYSCTL_CHILDREN.9 \ sysctl.9 SYSCTL_STATIC_CHILDREN.9 \ sysctl.9 SYSCTL_NODE_CHILDREN.9 \ sysctl.9 SYSCTL_PARENT.9 \ sysctl.9 SYSCTL_INT.9 \ sysctl.9 SYSCTL_LONG.9 \ sysctl.9 SYSCTL_NODE.9 \ sysctl.9 SYSCTL_OPAQUE.9 \ sysctl.9 SYSCTL_PROC.9 \ sysctl.9 SYSCTL_QUAD.9 \ sysctl.9 SYSCTL_ROOT_NODE.9 \ sysctl.9 SYSCTL_S8.9 \ sysctl.9 SYSCTL_S16.9 \ sysctl.9 SYSCTL_S32.9 \ sysctl.9 SYSCTL_S64.9 \ sysctl.9 SYSCTL_STRING.9 \ sysctl.9 SYSCTL_STRUCT.9 \ sysctl.9 SYSCTL_U8.9 \ sysctl.9 SYSCTL_U16.9 \ sysctl.9 SYSCTL_U32.9 \ sysctl.9 SYSCTL_U64.9 \ sysctl.9 SYSCTL_UINT.9 \ sysctl.9 SYSCTL_ULONG.9 \ sysctl.9 SYSCTL_UQUAD.9 MLINKS+=sysctl_add_oid.9 sysctl_move_oid.9 \ sysctl_add_oid.9 sysctl_remove_oid.9 \ sysctl_add_oid.9 sysctl_remove_name.9 MLINKS+=sysctl_ctx_init.9 sysctl_ctx_entry_add.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_del.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_find.9 \ sysctl_ctx_init.9 sysctl_ctx_free.9 MLINKS+=SYSINIT.9 SYSUNINIT.9 MLINKS+=taskqueue.9 TASK_INIT.9 \ taskqueue.9 TASK_INITIALIZER.9 \ taskqueue.9 taskqueue_block.9 \ taskqueue.9 taskqueue_cancel.9 \ taskqueue.9 taskqueue_cancel_timeout.9 \ taskqueue.9 taskqueue_create.9 \ taskqueue.9 taskqueue_create_fast.9 \ taskqueue.9 TASKQUEUE_DECLARE.9 \ taskqueue.9 TASKQUEUE_DEFINE.9 \ taskqueue.9 TASKQUEUE_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_drain.9 \ taskqueue.9 taskqueue_drain_all.9 \ taskqueue.9 taskqueue_drain_timeout.9 \ taskqueue.9 taskqueue_enqueue.9 \ - taskqueue.9 taskqueue_enqueue_fast.9 \ taskqueue.9 taskqueue_enqueue_timeout.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE_THREAD.9 \ taskqueue.9 
taskqueue_free.9 \ taskqueue.9 taskqueue_member.9 \ taskqueue.9 taskqueue_run.9 \ taskqueue.9 taskqueue_set_callback.9 \ taskqueue.9 taskqueue_start_threads.9 \ taskqueue.9 taskqueue_start_threads_pinned.9 \ taskqueue.9 taskqueue_unblock.9 \ taskqueue.9 TIMEOUT_TASK_INIT.9 MLINKS+=time.9 boottime.9 \ time.9 time_second.9 \ time.9 time_uptime.9 MLINKS+=timeout.9 callout.9 \ timeout.9 callout_active.9 \ timeout.9 callout_async_drain.9 \ timeout.9 callout_deactivate.9 \ timeout.9 callout_drain.9 \ timeout.9 callout_handle_init.9 \ timeout.9 callout_init.9 \ timeout.9 callout_init_mtx.9 \ timeout.9 callout_init_rm.9 \ timeout.9 callout_init_rw.9 \ timeout.9 callout_pending.9 \ timeout.9 callout_reset.9 \ timeout.9 callout_reset_curcpu.9 \ timeout.9 callout_reset_on.9 \ timeout.9 callout_reset_sbt.9 \ timeout.9 callout_reset_sbt_curcpu.9 \ timeout.9 callout_reset_sbt_on.9 \ timeout.9 callout_schedule.9 \ timeout.9 callout_schedule_curcpu.9 \ timeout.9 callout_schedule_on.9 \ timeout.9 callout_schedule_sbt.9 \ timeout.9 callout_schedule_sbt_curcpu.9 \ timeout.9 callout_schedule_sbt_on.9 \ timeout.9 callout_stop.9 \ timeout.9 untimeout.9 MLINKS+=ucred.9 cred_update_thread.9 \ ucred.9 crcopy.9 \ ucred.9 crcopysafe.9 \ ucred.9 crdup.9 \ ucred.9 crfree.9 \ ucred.9 crget.9 \ ucred.9 crhold.9 \ ucred.9 crsetgroups.9 \ ucred.9 crshared.9 \ ucred.9 cru2x.9 MLINKS+=uidinfo.9 uifind.9 \ uidinfo.9 uifree.9 \ uidinfo.9 uihashinit.9 \ uidinfo.9 uihold.9 MLINKS+=uio.9 uiomove.9 \ uio.9 uiomove_nofault.9 .if ${MK_USB} != "no" MAN+= usbdi.9 MLINKS+=usbdi.9 usbd_do_request.9 \ usbdi.9 usbd_do_request_flags.9 \ usbdi.9 usbd_errstr.9 \ usbdi.9 usbd_lookup_id_by_info.9 \ usbdi.9 usbd_lookup_id_by_uaa.9 \ usbdi.9 usbd_transfer_clear_stall.9 \ usbdi.9 usbd_transfer_drain.9 \ usbdi.9 usbd_transfer_pending.9 \ usbdi.9 usbd_transfer_poll.9 \ usbdi.9 usbd_transfer_setup.9 \ usbdi.9 usbd_transfer_start.9 \ usbdi.9 usbd_transfer_stop.9 \ usbdi.9 usbd_transfer_submit.9 \ usbdi.9 usbd_transfer_unsetup.9 \ usbdi.9 usbd_xfer_clr_flag.9 \ usbdi.9 usbd_xfer_frame_data.9 \ usbdi.9 usbd_xfer_frame_len.9 \ usbdi.9 usbd_xfer_get_frame.9 \ usbdi.9 usbd_xfer_get_priv.9 \ usbdi.9 usbd_xfer_is_stalled.9 \ usbdi.9 usbd_xfer_max_framelen.9 \ usbdi.9 usbd_xfer_max_frames.9 \ usbdi.9 usbd_xfer_max_len.9 \ usbdi.9 usbd_xfer_set_flag.9 \ usbdi.9 usbd_xfer_set_frame_data.9 \ usbdi.9 usbd_xfer_set_frame_len.9 \ usbdi.9 usbd_xfer_set_frame_offset.9 \ usbdi.9 usbd_xfer_set_frames.9 \ usbdi.9 usbd_xfer_set_interval.9 \ usbdi.9 usbd_xfer_set_priv.9 \ usbdi.9 usbd_xfer_set_stall.9 \ usbdi.9 usbd_xfer_set_timeout.9 \ usbdi.9 usbd_xfer_softc.9 \ usbdi.9 usbd_xfer_state.9 \ usbdi.9 usbd_xfer_status.9 \ usbdi.9 usb_fifo_alloc_buffer.9 \ usbdi.9 usb_fifo_attach.9 \ usbdi.9 usb_fifo_detach.9 \ usbdi.9 usb_fifo_free_buffer.9 \ usbdi.9 usb_fifo_get_data.9 \ usbdi.9 usb_fifo_get_data_buffer.9 \ usbdi.9 usb_fifo_get_data_error.9 \ usbdi.9 usb_fifo_get_data_linear.9 \ usbdi.9 usb_fifo_put_bytes_max.9 \ usbdi.9 usb_fifo_put_data.9 \ usbdi.9 usb_fifo_put_data_buffer.9 \ usbdi.9 usb_fifo_put_data_error.9 \ usbdi.9 usb_fifo_put_data_linear.9 \ usbdi.9 usb_fifo_reset.9 \ usbdi.9 usb_fifo_softc.9 \ usbdi.9 usb_fifo_wakeup.9 .endif MLINKS+=vcount.9 count_dev.9 MLINKS+=vfsconf.9 vfs_modevent.9 \ vfsconf.9 vfs_register.9 \ vfsconf.9 vfs_unregister.9 MLINKS+=vfs_getopt.9 vfs_copyopt.9 \ vfs_getopt.9 vfs_filteropt.9 \ vfs_getopt.9 vfs_flagopt.9 \ vfs_getopt.9 vfs_getopts.9 \ vfs_getopt.9 vfs_scanopt.9 \ vfs_getopt.9 vfs_setopt.9 \ vfs_getopt.9 vfs_setopt_part.9 \ 
vfs_getopt.9 vfs_setopts.9 MLINKS+=vhold.9 vdrop.9 \ vhold.9 vdropl.9 \ vhold.9 vholdl.9 MLINKS+=vmem.9 vmem_add.9 \ vmem.9 vmem_alloc.9 \ vmem.9 vmem_create.9 \ vmem.9 vmem_destroy.9 \ vmem.9 vmem_free.9 \ vmem.9 vmem_xalloc.9 \ vmem.9 vmem_xfree.9 MLINKS+=vm_map_lock.9 vm_map_lock_downgrade.9 \ vm_map_lock.9 vm_map_lock_read.9 \ vm_map_lock.9 vm_map_lock_upgrade.9 \ vm_map_lock.9 vm_map_trylock.9 \ vm_map_lock.9 vm_map_trylock_read.9 \ vm_map_lock.9 vm_map_unlock.9 \ vm_map_lock.9 vm_map_unlock_read.9 MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9 MLINKS+=vm_map_max.9 vm_map_min.9 \ vm_map_max.9 vm_map_pmap.9 MLINKS+=vm_map_stack.9 vm_map_growstack.9 MLINKS+=vm_map_wire.9 vm_map_unwire.9 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \ vm_page_bits.9 vm_page_dirty.9 \ vm_page_bits.9 vm_page_is_valid.9 \ vm_page_bits.9 vm_page_set_invalid.9 \ vm_page_bits.9 vm_page_set_validclean.9 \ vm_page_bits.9 vm_page_test_dirty.9 \ vm_page_bits.9 vm_page_undirty.9 \ vm_page_bits.9 vm_page_zero_invalid.9 MLINKS+=vm_page_busy.9 vm_page_busied.9 \ vm_page_busy.9 vm_page_busy_downgrade.9 \ vm_page_busy.9 vm_page_busy_sleep.9 \ vm_page_busy.9 vm_page_sbusied.9 \ vm_page_busy.9 vm_page_sbusy.9 \ vm_page_busy.9 vm_page_sleep_if_busy.9 \ vm_page_busy.9 vm_page_sunbusy.9 \ vm_page_busy.9 vm_page_trysbusy.9 \ vm_page_busy.9 vm_page_tryxbusy.9 \ vm_page_busy.9 vm_page_xbusied.9 \ vm_page_busy.9 vm_page_xbusy.9 \ vm_page_busy.9 vm_page_xunbusy.9 \ vm_page_busy.9 vm_page_assert_sbusied.9 \ vm_page_busy.9 vm_page_assert_unbusied.9 \ vm_page_busy.9 vm_page_assert_xbusied.9 MLINKS+=vm_page_aflag.9 vm_page_aflag_clear.9 \ vm_page_aflag.9 vm_page_aflag_set.9 \ vm_page_aflag.9 vm_page_reference.9 MLINKS+=vm_page_free.9 vm_page_free_toq.9 \ vm_page_free.9 vm_page_free_zero.9 \ vm_page_free.9 vm_page_try_to_free.9 MLINKS+=vm_page_hold.9 vm_page_unhold.9 MLINKS+=vm_page_insert.9 vm_page_remove.9 MLINKS+=vm_page_wire.9 vm_page_unwire.9 MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9 MLINKS+=VOP_ATTRIB.9 VOP_GETATTR.9 \ VOP_ATTRIB.9 VOP_SETATTR.9 MLINKS+=VOP_CREATE.9 VOP_MKDIR.9 \ VOP_CREATE.9 VOP_MKNOD.9 \ VOP_CREATE.9 VOP_SYMLINK.9 MLINKS+=VOP_GETPAGES.9 VOP_PUTPAGES.9 MLINKS+=VOP_INACTIVE.9 VOP_RECLAIM.9 MLINKS+=VOP_LOCK.9 vn_lock.9 \ VOP_LOCK.9 VOP_ISLOCKED.9 \ VOP_LOCK.9 VOP_UNLOCK.9 MLINKS+=VOP_OPENCLOSE.9 VOP_CLOSE.9 \ VOP_OPENCLOSE.9 VOP_OPEN.9 MLINKS+=VOP_RDWR.9 VOP_READ.9 \ VOP_RDWR.9 VOP_WRITE.9 MLINKS+=VOP_REMOVE.9 VOP_RMDIR.9 MLINKS+=vnet.9 vimage.9 MLINKS+=vref.9 VREF.9 MLINKS+=vrele.9 vput.9 \ vrele.9 vunref.9 MLINKS+=vslock.9 vsunlock.9 MLINKS+=zone.9 uma.9 \ zone.9 uma_find_refcnt.9 \ zone.9 uma_zalloc.9 \ zone.9 uma_zalloc_arg.9 \ zone.9 uma_zcreate.9 \ zone.9 uma_zdestroy.9 \ zone.9 uma_zfree.9 \ zone.9 uma_zfree_arg.9 \ zone.9 uma_zone_get_cur.9 \ zone.9 uma_zone_get_max.9 \ zone.9 uma_zone_set_max.9 \ zone.9 uma_zone_set_warning.9 \ zone.9 uma_zone_set_maxaction.9 .include Index: head/share/man/man9/taskqueue.9 =================================================================== --- head/share/man/man9/taskqueue.9 (revision 296271) +++ head/share/man/man9/taskqueue.9 (revision 296272) @@ -1,508 +1,490 @@ .\" -*- nroff -*- .\" .\" Copyright (c) 2000 Doug Rabson .\" .\" All rights reserved. .\" .\" This program is free software. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. 
.\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd January 4, 2015 +.Dd March 1, 2016 .Dt TASKQUEUE 9 .Os .Sh NAME .Nm taskqueue .Nd asynchronous task execution .Sh SYNOPSIS .In sys/param.h .In sys/kernel.h .In sys/malloc.h .In sys/queue.h .In sys/taskqueue.h .Bd -literal typedef void (*task_fn_t)(void *context, int pending); typedef void (*taskqueue_enqueue_fn)(void *context); struct task { STAILQ_ENTRY(task) ta_link; /* link for queue */ u_short ta_pending; /* count times queued */ u_short ta_priority; /* priority of task in queue */ task_fn_t ta_func; /* task handler */ void *ta_context; /* argument for handler */ }; enum taskqueue_callback_type { TASKQUEUE_CALLBACK_TYPE_INIT, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN, }; typedef void (*taskqueue_callback_fn)(void *context); struct timeout_task; .Ed .Ft struct taskqueue * .Fn taskqueue_create "const char *name" "int mflags" "taskqueue_enqueue_fn enqueue" "void *context" .Ft struct taskqueue * .Fn taskqueue_create_fast "const char *name" "int mflags" "taskqueue_enqueue_fn enqueue" "void *context" .Ft int .Fn taskqueue_start_threads "struct taskqueue **tqp" "int count" "int pri" "const char *name" "..." .Ft int .Fo taskqueue_start_threads_pinned .Fa "struct taskqueue **tqp" "int count" "int pri" "int cpu_id" .Fa "const char *name" "..." 
.Fc .Ft void .Fn taskqueue_set_callback "struct taskqueue *queue" "enum taskqueue_callback_type cb_type" "taskqueue_callback_fn callback" "void *context" .Ft void .Fn taskqueue_free "struct taskqueue *queue" .Ft int .Fn taskqueue_enqueue "struct taskqueue *queue" "struct task *task" .Ft int -.Fn taskqueue_enqueue_fast "struct taskqueue *queue" "struct task *task" -.Ft int .Fn taskqueue_enqueue_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" "int ticks" .Ft int .Fn taskqueue_cancel "struct taskqueue *queue" "struct task *task" "u_int *pendp" .Ft int .Fn taskqueue_cancel_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" "u_int *pendp" .Ft void .Fn taskqueue_drain "struct taskqueue *queue" "struct task *task" .Ft void .Fn taskqueue_drain_timeout "struct taskqueue *queue" "struct timeout_task *timeout_task" .Ft void .Fn taskqueue_drain_all "struct taskqueue *queue" .Ft void .Fn taskqueue_block "struct taskqueue *queue" .Ft void .Fn taskqueue_unblock "struct taskqueue *queue" .Ft int .Fn taskqueue_member "struct taskqueue *queue" "struct thread *td" .Ft void .Fn taskqueue_run "struct taskqueue *queue" .Fn TASK_INIT "struct task *task" "int priority" "task_fn_t func" "void *context" .Fn TASK_INITIALIZER "int priority" "task_fn_t func" "void *context" .Fn TASKQUEUE_DECLARE "name" .Fn TASKQUEUE_DEFINE "name" "taskqueue_enqueue_fn enqueue" "void *context" "init" .Fn TASKQUEUE_FAST_DEFINE "name" "taskqueue_enqueue_fn enqueue" "void *context" "init" .Fn TASKQUEUE_DEFINE_THREAD "name" .Fn TASKQUEUE_FAST_DEFINE_THREAD "name" .Fn TIMEOUT_TASK_INIT "struct taskqueue *queue" "struct timeout_task *timeout_task" "int priority" "task_fn_t func" "void *context" .Sh DESCRIPTION These functions provide a simple interface for asynchronous execution of code. .Pp The function .Fn taskqueue_create is used to create new queues. The arguments to .Fn taskqueue_create include a name that should be unique, a set of .Xr malloc 9 flags that specify whether the call to .Fn malloc is allowed to sleep, a function that is called from .Fn taskqueue_enqueue when a task is added to the queue, and a pointer to the memory location where the identity of the thread that services the queue is recorded. .\" XXX The rest of the sentence gets lost in relation to the first part. The function called from .Fn taskqueue_enqueue must arrange for the queue to be processed (for instance by scheduling a software interrupt or waking a kernel thread). The memory location where the thread identity is recorded is used to signal the service thread(s) to terminate--when this value is set to zero and the thread is signaled, it will terminate. If the queue is intended for use in fast interrupt handlers, .Fn taskqueue_create_fast should be used in place of .Fn taskqueue_create . .Pp The function .Fn taskqueue_free should be used to free the memory used by the queue. Any tasks that are on the queue will be executed at this time, after which the thread servicing the queue will be signaled that it should exit. .Pp Once a taskqueue has been created, its threads should be started using .Fn taskqueue_start_threads or .Fn taskqueue_start_threads_pinned . .Fn taskqueue_start_threads_pinned takes a .Va cpu_id argument which will cause the threads which are started for the taskqueue to be pinned to run on the given CPU. Callbacks may optionally be registered using .Fn taskqueue_set_callback
Currently, callbacks may be registered for the following purposes: .Bl -tag -width TASKQUEUE_CALLBACK_TYPE_SHUTDOWN .It Dv TASKQUEUE_CALLBACK_TYPE_INIT This callback is called by every thread in the taskqueue, before it executes any tasks. This callback must be set before the taskqueue's threads are started. .It Dv TASKQUEUE_CALLBACK_TYPE_SHUTDOWN This callback is called by every thread in the taskqueue, after it executes its last task. This callback will always be called before the taskqueue structure is reclaimed. .El .Pp To add a task to the list of tasks queued on a taskqueue, call .Fn taskqueue_enqueue with pointers to the queue and task. If the task's .Va ta_pending field is non-zero, then it is simply incremented to reflect the number of times the task was enqueued, up to a cap of USHRT_MAX. Otherwise, the task is added to the list before the first task which has a lower .Va ta_priority value, or at the end of the list if no tasks have a lower priority. Enqueueing a task does not perform any memory allocation, which makes it suitable for calling from an interrupt handler. This function will return .Er EPIPE if the queue is being freed. .Pp -The function -.Fn taskqueue_enqueue_fast -should be used in place of -.Fn taskqueue_enqueue -when the enqueuing must happen from a fast interrupt handler. -This method uses spin locks to avoid the possibility of sleeping in the fast -interrupt context. -.Pp When a task is executed, it is first removed from the queue, the value of .Va ta_pending is recorded, and then the field is zeroed. The function .Va ta_func from the task structure is called with the value of the field .Va ta_context as its first argument and the value of .Va ta_pending as its second argument. After the function .Va ta_func returns, .Xr wakeup 9 is called on the task pointer passed to .Fn taskqueue_enqueue . .Pp The .Fn taskqueue_enqueue_timeout function is used to schedule the enqueue after the specified number of .Va ticks . Only non-fast task queues can be used for .Va timeout_task scheduling. If the .Va ticks argument is negative, the already scheduled enqueueing is not re-scheduled. Otherwise, the task is scheduled for enqueueing in the future, after the absolute value of .Va ticks has passed. .Pp The .Fn taskqueue_cancel function is used to cancel a task. The .Va ta_pending count is cleared, and the old value is returned in the reference parameter .Fa pendp , if it is .Pf non- Dv NULL . If the task is currently running, .Dv EBUSY is returned, otherwise 0. A blocking version of .Fn taskqueue_cancel that waits for a running task to finish could look like: .Bd -literal -offset indent while (taskqueue_cancel(tq, task, NULL) != 0) taskqueue_drain(tq, task); .Ed .Pp Note that, as with .Fn taskqueue_drain , the caller is responsible for ensuring that the task is not re-enqueued after being canceled. .Pp Similarly, the .Fn taskqueue_cancel_timeout function is used to cancel the scheduled task execution. .Pp The .Fn taskqueue_drain function is used to wait for the task to finish, and the .Fn taskqueue_drain_timeout function is used to wait for the scheduled task to finish. There is no guarantee that the task will not be enqueued after a call to .Fn taskqueue_drain . If the caller wants to put the task into a known state, then before calling .Fn taskqueue_drain the caller should use out-of-band means to ensure that the task would not be enqueued. For example, if the task is enqueued by an interrupt filter, then the interrupt could be disabled.
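.Pp For example (the task, handler, and softc names used here are placeholders, not part of the API), a typical consumer initializes a task once, enqueues it from its interrupt handler, and drains it before detach: .Bd -literal -offset indent
static struct task example_task;

static void
example_handler(void *context, int pending)
{
	/* pending counts the enqueues since the last run */
	struct example_softc *sc = context;

	/* ... perform the deferred work ... */
}

/* Once, at attach time: */
TASK_INIT(&example_task, 0, example_handler, sc);

/* From the interrupt handler: */
taskqueue_enqueue(taskqueue_thread, &example_task);

/* Before detach, after ensuring no further enqueues can occur: */
taskqueue_drain(taskqueue_thread, &example_task);
.Ed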
.Pp The .Fn taskqueue_drain_all function is used to wait for all pending and running tasks that are enqueued on the taskqueue to finish. Tasks posted to the taskqueue after .Fn taskqueue_drain_all begins processing, including pending enqueues scheduled by a previous call to .Fn taskqueue_enqueue_timeout , do not extend the wait time of .Fn taskqueue_drain_all and may complete after .Fn taskqueue_drain_all returns. .Pp The .Fn taskqueue_block function blocks the taskqueue. It prevents any enqueued but not running tasks from being executed. Future calls to .Fn taskqueue_enqueue will enqueue tasks, but the tasks will not be run until .Fn taskqueue_unblock is called. Please note that .Fn taskqueue_block does not wait for any currently running tasks to finish. Thus, .Fn taskqueue_block does not provide a guarantee that .Fn taskqueue_run is not running after .Fn taskqueue_block returns, but it does provide a guarantee that .Fn taskqueue_run will not be called again until .Fn taskqueue_unblock is called. If the caller requires a guarantee that .Fn taskqueue_run is not running, then this must be arranged by the caller. Note that if .Fn taskqueue_drain is called on a task that is enqueued on a taskqueue that is blocked by .Fn taskqueue_block , then .Fn taskqueue_drain cannot return until the taskqueue is unblocked. This can result in a deadlock if the thread blocked in .Fn taskqueue_drain is the thread that is supposed to call .Fn taskqueue_unblock . Thus, use of .Fn taskqueue_drain after .Fn taskqueue_block is discouraged, because the state of the task cannot be known in advance. The same caveat applies to .Fn taskqueue_drain_all . .Pp The .Fn taskqueue_unblock function unblocks the previously blocked taskqueue. All enqueued tasks can be run after this call. .Pp The .Fn taskqueue_member function returns .No 1 if the given thread .Fa td is part of the given taskqueue .Fa queue and .No 0 otherwise. .Pp The .Fn taskqueue_run function will run all pending tasks in the specified .Fa queue . Normally this function is only used internally. .Pp A convenience macro, .Fn TASK_INIT "task" "priority" "func" "context" , is provided to initialise a .Va task structure. The .Fn TASK_INITIALIZER macro generates an initializer for a task structure. A macro .Fn TIMEOUT_TASK_INIT "queue" "timeout_task" "priority" "func" "context" initializes the .Va timeout_task structure. The values of .Va priority , .Va func , and .Va context are simply copied into the task structure fields and the .Va ta_pending field is cleared. .Pp Five macros .Fn TASKQUEUE_DECLARE "name" , .Fn TASKQUEUE_DEFINE "name" "enqueue" "context" "init" , .Fn TASKQUEUE_FAST_DEFINE "name" "enqueue" "context" "init" , .Fn TASKQUEUE_DEFINE_THREAD "name" , and .Fn TASKQUEUE_FAST_DEFINE_THREAD "name" are used to declare a reference to a global queue, to define the implementation of the queue, and to define a queue that uses its own thread. The .Fn TASKQUEUE_DEFINE macro arranges to call .Fn taskqueue_create with the values of its .Va name , .Va enqueue , and .Va context arguments during system initialisation. After calling .Fn taskqueue_create , the .Va init argument to the macro is executed as a C statement, allowing any further initialisation to be performed (such as registering an interrupt handler). .Pp The .Fn TASKQUEUE_DEFINE_THREAD macro defines a new taskqueue with its own kernel thread to serve tasks. The variable .Vt struct taskqueue *taskqueue_name is used to enqueue tasks onto the queue.
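.Pp For example (the queue name "acme" is a placeholder), a private queue with its own service thread can be defined and used as follows: .Bd -literal -offset indent
/* Defines the queue and the variable taskqueue_acme. */
TASKQUEUE_DEFINE_THREAD(acme);

/* Enqueue a previously initialized task onto it: */
taskqueue_enqueue(taskqueue_acme, &example_task);
.Ed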
.Pp .Fn TASKQUEUE_FAST_DEFINE and .Fn TASKQUEUE_FAST_DEFINE_THREAD act just like .Fn TASKQUEUE_DEFINE and .Fn TASKQUEUE_DEFINE_THREAD respectively, but the taskqueue is created with .Fn taskqueue_create_fast . .Ss Predefined Task Queues The system provides four global taskqueues, .Va taskqueue_fast , .Va taskqueue_swi , .Va taskqueue_swi_giant , and .Va taskqueue_thread . The .Va taskqueue_fast queue is for swi handlers dispatched from fast interrupt handlers, where sleep mutexes cannot be used. The swi taskqueues are run via a software interrupt mechanism. The .Va taskqueue_swi queue runs without the protection of the .Va Giant kernel lock, and the .Va taskqueue_swi_giant queue runs with the protection of the .Va Giant kernel lock. The thread taskqueue .Va taskqueue_thread runs in a kernel thread context, and tasks run from this thread do not run under the .Va Giant kernel lock. If the caller wants to run under .Va Giant , he should explicitly acquire and release .Va Giant in his taskqueue handler routine. .Pp To use these queues, call .Fn taskqueue_enqueue with the value of the global taskqueue variable for the queue you wish to -use -.Va ( taskqueue_swi , -.Va taskqueue_swi_giant , -or -.Va taskqueue_thread ) . -Use -.Fn taskqueue_enqueue_fast -for the global taskqueue variable -.Va taskqueue_fast . +use. .Pp The software interrupt queues can be used, for instance, for implementing interrupt handlers which must perform a significant amount of processing in the handler. The hardware interrupt handler would perform minimal processing of the interrupt and then enqueue a task to finish the work. This keeps the time spent with interrupts disabled to a minimum. .Pp The thread queue can be used, for instance, by interrupt-level routines that need to call kernel functions that can only be used from a thread context (e.g., calling malloc with the M_WAITOK flag). .Pp Note that tasks queued on shared taskqueues such as .Va taskqueue_swi may be delayed an indeterminate amount of time before execution. If queueing delays cannot be tolerated, then a private taskqueue should be created with a dedicated processing thread. .Sh SEE ALSO .Xr ithread 9 , .Xr kthread 9 , .Xr swi 9 .Sh HISTORY This interface first appeared in .Fx 5.0 . There is a similar facility called work_queue in the Linux kernel. .Sh AUTHORS This manual page was written by .An Doug Rabson . Index: head/sys/arm/ti/am335x/am335x_dmtpps.c =================================================================== --- head/sys/arm/ti/am335x/am335x_dmtpps.c (revision 296271) +++ head/sys/arm/ti/am335x/am335x_dmtpps.c (revision 296272) @@ -1,549 +1,549 @@ /*- * Copyright (c) 2015 Ian lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * AM335x PPS driver using DMTimer capture. * * Note that this PPS driver does not use an interrupt. Instead it uses the * hardware's ability to latch the timer's count register in response to a * signal on an IO pin. Each of timers 4-7 have an associated pin, and this * code allows any one of those to be used. * * The timecounter routines in kern_tc.c call the pps poll routine periodically * to see if a new counter value has been latched. When a new value has been * latched, the only processing done in the poll routine is to capture the * current set of timecounter timehands (done with pps_capture()) and the * latched value from the timer. The remaining work (done by pps_event() while * holding a mutex) is scheduled to be done later in a non-interrupt context. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "am335x_dmtreg.h" #define PPS_CDEV_NAME "dmtpps" struct dmtpps_softc { device_t dev; int mem_rid; struct resource * mem_res; int tmr_num; /* N from hwmod str "timerN" */ char tmr_name[12]; /* "DMTimerN" */ uint32_t tclr; /* Cached TCLR register. */ struct timecounter tc; int pps_curmode; /* Edge mode now set in hw. */ struct task pps_task; /* For pps_event handling. */ struct cdev * pps_cdev; struct pps_state pps_state; struct mtx pps_mtx; }; static int dmtpps_tmr_num; /* Set by probe() */ /* List of compatible strings for FDT tree */ static struct ofw_compat_data compat_data[] = { {"ti,am335x-timer", 1}, {"ti,am335x-timer-1ms", 1}, {NULL, 0}, }; /* * A table relating pad names to the hardware timer number they can be mux'd to. */ struct padinfo { char * ballname; int tmr_num; }; static struct padinfo dmtpps_padinfo[] = { {"GPMC_ADVn_ALE", 4}, {"I2C0_SDA", 4}, {"MII1_TX_EN", 4}, {"XDMA_EVENT_INTR0", 4}, {"GPMC_BEn0_CLE", 5}, {"MDC", 5}, {"MMC0_DAT3", 5}, {"UART1_RTSn", 5}, {"GPMC_WEn", 6}, {"MDIO", 6}, {"MMC0_DAT2", 6}, {"UART1_CTSn", 6}, {"GPMC_OEn_REn", 7}, {"I2C0_SCL", 7}, {"UART0_CTSn", 7}, {"XDMA_EVENT_INTR1", 7}, {NULL, 0} }; /* * This is either brilliantly user-friendly, or utterly lame... * * The am335x chip is used on the popular Beaglebone boards. Those boards have * pins for all four capture-capable timers available on the P8 header. Allow * users to configure the input pin by giving the name of the header pin. */ struct nicknames { const char * nick; const char * name; }; static struct nicknames dmtpps_pin_nicks[] = { {"P8-7", "GPMC_ADVn_ALE"}, {"P8-9", "GPMC_BEn0_CLE"}, {"P8-10", "GPMC_WEn"}, {"P8-8", "GPMC_OEn_REn",}, {NULL, NULL} }; #define DMTIMER_READ4(sc, reg) bus_read_4((sc)->mem_res, (reg)) #define DMTIMER_WRITE4(sc, reg, val) bus_write_4((sc)->mem_res, (reg), (val)) /* * Translate a short friendly case-insensitive name to its canonical name. 
*/ static const char * dmtpps_translate_nickname(const char *nick) { struct nicknames *nn; for (nn = dmtpps_pin_nicks; nn->nick != NULL; nn++) if (strcasecmp(nick, nn->nick) == 0) return nn->name; return (nick); } /* * See if our tunable is set to the name of the input pin. If not, that's NOT * an error, return 0. If so, try to configure that pin as a timer capture * input pin, and if that works, then we have our timer unit number and if it * fails that IS an error, return -1. */ static int dmtpps_find_tmr_num_by_tunable() { struct padinfo *pi; char iname[20]; char muxmode[12]; const char * ballname; int err; if (!TUNABLE_STR_FETCH("hw.am335x_dmtpps.input", iname, sizeof(iname))) return (0); ballname = dmtpps_translate_nickname(iname); for (pi = dmtpps_padinfo; pi->ballname != NULL; pi++) { if (strcmp(ballname, pi->ballname) != 0) continue; snprintf(muxmode, sizeof(muxmode), "timer%d", pi->tmr_num); err = ti_pinmux_padconf_set(pi->ballname, muxmode, PADCONF_INPUT); if (err != 0) { printf("am335x_dmtpps: unable to configure capture pin " "for %s to input mode\n", muxmode); return (-1); } else if (bootverbose) { printf("am335x_dmtpps: configured pin %s as input " "for %s\n", iname, muxmode); } return (pi->tmr_num); } /* Invalid name in the tunable, that's an error. */ printf("am335x_dmtpps: unknown pin name '%s'\n", iname); return (-1); } /* * Ask the pinmux driver whether any pin has been configured as a TIMER4..TIMER7 * input pin. If so, return the timer number, if not return 0. */ static int dmtpps_find_tmr_num_by_padconf() { int err; unsigned int padstate; const char * padmux; struct padinfo *pi; char muxmode[12]; for (pi = dmtpps_padinfo; pi->ballname != NULL; pi++) { err = ti_pinmux_padconf_get(pi->ballname, &padmux, &padstate); snprintf(muxmode, sizeof(muxmode), "timer%d", pi->tmr_num); if (err == 0 && (padstate & RXACTIVE) != 0 && strcmp(muxmode, padmux) == 0) return (pi->tmr_num); } /* Nothing found, not an error. */ return (0); } /* * Figure out which hardware timer number to use based on input pin * configuration. This is done just once, the first time probe() runs. */ static int dmtpps_find_tmr_num() { int tmr_num; if ((tmr_num = dmtpps_find_tmr_num_by_tunable()) == 0) tmr_num = dmtpps_find_tmr_num_by_padconf(); if (tmr_num <= 0) { printf("am335x_dmtpps: PPS driver not enabled: unable to find " "or configure a capture input pin\n"); tmr_num = -1; /* Must return non-zero to prevent re-probing. */ } return (tmr_num); } static void dmtpps_set_hw_capture(struct dmtpps_softc *sc, bool force_off) { int newmode; if (force_off) newmode = 0; else newmode = sc->pps_state.ppsparam.mode & PPS_CAPTUREASSERT; if (newmode == sc->pps_curmode) return; sc->pps_curmode = newmode; if (newmode == PPS_CAPTUREASSERT) sc->tclr |= DMT_TCLR_CAPTRAN_LOHI; else sc->tclr &= ~DMT_TCLR_CAPTRAN_MASK; DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); } static unsigned dmtpps_get_timecount(struct timecounter *tc) { struct dmtpps_softc *sc; sc = tc->tc_priv; return (DMTIMER_READ4(sc, DMT_TCRR)); } static void dmtpps_poll(struct timecounter *tc) { struct dmtpps_softc *sc; sc = tc->tc_priv; /* * If a new value has been latched we've got a PPS event. Capture the * timecounter data, then override the capcount field (pps_capture() * populates it from the current DMT_TCRR register) with the latched * value from the TCAR1 register. * * There is no locking here, by design. pps_capture() writes into an * area of struct pps_state which is read only by pps_event(). 
The * synchronization of access to that area is temporal rather than * interlock based... we write in this routine and trigger the task that * will read the data, so no simultaneous access can occur. * * Note that we don't have the TCAR interrupt enabled, but the hardware * still provides the status bits in the "RAW" status register even when * they're masked from generating an irq. However, when clearing the * TCAR status to re-arm the capture for the next second, we have to * write to the IRQ status register, not the RAW register. Quirky. */ if (DMTIMER_READ4(sc, DMT_IRQSTATUS_RAW) & DMT_IRQ_TCAR) { pps_capture(&sc->pps_state); sc->pps_state.capcount = DMTIMER_READ4(sc, DMT_TCAR1); DMTIMER_WRITE4(sc, DMT_IRQSTATUS, DMT_IRQ_TCAR); - taskqueue_enqueue_fast(taskqueue_fast, &sc->pps_task); + taskqueue_enqueue(taskqueue_fast, &sc->pps_task); } } static void dmtpps_event(void *arg, int pending) { struct dmtpps_softc *sc; sc = arg; /* This is the task function that gets enqueued by poll_pps. Once the * time has been captured by the timecounter polling code which runs in * primary interrupt context, the remaining (more expensive) work to * process the event is done later in a threaded context. * * Here there is an interlock that protects the event data in struct * pps_state. That data can be accessed at any time from userland via * ioctl() calls so we must ensure that there is no read access to * partially updated data while pps_event() does its work. */ mtx_lock(&sc->pps_mtx); pps_event(&sc->pps_state, PPS_CAPTUREASSERT); mtx_unlock(&sc->pps_mtx); } static int dmtpps_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct dmtpps_softc *sc; sc = dev->si_drv1; /* * Begin polling for pps and enable capture in the hardware whenever the * device is open. Doing this stuff again is harmless if this isn't the * first open. */ sc->tc.tc_poll_pps = dmtpps_poll; dmtpps_set_hw_capture(sc, false); return 0; } static int dmtpps_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct dmtpps_softc *sc; sc = dev->si_drv1; /* * Stop polling and disable capture on last close. Use the force-off * flag to override the configured mode and turn off the hardware. */ sc->tc.tc_poll_pps = NULL; dmtpps_set_hw_capture(sc, true); return 0; } static int dmtpps_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct dmtpps_softc *sc; int err; sc = dev->si_drv1; /* Let the kernel do the heavy lifting for ioctl. */ mtx_lock(&sc->pps_mtx); err = pps_ioctl(cmd, data, &sc->pps_state); mtx_unlock(&sc->pps_mtx); if (err != 0) return (err); /* * The capture mode could have changed, set the hardware to whatever * mode is now current. Effectively a no-op if nothing changed. */ dmtpps_set_hw_capture(sc, false); return (err); } static struct cdevsw dmtpps_cdevsw = { .d_version = D_VERSION, .d_open = dmtpps_open, .d_close = dmtpps_close, .d_ioctl = dmtpps_ioctl, .d_name = PPS_CDEV_NAME, }; static int dmtpps_probe(device_t dev) { char strbuf[64]; int tmr_num; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); /* * If we haven't chosen which hardware timer to use yet, go do that now. * We need to know that to decide whether to return success for this * hardware timer instance or not. */ if (dmtpps_tmr_num == 0) dmtpps_tmr_num = dmtpps_find_tmr_num(); /* * Figure out which hardware timer is being probed and see if it matches * the configured timer number determined earlier. 
*/ tmr_num = ti_hwmods_get_unit(dev, "timer"); if (dmtpps_tmr_num != tmr_num) return (ENXIO); snprintf(strbuf, sizeof(strbuf), "AM335x PPS-Capture DMTimer%d", tmr_num); device_set_desc_copy(dev, strbuf); return(BUS_PROBE_DEFAULT); } static int dmtpps_attach(device_t dev) { struct dmtpps_softc *sc; clk_ident_t timer_id; int err, sysclk_freq; sc = device_get_softc(dev); sc->dev = dev; /* Get the base clock frequency. */ err = ti_prcm_clk_get_source_freq(SYS_CLK, &sysclk_freq); /* Enable clocks and power on the device. */ if ((timer_id = ti_hwmods_get_clock(dev)) == INVALID_CLK_IDENT) return (ENXIO); if ((err = ti_prcm_clk_set_source(timer_id, SYSCLK_CLK)) != 0) return (err); if ((err = ti_prcm_clk_enable(timer_id)) != 0) return (err); /* Request the memory resources. */ sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { return (ENXIO); } /* Figure out which hardware timer this is and set the name string. */ sc->tmr_num = ti_hwmods_get_unit(dev, "timer"); snprintf(sc->tmr_name, sizeof(sc->tmr_name), "DMTimer%d", sc->tmr_num); /* Set up timecounter hardware, start it. */ DMTIMER_WRITE4(sc, DMT_TSICR, DMT_TSICR_RESET); while (DMTIMER_READ4(sc, DMT_TIOCP_CFG) & DMT_TIOCP_RESET) continue; sc->tclr |= DMT_TCLR_START | DMT_TCLR_AUTOLOAD; DMTIMER_WRITE4(sc, DMT_TLDR, 0); DMTIMER_WRITE4(sc, DMT_TCRR, 0); DMTIMER_WRITE4(sc, DMT_TCLR, sc->tclr); /* Register the timecounter. */ sc->tc.tc_name = sc->tmr_name; sc->tc.tc_get_timecount = dmtpps_get_timecount; sc->tc.tc_counter_mask = ~0u; sc->tc.tc_frequency = sysclk_freq; sc->tc.tc_quality = 1000; sc->tc.tc_priv = sc; tc_init(&sc->tc); /* * Indicate our PPS capabilities. Have the kernel init its part of the * pps_state struct and add its capabilities. * * While the hardware has a mode to capture each edge, it's not clear we * can use it that way, because there's only a single interrupt/status * bit to say something was captured, but not which edge it was. For * now, just say we can only capture assert events (the positive-going * edge of the pulse). */ mtx_init(&sc->pps_mtx, "dmtpps", NULL, MTX_DEF); sc->pps_state.ppscap = PPS_CAPTUREASSERT; sc->pps_state.driver_abi = PPS_ABI_VERSION; sc->pps_state.driver_mtx = &sc->pps_mtx; pps_init_abi(&sc->pps_state); /* * Init the task that does deferred pps_event() processing after * the polling routine has captured a pps pulse time. */ TASK_INIT(&sc->pps_task, 0, dmtpps_event, sc); /* Create the PPS cdev. */ sc->pps_cdev = make_dev(&dmtpps_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PPS_CDEV_NAME); sc->pps_cdev->si_drv1 = sc; if (bootverbose) device_printf(sc->dev, "Using %s for PPS device /dev/%s\n", sc->tmr_name, PPS_CDEV_NAME); return (0); } static int dmtpps_detach(device_t dev) { /* * There is no way to remove a timecounter once it has been registered, * even if it's not in use, so we can never detach. If we were * dynamically loaded as a module this will prevent unloading. 
*/ return (EBUSY); } static device_method_t dmtpps_methods[] = { DEVMETHOD(device_probe, dmtpps_probe), DEVMETHOD(device_attach, dmtpps_attach), DEVMETHOD(device_detach, dmtpps_detach), { 0, 0 } }; static driver_t dmtpps_driver = { "am335x_dmtpps", dmtpps_methods, sizeof(struct dmtpps_softc), }; static devclass_t dmtpps_devclass; DRIVER_MODULE(am335x_dmtpps, simplebus, dmtpps_driver, dmtpps_devclass, 0, 0); MODULE_DEPEND(am335x_dmtpps, am335x_prcm, 1, 1, 1); Index: head/sys/dev/aac/aac.c =================================================================== --- head/sys/dev/aac/aac.c (revision 296271) +++ head/sys/dev/aac/aac.c (revision 296272) @@ -1,3791 +1,3791 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. 
*/ #define AAC_DRIVERNAME "aac" #include "opt_aac.h" /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void aac_startup(void *arg); static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f); static void aac_get_bus_info(struct aac_softc *sc); static void aac_daemon(void *arg); /* Command Processing */ static void aac_timeout(struct aac_softc *sc); static void aac_complete(void *context, int pending); static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); static void aac_bio_complete(struct aac_command *cm); static int aac_wait_command(struct aac_command *cm); static void aac_command_thread(struct aac_softc *sc); /* Command Buffer Management */ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_alloc_commands(struct aac_softc *sc); static void aac_free_commands(struct aac_softc *sc); static void aac_unmap_command(struct aac_command *cm); /* Hardware Interface */ static int aac_alloc(struct aac_softc *sc); static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_check_firmware(struct aac_softc *sc); static int aac_init(struct aac_softc *sc); static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp); static int aac_setup_intr(struct aac_softc *sc); static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm); static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr); static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib); /* StrongARM interface */ static int aac_sa_get_fwstatus(struct aac_softc *sc); static void aac_sa_qnotify(struct aac_softc *sc, int qbit); static int aac_sa_get_istatus(struct aac_softc *sc); static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_sa_get_mailbox(struct aac_softc *sc, int mb); static void aac_sa_set_interrupts(struct aac_softc *sc, int enable); const struct aac_interface aac_sa_interface = { aac_sa_get_fwstatus, aac_sa_qnotify, aac_sa_get_istatus, aac_sa_clear_istatus, aac_sa_set_mailbox, aac_sa_get_mailbox, aac_sa_set_interrupts, NULL, NULL, NULL }; /* i960Rx interface */ static int aac_rx_get_fwstatus(struct aac_softc *sc); static void aac_rx_qnotify(struct aac_softc *sc, int qbit); static int aac_rx_get_istatus(struct aac_softc *sc); static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rx_get_mailbox(struct aac_softc *sc, int mb); static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_rx_get_outb_queue(struct aac_softc *sc); static void aac_rx_set_outb_queue(struct aac_softc *sc, int index); const struct aac_interface aac_rx_interface = { aac_rx_get_fwstatus, aac_rx_qnotify, aac_rx_get_istatus, aac_rx_clear_istatus, 
aac_rx_set_mailbox, aac_rx_get_mailbox, aac_rx_set_interrupts, aac_rx_send_command, aac_rx_get_outb_queue, aac_rx_set_outb_queue }; /* Rocket/MIPS interface */ static int aac_rkt_get_fwstatus(struct aac_softc *sc); static void aac_rkt_qnotify(struct aac_softc *sc, int qbit); static int aac_rkt_get_istatus(struct aac_softc *sc); static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask); static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb); static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable); static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_rkt_get_outb_queue(struct aac_softc *sc); static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index); const struct aac_interface aac_rkt_interface = { aac_rkt_get_fwstatus, aac_rkt_qnotify, aac_rkt_get_istatus, aac_rkt_clear_istatus, aac_rkt_set_mailbox, aac_rkt_get_mailbox, aac_rkt_set_interrupts, aac_rkt_send_command, aac_rkt_get_outb_queue, aac_rkt_set_outb_queue }; /* Debugging and Diagnostics */ static void aac_describe_controller(struct aac_softc *sc); static const char *aac_describe_code(const struct aac_code_lookup *table, u_int32_t code); /* Management Interface */ static d_open_t aac_open; static d_ioctl_t aac_ioctl; static d_poll_t aac_poll; static void aac_cdevpriv_dtor(void *arg); static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); static int aac_rev_check(struct aac_softc *sc, caddr_t udata); static int aac_open_aif(struct aac_softc *sc, caddr_t arg); static int aac_close_aif(struct aac_softc *sc, caddr_t arg); static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr); static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg); static struct aac_mntinforesp * aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid); static struct cdevsw aac_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = aac_open, .d_ioctl = aac_ioctl, .d_poll = aac_poll, .d_name = "aac", }; static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver"); /* sysctl node */ SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters"); /* * Device Interface */ /* * Initialize the controller and softc */ int aac_attach(struct aac_softc *sc) { int error, unit; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Initialize per-controller queues. */ aac_initq_free(sc); aac_initq_ready(sc); aac_initq_busy(sc); aac_initq_bio(sc); /* * Initialize command-completion task. */ TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc); /* mark controller as suspended until we get ourselves organised */ sc->aac_state |= AAC_STATE_SUSPEND; /* * Check that the firmware on the card is supported. 
*/ if ((error = aac_check_firmware(sc)) != 0) return(error); /* * Initialize locks */ mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF); mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF); mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF); TAILQ_INIT(&sc->aac_container_tqh); TAILQ_INIT(&sc->aac_ev_cmfree); /* Initialize the clock daemon callout. */ callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); /* * Initialize the adapter. */ if ((error = aac_alloc(sc)) != 0) return(error); if ((error = aac_init(sc)) != 0) return(error); /* * Allocate and connect our interrupt. */ if ((error = aac_setup_intr(sc)) != 0) return(error); /* * Print a little information about the controller. */ aac_describe_controller(sc); /* * Add sysctls. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)), OID_AUTO, "firmware_build", CTLFLAG_RD, &sc->aac_revision.buildNumber, 0, "firmware build number"); /* * Register to probe our containers later. */ sc->aac_ich.ich_func = aac_startup; sc->aac_ich.ich_arg = sc; if (config_intrhook_establish(&sc->aac_ich) != 0) { device_printf(sc->aac_dev, "can't establish configuration hook\n"); return(ENXIO); } /* * Make the control device. */ unit = device_get_unit(sc->aac_dev); sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "aac%d", unit); (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit); (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit); sc->aac_dev_t->si_drv1 = sc; /* Create the AIF thread */ if (kproc_create((void(*)(void *))aac_command_thread, sc, &sc->aifthread, 0, 0, "aac%daif", unit)) panic("Could not create AIF thread"); /* Register the shutdown method to only be called post-dump */ if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown, sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) device_printf(sc->aac_dev, "shutdown event registration failed\n"); /* Register with CAM for the non-DASD devices */ if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) { TAILQ_INIT(&sc->aac_sim_tqh); aac_get_bus_info(sc); } mtx_lock(&sc->aac_io_lock); callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); mtx_unlock(&sc->aac_io_lock); return(0); } static void aac_daemon(void *arg) { struct timeval tv; struct aac_softc *sc; struct aac_fib *fib; sc = arg; mtx_assert(&sc->aac_io_lock, MA_OWNED); if (callout_pending(&sc->aac_daemontime) || callout_active(&sc->aac_daemontime) == 0) return; getmicrotime(&tv); aac_alloc_sync_fib(sc, &fib); *(uint32_t *)fib->data = tv.tv_sec; aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t)); aac_release_sync_fib(sc); callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); } void aac_add_event(struct aac_softc *sc, struct aac_event *event) { switch (event->ev_type & AAC_EVENT_MASK) { case AAC_EVENT_CMFREE: TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); break; default: device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", event->ev_type); break; } } /* * Request information of container #cid */ static struct aac_mntinforesp * aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid) { struct aac_mntinfo *mi; mi = (struct aac_mntinfo *)&fib->data[0]; /* use 64-bit LBA if enabled */ mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 
VM_NameServe64 : VM_NameServe; mi->MntType = FT_FILESYS; mi->MntCount = cid; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_mntinfo))) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); return (NULL); } return ((struct aac_mntinforesp *)&fib->data[0]); } /* * Probe for containers, create disks. */ static void aac_startup(void *arg) { struct aac_softc *sc; struct aac_fib *fib; struct aac_mntinforesp *mir; int count = 0, i = 0; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* disconnect ourselves from the intrhook chain */ config_intrhook_disestablish(&sc->aac_ich); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); /* loop over possible containers */ do { if ((mir = aac_get_container_info(sc, fib, i)) == NULL) continue; if (i == 0) count = mir->MntRespCount; aac_add_container(sc, mir, 0); i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); /* poke the bus to actually attach the child devices */ if (bus_generic_attach(sc->aac_dev)) device_printf(sc->aac_dev, "bus_generic_attach failed\n"); /* mark the controller up */ sc->aac_state &= ~AAC_STATE_SUSPEND; /* enable interrupts now */ AAC_UNMASK_INTERRUPTS(sc); } /* * Create a device to represent a new container */ static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f) { struct aac_container *co; device_t child; /* * Check container volume type for validity. Note that many of * the possible types may never show up. */ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { co = (struct aac_container *)malloc(sizeof *co, M_AACBUF, M_NOWAIT | M_ZERO); if (co == NULL) panic("Out of memory?!"); fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d", mir->MntTable[0].ObjectId, mir->MntTable[0].FileSystemName, mir->MntTable[0].Capacity, mir->MntTable[0].VolType); if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL) device_printf(sc->aac_dev, "device_add_child failed\n"); else device_set_ivars(child, co); device_set_desc(child, aac_describe_code(aac_container_types, mir->MntTable[0].VolType)); co->co_disk = child; co->co_found = f; bcopy(&mir->MntTable[0], &co->co_mntobj, sizeof(struct aac_mntobj)); mtx_lock(&sc->aac_container_lock); TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); mtx_unlock(&sc->aac_container_lock); } } /* * Allocate resources associated with (sc) */ static int aac_alloc(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_sectors << 9, /* maxsize */ sc->aac_sg_tablesize, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->aac_io_lock, /* lockfuncarg */ &sc->aac_buffer_dmat)) { device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for mapping FIBs into controller-addressable space.. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_fibs_alloc * sc->aac_max_fib_size, /* maxsize */ 1, /* nsegments */ sc->aac_max_fibs_alloc * sc->aac_max_fib_size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_fib_dmat)) { device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for the common structure and allocate it. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 8192 + sizeof(struct aac_common), /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_common_dmat)) { device_printf(sc->aac_dev, "can't allocate common structure DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { device_printf(sc->aac_dev, "can't allocate common structure\n"); return (ENOMEM); } /* * Work around a bug in the 2120 and 2200 that cannot DMA commands * below address 8192 in physical memory. * XXX If the padding is not needed, can it be put to use instead * of ignored? */ (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, sc->aac_common, 8192 + sizeof(*sc->aac_common), aac_common_map, sc, 0); if (sc->aac_common_busaddr < 8192) { sc->aac_common = (struct aac_common *) ((uint8_t *)sc->aac_common + 8192); sc->aac_common_busaddr += 8192; } bzero(sc->aac_common, sizeof(*sc->aac_common)); /* Allocate some FIBs and associated command structs */ TAILQ_INIT(&sc->aac_fibmap_tqh); sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), M_AACBUF, M_WAITOK|M_ZERO); while (sc->total_fibs < sc->aac_max_fibs) { if (aac_alloc_commands(sc) != 0) break; } if (sc->total_fibs == 0) return (ENOMEM); return (0); } /* * Free all of the resources associated with (sc) * * Should not be called if the controller is active.
*/ void aac_free(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* remove the control device */ if (sc->aac_dev_t != NULL) destroy_dev(sc->aac_dev_t); /* throw away any FIB buffers, discard the FIB DMA tag */ aac_free_commands(sc); if (sc->aac_fib_dmat) bus_dma_tag_destroy(sc->aac_fib_dmat); free(sc->aac_commands, M_AACBUF); /* destroy the common area */ if (sc->aac_common) { bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, sc->aac_common_dmamap); } if (sc->aac_common_dmat) bus_dma_tag_destroy(sc->aac_common_dmat); /* disconnect the interrupt handler */ if (sc->aac_intr) bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); if (sc->aac_irq != NULL) { bus_release_resource(sc->aac_dev, SYS_RES_IRQ, rman_get_rid(sc->aac_irq), sc->aac_irq); pci_release_msi(sc->aac_dev); } /* destroy data-transfer DMA tag */ if (sc->aac_buffer_dmat) bus_dma_tag_destroy(sc->aac_buffer_dmat); /* destroy the parent DMA tag */ if (sc->aac_parent_dmat) bus_dma_tag_destroy(sc->aac_parent_dmat); /* release the register window mapping */ if (sc->aac_regs_res0 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0); if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1); } /* * Disconnect from the controller completely, in preparation for unload. */ int aac_detach(device_t dev) { struct aac_softc *sc; struct aac_container *co; struct aac_sim *sim; int error; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); callout_drain(&sc->aac_daemontime); mtx_lock(&sc->aac_io_lock); while (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_EXIT; wakeup(sc->aifthread); msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0); } mtx_unlock(&sc->aac_io_lock); KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0, ("%s: invalid detach state", __func__)); /* Remove the child containers */ while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { error = device_delete_child(dev, co->co_disk); if (error) return (error); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); free(co, M_AACBUF); } /* Remove the CAM SIMs */ while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); error = device_delete_child(dev, sim->sim_dev); if (error) return (error); free(sim, M_AACBUF); } if ((error = aac_shutdown(dev))) return(error); EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); aac_free(sc); mtx_destroy(&sc->aac_aifq_lock); mtx_destroy(&sc->aac_io_lock); mtx_destroy(&sc->aac_container_lock); return(0); } /* * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach or system shutdown. * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ int aac_shutdown(device_t dev) { struct aac_softc *sc; struct aac_fib *fib; struct aac_close_command *cc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; /* * Send a Container shutdown followed by a HostShutdown FIB to the * controller to convince it that we don't want to talk to it anymore. 
* We've been closed and all I/O completed already */ device_printf(sc->aac_dev, "shutting down controller..."); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); cc = (struct aac_close_command *)&fib->data[0]; bzero(cc, sizeof(struct aac_close_command)); cc->Command = VM_CloseAll; cc->ContainerId = 0xffffffff; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_close_command))) printf("FAILED.\n"); else printf("done\n"); #if 0 else { fib->data[0] = 0; /* * XXX Issuing this command to the controller makes it shut down * but also keeps it from coming back up without a reset of the * PCI bus. This is not desirable if you are just unloading the * driver module with the intent to reload it later. */ if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN, fib, 1)) { printf("FAILED.\n"); } else { printf("done.\n"); } } #endif AAC_MASK_INTERRUPTS(sc); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(0); } /* * Bring the controller to a quiescent state, ready for system suspend. */ int aac_suspend(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; AAC_MASK_INTERRUPTS(sc); return(0); } /* * Bring the controller back to a state ready for operation. */ int aac_resume(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state &= ~AAC_STATE_SUSPEND; AAC_UNMASK_INTERRUPTS(sc); return(0); } /* * Interrupt handler for NEW_COMM interface. */ void aac_new_intr(void *arg) { struct aac_softc *sc; u_int32_t index, fast; struct aac_command *cm; struct aac_fib *fib; int i; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); while (1) { index = AAC_GET_OUTB_QUEUE(sc); if (index == 0xffffffff) index = AAC_GET_OUTB_QUEUE(sc); if (index == 0xffffffff) break; if (index & 2) { if (index == 0xfffffffe) { /* XXX This means that the controller wants * more work. Ignore it for now. */ continue; } /* AIF */ fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF, M_NOWAIT | M_ZERO); if (fib == NULL) { /* If we're really this short on memory, * hopefully breaking out of the handler will * allow something to get freed. This * actually sucks a whole lot. */ break; } index &= ~2; for (i = 0; i < sizeof(struct aac_fib)/4; ++i) ((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4); aac_handle_aif(sc, fib); free(fib, M_AACBUF); /* * AIF memory is owned by the adapter, so let it * know that we are done with it. */ AAC_SET_OUTB_QUEUE(sc, index); AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); } else { fast = index & 1; cm = sc->aac_commands + (index >> 2); fib = cm->cm_fib; if (fast) { fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL; } aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; } } /* see if we can start some more I/O */ if ((sc->flags & AAC_QUEUE_FRZN) == 0) aac_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Interrupt filter for !NEW_COMM interface. */ int aac_filter(void *arg) { struct aac_softc *sc; u_int16_t reason; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Read the status register directly. 
This is faster than taking the * driver lock and reading the queues directly. It also saves having * to turn parts of the driver lock into a spin mutex, which would be * ugly. */ reason = AAC_GET_ISTATUS(sc); AAC_CLEAR_ISTATUS(sc, reason); /* handle completion processing */ if (reason & AAC_DB_RESPONSE_READY) - taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete); + taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete); /* controller wants to talk to us */ if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) { /* * XXX Make sure that we don't get fooled by strange messages * that start with a NULL. */ if ((reason & AAC_DB_PRINTF) && (sc->aac_common->ac_printf[0] == 0)) sc->aac_common->ac_printf[0] = 32; /* * This might miss doing the actual wakeup. However, the * msleep that this is waking up has a timeout, so it will * wake up eventually. AIFs and printfs are low enough * priority that they can handle hanging out for a few seconds * if needed. */ wakeup(sc->aifthread); } return (FILTER_HANDLED); } /* * Command Processing */ /* * Start as much queued I/O as possible on the controller */ void aac_startio(struct aac_softc *sc) { struct aac_command *cm; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); for (;;) { /* * This flag might be set if the card is out of resources. * Checking it here prevents an infinite loop of deferrals. */ if (sc->flags & AAC_QUEUE_FRZN) break; /* * Try to get a command that's been put off for lack of * resources */ cm = aac_dequeue_ready(sc); /* * Try to build a command off the bio queue (ignore error * return) */ if (cm == NULL) aac_bio_command(sc, &cm); /* nothing to do? */ if (cm == NULL) break; /* don't map more than once */ if (cm->cm_flags & AAC_CMD_MAPPED) panic("aac: command %p already mapped", cm); /* * Set up the command to go to the controller. If there are no * data buffers associated with the command then it can bypass * busdma. */ if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_REQ_BIO) error = bus_dmamap_load_bio( sc->aac_buffer_dmat, cm->cm_datamap, (struct bio *)cm->cm_private, aac_map_command_sg, cm, 0); else error = bus_dmamap_load(sc->aac_buffer_dmat, cm->cm_datamap, cm->cm_data, cm->cm_datalen, aac_map_command_sg, cm, 0); if (error == EINPROGRESS) { fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n"); sc->flags |= AAC_QUEUE_FRZN; } else if (error != 0) panic("aac_startio: unexpected error %d from " "busdma", error); } else aac_map_command_sg(cm, NULL, 0, 0); } } /* * Handle notification of one or more FIBs coming from the controller. */ static void aac_command_thread(struct aac_softc *sc) { struct aac_fib *fib; u_int32_t fib_size; int size, retval; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); sc->aifflags = AAC_AIFFLAGS_RUNNING; while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { retval = 0; if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, "aifthd", AAC_PERIODIC_INTERVAL * hz); /* * First see if any FIBs need to be allocated. This needs * to be called without the driver lock because contigmalloc * can sleep. */ if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { mtx_unlock(&sc->aac_io_lock); aac_alloc_commands(sc); mtx_lock(&sc->aac_io_lock); sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; aac_startio(sc); } /* * While we're here, check to see if any commands are stuck. * This is pretty low-priority, so it's ok if it doesn't * always fire. 
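* (The idiom: msleep(9) returns EWOULDBLOCK when its timeout expires
* before any wakeup arrives, so the aac_timeout() call below runs only on
* the periodic AAC_PERIODIC_INTERVAL tick and not on every explicit
* wakeup.  A generic sketch of the idiom, with hypothetical names:
*
*	error = msleep(chan, &lock, PRIBIO, "tick", interval * hz);
*	if (error == EWOULDBLOCK)
*		check_for_stuck_work();
* )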
*/ if (retval == EWOULDBLOCK) aac_timeout(sc); /* Check the hardware printf message buffer */ if (sc->aac_common->ac_printf[0] != 0) aac_print_printf(sc); /* Also check to see if the adapter has a command for us. */ if (sc->flags & AAC_FLAGS_NEW_COMM) continue; for (;;) { if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, &fib_size, &fib)) break; AAC_PRINT_FIB(sc, fib); switch (fib->Header.Command) { case AifRequest: aac_handle_aif(sc, fib); break; default: device_printf(sc->aac_dev, "unknown command " "from controller\n"); break; } if ((fib->Header.XferState == 0) || (fib->Header.StructType != AAC_FIBTYPE_TFIB)) { break; } /* Return the AIF to the controller. */ if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) { fib->Header.XferState |= AAC_FIBSTATE_DONEHOST; *(AAC_FSAStatus*)fib->data = ST_OK; /* XXX Compute the Size field? */ size = fib->Header.Size; if (size > sizeof(struct aac_fib)) { size = sizeof(struct aac_fib); fib->Header.Size = size; } /* * Since we did not generate this command, it * cannot go through the normal * enqueue->startio chain. */ aac_enqueue_response(sc, AAC_ADAP_NORM_RESP_QUEUE, fib); } } } sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aac_dev); kproc_exit(0); } /* * Process completed commands. */ static void aac_complete(void *context, int pending) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; u_int32_t fib_size; sc = (struct aac_softc *)context; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); /* pull completed commands off the queue */ for (;;) { /* look for completed FIBs on our queue */ if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, &fib)) break; /* nothing to do */ /* get the command, unmap and hand off for processing */ cm = sc->aac_commands + fib->Header.SenderData; if (cm == NULL) { AAC_PRINT_FIB(sc, fib); break; } if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0) device_printf(sc->aac_dev, "COMMAND %p COMPLETED AFTER %d SECONDS\n", cm, (int)(time_uptime-cm->cm_timestamp)); aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } } /* see if we can start some more I/O */ sc->flags &= ~AAC_QUEUE_FRZN; aac_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Handle a bio submitted from a disk device. */ void aac_submit_bio(struct bio *bp) { struct aac_disk *ad; struct aac_softc *sc; ad = (struct aac_disk *)bp->bio_disk->d_drv1; sc = ad->ad_controller; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* queue the BIO and try to get some work done */ aac_enqueue_bio(sc, bp); aac_startio(sc); } /* * Get a bio and build a command to go with it. 
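* (Transfer-size bookkeeping below: the raw and 32-bit container paths
* carry bp->bio_bcount as a byte count, while the 64-bit path converts it
* to sectors as bio_bcount / AAC_BLOCK_SIZE.  Assuming the conventional
* 512-byte AAC_BLOCK_SIZE, a 64KB bio would yield:
*
*	SectorCount = 65536 / 512 = 128
* )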
*/ static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; struct aac_fib *fib; struct aac_disk *ad; struct bio *bp; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* get the resources we will need */ cm = NULL; bp = NULL; if (aac_alloc_command(sc, &cm)) /* get a command */ goto fail; if ((bp = aac_dequeue_bio(sc)) == NULL) goto fail; /* fill out the command */ cm->cm_datalen = bp->bio_bcount; cm->cm_complete = aac_bio_complete; cm->cm_flags = AAC_REQ_BIO; cm->cm_private = bp; cm->cm_timestamp = time_uptime; /* build the FIB */ fib = cm->cm_fib; fib->Header.Size = sizeof(struct aac_fib_header); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; /* build the read/write request */ ad = (struct aac_disk *)bp->bio_disk->d_drv1; if (sc->flags & AAC_FLAGS_RAW_IO) { struct aac_raw_io *raw; raw = (struct aac_raw_io *)&fib->data[0]; fib->Header.Command = RawIo; raw->BlockNumber = (u_int64_t)bp->bio_pblkno; raw->ByteCount = bp->bio_bcount; raw->ContainerId = ad->ad_container->co_mntobj.ObjectId; raw->BpTotal = 0; raw->BpComplete = 0; fib->Header.Size += sizeof(struct aac_raw_io); cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw; if (bp->bio_cmd == BIO_READ) { raw->Flags = 1; cm->cm_flags |= AAC_CMD_DATAIN; } else { raw->Flags = 0; cm->cm_flags |= AAC_CMD_DATAOUT; } } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) { fib->Header.Command = ContainerCommand; if (bp->bio_cmd == BIO_READ) { struct aac_blockread *br; br = (struct aac_blockread *)&fib->data[0]; br->Command = VM_CtBlockRead; br->ContainerId = ad->ad_container->co_mntobj.ObjectId; br->BlockNumber = bp->bio_pblkno; br->ByteCount = bp->bio_bcount; fib->Header.Size += sizeof(struct aac_blockread); cm->cm_sgtable = &br->SgMap; cm->cm_flags |= AAC_CMD_DATAIN; } else { struct aac_blockwrite *bw; bw = (struct aac_blockwrite *)&fib->data[0]; bw->Command = VM_CtBlockWrite; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->BlockNumber = bp->bio_pblkno; bw->ByteCount = bp->bio_bcount; bw->Stable = CUNSTABLE; fib->Header.Size += sizeof(struct aac_blockwrite); cm->cm_flags |= AAC_CMD_DATAOUT; cm->cm_sgtable = &bw->SgMap; } } else { fib->Header.Command = ContainerCommand64; if (bp->bio_cmd == BIO_READ) { struct aac_blockread64 *br; br = (struct aac_blockread64 *)&fib->data[0]; br->Command = VM_CtHostRead64; br->ContainerId = ad->ad_container->co_mntobj.ObjectId; br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE; br->BlockNumber = bp->bio_pblkno; br->Pad = 0; br->Flags = 0; fib->Header.Size += sizeof(struct aac_blockread64); cm->cm_flags |= AAC_CMD_DATAIN; cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64; } else { struct aac_blockwrite64 *bw; bw = (struct aac_blockwrite64 *)&fib->data[0]; bw->Command = VM_CtHostWrite64; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE; bw->BlockNumber = bp->bio_pblkno; bw->Pad = 0; bw->Flags = 0; fib->Header.Size += sizeof(struct aac_blockwrite64); cm->cm_flags |= AAC_CMD_DATAOUT; cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64; } } *cmp = cm; return(0); fail: if (bp != NULL) aac_enqueue_bio(sc, bp); if (cm != NULL) aac_release_command(cm); return(ENOMEM); } /* * Handle a bio-instigated command that has been completed. 
*/ static void aac_bio_complete(struct aac_command *cm) { struct aac_blockread_response *brr; struct aac_blockwrite_response *bwr; struct bio *bp; AAC_FSAStatus status; /* fetch relevant status and then release the command */ bp = (struct bio *)cm->cm_private; if (bp->bio_cmd == BIO_READ) { brr = (struct aac_blockread_response *)&cm->cm_fib->data[0]; status = brr->Status; } else { bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0]; status = bwr->Status; } aac_release_command(cm); /* fix up the bio based on status */ if (status == ST_OK) { bp->bio_resid = 0; } else { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; } aac_biodone(bp); } /* * Submit a command to the controller, return when it completes. * XXX This is very dangerous! If the card has gone out to lunch, we could * be stuck here forever. At the same time, signals are not caught * because there is a risk that a signal could wakeup the sleep before * the card has a chance to complete the command. Since there is no way * to cancel a command that is in progress, we can't protect against the * card completing a command late and spamming the command and data * memory. So, we are held hostage until the command completes. */ static int aac_wait_command(struct aac_command *cm) { struct aac_softc *sc; int error; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* Put the command on the ready queue and get things going */ aac_enqueue_ready(cm); aac_startio(sc); error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0); return(error); } /* * Command Buffer Management */ /* * Allocate a command. */ int aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((cm = aac_dequeue_free(sc)) == NULL) { if (sc->total_fibs < sc->aac_max_fibs) { mtx_lock(&sc->aac_io_lock); sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aifthread); } return (EBUSY); } *cmp = cm; return(0); } /* * Release a command back to the freelist. */ void aac_release_command(struct aac_command *cm) { struct aac_event *event; struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* (re)initialize the command/FIB */ cm->cm_datalen = 0; cm->cm_sgtable = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; cm->cm_fib->Header.Flags = 0; cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; /* * These are duplicated in aac_start to cover the case where an * intermediate stage may have destroyed them. They're left * initialized here for debugging purposes only. */ cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; cm->cm_fib->Header.SenderData = 0; aac_enqueue_free(cm); if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); event->ev_callback(sc, event, event->ev_arg); } } /* * Map helper for command/FIB allocation. */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint64_t *fibphys; fibphys = (uint64_t *)arg; *fibphys = segs[0].ds_addr; } /* * Allocate and initialize commands/FIBs for this adapter.
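* (Each call allocates one contiguous, DMA-able block of
* aac_max_fibs_alloc FIBs.  Since aac_check_firmware() sets
* aac_max_fibs_alloc = PAGE_SIZE / aac_max_fib_size, a 4096-byte page and
* 512-byte FIBs -- plausible example values, not fixed ones -- would give:
*
*	4096 / 512 = 8 FIBs per block, 8 * 512 = 4096 bytes mapped
* )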
*/ static int aac_alloc_commands(struct aac_softc *sc) { struct aac_command *cm; struct aac_fibmap *fm; uint64_t fibphys; int i, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) return (ENOMEM); fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO); if (fm == NULL) return (ENOMEM); /* allocate the FIBs in DMAable memory and load them */ if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, BUS_DMA_NOWAIT, &fm->aac_fibmap)) { device_printf(sc->aac_dev, "Not enough contiguous memory available.\n"); free(fm, M_AACBUF); return (ENOMEM); } /* Ignore errors since this doesn't bounce */ (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size, aac_map_command_helper, &fibphys, 0); /* initialize constant fields in the command structure */ bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size); for (i = 0; i < sc->aac_max_fibs_alloc; i++) { cm = sc->aac_commands + sc->total_fibs; fm->aac_commands = cm; cm->cm_sc = sc; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size); cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size; cm->cm_index = sc->total_fibs; if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, &cm->cm_datamap)) != 0) break; mtx_lock(&sc->aac_io_lock); aac_release_command(cm); sc->total_fibs++; mtx_unlock(&sc->aac_io_lock); } if (i > 0) { mtx_lock(&sc->aac_io_lock); TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); mtx_unlock(&sc->aac_io_lock); return (0); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACBUF); return (ENOMEM); } /* * Free FIBs owned by this adapter. */ static void aac_free_commands(struct aac_softc *sc) { struct aac_fibmap *fm; struct aac_command *cm; int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); /* * We check against total_fibs to handle partially * allocated blocks. */ for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { cm = fm->aac_commands + i; bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACBUF); } } /* * Command-mapping helper function - populate this command's s/g table. 
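* (Whichever of the three s/g layouts -- raw, 32-bit or 64-bit -- is in
* use, the FIB header grows by one entry per busdma segment, e.g. for the
* 32-bit form:
*
*	fib->Header.Size += nseg * sizeof(struct aac_sg_entry);
*
* so three segments with an 8-byte entry (32-bit address plus 32-bit
* count, an assumed size for illustration) add 24 bytes to the FIB.)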
*/ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; int i; cm = (struct aac_command *)arg; sc = cm->cm_sc; fib = cm->cm_fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* copy into the FIB */ if (cm->cm_sgtable != NULL) { if (fib->Header.Command == RawIo) { struct aac_sg_tableraw *sg; sg = (struct aac_sg_tableraw *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; sg->SgEntryRaw[i].Next = 0; sg->SgEntryRaw[i].Prev = 0; sg->SgEntryRaw[i].Flags = 0; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { struct aac_sg_table *sg; sg = cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry[i].SgAddress = segs[i].ds_addr; sg->SgEntry[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry); } else { struct aac_sg_table64 *sg; sg = (struct aac_sg_table64 *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry64[i].SgAddress = segs[i].ds_addr; sg->SgEntry64[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); } } /* Fix up the address values in the FIB. Use the command array index * instead of a pointer since these fields are only 32 bits. Shift * the SenderFibAddress over to make room for the fast response bit * and for the AIF bit. */ cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; /* save a pointer to the command for speedy reverse-lookup */ cm->cm_fib->Header.SenderData = cm->cm_index; if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); cm->cm_flags |= AAC_CMD_MAPPED; if (sc->flags & AAC_FLAGS_NEW_COMM) { int count = 10000000L; while (AAC_SEND_COMMAND(sc, cm) != 0) { if (--count == 0) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); break; /* the command was unmapped and requeued; give up */ } DELAY(5); /* wait 5 usec. */ } } else { /* Put the FIB on the outbound queue */ if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); } } } /* * Unmap a command from controller-visible space. */ static void aac_unmap_command(struct aac_command *cm) { struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (!(cm->cm_flags & AAC_CMD_MAPPED)) return; if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); } cm->cm_flags &= ~AAC_CMD_MAPPED; } /* * Hardware Interface */ /* * Initialize the adapter.
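* (aac_common_map() below is a bus_dmamap_load(9) callback.  busdma
* reports bus addresses through the callback rather than returning them,
* so the driver uses the usual capture idiom, a generic sketch of which
* is:
*
*	static void
*	capture_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
*	{
*		*(bus_addr_t *)arg = segs[0].ds_addr;
*	}
*
* aac_map_command_helper() above is the same idiom for the FIB area.)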
*/ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_common_busaddr = segs[0].ds_addr; } static int aac_check_firmware(struct aac_softc *sc) { u_int32_t code, major, minor, options = 0, atu_size = 0; int rid; u_int32_t status; time_t then; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Wait for the adapter to come ready. */ then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (code & AAC_SELF_TEST_FAILED) { device_printf(sc->aac_dev, "FATAL: selftest failed\n"); return(ENXIO); } if (code & AAC_KERNEL_PANIC) { device_printf(sc->aac_dev, "FATAL: controller kernel panic\n"); return(ENXIO); } if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_UP_AND_RUNNING)); /* * Retrieve the firmware version numbers. Dell PERC2/QC cards with * firmware version 1.x are not compatible with this driver. */ if (sc->flags & AAC_FLAGS_PERC2QC) { if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "Error reading firmware version\n"); return (EIO); } /* These numbers are stored as ASCII! */ major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; if (major == 1) { device_printf(sc->aac_dev, "Firmware version %d.%d is not supported.\n", major, minor); return (EINVAL); } } /* * Retrieve the capabilities/supported options word so we know what * work-arounds to enable. Some firmware revs don't support this * command. */ if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) { if (status != AAC_SRB_STS_INVALID_REQUEST) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); return (EIO); } } else { options = AAC_GET_MAILBOX(sc, 1); atu_size = AAC_GET_MAILBOX(sc, 2); sc->supported_options = options; if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && (sc->flags & AAC_FLAGS_NO4GB) == 0) sc->flags |= AAC_FLAGS_4GB_WINDOW; if (options & AAC_SUPPORTED_NONDASD) sc->flags |= AAC_FLAGS_ENABLE_CAM; if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 && (sizeof(bus_addr_t) > 4)) { device_printf(sc->aac_dev, "Enabling 64-bit address support\n"); sc->flags |= AAC_FLAGS_SG_64BIT; } if ((options & AAC_SUPPORTED_NEW_COMM) && sc->aac_if->aif_send_command) sc->flags |= AAC_FLAGS_NEW_COMM; if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) sc->flags |= AAC_FLAGS_ARRAY_64BIT; } /* Check for broken hardware that does a lower number of commands */ sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); /* Remap mem.
resource, if required */ if ((sc->flags & AAC_FLAGS_NEW_COMM) && atu_size > rman_get_size(sc->aac_regs_res1)) { rid = rman_get_rid(sc->aac_regs_res1); bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid, sc->aac_regs_res1); sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev, SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE); if (sc->aac_regs_res1 == NULL) { sc->aac_regs_res1 = bus_alloc_resource_any( sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->aac_regs_res1 == NULL) { device_printf(sc->aac_dev, "couldn't allocate register window\n"); return (ENXIO); } sc->flags &= ~AAC_FLAGS_NEW_COMM; } sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1); sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1); if (sc->aac_hwif == AAC_HWIF_NARK) { sc->aac_regs_res0 = sc->aac_regs_res1; sc->aac_btag0 = sc->aac_btag1; sc->aac_bhandle0 = sc->aac_bhandle1; } } /* Read preferred settings */ sc->aac_max_fib_size = sizeof(struct aac_fib); sc->aac_max_sectors = 128; /* 64KB */ if (sc->flags & AAC_FLAGS_SG_64BIT) sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite64)) / sizeof(struct aac_sg_entry64); else sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite)) / sizeof(struct aac_sg_entry); if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) { options = AAC_GET_MAILBOX(sc, 1); sc->aac_max_fib_size = (options & 0xFFFF); sc->aac_max_sectors = (options >> 16) << 1; options = AAC_GET_MAILBOX(sc, 2); sc->aac_sg_tablesize = (options >> 16); options = AAC_GET_MAILBOX(sc, 3); sc->aac_max_fibs = (options & 0xFFFF); } if (sc->aac_max_fib_size > PAGE_SIZE) sc->aac_max_fib_size = PAGE_SIZE; sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { sc->flags |= AAC_FLAGS_RAW_IO; device_printf(sc->aac_dev, "Enable Raw I/O\n"); } if ((sc->flags & AAC_FLAGS_RAW_IO) && (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { sc->flags |= AAC_FLAGS_LBA_64BIT; device_printf(sc->aac_dev, "Enable 64-bit array\n"); } return (0); } static int aac_init(struct aac_softc *sc) { struct aac_adapter_init *ip; u_int32_t qoffset; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Fill in the init structure. This tells the adapter about the * physical location of various important shared data structures. */ ip = &sc->aac_common->ac_init; ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; sc->flags |= AAC_FLAGS_RAW_IO; } ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_fibs); ip->AdapterFibsVirtualAddress = 0; ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); ip->AdapterFibAlign = sizeof(struct aac_fib); ip->PrintfBufferAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_printf); ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; /* * The adapter assumes that pages are 4K in size, except on some * broken firmware versions that do the page->byte conversion twice, * therefore 'assuming' that this value is in 16MB units (2^24). * Round up since the granularity is so high. 
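* (Worked example, assuming AAC_PAGE_SIZE is 4096: with 1GB of RAM,
* ctob(physmem) / AAC_PAGE_SIZE = 1073741824 / 4096 = 262144 "pages".
* For broken firmware the code below divides again, reporting
* (262144 + 4096) / 4096 = 65, i.e. about the 64 16MB units that 1GB
* really is, rounded up.)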
*/ ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { ip->HostPhysMemPages = (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; } ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ ip->InitFlags = 0; if (sc->flags & AAC_FLAGS_NEW_COMM) { ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED; device_printf(sc->aac_dev, "New comm. interface enabled\n"); } ip->MaxIoCommands = sc->aac_max_fibs; ip->MaxIoSize = sc->aac_max_sectors << 9; ip->MaxFibSize = sc->aac_max_fib_size; /* * Initialize FIB queues. Note that it appears that the layout of the * indexes and the segmentation of the entries may be mandated by the * adapter, which is only told about the base of the queue index fields. * * The initial values of the indices are assumed to inform the adapter * of the sizes of the respective queues, and theoretically it could * work out the entire layout of the queue structures from this. We * take the easy route and just lay this area out like everyone else * does. * * The Linux driver uses a much more complex scheme whereby several * header records are kept for each queue. We use a couple of generic * list manipulation functions which 'know' the size of each list by * virtue of a table. */ qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; qoffset &= ~(AAC_QUEUE_ALIGN - 1); sc->aac_queues = (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = &sc->aac_queues->qt_HostNormCmdQueue[0]; sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_HostHighCmdQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = &sc->aac_queues->qt_AdapNormCmdQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_AdapHighCmdQueue[0]; sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = &sc->aac_queues->qt_HostNormRespQueue[0]; 
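	/*
	 * (Ring bookkeeping for these entries, as implemented by
	 * aac_enqueue_fib() and aac_dequeue_fib() below.  The indices
	 * start out equal to the ring size, which is also how the
	 * adapter learns each ring's size, and the producer side works
	 * roughly like this:
	 *
	 *	if (pi >= aac_qinfo[queue].size)	// wrap around
	 *		pi = 0;
	 *	if ((pi + 1) == ci)			// ring is full
	 *		return (EBUSY);
	 *	...store the entry at pi, publish pi + 1...
	 *
	 * Publishing pi + 1 is why neither index is ever left at zero.)
	 */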
sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_HostHighRespQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = &sc->aac_queues->qt_AdapNormRespQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_AdapHighRespQueue[0]; /* * Do controller-type-specific initialisation */ switch (sc->aac_hwif) { case AAC_HWIF_I960RX: AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); break; case AAC_HWIF_RKT: AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); break; default: break; } /* * Give the init structure to the controller. */ if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, sc->aac_common_busaddr + offsetof(struct aac_common, ac_init), 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "error establishing init structure\n"); error = EIO; goto out; } error = 0; out: return(error); } static int aac_setup_intr(struct aac_softc *sc) { if (sc->flags & AAC_FLAGS_NEW_COMM) { if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, aac_new_intr, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up interrupt\n"); return (EINVAL); } } else { if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_TYPE_BIO, aac_filter, NULL, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up interrupt filter\n"); return (EINVAL); } } return (0); } /* * Send a synchronous command to the controller and wait for a result. * Indicate if the controller completed the command with an error status. */ static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp) { time_t then; u_int32_t status; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* populate the mailbox */ AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); /* ensure the sync command doorbell flag is cleared */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* then set it to signal the adapter */ AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); /* spin waiting for the command to complete */ then = time_uptime; do { if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); return(EIO); } } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); /* clear the completion flag */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* get the command status */ status = AAC_GET_MAILBOX(sc, 0); if (sp != NULL) *sp = status; if (status != AAC_SRB_STS_SUCCESS) return (-1); return(0); } int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (datasize > AAC_FIB_DATASIZE) return(EINVAL); /* * Set up the sync FIB */ fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY; fib->Header.XferState |= xferstate; fib->Header.Command = command; fib->Header.StructType = AAC_FIBTYPE_TFIB; fib->Header.Size = sizeof(struct aac_fib_header) + datasize; fib->Header.SenderSize = sizeof(struct aac_fib); fib->Header.SenderFibAddress = 0; /* Not needed */ fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_sync_fib); /* * Give the FIB to the controller, wait for a response. */ if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); return(EIO); } return (0); } /* * Adapter-space FIB queue manipulation * * Note that the queue implementation here is a little funky; neither the PI nor * CI will ever be zero.
This behaviour is a controller feature. */ static const struct { int size; int notify; } aac_qinfo[] = { {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, {AAC_HOST_HIGH_CMD_ENTRIES, 0}, {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, {AAC_HOST_HIGH_RESP_ENTRIES, 0}, {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, {AAC_ADAP_HIGH_RESP_ENTRIES, 0} }; /* * Atomically insert an entry into the nominated queue, returns 0 on success or * EBUSY if the queue is full. * * Note: it would be more efficient to defer notifying the controller in * the case where we may be inserting several entries in rapid succession, * but implementing this usefully may be difficult (it would involve a * separate queue/notify interface). */ static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) { u_int32_t pi, ci; int error; u_int32_t fib_size; u_int32_t fib_addr; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fib_size = cm->cm_fib->Header.Size; fib_addr = cm->cm_fib->Header.ReceiverFibAddress; /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* * To avoid a race with its completion interrupt, place this command on * the busy queue prior to advertising it to the controller. */ aac_enqueue_busy(cm); /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Atomically remove one entry from the nominated queue, returns 0 on * success or ENOENT if the queue is empty. */ static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr) { u_int32_t pi, ci; u_int32_t fib_index; int error; int notify; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* check for queue empty */ if (ci == pi) { error = ENOENT; goto out; } /* wrap the pi so the following test works */ if (pi >= aac_qinfo[queue].size) pi = 0; notify = 0; if (ci == pi + 1) notify++; /* wrap the queue? */ if (ci >= aac_qinfo[queue].size) ci = 0; /* fetch the entry */ *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; switch (queue) { case AAC_HOST_NORM_CMD_QUEUE: case AAC_HOST_HIGH_CMD_QUEUE: /* * The aq_fib_addr is only 32 bits wide so it can't be counted * on to hold an address. For AIF's, the adapter assumes * that it's giving us an address into the array of AIF fibs. * Therefore, we have to convert it to an index. */ fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / sizeof(struct aac_fib); *fib_addr = &sc->aac_common->ac_fibs[fib_index]; break; case AAC_HOST_NORM_RESP_QUEUE: case AAC_HOST_HIGH_RESP_QUEUE: { struct aac_command *cm; /* * As above, an index is used instead of an actual address. * Gotta shift the index to account for the fast response * bit. 
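* (Concretely, the decode below is cm = sc->aac_commands +
* (aq_fib_addr >> 2) with bit 0 flagging a fast response, the mirror
* image of the "cm->cm_index << 2" encoding in aac_map_command_sg().)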
No other correction is needed since this value was * originally provided by the driver via the SenderFibAddress * field. */ fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; cm = sc->aac_commands + (fib_index >> 2); *fib_addr = cm->cm_fib; /* * Is this a fast response? If it is, update the fib fields in * local memory since the whole fib isn't DMA'd back up. */ if (fib_index & 0x01) { (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; } break; } default: panic("Invalid queue in aac_dequeue_fib()"); break; } /* update consumer index */ sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; /* if we have made the queue un-full, notify the adapter */ if (notify && (aac_qinfo[queue].notify != 0)) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Put our response to an Adapter Initiated FIB on the response queue */ static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) { u_int32_t pi, ci; int error; u_int32_t fib_size; u_int32_t fib_addr; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* Tell the adapter where the FIB is */ fib_size = fib->Header.Size; fib_addr = fib->Header.SenderFibAddress; fib->Header.ReceiverFibAddress = fib_addr; /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Check for commands that have been outstanding for a suspiciously long time, * and complain about them. */ static void aac_timeout(struct aac_softc *sc) { struct aac_command *cm; time_t deadline; int timedout, code; /* * Traverse the busy command list, bitch about late commands once * only. */ timedout = 0; deadline = time_uptime - AAC_CMD_TIMEOUT; TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { if ((cm->cm_timestamp < deadline) && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) { cm->cm_flags |= AAC_CMD_TIMEDOUT; device_printf(sc->aac_dev, "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n", cm, cm->cm_fib->Header.Command, (int)(time_uptime-cm->cm_timestamp)); AAC_PRINT_FIB(sc, cm->cm_fib); timedout++; } } if (timedout) { code = AAC_GET_FWSTATUS(sc); if (code != AAC_UP_AND_RUNNING) { device_printf(sc->aac_dev, "WARNING! Controller is no " "longer running! code= 0x%x\n", code); } } } /* * Interface Function Vectors */ /* * Read the current firmware status word. */ static int aac_sa_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); } static int aac_rx_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? AAC_RX_OMR0 : AAC_RX_FWSTATUS)); } static int aac_rkt_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); } /* * Notify the controller of a change in a given queue */ static void aac_sa_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); } static void aac_rx_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); } static void aac_rkt_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); } /* * Get the interrupt reason bits */ static int aac_sa_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); } static int aac_rx_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); } static int aac_rkt_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); } /* * Clear some interrupt reason bits */ static void aac_sa_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); } static void aac_rx_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); } static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); } /* * Populate the mailbox and set the command word */ static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); } static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); } static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3); } /* * Fetch the immediate command status word */ static int aac_sa_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); } static int aac_rx_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); } static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); } /* * Set/clear interrupt masks */ static 
void aac_sa_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); } } static void aac_rx_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { if (sc->flags & AAC_FLAGS_NEW_COMM) AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); else AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); } } static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { if (sc->flags & AAC_FLAGS_NEW_COMM) AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); else AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); } } /* * New comm. interface: Send command functions */ static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) { u_int32_t index, device; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); if (index == 0xffffffffL) index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); if (index == 0xffffffffL) return index; aac_enqueue_busy(cm); device = index; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); device += 4; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); device += 4; AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); return 0; } static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) { u_int32_t index, device; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); if (index == 0xffffffffL) index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); if (index == 0xffffffffL) return index; aac_enqueue_busy(cm); device = index; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); device += 4; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); device += 4; AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); return 0; } /* * New comm. interface: get, set outbound queue index */ static int aac_rx_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); } static int aac_rkt_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); } static void aac_rx_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); } static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); } /* * Debugging and Diagnostics */ /* * Print some information about the controller. 
*/ static void aac_describe_controller(struct aac_softc *sc) { struct aac_fib *fib; struct aac_adapter_info *info; char *adapter_type = "Adaptec RAID controller"; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); fib->data[0] = 0; if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } /* save the kernel revision structure for later use */ info = (struct aac_adapter_info *)&fib->data[0]; sc->aac_revision = info->KernelRevision; if (bootverbose) { device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " "(%dMB cache, %dMB execution), %s\n", aac_describe_code(aac_cpu_variant, info->CpuVariant), info->ClockSpeed, info->TotalMem / (1024 * 1024), info->BufferMem / (1024 * 1024), info->ExecutionMem / (1024 * 1024), aac_describe_code(aac_battery_platform, info->batteryPlatform)); device_printf(sc->aac_dev, "Kernel %d.%d-%d, Build %d, S/N %6X\n", info->KernelRevision.external.comp.major, info->KernelRevision.external.comp.minor, info->KernelRevision.external.comp.dash, info->KernelRevision.buildNumber, (u_int32_t)(info->SerialNumber & 0xffffff)); device_printf(sc->aac_dev, "Supported Options=%b\n", sc->supported_options, "\20" "\1SNAPSHOT" "\2CLUSTERS" "\3WCACHE" "\4DATA64" "\5HOSTTIME" "\6RAID50" "\7WINDOW4GB" "\10SCSIUPGD" "\11SOFTERR" "\12NORECOND" "\13SGMAP64" "\14ALARM" "\15NONDASD" "\16SCSIMGT" "\17RAIDSCSI" "\21ADPTINFO" "\22NEWCOMM" "\23ARRAY64BIT" "\24HEATSENSOR"); } if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { fib->data[0] = 0; if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); else adapter_type = ((struct aac_supplement_adapter_info *) &fib->data[0])->AdapterTypeText; } device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", adapter_type, AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); } /* * Look up a text description of a numeric error code and return a pointer to * same. 
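* (The lookup tables are expected to end with a NULL-string sentinel
* followed by one catch-all entry; that trailing entry is what the
* table[i + 1].string fallback below returns.  A hypothetical table in
* that layout:
*
*	static const struct aac_code_lookup frob_table[] = {
*		{ .string = "frobbed OK", .code = 0x01 },
*		{ .string = NULL },
*		{ .string = "unknown frob code" }
*	};
* )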
*/ static const char * aac_describe_code(const struct aac_code_lookup *table, u_int32_t code) { int i; for (i = 0; table[i].string != NULL; i++) if (table[i].code == code) return(table[i].string); return(table[i + 1].string); } /* * Management Interface */ static int aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); device_busy(sc->aac_dev); devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); return 0; } static int aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union aac_statrequest *as; struct aac_softc *sc; int error = 0; as = (union aac_statrequest *)arg; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); switch (cmd) { case AACIO_STATS: switch (as->as_item) { case AACQ_FREE: case AACQ_BIO: case AACQ_READY: case AACQ_BUSY: bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, sizeof(struct aac_qstat)); break; default: error = ENOENT; break; } break; case FSACTL_SENDFIB: case FSACTL_SEND_LARGE_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_SENDFIB: case FSACTL_LNX_SEND_LARGE_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); error = aac_ioctl_sendfib(sc, arg); break; case FSACTL_SEND_RAW_SRB: arg = *(caddr_t*)arg; case FSACTL_LNX_SEND_RAW_SRB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); error = aac_ioctl_send_raw_srb(sc, arg); break; case FSACTL_AIF_THREAD: case FSACTL_LNX_AIF_THREAD: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); error = EINVAL; break; case FSACTL_OPEN_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); error = aac_open_aif(sc, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); error = aac_getnext_aif(sc, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); error = aac_close_aif(sc, arg); break; case FSACTL_MINIPORT_REV_CHECK: arg = *(caddr_t*)arg; case FSACTL_LNX_MINIPORT_REV_CHECK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); error = aac_rev_check(sc, arg); break; case FSACTL_QUERY_DISK: arg = *(caddr_t*)arg; case FSACTL_LNX_QUERY_DISK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); error = aac_query_disk(sc, arg); break; case FSACTL_DELETE_DISK: case FSACTL_LNX_DELETE_DISK: /* * We don't trust userland to tell us when to delete a * container, rather we rely on an AIF coming from the * controller */ error = 0; break; case FSACTL_GET_PCI_INFO: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_PCI_INFO: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); error = aac_get_pci_info(sc, arg); break; case FSACTL_GET_FEATURES: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_FEATURES: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); error = aac_supported_features(sc, arg); break; default: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); error = EINVAL; break; } return(error); } static int aac_poll(struct cdev *dev, int poll_events, struct thread *td) { struct aac_softc *sc; struct aac_fib_context *ctx; int revents; sc = dev->si_drv1; revents = 0; mtx_lock(&sc->aac_aifq_lock); if ((poll_events &
(POLLRDNORM | POLLIN)) != 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { revents |= poll_events & (POLLIN | POLLRDNORM); break; } } } mtx_unlock(&sc->aac_aifq_lock); if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) selrecord(td, &sc->rcv_select); } return (revents); } static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) { switch (event->ev_type) { case AAC_EVENT_CMFREE: mtx_assert(&sc->aac_io_lock, MA_OWNED); if (aac_alloc_command(sc, (struct aac_command **)arg)) { aac_add_event(sc, event); return; } free(event, M_AACBUF); wakeup(arg); break; default: break; } } /* * Send a FIB supplied from userspace */ static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) { struct aac_command *cm; int size, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; /* * Get a command */ mtx_lock(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aac_add_event(sc, event); msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); } mtx_unlock(&sc->aac_io_lock); /* * Fetch the FIB header, then re-copy to get data as well. */ if ((error = copyin(ufib, cm->cm_fib, sizeof(struct aac_fib_header))) != 0) goto out; size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } if ((error = copyin(ufib, cm->cm_fib, size)) != 0) goto out; cm->cm_fib->Header.Size = size; cm->cm_timestamp = time_uptime; /* * Pass the FIB to the controller, wait for it to complete. */ mtx_lock(&sc->aac_io_lock); error = aac_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (error != 0) { device_printf(sc->aac_dev, "aac_wait_command return %d\n", error); goto out; } /* * Copy the FIB and data back out to the caller. 
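* (Header.Size is now adapter-supplied, so it is clamped to
* aac_max_fib_size a second time before the copyout(); without the
* clamp, a corrupt size could leak kernel memory beyond the FIB to
* userspace.)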
*/ size = cm->cm_fib->Header.Size; if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } error = copyout(cm->cm_fib, ufib, size); out: if (cm != NULL) { mtx_lock(&sc->aac_io_lock); aac_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Send a passthrough FIB supplied from userspace */ static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) { struct aac_command *cm; struct aac_event *event; struct aac_fib *fib; struct aac_srb *srbcmd, *user_srb; struct aac_sg_entry *sge; struct aac_sg_entry64 *sge64; void *srb_sg_address, *ureply; uint32_t fibsize, srb_sg_bytecount; int error, transfer_data; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; transfer_data = 0; fibsize = 0; user_srb = (struct aac_srb *)arg; mtx_lock(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { event = malloc(sizeof(struct aac_event), M_AACBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aac_add_event(sc, event); msleep(&cm, &sc->aac_io_lock, 0, "aacraw", 0); /* sleep on &cm to match the wakeup in aac_ioctl_event() */ } mtx_unlock(&sc->aac_io_lock); cm->cm_data = NULL; fib = cm->cm_fib; srbcmd = (struct aac_srb *)fib->data; error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); if (error != 0) goto out; if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { error = EINVAL; goto out; } error = copyin(user_srb, srbcmd, fibsize); if (error != 0) goto out; srbcmd->function = 0; srbcmd->retry_limit = 0; if (srbcmd->sg_map.SgCount > 1) { error = EINVAL; goto out; } /* Retrieve correct SG entries. */ if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { sge = srbcmd->sg_map.SgEntry; sge64 = NULL; srb_sg_bytecount = sge->SgByteCount; srb_sg_address = (void *)(uintptr_t)sge->SgAddress; } #ifdef __amd64__ else if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { sge = NULL; sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; srb_sg_bytecount = sge64->SgByteCount; srb_sg_address = (void *)sge64->SgAddress; if (sge64->SgAddress > 0xffffffffull && (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { error = EINVAL; goto out; } } #endif else { error = EINVAL; goto out; } ureply = (char *)arg + fibsize; srbcmd->data_len = srb_sg_bytecount; if (srbcmd->sg_map.SgCount == 1) transfer_data = 1; cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; if (transfer_data) { cm->cm_datalen = srb_sg_bytecount; cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); if (cm->cm_data == NULL) { error = ENOMEM; goto out; } if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) cm->cm_flags |= AAC_CMD_DATAIN; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { cm->cm_flags |= AAC_CMD_DATAOUT; error = copyin(srb_sg_address, cm->cm_data, cm->cm_datalen); if (error != 0) goto out; } } fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
ScsiPortCommandU64 : ScsiPortCommand; mtx_lock(&sc->aac_io_lock); aac_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); if (error != 0) goto out; } error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); out: if (cm != NULL) { if (cm->cm_data != NULL) free(cm->cm_data, M_AACBUF); mtx_lock(&sc->aac_io_lock); aac_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * cdevpriv interface private destructor. */ static void aac_cdevpriv_dtor(void *arg) { struct aac_softc *sc; sc = arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&Giant); device_unbusy(sc->aac_dev); mtx_unlock(&Giant); } /* * Handle an AIF sent to us by the controller; queue it for later reference. * If the queue fills up, then drop the older entries. */ static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) { struct aac_aif_command *aif; struct aac_container *co, *co_next; struct aac_fib_context *ctx; struct aac_mntinforesp *mir; int next, current, found; int count = 0, added = 0, i = 0; uint32_t channel; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); aif = (struct aac_aif_command*)&fib->data[0]; aac_print_aif(sc, aif); /* Is it an event that we should care about? */ switch (aif->command) { case AifCmdEventNotify: switch (aif->data.EN.type) { case AifEnAddContainer: case AifEnDeleteContainer: /* * A container was added or deleted, but the message * doesn't tell us anything else! Re-enumerate the * containers and sort things out. */ aac_alloc_sync_fib(sc, &fib); do { /* * Ask the controller for its containers one at * a time. * XXX What if the controller's list changes * midway through this enumeration? * XXX This should be done async. */ if ((mir = aac_get_container_info(sc, fib, i)) == NULL) continue; if (i == 0) count = mir->MntRespCount; /* * Check the container against our list. * co->co_found was already set to 0 in a * previous run. */ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { found = 0; TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == mir->MntTable[0].ObjectId) { co->co_found = 1; found = 1; break; } } /* * If the container matched, continue * in the list. */ if (found) { i++; continue; } /* * This is a new container. Do all the * appropriate things to set it up. */ aac_add_container(sc, mir, 1); added = 1; } i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); /* * Go through our list of containers and see which ones * were not marked 'found'. Since the controller didn't * list them they must have been deleted. Do the * appropriate steps to destroy the device. Also reset * the co->co_found field.
*/ co = TAILQ_FIRST(&sc->aac_container_tqh); while (co != NULL) { if (co->co_found == 0) { mtx_unlock(&sc->aac_io_lock); mtx_lock(&Giant); device_delete_child(sc->aac_dev, co->co_disk); mtx_unlock(&Giant); mtx_lock(&sc->aac_io_lock); co_next = TAILQ_NEXT(co, co_link); mtx_lock(&sc->aac_container_lock); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); mtx_unlock(&sc->aac_container_lock); free(co, M_AACBUF); co = co_next; } else { co->co_found = 0; co = TAILQ_NEXT(co, co_link); } } /* Attach the newly created containers */ if (added) { mtx_unlock(&sc->aac_io_lock); mtx_lock(&Giant); bus_generic_attach(sc->aac_dev); mtx_unlock(&Giant); mtx_lock(&sc->aac_io_lock); } break; case AifEnEnclosureManagement: switch (aif->data.EN.data.EEE.eventType) { case AIF_EM_DRIVE_INSERTION: case AIF_EM_DRIVE_REMOVAL: channel = aif->data.EN.data.EEE.unitID; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, (channel & 0xFFFF)); break; } break; case AifEnAddJBOD: case AifEnDeleteJBOD: channel = aif->data.EN.data.ECE.container; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, AAC_CAM_TARGET_WILDCARD); break; default: break; } default: break; } /* Copy the AIF data to the AIF queue for ioctl retrieval */ mtx_lock(&sc->aac_aifq_lock); current = sc->aifq_idx; next = (current + 1) % AAC_AIFQ_LENGTH; if (next == 0) sc->aifq_filled = 1; bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); /* modify AIF contexts */ if (sc->aifq_filled) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (next == ctx->ctx_idx) ctx->ctx_wrap = 1; else if (current == ctx->ctx_idx && ctx->ctx_wrap) ctx->ctx_idx = next; } } sc->aifq_idx = next; /* On the off chance that someone is sleeping for an aif... */ if (sc->aac_state & AAC_STATE_AIF_SLEEPER) wakeup(sc->aac_aifq); /* Wakeup any poll()ers */ selwakeuppri(&sc->rcv_select, PRIBIO); mtx_unlock(&sc->aac_aifq_lock); } /* * Return the Revision of the driver to userspace and check to see if the * userspace app is possibly compatible. This is extremely bogus since * our driver doesn't follow Adaptec's versioning system. Cheat by just * returning what the card reported. */ static int aac_rev_check(struct aac_softc *sc, caddr_t udata) { struct aac_rev_check rev_check; struct aac_rev_check_resp rev_check_resp; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Copyin the revision struct from userspace */ if ((error = copyin(udata, (caddr_t)&rev_check, sizeof(struct aac_rev_check))) != 0) { return error; } fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", rev_check.callingRevision.buildNumber); /* * Doctor up the response struct. 
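* We always claim compatibility (possiblyCompatible = 1) and fill in our own AAC_DRIVER_* version constants rather than anything Adaptec-shaped.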
*/ rev_check_resp.possiblyCompatible = 1; rev_check_resp.adapterSWRevision.external.comp.major = AAC_DRIVER_MAJOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.minor = AAC_DRIVER_MINOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.type = AAC_DRIVER_TYPE; rev_check_resp.adapterSWRevision.external.comp.dash = AAC_DRIVER_BUGFIX_LEVEL; rev_check_resp.adapterSWRevision.buildNumber = AAC_DRIVER_BUILD; return(copyout((caddr_t)&rev_check_resp, udata, sizeof(struct aac_rev_check_resp))); } /* * Pass the fib context to the caller */ static int aac_open_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *fibctx, *ctx; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); if (fibctx == NULL) return (ENOMEM); mtx_lock(&sc->aac_aifq_lock); /* all elements are already 0, add to queue */ if (sc->fibctx == NULL) sc->fibctx = fibctx; else { for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) ; ctx->next = fibctx; fibctx->prev = ctx; } /* evaluate unique value */ fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); ctx = sc->fibctx; while (ctx != fibctx) { if (ctx->unique == fibctx->unique) { fibctx->unique++; ctx = sc->fibctx; } else { ctx = ctx->next; } } mtx_unlock(&sc->aac_aifq_lock); error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); if (error) aac_close_aif(sc, (caddr_t)ctx); return error; } /* * Close the caller's fib context */ static int aac_close_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *ctx; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_aifq_lock); for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->unique == *(uint32_t *)&arg) { if (ctx == sc->fibctx) sc->fibctx = NULL; else { ctx->prev->next = ctx->next; if (ctx->next) ctx->next->prev = ctx->prev; } break; } } mtx_unlock(&sc->aac_aifq_lock); if (ctx) free(ctx, M_AACBUF); return 0; } /* * Pass the caller the next AIF in their queue */ static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg) { struct get_adapter_fib_ioctl agf; struct aac_fib_context *ctx; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (agf.AdapterFibContext == ctx->unique) break; } if (!ctx) return (EFAULT); error = aac_return_aif(sc, ctx, agf.AifFib); if (error == EAGAIN && agf.Wait) { fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); sc->aac_state |= AAC_STATE_AIF_SLEEPER; while (error == EAGAIN) { error = tsleep(sc->aac_aifq, PRIBIO | PCATCH, "aacaif", 0); if (error == 0) error = aac_return_aif(sc, ctx, agf.AifFib); } sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; } } return(error); } /* * Hand the next AIF off the top of the queue out to userspace. 
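* Returns EAGAIN once the caller's context has caught up with the queue head, i.e. there is currently nothing to hand out.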
*/ static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) { int current, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_aifq_lock); current = ctx->ctx_idx; if (current == sc->aifq_idx && !ctx->ctx_wrap) { /* empty */ mtx_unlock(&sc->aac_aifq_lock); return (EAGAIN); } error = copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); if (error) device_printf(sc->aac_dev, "aac_return_aif: copyout returned %d\n", error); else { ctx->ctx_wrap = 0; ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; } mtx_unlock(&sc->aac_aifq_lock); return(error); } static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) { struct aac_pci_info { u_int32_t bus; u_int32_t slot; } pciinf; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); pciinf.bus = pci_get_bus(sc->aac_dev); pciinf.slot = pci_get_slot(sc->aac_dev); error = copyout((caddr_t)&pciinf, uptr, sizeof(struct aac_pci_info)); return (error); } static int aac_supported_features(struct aac_softc *sc, caddr_t uptr) { struct aac_features f; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((error = copyin(uptr, &f, sizeof (f))) != 0) return (error); /* * When the management driver receives the FSACTL_GET_FEATURES ioctl * with ALL zero in the featuresState, the driver will return the * current state of all the supported features; the data field will * not be valid. * When the management driver receives the FSACTL_GET_FEATURES ioctl * with a specific bit set in the featuresState, the driver will * return the current state of this specific feature and whatever * data are associated with the feature in the data field, or perform * whatever action is indicated in the data field. */ if (f.feat.fValue == 0) { f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; /* TODO: In the future, add other features state here as well */ } else { if (f.feat.fBits.largeLBA) f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; /* TODO: Add other features state and data in the future */ } error = copyout(&f, uptr, sizeof (f)); return (error); } /* * Give the userland some information about the container. The AAC arch * expects the driver to be a SCSI passthrough type driver, so it expects * the containers to have b:t:l numbers. Fake it. */ static int aac_query_disk(struct aac_softc *sc, caddr_t uptr) { struct aac_query_disk query_disk; struct aac_container *co; struct aac_disk *disk; int error, id; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); disk = NULL; error = copyin(uptr, (caddr_t)&query_disk, sizeof(struct aac_query_disk)); if (error) return (error); id = query_disk.ContainerNumber; if (id == -1) return (EINVAL); mtx_lock(&sc->aac_container_lock); TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == id) break; } if (co == NULL) { query_disk.Valid = 0; query_disk.Locked = 0; query_disk.Deleted = 1; /* XXX is this right? */ } else { disk = device_get_softc(co->co_disk); query_disk.Valid = 1; query_disk.Locked = (disk->ad_flags & AAC_DISK_OPEN) ?
1 : 0; query_disk.Deleted = 0; query_disk.Bus = device_get_unit(sc->aac_dev); query_disk.Target = disk->unit; query_disk.Lun = 0; query_disk.UnMapped = 0; sprintf(&query_disk.diskDeviceName[0], "%s%d", disk->ad_disk->d_name, disk->ad_disk->d_unit); } mtx_unlock(&sc->aac_container_lock); error = copyout((caddr_t)&query_disk, uptr, sizeof(struct aac_query_disk)); return (error); } static void aac_get_bus_info(struct aac_softc *sc) { struct aac_fib *fib; struct aac_ctcfg *c_cmd; struct aac_ctcfg_resp *c_resp; struct aac_vmioctl *vmi; struct aac_vmi_businf_resp *vmi_resp; struct aac_getbusinf businfo; struct aac_sim *caminf; device_t child; int i, found, error; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); c_cmd = (struct aac_ctcfg *)&fib->data[0]; bzero(c_cmd, sizeof(struct aac_ctcfg)); c_cmd->Command = VM_ContainerConfig; c_cmd->cmd = CT_GET_SCSI_METHOD; c_cmd->param = 0; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_ctcfg)); if (error) { device_printf(sc->aac_dev, "Error %d sending " "VM_ContainerConfig command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; if (c_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", c_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } sc->scsi_method_id = c_resp->param; vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = GetBusInfo; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmi_businf_resp)); if (error) { device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; if (vmi_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", vmi_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); found = 0; for (i = 0; i < businfo.BusCount; i++) { if (businfo.BusValid[i] != AAC_BUS_VALID) continue; caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), M_AACBUF, M_NOWAIT | M_ZERO); if (caminf == NULL) { device_printf(sc->aac_dev, "No memory to add passthrough bus %d\n", i); break; }; child = device_add_child(sc->aac_dev, "aacp", -1); if (child == NULL) { device_printf(sc->aac_dev, "device_add_child failed for passthrough bus %d\n", i); free(caminf, M_AACBUF); break; } caminf->TargetsPerBus = businfo.TargetsPerBus; caminf->BusNumber = i; caminf->InitiatorBusId = businfo.InitiatorBusId[i]; caminf->aac_sc = sc; caminf->sim_dev = child; device_set_ivars(child, caminf); device_set_desc(child, "SCSI Passthrough Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); found = 1; } if (found) bus_generic_attach(sc->aac_dev); } Index: head/sys/dev/ath/if_ath_sysctl.c =================================================================== --- head/sys/dev/ath/if_ath_sysctl.c (revision 296271) +++ head/sys/dev/ath/if_ath_sysctl.c (revision 296272) @@ -1,1346 +1,1346 @@ /*- * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #ifdef ATH_DEBUG_ALQ #include #endif static int ath_sysctl_slottime(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int slottime; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); slottime = ath_hal_getslottime(sc->sc_ah); ATH_UNLOCK(sc); error = sysctl_handle_int(oidp, &slottime, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return error; } static int ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int acktimeout; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); acktimeout = ath_hal_getacktimeout(sc->sc_ah); ATH_UNLOCK(sc); error = sysctl_handle_int(oidp, &acktimeout, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int ctstimeout; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ctstimeout = ath_hal_getctstimeout(sc->sc_ah); ATH_UNLOCK(sc); error = sysctl_handle_int(oidp, &ctstimeout, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? 
EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_softled(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int softled = sc->sc_softled; int error; error = sysctl_handle_int(oidp, &softled, 0, req); if (error || !req->newptr) return error; softled = (softled != 0); if (softled != sc->sc_softled) { if (softled) { /* NB: handle any sc_ledpin change */ ath_led_config(sc); } sc->sc_softled = softled; } return 0; } static int ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int ledpin = sc->sc_ledpin; int error; error = sysctl_handle_int(oidp, &ledpin, 0, req); if (error || !req->newptr) return error; if (ledpin != sc->sc_ledpin) { sc->sc_ledpin = ledpin; if (sc->sc_softled) { ath_led_config(sc); } } return 0; } static int ath_sysctl_hardled(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int hardled = sc->sc_hardled; int error; error = sysctl_handle_int(oidp, &hardled, 0, req); if (error || !req->newptr) return error; hardled = (hardled != 0); if (hardled != sc->sc_hardled) { if (hardled) { /* NB: handle any sc_ledpin change */ ath_led_config(sc); } sc->sc_hardled = hardled; } return 0; } static int ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int txantenna; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); txantenna = ath_hal_getantennaswitch(sc->sc_ah); error = sysctl_handle_int(oidp, &txantenna, 0, req); if (!error && req->newptr) { /* XXX assumes 2 antenna ports */ if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B) { error = EINVAL; goto finish; } ath_hal_setantennaswitch(sc->sc_ah, txantenna); /* * NB: with the switch locked this isn't meaningful, * but set it anyway so things like radiotap get * consistent info in their data. */ sc->sc_txantenna = txantenna; } finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int defantenna; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); defantenna = ath_hal_getdefantenna(sc->sc_ah); ATH_UNLOCK(sc); error = sysctl_handle_int(oidp, &defantenna, 0, req); if (!error && req->newptr) ath_hal_setdefantenna(sc->sc_ah, defantenna); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_diversity(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int diversity; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); diversity = ath_hal_getdiversity(sc->sc_ah); error = sysctl_handle_int(oidp, &diversity, 0, req); if (error || !req->newptr) goto finish; if (!ath_hal_setdiversity(sc->sc_ah, diversity)) { error = EINVAL; goto finish; } sc->sc_diversity = diversity; error = 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_diag(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t diag; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); if (!ath_hal_getdiag(sc->sc_ah, &diag)) { error = EINVAL; goto finish; } error = sysctl_handle_int(oidp, &diag, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_setdiag(sc->sc_ah, diag) ? 
EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t scale; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); (void) ath_hal_gettpscale(sc->sc_ah, &scale); error = sysctl_handle_int(oidp, &scale, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL : (sc->sc_running) ? ath_reset(sc, ATH_RESET_NOLOSS) : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_tpc(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int tpc; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); tpc = ath_hal_gettpc(sc->sc_ah); error = sysctl_handle_int(oidp, &tpc, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; struct ath_hal *ah = sc->sc_ah; u_int rfkill; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); rfkill = ath_hal_getrfkill(ah); error = sysctl_handle_int(oidp, &rfkill, 0, req); if (error || !req->newptr) goto finish; if (rfkill == ath_hal_getrfkill(ah)) { /* unchanged */ error = 0; goto finish; } if (!ath_hal_setrfkill(ah, rfkill)) { error = EINVAL; goto finish; } error = sc->sc_running ? ath_reset(sc, ATH_RESET_FULL) : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_txagg(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int i, t, param = 0; int error; struct ath_buf *bf; error = sysctl_handle_int(oidp, &param, 0, req); if (error || !req->newptr) return error; if (param != 1) return 0; printf("no tx bufs (empty list): %d\n", sc->sc_stats.ast_tx_getnobuf); printf("no tx bufs (was busy): %d\n", sc->sc_stats.ast_tx_getbusybuf); printf("aggr single packet: %d\n", sc->sc_aggr_stats.aggr_single_pkt); printf("aggr single packet w/ BAW closed: %d\n", sc->sc_aggr_stats.aggr_baw_closed_single_pkt); printf("aggr non-baw packet: %d\n", sc->sc_aggr_stats.aggr_nonbaw_pkt); printf("aggr aggregate packet: %d\n", sc->sc_aggr_stats.aggr_aggr_pkt); printf("aggr single packet low hwq: %d\n", sc->sc_aggr_stats.aggr_low_hwq_single_pkt); printf("aggr single packet RTS aggr limited: %d\n", sc->sc_aggr_stats.aggr_rts_aggr_limited); printf("aggr sched, no work: %d\n", sc->sc_aggr_stats.aggr_sched_nopkt); for (i = 0; i < 64; i++) { printf("%2d: %10d ", i, sc->sc_aggr_stats.aggr_pkts[i]); if (i % 4 == 3) printf("\n"); } printf("\n"); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d, " "axq_fifo_depth=%d, holdingbf=%p\n", i, sc->sc_txq[i].axq_depth, sc->sc_txq[i].axq_aggr_depth, sc->sc_txq[i].axq_fifo_depth, sc->sc_txq[i].axq_holdingbf); } } i = t = 0; ATH_TXBUF_LOCK(sc); TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) { if (bf->bf_flags & ATH_BUF_BUSY) { printf("Busy: %d\n", t); i++; } t++; } ATH_TXBUF_UNLOCK(sc); printf("Total TX buffers: %d; Total TX buffers busy: %d (%d)\n", t, i, sc->sc_txbuf_cnt); i = t = 0; ATH_TXBUF_LOCK(sc); TAILQ_FOREACH(bf, &sc->sc_txbuf_mgmt, bf_list) { if (bf->bf_flags & ATH_BUF_BUSY) { printf("Busy: %d\n", t); i++; } t++; } ATH_TXBUF_UNLOCK(sc); printf("Total mgmt TX buffers: %d; Total mgmt TX
buffers busy: %d\n", t, i); ATH_RX_LOCK(sc); for (i = 0; i < 2; i++) { printf("%d: fifolen: %d/%d; head=%d; tail=%d; m_pending=%p, m_holdbf=%p\n", i, sc->sc_rxedma[i].m_fifo_depth, sc->sc_rxedma[i].m_fifolen, sc->sc_rxedma[i].m_fifo_head, sc->sc_rxedma[i].m_fifo_tail, sc->sc_rxedma[i].m_rxpending, sc->sc_rxedma[i].m_holdbf); } i = 0; TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { i++; } printf("Total RX buffers in free list: %d buffers\n", i); ATH_RX_UNLOCK(sc); return 0; } static int ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int rfsilent; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); (void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent); error = sysctl_handle_int(oidp, &rfsilent, 0, req); if (error || !req->newptr) goto finish; if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent)) { error = EINVAL; goto finish; } /* * Earlier chips (< AR5212) have up to 8 GPIO * pins exposed. * * AR5416 and later chips have many more GPIO * pins (up to 16) so the mask is expanded to * four bits. */ sc->sc_rfsilentpin = rfsilent & 0x3c; sc->sc_rfsilentpol = (rfsilent & 0x2) != 0; error = 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_tpack(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t tpack; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); (void) ath_hal_gettpack(sc->sc_ah, &tpack); error = sysctl_handle_int(oidp, &tpack, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t tpcts; int error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); (void) ath_hal_gettpcts(sc->sc_ah, &tpcts); error = sysctl_handle_int(oidp, &tpcts, 0, req); if (error || !req->newptr) goto finish; error = !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } static int ath_sysctl_intmit(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int intmit, error; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); intmit = ath_hal_getintmit(sc->sc_ah); error = sysctl_handle_int(oidp, &intmit, 0, req); if (error || !req->newptr) goto finish; /* reusing error; 1 here means "good"; 0 means "fail" */ error = ath_hal_setintmit(sc->sc_ah, intmit); if (! error) { error = EINVAL; goto finish; } /* * Reset the hardware here - disabling ANI in the HAL * doesn't reset ANI related registers, so it'll leave * things in an inconsistent state. 
*/ if (sc->sc_running) ath_reset(sc, ATH_RESET_NOLOSS); error = 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } #ifdef IEEE80211_SUPPORT_TDMA static int ath_sysctl_setcca(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int setcca, error; setcca = sc->sc_setcca; error = sysctl_handle_int(oidp, &setcca, 0, req); if (error || !req->newptr) return error; sc->sc_setcca = (setcca != 0); return 0; } #endif /* IEEE80211_SUPPORT_TDMA */ static int ath_sysctl_forcebstuck(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int val = 0; int error; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; if (val == 0) return 0; - taskqueue_enqueue_fast(sc->sc_tq, &sc->sc_bstucktask); + taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); val = 0; return 0; } static int ath_sysctl_hangcheck(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int val = 0; int error; uint32_t mask = 0xffffffff; uint32_t *sp; uint32_t rsize; struct ath_hal *ah = sc->sc_ah; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; if (val == 0) return 0; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); /* Do a hang check */ if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), (void *) &sp, &rsize)) { error = 0; goto finish; } device_printf(sc->sc_dev, "%s: sp=0x%08x\n", __func__, *sp); val = 0; error = 0; finish: ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (error); } #ifdef ATH_DEBUG_ALQ static int ath_sysctl_alq_log(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int error, enable; enable = (sc->sc_alq.sc_alq_isactive); error = sysctl_handle_int(oidp, &enable, 0, req); if (error || !req->newptr) return (error); else if (enable) error = if_ath_alq_start(&sc->sc_alq); else error = if_ath_alq_stop(&sc->sc_alq); return (error); } /* * Attach the ALQ debugging if required. 
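* "Required" here means the driver was built with ATH_DEBUG_ALQ; this whole block is compiled out otherwise.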
*/ static void ath_sysctl_alq_attach(struct ath_softc *sc) { struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "alq", CTLFLAG_RD, NULL, "Atheros ALQ logging parameters"); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "filename", CTLFLAG_RW, sc->sc_alq.sc_alq_filename, 0, "ALQ filename"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_alq_log, "I", ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debugmask", CTLFLAG_RW, &sc->sc_alq.sc_alq_debug, 0, "ALQ debug mask"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "numlost", CTLFLAG_RW, &sc->sc_alq.sc_alq_numlost, 0, "number lost"); } #endif /* ATH_DEBUG_ALQ */ void ath_sysctlattach(struct ath_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct ath_hal *ah = sc->sc_ah; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "countrycode", CTLFLAG_RD, &sc->sc_eecc, 0, "EEPROM country code"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "regdomain", CTLFLAG_RD, &sc->sc_eerd, 0, "EEPROM regdomain code"); #ifdef ATH_DEBUG SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, "control debugging printfs"); #endif #ifdef ATH_DEBUG_ALQ SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ktrdebug", CTLFLAG_RW, &sc->sc_ktrdebug, "control debugging KTR"); #endif /* ATH_DEBUG_ALQ */ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_slottime, "I", "802.11 slot time (us)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_softled, "I", "enable/disable software LED support"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_ledpin, "I", "GPIO pin connected to LED"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledon", CTLFLAG_RW, &sc->sc_ledon, 0, "setting to turn LED on"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0, "idle time for inactivity LED (ticks)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "hardled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_hardled, "I", "enable/disable hardware LED support"); /* XXX Laziness - configure pins, then flip hardled off/on */ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "led_net_pin", CTLFLAG_RW, &sc->sc_led_net_pin, 0, "MAC Network LED pin, or -1 to disable"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "led_pwr_pin", CTLFLAG_RW, &sc->sc_led_pwr_pin, 0, "MAC Power LED pin, or -1 to disable"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_txantenna, "I", "antenna switch"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_rxantenna, "I", "default/rx antenna"); if (ath_hal_hasdiversity(ah)) 
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_diversity, "I", "antenna diversity"); sc->sc_txintrperiod = ATH_TXINTR_PERIOD; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0, "tx descriptor batching"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_diag, "I", "h/w diagnostic control"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpscale, "I", "tx power scaling"); if (ath_hal_hastpc(ah)) { SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpc, "I", "enable/disable per-packet TPC"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpack, "I", "tx power for ack frames"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpcts, "I", "tx power for cts frames"); } if (ath_hal_hasrfsilent(ah)) { SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_rfsilent, "I", "h/w RF silent config"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_rfkill, "I", "enable/disable RF kill switch"); } SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txagg", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_txagg, "I", ""); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "forcebstuck", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_forcebstuck, "I", ""); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "hangcheck", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_hangcheck, "I", ""); if (ath_hal_hasintmit(ah)) { SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_intmit, "I", "interference mitigation"); } sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "monpass", CTLFLAG_RW, &sc->sc_monpass, 0, "mask of error frames to pass when monitoring"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "hwq_limit_nonaggr", CTLFLAG_RW, &sc->sc_hwq_limit_nonaggr, 0, "Hardware non-AMPDU queue depth before software-queuing TX frames"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "hwq_limit_aggr", CTLFLAG_RW, &sc->sc_hwq_limit_aggr, 0, "Hardware AMPDU queue depth before software-queuing TX frames"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tid_hwq_lo", CTLFLAG_RW, &sc->sc_tid_hwq_lo, 0, ""); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tid_hwq_hi", CTLFLAG_RW, &sc->sc_tid_hwq_hi, 0, ""); /* Aggregate length twiddles */ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "aggr_limit", CTLFLAG_RW, &sc->sc_aggr_limit, 0, "Maximum A-MPDU size, or 0 for 'default'"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rts_aggr_limit", CTLFLAG_RW, &sc->sc_rts_aggr_limit, 0, "Maximum A-MPDU size for RTS-protected frames, or '0' " "for default"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "delim_min_pad", CTLFLAG_RW, &sc->sc_delim_min_pad, 0, "Enforce a minimum number of delimiters per A-MPDU " " sub-frame"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txq_data_minfree", CTLFLAG_RW, &sc->sc_txq_data_minfree, 0, "Minimum free buffers before adding a data frame" " to the TX queue"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 
"txq_mcastq_maxdepth", CTLFLAG_RW, &sc->sc_txq_mcastq_maxdepth, 0, "Maximum buffer depth for multicast/broadcast frames"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txq_node_maxdepth", CTLFLAG_RW, &sc->sc_txq_node_maxdepth, 0, "Maximum buffer depth for a single node"); #if 0 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "cabq_enable", CTLFLAG_RW, &sc->sc_cabq_enable, 0, "Whether to transmit on the CABQ or not"); #endif #ifdef IEEE80211_SUPPORT_TDMA if (ath_hal_macversion(ah) > 0x78) { sc->sc_tdmadbaprep = 2; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "dbaprep", CTLFLAG_RW, &sc->sc_tdmadbaprep, 0, "TDMA DBA preparation time"); sc->sc_tdmaswbaprep = 10; SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "swbaprep", CTLFLAG_RW, &sc->sc_tdmaswbaprep, 0, "TDMA SWBA preparation time"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "guardtime", CTLFLAG_RW, &sc->sc_tdmaguard, 0, "TDMA slot guard time"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "superframe", CTLFLAG_RD, &sc->sc_tdmabintval, 0, "TDMA calculated super frame"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "setcca", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_setcca, "I", "enable CCA control"); } #endif #ifdef ATH_DEBUG_ALQ ath_sysctl_alq_attach(sc); #endif } static int ath_sysctl_clearstats(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int val = 0; int error; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; if (val == 0) return 0; /* Not clearing the stats is still valid */ memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); memset(&sc->sc_aggr_stats, 0, sizeof(sc->sc_aggr_stats)); memset(&sc->sc_intr_stats, 0, sizeof(sc->sc_intr_stats)); val = 0; return 0; } static void ath_sysctl_stats_attach_rxphyerr(struct ath_softc *sc, struct sysctl_oid_list *parent) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); int i; char sn[8]; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx_phy_err", CTLFLAG_RD, NULL, "Per-code RX PHY Errors"); child = SYSCTL_CHILDREN(tree); for (i = 0; i < 64; i++) { snprintf(sn, sizeof(sn), "%d", i); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD, &sc->sc_stats.ast_rx_phy[i], 0, ""); } } static void ath_sysctl_stats_attach_intr(struct ath_softc *sc, struct sysctl_oid_list *parent) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); int i; char sn[8]; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "sync_intr", CTLFLAG_RD, NULL, "Sync interrupt statistics"); child = SYSCTL_CHILDREN(tree); for (i = 0; i < 32; i++) { snprintf(sn, sizeof(sn), "%d", i); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD, &sc->sc_intr_stats.sync_intr[i], 0, ""); } } void ath_sysctl_stats_attach(struct ath_softc *sc) { struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); /* Create "clear" node */ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "clear_stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_clearstats, "I", "clear stats"); /* Create stats node */ tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics"); child = SYSCTL_CHILDREN(tree); /* This was generated from if_athioctl.h */ SYSCTL_ADD_UINT(ctx, 
child, OID_AUTO, "ast_watchdog", CTLFLAG_RD, &sc->sc_stats.ast_watchdog, 0, "device reset by watchdog"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_hardware", CTLFLAG_RD, &sc->sc_stats.ast_hardware, 0, "fatal hardware error interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_bmiss", CTLFLAG_RD, &sc->sc_stats.ast_bmiss, 0, "beacon miss interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_bmiss_phantom", CTLFLAG_RD, &sc->sc_stats.ast_bmiss_phantom, 0, "beacon miss interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_bstuck", CTLFLAG_RD, &sc->sc_stats.ast_bstuck, 0, "beacon stuck interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rxorn", CTLFLAG_RD, &sc->sc_stats.ast_rxorn, 0, "rx overrun interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rxeol", CTLFLAG_RD, &sc->sc_stats.ast_rxeol, 0, "rx eol interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_txurn", CTLFLAG_RD, &sc->sc_stats.ast_txurn, 0, "tx underrun interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_mib", CTLFLAG_RD, &sc->sc_stats.ast_mib, 0, "mib interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_intrcoal", CTLFLAG_RD, &sc->sc_stats.ast_intrcoal, 0, "interrupts coalesced"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_packets", CTLFLAG_RD, &sc->sc_stats.ast_tx_packets, 0, "packet sent on the interface"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_mgmt", CTLFLAG_RD, &sc->sc_stats.ast_tx_mgmt, 0, "management frames transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_discard", CTLFLAG_RD, &sc->sc_stats.ast_tx_discard, 0, "frames discarded prior to assoc"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_qstop", CTLFLAG_RD, &sc->sc_stats.ast_tx_qstop, 0, "output stopped 'cuz no buffer"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_encap", CTLFLAG_RD, &sc->sc_stats.ast_tx_encap, 0, "tx encapsulation failed"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nonode", CTLFLAG_RD, &sc->sc_stats.ast_tx_nonode, 0, "tx failed 'cuz no node"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nombuf", CTLFLAG_RD, &sc->sc_stats.ast_tx_nombuf, 0, "tx failed 'cuz no mbuf"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nomcl", CTLFLAG_RD, &sc->sc_stats.ast_tx_nomcl, 0, "tx failed 'cuz no cluster"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_linear", CTLFLAG_RD, &sc->sc_stats.ast_tx_linear, 0, "tx linearized to cluster"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nodata", CTLFLAG_RD, &sc->sc_stats.ast_tx_nodata, 0, "tx discarded empty frame"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_busdma", CTLFLAG_RD, &sc->sc_stats.ast_tx_busdma, 0, "tx failed for dma resrcs"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_xretries", CTLFLAG_RD, &sc->sc_stats.ast_tx_xretries, 0, "tx failed 'cuz too many retries"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_fifoerr", CTLFLAG_RD, &sc->sc_stats.ast_tx_fifoerr, 0, "tx failed 'cuz FIFO underrun"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_filtered", CTLFLAG_RD, &sc->sc_stats.ast_tx_filtered, 0, "tx failed 'cuz xmit filtered"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_shortretry", CTLFLAG_RD, &sc->sc_stats.ast_tx_shortretry, 0, "tx on-chip retries (short)"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_longretry", CTLFLAG_RD, &sc->sc_stats.ast_tx_longretry, 0, "tx on-chip retries (long)"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_badrate", CTLFLAG_RD, &sc->sc_stats.ast_tx_badrate, 0, "tx failed 'cuz bogus xmit rate"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_noack", CTLFLAG_RD, &sc->sc_stats.ast_tx_noack, 0, "tx frames with no ack marked"); 
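	/*
	 * Every entry in this block follows the same shape: publish one
	 * read-only counter out of sc->sc_stats under the "stats" node
	 * created above.  A minimal sketch of the pattern (the field name
	 * "ast_example" is illustrative, not a real member):
	 *
	 *	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_example",
	 *	    CTLFLAG_RD, &sc->sc_stats.ast_example, 0,
	 *	    "one-line description shown by sysctl -d");
	 */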
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_rts", CTLFLAG_RD, &sc->sc_stats.ast_tx_rts, 0, "tx frames with rts enabled"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_cts", CTLFLAG_RD, &sc->sc_stats.ast_tx_cts, 0, "tx frames with cts enabled"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_shortpre", CTLFLAG_RD, &sc->sc_stats.ast_tx_shortpre, 0, "tx frames with short preamble"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_altrate", CTLFLAG_RD, &sc->sc_stats.ast_tx_altrate, 0, "tx frames with alternate rate"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_protect", CTLFLAG_RD, &sc->sc_stats.ast_tx_protect, 0, "tx frames with protection"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_ctsburst", CTLFLAG_RD, &sc->sc_stats.ast_tx_ctsburst, 0, "tx frames with cts and bursting"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_ctsext", CTLFLAG_RD, &sc->sc_stats.ast_tx_ctsext, 0, "tx frames with cts extension"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_nombuf", CTLFLAG_RD, &sc->sc_stats.ast_rx_nombuf, 0, "rx setup failed 'cuz no mbuf"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_busdma", CTLFLAG_RD, &sc->sc_stats.ast_rx_busdma, 0, "rx setup failed for dma resrcs"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_orn", CTLFLAG_RD, &sc->sc_stats.ast_rx_orn, 0, "rx failed 'cuz of desc overrun"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_crcerr", CTLFLAG_RD, &sc->sc_stats.ast_rx_crcerr, 0, "rx failed 'cuz of bad CRC"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_fifoerr", CTLFLAG_RD, &sc->sc_stats.ast_rx_fifoerr, 0, "rx failed 'cuz of FIFO overrun"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_badcrypt", CTLFLAG_RD, &sc->sc_stats.ast_rx_badcrypt, 0, "rx failed 'cuz decryption"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_badmic", CTLFLAG_RD, &sc->sc_stats.ast_rx_badmic, 0, "rx failed 'cuz MIC failure"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_phyerr", CTLFLAG_RD, &sc->sc_stats.ast_rx_phyerr, 0, "rx failed 'cuz of PHY err"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_tooshort", CTLFLAG_RD, &sc->sc_stats.ast_rx_tooshort, 0, "rx discarded 'cuz frame too short"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_toobig", CTLFLAG_RD, &sc->sc_stats.ast_rx_toobig, 0, "rx discarded 'cuz frame too large"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_packets", CTLFLAG_RD, &sc->sc_stats.ast_rx_packets, 0, "packet recv on the interface"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_mgt", CTLFLAG_RD, &sc->sc_stats.ast_rx_mgt, 0, "management frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_ctl", CTLFLAG_RD, &sc->sc_stats.ast_rx_ctl, 0, "rx discarded 'cuz ctl frame"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_be_xmit", CTLFLAG_RD, &sc->sc_stats.ast_be_xmit, 0, "beacons transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_be_nombuf", CTLFLAG_RD, &sc->sc_stats.ast_be_nombuf, 0, "beacon setup failed 'cuz no mbuf"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_per_cal", CTLFLAG_RD, &sc->sc_stats.ast_per_cal, 0, "periodic calibration calls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_per_calfail", CTLFLAG_RD, &sc->sc_stats.ast_per_calfail, 0, "periodic calibration failed"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_per_rfgain", CTLFLAG_RD, &sc->sc_stats.ast_per_rfgain, 0, "periodic calibration rfgain reset"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rate_calls", CTLFLAG_RD, &sc->sc_stats.ast_rate_calls, 0, "rate control checks"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rate_raise", CTLFLAG_RD, &sc->sc_stats.ast_rate_raise, 0, "rate control raised xmit rate"); 
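	/*
	 * These all land under the device's sysctl tree, so (assuming the
	 * first ath(4) instance) a counter can be read from userland with,
	 * e.g.:
	 *
	 *	sysctl dev.ath.0.stats.ast_rate_calls
	 *
	 * The unit number is hypothetical; the parent node comes from
	 * device_get_sysctl_tree() above.
	 */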
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rate_drop", CTLFLAG_RD, &sc->sc_stats.ast_rate_drop, 0, "rate control dropped xmit rate"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ant_defswitch", CTLFLAG_RD, &sc->sc_stats.ast_ant_defswitch, 0, "rx/default antenna switches"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ant_txswitch", CTLFLAG_RD, &sc->sc_stats.ast_ant_txswitch, 0, "tx antenna switches"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_cabq_xmit", CTLFLAG_RD, &sc->sc_stats.ast_cabq_xmit, 0, "cabq frames transmitted"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_cabq_busy", CTLFLAG_RD, &sc->sc_stats.ast_cabq_busy, 0, "cabq found busy"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_raw", CTLFLAG_RD, &sc->sc_stats.ast_tx_raw, 0, "tx frames through raw api"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_txok", CTLFLAG_RD, &sc->sc_stats.ast_ff_txok, 0, "fast frames tx'd successfully"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_txerr", CTLFLAG_RD, &sc->sc_stats.ast_ff_txerr, 0, "fast frames tx'd w/ error"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_rx", CTLFLAG_RD, &sc->sc_stats.ast_ff_rx, 0, "fast frames rx'd"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_flush", CTLFLAG_RD, &sc->sc_stats.ast_ff_flush, 0, "fast frames flushed from staging q"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_qfull", CTLFLAG_RD, &sc->sc_stats.ast_tx_qfull, 0, "tx dropped 'cuz of queue limit"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nobuf", CTLFLAG_RD, &sc->sc_stats.ast_tx_nobuf, 0, "tx dropped 'cuz no ath buffer"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_update", CTLFLAG_RD, &sc->sc_stats.ast_tdma_update, 0, "TDMA slot timing updates"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_timers", CTLFLAG_RD, &sc->sc_stats.ast_tdma_timers, 0, "TDMA slot update set beacon timers"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_tsf", CTLFLAG_RD, &sc->sc_stats.ast_tdma_tsf, 0, "TDMA slot update set TSF"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_ack", CTLFLAG_RD, &sc->sc_stats.ast_tdma_ack, 0, "TDMA tx failed 'cuz ACK required"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_raw_fail", CTLFLAG_RD, &sc->sc_stats.ast_tx_raw_fail, 0, "raw tx failed 'cuz h/w down"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nofrag", CTLFLAG_RD, &sc->sc_stats.ast_tx_nofrag, 0, "tx dropped 'cuz no ath frag buffer"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_be_missed", CTLFLAG_RD, &sc->sc_stats.ast_be_missed, 0, "number of -missed- beacons"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ani_cal", CTLFLAG_RD, &sc->sc_stats.ast_ani_cal, 0, "number of ANI polls"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_agg", CTLFLAG_RD, &sc->sc_stats.ast_rx_agg, 0, "number of aggregate frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_halfgi", CTLFLAG_RD, &sc->sc_stats.ast_rx_halfgi, 0, "number of frames received with half-GI"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_2040", CTLFLAG_RD, &sc->sc_stats.ast_rx_2040, 0, "number of HT/40 frames received"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_pre_crc_err", CTLFLAG_RD, &sc->sc_stats.ast_rx_pre_crc_err, 0, "number of delimiter-CRC errors detected"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_post_crc_err", CTLFLAG_RD, &sc->sc_stats.ast_rx_post_crc_err, 0, "number of post-delimiter CRC errors detected"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_decrypt_busy_err", CTLFLAG_RD, &sc->sc_stats.ast_rx_decrypt_busy_err, 0, "number of frames received w/ busy decrypt engine"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_hi_rx_chain",
CTLFLAG_RD, &sc->sc_stats.ast_rx_hi_rx_chain, 0, ""); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_htprotect", CTLFLAG_RD, &sc->sc_stats.ast_tx_htprotect, 0, "HT tx frames with protection"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_hitqueueend", CTLFLAG_RD, &sc->sc_stats.ast_rx_hitqueueend, 0, "RX hit queue end"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_timeout", CTLFLAG_RD, &sc->sc_stats.ast_tx_timeout, 0, "TX Global Timeout"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_cst", CTLFLAG_RD, &sc->sc_stats.ast_tx_cst, 0, "TX Carrier Sense Timeout"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_xtxop", CTLFLAG_RD, &sc->sc_stats.ast_tx_xtxop, 0, "TX exceeded TXOP"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_timerexpired", CTLFLAG_RD, &sc->sc_stats.ast_tx_timerexpired, 0, "TX exceeded TX_TIMER register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_desccfgerr", CTLFLAG_RD, &sc->sc_stats.ast_tx_desccfgerr, 0, "TX Descriptor Cfg Error"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swretries", CTLFLAG_RD, &sc->sc_stats.ast_tx_swretries, 0, "TX software retry count"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swretrymax", CTLFLAG_RD, &sc->sc_stats.ast_tx_swretrymax, 0, "TX software retry max reached"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_data_underrun", CTLFLAG_RD, &sc->sc_stats.ast_tx_data_underrun, 0, ""); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_delim_underrun", CTLFLAG_RD, &sc->sc_stats.ast_tx_delim_underrun, 0, ""); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_aggr_failall", CTLFLAG_RD, &sc->sc_stats.ast_tx_aggr_failall, 0, "Number of aggregate TX failures (whole frame)"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_aggr_ok", CTLFLAG_RD, &sc->sc_stats.ast_tx_aggr_ok, 0, "Number of aggregate TX OK completions (subframe)"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_aggr_fail", CTLFLAG_RD, &sc->sc_stats.ast_tx_aggr_fail, 0, "Number of aggregate TX failures (subframe)"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_intr", CTLFLAG_RD, &sc->sc_stats.ast_rx_intr, 0, "RX interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_intr", CTLFLAG_RD, &sc->sc_stats.ast_tx_intr, 0, "TX interrupts"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_mcastq_overflow", CTLFLAG_RD, &sc->sc_stats.ast_tx_mcastq_overflow, 0, "Number of multicast frames exceeding maximum mcast queue depth"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_keymiss", CTLFLAG_RD, &sc->sc_stats.ast_rx_keymiss, 0, ""); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swfiltered", CTLFLAG_RD, &sc->sc_stats.ast_tx_swfiltered, 0, ""); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_stbc", CTLFLAG_RD, &sc->sc_stats.ast_rx_stbc, 0, "Number of STBC frames received"); /* Attach the RX phy error array */ ath_sysctl_stats_attach_rxphyerr(sc, child); /* Attach the interrupt statistics array */ ath_sysctl_stats_attach_intr(sc, child); } /* * This doesn't necessarily belong here (because it's HAL related, not * driver related). 
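* It publishes the HAL's ah_config tunables under a separate "hal" node of the device's sysctl tree.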
*/ void ath_sysctl_hal_attach(struct ath_softc *sc) { struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hal", CTLFLAG_RD, NULL, "Atheros HAL parameters"); child = SYSCTL_CHILDREN(tree); sc->sc_ah->ah_config.ah_debug = 0; SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_debug, 0, "Atheros HAL debugging printfs"); sc->sc_ah->ah_config.ah_ar5416_biasadj = 0; SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ar5416_biasadj", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_ar5416_biasadj, 0, "Enable 2GHz AR5416 direction sensitivity bias adjust"); sc->sc_ah->ah_config.ah_dma_beacon_response_time = 2; SYSCTL_ADD_INT(ctx, child, OID_AUTO, "dma_brt", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_dma_beacon_response_time, 0, "Atheros HAL DMA beacon response time"); sc->sc_ah->ah_config.ah_sw_beacon_response_time = 10; SYSCTL_ADD_INT(ctx, child, OID_AUTO, "sw_brt", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_sw_beacon_response_time, 0, "Atheros HAL software beacon response time"); sc->sc_ah->ah_config.ah_additional_swba_backoff = 0; SYSCTL_ADD_INT(ctx, child, OID_AUTO, "swba_backoff", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_additional_swba_backoff, 0, "Atheros HAL additional SWBA backoff time"); sc->sc_ah->ah_config.ah_force_full_reset = 0; SYSCTL_ADD_INT(ctx, child, OID_AUTO, "force_full_reset", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_force_full_reset, 0, "Force full chip reset rather than a warm reset"); /* * This is initialised by the driver. */ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "serialise_reg_war", CTLFLAG_RW, &sc->sc_ah->ah_config.ah_serialise_reg_war, 0, "Force register access serialisation"); } Index: head/sys/dev/bwn/if_bwn.c =================================================================== --- head/sys/dev/bwn/if_bwn.c (revision 296271) +++ head/sys/dev/bwn/if_bwn.c (revision 296272) @@ -1,14087 +1,14087 @@ /*- * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * The Broadcom Wireless LAN controller driver. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters"); /* * Tunable & sysctl variables. */ #ifdef BWN_DEBUG static int bwn_debug = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0, "Broadcom debugging printfs"); enum { BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ BWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ BWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ BWN_DEBUG_RESET = 0x00000010, /* reset processing */ BWN_DEBUG_OPS = 0x00000020, /* bwn_ops processing */ BWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ BWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ BWN_DEBUG_INTR = 0x00000100, /* ISR */ BWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ BWN_DEBUG_NODE = 0x00000400, /* node management */ BWN_DEBUG_LED = 0x00000800, /* led management */ BWN_DEBUG_CMD = 0x00001000, /* cmd submission */ BWN_DEBUG_LO = 0x00002000, /* LO */ BWN_DEBUG_FW = 0x00004000, /* firmware */ BWN_DEBUG_WME = 0x00008000, /* WME */ BWN_DEBUG_RF = 0x00010000, /* RF */ BWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ BWN_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0) #endif static int bwn_bfp = 0; /* use "Bad Frames Preemption" */ SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0, "uses Bad Frames Preemption"); static int bwn_bluetooth = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0, "turns on Bluetooth Coexistence"); static int bwn_hwpctl = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0, "uses H/W power control"); static int bwn_msi_disable = 0; /* MSI disabled */ TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable); static int bwn_usedma = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0, "uses DMA"); TUNABLE_INT("hw.bwn.usedma", &bwn_usedma); static int bwn_wme = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0, "uses WME support"); static void bwn_attach_pre(struct bwn_softc *); static int bwn_attach_post(struct bwn_softc *); static void bwn_sprom_bugfixes(device_t); static int bwn_init(struct bwn_softc *); static void bwn_parent(struct ieee80211com *); static void bwn_start(struct bwn_softc *); static int bwn_transmit(struct ieee80211com *, struct mbuf *); static int bwn_attach_core(struct bwn_mac *); static void bwn_reset_core(struct bwn_mac *, uint32_t); static int bwn_phy_getinfo(struct bwn_mac *, int); static int bwn_chiptest(struct bwn_mac *); static int bwn_setup_channels(struct bwn_mac *, int, int); static int bwn_phy_g_attach(struct bwn_mac *); static void bwn_phy_g_detach(struct bwn_mac *); static void bwn_phy_g_init_pre(struct bwn_mac *); static int bwn_phy_g_prepare_hw(struct bwn_mac *); static int bwn_phy_g_init(struct bwn_mac *); static void bwn_phy_g_exit(struct bwn_mac *); static uint16_t bwn_phy_g_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_write(struct bwn_mac *, uint16_t, uint16_t); static uint16_t bwn_phy_g_rf_read(struct bwn_mac *, uint16_t); static void 
bwn_phy_g_rf_write(struct bwn_mac *, uint16_t, uint16_t); static int bwn_phy_g_hwpctl(struct bwn_mac *); static void bwn_phy_g_rf_onoff(struct bwn_mac *, int); static int bwn_phy_g_switch_channel(struct bwn_mac *, uint32_t); static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *); static void bwn_phy_g_set_antenna(struct bwn_mac *, int); static int bwn_phy_g_im(struct bwn_mac *, int); static int bwn_phy_g_recalc_txpwr(struct bwn_mac *, int); static void bwn_phy_g_set_txpwr(struct bwn_mac *); static void bwn_phy_g_task_15s(struct bwn_mac *); static void bwn_phy_g_task_60s(struct bwn_mac *); static uint16_t bwn_phy_g_txctl(struct bwn_mac *); static void bwn_phy_switch_analog(struct bwn_mac *, int); static uint16_t bwn_shm_read_2(struct bwn_mac *, uint16_t, uint16_t); static void bwn_shm_write_2(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static uint32_t bwn_shm_read_4(struct bwn_mac *, uint16_t, uint16_t); static void bwn_shm_write_4(struct bwn_mac *, uint16_t, uint16_t, uint32_t); static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t); static void bwn_addchannels(struct ieee80211_channel [], int, int *, const struct bwn_channelinfo *, int); static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwn_updateslot(struct ieee80211com *); static void bwn_update_promisc(struct ieee80211com *); static void bwn_wme_init(struct bwn_mac *); static int bwn_wme_update(struct ieee80211com *); static void bwn_wme_clear(struct bwn_softc *); static void bwn_wme_load(struct bwn_mac *); static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *, uint16_t); static void bwn_scan_start(struct ieee80211com *); static void bwn_scan_end(struct ieee80211com *); static void bwn_set_channel(struct ieee80211com *); static struct ieee80211vap *bwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwn_vap_delete(struct ieee80211vap *); static void bwn_stop(struct bwn_softc *); static int bwn_core_init(struct bwn_mac *); static void bwn_core_start(struct bwn_mac *); static void bwn_core_exit(struct bwn_mac *); static void bwn_bt_disable(struct bwn_mac *); static int bwn_chip_init(struct bwn_mac *); static uint64_t bwn_hf_read(struct bwn_mac *); static void bwn_hf_write(struct bwn_mac *, uint64_t); static void bwn_set_txretry(struct bwn_mac *, int, int); static void bwn_rate_init(struct bwn_mac *); static void bwn_set_phytxctl(struct bwn_mac *); static void bwn_spu_setdelay(struct bwn_mac *, int); static void bwn_bt_enable(struct bwn_mac *); static void bwn_set_macaddr(struct bwn_mac *); static void bwn_crypt_init(struct bwn_mac *); static void bwn_chip_exit(struct bwn_mac *); static int bwn_fw_fillinfo(struct bwn_mac *); static int bwn_fw_loaducode(struct bwn_mac *); static int bwn_gpio_init(struct bwn_mac *); static int bwn_fw_loadinitvals(struct bwn_mac *); static int bwn_phy_init(struct bwn_mac *); static void bwn_set_txantenna(struct bwn_mac *, int); static void bwn_set_opmode(struct bwn_mac *); static void bwn_rate_write(struct bwn_mac *, uint16_t, int); static uint8_t bwn_plcp_getcck(const uint8_t); static uint8_t bwn_plcp_getofdm(const uint8_t); static void bwn_pio_init(struct bwn_mac *); static uint16_t bwn_pio_idx2base(struct bwn_mac *, int); static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *, int); static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue 
*, int); static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *); static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t); static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *); static int bwn_pio_rx(struct bwn_pio_rxqueue *); static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *); static void bwn_pio_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t); static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t); static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t); static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t); static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t); static uint32_t bwn_pio_write_multi_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint32_t, const void *, int); static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, uint32_t); static uint16_t bwn_pio_write_multi_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, const void *, int); static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t, struct bwn_pio_txpkt **); static void bwn_dma_init(struct bwn_mac *); static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t); static int bwn_dma_mask2type(uint64_t); static uint64_t bwn_dma_mask(struct bwn_mac *); static uint16_t bwn_dma_base(int, int); static void bwn_dma_ringfree(struct bwn_dma_ring **); static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_32_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_32_suspend(struct bwn_dma_ring *); static void bwn_dma_32_resume(struct bwn_dma_ring *); static int bwn_dma_32_get_curslot(struct bwn_dma_ring *); static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int); static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_64_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_64_suspend(struct bwn_dma_ring *); static void bwn_dma_64_resume(struct bwn_dma_ring *); static int bwn_dma_64_get_curslot(struct bwn_dma_ring *); static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int); static int bwn_dma_allocringmemory(struct bwn_dma_ring *); static void bwn_dma_setup(struct bwn_dma_ring *); static void bwn_dma_free_ringmemory(struct bwn_dma_ring *); static void bwn_dma_cleanup(struct bwn_dma_ring *); static void bwn_dma_free_descbufs(struct bwn_dma_ring *); static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_rx(struct bwn_dma_ring *); static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_free_descbuf(struct bwn_dma_ring *, struct bwn_dmadesc_meta *); static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *); static int bwn_dma_gettype(struct bwn_mac *); static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static int bwn_dma_freeslot(struct bwn_dma_ring *); static int 
bwn_dma_nextslot(struct bwn_dma_ring *, int); static void bwn_dma_rxeof(struct bwn_dma_ring *, int *); static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *, int); static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *); static void bwn_dma_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static int bwn_dma_getslot(struct bwn_dma_ring *); static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t); static int bwn_dma_attach(struct bwn_mac *); static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int, int); static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *, const struct bwn_txstatus *, uint16_t, int *); static void bwn_dma_free(struct bwn_mac *); static void bwn_phy_g_init_sub(struct bwn_mac *); static uint8_t bwn_has_hwpctl(struct bwn_mac *); static void bwn_phy_init_b5(struct bwn_mac *); static void bwn_phy_init_b6(struct bwn_mac *); static void bwn_phy_init_a(struct bwn_mac *); static void bwn_loopback_calcgain(struct bwn_mac *); static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *); static void bwn_lo_g_init(struct bwn_mac *); static void bwn_lo_g_adjust(struct bwn_mac *); static void bwn_lo_get_powervector(struct bwn_mac *); static struct bwn_lo_calib *bwn_lo_calibset(struct bwn_mac *, const struct bwn_bbatt *, const struct bwn_rfatt *); static void bwn_lo_write(struct bwn_mac *, struct bwn_loctl *); static void bwn_phy_hwpctl_init(struct bwn_mac *); static void bwn_phy_g_switch_chan(struct bwn_mac *, int, uint8_t); static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *, const struct bwn_bbatt *, const struct bwn_rfatt *, uint8_t); static void bwn_phy_g_set_bbatt(struct bwn_mac *, uint16_t); static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *, uint16_t, uint32_t); static void bwn_spu_workaround(struct bwn_mac *, uint8_t); static void bwn_wa_init(struct bwn_mac *); static void bwn_ofdmtab_write_2(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static void bwn_dummy_transmission(struct bwn_mac *, int, int); static void bwn_ofdmtab_write_4(struct bwn_mac *, uint16_t, uint16_t, uint32_t); static void bwn_gtab_write(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static void bwn_ram_write(struct bwn_mac *, uint16_t, uint32_t); static void bwn_mac_suspend(struct bwn_mac *); static void bwn_mac_enable(struct bwn_mac *); static void bwn_psctl(struct bwn_mac *, uint32_t); static int16_t bwn_nrssi_read(struct bwn_mac *, uint16_t); static void bwn_nrssi_offset(struct bwn_mac *); static void bwn_nrssi_threshold(struct bwn_mac *); static void bwn_nrssi_slope_11g(struct bwn_mac *); static void bwn_set_all_gains(struct bwn_mac *, int16_t, int16_t, int16_t); static void bwn_set_original_gains(struct bwn_mac *); static void bwn_hwpctl_early_init(struct bwn_mac *); static void bwn_hwpctl_init_gphy(struct bwn_mac *); static uint16_t bwn_phy_g_chan2freq(uint8_t); static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype); static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *, struct bwn_fwfile *); static void bwn_release_firmware(struct bwn_mac *); static void bwn_do_release_fw(struct bwn_fwfile *); static uint16_t bwn_fwcaps_read(struct bwn_mac *); static int bwn_fwinitvals_write(struct bwn_mac *, const struct bwn_fwinitvals *, size_t, size_t); static int bwn_switch_channel(struct bwn_mac *, int); 
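/*
 * The long run of forward declarations above and below covers the driver's
 * major subsystems: SHM access, the PIO and DMA data paths, the G and LP
 * PHY backends, LED handling and the periodic calibration tasks.
 *
 * As a minimal sketch of the SHM access convention implemented further
 * down: bwn_shm_ctlword() latches a {way, word offset} pair into
 * BWN_SHM_CONTROL before the data register is touched, e.g. for an
 * aligned 32-bit read:
 *
 *	bwn_shm_ctlword(mac, BWN_SHARED, offset >> 2);
 *	val = BWN_READ_4(mac, BWN_SHM_DATA);
 */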
static uint16_t bwn_ant2phy(int); static void bwn_mac_write_bssid(struct bwn_mac *); static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *); static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *, size_t, const uint8_t *); static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *); static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *); static void bwn_phy_exit(struct bwn_mac *); static void bwn_core_stop(struct bwn_mac *); static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *); static void bwn_phy_reset(struct bwn_mac *); static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwn_set_pretbtt(struct bwn_mac *); static int bwn_intr(void *); static void bwn_intrtask(void *, int); static void bwn_restart(struct bwn_mac *, const char *); static void bwn_intr_ucode_debug(struct bwn_mac *); static void bwn_intr_tbtt_indication(struct bwn_mac *); static void bwn_intr_atim_end(struct bwn_mac *); static void bwn_intr_beacon(struct bwn_mac *); static void bwn_intr_pmq(struct bwn_mac *); static void bwn_intr_noise(struct bwn_mac *); static void bwn_intr_txeof(struct bwn_mac *); static void bwn_hwreset(void *, int); static void bwn_handle_fwpanic(struct bwn_mac *); static void bwn_load_beacon0(struct bwn_mac *); static void bwn_load_beacon1(struct bwn_mac *); static uint32_t bwn_jssi_read(struct bwn_mac *); static void bwn_noise_gensample(struct bwn_mac *); static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *); static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t); static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *, struct mbuf *); static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static int bwn_phy_shm_tssi_read(struct bwn_mac *, uint16_t); static void bwn_phy_g_setatt(struct bwn_mac *, int *, int *); static void bwn_phy_lock(struct bwn_mac *); static void bwn_phy_unlock(struct bwn_mac *); static void bwn_rf_lock(struct bwn_mac *); static void bwn_rf_unlock(struct bwn_mac *); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_phy_g_dc_lookup_init(struct bwn_mac *, uint8_t); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static void bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void 
*); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_phy_lp_init_pre(struct bwn_mac *); static int bwn_phy_lp_init(struct bwn_mac *); static uint16_t bwn_phy_lp_read(struct bwn_mac *, uint16_t); static void bwn_phy_lp_write(struct bwn_mac *, uint16_t, uint16_t); static void bwn_phy_lp_maskset(struct bwn_mac *, uint16_t, uint16_t, uint16_t); static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *, uint16_t); static void bwn_phy_lp_rf_write(struct bwn_mac *, uint16_t, uint16_t); static void bwn_phy_lp_rf_onoff(struct bwn_mac *, int); static int bwn_phy_lp_switch_channel(struct bwn_mac *, uint32_t); static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *); static void bwn_phy_lp_set_antenna(struct bwn_mac *, int); static void bwn_phy_lp_task_60s(struct bwn_mac *); static void bwn_phy_lp_readsprom(struct bwn_mac *); static void bwn_phy_lp_bbinit(struct bwn_mac *); static void bwn_phy_lp_txpctl_init(struct bwn_mac *); static void bwn_phy_lp_calib(struct bwn_mac *); static void bwn_phy_lp_switch_analog(struct bwn_mac *, int); static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *, uint8_t); static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_anafilter(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_gaintbl(struct bwn_mac *, uint32_t); static void bwn_phy_lp_digflt_save(struct bwn_mac *); static void bwn_phy_lp_get_txpctlmode(struct bwn_mac *); static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *, uint8_t); static void bwn_phy_lp_bugfix(struct bwn_mac *); static void bwn_phy_lp_digflt_restore(struct bwn_mac *); static void bwn_phy_lp_tblinit(struct bwn_mac *); static void bwn_phy_lp_bbinit_r2(struct bwn_mac *); static void bwn_phy_lp_bbinit_r01(struct bwn_mac *); static void bwn_phy_lp_b2062_init(struct bwn_mac *); static void bwn_phy_lp_b2063_init(struct bwn_mac *); static void bwn_phy_lp_rxcal_r2(struct bwn_mac *); static void bwn_phy_lp_rccal_r12(struct bwn_mac *); static void bwn_phy_lp_set_rccap(struct bwn_mac *); static uint32_t bwn_phy_lp_roundup(uint32_t, uint32_t, uint8_t); static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *); static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *); static void bwn_tab_write_multi(struct bwn_mac *, uint32_t, int, const void *); static void bwn_tab_read_multi(struct bwn_mac *, uint32_t, int, void *); static struct bwn_txgain bwn_phy_lp_get_txgain(struct bwn_mac *); static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *); static void bwn_phy_lp_set_txgain(struct bwn_mac *, struct bwn_txgain *); static void bwn_phy_lp_set_bbmult(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_trsw_over(struct bwn_mac *, uint8_t, uint8_t); static void bwn_phy_lp_set_rxgain(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_deaf(struct bwn_mac *, uint8_t); static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *, uint16_t); static void bwn_phy_lp_clear_deaf(struct bwn_mac *, uint8_t); static void bwn_phy_lp_tblinit_r01(struct bwn_mac *); static void bwn_phy_lp_tblinit_r2(struct bwn_mac *); static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *); static void bwn_tab_write(struct bwn_mac *, uint32_t, uint32_t); static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *); static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *); static int bwn_phy_lp_loopback(struct bwn_mac *); static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *, uint16_t); static void 
bwn_phy_lp_ddfs_turnon(struct bwn_mac *, int, int, int, int, int); static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *, uint16_t, uint8_t, struct bwn_phy_lp_iq_est *); static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *); static uint32_t bwn_tab_read(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_override(struct bwn_mac *); static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *); static uint8_t bwn_nbits(int32_t); static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *, int, int, struct bwn_txgain_entry *); static void bwn_phy_lp_gaintbl_write(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_sysctl_node(struct bwn_softc *); static struct resource_spec bwn_res_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec bwn_res_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 
151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; static const uint8_t bwn_b2063_chantable_data[33][12] = { { 0x6f, 0x3c, 0x3c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x2c, 0x2c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0xc, 0xc, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6a, 0xc, 0xc, 0, 0x2, 0x5, 0xd, 0xd, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x5, 0xd, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xb, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x4, 0xb, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xa, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x2, 0x9, 0x9, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x1, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x67, 0xc, 0xc, 0, 0, 0, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x2, 0x1, 0x77, 0x20, 0, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x1, 0x1, 0x77, 0x20, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0x1, 0, 0x77, 0x10, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x61, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x60, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xe, 0xf, 0xf, 0x77, 0xc0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xd, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xb, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xa, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x7, 0x9, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x6, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x5, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 } }; static const struct bwn_b206x_chan bwn_b2063_chantable[] = { { 1, 2412, bwn_b2063_chantable_data[0] }, { 2, 2417, bwn_b2063_chantable_data[0] }, { 3, 2422, bwn_b2063_chantable_data[0] }, { 4, 2427, bwn_b2063_chantable_data[1] }, { 5, 2432, bwn_b2063_chantable_data[1] }, { 6, 2437, bwn_b2063_chantable_data[1] }, { 7, 2442, bwn_b2063_chantable_data[1] }, { 8, 2447, bwn_b2063_chantable_data[1] }, { 9, 2452, bwn_b2063_chantable_data[2] }, { 10, 2457, bwn_b2063_chantable_data[2] }, { 11, 2462, bwn_b2063_chantable_data[3] }, { 12, 2467, 
bwn_b2063_chantable_data[3] }, { 13, 2472, bwn_b2063_chantable_data[3] }, { 14, 2484, bwn_b2063_chantable_data[4] }, { 34, 5170, bwn_b2063_chantable_data[5] }, { 36, 5180, bwn_b2063_chantable_data[6] }, { 38, 5190, bwn_b2063_chantable_data[7] }, { 40, 5200, bwn_b2063_chantable_data[8] }, { 42, 5210, bwn_b2063_chantable_data[9] }, { 44, 5220, bwn_b2063_chantable_data[10] }, { 46, 5230, bwn_b2063_chantable_data[11] }, { 48, 5240, bwn_b2063_chantable_data[12] }, { 52, 5260, bwn_b2063_chantable_data[13] }, { 56, 5280, bwn_b2063_chantable_data[14] }, { 60, 5300, bwn_b2063_chantable_data[14] }, { 64, 5320, bwn_b2063_chantable_data[15] }, { 100, 5500, bwn_b2063_chantable_data[16] }, { 104, 5520, bwn_b2063_chantable_data[17] }, { 108, 5540, bwn_b2063_chantable_data[18] }, { 112, 5560, bwn_b2063_chantable_data[19] }, { 116, 5580, bwn_b2063_chantable_data[20] }, { 120, 5600, bwn_b2063_chantable_data[21] }, { 124, 5620, bwn_b2063_chantable_data[21] }, { 128, 5640, bwn_b2063_chantable_data[22] }, { 132, 5660, bwn_b2063_chantable_data[22] }, { 136, 5680, bwn_b2063_chantable_data[22] }, { 140, 5700, bwn_b2063_chantable_data[23] }, { 149, 5745, bwn_b2063_chantable_data[23] }, { 153, 5765, bwn_b2063_chantable_data[23] }, { 157, 5785, bwn_b2063_chantable_data[23] }, { 161, 5805, bwn_b2063_chantable_data[23] }, { 165, 5825, bwn_b2063_chantable_data[23] }, { 184, 4920, bwn_b2063_chantable_data[24] }, { 188, 4940, bwn_b2063_chantable_data[25] }, { 192, 4960, bwn_b2063_chantable_data[26] }, { 196, 4980, bwn_b2063_chantable_data[27] }, { 200, 5000, bwn_b2063_chantable_data[28] }, { 204, 5020, bwn_b2063_chantable_data[29] }, { 208, 5040, bwn_b2063_chantable_data[30] }, { 212, 5060, bwn_b2063_chantable_data[31] }, { 216, 5080, bwn_b2063_chantable_data[32] } }; static const uint8_t bwn_b2062_chantable_data[22][12] = { { 0xff, 0xff, 0xb5, 0x1b, 0x24, 0x32, 0x32, 0x88, 0x88, 0, 0, 0 }, { 0, 0x22, 0x20, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x20, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x63, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x62, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x30, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x20, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x10, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0x55, 0x77, 0x90, 0xf7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x77, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x66, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x33, 0x66, 0x70, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xd7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x44, 0x50, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x11, 0x44, 0x50, 0xa5, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x44, 0x40, 0xb6, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 } }; static const struct bwn_b206x_chan bwn_b2062_chantable[] = { { 1, 2412, bwn_b2062_chantable_data[0] }, { 2, 2417, bwn_b2062_chantable_data[0] }, { 3, 2422, bwn_b2062_chantable_data[0] }, { 4, 2427, bwn_b2062_chantable_data[0] }, { 5, 2432, bwn_b2062_chantable_data[0] }, { 6, 2437, bwn_b2062_chantable_data[0] }, { 7, 2442, 
bwn_b2062_chantable_data[0] }, { 8, 2447, bwn_b2062_chantable_data[0] }, { 9, 2452, bwn_b2062_chantable_data[0] }, { 10, 2457, bwn_b2062_chantable_data[0] }, { 11, 2462, bwn_b2062_chantable_data[0] }, { 12, 2467, bwn_b2062_chantable_data[0] }, { 13, 2472, bwn_b2062_chantable_data[0] }, { 14, 2484, bwn_b2062_chantable_data[0] }, { 34, 5170, bwn_b2062_chantable_data[1] }, { 38, 5190, bwn_b2062_chantable_data[2] }, { 42, 5210, bwn_b2062_chantable_data[2] }, { 46, 5230, bwn_b2062_chantable_data[3] }, { 36, 5180, bwn_b2062_chantable_data[4] }, { 40, 5200, bwn_b2062_chantable_data[5] }, { 44, 5220, bwn_b2062_chantable_data[6] }, { 48, 5240, bwn_b2062_chantable_data[3] }, { 52, 5260, bwn_b2062_chantable_data[3] }, { 56, 5280, bwn_b2062_chantable_data[3] }, { 60, 5300, bwn_b2062_chantable_data[7] }, { 64, 5320, bwn_b2062_chantable_data[8] }, { 100, 5500, bwn_b2062_chantable_data[9] }, { 104, 5520, bwn_b2062_chantable_data[10] }, { 108, 5540, bwn_b2062_chantable_data[10] }, { 112, 5560, bwn_b2062_chantable_data[10] }, { 116, 5580, bwn_b2062_chantable_data[11] }, { 120, 5600, bwn_b2062_chantable_data[12] }, { 124, 5620, bwn_b2062_chantable_data[12] }, { 128, 5640, bwn_b2062_chantable_data[12] }, { 132, 5660, bwn_b2062_chantable_data[12] }, { 136, 5680, bwn_b2062_chantable_data[12] }, { 140, 5700, bwn_b2062_chantable_data[12] }, { 149, 5745, bwn_b2062_chantable_data[12] }, { 153, 5765, bwn_b2062_chantable_data[12] }, { 157, 5785, bwn_b2062_chantable_data[12] }, { 161, 5805, bwn_b2062_chantable_data[12] }, { 165, 5825, bwn_b2062_chantable_data[12] }, { 184, 4920, bwn_b2062_chantable_data[13] }, { 188, 4940, bwn_b2062_chantable_data[14] }, { 192, 4960, bwn_b2062_chantable_data[15] }, { 196, 4980, bwn_b2062_chantable_data[16] }, { 200, 5000, bwn_b2062_chantable_data[17] }, { 204, 5020, bwn_b2062_chantable_data[18] }, { 208, 5040, bwn_b2062_chantable_data[19] }, { 212, 5060, bwn_b2062_chantable_data[20] }, { 216, 5080, bwn_b2062_chantable_data[21] } }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_5354[] = { { 1, -66, 15 }, { 2, -66, 15 }, { 3, -66, 15 }, { 4, -66, 15 }, { 5, -66, 15 }, { 6, -66, 15 }, { 7, -66, 14 }, { 8, -66, 14 }, { 9, -66, 14 }, { 10, -66, 14 }, { 11, -66, 14 }, { 12, -66, 13 }, { 13, -66, 13 }, { 14, -66, 13 }, }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_r12[] = { { 1, -64, 13 }, { 2, -64, 13 }, { 3, -64, 13 }, { 4, -64, 13 }, { 5, -64, 12 }, { 6, -64, 12 }, { 7, -64, 12 }, { 8, -64, 12 }, { 9, -64, 12 }, { 10, -64, 11 }, { 11, -64, 11 }, { 12, -64, 11 }, { 13, -64, 11 }, { 14, -64, 10 }, { 34, -62, 24 }, { 38, -62, 24 }, { 42, -62, 24 }, { 46, -62, 23 }, { 36, -62, 24 }, { 40, -62, 24 }, { 44, -62, 23 }, { 48, -62, 23 }, { 52, -62, 23 }, { 56, -62, 22 }, { 60, -62, 22 }, { 64, -62, 22 }, { 100, -62, 16 }, { 104, -62, 16 }, { 108, -62, 15 }, { 112, -62, 14 }, { 116, -62, 14 }, { 120, -62, 13 }, { 124, -62, 12 }, { 128, -62, 12 }, { 132, -62, 12 }, { 136, -62, 11 }, { 140, -62, 10 }, { 149, -61, 9 }, { 153, -61, 9 }, { 157, -61, 9 }, { 161, -61, 8 }, { 165, -61, 8 }, { 184, -62, 25 }, { 188, -62, 25 }, { 192, -62, 25 }, { 196, -62, 25 }, { 200, -62, 25 }, { 204, -62, 25 }, { 208, -62, 25 }, { 212, -62, 25 }, { 216, -62, 26 }, }; static const struct bwn_rxcompco bwn_rxcompco_r2 = { 0, -64, 0 }; static const uint8_t bwn_tab_sigsq_tbl[] = { 0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd, 0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd, 0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, }; static const uint8_t bwn_tab_pllfrac_tbl[] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, }; static const uint16_t bwn_tabl_iqlocal_tbl[] = { 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint16_t bwn_tab_noise_g1[] = BWN_TAB_NOISE_G1; static const uint16_t bwn_tab_noise_g2[] = BWN_TAB_NOISE_G2; static const uint16_t bwn_tab_noisescale_g1[] = BWN_TAB_NOISESCALE_G1; static const uint16_t bwn_tab_noisescale_g2[] = BWN_TAB_NOISESCALE_G2; static const uint16_t bwn_tab_noisescale_g3[] = BWN_TAB_NOISESCALE_G3; const uint8_t bwn_bitrev_table[256] = BWN_BITREV_TABLE; #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; static const struct siba_devid bwn_devs[] = { SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"), SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"), SIBA_DEV(BROADCOM, 80211, 7, "Revision 7"), SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"), SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"), SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"), SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"), SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"), SIBA_DEV(BROADCOM, 80211, 16, "Revision 16") }; static int bwn_probe(device_t dev) { int i; for (i = 0; i < sizeof(bwn_devs) / sizeof(bwn_devs[0]); i++) { if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor && siba_get_device(dev) == bwn_devs[i].sd_device && siba_get_revid(dev) == bwn_devs[i].sd_rev) return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc = device_get_softc(dev); int error, i, msic, reg; sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { bwn_attach_pre(sc); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } if (!TAILQ_EMPTY(&sc->sc_maclist)) { if (siba_get_pci_device(dev) != 0x4313 
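/* presumably the only parts whose additional 802.11 cores are usable;
 * every other device attaches a single core only */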
&& siba_get_pci_device(dev) != 0x431a && siba_get_pci_device(dev) != 0x4321) { device_printf(sc->sc_dev, "skip 802.11 cores\n"); return (ENODEV); } } mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO); mac->mac_sc = sc; mac->mac_status = BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail0; bwn_led_attach(mac); device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev), mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_method.dma.dmatype); else device_printf(sc->sc_dev, "PIO\n"); /* * set up PCI resources and the interrupt. */ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { msic = pci_msi_count(dev); if (bootverbose) device_printf(sc->sc_dev, "MSI count: %d\n", msic); } else msic = 0; mac->mac_intr_spec = bwn_res_spec_legacy; if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) { if (pci_alloc_msi(dev, &msic) == 0) { device_printf(sc->sc_dev, "Using %d MSI messages\n", msic); mac->mac_intr_spec = bwn_res_spec_msi; mac->mac_msi = 1; } } error = bus_alloc_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (error) { device_printf(sc->sc_dev, "couldn't allocate IRQ resources (%d)\n", error); goto fail1; } if (mac->mac_msi == 0) error = bus_setup_intr(dev, mac->mac_res_irq[0], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[0]); else { for (i = 0; i < BWN_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, mac->mac_res_irq[i], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[i]); if (error != 0) { device_printf(sc->sc_dev, "couldn't setup interrupt (%d)\n", error); break; } } } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * call the attach-post routine */ if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail1: if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) pci_release_msi(dev); fail0: free(mac, M_DEVBUF); return (error); } static int bwn_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) return (FALSE); return (TRUE); } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ IEEE80211_ADDR_COPY(ic->ic_macaddr, bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ? siba_sprom_get_mac_80211a(sc->sc_dev) : siba_sprom_get_mac_80211bg(sc->sc_dev)); /* call MI attach routine.
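 * ieee80211_ifattach() installs net80211's machine-independent state and
 * default methods; the assignments below then override the methods this
 * driver implements itself.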
*/ ieee80211_ifattach(ic); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ic->ic_transmit = bwn_transmit; ic->ic_parent = bwn_parent; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } static int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ieee80211com *ic = &sc->sc_ic; int i; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); for (i = 0; i < BWN_MSI_MESSAGES; i++) { if (mac->mac_intrhand[i] != NULL) { bus_teardown_intr(dev, mac->mac_res_irq[i], mac->mac_intrhand[i]); mac->mac_intrhand[i] = NULL; } } bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (mac->mac_msi != 0) pci_release_msi(dev); mbufq_drain(&sc->sc_snd); BWN_LOCK_DESTROY(sc); return (0); } static void bwn_attach_pre(struct bwn_softc *sc) { BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); } static void bwn_sprom_bugfixes(device_t dev) { #define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \ ((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \ (siba_get_pci_device(dev) == _device) && \ (siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \ (siba_get_pci_subdevice(dev) == _subdevice)) if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE && siba_get_pci_subdevice(dev) == 0x4e && siba_get_pci_revid(dev) > 0x40) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL); if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL && siba_get_chipid(dev) == 0x4301 && siba_get_pci_revid(dev) == 0x74) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST); if (siba_get_type(dev) == SIBA_TYPE_PCI) { if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) || BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) || BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) || BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010)) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST); } #undef BWN_ISDEV } static void bwn_parent(struct 
ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; int startall = 0; BWN_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { bwn_init(sc); startall = 1; } else bwn_update_promisc(ic); } else if (sc->sc_flags & BWN_FLAG_RUNNING) bwn_stop(sc); BWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int bwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct bwn_softc *sc = ic->ic_softc; int error; BWN_LOCK(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { BWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { BWN_UNLOCK(sc); return (error); } bwn_start(sc); BWN_UNLOCK(sc); return (0); } static void bwn_start(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); counter_u64_add(sc->sc_ic.ic_oerrors, 1); continue; } wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) goto full; } return (0); full: mbufq_prepend(&sc->sc_snd, m); return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? 
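/* dispatch to the descriptor-ring DMA path or the PIO FIFO path */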
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m)); struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (siba_get_revid(sc->sc_dev) >= 8) { /* * XXX please remove m_defrag(9) */ m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto
fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } else { m = m_new; } mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint32_t high; KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("unsupported revision %d", siba_get_revid(sc->sc_dev))); siba_powerup(sc->sc_dev, 0); high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); bwn_reset_core(mac, (high & BWN_TGSHIGH_HAVE_2GHZ) ? BWN_TGSLOW_SUPPORT_G : 0); error = bwn_phy_getinfo(mac, high); if (error) goto fail; have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0; have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 
1 : 0; if (siba_get_pci_device(sc->sc_dev) != 0x4312 && siba_get_pci_device(sc->sc_dev) != 0x4319 && siba_get_pci_device(sc->sc_dev) != 0x4324) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* XXX turns off PHY A because it's not supported */ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { have_a = 0; have_bg = 1; } if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed\n"); goto fail; } } bwn_reset_core(mac, have_bg ? 
BWN_TGSLOW_SUPPORT_G : 0); error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); fail: siba_powerdown(sc->sc_dev); return (error); } static void bwn_reset_core(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; uint32_t low, ctl; flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET); siba_dev_up(sc->sc_dev, flags); DELAY(2000); low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) & ~BWN_TGSLOW_PHYRESET; siba_write_4(sc->sc_dev, SIBA_TGSLOW, low); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, low & ~SIBA_TGSLOW_FGC); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (flags & BWN_TGSLOW_SUPPORT_G) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); } static int bwn_phy_getinfo(struct bwn_mac *mac, int tgshigh) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = (tgshigh & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 4) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ if (siba_get_chipid(sc->sc_dev) == 0x4317) { if (siba_get_chiprev(sc->sc_dev) == 0) tmp = 0x3205017f; else if (siba_get_chiprev(sc->sc_dev) == 1) tmp = 0x4205017f; else tmp = 0x5205017f; } else { BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; } phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, 
BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((siba_get_revid(sc->sc_dev) >= 3) && (siba_get_revid(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT | IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT | IEEE80211_CHAN_A) static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; if (have_bg) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, IEEE80211_CHAN_G); if (mac->mac_phy.type == BWN_PHYTYPE_N) { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_n, IEEE80211_CHAN_HTA); } else { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, IEEE80211_CHAN_A); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? ENXIO : 0); } static uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } static uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } static void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } static void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t value) { 
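/*
 * The bwn_shm_* helpers above funnel every shared-memory access through one
 * control register: the routing "way" sits in the upper 16 bits and the
 * 32-bit-word index in the lower 16, after which the data register is read
 * or written.  A 16-bit-aligned but not 32-bit-aligned offset is split
 * across the unaligned-data register and the following word.  A standalone
 * sketch of that arithmetic (plain C, not driver code):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t
 *	shm_ctlword(uint16_t way, uint16_t word_index)
 *	{
 *		return (((uint32_t)way << 16) | word_index);
 *	}
 *
 * so a 32-bit read at byte offset 6 becomes two 16-bit accesses, at word
 * index 1 (unaligned data register, upper half) and word index 2 (data
 * register, lower half), matching the (offset & 0x0003) branches above.
 */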
BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_2(mac, BWN_SHM_DATA, value); } static void bwn_addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow) { c->ic_freq = freq; c->ic_flags = flags; c->ic_ieee = ieee; c->ic_minpower = 0; c->ic_maxpower = 2 * txpow; c->ic_maxregpower = txpow; } static void bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const struct bwn_channelinfo *ci, int flags) { struct ieee80211_channel *c; int i; c = &chans[*nchans]; for (i = 0; i < ci->nchannels; i++) { const struct bwn_channel *hc; hc = &ci->channels[i]; if (*nchans >= maxchans) break; bwn_addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow); c++, (*nchans)++; if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) { /* G channels have a separate B-only entry */ if (*nchans >= maxchans) break; c[0] = c[-1]; c[-1].ic_flags = IEEE80211_CHAN_B; c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTG) { /* HT G channels have a separate G-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_G; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } if (flags == IEEE80211_CHAN_HTA) { /* HT A channels have a separate A-only entry */ if (*nchans >= maxchans) break; c[-1].ic_flags = IEEE80211_CHAN_A; c[0] = c[-1]; c[0].ic_flags &= ~IEEE80211_CHAN_HT; c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */ c++, (*nchans)++; } } } static int bwn_phy_g_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; unsigned int i; int16_t pab0, pab1, pab2; static int8_t bwn_phy_g_tssi2dbm_table[] = BWN_PHY_G_TSSI2DBM_TABLE; int8_t bg; bg = (int8_t)siba_sprom_get_tssi_bg(sc->sc_dev); pab0 = (int16_t)siba_sprom_get_pa0b0(sc->sc_dev); pab1 = (int16_t)siba_sprom_get_pa0b1(sc->sc_dev); pab2 = (int16_t)siba_sprom_get_pa0b2(sc->sc_dev); if ((siba_get_chipid(sc->sc_dev) == 0x4301) && (phy->rf_ver != 0x2050)) device_printf(sc->sc_dev, "not supported anymore\n"); pg->pg_flags = 0; if (pab0 == 0 || pab1 == 0 || pab2 == 0 || pab0 == -1 || pab1 == -1 || pab2 == -1) { pg->pg_idletssi = 52; pg->pg_tssi2dbm = bwn_phy_g_tssi2dbm_table; return (0); } pg->pg_idletssi = (bg == 0 || bg == -1) ?
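/*
 * bwn_addchannels() above can emit up to three ic_channels entries per
 * 2.4 GHz hardware channel: the entry as added, a legacy B-only copy, and,
 * for the HT table, an HT20 copy.  After the rewriting of c[-1] in each
 * branch, one IEEE80211_CHAN_HTG channel ends up as (illustrative, flag
 * words abbreviated):
 *
 *	slot n   : 2412 MHz, IEEE80211_CHAN_B
 *	slot n+1 : 2412 MHz, IEEE80211_CHAN_G
 *	slot n+2 : 2412 MHz, IEEE80211_CHAN_G | IEEE80211_CHAN_HT20
 */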
62 : bg; pg->pg_tssi2dbm = (uint8_t *)malloc(64, M_DEVBUF, M_NOWAIT | M_ZERO); if (pg->pg_tssi2dbm == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer\n"); return (ENOMEM); } for (i = 0; i < 64; i++) { int32_t m1, m2, f, q, delta; int8_t j = 0; m1 = BWN_TSSI2DBM(16 * pab0 + i * pab1, 32); m2 = MAX(BWN_TSSI2DBM(32768 + i * pab2, 256), 1); f = 256; do { if (j > 15) { device_printf(sc->sc_dev, "failed to generate tssi2dBm\n"); free(pg->pg_tssi2dbm, M_DEVBUF); return (ENOMEM); } q = BWN_TSSI2DBM(f * 4096 - BWN_TSSI2DBM(m2 * f, 16) * f, 2048); delta = abs(q - f); f = q; j++; } while (delta >= 2); pg->pg_tssi2dbm[i] = MIN(MAX(BWN_TSSI2DBM(m1 * f, 8192), -127), 128); } pg->pg_flags |= BWN_PHY_G_FLAG_TSSITABLE_ALLOC; return (0); } static void bwn_phy_g_detach(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; if (pg->pg_flags & BWN_PHY_G_FLAG_TSSITABLE_ALLOC) { free(pg->pg_tssi2dbm, M_DEVBUF); pg->pg_tssi2dbm = NULL; } pg->pg_flags = 0; } static void bwn_phy_g_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; void *tssi2dbm; int idletssi; unsigned int i; tssi2dbm = pg->pg_tssi2dbm; idletssi = pg->pg_idletssi; memset(pg, 0, sizeof(*pg)); pg->pg_tssi2dbm = tssi2dbm; pg->pg_idletssi = idletssi; memset(pg->pg_minlowsig, 0xff, sizeof(pg->pg_minlowsig)); for (i = 0; i < N(pg->pg_nrssi); i++) pg->pg_nrssi[i] = -1000; for (i = 0; i < N(pg->pg_nrssi_lt); i++) pg->pg_nrssi_lt[i] = i; pg->pg_lofcal = 0xffff; pg->pg_initval = 0xffff; pg->pg_immode = BWN_IMMODE_NONE; pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_UNKNOWN; pg->pg_avgtssi = 0xff; pg->pg_loctl.tx_bias = 0xff; TAILQ_INIT(&pg->pg_loctl.calib_list); } static int bwn_phy_g_prepare_hw(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; static const struct bwn_rfatt rfatt0[] = { { 3, 0 }, { 1, 0 }, { 5, 0 }, { 7, 0 }, { 9, 0 }, { 2, 0 }, { 0, 0 }, { 4, 0 }, { 6, 0 }, { 8, 0 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, { 4, 1 } }; static const struct bwn_rfatt rfatt1[] = { { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 10, 1 }, { 12, 1 }, { 14, 1 } }; static const struct bwn_rfatt rfatt2[] = { { 0, 1 }, { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 9, 1 }, { 9, 1 } }; static const struct bwn_bbatt bbatt_0[] = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 } }; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); if (phy->rf_ver == 0x2050 && phy->rf_rev < 6) pg->pg_bbatt.att = 0; else pg->pg_bbatt.att = 2; /* prepare Radio Attenuation */ pg->pg_rfatt.padmix = 0; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G) { if (siba_get_pci_revid(sc->sc_dev) < 0x43) { pg->pg_rfatt.att = 2; goto done; } else if (siba_get_pci_revid(sc->sc_dev) < 0x51) { pg->pg_rfatt.att = 3; goto done; } } if (phy->type == BWN_PHYTYPE_A) { pg->pg_rfatt.att = 0x60; goto done; } switch (phy->rf_ver) { case 0x2050: switch (phy->rf_rev) { case 0: pg->pg_rfatt.att = 5; goto done; case 1: if (phy->type == BWN_PHYTYPE_G) { if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 3; else if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) pg->pg_rfatt.att = 3; else pg->pg_rfatt.att = 1; } else { if 
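/*
 * The tssi2dbm table above is generated entirely in integer fixed point,
 * with f scaled so that 256 represents 1.0: the do/while loop iterates
 * f = (4096*f - (m2*f/16)*f) / 2048 until two successive values differ by
 * less than 2 (capped at 16 rounds), then stores m1*f/8192 clamped to
 * [-127, 128].  A standalone sketch of one iteration step, assuming
 * BWN_TSSI2DBM() is a round-to-nearest divide (an assumption, not taken
 * from this file):
 *
 *	static int32_t
 *	q_div(int32_t num, int32_t den)
 *	{
 *		return ((num + den / 2) / den);
 *	}
 *
 *	static int32_t
 *	q_step(int32_t m2, int32_t f)
 *	{
 *		return (q_div(f * 4096 - q_div(m2 * f, 16) * f, 2048));
 *	}
 */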
(siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 7; else pg->pg_rfatt.att = 6; } goto done; case 2: if (phy->type == BWN_PHYTYPE_G) { if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G && siba_get_pci_revid(sc->sc_dev) >= 30) pg->pg_rfatt.att = 3; else if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) pg->pg_rfatt.att = 5; else if (siba_get_chipid(sc->sc_dev) == 0x4320) pg->pg_rfatt.att = 4; else pg->pg_rfatt.att = 3; } else pg->pg_rfatt.att = 6; goto done; case 3: pg->pg_rfatt.att = 5; goto done; case 4: case 5: pg->pg_rfatt.att = 1; goto done; case 6: case 7: pg->pg_rfatt.att = 5; goto done; case 8: pg->pg_rfatt.att = 0xa; pg->pg_rfatt.padmix = 1; goto done; case 9: default: pg->pg_rfatt.att = 5; goto done; } break; case 0x2053: switch (phy->rf_rev) { case 1: pg->pg_rfatt.att = 6; goto done; } break; } pg->pg_rfatt.att = 5; done: pg->pg_txctl = (bwn_phy_g_txctl(mac) << 4); if (!bwn_has_hwpctl(mac)) { lo->rfatt.array = rfatt0; lo->rfatt.len = N(rfatt0); lo->rfatt.min = 0; lo->rfatt.max = 9; goto genbbatt; } if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) { lo->rfatt.array = rfatt1; lo->rfatt.len = N(rfatt1); lo->rfatt.min = 0; lo->rfatt.max = 14; goto genbbatt; } lo->rfatt.array = rfatt2; lo->rfatt.len = N(rfatt2); lo->rfatt.min = 0; lo->rfatt.max = 9; genbbatt: lo->bbatt.array = bbatt_0; lo->bbatt.len = N(bbatt_0); lo->bbatt.min = 0; lo->bbatt.max = 8; BWN_READ_4(mac, BWN_MACCTL); if (phy->rev == 1) { phy->gmode = 0; bwn_reset_core(mac, 0); bwn_phy_g_init_sub(mac); phy->gmode = 1; bwn_reset_core(mac, BWN_TGSLOW_SUPPORT_G); } return (0); } static uint16_t bwn_phy_g_txctl(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (phy->rf_ver != 0x2050) return (0); if (phy->rf_rev == 1) return (BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX); if (phy->rf_rev < 6) return (BWN_TXCTL_PA2DB); if (phy->rf_rev == 8) return (BWN_TXCTL_TXMIX); return (0); } static int bwn_phy_g_init(struct bwn_mac *mac) { bwn_phy_g_init_sub(mac); return (0); } static void bwn_phy_g_exit(struct bwn_mac *mac) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; struct bwn_lo_calib *cal, *tmp; if (lo == NULL) return; TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) { TAILQ_REMOVE(&lo->calib_list, cal, list); free(cal, M_DEVBUF); } } static uint16_t bwn_phy_g_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } static void bwn_phy_g_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } static uint16_t bwn_phy_g_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_2(mac, BWN_RFCTL, reg | 0x80); return (BWN_READ_2(mac, BWN_RFDATALO)); } static void bwn_phy_g_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } static int bwn_phy_g_hwpctl(struct bwn_mac *mac) { return (mac->mac_phy.rev >= 6); } static void bwn_phy_g_rf_onoff(struct bwn_mac *mac, int on) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; unsigned int channel; uint16_t rfover, rfoverval; if (on) { if (phy->rf_on) return; 
BWN_PHY_WRITE(mac, 0x15, 0x8000); BWN_PHY_WRITE(mac, 0x15, 0xcc00); BWN_PHY_WRITE(mac, 0x15, (phy->gmode ? 0xc0 : 0x0)); if (pg->pg_flags & BWN_PHY_G_FLAG_RADIOCTX_VALID) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, pg->pg_radioctx_over); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, pg->pg_radioctx_overval); pg->pg_flags &= ~BWN_PHY_G_FLAG_RADIOCTX_VALID; } channel = phy->chan; bwn_phy_g_switch_chan(mac, 6, 1); bwn_phy_g_switch_chan(mac, channel, 0); return; } rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); pg->pg_radioctx_over = rfover; pg->pg_radioctx_overval = rfoverval; pg->pg_flags |= BWN_PHY_G_FLAG_RADIOCTX_VALID; BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover | 0x008c); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval & 0xff73); } static int bwn_phy_g_switch_channel(struct bwn_mac *mac, uint32_t newchan) { if ((newchan < 1) || (newchan > 14)) return (EINVAL); bwn_phy_g_switch_chan(mac, newchan, 0); return (0); } static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *mac) { return (1); } static void bwn_phy_g_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; uint64_t hf; int autodiv = 0; uint16_t tmp; if (antenna == BWN_ANTAUTO0 || antenna == BWN_ANTAUTO1) autodiv = 1; hf = bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER; bwn_hf_write(mac, hf); BWN_PHY_WRITE(mac, BWN_PHY_BBANDCFG, (BWN_PHY_READ(mac, BWN_PHY_BBANDCFG) & ~BWN_PHY_BBANDCFG_RXANT) | ((autodiv ? BWN_ANTAUTO1 : antenna) << BWN_PHY_BBANDCFG_RXANT_SHIFT)); if (autodiv) { tmp = BWN_PHY_READ(mac, BWN_PHY_ANTDWELL); if (antenna == BWN_ANTAUTO1) tmp &= ~BWN_PHY_ANTDWELL_AUTODIV1; else tmp |= BWN_PHY_ANTDWELL_AUTODIV1; BWN_PHY_WRITE(mac, BWN_PHY_ANTDWELL, tmp); } tmp = BWN_PHY_READ(mac, BWN_PHY_ANTWRSETT); if (autodiv) tmp |= BWN_PHY_ANTWRSETT_ARXDIV; else tmp &= ~BWN_PHY_ANTWRSETT_ARXDIV; BWN_PHY_WRITE(mac, BWN_PHY_ANTWRSETT, tmp); if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM61, BWN_PHY_READ(mac, BWN_PHY_OFDM61) | BWN_PHY_OFDM61_10); BWN_PHY_WRITE(mac, BWN_PHY_DIVSRCHGAINBACK, (BWN_PHY_READ(mac, BWN_PHY_DIVSRCHGAINBACK) & 0xff00) | 0x15); if (phy->rev == 2) BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, 8); else BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, (BWN_PHY_READ(mac, BWN_PHY_ADIVRELATED) & 0xff00) | 8); } if (phy->rev >= 6) BWN_PHY_WRITE(mac, BWN_PHY_OFDM9B, 0xdc); hf |= BWN_HF_UCODE_ANTDIV_HELPER; bwn_hf_write(mac, hf); } static int bwn_phy_g_im(struct bwn_mac *mac, int mode) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); KASSERT(mode == BWN_IMMODE_NONE, ("%s: fail", __func__)); if (phy->rev == 0 || !phy->gmode) return (ENODEV); pg->pg_aci_wlan_automatic = 0; return (0); } static int bwn_phy_g_recalc_txpwr(struct bwn_mac *mac, int ignore_tssi) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; unsigned int tssi; int cck, ofdm; int power; int rfatt, bbatt; unsigned int max; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); cck = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_CCK); ofdm = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_OFDM_G); if (cck < 0 && ofdm < 0) { if (ignore_tssi == 0) return (BWN_TXPWR_RES_DONE); cck = 0; ofdm = 0; } tssi = (cck < 0) ? ofdm : ((ofdm < 0) ? 
cck : (cck + ofdm) / 2); if (pg->pg_avgtssi != 0xff) tssi = (tssi + pg->pg_avgtssi) / 2; pg->pg_avgtssi = tssi; KASSERT(tssi < BWN_TSSI_MAX, ("%s:%d: fail", __func__, __LINE__)); max = siba_sprom_get_maxpwr_bg(sc->sc_dev); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) max -= 3; if (max >= 120) { device_printf(sc->sc_dev, "invalid max TX-power value\n"); max = 80; siba_sprom_set_maxpwr_bg(sc->sc_dev, max); } power = MIN(MAX((phy->txpower < 0) ? 0 : (phy->txpower << 2), 0), max) - (pg->pg_tssi2dbm[MIN(MAX(pg->pg_idletssi - pg->pg_curtssi + tssi, 0x00), 0x3f)]); if (power == 0) return (BWN_TXPWR_RES_DONE); rfatt = -((power + 7) / 8); bbatt = (-(power / 2)) - (4 * rfatt); if ((rfatt == 0) && (bbatt == 0)) return (BWN_TXPWR_RES_DONE); pg->pg_bbatt_delta = bbatt; pg->pg_rfatt_delta = rfatt; return (BWN_TXPWR_RES_NEED_ADJUST); } static void bwn_phy_g_set_txpwr(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int rfatt, bbatt; uint8_t txctl; bwn_mac_suspend(mac); BWN_ASSERT_LOCKED(sc); bbatt = pg->pg_bbatt.att; bbatt += pg->pg_bbatt_delta; rfatt = pg->pg_rfatt.att; rfatt += pg->pg_rfatt_delta; bwn_phy_g_setatt(mac, &bbatt, &rfatt); txctl = pg->pg_txctl; if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 2)) { if (rfatt <= 1) { if (txctl == 0) { txctl = BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX; rfatt += 2; bbatt += 2; } else if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { bbatt += 4 * (rfatt - 2); rfatt = 2; } } else if (rfatt > 4 && txctl) { txctl = 0; if (bbatt < 3) { rfatt -= 3; bbatt += 2; } else { rfatt -= 2; bbatt -= 2; } } } pg->pg_txctl = txctl; bwn_phy_g_setatt(mac, &bbatt, &rfatt); pg->pg_rfatt.att = rfatt; pg->pg_bbatt.att = bbatt; DPRINTF(sc, BWN_DEBUG_TXPOW, "%s: adjust TX power\n", __func__); bwn_phy_lock(mac); bwn_rf_lock(mac); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); bwn_rf_unlock(mac); bwn_phy_unlock(mac); bwn_mac_enable(mac); } static void bwn_phy_g_task_15s(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; unsigned long expire, now; struct bwn_lo_calib *cal, *tmp; uint8_t expired = 0; bwn_mac_suspend(mac); if (lo == NULL) goto fail; BWN_GETTIME(now); if (bwn_has_hwpctl(mac)) { expire = now - BWN_LO_PWRVEC_EXPIRE; if (time_before(lo->pwr_vec_read_time, expire)) { bwn_lo_get_powervector(mac); bwn_phy_g_dc_lookup_init(mac, 0); } goto fail; } expire = now - BWN_LO_CALIB_EXPIRE; TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) { if (!time_before(cal->calib_time, expire)) continue; if (BWN_BBATTCMP(&cal->bbatt, &pg->pg_bbatt) && BWN_RFATTCMP(&cal->rfatt, &pg->pg_rfatt)) { KASSERT(!expired, ("%s:%d: fail", __func__, __LINE__)); expired = 1; } DPRINTF(sc, BWN_DEBUG_LO, "expired BB %u RF %u %u I %d Q %d\n", cal->bbatt.att, cal->rfatt.att, cal->rfatt.padmix, cal->ctl.i, cal->ctl.q); TAILQ_REMOVE(&lo->calib_list, cal, list); free(cal, M_DEVBUF); } if (expired || TAILQ_EMPTY(&lo->calib_list)) { cal = bwn_lo_calibset(mac, &pg->pg_bbatt, &pg->pg_rfatt); if (cal == NULL) { device_printf(sc->sc_dev, "failed to recalibrate LO\n"); goto fail; } TAILQ_INSERT_TAIL(&lo->calib_list, cal, list); bwn_lo_write(mac, &cal->ctl); } fail: bwn_mac_enable(mac); } static void bwn_phy_g_task_60s(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint8_t old = phy->chan; if (!(siba_sprom_get_bf_lo(sc->sc_dev) & 
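/*
 * In bwn_phy_g_recalc_txpwr() above, "power" is the remaining adjustment in
 * quarter-dB units; one radio-attenuation step is worth 8 of them and one
 * baseband step 2 (the same 4x ratio shows up in bwn_phy_g_set_txpwr()).
 * rfatt = -((power + 7) / 8) takes the coarse steps and
 * bbatt = -(power / 2) - 4 * rfatt backs out the remainder.  Worked example
 * for power = +10 (raise output by 2.5 dB):
 *
 *	rfatt = -((10 + 7) / 8)      = -2
 *	bbatt = -(10 / 2) - 4 * (-2) =  3
 *
 * net attenuation change: -2*8 + 3*2 = -10 quarter-dB, i.e. +2.5 dB of
 * transmit power.
 */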
BWN_BFL_RSSI)) return; bwn_mac_suspend(mac); bwn_nrssi_slope_11g(mac); if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) { bwn_switch_channel(mac, (old >= 8) ? 1 : 13); bwn_switch_channel(mac, old); } bwn_mac_enable(mac); } static void bwn_phy_switch_analog(struct bwn_mac *mac, int on) { BWN_WRITE_2(mac, BWN_PHY0, on ? 0 : 0xf4); } static int bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac->mac_status < BWN_MAC_STATUS_STARTED) { m_freem(m); return (ENETDOWN); } BWN_LOCK(sc); if (bwn_tx_isfull(sc, m)) { m_freem(m); BWN_UNLOCK(sc); return (ENOBUFS); } error = bwn_tx_start(sc, ni, m); if (error == 0) sc->sc_watchdog_timer = 5; BWN_UNLOCK(sc); return (error); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void bwn_updateslot(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); if (sc->sc_flags & BWN_FLAG_RUNNING) { mac = (struct bwn_mac *)sc->sc_curmac; bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic)); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void bwn_update_promisc(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { if (ic->ic_promisc > 0) sc->sc_filters |= BWN_MACCTL_PROMISC; else sc->sc_filters &= ~BWN_MACCTL_PROMISC; bwn_set_opmode(mac); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer to update WME parameters. 
*/ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct wmeParams *wmep; int i; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = htole16(ic->ic_curchan->ic_flags & 0xffff); BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211vap *vap; struct bwn_vap *bvp; switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); return (vap); } static void 
bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static int bwn_init(struct bwn_softc *sc) { struct bwn_mac *mac; int error; BWN_ASSERT_LOCKED(sc); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); sc->sc_flags |= BWN_FLAG_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; BWN_ASSERT_LOCKED(sc); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; sc->sc_flags &= ~BWN_FLAG_RUNNING; } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); siba_powerup(sc->sc_dev, 0); if (!siba_dev_isup(sc->sc_dev)) bwn_reset_core(mac, mac->mac_phy.gmode ? BWN_TGSLOW_SUPPORT_G : 0); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 
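/*
 * The MS() macro in bwn_wme_clear() above is the usual mask-and-shift field
 * extractor; it relies on every field mask having a companion *_S shift
 * constant that token pasting can reach.  A standalone illustration with
 * hypothetical constants (not from net80211):
 *
 *	#define MYFIELD		0x00f0
 *	#define MYFIELD_S	4
 *	#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
 *
 *	// MS(0xab57, MYFIELD) == (0x0050 >> 4) == 0x5
 */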
1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); siba_pcicore_intr(sc->sc_dev); siba_fix_imcfglobug(sc->sc_dev); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, siba_get_revid(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) && (siba_get_pcicore_revid(sc->sc_dev) <= 10)) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); siba_powerup(sc->sc_dev, !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initialization */ mac->mac_status = BWN_MAC_STATUS_INITED; return (error); fail0: siba_powerdown(sc->sc_dev); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (siba_get_revid(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); siba_powerdown(sc->sc_dev); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (siba_get_revid(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (siba_get_revid(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); siba_write_4(sc->sc_dev, SIBA_TGSLOW, siba_read_4(sc->sc_dev, SIBA_TGSLOW) | 0x00100000); BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev)); return (error); } /* read hostflags */ static uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } static void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, 
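/*
 * bwn_hf_read()/bwn_hf_write() above keep the 48-bit host flags in three
 * 16-bit shared-memory words (LO/MI/HI); the split is plain shift-and-mask
 * work.  A standalone sketch (plain C, not driver code):
 *
 *	#include <stdint.h>
 *
 *	static void
 *	hf_split(uint64_t hf, uint16_t w[3])
 *	{
 *		w[0] = hf & 0xffff;		// LO
 *		w[1] = (hf >> 16) & 0xffff;	// MI
 *		w[2] = (hf >> 32) & 0xffff;	// HI
 *	}
 *
 *	static uint64_t
 *	hf_join(const uint16_t w[3])
 *	{
 *		return (((uint64_t)w[2] << 32) |
 *		    ((uint64_t)w[1] << 16) | w[0]);
 *	}
 */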
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (siba_get_revid(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (siba_get_revid(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = siba_get_revid(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; int type; uint16_t base; type = bwn_dma_mask2type(bwn_dma_mask(mac)); base = bwn_dma_base(type, idx); if (type == BWN_DMA_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static uint64_t bwn_dma_mask(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_BIT_MASK(64)); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_BIT_MASK(32)); return (BWN_DMA_BIT_MASK(30)); } static int bwn_dma_mask2type(uint64_t dmamask) { if (dmamask == BWN_DMA_BIT_MASK(30)) return (BWN_DMA_30BIT); if (dmamask == BWN_DMA_BIT_MASK(32)) return (BWN_DMA_32BIT); if (dmamask == 
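/*
 * bwn_dma_mask() above probes the engine instead of trusting a table: a
 * 64-bit engine advertises itself in SIBA_TGSHIGH, and otherwise the code
 * writes the address-extension bits into a 32-bit TX control register and
 * reads them back, since only an engine that implements the extension
 * (32-bit addressing) keeps them set; anything else is the old 30-bit
 * core.  If BWN_DMA_BIT_MASK(n) is the usual ((1ULL << (n)) - 1) (an
 * assumption, not taken from this file), the three results are 0x3fffffff,
 * 0xffffffff and 0xffffffffffffffff, which bwn_dma_mask2type() here maps
 * back to BWN_DMA_{30,32,64}BIT.
 */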
BWN_DMA_BIT_MASK(64)) return (BWN_DMA_64BIT); KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (BWN_DMA_30BIT); } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BWN_DMA_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx, int type) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = type; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(type, controller_index); dr->dr_index = controller_index; if (type == BWN_DMA_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET; } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS is divisible by * BWN_TX_SLOTS_PER_FRAME */ KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = malloc((dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(dr->dr_txhdr_cache != NULL, ("%s:%d: fail", __func__, __LINE__)); /* * Create TX ring DMA structures */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) {
device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto fail1; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail1; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail1; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! */ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: free(dr->dr_txhdr_cache, M_DEVBUF); fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); free((*dr)->dr_txhdr_cache, M_DEVBUF); free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; uint32_t addr, addrext, ctl; int slot; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK); addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30; addr |= siba_dma_translation(sc->sc_dev); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) { 
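/*
 * The 32-bit descriptor callbacks keep the hardware view and the driver's
 * slot index interchangeable: the index/status registers hold byte offsets
 * into the descriptor table, so converting is a multiply or divide by
 * sizeof(struct bwn_dmadesc32) (8 bytes: control word plus address word).
 * A standalone sketch of the conversion (plain C, not driver code):
 *
 *	#include <stdint.h>
 *
 *	struct desc32 { uint32_t control; uint32_t address; };
 *
 *	static uint32_t
 *	slot_to_offset(int slot)
 *	{
 *		return (slot * sizeof(struct desc32));
 *	}
 *
 *	static int
 *	offset_to_slot(uint32_t offset)
 *	{
 *		return (offset / sizeof(struct desc32));
 *	}
 */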
uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; int slot; uint32_t ctl0 = 0, ctl1 = 0; uint32_t addrlo, addrhi; uint32_t addrext; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addrlo = (uint32_t) (dmaaddr & 0xffffffff); addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK); addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; addrhi |= (siba_dma_translation(sc->sc_dev) << 1); if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return 
(-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_softc *sc = dr->dr_mac->mac_sc; uint64_t ring64; uint32_t addrext, ring32, value; uint32_t trans = siba_dma_translation(sc->sc_dev); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA64_TXENABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA32_TXENABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) 
bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
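/*
 * The redzone pair above is a cheap "did DMA write anything here?" test:
 * bwn_dma_set_redzone() paints the PLCP area (6 bytes plus 2 of padding)
 * with 0xff, and bwn_dma_check_redzone() ANDs the first eight frame bytes
 * together, which can only still equal 0xff if every byte is untouched.
 * A standalone sketch of the check (plain C, not driver code):
 *
 *	#include <stdint.h>
 *
 *	static int
 *	redzone_intact(const uint8_t *f)
 *	{
 *		// all-ones survives AND only if no byte changed
 *		return ((f[0] & f[1] & f[2] & f[3] &
 *		    f[4] & f[5] & f[6] & f[7]) == 0xff);
 *	}
 */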
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_ic.ic_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (siba_get_revid(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; bwn_phy_exit(mac); siba_gpio_set(sc->sc_dev, 0); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t mask = 0x1f, set = 0xf, value; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f); if (siba_get_chipid(sc->sc_dev) == 0x4301) { mask |= 0x0060; set |= 0x0060; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } if (siba_get_revid(sc->sc_dev) >= 2) mask |= 0x0010; value = siba_gpio_get(sc->sc_dev); if (value == -1) return (0); siba_gpio_set(sc->sc_dev, (value & mask) | set); return (0); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int 
bwn_phy_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error; mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac); mac->mac_phy.rf_onoff(mac, 1); error = mac->mac_phy.init(mac); if (error) { device_printf(sc->sc_dev, "PHY init failed\n"); goto fail0; } error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac)); if (error) { device_printf(sc->sc_dev, "failed to switch default channel\n"); goto fail1; } return (0); fail1: if (mac->mac_phy.exit) mac->mac_phy.exit(mac); fail0: mac->mac_phy.rf_onoff(mac, 0); return (error); } static void bwn_set_txantenna(struct bwn_mac *mac, int antenna) { uint16_t ant; uint16_t tmp; ant = bwn_ant2phy(antenna); /* For ACK/CTS */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp); /* For Probe Responses */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp); } static void bwn_set_opmode(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t ctl; uint16_t cfp_pretbtt; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL | BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS | BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC); ctl |= BWN_MACCTL_STA; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) ctl |= BWN_MACCTL_HOSTAP; else if (ic->ic_opmode == IEEE80211_M_IBSS) ctl &= ~BWN_MACCTL_STA; ctl |= sc->sc_filters; if (siba_get_revid(sc->sc_dev) <= 4) ctl |= BWN_MACCTL_PROMISC; BWN_WRITE_4(mac, BWN_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) { if (siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chiprev(sc->sc_dev) == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; } BWN_WRITE_2(mac, 0x612, cfp_pretbtt); } static int bwn_dma_gettype(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_64BIT); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_32BIT); return (BWN_DMA_30BIT); } static void bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments (%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static void bwn_phy_g_init_sub(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t i, tmp; if (phy->rev == 1) bwn_phy_init_b5(mac); else bwn_phy_init_b6(mac); if (phy->rev >= 2 || phy->gmode) bwn_phy_init_a(mac); if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, 0); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, 0); } if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0); } if (phy->rev > 5) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x400); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0); } if (phy->gmode || phy->rev >= 2) { tmp = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM); tmp &= BWN_PHYVER_VERSION; if (tmp == 3 || tmp == 5) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc2), 0x1816); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc3), 0x8006); } if (tmp == 5) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xcc), 0x00ff, 0x1f00); } } if ((phy->rev <= 2 && phy->gmode) || phy->rev
>= 2) BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x7e), 0x78); if (phy->rf_rev == 8) { BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x80); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x3e), 0x4); } if (BWN_HAS_LOOPBACK(phy)) bwn_loopback_calcgain(mac); if (phy->rf_rev != 8) { if (pg->pg_initval == 0xffff) pg->pg_initval = bwn_rf_init_bcm2050(mac); else BWN_RF_WRITE(mac, 0x0078, pg->pg_initval); } bwn_lo_g_init(mac); if (BWN_HAS_TXMAG(phy)) { BWN_RF_WRITE(mac, 0x52, (BWN_RF_READ(mac, 0x52) & 0xff00) | pg->pg_loctl.tx_bias | pg->pg_loctl.tx_magn); } else { BWN_RF_SETMASK(mac, 0x52, 0xfff0, pg->pg_loctl.tx_bias); } if (phy->rev >= 6) { BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x36), 0x0fff, (pg->pg_loctl.tx_bias << 12)); } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8075); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x807f); if (phy->rev < 2) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x101); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x202); if (phy->gmode || phy->rev >= 2) { bwn_lo_g_adjust(mac); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078); } if (!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) { for (i = 0; i < 64; i++) { BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, i); BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_DATA, (uint16_t)MIN(MAX(bwn_nrssi_read(mac, i) - 0xffff, -32), 31)); } bwn_nrssi_threshold(mac); } else if (phy->gmode || phy->rev >= 2) { if (pg->pg_nrssi[0] == -1000) { KASSERT(pg->pg_nrssi[1] == -1000, ("%s:%d: fail", __func__, __LINE__)); bwn_nrssi_slope_11g(mac); } else bwn_nrssi_threshold(mac); } if (phy->rf_rev == 8) BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x05), 0x3230); bwn_phy_hwpctl_init(mac); if ((siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chippkg(sc->sc_dev) == 2) || 0) { BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0xbfff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xc3), 0x7fff); } } static uint8_t bwn_has_hwpctl(struct bwn_mac *mac) { if (mac->mac_phy.hwpctl == 0 || mac->mac_phy.use_hwpctl == NULL) return (0); return (mac->mac_phy.use_hwpctl(mac)); } static void bwn_phy_init_b5(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t offset, value; uint8_t old_channel; if (phy->analog == 1) BWN_RF_SET(mac, 0x007a, 0x0050); if ((siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM) && (siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306)) { value = 0x2120; for (offset = 0x00a8; offset < 0x00c7; offset++) { BWN_PHY_WRITE(mac, offset, value); value += 0x202; } } BWN_PHY_SETMASK(mac, 0x0035, 0xf0ff, 0x0700); if (phy->rf_ver == 0x2050) BWN_PHY_WRITE(mac, 0x0038, 0x0667); if (phy->gmode || phy->rev >= 2) { if (phy->rf_ver == 0x2050) { BWN_RF_SET(mac, 0x007a, 0x0020); BWN_RF_SET(mac, 0x0051, 0x0004); } BWN_WRITE_2(mac, BWN_PHY_RADIO, 0x0000); BWN_PHY_SET(mac, 0x0802, 0x0100); BWN_PHY_SET(mac, 0x042b, 0x2000); BWN_PHY_WRITE(mac, 0x001c, 0x186a); BWN_PHY_SETMASK(mac, 0x0013, 0x00ff, 0x1900); BWN_PHY_SETMASK(mac, 0x0035, 0xffc0, 0x0064); BWN_PHY_SETMASK(mac, 0x005d, 0xff80, 0x000a); } if (mac->mac_flags & BWN_MAC_FLAG_BADFRAME_PREEMP) BWN_PHY_SET(mac, BWN_PHY_RADIO_BITFIELD, (1 << 11)); if (phy->analog == 1) { BWN_PHY_WRITE(mac, 0x0026, 0xce00); BWN_PHY_WRITE(mac, 0x0021, 0x3763); BWN_PHY_WRITE(mac, 0x0022, 0x1bc3); BWN_PHY_WRITE(mac, 0x0023, 0x06f9); BWN_PHY_WRITE(mac, 0x0024, 0x037e); } else BWN_PHY_WRITE(mac, 0x0026, 0xcc00); BWN_PHY_WRITE(mac, 0x0030, 0x00c6); BWN_WRITE_2(mac, 0x03ec, 0x3f22); if (phy->analog == 1) BWN_PHY_WRITE(mac, 0x0020, 0x3e1c); else BWN_PHY_WRITE(mac, 0x0020, 0x301c); if (phy->analog == 0) 
BWN_WRITE_2(mac, 0x03e4, 0x3000); old_channel = phy->chan; bwn_phy_g_switch_chan(mac, 7, 0); if (phy->rf_ver != 0x2050) { BWN_RF_WRITE(mac, 0x0075, 0x0080); BWN_RF_WRITE(mac, 0x0079, 0x0081); } BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x0050, 0x0023); if (phy->rf_ver == 0x2050) { BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x005a, 0x0070); } BWN_RF_WRITE(mac, 0x005b, 0x007b); BWN_RF_WRITE(mac, 0x005c, 0x00b0); BWN_RF_SET(mac, 0x007a, 0x0007); bwn_phy_g_switch_chan(mac, old_channel, 0); BWN_PHY_WRITE(mac, 0x0014, 0x0080); BWN_PHY_WRITE(mac, 0x0032, 0x00ca); BWN_PHY_WRITE(mac, 0x002a, 0x88a3); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); if (phy->rf_ver == 0x2050) BWN_RF_WRITE(mac, 0x005d, 0x000d); BWN_WRITE_2(mac, 0x03e4, (BWN_READ_2(mac, 0x03e4) & 0xffc0) | 0x0004); } static void bwn_loopback_calcgain(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t backup_phy[16] = { 0 }; uint16_t backup_radio[3]; uint16_t backup_bband; uint16_t i, j, loop_i_max; uint16_t trsw_rx; uint16_t loop1_outer_done, loop1_inner_done; backup_phy[0] = BWN_PHY_READ(mac, BWN_PHY_CRS0); backup_phy[1] = BWN_PHY_READ(mac, BWN_PHY_CCKBBANDCFG); backup_phy[2] = BWN_PHY_READ(mac, BWN_PHY_RFOVER); backup_phy[3] = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); if (phy->rev != 1) { backup_phy[4] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); backup_phy[5] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); } backup_phy[6] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a)); backup_phy[7] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59)); backup_phy[8] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58)); backup_phy[9] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x0a)); backup_phy[10] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x03)); backup_phy[11] = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); backup_phy[12] = BWN_PHY_READ(mac, BWN_PHY_LO_CTL); backup_phy[13] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2b)); backup_phy[14] = BWN_PHY_READ(mac, BWN_PHY_PGACTL); backup_phy[15] = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); backup_bband = pg->pg_bbatt.att; backup_radio[0] = BWN_RF_READ(mac, 0x52); backup_radio[1] = BWN_RF_READ(mac, 0x43); backup_radio[2] = BWN_RF_READ(mac, 0x7a); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x3fff); BWN_PHY_SET(mac, BWN_PHY_CCKBBANDCFG, 0x8000); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0002); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffd); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0001); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffe); if (phy->rev != 1) { BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0001); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0002); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffd); } BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x000c); BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x000c); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0030); BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xffcf, 0x10); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0780); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); BWN_PHY_SET(mac, BWN_PHY_CCK(0x0a), 0x2000); if (phy->rev != 1) { BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0004); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffb); } BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xff9f, 0x40); if (phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, 0x000f); else { BWN_RF_WRITE(mac, 0x52, 0); BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x9); } bwn_phy_g_set_bbatt(mac, 11); if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0); 
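/*
 * With the original PHY/radio state saved above, the remaining
 * overrides drive the chain in loopback: the nested loops below sweep
 * RF attenuation (register 0x43, index i) and the PGA gain field of
 * RFOVERVAL (index j) until the measured LO leakage reaches 0xdfc,
 * and the final loop counters are folded into pg_max_lb_gain once the
 * saved state has been restored.
 */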
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xffc0, 0x01); BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xc0ff, 0x800); BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0100); BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xcfff); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) { if (phy->rev >= 7) { BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0800); BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x8000); } } BWN_RF_MASK(mac, 0x7a, 0x00f7); j = 0; loop_i_max = (phy->rf_rev == 8) ? 15 : 9; for (i = 0; i < loop_i_max; i++) { for (j = 0; j < 16; j++) { BWN_RF_WRITE(mac, 0x43, i); BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, (j << 8)); BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000); BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000); DELAY(20); if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc) goto done0; } } done0: loop1_outer_done = i; loop1_inner_done = j; if (j >= 8) { BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x30); trsw_rx = 0x1b; for (j = j - 8; j < 16; j++) { BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, j << 8); BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000); BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000); DELAY(20); trsw_rx -= 3; if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc) goto done1; } } else trsw_rx = 0x18; done1: if (phy->rev != 1) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, backup_phy[4]); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, backup_phy[5]); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), backup_phy[6]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), backup_phy[7]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), backup_phy[8]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x0a), backup_phy[9]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x03), backup_phy[10]); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, backup_phy[11]); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, backup_phy[12]); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), backup_phy[13]); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, backup_phy[14]); bwn_phy_g_set_bbatt(mac, backup_bband); BWN_RF_WRITE(mac, 0x52, backup_radio[0]); BWN_RF_WRITE(mac, 0x43, backup_radio[1]); BWN_RF_WRITE(mac, 0x7a, backup_radio[2]); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2] | 0x0003); DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2]); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, backup_phy[3]); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, backup_phy[0]); BWN_PHY_WRITE(mac, BWN_PHY_CCKBBANDCFG, backup_phy[1]); pg->pg_max_lb_gain = ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11; pg->pg_trsw_rx_gain = trsw_rx * 2; } static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; uint32_t tmp1 = 0, tmp2 = 0; uint16_t rcc, i, j, pgactl, cck0, cck1, cck2, cck3, rfover, rfoverval, analogover, analogoverval, crs0, classctl, lomask, loctl, syncctl, radio0, radio1, radio2, reg0, reg1, reg2, radio78, reg, index; static const uint8_t rcc_table[] = { 0x02, 0x03, 0x01, 0x0f, 0x06, 0x07, 0x05, 0x0f, 0x0a, 0x0b, 0x09, 0x0f, 0x0e, 0x0f, 0x0d, 0x0f, }; loctl = lomask = reg0 = classctl = crs0 = analogoverval = analogover = rfoverval = rfover = cck3 = 0; radio0 = BWN_RF_READ(mac, 0x43); radio1 = BWN_RF_READ(mac, 0x51); radio2 = BWN_RF_READ(mac, 0x52); pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL); cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a)); cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59)); cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58)); if (phy->type == BWN_PHYTYPE_B) { cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30)); reg0 = BWN_READ_2(mac, 0x3ec); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0xff); BWN_WRITE_2(mac, 0x3ec, 0x3f3f); } else if (phy->gmode || phy->rev >= 2) { rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); 
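/*
 * As with the radio and CCK registers captured above, these override
 * and control registers are saved so that bwn_rf_init_bcm2050() can
 * restore the PHY to its original state after the calibration loops
 * below have run.
 */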
analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0); classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc); if (BWN_HAS_LOOPBACK(phy)) { lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); loctl = BWN_PHY_READ(mac, BWN_PHY_LO_CTL); if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0); } BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 1, 1))); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVER, 0)); } BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000); syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL); BWN_PHY_MASK(mac, BWN_PHY_SYNCCTL, 0xff7f); reg1 = BWN_READ_2(mac, 0x3e6); reg2 = BWN_READ_2(mac, 0x3f4); if (phy->analog == 0) BWN_WRITE_2(mac, 0x03e6, 0x0122); else { if (phy->analog >= 2) BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xffbf, 0x40); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, (BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000)); } reg = BWN_RF_READ(mac, 0x60); index = (reg & 0x001e) >> 1; rcc = (((rcc_table[index] << 1) | (reg & 0x0001)) | 0x0020); if (phy->type == BWN_PHYTYPE_B) BWN_RF_WRITE(mac, 0x78, 0x26); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 1, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfaf); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1403); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(0, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfa0); BWN_RF_SET(mac, 0x51, 0x0004); if (phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, 0x1f); else { BWN_RF_WRITE(mac, 0x52, 0); BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x0009); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); for (i = 0; i < 16; i++) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0480); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0); DELAY(20); tmp1 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); } DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); tmp1++; tmp1 >>= 9; for (i = 0; i < 16; i++) { radio78 = (BWN_BITREV4(i) << 1) | 0x0020; BWN_RF_WRITE(mac, 0x78, radio78); DELAY(10); for (j = 0; j < 16; j++) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0d80); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, 
bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0); DELAY(10); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0); DELAY(10); tmp2 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0); if (phy->gmode || phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1))); } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0); } tmp2++; tmp2 >>= 8; if (tmp1 < tmp2) break; } BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pgactl); BWN_RF_WRITE(mac, 0x51, radio1); BWN_RF_WRITE(mac, 0x52, radio2); BWN_RF_WRITE(mac, 0x43, radio0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), cck0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), cck1); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), cck2); BWN_WRITE_2(mac, 0x3e6, reg1); if (phy->analog != 0) BWN_WRITE_2(mac, 0x3f4, reg2); BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, syncctl); bwn_spu_workaround(mac, phy->chan); if (phy->type == BWN_PHYTYPE_B) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), cck3); BWN_WRITE_2(mac, 0x3ec, reg0); } else if (phy->gmode) { BWN_WRITE_2(mac, BWN_PHY_RADIO, BWN_READ_2(mac, BWN_PHY_RADIO) & 0x7fff); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, analogover); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, analogoverval); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, crs0); BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, classctl); if (BWN_HAS_LOOPBACK(phy)) { BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, lomask); BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, loctl); } } return ((i > 15) ? 
radio78 : rcc); } static void bwn_phy_init_b6(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; uint16_t offset, val; uint8_t old_channel; KASSERT(!(phy->rf_rev == 6 || phy->rf_rev == 7), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, 0x003e, 0x817a); BWN_RF_WRITE(mac, 0x007a, BWN_RF_READ(mac, 0x007a) | 0x0058); if (phy->rf_rev == 4 || phy->rf_rev == 5) { BWN_RF_WRITE(mac, 0x51, 0x37); BWN_RF_WRITE(mac, 0x52, 0x70); BWN_RF_WRITE(mac, 0x53, 0xb3); BWN_RF_WRITE(mac, 0x54, 0x9b); BWN_RF_WRITE(mac, 0x5a, 0x88); BWN_RF_WRITE(mac, 0x5b, 0x88); BWN_RF_WRITE(mac, 0x5d, 0x88); BWN_RF_WRITE(mac, 0x5e, 0x88); BWN_RF_WRITE(mac, 0x7d, 0x88); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_TSSI_RESET_PSM_WORKAROUN); } if (phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x51, 0); BWN_RF_WRITE(mac, 0x52, 0x40); BWN_RF_WRITE(mac, 0x53, 0xb7); BWN_RF_WRITE(mac, 0x54, 0x98); BWN_RF_WRITE(mac, 0x5a, 0x88); BWN_RF_WRITE(mac, 0x5b, 0x6b); BWN_RF_WRITE(mac, 0x5c, 0x0f); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_ALTIQ) { BWN_RF_WRITE(mac, 0x5d, 0xfa); BWN_RF_WRITE(mac, 0x5e, 0xd8); } else { BWN_RF_WRITE(mac, 0x5d, 0xf5); BWN_RF_WRITE(mac, 0x5e, 0xb8); } BWN_RF_WRITE(mac, 0x0073, 0x0003); BWN_RF_WRITE(mac, 0x007d, 0x00a8); BWN_RF_WRITE(mac, 0x007c, 0x0001); BWN_RF_WRITE(mac, 0x007e, 0x0008); } for (val = 0x1e1f, offset = 0x0088; offset < 0x0098; offset++) { BWN_PHY_WRITE(mac, offset, val); val -= 0x0202; } for (val = 0x3e3f, offset = 0x0098; offset < 0x00a8; offset++) { BWN_PHY_WRITE(mac, offset, val); val -= 0x0202; } for (val = 0x2120, offset = 0x00a8; offset < 0x00c8; offset++) { BWN_PHY_WRITE(mac, offset, (val & 0x3f3f)); val += 0x0202; } if (phy->type == BWN_PHYTYPE_G) { BWN_RF_SET(mac, 0x007a, 0x0020); BWN_RF_SET(mac, 0x0051, 0x0004); BWN_PHY_SET(mac, 0x0802, 0x0100); BWN_PHY_SET(mac, 0x042b, 0x2000); BWN_PHY_WRITE(mac, 0x5b, 0); BWN_PHY_WRITE(mac, 0x5c, 0); } old_channel = phy->chan; bwn_phy_g_switch_chan(mac, (old_channel >= 8) ? 
1 : 13, 0); BWN_RF_WRITE(mac, 0x0050, 0x0020); BWN_RF_WRITE(mac, 0x0050, 0x0023); DELAY(40); if (phy->rf_rev < 6 || phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x7c, BWN_RF_READ(mac, 0x7c) | 0x0002); BWN_RF_WRITE(mac, 0x50, 0x20); } if (phy->rf_rev <= 2) { BWN_RF_WRITE(mac, 0x7c, 0x20); BWN_RF_WRITE(mac, 0x5a, 0x70); BWN_RF_WRITE(mac, 0x5b, 0x7b); BWN_RF_WRITE(mac, 0x5c, 0xb0); } BWN_RF_SETMASK(mac, 0x007a, 0x00f8, 0x0007); bwn_phy_g_switch_chan(mac, old_channel, 0); BWN_PHY_WRITE(mac, 0x0014, 0x0200); if (phy->rf_rev >= 6) BWN_PHY_WRITE(mac, 0x2a, 0x88c2); else BWN_PHY_WRITE(mac, 0x2a, 0x8ac0); BWN_PHY_WRITE(mac, 0x0038, 0x0668); bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt, pg->pg_txctl); if (phy->rf_rev <= 5) BWN_PHY_SETMASK(mac, 0x5d, 0xff80, 0x0003); if (phy->rf_rev <= 2) BWN_RF_WRITE(mac, 0x005d, 0x000d); if (phy->analog == 4) { BWN_WRITE_2(mac, 0x3e4, 9); BWN_PHY_MASK(mac, 0x61, 0x0fff); } else BWN_PHY_SETMASK(mac, 0x0002, 0xffc0, 0x0004); if (phy->type == BWN_PHYTYPE_B) KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); else if (phy->type == BWN_PHYTYPE_G) BWN_WRITE_2(mac, 0x03e6, 0x0); } static void bwn_phy_init_a(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; KASSERT(phy->type == BWN_PHYTYPE_A || phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if (phy->rev >= 6) { if (phy->type == BWN_PHYTYPE_A) BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x1000); if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) BWN_PHY_SET(mac, BWN_PHY_ENCORE, 0x0010); else BWN_PHY_MASK(mac, BWN_PHY_ENCORE, ~0x1010); } bwn_wa_init(mac); if (phy->type == BWN_PHYTYPE_G && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)) BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x6e), 0xe000, 0x3cf); } static void bwn_wa_write_noisescale(struct bwn_mac *mac, const uint16_t *nst) { int i; for (i = 0; i < BWN_TAB_NOISESCALE_SIZE; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_NOISESCALE, i, nst[i]); } static void bwn_wa_agc(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (phy->rev == 1) { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 0, 254); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 1, 13); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 2, 19); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 3, 25); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 0, 0x2710); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 1, 0x9b83); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 2, 0x9b83); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 3, 0x0f8d); BWN_PHY_WRITE(mac, BWN_PHY_LMS, 4); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 0, 254); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 1, 13); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 2, 19); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 3, 25); } BWN_PHY_SETMASK(mac, BWN_PHY_CCKSHIFTBITS_WA, (uint16_t)~0xff00, 0x5700); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x007f, 0x000f); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x3f80, 0x2b80); BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, 0xf0ff, 0x0300); BWN_RF_SET(mac, 0x7a, 0x0008); BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x000f, 0x0008); BWN_PHY_SETMASK(mac, BWN_PHY_P1P2GAIN, ~0x0f00, 0x0600); BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x0f00, 0x0700); BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x0f00, 0x0100); if (phy->rev == 1) BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x000f, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x00ff, 0x001c); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x3f00, 0x0200); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), ~0x00ff, 0x001c); BWN_PHY_SETMASK(mac, 
BWN_PHY_OFDM(0x89), ~0x00ff, 0x0020); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x89), ~0x3f00, 0x0200); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x82), ~0x00ff, 0x002e); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), (uint16_t)~0xff00, 0x1a00); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), ~0x00ff, 0x0028); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), (uint16_t)~0xff00, 0x2c00); if (phy->rev == 1) { BWN_PHY_WRITE(mac, BWN_PHY_PEAK_COUNT, 0x092b); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e, 0x0002); } else { BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x1f), 0x287a); BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, ~0x000f, 0x0004); if (phy->rev >= 6) { BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x22), 0x287a); BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, (uint16_t)~0xf000, 0x3000); } } BWN_PHY_SETMASK(mac, BWN_PHY_DIVSRCHIDX, 0x8080, 0x7874); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8e), 0x1c00); if (phy->rev == 1) { BWN_PHY_SETMASK(mac, BWN_PHY_DIVP1P2GAIN, ~0x0f00, 0x0600); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8b), 0x005e); BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, ~0x00ff, 0x001e); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8d), 0x0002); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 1, 7); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 2, 16); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 3, 28); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 1, 7); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 2, 16); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 3, 28); } if (phy->rev >= 6) { BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x0003); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x1000); } BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM); } static void bwn_wa_grev1(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; int i; static const uint16_t bwn_tab_finefreqg[] = BWN_TAB_FINEFREQ_G; static const uint32_t bwn_tab_retard[] = BWN_TAB_RETARD; static const uint32_t bwn_tab_rotor[] = BWN_TAB_ROTOR; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); /* init CRSTHRES and ANTDWELL */ if (phy->rev == 1) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19); } else if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } else { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } BWN_PHY_SETMASK(mac, BWN_PHY_CRS0, ~0x03c0, 0xd000); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x2c), 0x005a); BWN_PHY_WRITE(mac, BWN_PHY_CCKSHIFTBITS, 0x0026); /* XXX support PHY-A??? */ for (i = 0; i < N(bwn_tab_finefreqg); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DACRFPABB, i, bwn_tab_finefreqg[i]); /* XXX support PHY-A??? */ if (phy->rev == 1) for (i = 0; i < N(bwn_tab_noise_g1); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g1[i]); else for (i = 0; i < N(bwn_tab_noise_g2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g2[i]); for (i = 0; i < N(bwn_tab_rotor); i++) bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ROTOR, i, bwn_tab_rotor[i]); /* XXX support PHY-A??? 
*/ if (phy->rev >= 6) { if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3); else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2); } else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1); for (i = 0; i < N(bwn_tab_retard); i++) bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ADVRETARD, i, bwn_tab_retard[i]); if (phy->rev == 1) { for (i = 0; i < 16; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i, 0x0020); } else { for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820); } bwn_wa_agc(mac); } static void bwn_wa_grev26789(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; int i; static const uint16_t bwn_tab_sigmasqr2[] = BWN_TAB_SIGMASQR2; uint16_t ofdmrev; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); bwn_gtab_write(mac, BWN_GTAB_ORIGTR, 0, 0xc480); /* init CRSTHRES and ANTDWELL */ if (phy->rev == 1) BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19); else if (phy->rev == 2) { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } else { BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098); BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080); BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800); } for (i = 0; i < 64; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_RSSI, i, i); /* XXX support PHY-A??? */ if (phy->rev == 1) for (i = 0; i < N(bwn_tab_noise_g1); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g1[i]); else for (i = 0; i < N(bwn_tab_noise_g2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i, bwn_tab_noise_g2[i]); /* XXX support PHY-A??? */ if (phy->rev >= 6) { if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN) bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3); else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2); } else bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1); for (i = 0; i < N(bwn_tab_sigmasqr2); i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_MINSIGSQ, i, bwn_tab_sigmasqr2[i]); if (phy->rev == 1) { for (i = 0; i < 16; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i, 0x0020); } else { for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820); } bwn_wa_agc(mac); ofdmrev = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM) & BWN_PHYVER_VERSION; if (ofdmrev > 2) { if (phy->type == BWN_PHYTYPE_A) BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1808); else BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1000); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 3, 0x1044); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 4, 0x7201); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 6, 0x0040); } bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 2, 15); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 3, 20); } static void bwn_wa_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__)); switch (phy->rev) { case 1: bwn_wa_grev1(mac); break; case 2: case 6: case 7: case 8: case 9: bwn_wa_grev26789(mac); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM || siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306 || siba_get_pci_revid(sc->sc_dev) != 0x17) { if (phy->rev < 2) { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 1, 0x0002); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 2, 0x0001); } else { bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 1, 0x0002); bwn_ofdmtab_write_2(mac, 
BWN_OFDMTAB_GAINX, 2, 0x0001); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) && (phy->rev >= 7)) { BWN_PHY_MASK(mac, BWN_PHY_EXTG(0x11), 0xf7ff); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0020, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0021, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0022, 0x0001); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0023, 0x0000); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0000, 0x0000); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 0x0003, 0x0002); } } } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) { BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, 0x3120); BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, 0xc480); } bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 0, 0); bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 1, 0); } static void bwn_ofdmtab_write_2(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint16_t value) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t addr; addr = table + offset; if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) || (addr - 1 != pg->pg_ofdmtab_addr)) { BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr); pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE; } pg->pg_ofdmtab_addr = addr; BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value); } static void bwn_ofdmtab_write_4(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint32_t value) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t addr; addr = table + offset; if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) || (addr - 1 != pg->pg_ofdmtab_addr)) { BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr); pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE; } pg->pg_ofdmtab_addr = addr; BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value); BWN_PHY_WRITE(mac, BWN_PHY_OTABLEQ, (value >> 16)); } static void bwn_gtab_write(struct bwn_mac *mac, uint16_t table, uint16_t offset, uint16_t value) { BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, table + offset); BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, value); } static void bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; unsigned int i, max_loop; uint16_t value; uint32_t buffer[5] = { 0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 }; if (ofdm) { max_loop = 0x1e; buffer[0] = 0x000201cc; } else { max_loop = 0xfa; buffer[0] = 0x000b846e; } BWN_ASSERT_LOCKED(mac->mac_sc); for (i = 0; i < 5; i++) bwn_ram_write(mac, i * 4, buffer[i]); BWN_WRITE_2(mac, 0x0568, 0x0000); BWN_WRITE_2(mac, 0x07c0, (siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100); value = ((phy->type == BWN_PHYTYPE_A) ? 
0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_LP) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); if (phy->type == BWN_PHYTYPE_LP) BWN_WRITE_2(mac, 0x0502, 0x0050); else BWN_WRITE_2(mac, 0x0502, 0x0030); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } static void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } static void bwn_lo_write(struct bwn_mac *mac, struct bwn_loctl *ctl) { uint16_t value; KASSERT(mac->mac_phy.type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); value = (uint8_t) (ctl->q); value |= ((uint8_t) (ctl->i)) << 8; BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, value); } static uint16_t bwn_lo_calcfeed(struct bwn_mac *mac, uint16_t lna, uint16_t pga, uint16_t trsw_rx) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint16_t rfover; uint16_t feedthrough; if (phy->gmode) { lna <<= BWN_PHY_RFOVERVAL_LNA_SHIFT; pga <<= BWN_PHY_RFOVERVAL_PGA_SHIFT; KASSERT((lna & ~BWN_PHY_RFOVERVAL_LNA) == 0, ("%s:%d: fail", __func__, __LINE__)); KASSERT((pga & ~BWN_PHY_RFOVERVAL_PGA) == 0, ("%s:%d: fail", __func__, __LINE__)); trsw_rx &= (BWN_PHY_RFOVERVAL_TRSWRX | BWN_PHY_RFOVERVAL_BW); rfover = BWN_PHY_RFOVERVAL_UNK | pga | lna | trsw_rx; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) && phy->rev > 6) rfover |= BWN_PHY_RFOVERVAL_EXTLNA; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); rfover |= BWN_PHY_RFOVERVAL_BW_LBW; BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); rfover |= BWN_PHY_RFOVERVAL_BW_LPF; BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover); DELAY(10); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xf300); } else { pga |= BWN_PHY_PGACTL_UNKNOWN; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); DELAY(10); pga |= BWN_PHY_PGACTL_LOWBANDW; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); DELAY(10); pga |= BWN_PHY_PGACTL_LPF; BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga); } DELAY(21); feedthrough = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE); return (feedthrough); } static uint16_t bwn_lo_txctl_regtable(struct bwn_mac *mac, uint16_t *value, uint16_t *pad_mix_gain) { struct bwn_phy *phy = &mac->mac_phy; uint16_t reg, v, padmix; if (phy->type == BWN_PHYTYPE_B) { v = 0x30; if (phy->rf_rev <= 5) { reg = 0x43; padmix = 0; } else { reg = 0x52; padmix = 5; } } else { if (phy->rev >= 2 && phy->rf_rev == 8) { reg = 0x43; v = 0x10; padmix = 2; } else { reg = 0x52; v = 0x30; padmix = 5; } } if (value) *value = v; if (pad_mix_gain) *pad_mix_gain = padmix; return (reg); } static void bwn_lo_measure_txctl_values(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct 
bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; uint16_t reg, mask; uint16_t trsw_rx, pga; uint16_t rf_pctl_reg; static const uint8_t tx_bias_values[] = { 0x09, 0x08, 0x0a, 0x01, 0x00, 0x02, 0x05, 0x04, 0x06, }; static const uint8_t tx_magn_values[] = { 0x70, 0x40, }; if (!BWN_HAS_LOOPBACK(phy)) { rf_pctl_reg = 6; trsw_rx = 2; pga = 0; } else { int lb_gain; trsw_rx = 0; lb_gain = pg->pg_max_lb_gain / 2; if (lb_gain > 10) { rf_pctl_reg = 0; pga = abs(10 - lb_gain) / 6; pga = MIN(MAX(pga, 0), 15); } else { int cmp_val; int tmp; pga = 0; cmp_val = 0x24; if ((phy->rev >= 2) && (phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) cmp_val = 0x3c; tmp = lb_gain; if ((10 - lb_gain) < cmp_val) tmp = (10 - lb_gain); if (tmp < 0) tmp += 6; else tmp += 3; cmp_val /= 4; tmp /= 4; if (tmp >= cmp_val) rf_pctl_reg = cmp_val; else rf_pctl_reg = tmp; } } BWN_RF_SETMASK(mac, 0x43, 0xfff0, rf_pctl_reg); bwn_phy_g_set_bbatt(mac, 2); reg = bwn_lo_txctl_regtable(mac, &mask, NULL); mask = ~mask; BWN_RF_MASK(mac, reg, mask); if (BWN_HAS_TXMAG(phy)) { int i, j; int feedthrough; int min_feedth = 0xffff; uint8_t tx_magn, tx_bias; for (i = 0; i < N(tx_magn_values); i++) { tx_magn = tx_magn_values[i]; BWN_RF_SETMASK(mac, 0x52, 0xff0f, tx_magn); for (j = 0; j < N(tx_bias_values); j++) { tx_bias = tx_bias_values[j]; BWN_RF_SETMASK(mac, 0x52, 0xfff0, tx_bias); feedthrough = bwn_lo_calcfeed(mac, 0, pga, trsw_rx); if (feedthrough < min_feedth) { lo->tx_bias = tx_bias; lo->tx_magn = tx_magn; min_feedth = feedthrough; } if (lo->tx_bias == 0) break; } BWN_RF_WRITE(mac, 0x52, (BWN_RF_READ(mac, 0x52) & 0xff00) | lo->tx_bias | lo-> tx_magn); } } else { lo->tx_magn = 0; lo->tx_bias = 0; BWN_RF_MASK(mac, 0x52, 0xfff0); } BWN_GETTIME(lo->txctl_measured_time); } static void bwn_lo_get_powervector(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; int i; uint64_t tmp; uint64_t power_vector = 0; for (i = 0; i < 8; i += 2) { tmp = bwn_shm_read_2(mac, BWN_SHARED, 0x310 + i); power_vector |= (tmp << (i * 8)); bwn_shm_write_2(mac, BWN_SHARED, 0x310 + i, 0); } if (power_vector) lo->power_vector = power_vector; BWN_GETTIME(lo->pwr_vec_read_time); } static void bwn_lo_measure_gain_values(struct bwn_mac *mac, int16_t max_rx_gain, int use_trsw_rx) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; uint16_t tmp; if (max_rx_gain < 0) max_rx_gain = 0; if (BWN_HAS_LOOPBACK(phy)) { int trsw_rx = 0; int trsw_rx_gain; if (use_trsw_rx) { trsw_rx_gain = pg->pg_trsw_rx_gain / 2; if (max_rx_gain >= trsw_rx_gain) { trsw_rx_gain = max_rx_gain - trsw_rx_gain; trsw_rx = 0x20; } } else trsw_rx_gain = max_rx_gain; if (trsw_rx_gain < 9) { pg->pg_lna_lod_gain = 0; } else { pg->pg_lna_lod_gain = 1; trsw_rx_gain -= 8; } trsw_rx_gain = MIN(MAX(trsw_rx_gain, 0), 0x2d); pg->pg_pga_gain = trsw_rx_gain / 3; if (pg->pg_pga_gain >= 5) { pg->pg_pga_gain -= 5; pg->pg_lna_gain = 2; } else pg->pg_lna_gain = 0; } else { pg->pg_lna_gain = 0; pg->pg_trsw_rx_gain = 0x20; if (max_rx_gain >= 0x14) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 2; } else if (max_rx_gain >= 0x12) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 1; } else if (max_rx_gain >= 0xf) { pg->pg_lna_lod_gain = 1; pg->pg_pga_gain = 0; } else { pg->pg_lna_lod_gain = 0; pg->pg_pga_gain = 0; } } tmp = BWN_RF_READ(mac, 0x7a); if (pg->pg_lna_lod_gain == 0) tmp &= ~0x0008; else tmp |= 0x0008; BWN_RF_WRITE(mac, 0x7a, tmp); } static void bwn_lo_save(struct bwn_mac *mac, struct bwn_lo_g_value 
*sav) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; struct timespec ts; uint16_t tmp; if (bwn_has_hwpctl(mac)) { sav->phy_lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK); sav->phy_extg = BWN_PHY_READ(mac, BWN_PHY_EXTG(0x01)); sav->phy_dacctl_hwpctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL); sav->phy_cck4 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x14)); sav->phy_hpwr_tssictl = BWN_PHY_READ(mac, BWN_PHY_HPWR_TSSICTL); BWN_PHY_SET(mac, BWN_PHY_HPWR_TSSICTL, 0x100); BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x40); BWN_PHY_SET(mac, BWN_PHY_DACCTL, 0x40); BWN_PHY_SET(mac, BWN_PHY_CCK(0x14), 0x200); } if (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev < 6) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x16), 0x410); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x17), 0x820); } if (phy->rev >= 2) { sav->phy_analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER); sav->phy_analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL); sav->phy_rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER); sav->phy_rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL); sav->phy_classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL); sav->phy_cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x3e)); sav->phy_crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0); BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff); BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003); BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc); if (phy->type == BWN_PHYTYPE_G) { if ((phy->rev >= 7) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x933); } else { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x133); } } else { BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0); } BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), 0); } sav->reg0 = BWN_READ_2(mac, 0x3f4); sav->reg1 = BWN_READ_2(mac, 0x3e2); sav->rf0 = BWN_RF_READ(mac, 0x43); sav->rf1 = BWN_RF_READ(mac, 0x7a); sav->phy_pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL); sav->phy_cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2a)); sav->phy_syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL); sav->phy_dacctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL); if (!BWN_HAS_TXMAG(phy)) { sav->rf2 = BWN_RF_READ(mac, 0x52); sav->rf2 &= 0x00f0; } if (phy->type == BWN_PHYTYPE_B) { sav->phy_cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30)); sav->phy_cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x06)); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0x00ff); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), 0x3f3f); } else { BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000); } BWN_WRITE_2(mac, 0x3f4, BWN_READ_2(mac, 0x3f4) & 0xf000); tmp = (phy->type == BWN_PHYTYPE_G) ? 
BWN_PHY_LO_MASK : BWN_PHY_CCK(0x2e); BWN_PHY_WRITE(mac, tmp, 0x007f); tmp = sav->phy_syncctl; BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, tmp & 0xff7f); tmp = sav->rf1; BWN_RF_WRITE(mac, 0x007a, tmp & 0xfff0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), 0x8a3); if (phy->type == BWN_PHYTYPE_G || (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev >= 6)) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1003); } else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x0802); if (phy->rev >= 2) bwn_dummy_transmission(mac, 0, 1); bwn_phy_g_switch_chan(mac, 6, 0); BWN_RF_READ(mac, 0x51); if (phy->type == BWN_PHYTYPE_G) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0); nanouptime(&ts); if (time_before(lo->txctl_measured_time, (ts.tv_nsec / 1000000 + ts.tv_sec * 1000) - BWN_LO_TXCTL_EXPIRE)) bwn_lo_measure_txctl_values(mac); if (phy->type == BWN_PHYTYPE_G && phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc078); else { if (phy->type == BWN_PHYTYPE_B) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078); else BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078); } } static void bwn_lo_restore(struct bwn_mac *mac, struct bwn_lo_g_value *sav) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; uint16_t tmp; if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300); tmp = (pg->pg_pga_gain << 8); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa0); DELAY(5); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa2); DELAY(2); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa3); } else { tmp = (pg->pg_pga_gain | 0xefa0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, tmp); } if (phy->type == BWN_PHYTYPE_G) { if (phy->rev >= 3) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0xc078); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078); if (phy->rev >= 2) BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0202); else BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0101); } BWN_WRITE_2(mac, 0x3f4, sav->reg0); BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, sav->phy_pgactl); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), sav->phy_cck2); BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, sav->phy_syncctl); BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl); BWN_RF_WRITE(mac, 0x43, sav->rf0); BWN_RF_WRITE(mac, 0x7a, sav->rf1); if (!BWN_HAS_TXMAG(phy)) { tmp = sav->rf2; BWN_RF_SETMASK(mac, 0x52, 0xff0f, tmp); } BWN_WRITE_2(mac, 0x3e2, sav->reg1); if (phy->type == BWN_PHYTYPE_B && phy->rf_ver == 0x2050 && phy->rf_rev <= 5) { BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), sav->phy_cck0); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), sav->phy_cck1); } if (phy->rev >= 2) { BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, sav->phy_analogover); BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, sav->phy_analogoverval); BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, sav->phy_classctl); BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, sav->phy_rfover); BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, sav->phy_rfoverval); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), sav->phy_cck3); BWN_PHY_WRITE(mac, BWN_PHY_CRS0, sav->phy_crs0); } if (bwn_has_hwpctl(mac)) { tmp = (sav->phy_lomask & 0xbfff); BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, tmp); BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x01), sav->phy_extg); BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl_hwpctl); BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x14), sav->phy_cck4); BWN_PHY_WRITE(mac, BWN_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); } bwn_phy_g_switch_chan(mac, sav->old_channel, 1); } static int bwn_lo_probe_loctl(struct bwn_mac *mac, struct bwn_loctl *probe, struct bwn_lo_g_sm *d) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_loctl orig, test; struct bwn_loctl prev = { -100, -100 }; static const struct bwn_loctl 
modifiers[] = { { 1, 1,}, { 1, 0,}, { 1, -1,}, { 0, -1,}, { -1, -1,}, { -1, 0,}, { -1, 1,}, { 0, 1,} }; int begin, end, lower = 0, i; uint16_t feedth; if (d->curstate == 0) { begin = 1; end = 8; } else if (d->curstate % 2 == 0) { begin = d->curstate - 1; end = d->curstate + 1; } else { begin = d->curstate - 2; end = d->curstate + 2; } if (begin < 1) begin += 8; if (end > 8) end -= 8; memcpy(&orig, probe, sizeof(struct bwn_loctl)); i = begin; d->curstate = i; while (1) { KASSERT(i >= 1 && i <= 8, ("%s:%d: fail", __func__, __LINE__)); memcpy(&test, &orig, sizeof(struct bwn_loctl)); test.i += modifiers[i - 1].i * d->multipler; test.q += modifiers[i - 1].q * d->multipler; if ((test.i != prev.i || test.q != prev.q) && (abs(test.i) <= 16 && abs(test.q) <= 16)) { bwn_lo_write(mac, &test); feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); if (feedth < d->feedth) { memcpy(probe, &test, sizeof(struct bwn_loctl)); lower = 1; d->feedth = feedth; if (d->nmeasure < 2 && !BWN_HAS_LOOPBACK(phy)) break; } } memcpy(&prev, &test, sizeof(prev)); if (i == end) break; if (i == 8) i = 1; else i++; d->curstate = i; } return (lower); } static void bwn_lo_probe_sm(struct bwn_mac *mac, struct bwn_loctl *loctl, int *rxgain) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_lo_g_sm d; struct bwn_loctl probe; int lower, repeat, cnt = 0; uint16_t feedth; d.nmeasure = 0; d.multipler = 1; if (BWN_HAS_LOOPBACK(phy)) d.multipler = 3; memcpy(&d.loctl, loctl, sizeof(struct bwn_loctl)); repeat = (BWN_HAS_LOOPBACK(phy)) ? 4 : 1; do { bwn_lo_write(mac, &d.loctl); feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); if (feedth < 0x258) { if (feedth >= 0x12c) *rxgain += 6; else *rxgain += 3; feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain, pg->pg_pga_gain, pg->pg_trsw_rx_gain); } d.feedth = feedth; d.curstate = 0; do { KASSERT(d.curstate >= 0 && d.curstate <= 8, ("%s:%d: fail", __func__, __LINE__)); memcpy(&probe, &d.loctl, sizeof(struct bwn_loctl)); lower = bwn_lo_probe_loctl(mac, &probe, &d); if (!lower) break; if ((probe.i == d.loctl.i) && (probe.q == d.loctl.q)) break; memcpy(&d.loctl, &probe, sizeof(struct bwn_loctl)); d.nmeasure++; } while (d.nmeasure < 24); memcpy(loctl, &d.loctl, sizeof(struct bwn_loctl)); if (BWN_HAS_LOOPBACK(phy)) { if (d.feedth > 0x1194) *rxgain -= 6; else if (d.feedth < 0x5dc) *rxgain += 3; if (cnt == 0) { if (d.feedth <= 0x5dc) { d.multipler = 1; cnt++; } else d.multipler = 2; } else if (cnt == 2) d.multipler = 1; } bwn_lo_measure_gain_values(mac, *rxgain, BWN_HAS_LOOPBACK(phy)); } while (++cnt < repeat); } static struct bwn_lo_calib * bwn_lo_calibset(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_loctl loctl = { 0, 0 }; struct bwn_lo_calib *cal; struct bwn_lo_g_value sval = { 0 }; int rxgain; uint16_t pad, reg, value; sval.old_channel = phy->chan; bwn_mac_suspend(mac); bwn_lo_save(mac, &sval); reg = bwn_lo_txctl_regtable(mac, &value, &pad); BWN_RF_SETMASK(mac, 0x43, 0xfff0, rfatt->att); BWN_RF_SETMASK(mac, reg, ~value, (rfatt->padmix ? 
value :0)); rxgain = (rfatt->att * 2) + (bbatt->att / 2); if (rfatt->padmix) rxgain -= pad; if (BWN_HAS_LOOPBACK(phy)) rxgain += pg->pg_max_lb_gain; bwn_lo_measure_gain_values(mac, rxgain, BWN_HAS_LOOPBACK(phy)); bwn_phy_g_set_bbatt(mac, bbatt->att); bwn_lo_probe_sm(mac, &loctl, &rxgain); bwn_lo_restore(mac, &sval); bwn_mac_enable(mac); cal = malloc(sizeof(*cal), M_DEVBUF, M_NOWAIT | M_ZERO); if (!cal) { device_printf(mac->mac_sc->sc_dev, "out of memory\n"); return (NULL); } memcpy(&cal->bbatt, bbatt, sizeof(*bbatt)); memcpy(&cal->rfatt, rfatt, sizeof(*rfatt)); memcpy(&cal->ctl, &loctl, sizeof(loctl)); BWN_GETTIME(cal->calib_time); return (cal); } static struct bwn_lo_calib * bwn_lo_get_calib(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; struct bwn_lo_calib *c; TAILQ_FOREACH(c, &lo->calib_list, list) { if (!BWN_BBATTCMP(&c->bbatt, bbatt)) continue; if (!BWN_RFATTCMP(&c->rfatt, rfatt)) continue; return (c); } c = bwn_lo_calibset(mac, bbatt, rfatt); if (!c) return (NULL); TAILQ_INSERT_TAIL(&lo->calib_list, c, list); return (c); } static void bwn_phy_g_dc_lookup_init(struct bwn_mac *mac, uint8_t update) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; const struct bwn_rfatt *rfatt; const struct bwn_bbatt *bbatt; uint64_t pvector; int i; int rf_offset, bb_offset; uint8_t changed = 0; KASSERT(BWN_DC_LT_SIZE == 32, ("%s:%d: fail", __func__, __LINE__)); KASSERT(lo->rfatt.len * lo->bbatt.len <= 64, ("%s:%d: fail", __func__, __LINE__)); pvector = lo->power_vector; if (!update && !pvector) return; bwn_mac_suspend(mac); for (i = 0; i < BWN_DC_LT_SIZE * 2; i++) { struct bwn_lo_calib *cal; int idx; uint16_t val; if (!update && !(pvector & (((uint64_t)1ULL) << i))) continue; bb_offset = i / lo->rfatt.len; rf_offset = i % lo->rfatt.len; bbatt = &(lo->bbatt.array[bb_offset]); rfatt = &(lo->rfatt.array[rf_offset]); cal = bwn_lo_calibset(mac, bbatt, rfatt); if (!cal) { device_printf(sc->sc_dev, "LO: Could not " "calibrate DC table entry\n"); continue; } val = (uint8_t)(cal->ctl.q); val |= ((uint8_t)(cal->ctl.i)) << 4; free(cal, M_DEVBUF); idx = i / 2; if (i % 2) lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00ff) | ((val & 0x00ff) << 8); else lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xff00) | (val & 0x00ff); changed = 1; } if (changed) { for (i = 0; i < BWN_DC_LT_SIZE; i++) BWN_PHY_WRITE(mac, 0x3a0 + i, lo->dc_lt[i]); } bwn_mac_enable(mac); } static void bwn_lo_fixup_rfatt(struct bwn_rfatt *rf) { if (!rf->padmix) return; if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3)) rf->att = 4; } static void bwn_lo_g_adjust(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; struct bwn_lo_calib *cal; struct bwn_rfatt rf; memcpy(&rf, &pg->pg_rfatt, sizeof(rf)); bwn_lo_fixup_rfatt(&rf); cal = bwn_lo_get_calib(mac, &pg->pg_bbatt, &rf); if (!cal) return; bwn_lo_write(mac, &cal->ctl); } static void bwn_lo_g_init(struct bwn_mac *mac) { if (!bwn_has_hwpctl(mac)) return; bwn_lo_get_powervector(mac); bwn_phy_g_dc_lookup_init(mac, 1); } static void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac, 
BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } static void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t state; state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) device_printf(sc->sc_dev, "warn: firmware state (%d)\n", state); mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } static void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if (siba_get_revid(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } } static int16_t bwn_nrssi_read(struct bwn_mac *mac, uint16_t offset) { BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, offset); return ((int16_t)BWN_PHY_READ(mac, BWN_PHY_NRSSI_DATA)); } static void bwn_nrssi_threshold(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int32_t a, b; int16_t tmp16; uint16_t tmpu16; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__)); if (phy->gmode && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) { if (!pg->pg_aci_wlan_automatic && pg->pg_aci_enable) { a = 0x13; b = 0x12; } else { a = 0xe; b = 0x11; } a = a * (pg->pg_nrssi[1] - pg->pg_nrssi[0]); a += (pg->pg_nrssi[0] << 6); a += (a < 32) ? 31 : 32; a = a >> 6; a = MIN(MAX(a, -31), 31); b = b * (pg->pg_nrssi[1] - pg->pg_nrssi[0]); b += (pg->pg_nrssi[0] << 6); if (b < 32) b += 31; else b += 32; b = b >> 6; b = MIN(MAX(b, -31), 31); tmpu16 = BWN_PHY_READ(mac, 0x048a) & 0xf000; tmpu16 |= ((uint32_t)b & 0x0000003f); tmpu16 |= (((uint32_t)a & 0x0000003f) << 6); BWN_PHY_WRITE(mac, 0x048a, tmpu16); return; } tmp16 = bwn_nrssi_read(mac, 0x20); if (tmp16 >= 0x20) tmp16 -= 0x40; BWN_PHY_SETMASK(mac, 0x048a, 0xf000, (tmp16 < 3) ? 
0x09eb : 0x0aed); } static void bwn_nrssi_slope_11g(struct bwn_mac *mac) { #define SAVE_RF_MAX 3 #define SAVE_PHY_COMM_MAX 4 #define SAVE_PHY3_MAX 8 static const uint16_t save_rf_regs[SAVE_RF_MAX] = { 0x7a, 0x52, 0x43 }; static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = { 0x15, 0x5a, 0x59, 0x58 }; static const uint16_t save_phy3_regs[SAVE_PHY3_MAX] = { 0x002e, 0x002f, 0x080f, BWN_PHY_G_LOCTL, 0x0801, 0x0060, 0x0014, 0x0478 }; struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; int32_t i, tmp32, phy3_idx = 0; uint16_t delta, tmp; uint16_t save_rf[SAVE_RF_MAX]; uint16_t save_phy_comm[SAVE_PHY_COMM_MAX]; uint16_t save_phy3[SAVE_PHY3_MAX]; uint16_t ant_div, phy0, chan_ex; int16_t nrssi0, nrssi1; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if (phy->rf_rev >= 9) return; if (phy->rf_rev == 8) bwn_nrssi_offset(mac); BWN_PHY_MASK(mac, BWN_PHY_G_CRS, 0x7fff); BWN_PHY_MASK(mac, 0x0802, 0xfffc); /* * Save RF/PHY registers for later restoration */ ant_div = BWN_READ_2(mac, 0x03e2); BWN_WRITE_2(mac, 0x03e2, BWN_READ_2(mac, 0x03e2) | 0x8000); for (i = 0; i < SAVE_RF_MAX; ++i) save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]); for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]); phy0 = BWN_READ_2(mac, BWN_PHY0); chan_ex = BWN_READ_2(mac, BWN_CHANNEL_EXT); if (phy->rev >= 3) { for (i = 0; i < SAVE_PHY3_MAX; ++i) save_phy3[i] = BWN_PHY_READ(mac, save_phy3_regs[i]); BWN_PHY_WRITE(mac, 0x002e, 0); BWN_PHY_WRITE(mac, BWN_PHY_G_LOCTL, 0); switch (phy->rev) { case 4: case 6: case 7: BWN_PHY_SET(mac, 0x0478, 0x0100); BWN_PHY_SET(mac, 0x0801, 0x0040); break; case 3: case 5: BWN_PHY_MASK(mac, 0x0801, 0xffbf); break; } BWN_PHY_SET(mac, 0x0060, 0x0040); BWN_PHY_SET(mac, 0x0014, 0x0200); } /* * Calculate nrssi0 */ BWN_RF_SET(mac, 0x007a, 0x0070); bwn_set_all_gains(mac, 0, 8, 0); BWN_RF_MASK(mac, 0x007a, 0x00f7); if (phy->rev >= 2) { BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0030); BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0010); } BWN_RF_SET(mac, 0x007a, 0x0080); DELAY(20); nrssi0 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi0 >= 0x0020) nrssi0 -= 0x0040; /* * Calculate nrssi1 */ BWN_RF_MASK(mac, 0x007a, 0x007f); if (phy->rev >= 2) BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000); BWN_RF_SET(mac, 0x007a, 0x000f); BWN_PHY_WRITE(mac, 0x0015, 0xf330); if (phy->rev >= 2) { BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0020); BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0020); } bwn_set_all_gains(mac, 3, 0, 1); if (phy->rf_rev == 8) { BWN_RF_WRITE(mac, 0x0043, 0x001f); } else { tmp = BWN_RF_READ(mac, 0x0052) & 0xff0f; BWN_RF_WRITE(mac, 0x0052, tmp | 0x0060); tmp = BWN_RF_READ(mac, 0x0043) & 0xfff0; BWN_RF_WRITE(mac, 0x0043, tmp | 0x0009); } BWN_PHY_WRITE(mac, 0x005a, 0x0480); BWN_PHY_WRITE(mac, 0x0059, 0x0810); BWN_PHY_WRITE(mac, 0x0058, 0x000d); DELAY(20); nrssi1 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); /* * Install calculated narrow RSSI values */ if (nrssi1 >= 0x0020) nrssi1 -= 0x0040; if (nrssi0 == nrssi1) pg->pg_nrssi_slope = 0x00010000; else pg->pg_nrssi_slope = 0x00400000 / (nrssi0 - nrssi1); if (nrssi0 >= -4) { pg->pg_nrssi[0] = nrssi1; pg->pg_nrssi[1] = nrssi0; } /* * Restore saved RF/PHY registers */ if (phy->rev >= 3) { for (phy3_idx = 0; phy3_idx < 4; ++phy3_idx) { BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx], save_phy3[phy3_idx]); } } if (phy->rev >= 2) { BWN_PHY_MASK(mac, 0x0812, 0xffcf); BWN_PHY_MASK(mac, 0x0811, 
0xffcf); } for (i = 0; i < SAVE_RF_MAX; ++i) BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]); BWN_WRITE_2(mac, 0x03e2, ant_div); BWN_WRITE_2(mac, 0x03e6, phy0); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, chan_ex); for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); bwn_spu_workaround(mac, phy->chan); BWN_PHY_SET(mac, 0x0802, (0x0001 | 0x0002)); bwn_set_original_gains(mac); BWN_PHY_SET(mac, BWN_PHY_G_CRS, 0x8000); if (phy->rev >= 3) { for (; phy3_idx < SAVE_PHY3_MAX; ++phy3_idx) { BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx], save_phy3[phy3_idx]); } } delta = 0x1f - pg->pg_nrssi[0]; for (i = 0; i < 64; i++) { tmp32 = (((i - delta) * pg->pg_nrssi_slope) / 0x10000) + 0x3a; tmp32 = MIN(MAX(tmp32, 0), 0x3f); pg->pg_nrssi_lt[i] = tmp32; } bwn_nrssi_threshold(mac); #undef SAVE_RF_MAX #undef SAVE_PHY_COMM_MAX #undef SAVE_PHY3_MAX } static void bwn_nrssi_offset(struct bwn_mac *mac) { #define SAVE_RF_MAX 2 #define SAVE_PHY_COMM_MAX 10 #define SAVE_PHY6_MAX 8 static const uint16_t save_rf_regs[SAVE_RF_MAX] = { 0x7a, 0x43 }; static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = { 0x0001, 0x0811, 0x0812, 0x0814, 0x0815, 0x005a, 0x0059, 0x0058, 0x000a, 0x0003 }; static const uint16_t save_phy6_regs[SAVE_PHY6_MAX] = { 0x002e, 0x002f, 0x080f, 0x0810, 0x0801, 0x0060, 0x0014, 0x0478 }; struct bwn_phy *phy = &mac->mac_phy; int i, phy6_idx = 0; uint16_t save_rf[SAVE_RF_MAX]; uint16_t save_phy_comm[SAVE_PHY_COMM_MAX]; uint16_t save_phy6[SAVE_PHY6_MAX]; int16_t nrssi; uint16_t saved = 0xffff; for (i = 0; i < SAVE_PHY_COMM_MAX; ++i) save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]); for (i = 0; i < SAVE_RF_MAX; ++i) save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]); BWN_PHY_MASK(mac, 0x0429, 0x7fff); BWN_PHY_SETMASK(mac, 0x0001, 0x3fff, 0x4000); BWN_PHY_SET(mac, 0x0811, 0x000c); BWN_PHY_SETMASK(mac, 0x0812, 0xfff3, 0x0004); BWN_PHY_MASK(mac, 0x0802, ~(0x1 | 0x2)); if (phy->rev >= 6) { for (i = 0; i < SAVE_PHY6_MAX; ++i) save_phy6[i] = BWN_PHY_READ(mac, save_phy6_regs[i]); BWN_PHY_WRITE(mac, 0x002e, 0); BWN_PHY_WRITE(mac, 0x002f, 0); BWN_PHY_WRITE(mac, 0x080f, 0); BWN_PHY_WRITE(mac, 0x0810, 0); BWN_PHY_SET(mac, 0x0478, 0x0100); BWN_PHY_SET(mac, 0x0801, 0x0040); BWN_PHY_SET(mac, 0x0060, 0x0040); BWN_PHY_SET(mac, 0x0014, 0x0200); } BWN_RF_SET(mac, 0x007a, 0x0070); BWN_RF_SET(mac, 0x007a, 0x0080); DELAY(30); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi == 31) { for (i = 7; i >= 4; i--) { BWN_RF_WRITE(mac, 0x007b, i); DELAY(20); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi < 31 && saved == 0xffff) saved = i; } if (saved == 0xffff) saved = 4; } else { BWN_RF_MASK(mac, 0x007a, 0x007f); if (phy->rev != 1) { BWN_PHY_SET(mac, 0x0814, 0x0001); BWN_PHY_MASK(mac, 0x0815, 0xfffe); } BWN_PHY_SET(mac, 0x0811, 0x000c); BWN_PHY_SET(mac, 0x0812, 0x000c); BWN_PHY_SET(mac, 0x0811, 0x0030); BWN_PHY_SET(mac, 0x0812, 0x0030); BWN_PHY_WRITE(mac, 0x005a, 0x0480); BWN_PHY_WRITE(mac, 0x0059, 0x0810); BWN_PHY_WRITE(mac, 0x0058, 0x000d); if (phy->rev == 0) BWN_PHY_WRITE(mac, 0x0003, 0x0122); else BWN_PHY_SET(mac, 0x000a, 0x2000); if (phy->rev != 1) { BWN_PHY_SET(mac, 0x0814, 0x0004); BWN_PHY_MASK(mac, 0x0815, 0xfffb); } BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040); BWN_RF_SET(mac, 0x007a, 0x000f); bwn_set_all_gains(mac, 3, 0, 1); BWN_RF_SETMASK(mac, 0x0043, 0x00f0, 0x000f); DELAY(30); nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) 
nrssi -= 0x40; if (nrssi == -32) { for (i = 0; i < 4; i++) { BWN_RF_WRITE(mac, 0x007b, i); DELAY(20); nrssi = (int16_t)((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f); if (nrssi >= 0x20) nrssi -= 0x40; if (nrssi > -31 && saved == 0xffff) saved = i; } if (saved == 0xffff) saved = 3; } else saved = 0; } BWN_RF_WRITE(mac, 0x007b, saved); /* * Restore saved RF/PHY registers */ if (phy->rev >= 6) { for (phy6_idx = 0; phy6_idx < 4; ++phy6_idx) { BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx], save_phy6[phy6_idx]); } } if (phy->rev != 1) { for (i = 3; i < 5; i++) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); } for (i = 5; i < SAVE_PHY_COMM_MAX; i++) BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]); for (i = SAVE_RF_MAX - 1; i >= 0; --i) BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]); BWN_PHY_WRITE(mac, 0x0802, BWN_PHY_READ(mac, 0x0802) | 0x1 | 0x2); BWN_PHY_SET(mac, 0x0429, 0x8000); bwn_set_original_gains(mac); if (phy->rev >= 6) { for (; phy6_idx < SAVE_PHY6_MAX; ++phy6_idx) { BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx], save_phy6[phy6_idx]); } } BWN_PHY_WRITE(mac, save_phy_comm_regs[0], save_phy_comm[0]); BWN_PHY_WRITE(mac, save_phy_comm_regs[2], save_phy_comm[2]); BWN_PHY_WRITE(mac, save_phy_comm_regs[1], save_phy_comm[1]); } static void bwn_set_all_gains(struct bwn_mac *mac, int16_t first, int16_t second, int16_t third) { struct bwn_phy *phy = &mac->mac_phy; uint16_t i; uint16_t start = 0x08, end = 0x18; uint16_t tmp; uint16_t table; if (phy->rev <= 1) { start = 0x10; end = 0x20; } table = BWN_OFDMTAB_GAINX; if (phy->rev <= 1) table = BWN_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) bwn_ofdmtab_write_2(mac, table, i, first); for (i = start; i < end; i++) bwn_ofdmtab_write_2(mac, table, i, second); if (third != -1) { tmp = ((uint16_t) third << 14) | ((uint16_t) third << 6); BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, tmp); BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, tmp); BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, tmp); } bwn_dummy_transmission(mac, 0, 1); } static void bwn_set_original_gains(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; uint16_t i, tmp; uint16_t table; uint16_t start = 0x0008, end = 0x0018; if (phy->rev <= 1) { start = 0x0010; end = 0x0020; } table = BWN_OFDMTAB_GAINX; if (phy->rev <= 1) table = BWN_OFDMTAB_GAINX_R1; for (i = 0; i < 4; i++) { tmp = (i & 0xfffc); tmp |= (i & 0x0001) << 1; tmp |= (i & 0x0002) >> 1; bwn_ofdmtab_write_2(mac, table, i, tmp); } for (i = start; i < end; i++) bwn_ofdmtab_write_2(mac, table, i, i - start); BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, 0x4040); BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, 0x4040); BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, 0x4000); bwn_dummy_transmission(mac, 0, 1); } static void bwn_phy_hwpctl_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_rfatt old_rfatt, rfatt; struct bwn_bbatt old_bbatt, bbatt; struct bwn_softc *sc = mac->mac_sc; uint8_t old_txctl = 0; KASSERT(phy->type == BWN_PHYTYPE_G, ("%s:%d: fail", __func__, __LINE__)); if ((siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM) && (siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306)) return; BWN_PHY_WRITE(mac, 0x0028, 0x8018); BWN_WRITE_2(mac, BWN_PHY0, BWN_READ_2(mac, BWN_PHY0) & 0xffdf); if (!phy->gmode) return; bwn_hwpctl_early_init(mac); if (pg->pg_curtssi == 0) { if (phy->rf_ver == 0x2050 && phy->analog == 0) { BWN_RF_SETMASK(mac, 0x0076, 0x00f7, 0x0084); } else { memcpy(&old_rfatt, &pg->pg_rfatt, sizeof(old_rfatt)); memcpy(&old_bbatt, &pg->pg_bbatt, sizeof(old_bbatt)); old_txctl = 
pg->pg_txctl; bbatt.att = 11; if (phy->rf_rev == 8) { rfatt.att = 15; rfatt.padmix = 1; } else { rfatt.att = 9; rfatt.padmix = 0; } bwn_phy_g_set_txpwr_sub(mac, &bbatt, &rfatt, 0); } bwn_dummy_transmission(mac, 0, 1); pg->pg_curtssi = BWN_PHY_READ(mac, BWN_PHY_TSSI); if (phy->rf_ver == 0x2050 && phy->analog == 0) BWN_RF_MASK(mac, 0x0076, 0xff7b); else bwn_phy_g_set_txpwr_sub(mac, &old_bbatt, &old_rfatt, old_txctl); } bwn_hwpctl_init_gphy(mac); /* clear TSSI */ bwn_shm_write_2(mac, BWN_SHARED, 0x0058, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x005a, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x0070, 0x7f7f); bwn_shm_write_2(mac, BWN_SHARED, 0x0072, 0x7f7f); } static void bwn_hwpctl_early_init(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; if (!bwn_has_hwpctl(mac)) { BWN_PHY_WRITE(mac, 0x047a, 0xc111); return; } BWN_PHY_MASK(mac, 0x0036, 0xfeff); BWN_PHY_WRITE(mac, 0x002f, 0x0202); BWN_PHY_SET(mac, 0x047c, 0x0002); BWN_PHY_SET(mac, 0x047a, 0xf000); if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) { BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010); BWN_PHY_SET(mac, 0x005d, 0x8000); BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010); BWN_PHY_WRITE(mac, 0x002e, 0xc07f); BWN_PHY_SET(mac, 0x0036, 0x0400); } else { BWN_PHY_SET(mac, 0x0036, 0x0200); BWN_PHY_SET(mac, 0x0036, 0x0400); BWN_PHY_MASK(mac, 0x005d, 0x7fff); BWN_PHY_MASK(mac, 0x004f, 0xfffe); BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010); BWN_PHY_WRITE(mac, 0x002e, 0xc07f); BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010); } } static void bwn_hwpctl_init_gphy(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; int i; uint16_t nr_written = 0, tmp, value; uint8_t rf, bb; if (!bwn_has_hwpctl(mac)) { bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_HW_POWERCTL); return; } BWN_PHY_SETMASK(mac, 0x0036, 0xffc0, (pg->pg_idletssi - pg->pg_curtssi)); BWN_PHY_SETMASK(mac, 0x0478, 0xff00, (pg->pg_idletssi - pg->pg_curtssi)); for (i = 0; i < 32; i++) bwn_ofdmtab_write_2(mac, 0x3c20, i, pg->pg_tssi2dbm[i]); for (i = 32; i < 64; i++) bwn_ofdmtab_write_2(mac, 0x3c00, i - 32, pg->pg_tssi2dbm[i]); for (i = 0; i < 64; i += 2) { value = (uint16_t) pg->pg_tssi2dbm[i]; value |= ((uint16_t) pg->pg_tssi2dbm[i + 1]) << 8; BWN_PHY_WRITE(mac, 0x380 + (i / 2), value); } for (rf = 0; rf < lo->rfatt.len; rf++) { for (bb = 0; bb < lo->bbatt.len; bb++) { if (nr_written >= 0x40) return; tmp = lo->bbatt.array[bb].att; tmp <<= 8; if (phy->rf_rev == 8) tmp |= 0x50; else tmp |= 0x40; tmp |= lo->rfatt.array[rf].att; BWN_PHY_WRITE(mac, 0x3c0 + nr_written, tmp); nr_written++; } } BWN_PHY_MASK(mac, 0x0060, 0xffbf); BWN_PHY_WRITE(mac, 0x0014, 0x0000); KASSERT(phy->rev >= 6, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_SET(mac, 0x0478, 0x0800); BWN_PHY_MASK(mac, 0x0478, 0xfeff); BWN_PHY_MASK(mac, 0x0801, 0xffbf); bwn_phy_g_dc_lookup_init(mac, 1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_HW_POWERCTL); } static void bwn_phy_g_switch_chan(struct bwn_mac *mac, int channel, uint8_t spu) { struct bwn_softc *sc = mac->mac_sc; if (spu != 0) bwn_spu_workaround(mac, channel); BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel)); if (channel == 14) { if (siba_sprom_get_ccode(sc->sc_dev) == SIBA_CCODE_JAPAN) bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_JAPAN_CHAN14_OFF); else bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_JAPAN_CHAN14_OFF); BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) | (1 << 11)); return; } BWN_WRITE_2(mac, BWN_CHANNEL_EXT, BWN_READ_2(mac, BWN_CHANNEL_EXT) & 0xf7bf); 
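	/*
	 * Editorial sketch (not part of the original diff): the
	 * bwn_phy_g_chan2freq() helper that follows looks the value
	 * written to BWN_CHANNEL up in a BWN_PHY_G_RF_CHANNELS table.
	 * Assuming those codes follow the common "MHz - 2400" encoding
	 * used by 2.4 GHz radios of this family, an equivalent
	 * computation would be the hypothetical helper below
	 * (channels 1-13 sit at 2407 + 5 * chan MHz, channel 14 at
	 * 2484 MHz):
	 *
	 *	static uint16_t
	 *	g_chan2code(uint8_t chan)
	 *	{
	 *		uint16_t mhz = (chan == 14) ? 2484 : 2407 + 5 * chan;
	 *		return (mhz - 2400);	// e.g. channel 1 -> 12
	 *	}
	 */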
} static uint16_t bwn_phy_g_chan2freq(uint8_t channel) { static const uint8_t bwn_phy_g_rf_channels[] = BWN_PHY_G_RF_CHANNELS; KASSERT(channel >= 1 && channel <= 14, ("%s:%d: fail", __func__, __LINE__)); return (bwn_phy_g_rf_channels[channel - 1]); } static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *mac, const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt, uint8_t txctl) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_txpwr_loctl *lo = &pg->pg_loctl; uint16_t bb, rf; uint16_t tx_bias, tx_magn; bb = bbatt->att; rf = rfatt->att; tx_bias = lo->tx_bias; tx_magn = lo->tx_magn; if (tx_bias == 0xff) tx_bias = 0; pg->pg_txctl = txctl; memmove(&pg->pg_rfatt, rfatt, sizeof(*rfatt)); pg->pg_rfatt.padmix = (txctl & BWN_TXCTL_TXMIX) ? 1 : 0; memmove(&pg->pg_bbatt, bbatt, sizeof(*bbatt)); bwn_phy_g_set_bbatt(mac, bb); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RADIO_ATT, rf); if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) BWN_RF_WRITE(mac, 0x43, (rf & 0x000f) | (txctl & 0x0070)); else { BWN_RF_SETMASK(mac, 0x43, 0xfff0, (rf & 0x000f)); BWN_RF_SETMASK(mac, 0x52, ~0x0070, (txctl & 0x0070)); } if (BWN_HAS_TXMAG(phy)) BWN_RF_WRITE(mac, 0x52, tx_magn | tx_bias); else BWN_RF_SETMASK(mac, 0x52, 0xfff0, (tx_bias & 0x000f)); bwn_lo_g_adjust(mac); } static void bwn_phy_g_set_bbatt(struct bwn_mac *mac, uint16_t bbatt) { struct bwn_phy *phy = &mac->mac_phy; if (phy->analog == 0) { BWN_WRITE_2(mac, BWN_PHY0, (BWN_READ_2(mac, BWN_PHY0) & 0xfff0) | bbatt); return; } if (phy->analog > 1) { BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xffc3, bbatt << 2); return; } BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xff87, bbatt << 3); } static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *mac, uint16_t reg, uint32_t lpd) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *pg = &phy->phy_g; struct bwn_softc *sc = mac->mac_sc; int max_lb_gain; uint16_t extlna; uint16_t i; if (phy->gmode == 0) return (0); if (BWN_HAS_LOOPBACK(phy)) { max_lb_gain = pg->pg_max_lb_gain; max_lb_gain += (phy->rf_rev == 8) ? 
0x3e : 0x26; if (max_lb_gain >= 0x46) { extlna = 0x3000; max_lb_gain -= 0x46; } else if (max_lb_gain >= 0x3a) { extlna = 0x1000; max_lb_gain -= 0x3a; } else if (max_lb_gain >= 0x2e) { extlna = 0x2000; max_lb_gain -= 0x2e; } else { extlna = 0; max_lb_gain -= 0x10; } for (i = 0; i < 16; i++) { max_lb_gain -= (i * 6); if (max_lb_gain < 6) break; } if ((phy->rev < 7) || !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { if (reg == BWN_PHY_RFOVER) { return (0x1b3); } else if (reg == BWN_PHY_RFOVERVAL) { extlna |= (i << 8); switch (lpd) { case BWN_LPD(0, 1, 1): return (0x0f92); case BWN_LPD(0, 0, 1): case BWN_LPD(1, 0, 1): return (0x0092 | extlna); case BWN_LPD(1, 0, 0): return (0x0093 | extlna); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } else { if (reg == BWN_PHY_RFOVER) return (0x9b3); if (reg == BWN_PHY_RFOVERVAL) { if (extlna) extlna |= 0x8000; extlna |= (i << 8); switch (lpd) { case BWN_LPD(0, 1, 1): return (0x8f92); case BWN_LPD(0, 0, 1): return (0x8092 | extlna); case BWN_LPD(1, 0, 1): return (0x2092 | extlna); case BWN_LPD(1, 0, 0): return (0x2093 | extlna); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } return (0); } if ((phy->rev < 7) || !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) { if (reg == BWN_PHY_RFOVER) { return (0x1b3); } else if (reg == BWN_PHY_RFOVERVAL) { switch (lpd) { case BWN_LPD(0, 1, 1): return (0x0fb2); case BWN_LPD(0, 0, 1): return (0x00b2); case BWN_LPD(1, 0, 1): return (0x30b2); case BWN_LPD(1, 0, 0): return (0x30b3); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } else { if (reg == BWN_PHY_RFOVER) { return (0x9b3); } else if (reg == BWN_PHY_RFOVERVAL) { switch (lpd) { case BWN_LPD(0, 1, 1): return (0x8fb2); case BWN_LPD(0, 0, 1): return (0x80b2); case BWN_LPD(1, 0, 1): return (0x20b2); case BWN_LPD(1, 0, 0): return (0x20b3); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } return (0); } static void bwn_spu_workaround(struct bwn_mac *mac, uint8_t channel) { if (mac->mac_phy.rf_ver != 0x2050 || mac->mac_phy.rf_rev >= 6) return; BWN_WRITE_2(mac, BWN_CHANNEL, (channel <= 10) ? 
bwn_phy_g_chan2freq(channel + 4) : bwn_phy_g_chan2freq(1)); DELAY(1000); BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel)); } static int bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = siba_get_revid(sc->sc_dev); const char *filename; uint32_t high; int error; /* microcode */ if (rev >= 5 && rev <= 10) filename = "ucode5"; else if (rev >= 11 && rev <= 12) filename = "ucode11"; else if (rev == 13) filename = "ucode13"; else if (rev == 14) filename = "ucode14"; else if (rev >= 15) filename = "ucode15"; else { device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); return (EOPNOTSUPP); } /* initvals */ high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if 
(mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 1 : 0; offset &= BWN_FWINITVALS_OFFSET_MASK; if (offset >= 0x1000) goto fail; if (bit32) { if (array_size < sizeof(iv->data.d32)) goto fail; array_size -= sizeof(iv->data.d32); BWN_WRITE_4(mac, offset, be32toh(iv->data.d32)); iv = GET_NEXTIV32(iv); } else { if (array_size < sizeof(iv->data.d16)) goto fail; array_size -= sizeof(iv->data.d16); BWN_WRITE_2(mac, offset, be16toh(iv->data.d16)); iv = GET_NEXTIV16(iv); } } if (array_size != 0) goto fail; return (0); fail: device_printf(sc->sc_dev, "initvals: invalid format\n"); return (EPROTO); #undef GET_NEXTIV16 #undef GET_NEXTIV32 } static int bwn_switch_channel(struct bwn_mac *mac, int chan) { struct bwn_phy *phy = &(mac->mac_phy); struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t channelcookie, savedcookie; int error; if (chan == 0xffff) chan = phy->get_default_chan(mac); channelcookie = chan; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) channelcookie |= 0x100; savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie); error = phy->switch_channel(mac, chan); if (error) goto fail; mac->mac_phy.chan = chan; DELAY(8000); return (0); fail: device_printf(sc->sc_dev, "failed to switch channel\n"); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie); return (error); } static uint16_t bwn_ant2phy(int antenna) { switch (antenna) { case BWN_ANT0: return (BWN_TX_PHY_ANT0); case BWN_ANT1: return (BWN_TX_PHY_ANT1); case BWN_ANT2: return (BWN_TX_PHY_ANT2); case BWN_ANT3: return (BWN_TX_PHY_ANT3); case BWN_ANTAUTO: return (BWN_TX_PHY_ANT01AUTO); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_wme_load(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), 
("%s:%d: fail", __func__, __LINE__)); bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]), bwn_wme_shm_offsets[i]); bwn_mac_enable(mac); } static void bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p, uint16_t shm_offset) { #define SM(_v, _f) (((_v) << _f##_S) & _f) struct bwn_softc *sc = mac->mac_sc; uint16_t params[BWN_NR_WMEPARAMS]; int slot, tmp; unsigned int i; slot = BWN_READ_2(mac, BWN_RNG) & SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); memset(¶ms, 0, sizeof(params)); DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d " "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit, p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn); params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32; params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX); params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn; params[BWN_WMEPARAM_BSLOTS] = slot; params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn; for (i = 0; i < N(params); i++) { if (i == BWN_WMEPARAM_STATUS) { tmp = bwn_shm_read_2(mac, BWN_SHARED, shm_offset + (i * 2)); tmp |= 0x100; bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), tmp); } else { bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), params[i]); } } } static void bwn_mac_write_bssid(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; int i; uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2]; bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid); memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN); memcpy(mac_bssid + IEEE80211_ADDR_LEN, sc->sc_bssid, IEEE80211_ADDR_LEN); for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) { tmp = (uint32_t) (mac_bssid[i + 0]); tmp |= (uint32_t) (mac_bssid[i + 1]) << 8; tmp |= (uint32_t) (mac_bssid[i + 2]) << 16; tmp |= (uint32_t) (mac_bssid[i + 3]) << 24; bwn_ram_write(mac, 0x20 + i, tmp); } } static void bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset, const uint8_t *macaddr) { static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 }; uint16_t data; if (!mac) macaddr = zero; offset |= 0x0020; BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset); data = macaddr[0]; data |= macaddr[1] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[2]; data |= macaddr[3] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[4]; data |= macaddr[5] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); } static void bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key, size_t key_len, const uint8_t *mac_addr) { uint8_t buf[BWN_SEC_KEYSIZE] = { 0, }; uint8_t per_sta_keys_start = 8; if (BWN_SEC_NEWAPI(mac)) per_sta_keys_start = 4; KASSERT(index < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); KASSERT(key_len <= BWN_SEC_KEYSIZE, ("%s:%d: fail", __func__, __LINE__)); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, NULL); if (key) memcpy(buf, key, key_len); bwn_key_write(mac, index, algorithm, buf); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, mac_addr); mac->mac_key[index].algorithm = algorithm; } static void bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr) { struct bwn_softc *sc = mac->mac_sc; uint32_t addrtmp[2] = { 0, 0 }; uint8_t start = 8; if (BWN_SEC_NEWAPI(mac)) start = 4; KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__)); index -= start; if (addr) { addrtmp[0] = addr[0]; addrtmp[0] 
|= ((uint32_t) (addr[1]) << 8); addrtmp[0] |= ((uint32_t) (addr[2]) << 16); addrtmp[0] |= ((uint32_t) (addr[3]) << 24); addrtmp[1] = addr[4]; addrtmp[1] |= ((uint32_t) (addr[5]) << 8); } if (siba_get_revid(sc->sc_dev) >= 5) { bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]); } else { if (index >= 8) { bwn_shm_write_4(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]); } } } static void bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key) { unsigned int i; uint32_t offset; uint16_t kidx, value; kidx = BWN_SEC_KEY2FW(mac, index); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm); offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE); for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) { value = key[i]; value |= (uint16_t)(key[i + 1]) << 8; bwn_shm_write_2(mac, BWN_SHARED, offset + i, value); } } static void bwn_phy_exit(struct bwn_mac *mac) { mac->mac_phy.rf_onoff(mac, 0); if (mac->mac_phy.exit != NULL) mac->mac_phy.exit(mac); } static void bwn_dma_free(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringfree(&dma->rx); bwn_dma_ringfree(&dma->wme[WME_AC_BK]); bwn_dma_ringfree(&dma->wme[WME_AC_BE]); bwn_dma_ringfree(&dma->wme[WME_AC_VI]); bwn_dma_ringfree(&dma->wme[WME_AC_VO]); bwn_dma_ringfree(&dma->mcast); } static void bwn_core_stop(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return; callout_stop(&sc->sc_rfswitch_ch); callout_stop(&sc->sc_task_ch); callout_stop(&sc->sc_watchdog_ch); sc->sc_watchdog_timer = 0; BWN_WRITE_4(mac, BWN_INTR_MASK, 0); BWN_READ_4(mac, BWN_INTR_MASK); bwn_mac_suspend(mac); mac->mac_status = BWN_MAC_STATUS_INITED; } static int bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan) { struct bwn_mac *up_dev = NULL; struct bwn_mac *down_dev; struct bwn_mac *mac; int err, status; uint8_t gmode; BWN_ASSERT_LOCKED(sc); TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) { if (IEEE80211_IS_CHAN_2GHZ(chan) && mac->mac_phy.supports_2ghz) { up_dev = mac; gmode = 1; } else if (IEEE80211_IS_CHAN_5GHZ(chan) && mac->mac_phy.supports_5ghz) { up_dev = mac; gmode = 0; } else { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (EINVAL); } if (up_dev != NULL) break; } if (up_dev == NULL) { device_printf(sc->sc_dev, "Could not find a device\n"); return (ENODEV); } if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode) return (0); device_printf(sc->sc_dev, "switching to %s-GHz band\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); down_dev = sc->sc_curmac; status = down_dev->mac_status; if (status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(down_dev); if (status >= BWN_MAC_STATUS_INITED) bwn_core_exit(down_dev); if (down_dev != up_dev) bwn_phy_reset(down_dev); up_dev->mac_phy.gmode = gmode; if (status >= BWN_MAC_STATUS_INITED) { err = bwn_core_init(up_dev); if (err) { device_printf(sc->sc_dev, "fatal: failed to initialize for %s-GHz\n", IEEE80211_IS_CHAN_2GHZ(chan) ? 
"2" : "5"); goto fail; } } if (status >= BWN_MAC_STATUS_STARTED) bwn_core_start(up_dev); KASSERT(up_dev->mac_status == status, ("%s: fail", __func__)); sc->sc_curmac = up_dev; return (0); fail: sc->sc_curmac = NULL; return (err); } static void bwn_rf_turnon(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 1); mac->mac_phy.rf_on = 1; bwn_mac_enable(mac); } static void bwn_rf_turnoff(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 0); mac->mac_phy.rf_on = 0; bwn_mac_enable(mac); } static void bwn_phy_reset(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; siba_write_4(sc->sc_dev, SIBA_TGSLOW, ((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) | BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, (siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC) | BWN_TGSLOW_PHYRESET); DELAY(1000); } static int bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwn_vap *bvp = BWN_VAP(vap); struct ieee80211com *ic= vap->iv_ic; enum ieee80211_state ostate = vap->iv_state; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) return (error); BWN_LOCK(sc); bwn_led_newstate(mac, nstate); /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... */ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
	    120 : 250;
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt);
	BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt);
}

static int
bwn_intr(void *arg)
{
	struct bwn_mac *mac = arg;
	struct bwn_softc *sc = mac->mac_sc;
	uint32_t reason;

	if (mac->mac_status < BWN_MAC_STATUS_STARTED ||
	    (sc->sc_flags & BWN_FLAG_INVALID))
		return (FILTER_STRAY);

	reason = BWN_READ_4(mac, BWN_INTR_REASON);
	if (reason == 0xffffffff)	/* shared IRQ */
		return (FILTER_STRAY);
	reason &= mac->mac_intr_mask;
	if (reason == 0)
		return (FILTER_HANDLED);

	mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00;
	mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00;
	mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00;
	mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00;
	mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00;
	BWN_WRITE_4(mac, BWN_INTR_REASON, reason);
	BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]);
	BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]);
	BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]);
	BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]);
	BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]);

	/* Disable interrupts. */
	BWN_WRITE_4(mac, BWN_INTR_MASK, 0);

	mac->mac_reason_intr = reason;

	BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ);
	BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);

-	taskqueue_enqueue_fast(sc->sc_tq, &mac->mac_intrtask);
+	taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask);
	return (FILTER_HANDLED);
}

static void
bwn_intrtask(void *arg, int npending)
{
	struct bwn_mac *mac = arg;
	struct bwn_softc *sc = mac->mac_sc;
	uint32_t merged = 0;
	int i, tx = 0, rx = 0;

	BWN_LOCK(sc);
	if (mac->mac_status < BWN_MAC_STATUS_STARTED ||
	    (sc->sc_flags & BWN_FLAG_INVALID)) {
		BWN_UNLOCK(sc);
		return;
	}

	for (i = 0; i < N(mac->mac_reason); i++)
		merged |= mac->mac_reason[i];

	if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR)
		device_printf(sc->sc_dev, "MAC trans error\n");

	if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) {
		DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__);
		mac->mac_phy.txerrors--;
		if (mac->mac_phy.txerrors == 0) {
			mac->mac_phy.txerrors = BWN_TXERROR_MAX;
			bwn_restart(mac, "PHY TX errors");
		}
	}

	if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) {
		if (merged & BWN_DMAINTR_FATALMASK) {
			device_printf(sc->sc_dev,
			    "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n",
			    mac->mac_reason[0], mac->mac_reason[1],
			    mac->mac_reason[2], mac->mac_reason[3],
			    mac->mac_reason[4], mac->mac_reason[5]);
			bwn_restart(mac, "DMA error");
			BWN_UNLOCK(sc);
			return;
		}
		if (merged & BWN_DMAINTR_NONFATALMASK) {
			device_printf(sc->sc_dev,
			    "DMA error: %#x %#x %#x %#x %#x %#x\n",
			    mac->mac_reason[0], mac->mac_reason[1],
			    mac->mac_reason[2], mac->mac_reason[3],
			    mac->mac_reason[4], mac->mac_reason[5]);
		}
	}

	if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG)
		bwn_intr_ucode_debug(mac);
	if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI)
		bwn_intr_tbtt_indication(mac);
	if (mac->mac_reason_intr & BWN_INTR_ATIM_END)
		bwn_intr_atim_end(mac);
	if (mac->mac_reason_intr & BWN_INTR_BEACON)
		bwn_intr_beacon(mac);
	if (mac->mac_reason_intr & BWN_INTR_PMQ)
		bwn_intr_pmq(mac);
	if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK)
		bwn_intr_noise(mac);

	if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
		if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) {
			bwn_dma_rx(mac->mac_method.dma.rx);
			rx = 1;
		}
	} else
		rx = bwn_pio_rx(&mac->mac_method.pio.rx);

	KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
	KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
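	/*
	 * Editorial note on the bwn_intr() change above (not part of the
	 * original source): taskqueue_enqueue_fast(9) had become a plain
	 * alias for taskqueue_enqueue(9), so the interrupt filter now
	 * calls the latter directly and the alias, together with its
	 * manual page, is retired.  The deferral pattern itself is
	 * unchanged.  A minimal sketch, assuming the queue is created
	 * along these lines during attach:
	 *
	 *	TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
	 *	sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT,
	 *	    taskqueue_thread_enqueue, &sc->sc_tq);
	 *	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	 *	    device_get_nameunit(sc->sc_dev));
	 *	...
	 *	taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask);
	 */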
KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if (mbufq_first(&sc->sc_snd) != NULL) bwn_start(sc); BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; if (mac->mac_fw.opensource == 0) return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = 
BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += mac->mac_noise.noi_samples[i][j]; } average = (((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " "(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } macstat = le32toh(rxhdr->mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_len = 
m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; struct ieee80211_node *ni; struct ieee80211vap *vap; int retrycnt = 0, slot; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (status->ack) { dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } while (1) { dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_islast) { ni = meta->mt_ni; vap = ni->ni_vap; ieee80211_ratectl_tx_complete(vap, ni, status->ack ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &retrycnt, 0); break; } slot = bwn_dma_nextslot(dr, slot); } } bwn_dma_handle_txeof(mac, status); } else { if (status->ack) { tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } ni = tp->tp_ni; vap = ni->ni_vap; ieee80211_ratectl_tx_complete(vap, ni, status->ack ? IEEE80211_RATECTL_TX_SUCCESS : IEEE80211_RATECTL_TX_FAILURE, &retrycnt, 0); } bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO8_RXDATA); else siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO_RXDATA); len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } macstat = le32toh(rxhdr.mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3), prq->prq_base + BWN_PIO8_RXDATA); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1), prq->prq_base + BWN_PIO_RXDATA); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ieee80211com *ic = &sc->sc_ic; uint32_t macstat; int padding, rate, rssi = 0, noise = 0, type; uint16_t phytype, 
phystat0, phystat3, chanstat;
	unsigned char *mp = mtod(m, unsigned char *);
	static int rx_mac_dec_rpt = 0;

	BWN_ASSERT_LOCKED(sc);

	phystat0 = le16toh(rxhdr->phy_status0);
	phystat3 = le16toh(rxhdr->phy_status3);
	macstat = le32toh(rxhdr->mac_status);
	chanstat = le16toh(rxhdr->channel);
	phytype = chanstat & BWN_RX_CHAN_PHYTYPE;

	if (macstat & BWN_RX_MAC_FCSERR)
		device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n");
	if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV))
		device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n");
	if (macstat & BWN_RX_MAC_DECERR)
		goto drop;

	padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0;
	if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) {
		device_printf(sc->sc_dev, "frame too short (length=%d)\n",
		    m->m_pkthdr.len);
		goto drop;
	}
	plcp = (struct bwn_plcp6 *)(mp + padding);
	m_adj(m, sizeof(struct bwn_plcp6) + padding);
	if (m->m_pkthdr.len < IEEE80211_MIN_LEN) {
		device_printf(sc->sc_dev, "frame too short (length=%d)\n",
		    m->m_pkthdr.len);
		goto drop;
	}
	wh = mtod(m, struct ieee80211_frame_min *);

	if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50)
		device_printf(sc->sc_dev,
		    "RX decryption attempted (old %d keyidx %#x)\n",
		    BWN_ISOLDFMT(mac),
		    (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT);

	/* XXX calculating RSSI & noise & antenna */

	if (phystat0 & BWN_RX_PHYST0_OFDM)
		rate = bwn_plcp_get_ofdmrate(mac, plcp,
		    phytype == BWN_PHYTYPE_A);
	else
		rate = bwn_plcp_get_cckrate(mac, plcp);
	if (rate == -1) {
		if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP))
			goto drop;
	}
	sc->sc_rx_rate = bwn_hwrate2ieeerate(rate);

	/* RX radio tap */
	if (ieee80211_radiotap_active(ic))
		bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise);
	m_adj(m, -IEEE80211_CRC_LEN);

	rssi = rxhdr->phy.abg.rssi;
	/*
	 * XXX incorrect RSSI calculation?
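	 * The value handed to net80211 below is the raw phy.abg.rssi byte
	 * from the RX header, with no per-PHY scaling applied.  A complete
	 * conversion would be PHY-dependent; as a purely illustrative
	 * sketch (the offset names are hypothetical, not anything defined
	 * by this driver):
	 *
	 *	if (phytype == BWN_PHYTYPE_B)
	 *		rssi_dbm = rssi_raw - B_PHY_OFFSET;	(hypothetical)
	 *	else
	 *		rssi_dbm = rssi_raw - G_PHY_OFFSET;	(hypothetical)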
*/ noise = mac->mac_stats.link_noise; BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; int slot; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0); meta->mt_ni = NULL; meta->mt_m = NULL; } else KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); dr->dr_usedslot--; if (meta->mt_islast) break; slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; if (tp->tp_ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ if (tp->tp_m->m_flags & M_TXCB) ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0); ieee80211_free_node(tp->tp_ni); tp->tp_ni = NULL; } m_freem(tp->tp_m); tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); sc->sc_watchdog_timer = 0; } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ieee80211com *ic = &sc->sc_ic; unsigned long now; int result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 
1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; struct ieee80211_frame_cts *cts; struct ieee80211_frame_rts *rts; const struct ieee80211_txparam *tp; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mprot; unsigned int len; uint32_t macctl = 0; int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; /* * Find TX rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ bwn_plcp_genhdr(BWN_ISOLDFMT(mac) ? 
(struct bwn_plcp4 *)(&txhdr->body.old.plcp) :
	    (struct bwn_plcp4 *)(&txhdr->body.new.plcp),
	    m->m_pkthdr.len + IEEE80211_CRC_LEN, rate);
	bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb),
	    m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb);
	txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM :
	    BWN_TX_EFT_FB_CCK;
	txhdr->chan = phy->chan;
	phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM :
	    BWN_TX_PHY_ENC_CCK;
	if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB ||
	    rate == BWN_CCK_RATE_11MB))
		phyctl |= BWN_TX_PHY_SHORTPRMBL;

	/* XXX TX antenna selection */
	switch (bwn_antenna_sanitize(mac, 0)) {
	case 0:
		phyctl |= BWN_TX_PHY_ANT01AUTO;
		break;
	case 1:
		phyctl |= BWN_TX_PHY_ANT0;
		break;
	case 2:
		phyctl |= BWN_TX_PHY_ANT1;
		break;
	case 3:
		phyctl |= BWN_TX_PHY_ANT2;
		break;
	case 4:
		phyctl |= BWN_TX_PHY_ANT3;
		break;
	default:
		KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
	}

	if (!ismcast)
		macctl |= BWN_TX_MAC_ACK;
	macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU);
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
		macctl |= BWN_TX_MAC_LONGFRAME;

	if (ic->ic_flags & IEEE80211_F_USEPROT) {
		/* XXX RTS rate is always 1MB??? */
		rts_rate = BWN_CCK_RATE_1MB;
		rts_rate_fb = bwn_get_fbrate(rts_rate);
		protdur = ieee80211_compute_duration(ic->ic_rt,
		    m->m_pkthdr.len, rate, isshort) +
		    ieee80211_ack_duration(ic->ic_rt, rate, isshort);
		if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			cts = (struct ieee80211_frame_cts *)(BWN_ISOLDFMT(mac) ?
			    (txhdr->body.old.rts_frame) :
			    (txhdr->body.new.rts_frame));
			mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr,
			    protdur);
			KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
			bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts,
			    mprot->m_pkthdr.len);
			m_freem(mprot);
			macctl |= BWN_TX_MAC_SEND_CTSTOSELF;
			len = sizeof(struct ieee80211_frame_cts);
		} else {
			rts = (struct ieee80211_frame_rts *)(BWN_ISOLDFMT(mac) ?
			    (txhdr->body.old.rts_frame) :
			    (txhdr->body.new.rts_frame));
			protdur += ieee80211_ack_duration(ic->ic_rt, rate,
			    isshort);
			mprot = ieee80211_alloc_rts(ic, wh->i_addr1,
			    wh->i_addr2, protdur);
			KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
			bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts,
			    mprot->m_pkthdr.len);
			m_freem(mprot);
			macctl |= BWN_TX_MAC_SEND_RTSCTS;
			len = sizeof(struct ieee80211_frame_rts);
		}
		len += IEEE80211_CRC_LEN;
		bwn_plcp_genhdr((struct bwn_plcp4 *)((BWN_ISOLDFMT(mac)) ?
		    &txhdr->body.old.rts_plcp :
		    &txhdr->body.new.rts_plcp), len, rts_rate);
		bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb,
		    len, rts_rate_fb);
		protwh = (struct ieee80211_frame *)(BWN_ISOLDFMT(mac) ?
		    (&txhdr->body.old.rts_frame) :
		    (&txhdr->body.new.rts_frame));
		txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur;
		if (BWN_ISOFDMRATE(rts_rate)) {
			txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM;
			txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate);
		} else {
			txhdr->eftypes |= BWN_TX_EFT_RTS_CCK;
			txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate);
		}
		txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ?
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; } if (BWN_ISOLDFMT(mac)) txhdr->body.old.cookie = htole16(cookie); else txhdr->body.new.cookie = htole16(cookie); txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = siba_sprom_get_ant_bg(sc->sc_dev); else mask = siba_sprom_get_ant_a(sc->sc_dev); if (!(mask & (1 << (n - 1)))) return (0); return (n); } static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); case BWN_OFDM_RATE_6MB: return (BWN_CCK_RATE_5MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); siba_write_multi_4(sc->sc_dev, data, (len & ~3), tq->tq_base + BWN_PIO8_TXDATA); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *data = _data; ctl |= 
BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); siba_write_multi_2(sc->sc_dev, data, (len & ~1), tq->tq_base + BWN_PIO_TXDATA); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static int bwn_phy_shm_tssi_read(struct bwn_mac *mac, uint16_t shm_offset) { const uint8_t ofdm = (shm_offset != BWN_SHARED_TSSI_CCK); unsigned int a, b, c, d; unsigned int avg; uint32_t tmp; tmp = bwn_shm_read_4(mac, BWN_SHARED, shm_offset); a = tmp & 0xff; b = (tmp >> 8) & 0xff; c = (tmp >> 16) & 0xff; d = (tmp >> 24) & 0xff; if (a == 0 || a == BWN_TSSI_MAX || b == 0 || b == BWN_TSSI_MAX || c == 0 || c == BWN_TSSI_MAX || d == 0 || d == BWN_TSSI_MAX) return (ENOENT); bwn_shm_write_4(mac, BWN_SHARED, shm_offset, BWN_TSSI_MAX | (BWN_TSSI_MAX << 8) | (BWN_TSSI_MAX << 16) | (BWN_TSSI_MAX << 24)); if (ofdm) { a = (a + 32) & 0x3f; b = (b + 32) & 0x3f; c = (c + 32) & 0x3f; d = (d + 32) & 0x3f; } avg = (a + b + c + d + 2) / 4; if (ofdm) { if (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO) & BWN_HF_4DB_CCK_POWERBOOST) avg = (avg >= 13) ? 
(avg - 13) : 0; } return (avg); } static void bwn_phy_g_setatt(struct bwn_mac *mac, int *bbattp, int *rfattp) { struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl; int rfatt = *rfattp; int bbatt = *bbattp; while (1) { if (rfatt > lo->rfatt.max && bbatt > lo->bbatt.max - 4) break; if (rfatt < lo->rfatt.min && bbatt < lo->bbatt.min + 4) break; if (bbatt > lo->bbatt.max && rfatt > lo->rfatt.max - 1) break; if (bbatt < lo->bbatt.min && rfatt < lo->rfatt.min + 1) break; if (bbatt > lo->bbatt.max) { bbatt -= 4; rfatt += 1; continue; } if (bbatt < lo->bbatt.min) { bbatt += 4; rfatt -= 1; continue; } if (rfatt > lo->rfatt.max) { rfatt -= 1; bbatt += 4; continue; } if (rfatt < lo->rfatt.min) { rfatt += 1; bbatt -= 4; continue; } break; } *rfattp = MIN(MAX(rfatt, lo->rfatt.min), lo->rfatt.max); *bbattp = MIN(MAX(bbatt, lo->bbatt.min), lo->bbatt.max); } static void bwn_phy_lock(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; KASSERT(siba_get_revid(sc->sc_dev) >= 3, ("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev))); if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, BWN_PS_AWAKE); } static void bwn_phy_unlock(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; KASSERT(siba_get_revid(sc->sc_dev) >= 3, ("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev))); if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); } static void bwn_rf_lock(struct bwn_mac *mac) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_RADIO_LOCK); BWN_READ_4(mac, BWN_MACCTL); DELAY(10); } static void bwn_rf_unlock(struct bwn_mac *mac) { BWN_READ_2(mac, BWN_PHYVER); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_RADIO_LOCK); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_LOCK(sc); if (mac && mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if (reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) mac->mac_phy.task_60s(mac); 
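
	/*
	 * Periodic-task cadence (see bwn_tasks() below): the callout fires
	 * every 15 seconds and mac_task_state selects the slower work, so
	 * with mac_task_state = 0, 1, 2, 3 the driver runs the
	 * {60s+30s+15s}, {15s}, {30s+15s} and {15s} tasks respectively.
	 */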
bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME); } static void bwn_tasks(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status != BWN_MAC_STATUS_STARTED) return; if (mac->mac_task_state % 4 == 0) bwn_task_60s(mac); if (mac->mac_task_state % 2 == 0) bwn_task_30s(mac); bwn_task_15s(mac); mac->mac_task_state++; callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); } static int bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a) { struct bwn_softc *sc = mac->mac_sc; KASSERT(a == 0, ("not support APHY\n")); switch (plcp->o.raw[0] & 0xf) { case 0xb: return (BWN_OFDM_RATE_6MB); case 0xf: return (BWN_OFDM_RATE_9MB); case 0xa: return (BWN_OFDM_RATE_12MB); case 0xe: return (BWN_OFDM_RATE_18MB); case 0x9: return (BWN_OFDM_RATE_24MB); case 0xd: return (BWN_OFDM_RATE_36MB); case 0x8: return (BWN_OFDM_RATE_48MB); case 0xc: return (BWN_OFDM_RATE_54MB); } device_printf(sc->sc_dev, "incorrect OFDM rate %d\n", plcp->o.raw[0] & 0xf); return (-1); } static int bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp) { struct bwn_softc *sc = mac->mac_sc; switch (plcp->o.raw[0]) { case 0x0a: return (BWN_CCK_RATE_1MB); case 0x14: return (BWN_CCK_RATE_2MB); case 0x37: return (BWN_CCK_RATE_5MB); case 0x6e: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]); return (-1); } static void bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m, const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate, int rssi, int noise) { struct bwn_softc *sc = mac->mac_sc; const struct ieee80211_frame_min *wh; uint64_t tsf; uint16_t low_mactime_now; if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; bwn_tsf_read(mac, &tsf); low_mactime_now = tsf; tsf = tsf & ~0xffffULL; tsf += le16toh(rxhdr->mac_time); if (low_mactime_now < le16toh(rxhdr->mac_time)) tsf -= 0x10000; sc->sc_rx_th.wr_tsf = tsf; sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf) { uint32_t low, high; KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3, ("%s:%d: fail", __func__, __LINE__)); low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW); high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static int bwn_dma_attach(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; bus_addr_t lowaddr = 0; int error; if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) return (0); KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__)); mac->mac_flags |= BWN_MAC_FLAG_DMA; dma->dmatype = bwn_dma_gettype(mac); if (dma->dmatype == BWN_DMA_30BIT) lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT; else if (dma->dmatype == BWN_DMA_32BIT) lowaddr = BUS_SPACE_MAXADDR_32BIT; else lowaddr = BUS_SPACE_MAXADDR; /* * Create top level DMA tag */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ BWN_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->parent_dtag); if (error) { 
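		/*
		 * Nothing to unwind here: the parent tag is the first
		 * allocation, unlike the later failures below that back
		 * out through the fail0..fail7 labels in reverse order
		 * of creation.
		 */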
device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return (error); } /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail0; } error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail1; } dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype); if (!dma->wme[WME_AC_BK]) goto fail2; dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype); if (!dma->wme[WME_AC_BE]) goto fail3; dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype); if (!dma->wme[WME_AC_VI]) goto fail4; dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype); if (!dma->wme[WME_AC_VO]) goto fail5; dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype); if (!dma->mcast) goto fail6; dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype); if (!dma->rx) goto fail7; return (error); fail7: bwn_dma_ringfree(&dma->mcast); fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]); fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]); fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]); fail3: bwn_dma_ringfree(&dma->wme[WME_AC_BK]); fail2: bus_dma_tag_destroy(dma->txbuf_dtag); fail1: bus_dma_tag_destroy(dma->rxbuf_dtag); fail0: bus_dma_tag_destroy(dma->parent_dtag); return (error); } static struct bwn_dma_ring * bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status, uint16_t cookie, int *slot) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(mac->mac_sc); switch (cookie & 0xf000) { case 0x1000: dr = dma->wme[WME_AC_BK]; break; case 0x2000: dr = dma->wme[WME_AC_BE]; break; case 0x3000: dr = dma->wme[WME_AC_VI]; break; case 0x4000: dr = dma->wme[WME_AC_VO]; break; case 0x5000: dr = dma->mcast; break; default: dr = NULL; KASSERT(0 == 1, ("invalid cookie value %d", cookie & 0xf000)); } *slot = (cookie & 0x0fff); if (*slot < 0 || *slot >= dr->dr_numslots) { /* * XXX FIXME: sometimes H/W returns TX DONE events duplicately * that it occurs events which have same H/W sequence numbers. * When it's occurred just prints a WARNING msgs and ignores. 
*/ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "out of slot ranges (0 < %d < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static void bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; uint16_t val[BWN_LED_MAX]; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (siba_get_pci_subvendor(sc->sc_dev) == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; val[0] = siba_sprom_get_gpio0(sc->sc_dev); val[1] = siba_sprom_get_gpio1(sc->sc_dev); val[2] = siba_sprom_get_gpio2(sc->sc_dev); val[3] = siba_sprom_get_gpio3(sc->sc_dev); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; if (val[i] == 0xff) { led->led_act = led_act[i]; } else { if (val[i] & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val[i] & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led = led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "%dth led, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return val; } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; 
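			/*
			 * The computed "on" value is applied through
			 * bwn_led_onoff() (defined above) once this switch
			 * completes.  For an active-low LED (BWN_LED_F_ACTLOW)
			 * that helper inverts "on" and clears the mask bit
			 * instead of setting it; e.g. val = 0x0003 with
			 * led_mask = 0x0002 and on = 1 yields val = 0x0001.
			 */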
break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); int error = EDOOFUS; BWN_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = bwn_init(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? 
"ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_phy_lp_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; plp->plp_antenna = BWN_ANT_DEFAULT; } static int bwn_phy_lp_init(struct bwn_mac *mac) { static const struct bwn_stxtable tables[] = { { 2, 6, 0x3d, 3, 0x01 }, { 1, 12, 0x4c, 1, 0x01 }, { 1, 8, 0x50, 0, 0x7f }, { 0, 8, 0x44, 0, 0xff }, { 1, 0, 0x4a, 0, 0xff }, { 0, 4, 0x4d, 0, 0xff }, { 1, 4, 0x4e, 0, 0xff }, { 0, 12, 0x4f, 0, 0x0f }, { 1, 0, 0x4f, 4, 0x0f }, { 3, 0, 0x49, 0, 0x0f }, { 4, 3, 0x46, 4, 0x07 }, { 3, 15, 0x46, 0, 0x01 }, { 4, 0, 0x46, 1, 0x07 }, { 3, 8, 0x48, 4, 0x07 }, { 3, 11, 0x48, 0, 0x0f }, { 3, 4, 0x49, 4, 0x0f }, { 2, 15, 0x45, 0, 0x01 }, { 5, 13, 0x52, 4, 0x07 }, { 6, 0, 0x52, 7, 0x01 }, { 5, 3, 0x41, 5, 0x07 }, { 5, 6, 0x41, 0, 0x0f }, { 5, 10, 0x42, 5, 0x07 }, { 4, 15, 0x42, 0, 0x01 }, { 5, 0, 0x42, 1, 0x07 }, { 4, 11, 0x43, 4, 0x0f }, { 4, 7, 0x43, 0, 0x0f }, { 4, 6, 0x45, 1, 0x01 }, { 2, 7, 0x40, 4, 0x0f }, { 2, 11, 0x40, 0, 0x0f } }; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const struct bwn_stxtable *st; struct ieee80211com *ic = &sc->sc_ic; int i, error; uint16_t tmp; bwn_phy_lp_readsprom(mac); /* XXX bad place */ bwn_phy_lp_bbinit(mac); /* initialize RF */ BWN_PHY_SET(mac, BWN_PHY_4WIRECTL, 0x2); DELAY(1); BWN_PHY_MASK(mac, BWN_PHY_4WIRECTL, 0xfffd); DELAY(1); if (mac->mac_phy.rf_ver == 0x2062) bwn_phy_lp_b2062_init(mac); else { bwn_phy_lp_b2063_init(mac); /* synchronize stx table. */ for (i = 0; i < N(tables); i++) { st = &tables[i]; tmp = BWN_RF_READ(mac, st->st_rfaddr); tmp >>= st->st_rfshift; tmp <<= st->st_physhift; BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xf2 + st->st_phyoffset), ~(st->st_mask << st->st_physhift), tmp); } BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf0), 0x5f80); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf1), 0); } /* calibrate RC */ if (mac->mac_phy.rev >= 2) bwn_phy_lp_rxcal_r2(mac); else if (!plp->plp_rccap) { if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_rccal_r12(mac); } else bwn_phy_lp_set_rccap(mac); error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel 7 (%d)\n", error); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_calib(mac); return (0); } static uint16_t bwn_phy_lp_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } static void bwn_phy_lp_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } static void bwn_phy_lp_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask, uint16_t set) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, (BWN_READ_2(mac, BWN_PHYDATA) & mask) | set); } static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("unaccessible register %d", reg)); if (mac->mac_phy.rev < 2 && reg != 0x4001) reg |= 0x100; if (mac->mac_phy.rev >= 2) reg |= 0x200; BWN_WRITE_2(mac, BWN_RFCTL, reg); return BWN_READ_2(mac, BWN_RFDATALO); } static void bwn_phy_lp_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("unaccessible register %d", reg)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } static void bwn_phy_lp_rf_onoff(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xe0ff); BWN_PHY_MASK(mac, 
BWN_PHY_RF_OVERRIDE_2, (mac->mac_phy.rev >= 2) ? 0xf7f7 : 0xffe7); return; } if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x83ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0x80ff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xdfff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0808); return; } BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xe0ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfcff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0018); } static int bwn_phy_lp_switch_channel(struct bwn_mac *mac, uint32_t chan) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; int error; if (phy->rf_ver == 0x2063) { error = bwn_phy_lp_b2063_switch_channel(mac, chan); if (error) return (error); } else { error = bwn_phy_lp_b2062_switch_channel(mac, chan); if (error) return (error); bwn_phy_lp_set_anafilter(mac, chan); bwn_phy_lp_set_gaintbl(mac, ieee80211_ieee2mhz(chan, 0)); } plp->plp_chan = chan; BWN_WRITE_2(mac, BWN_CHANNEL, chan); return (0); } static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; return (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 1 : 36); } static void bwn_phy_lp_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; if (phy->rev >= 2 || antenna > BWN_ANTAUTO1) return; bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffd, antenna & 0x2); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffe, antenna & 0x1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_UCODE_ANTDIV_HELPER); plp->plp_antenna = antenna; } static void bwn_phy_lp_task_60s(struct bwn_mac *mac) { bwn_phy_lp_calib(mac); } static void bwn_phy_lp_readsprom(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { plp->plp_txisoband_m = siba_sprom_get_tri2g(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa2g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo2g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf2g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc2g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav2g(sc->sc_dev); return; } plp->plp_txisoband_l = siba_sprom_get_tri5gl(sc->sc_dev); plp->plp_txisoband_m = siba_sprom_get_tri5g(sc->sc_dev); plp->plp_txisoband_h = siba_sprom_get_tri5gh(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa5g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo5g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf5g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc5g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav5g(sc->sc_dev); } static void bwn_phy_lp_bbinit(struct bwn_mac *mac) { bwn_phy_lp_tblinit(mac); if (mac->mac_phy.rev >= 2) bwn_phy_lp_bbinit_r2(mac); else bwn_phy_lp_bbinit_r01(mac); } static void bwn_phy_lp_txpctl_init(struct bwn_mac *mac) { struct bwn_txgain gain_2ghz = { 4, 12, 12, 0 }; struct bwn_txgain gain_5ghz = { 7, 15, 14, 0 }; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; bwn_phy_lp_set_txgain(mac, IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
&gain_2ghz : &gain_5ghz); bwn_phy_lp_set_bbmult(mac, 150); } static void bwn_phy_lp_calib(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; const struct bwn_rxcompco *rc = NULL; struct bwn_txgain ogain; int i, omode, oafeovr, orf, obbmult; uint8_t mode, fc = 0; if (plp->plp_chanfullcal != plp->plp_chan) { plp->plp_chanfullcal = plp->plp_chan; fc = 1; } bwn_mac_suspend(mac); /* BlueTooth Coexistance Override */ BWN_WRITE_2(mac, BWN_BTCOEX_CTL, 0x3); BWN_WRITE_2(mac, BWN_BTCOEX_TXCTL, 0xff); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_save(mac); bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (mac->mac_phy.rev == 0 && mode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_bugfix(mac); if (mac->mac_phy.rev >= 2 && fc == 1) { bwn_phy_lp_get_txpctlmode(mac); omode = plp->plp_txpctlmode; oafeovr = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40; if (oafeovr) ogain = bwn_phy_lp_get_txgain(mac); orf = BWN_PHY_READ(mac, BWN_PHY_RF_PWR_OVERRIDE) & 0xff; obbmult = bwn_phy_lp_get_bbmult(mac); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (oafeovr) bwn_phy_lp_set_txgain(mac, &ogain); bwn_phy_lp_set_bbmult(mac, obbmult); bwn_phy_lp_set_txpctlmode(mac, omode); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, orf); } bwn_phy_lp_set_txpctlmode(mac, mode); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_restore(mac); /* do RX IQ Calculation; assumes that noise is true. */ if (siba_get_chipid(sc->sc_dev) == 0x5354) { for (i = 0; i < N(bwn_rxcompco_5354); i++) { if (bwn_rxcompco_5354[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_5354[i]; } } else if (mac->mac_phy.rev >= 2) rc = &bwn_rxcompco_r2; else { for (i = 0; i < N(bwn_rxcompco_r12); i++) { if (bwn_rxcompco_r12[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_r12[i]; } } if (rc == NULL) goto fail; BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, rc->rc_c1); BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, rc->rc_c0 << 8); bwn_phy_lp_set_trsw_over(mac, 1 /* TX */, 0 /* RX */); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7, 0); } else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf, 0); } bwn_phy_lp_set_rxgain(mac, 0x2d5d); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); bwn_phy_lp_set_deaf(mac, 0); /* XXX no checking return value? */ (void)bwn_phy_lp_calc_rx_iq_comp(mac, 0xfff0); bwn_phy_lp_clear_deaf(mac, 0); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfff7); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffdf); /* disable RX GAIN override. 
*/ BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffef); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffbf); if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xe5), 0xfff7); } } else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfdff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xf7ff); fail: bwn_mac_enable(mac); } static void bwn_phy_lp_switch_analog(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfff8); return; } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVRVAL, 0x0007); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x0007); } static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *mac, uint8_t chan) { static const struct bwn_b206x_chan *bc = NULL; struct bwn_softc *sc = mac->mac_sc; uint32_t count, freqref, freqvco, freqxtal, val[3], timeout, timeoutref, tmp[6]; uint16_t old, scale, tmp16; int i, div; for (i = 0; i < N(bwn_b2063_chantable); i++) { if (bwn_b2063_chantable[i].bc_chan == chan) { bc = &bwn_b2063_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_VCOBUF1, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_MIXER2, bc->bc_data[1]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_BUF2, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_RCCR1, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_1ST3, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND1, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND4, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND7, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_PS6, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL2, bc->bc_data[9]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL5, bc->bc_data[10]); BWN_RF_WRITE(mac, BWN_B2063_PA_CTL11, bc->bc_data[11]); old = BWN_RF_READ(mac, BWN_B2063_COM15); BWN_RF_SET(mac, BWN_B2063_COM15, 0x1e); freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; freqvco = bc->bc_freq << ((bc->bc_freq > 4000) ? 1 : 2); freqref = freqxtal * 3; div = (freqxtal <= 26000000 ? 
1 : 2); timeout = ((((8 * freqxtal) / (div * 5000000)) + 1) >> 1) - 1; timeoutref = ((((8 * freqxtal) / (div * (timeout + 1))) + 999999) / 1000000) + 1; BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB3, 0x2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB6, 0xfff8, timeout >> 2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xff9f,timeout << 5); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB5, timeoutref); val[0] = bwn_phy_lp_roundup(freqxtal, 1000000, 16); val[1] = bwn_phy_lp_roundup(freqxtal, 1000000 * div, 16); val[2] = bwn_phy_lp_roundup(freqvco, 3, 16); count = (bwn_phy_lp_roundup(val[2], val[1] + 16, 16) * (timeout + 1) * (timeoutref + 1)) - 1; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xf0, count >> 8); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB8, count & 0xff); tmp[0] = ((val[2] * 62500) / freqref) << 4; tmp[1] = ((val[2] * 62500) % freqref) << 4; while (tmp[1] >= freqref) { tmp[0]++; tmp[1] -= freqref; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG1, 0xffe0, tmp[0] >> 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfe0f, tmp[0] << 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfff0, tmp[0] >> 16); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG3, (tmp[1] >> 8) & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG4, tmp[1] & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF1, 0xb9); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF2, 0x88); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF3, 0x28); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF4, 0x63); tmp[2] = ((41 * (val[2] - 3000)) /1200) + 27; tmp[3] = bwn_phy_lp_roundup(132000 * tmp[0], 8451, 16); if ((tmp[3] + tmp[2] - 1) / tmp[2] > 60) { scale = 1; tmp[4] = ((tmp[3] + tmp[2]) / (tmp[2] << 1)) - 8; } else { scale = 0; tmp[4] = ((tmp[3] + (tmp[2] >> 1)) / tmp[2]) - 8; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffc0, tmp[4]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffbf, scale << 6); tmp[5] = bwn_phy_lp_roundup(100 * val[0], val[2], 16) * (tmp[4] * 8) * (scale + 1); if (tmp[5] > 150) tmp[5] = 0; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffe0, tmp[5]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffdf, scale << 5); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfffb, 0x4); if (freqxtal > 26000000) BWN_RF_SET(mac, BWN_B2063_JTAG_XTAL_12, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfd); if (val[0] == 45) BWN_RF_SET(mac, BWN_B2063_JTAG_VCO1, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_VCO1, 0xfd); BWN_RF_SET(mac, BWN_B2063_PLL_SP2, 0x3); DELAY(1); BWN_RF_MASK(mac, BWN_B2063_PLL_SP2, 0xfffc); /* VCO Calibration */ BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, ~0x40); tmp16 = BWN_RF_READ(mac, BWN_B2063_JTAG_CALNRST) & 0xf8; BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x4); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x6); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x7); DELAY(300); BWN_RF_SET(mac, BWN_B2063_PLL_SP1, 0x40); BWN_RF_WRITE(mac, BWN_B2063_COM15, old); return (0); } static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *mac, uint8_t chan) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; const struct bwn_b206x_chan *bc = NULL; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; uint32_t tmp[9]; int i; for (i = 0; i < N(bwn_b2062_chantable); i++) { if (bwn_b2062_chantable[i].bc_chan == chan) { bc = &bwn_b2062_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL14, 0x04); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE0, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE2, bc->bc_data[1]); BWN_RF_WRITE(mac, 
BWN_B2062_N_LGENATUNE3, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_TUNE, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2062_S_LGENG_CTL1, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL5, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL6, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PGA, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PAD, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xcc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0x07); bwn_phy_lp_b2062_reset_pllbias(mac); tmp[0] = freqxtal / 1000; tmp[1] = plp->plp_div * 1000; tmp[2] = tmp[1] * ieee80211_ieee2mhz(chan, 0); if (ieee80211_ieee2mhz(chan, 0) < 4000) tmp[2] *= 2; tmp[3] = 48 * tmp[0]; tmp[5] = tmp[2] / tmp[3]; tmp[6] = tmp[2] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL26, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL27, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL28, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL29, tmp[5] + ((2 * tmp[6]) / tmp[3])); tmp[7] = BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL19); tmp[8] = ((2 * tmp[2] * (tmp[7] + 1)) + (3 * tmp[0])) / (6 * tmp[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL23, (tmp[8] >> 8) + 16); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL24, tmp[8] & 0xff); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xfc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0); bwn_phy_lp_b2062_reset_pllbias(mac); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (EIO); } } BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (0); } static void bwn_phy_lp_set_anafilter(struct bwn_mac *mac, uint8_t channel) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t tmp = (channel == 14); if (mac->mac_phy.rev < 2) { BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xfcff, tmp << 9); if ((mac->mac_phy.rev == 1) && (plp->plp_rccap)) bwn_phy_lp_set_rccap(mac); return; } BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, 0x3f); } static void bwn_phy_lp_set_gaintbl(struct bwn_mac *mac, uint32_t freq) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t iso, tmp[3]; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) iso = plp->plp_txisoband_m; else if (freq <= 5320) iso = plp->plp_txisoband_l; else if (freq <= 5700) iso = plp->plp_txisoband_m; else iso = plp->plp_txisoband_h; tmp[0] = ((iso - 26) / 12) << 12; tmp[1] = tmp[0] + 0x1000; tmp[2] = tmp[0] + 0x2000; bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), 3, tmp); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), 3, tmp); } static void bwn_phy_lp_digflt_save(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; static const uint16_t val[] = { 0xde5e, 0xe832, 0xe331, 0x4d26, 0x0026, 0x1420, 0x0020, 0xfe08, 0x0008, }; for (i = 0; i < N(addr); i++) { plp->plp_digfilt[i] = BWN_PHY_READ(mac, addr[i]); BWN_PHY_WRITE(mac, addr[i], val[i]); } } static void bwn_phy_lp_get_txpctlmode(struct 
bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_TX_PWR_CTL_CMD); switch (ctl & BWN_PHY_TX_PWR_CTL_CMD_MODE) { case BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_OFF; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_SW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_SW; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_HW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_HW; break; default: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_UNKNOWN; device_printf(sc->sc_dev, "unknown command mode\n"); break; } } static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *mac, uint8_t mode) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t ctl; uint8_t old; bwn_phy_lp_get_txpctlmode(mac); old = plp->plp_txpctlmode; if (old == mode) return; plp->plp_txpctlmode = mode; if (old != BWN_PHYLP_TXPCTL_ON_HW && mode == BWN_PHYLP_TXPCTL_ON_HW) { BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, 0xff80, plp->plp_tssiidx); BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_NNUM, 0x8fff, ((uint16_t)plp->plp_tssinpt << 16)); /* disable TX GAIN override */ if (mac->mac_phy.rev < 2) BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xff7f); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xbfff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xffbf); plp->plp_txpwridx = -1; } if (mac->mac_phy.rev >= 2) { if (mode == BWN_PHYLP_TXPCTL_ON_HW) BWN_PHY_SET(mac, BWN_PHY_OFDM(0xd0), 0x2); else BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xd0), 0xfffd); } /* writes TX Power Control mode */ switch (plp->plp_txpctlmode) { case BWN_PHYLP_TXPCTL_OFF: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF; break; case BWN_PHYLP_TXPCTL_ON_HW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_HW; break; case BWN_PHYLP_TXPCTL_ON_SW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_SW; break; default: ctl = 0; KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, (uint16_t)~BWN_PHY_TX_PWR_CTL_CMD_MODE, ctl); } static void bwn_phy_lp_bugfix(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const unsigned int size = 256; struct bwn_txgain tg; uint32_t rxcomp, txgain, coeff, rfpwr, *tabs; uint16_t tssinpt, tssiidx, value[2]; uint8_t mode; int8_t txpwridx; tabs = (uint32_t *)malloc(sizeof(uint32_t) * size, M_DEVBUF, M_NOWAIT | M_ZERO); if (tabs == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer.\n"); return; } bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; txpwridx = plp->plp_txpwridx; tssinpt = plp->plp_tssinpt; tssiidx = plp->plp_tssiidx; bwn_tab_read_multi(mac, (mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); bwn_phy_lp_tblinit(mac); bwn_phy_lp_bbinit(mac); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_rf_onoff(mac, 1); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); bwn_tab_write_multi(mac, (mac->mac_phy.rev < 2) ? 
BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); BWN_WRITE_2(mac, BWN_CHANNEL, plp->plp_chan); plp->plp_tssinpt = tssinpt; plp->plp_tssiidx = tssiidx; bwn_phy_lp_set_anafilter(mac, plp->plp_chan); if (txpwridx != -1) { /* set TX power by index */ plp->plp_txpwridx = txpwridx; bwn_phy_lp_get_txpctlmode(mac); if (plp->plp_txpctlmode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_ON_SW); if (mac->mac_phy.rev >= 2) { rxcomp = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 192)); tg.tg_pad = (txgain >> 16) & 0xff; tg.tg_gm = txgain & 0xff; tg.tg_pga = (txgain >> 8) & 0xff; tg.tg_dac = (rxcomp >> 28) & 0xff; bwn_phy_lp_set_txgain(mac, &tg); } else { rxcomp = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 192)); BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800, (txgain >> 4) & 0x7fff); bwn_phy_lp_set_txgain_dac(mac, txgain & 0x7); bwn_phy_lp_set_txgain_pa(mac, (txgain >> 24) & 0x7f); } bwn_phy_lp_set_bbmult(mac, (rxcomp >> 20) & 0xff); /* set TX IQCC */ value[0] = (rxcomp >> 10) & 0x3ff; value[1] = rxcomp & 0x3ff; bwn_tab_write_multi(mac, BWN_TAB_2(0, 80), 2, value); coeff = bwn_tab_read(mac, (mac->mac_phy.rev >= 2) ? BWN_TAB_4(7, txpwridx + 448) : BWN_TAB_4(10, txpwridx + 448)); bwn_tab_write(mac, BWN_TAB_2(0, 85), coeff & 0xffff); if (mac->mac_phy.rev >= 2) { rfpwr = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 576)); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, rfpwr & 0xffff); } bwn_phy_lp_set_txgain_override(mac); } if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); bwn_phy_lp_set_antenna(mac, plp->plp_antenna); bwn_phy_lp_set_txpctlmode(mac, mode); free(tabs, M_DEVBUF); } static void bwn_phy_lp_digflt_restore(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; for (i = 0; i < N(addr); i++) BWN_PHY_WRITE(mac, addr[i], plp->plp_digfilt[i]); } static void bwn_phy_lp_tblinit(struct bwn_mac *mac) { uint32_t freq = ieee80211_ieee2mhz(bwn_phy_lp_get_default_chan(mac), 0); if (mac->mac_phy.rev < 2) { bwn_phy_lp_tblinit_r01(mac); bwn_phy_lp_tblinit_txgain(mac); bwn_phy_lp_set_gaintbl(mac, freq); return; } bwn_phy_lp_tblinit_r2(mac); bwn_phy_lp_tblinit_txgain(mac); } struct bwn_wpair { uint16_t reg; uint16_t value; }; struct bwn_smpair { uint16_t offset; uint16_t mask; uint16_t set; }; static void bwn_phy_lp_bbinit_r2(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_wpair v1[] = { { BWN_PHY_AFE_DAC_CTL, 0x50 }, { BWN_PHY_AFE_CTL, 0x8800 }, { BWN_PHY_AFE_CTL_OVR, 0 }, { BWN_PHY_AFE_CTL_OVRVAL, 0 }, { BWN_PHY_RF_OVERRIDE_0, 0 }, { BWN_PHY_RF_OVERRIDE_2, 0 }, { BWN_PHY_OFDM(0xf9), 0 }, { BWN_PHY_TR_LOOKUP_1, 0 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0xb4 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xf8ff, 0x200 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xff00, 0x7f }, { BWN_PHY_GAINDIRECTMISMATCH, 0xff0f, 0x40 }, { BWN_PHY_PREAMBLECONFIRMTO, 0xff00, 0x2 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_OFDM(0xfe), 0xffe0, 0x1f }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0xff00, 0x19 }, { BWN_PHY_OFDM(0xff), 0x03ff, 0x3c00 }, { BWN_PHY_OFDM(0xfe), 0xfc1f, 
0x3e0 }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0x00ff, 0x1900 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800 }, { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x12 }, { BWN_PHY_GAINMISMATCH, 0x0fff, 0x9000 }, }; int i; for (i = 0; i < N(v1); i++) BWN_PHY_WRITE(mac, v1[i].reg, v1[i].value); BWN_PHY_SET(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x10); for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x4000); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x2000); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x10a), 0x1); if (siba_get_pci_revid(sc->sc_dev) >= 0x18) { bwn_tab_write(mac, BWN_TAB_4(17, 65), 0xec); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x14); } else { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x10); } BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0xff00, 0xf4); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0x00ff, 0xf100); BWN_PHY_WRITE(mac, BWN_PHY_CLIPTHRESH, 0x48); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0xff00, 0x46); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe4), 0xff00, 0x10); BWN_PHY_SETMASK(mac, BWN_PHY_PWR_THRESH1, 0xfff0, 0x9); BWN_PHY_MASK(mac, BWN_PHY_GAINDIRECTMISMATCH, ~0xf); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5500); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0xa0); BWN_PHY_SETMASK(mac, BWN_PHY_GAINDIRECTMISMATCH, 0xe0ff, 0x300); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2a00); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xa); } else { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x1e00); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xd); } for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { bwn_tab_write(mac, BWN_TAB_2(0x08, 0x14), 0); bwn_tab_write(mac, BWN_TAB_2(0x08, 0x12), 0x40); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0xb00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x6); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0x9d00); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0xff00, 0xa1); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0xff00, 0xb3); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); BWN_PHY_SET(mac, BWN_PHY_RESET_CTL, 0x44); BWN_PHY_WRITE(mac, BWN_PHY_RESET_CTL, 0x80); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, 0xa954); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_1, 0x2000 | ((uint16_t)plp->plp_rssigs << 10) | ((uint16_t)plp->plp_rssivc << 4) | plp->plp_rssivf); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SET(mac, BWN_PHY_AFE_ADC_CTL_0, 0x1c); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_CTL, 0x00ff, 0x8800); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_1, 0xfc3c, 0x0400); } bwn_phy_lp_digflt_save(mac); } static void bwn_phy_lp_bbinit_r01(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_smpair v1[] = { { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x0005 }, { BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0x0180 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x3c00 }, { 
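/*
 * Each bwn_smpair row is { PHY register, AND mask, OR set }; the
 * loops below apply them in order with BWN_PHY_SETMASK().
 */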
BWN_PHY_GAINDIRECTMISMATCH, 0xfff0, 0x0005 }, { BWN_PHY_GAIN_MISMATCH_LIMIT, 0xffc0, 0x001a }, { BWN_PHY_CRS_ED_THRESH, 0xff00, 0x00b3 }, { BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0x3f00, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_5, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_5, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_6, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_6, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_7, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_7, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_8, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_8, 0xc0ff, 0x0b00 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0a00 } }; static const struct bwn_smpair v4[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0c00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0100 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0300 } }; static const struct bwn_smpair v5[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0700 } }; int i; uint16_t tmp, tmp2; BWN_PHY_MASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL, 0); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0); BWN_PHY_SET(mac, BWN_PHY_AFE_DAC_CTL, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0x0078); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800); BWN_PHY_WRITE(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x0016); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_0, 0xfff8, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5400); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2400); BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0x0006); BWN_PHY_MASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfffe); for (i = 0; i < N(v1); i++) BWN_PHY_SETMASK(mac, v1[i].offset, v1[i].mask, v1[i].set); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) && ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) || (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF))) { siba_cc_pmu_set_ldovolt(sc->sc_dev, SIBA_LDO_PAREF, 0x28); siba_cc_pmu_set_ldoparef(sc->sc_dev, 1); if (mac->mac_phy.rev == 0) BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0010); bwn_tab_write(mac, BWN_TAB_2(11, 7), 60); } else { siba_cc_pmu_set_ldoparef(sc->sc_dev, 0); BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0020); bwn_tab_write(mac, BWN_TAB_2(11, 7), 100); } tmp = plp->plp_rssivf | 
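/*
 * AFE RSSI control word: rssivf in the low nibble, rssivc shifted
 * into the next nibble, 0xa000 supplying the fixed control bits.
 */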
plp->plp_rssivc << 4 | 0xa000; BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, tmp); if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_RSSIINV) BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x0aaa); else BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x02aa); bwn_tab_write(mac, BWN_TAB_2(11, 1), 24); BWN_PHY_SETMASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfff9, (plp->plp_bxarch << 1)); if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT)) { for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) || (siba_get_pci_subdevice(sc->sc_dev) == 0x048a) || ((mac->mac_phy.rev == 0) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM))) { for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); } else if (mac->mac_phy.rev == 1 || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM)) { for (i = 0; i < N(v4); i++) BWN_PHY_SETMASK(mac, v4[i].offset, v4[i].mask, v4[i].set); } else { for (i = 0; i < N(v5); i++) BWN_PHY_SETMASK(mac, v5[i].offset, v5[i].mask, v5[i].set); } if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF)) { BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_5, BWN_PHY_TR_LOOKUP_1); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_6, BWN_PHY_TR_LOOKUP_2); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_7, BWN_PHY_TR_LOOKUP_3); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_8, BWN_PHY_TR_LOOKUP_4); } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT) && (siba_get_chipid(sc->sc_dev) == 0x5354) && (siba_get_chippkg(sc->sc_dev) == SIBA_CHIPPACK_BCM4712S)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0006); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_SELECT, 0x0005); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_OUTEN, 0xffff); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_PR45960W); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x8000); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0040); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0xa400); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0x0b00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xfff8, 0x0003); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xffc7, 0x0020); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else { BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xffbf); } if (mac->mac_phy.rev == 1) { tmp = BWN_PHY_READ(mac, BWN_PHY_CLIPCTRTHRESH); tmp2 = (tmp & 0x03e0) >> 5; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C3, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_GAINDIRECTMISMATCH); tmp2 = (tmp & 0x1f00) >> 8; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C4, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_VERYLOWGAINDB); tmp2 = tmp & 0x00ff; tmp2 |= tmp << 8; BWN_PHY_WRITE(mac, BWN_PHY_4C5, tmp2); } } struct bwn_b2062_freq { uint16_t freq; uint8_t value[6]; }; static void bwn_phy_lp_b2062_init(struct bwn_mac *mac) { #define CALC_CTL7(freq, div) \ (((800000000 * (div) + (freq)) / (2 * (freq)) - 8) & 0xff) #define CALC_CTL18(freq, div) \ ((((100 * (freq) + 16000000 * (div)) / (32000000 * (div))) - 1) & 0xff) #define CALC_CTL19(freq, div) \ ((((2 * (freq) + 1000000 * (div)) / (2000000 * (div))) - 1) & 0xff) struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_b2062_freq freqdata_tab[] = { { 12000, { 6, 6, 6, 6, 10, 6 } }, { 13000, { 4, 4, 4, 4, 11, 7 } }, { 14400, { 3, 3, 3, 3, 12, 7 } }, { 16200, { 3, 3, 3, 3, 13, 8 } }, { 
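/*
 * freqdata_tab rows are { reference-frequency threshold, six RFPLL
 * tuning values }; the lookup below takes the first row whose
 * threshold exceeds the computed reference and falls back to the
 * last row otherwise.
 */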
18000, { 2, 2, 2, 2, 14, 8 } }, { 19200, { 1, 1, 1, 1, 14, 9 } } }; static const struct bwn_wpair v1[] = { { BWN_B2062_N_TXCTL3, 0 }, { BWN_B2062_N_TXCTL4, 0 }, { BWN_B2062_N_TXCTL5, 0 }, { BWN_B2062_N_TXCTL6, 0 }, { BWN_B2062_N_PDNCTL0, 0x40 }, { BWN_B2062_N_PDNCTL0, 0 }, { BWN_B2062_N_CALIB_TS, 0x10 }, { BWN_B2062_N_CALIB_TS, 0 } }; const struct bwn_b2062_freq *f = NULL; uint32_t xtalfreq, ref; unsigned int i; bwn_phy_lp_b2062_tblinit(mac); for (i = 0; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); if (mac->mac_phy.rev > 0) BWN_RF_WRITE(mac, BWN_B2062_S_BG_CTL1, (BWN_RF_READ(mac, BWN_B2062_N_COM2) >> 1) | 0x80); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) BWN_RF_SET(mac, BWN_B2062_N_TSSI_CTL0, 0x1); else BWN_RF_MASK(mac, BWN_B2062_N_TSSI_CTL0, ~0x1); KASSERT(siba_get_cc_caps(sc->sc_dev) & SIBA_CC_CAPS_PMU, ("%s:%d: fail", __func__, __LINE__)); xtalfreq = siba_get_cc_pmufreq(sc->sc_dev) * 1000; KASSERT(xtalfreq != 0, ("%s:%d: fail", __func__, __LINE__)); if (xtalfreq <= 30000000) { plp->plp_div = 1; BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL1, 0xfffb); } else { plp->plp_div = 2; BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL1, 0x4); } BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL7, CALC_CTL7(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL18, CALC_CTL18(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL19, CALC_CTL19(xtalfreq, plp->plp_div)); ref = (1000 * plp->plp_div + 2 * xtalfreq) / (2000 * plp->plp_div); ref &= 0xffff; for (i = 0; i < N(freqdata_tab); i++) { if (ref < freqdata_tab[i].freq) { f = &freqdata_tab[i]; break; } } if (f == NULL) f = &freqdata_tab[N(freqdata_tab) - 1]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL8, ((uint16_t)(f->value[1]) << 4) | f->value[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL9, ((uint16_t)(f->value[3]) << 4) | f->value[2]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL10, f->value[4]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL11, f->value[5]); #undef CALC_CTL7 #undef CALC_CTL18 #undef CALC_CTL19 } static void bwn_phy_lp_b2063_init(struct bwn_mac *mac) { bwn_phy_lp_b2063_tblinit(mac); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_SP5, 0); BWN_RF_SET(mac, BWN_B2063_COM8, 0x38); BWN_RF_WRITE(mac, BWN_B2063_REG_SP1, 0x56); BWN_RF_MASK(mac, BWN_B2063_RX_BB_CTL2, ~0x2); BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP6, 0x20); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP9, 0x40); if (mac->mac_phy.rev == 2) { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP4, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x18); } else { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0x20); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x20); } } static void bwn_phy_lp_rxcal_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; static const struct bwn_wpair v1[] = { { BWN_B2063_RX_BB_SP8, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x15 }, { BWN_B2063_RC_CALIB_CTL3, 0x70 }, { BWN_B2063_RC_CALIB_CTL4, 0x52 }, { BWN_B2063_RC_CALIB_CTL5, 0x1 }, { BWN_B2063_RC_CALIB_CTL1, 0x7d } }; static const struct bwn_wpair v2[] = { { BWN_B2063_TX_BB_SP3, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x55 }, { BWN_B2063_RC_CALIB_CTL3, 0x76 } }; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; int i; uint8_t tmp; tmp = BWN_RF_READ(mac, BWN_B2063_RX_BB_SP8) & 0xff; for (i = 0; i < 2; i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, 0xf7); for (i = 2; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); 
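/*
 * Poll for RC calibration completion: bit 0x2 of RC_CALIB_CTL6 is
 * the done flag.  Worst case this spins 10000 times with
 * DELAY(1000), i.e. roughly ten seconds, before giving up and
 * restoring the RX_BB_SP8 value saved above.
 */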
for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_RX_BB_SP8, tmp); tmp = BWN_RF_READ(mac, BWN_B2063_TX_BB_SP3) & 0xff; for (i = 0; i < N(v2); i++) BWN_RF_WRITE(mac, v2[i].reg, v2[i].value); if (freqxtal == 24000000) { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0xfc); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x0); } else { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0x13); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x1); } BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0x7d); for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, tmp); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL1, 0x7e); } static void bwn_phy_lp_rccal_r12(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp_iq_est ie; struct bwn_txgain tx_gains; static const uint32_t pwrtbl[21] = { 0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64, 0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35, 0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088, 0x0004c, 0x0002c, 0x0001a, }; uint32_t npwr, ipwr, sqpwr, tmp; int loopback, i, j, sum, error; uint16_t save[7]; uint8_t txo, bbmult, txpctlmode; error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel to 7 (%d)\n", error); txo = (BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40) ? 1 : 0; bbmult = bwn_phy_lp_get_bbmult(mac); if (txo) tx_gains = bwn_phy_lp_get_txgain(mac); save[0] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_0); save[1] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_VAL_0); save[2] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR); save[3] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVRVAL); save[4] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2); save[5] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2_VAL); save[6] = BWN_PHY_READ(mac, BWN_PHY_LP_PHY_CTL); bwn_phy_lp_get_txpctlmode(mac); txpctlmode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); /* disable CRS */ bwn_phy_lp_set_deaf(mac, 1); bwn_phy_lp_set_trsw_over(mac, 0, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x4); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x10); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffbf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x38); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x100); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL0, 0); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL1, 1); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL2, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45af); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0x3ff); loopback = bwn_phy_lp_loopback(mac); if (loopback == -1) goto done; bwn_phy_lp_set_rxgain_idx(mac, 
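/*
 * RC-cap search: with the loopback gain index applied here, sweep
 * RXBB_CALIB2 through 128..159 and, for each DDFS tone (j = 5..25),
 * compare the measured I+Q power against the expectation derived
 * from pwrtbl[]; the index with the smallest sum of squared errors
 * is kept in plp_rccap.
 */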
loopback); BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xffbf, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfff8, 0x1); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xffc7, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f, 0xc0); tmp = 0; memset(&ie, 0, sizeof(ie)); for (i = 128; i <= 159; i++) { BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, i); sum = 0; for (j = 5; j <= 25; j++) { bwn_phy_lp_ddfs_turnon(mac, 1, 1, j, j, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) goto done; sqpwr = ie.ie_ipwr + ie.ie_qpwr; ipwr = ((pwrtbl[j - 5] >> 3) + 1) >> 1; npwr = bwn_phy_lp_roundup(sqpwr, (j == 5) ? sqpwr : 0, 12); sum += ((ipwr - npwr) * (ipwr - npwr)); if ((i == 128) || (sum < tmp)) { plp->plp_rccap = i; tmp = sum; } } } bwn_phy_lp_ddfs_turnoff(mac); done: /* restore CRS */ bwn_phy_lp_clear_deaf(mac, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfc00); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_VAL_0, save[1]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, save[0]); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVRVAL, save[3]); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, save[2]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2_VAL, save[5]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, save[4]); BWN_PHY_WRITE(mac, BWN_PHY_LP_PHY_CTL, save[6]); bwn_phy_lp_set_bbmult(mac, bbmult); if (txo) bwn_phy_lp_set_txgain(mac, &tx_gains); bwn_phy_lp_set_txpctlmode(mac, txpctlmode); if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); } static void bwn_phy_lp_set_rccap(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint8_t rc_cap = (plp->plp_rccap & 0x1f) >> 1; if (mac->mac_phy.rev == 1) rc_cap = MIN(rc_cap + 5, 15); BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, MAX(plp->plp_rccap - 4, 0x80)); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, rc_cap | 0x80); BWN_RF_WRITE(mac, BWN_B2062_S_RXG_CNT16, ((plp->plp_rccap & 0x1f) >> 2) | 0x80); } static uint32_t bwn_phy_lp_roundup(uint32_t value, uint32_t div, uint8_t pre) { uint32_t i, q, r; if (div == 0) return (0); for (i = 0, q = value / div, r = value % div; i < pre; i++) { q <<= 1; if (r << 1 >= div) { q++; r = (r << 1) - div; } } if (r << 1 >= div) q++; return (q); } static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0xff); DELAY(20); if (siba_get_chipid(sc->sc_dev) == 0x5354) { BWN_RF_WRITE(mac, BWN_B2062_N_COM1, 4); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 4); } else { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0); } DELAY(5); } static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *mac) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x42); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x62); DELAY(200); } static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *mac) { #define FLAG_A 0x01 #define FLAG_G 0x02 struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_b206x_rfinit_entry bwn_b2062_init_tab[] = { { BWN_B2062_N_COM4, 0x1, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_PDNCTL1, 0x0, 0xca, FLAG_G, }, { BWN_B2062_N_PDNCTL3, 0x0, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_PDNCTL4, 0x15, 0x2a, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENC, 0xDB, 0xff, FLAG_A, }, { BWN_B2062_N_LGENATUNE0, 0xdd, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENATUNE2, 0xdd, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENATUNE3, 0x77, 0xB5, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENACTL3, 0x0, 0xff, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENACTL7, 0x33, 0x33, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXA_CTL1, 0x0, 0x0, FLAG_G, }, { 
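/*
 * bwn_b206x_rfinit_entry rows are { RF register, 5 GHz value,
 * 2 GHz value, band flags }; the loop at the bottom of this
 * function writes br_valuea or br_valueg depending on the current
 * channel band.
 */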
BWN_B2062_N_RXBB_CTL0, 0x82, 0x80, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_RXBB_GAIN1, 0x4, 0x4, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_RXBB_GAIN2, 0x0, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_TXCTL4, 0x3, 0x3, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_TXCTL5, 0x2, 0x2, FLAG_A | FLAG_G, },
		{ BWN_B2062_N_TX_TUNE, 0x88, 0x1b, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_PDS_CTL0, 0xff, 0xff, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL0, 0xf8, 0xd8, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL1, 0x3c, 0x24, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL8, 0x88, 0x80, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_LGENG_CTL10, 0x88, 0x80, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL0, 0x98, 0x98, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL1, 0x10, 0x10, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL5, 0x43, 0x43, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL6, 0x47, 0x47, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL7, 0xc, 0xc, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL8, 0x11, 0x11, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL9, 0x11, 0x11, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL10, 0xe, 0xe, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL11, 0x8, 0x8, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL12, 0x33, 0x33, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL13, 0xa, 0xa, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL14, 0x6, 0x6, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL18, 0x3e, 0x3e, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL19, 0x13, 0x13, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL21, 0x62, 0x62, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL22, 0x7, 0x7, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL23, 0x16, 0x16, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL24, 0x5c, 0x5c, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL25, 0x95, 0x95, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL30, 0xa0, 0xa0, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL31, 0x4, 0x4, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL33, 0xcc, 0xcc, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RFPLLCTL34, 0x7, 0x7, FLAG_A | FLAG_G, },
		{ BWN_B2062_S_RXG_CNT8, 0xf, 0xf, FLAG_A, },
	};
	const struct bwn_b206x_rfinit_entry *br;
	unsigned int i;

	for (i = 0; i < N(bwn_b2062_init_tab); i++) {
		br = &bwn_b2062_init_tab[i];
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			if (br->br_flags & FLAG_G)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valueg);
		} else {
			if (br->br_flags & FLAG_A)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valuea);
		}
	}
#undef FLAG_A
#undef FLAG_G
}

static void
bwn_phy_lp_b2063_tblinit(struct bwn_mac *mac)
{
#define	FLAG_A	0x01
#define	FLAG_G	0x02
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	static const struct bwn_b206x_rfinit_entry bwn_b2063_init_tab[] = {
		{ BWN_B2063_COM1, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM10, 0x1, 0x0, FLAG_A, },
		{ BWN_B2063_COM16, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM17, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM18, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM19, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM20, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM21, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM22, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM23, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_COM24, 0x0, 0x0, FLAG_G, },
		{ BWN_B2063_LOGEN_SP1, 0xe8, 0xd4, FLAG_A | FLAG_G, },
		{ BWN_B2063_LOGEN_SP2, 0xa7, 0x53, FLAG_A | FLAG_G, },
		{ BWN_B2063_LOGEN_SP4, 0xf0, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_SP1, 0x1f, 0x5e, FLAG_G, },
		{ BWN_B2063_G_RX_SP2, 0x7f, 0x7e, FLAG_G, },
		{ BWN_B2063_G_RX_SP3, 0x30, 0xf0, FLAG_G, },
		{ BWN_B2063_G_RX_SP7, 0x7f, 0x7f, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_SP10, 0xc, 0xc, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_SP1, 0x3c, 0x3f, FLAG_A, },
		{ BWN_B2063_A_RX_SP2, 0xfc, 0xfe, FLAG_A, },
		{ BWN_B2063_A_RX_SP7, 0x8, 0x8, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_BB_SP4, 0x60, 0x60, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_BB_SP8, 0x30, 0x30, FLAG_A | FLAG_G, },
		{ BWN_B2063_TX_RF_SP3, 0xc, 0xb, FLAG_A | FLAG_G, },
		{ BWN_B2063_TX_RF_SP4, 0x10, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_PA_SP1, 0x3d, 0xfd, FLAG_A | FLAG_G, },
		{ BWN_B2063_TX_BB_SP1, 0x2, 0x2, FLAG_A | FLAG_G, },
		{ BWN_B2063_BANDGAP_CTL1, 0x56, 0x56, FLAG_A | FLAG_G, },
		{ BWN_B2063_JTAG_VCO2, 0xF7, 0xF7, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_MIX3, 0x71, 0x71, FLAG_A | FLAG_G, },
		{ BWN_B2063_G_RX_MIX4, 0x71, 0x71, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_1ST2, 0xf0, 0x30, FLAG_A, },
		{ BWN_B2063_A_RX_PS6, 0x77, 0x77, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_MIX4, 0x3, 0x3, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_MIX5, 0xf, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_A_RX_MIX6, 0xf, 0xf, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_TIA_CTL1, 0x77, 0x77, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_TIA_CTL3, 0x77, 0x77, FLAG_A | FLAG_G, },
		{ BWN_B2063_RX_BB_CTL2, 0x4, 0x4, FLAG_A | FLAG_G, },
		{ BWN_B2063_PA_CTL1, 0x0, 0x4, FLAG_A, },
		{ BWN_B2063_VREG_CTL1, 0x3, 0x3, FLAG_A | FLAG_G, },
	};
	const struct bwn_b206x_rfinit_entry *br;
	unsigned int i;

	for (i = 0; i < N(bwn_b2063_init_tab); i++) {
		br = &bwn_b2063_init_tab[i];
		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
			if (br->br_flags & FLAG_G)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valueg);
		} else {
			if (br->br_flags & FLAG_A)
				BWN_RF_WRITE(mac, br->br_offset,
				    br->br_valuea);
		}
	}
#undef FLAG_A
#undef FLAG_G
}

static void
bwn_tab_read_multi(struct bwn_mac *mac, uint32_t typenoffset, int count,
    void *_data)
{
	unsigned int i;
	uint32_t offset, type;
	uint8_t *data = _data;

	type = BWN_TAB_GETTYPE(typenoffset);
	offset = BWN_TAB_GETOFFSET(typenoffset);
	KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
	BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);

	for (i = 0; i < count; i++) {
		switch (type) {
		case BWN_TAB_8BIT:
			*data = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff;
			data++;
			break;
		case BWN_TAB_16BIT:
			*((uint16_t *)data) = BWN_PHY_READ(mac,
			    BWN_PHY_TABLEDATALO);
			data += 2;
			break;
		case BWN_TAB_32BIT:
			*((uint32_t *)data) = BWN_PHY_READ(mac,
			    BWN_PHY_TABLEDATAHI);
			*((uint32_t *)data) <<= 16;
			*((uint32_t *)data) |= BWN_PHY_READ(mac,
			    BWN_PHY_TABLEDATALO);
			data += 4;
			break;
		default:
			KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
		}
	}
}

static void
bwn_tab_write_multi(struct bwn_mac *mac, uint32_t typenoffset, int count,
    const void *_data)
{
	uint32_t offset, type, value;
	const uint8_t *data = _data;
	unsigned int i;

	type = BWN_TAB_GETTYPE(typenoffset);
	offset = BWN_TAB_GETOFFSET(typenoffset);
	KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
	BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);

	for (i = 0; i < count; i++) {
		switch (type) {
		case BWN_TAB_8BIT:
			value = *data;
			data++;
			KASSERT(!(value & ~0xff),
			    ("%s:%d: fail", __func__, __LINE__));
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
			break;
		case BWN_TAB_16BIT:
			value = *((const uint16_t *)data);
			data += 2;
			KASSERT(!(value & ~0xffff),
			    ("%s:%d: fail", __func__, __LINE__));
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
			break;
		case BWN_TAB_32BIT:
			value = *((const uint32_t *)data);
			data += 4;
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16);
			BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
			break;
		default:
			KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
		}
	}
}

static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *mac)
{
	struct bwn_txgain tg;
	uint16_t tmp;

	tg.tg_dac = (BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0x380) >> 7;
	if
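/*
 * Two TX gain layouts: rev < 2 packs GM (bits 0-2), PGA (bits 3-6)
 * and PAD (bits 7-10) into a single override word, while rev 2+
 * splits the gain across TX_GAIN_CTL_OVERRIDE_VAL and
 * BWN_PHY_OFDM(0xfb).
 */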
(mac->mac_phy.rev < 2) { tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7ff; tg.tg_gm = tmp & 0x0007; tg.tg_pga = (tmp & 0x0078) >> 3; tg.tg_pad = (tmp & 0x780) >> 7; return (tg); } tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL); tg.tg_pad = BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0xff; tg.tg_gm = tmp & 0xff; tg.tg_pga = (tmp >> 8) & 0xff; return (tg); } static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *mac) { return (bwn_tab_read(mac, BWN_TAB_2(0, 87)) & 0xff00) >> 8; } static void bwn_phy_lp_set_txgain(struct bwn_mac *mac, struct bwn_txgain *tg) { uint16_t pa; if (mac->mac_phy.rev < 2) { BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800, (tg->tg_pad << 7) | (tg->tg_pga << 3) | tg->tg_gm); bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac); bwn_phy_lp_set_txgain_override(mac); return; } pa = bwn_phy_lp_get_pa_gain(mac); BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, (tg->tg_pga << 8) | tg->tg_gm); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0x8000, tg->tg_pad | (pa << 6)); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xfc), (tg->tg_pga << 8) | tg->tg_gm); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x8000, tg->tg_pad | (pa << 8)); bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac); bwn_phy_lp_set_txgain_override(mac); } static void bwn_phy_lp_set_bbmult(struct bwn_mac *mac, uint8_t bbmult) { bwn_tab_write(mac, BWN_TAB_2(0, 87), (uint16_t)bbmult << 8); } static void bwn_phy_lp_set_trsw_over(struct bwn_mac *mac, uint8_t tx, uint8_t rx) { uint16_t trsw = (tx << 1) | rx; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffc, trsw); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x3); } static void bwn_phy_lp_set_rxgain(struct bwn_mac *mac, uint32_t gain) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t ext_lna, high_gain, lna, low_gain, trsw, tmp; if (mac->mac_phy.rev < 2) { trsw = gain & 0x1; lna = (gain & 0xfffc) | ((gain & 0xc) >> 2); ext_lna = (gain & 2) >> 1; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff, ext_lna << 10); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff, ext_lna << 11); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, lna); } else { low_gain = gain & 0xffff; high_gain = (gain >> 16) & 0xf; ext_lna = (gain >> 21) & 0x1; trsw = ~(gain >> 20) & 0x1; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff, ext_lna << 9); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff, ext_lna << 10); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff0, high_gain); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { tmp = (gain >> 2) & 0x3; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xe7ff, tmp<<11); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe6), 0xffe7, tmp << 3); } } BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40); if (mac->mac_phy.rev >= 2) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x400); BWN_PHY_SET(mac, BWN_PHY_OFDM(0xe5), 0x8); } return; } BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x200); } static void bwn_phy_lp_set_deaf(struct bwn_mac *mac, uint8_t user) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; if (user) plp->plp_crsusr_off = 1; else plp->plp_crssys_off = 1; BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x80); } static void bwn_phy_lp_clear_deaf(struct 
bwn_mac *mac, uint8_t user) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (user) plp->plp_crsusr_off = 0; else plp->plp_crssys_off = 0; if (plp->plp_crsusr_off || plp->plp_crssys_off) return; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x60); else BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x20); } static unsigned int bwn_sqrt(struct bwn_mac *mac, unsigned int x) { /* Table holding (10 * sqrt(x)) for x between 1 and 256. */ static uint8_t sqrt_table[256] = { 10, 14, 17, 20, 22, 24, 26, 28, 30, 31, 33, 34, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 50, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 64, 65, 66, 67, 67, 68, 69, 70, 70, 71, 72, 72, 73, 74, 74, 75, 76, 76, 77, 78, 78, 79, 80, 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86, 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 92, 93, 93, 94, 94, 95, 95, 96, 96, 97, 97, 98, 98, 99, 100, 100, 100, 101, 101, 102, 102, 103, 103, 104, 104, 105, 105, 106, 106, 107, 107, 108, 108, 109, 109, 110, 110, 110, 111, 111, 112, 112, 113, 113, 114, 114, 114, 115, 115, 116, 116, 117, 117, 117, 118, 118, 119, 119, 120, 120, 120, 121, 121, 122, 122, 122, 123, 123, 124, 124, 124, 125, 125, 126, 126, 126, 127, 127, 128, 128, 128, 129, 129, 130, 130, 130, 131, 131, 131, 132, 132, 133, 133, 133, 134, 134, 134, 135, 135, 136, 136, 136, 137, 137, 137, 138, 138, 138, 139, 139, 140, 140, 140, 141, 141, 141, 142, 142, 142, 143, 143, 143, 144, 144, 144, 145, 145, 145, 146, 146, 146, 147, 147, 147, 148, 148, 148, 149, 149, 150, 150, 150, 150, 151, 151, 151, 152, 152, 152, 153, 153, 153, 154, 154, 154, 155, 155, 155, 156, 156, 156, 157, 157, 157, 158, 158, 158, 159, 159, 159, 160 }; if (x == 0) return (0); if (x >= 256) { unsigned int tmp; for (tmp = 0; x >= (2 * tmp) + 1; x -= (2 * tmp++) + 1) /* do nothing */ ; return (tmp); } return (sqrt_table[x - 1] / 10); } static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *mac, uint16_t sample) { #define CALC_COEFF(_v, _x, _y, _z) do { \ int _t; \ _t = _x - 20; \ if (_t >= 0) { \ _v = ((_y << (30 - _x)) + (_z >> (1 + _t))) / (_z >> _t); \ } else { \ _v = ((_y << (30 - _x)) + (_z << (-1 - _t))) / (_z << -_t); \ } \ } while (0) #define CALC_COEFF2(_v, _x, _y, _z) do { \ int _t; \ _t = _x - 11; \ if (_t >= 0) \ _v = (_y << (31 - _x)) / (_z >> _t); \ else \ _v = (_y << (31 - _x)) / (_z << -_t); \ } while (0) struct bwn_phy_lp_iq_est ie; uint16_t v0, v1; int tmp[2], ret; v1 = BWN_PHY_READ(mac, BWN_PHY_RX_COMP_COEFF_S); v0 = v1 >> 8; v1 |= 0xff; BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, 0x00c0); BWN_PHY_MASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff); ret = bwn_phy_lp_rx_iq_est(mac, sample, 32, &ie); if (ret == 0) goto done; if (ie.ie_ipwr + ie.ie_qpwr < 2) { ret = 0; goto done; } CALC_COEFF(tmp[0], bwn_nbits(ie.ie_iqprod), ie.ie_iqprod, ie.ie_ipwr); CALC_COEFF2(tmp[1], bwn_nbits(ie.ie_qpwr), ie.ie_qpwr, ie.ie_ipwr); tmp[1] = -bwn_sqrt(mac, tmp[1] - (tmp[0] * tmp[0])); v0 = tmp[0] >> 3; v1 = tmp[1] >> 4; done: BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, v1); BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, v0 << 8); return ret; #undef CALC_COEFF #undef CALC_COEFF2 } static void bwn_phy_lp_tblinit_r01(struct bwn_mac *mac) { static const uint16_t noisescale[] = { 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 
0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36, }; static const uint16_t crsgainnft[] = { 0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f, 0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381, 0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f, 0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d, 0x013d, }; static const uint16_t filterctl[] = { 0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53, 0x0127, }; static const uint32_t psctl[] = { 0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0, 0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0, }; static const uint16_t ofdmcckgain_r0[] = { 0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t ofdmcckgain_r1[] = { 0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t gaindelta[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint32_t txpwrctl[] = { 0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033, 0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015, 0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20, 0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663, 0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62, 0x00002e63, 0x00002e63, 0x00002660, 0x00002660, 0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2, 0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1, 0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc, 0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500, 0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe, 0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05, 0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9, 0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08, 0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702, }; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl), bwn_tab_sigsq_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(crsgainnft), crsgainnft); bwn_tab_write_multi(mac, BWN_TAB_2(8, 0), N(filterctl), filterctl); bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(psctl), psctl); bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl), bwn_tab_pllfrac_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl), bwn_tabl_iqlocal_tbl); if (mac->mac_phy.rev == 0) { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); } else { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); } bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(gaindelta), gaindelta); bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(txpwrctl), txpwrctl); } static void bwn_phy_lp_tblinit_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; static const uint16_t noisescale[] = { 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4 }; static const uint32_t filterctl[] = { 0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f }; static const uint32_t psctl[] = { 0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 
0x00000000, 0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002 }; static const uint32_t gainidx[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a }; static const uint16_t auxgainidx[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016 }; static const uint16_t swctl[] = { 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018 }; static const uint8_t hf[] = { 0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17 }; static const uint32_t gainval[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1, 0x00000000, 0x00000000 }; static const uint16_t gain[] = { 0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }; static const uint32_t papdeps[] = { 0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16, 0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c, 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356, 0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506 }; static const uint32_t papdmult[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint32_t gainidx_a0[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint16_t auxgainidx_a0[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014 };
	static const uint32_t gainval_a0[] = {
		0x00000008, 0x0000000e, 0x00000014, 0x0000001a,
		0x000000fb, 0x00000004, 0x00000008, 0x0000000d,
		0x00000001, 0x00000004, 0x00000007, 0x0000000a,
		0x0000000d, 0x00000010, 0x00000012, 0x00000015,
		0x00000000, 0x00000006, 0x0000000c, 0x00000000,
		0x00000000, 0x00000000, 0x00000012, 0x00000000,
		0x00000000, 0x00000000, 0x00000018, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x0000001e, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000003, 0x00000006, 0x00000009,
		0x0000000c, 0x0000000f, 0x00000012, 0x00000015,
		0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x0000000f, 0x000000f7, 0x00000000, 0x00000000 };
	static const uint16_t gain_a0[] = {
		0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b,
		0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016,
		0x0017, 0x001a, 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034,
		0x0037, 0x003b, 0x003f, 0x0040, 0x0044, 0x0057, 0x005b, 0x005f,
		0x00d7, 0x00db, 0x00df, 0x0157, 0x015b, 0x015f, 0x0357, 0x035b,
		0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000 };

	KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__));

	for (i = 0; i < 704; i++)
		bwn_tab_write(mac, BWN_TAB_4(7, i), 0);
	bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl),
	    bwn_tab_sigsq_tbl);
	bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale);
	bwn_tab_write_multi(mac, BWN_TAB_4(11, 0), N(filterctl), filterctl);
	bwn_tab_write_multi(mac, BWN_TAB_4(12, 0), N(psctl), psctl);
	bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx), gainidx);
	bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx), auxgainidx);
	bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(swctl), swctl);
	bwn_tab_write_multi(mac, BWN_TAB_1(16, 0), N(hf), hf);
	bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval), gainval);
	bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain), gain);
	bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl),
	    bwn_tab_pllfrac_tbl);
	bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl),
	    bwn_tabl_iqlocal_tbl);
	bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(papdeps), papdeps);
	bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(papdmult), papdmult);

	if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
	    (siba_get_chiprev(sc->sc_dev) == 0)) {
		bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx_a0),
		    gainidx_a0);
		bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx_a0),
		    auxgainidx_a0);
		bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval_a0),
		    gainval_a0);
		bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain_a0),
		    gain_a0);
	}
}

static void
bwn_phy_lp_tblinit_txgain(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	static struct bwn_txgain_entry txgain_r2[] = {
		{ 255, 255, 203, 0, 152 }, { 255, 255, 203, 0, 147 },
		{ 255,
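/*
 * Each TX gain row appears to follow the bwn_txgain_entry field
 * order { gm, pga, pad, dac, bb_mult }; the struct itself is
 * declared elsewhere in the driver.
 */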
255, 203, 0, 143 }, { 255, 255, 203, 0, 139 }, { 255, 255, 203, 0, 135 }, { 255, 255, 203, 0, 131 }, { 255, 255, 203, 0, 128 }, { 255, 255, 203, 0, 124 }, { 255, 255, 203, 0, 121 }, { 255, 255, 203, 0, 117 }, { 255, 255, 203, 0, 114 }, { 255, 255, 203, 0, 111 }, { 255, 255, 203, 0, 107 }, { 255, 255, 203, 0, 104 }, { 255, 255, 203, 0, 101 }, { 255, 255, 203, 0, 99 }, { 255, 255, 203, 0, 96 }, { 255, 255, 203, 0, 93 }, { 255, 255, 203, 0, 90 }, { 255, 255, 203, 0, 88 }, { 255, 255, 203, 0, 85 }, { 255, 255, 203, 0, 83 }, { 255, 255, 203, 0, 81 }, { 255, 255, 203, 0, 78 }, { 255, 255, 203, 0, 76 }, { 255, 255, 203, 0, 74 }, { 255, 255, 203, 0, 72 }, { 255, 255, 203, 0, 70 }, { 255, 255, 203, 0, 68 }, { 255, 255, 203, 0, 66 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 192, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 176, 0, 64 }, { 255, 255, 171, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 157, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 144, 0, 64 }, { 255, 255, 140, 0, 64 }, { 255, 255, 136, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 105, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 91, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 86, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 79, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 248, 64, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 241, 62, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 234, 60, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 227, 59, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 221, 57, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 215, 55, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 208, 54, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 203, 52, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 197, 51, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 191, 49, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 186, 48, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 181, 47, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 175, 45, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 170, 44, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 166, 43, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 161, 42, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 156, 40, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 152, 39, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 148, 38, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 143, 37, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 139, 36, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 135, 35, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 132, 34, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 128, 33, 0, 64 }, { 255, 128, 32, 0, 64 }, { 255, 124, 32, 0, 64 }, { 255, 124, 31, 0, 64 }, { 255, 121, 31, 0, 64 }, { 255, 121, 30, 0, 64 }, { 255, 117, 30, 0, 64 }, { 255, 117, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 111, 29, 0, 64 }, }; static struct bwn_txgain_entry txgain_2ghz_r2[] = { { 7, 99, 255, 0, 64 }, { 7, 96, 255, 0, 64 }, { 7, 93, 255, 0, 64 }, { 7, 90, 255, 0, 64 }, { 7, 88, 255, 0, 64 }, { 7, 85, 255, 0, 64 }, { 7, 83, 255, 0, 64 }, { 7, 81, 255, 0, 64 }, { 7, 78, 255, 0, 64 }, { 7, 76, 255, 0, 64 }, { 7, 74, 255, 0, 64 }, { 7, 72, 255, 0, 64 }, { 7, 70, 255, 
0, 64 }, { 7, 68, 255, 0, 64 }, { 7, 66, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 62, 255, 0, 64 }, { 7, 62, 248, 0, 64 }, { 7, 60, 248, 0, 64 }, { 7, 60, 241, 0, 64 }, { 7, 59, 241, 0, 64 }, { 7, 59, 234, 0, 64 }, { 7, 57, 234, 0, 64 }, { 7, 57, 227, 0, 64 }, { 7, 55, 227, 0, 64 }, { 7, 55, 221, 0, 64 }, { 7, 54, 221, 0, 64 }, { 7, 54, 215, 0, 64 }, { 7, 52, 215, 0, 64 }, { 7, 52, 208, 0, 64 }, { 7, 51, 208, 0, 64 }, { 7, 51, 203, 0, 64 }, { 7, 49, 203, 0, 64 }, { 7, 49, 197, 0, 64 }, { 7, 48, 197, 0, 64 }, { 7, 48, 191, 0, 64 }, { 7, 47, 191, 0, 64 }, { 7, 47, 186, 0, 64 }, { 7, 45, 186, 0, 64 }, { 7, 45, 181, 0, 64 }, { 7, 44, 181, 0, 64 }, { 7, 44, 175, 0, 64 }, { 7, 43, 175, 0, 64 }, { 7, 43, 170, 0, 64 }, { 7, 42, 170, 0, 64 }, { 7, 42, 166, 0, 64 }, { 7, 40, 166, 0, 64 }, { 7, 40, 161, 0, 64 }, { 7, 39, 161, 0, 64 }, { 7, 39, 156, 0, 64 }, { 7, 38, 156, 0, 64 }, { 7, 38, 152, 0, 64 }, { 7, 37, 152, 0, 64 }, { 7, 37, 148, 0, 64 }, { 7, 36, 148, 0, 64 }, { 7, 36, 143, 0, 64 }, { 7, 35, 143, 0, 64 }, { 7, 35, 139, 0, 64 }, { 7, 34, 139, 0, 64 }, { 7, 34, 135, 0, 64 }, { 7, 33, 135, 0, 64 }, { 7, 33, 132, 0, 64 }, { 7, 32, 132, 0, 64 }, { 7, 32, 128, 0, 64 }, { 7, 31, 128, 0, 64 }, { 7, 31, 124, 0, 64 }, { 7, 30, 124, 0, 64 }, { 7, 30, 121, 0, 64 }, { 7, 29, 121, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 114, 0, 64 }, { 7, 28, 114, 0, 64 }, { 7, 28, 111, 0, 64 }, { 7, 27, 111, 0, 64 }, { 7, 27, 108, 0, 64 }, { 7, 26, 108, 0, 64 }, { 7, 26, 104, 0, 64 }, { 7, 25, 104, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 99, 0, 64 }, { 7, 24, 99, 0, 64 }, { 7, 24, 96, 0, 64 }, { 7, 23, 96, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 90, 0, 64 }, { 7, 22, 90, 0, 64 }, { 7, 22, 88, 0, 64 }, { 7, 21, 88, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 83, 0, 64 }, { 7, 20, 83, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 78, 0, 64 }, { 7, 19, 78, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 74, 0, 64 }, { 7, 18, 74, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 70, 0, 64 }, { 7, 17, 70, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 66, 0, 64 }, { 7, 16, 66, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 62, 0, 64 }, { 7, 15, 62, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 59, 0, 64 }, { 7, 14, 59, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 54, 0, 64 }, { 7, 13, 54, 0, 64 }, { 7, 13, 52, 0, 64 }, { 7, 13, 52, 0, 64 }, }; static struct bwn_txgain_entry txgain_5ghz_r2[] = { { 255, 255, 255, 0, 152 }, { 255, 255, 255, 0, 147 }, { 255, 255, 255, 0, 143 }, { 255, 255, 255, 0, 139 }, { 255, 255, 255, 0, 135 }, { 255, 255, 255, 0, 131 }, { 255, 255, 255, 0, 128 }, { 255, 255, 255, 0, 124 }, { 255, 255, 255, 0, 121 }, { 255, 255, 255, 0, 117 }, { 255, 255, 255, 0, 114 }, { 255, 255, 255, 0, 111 }, { 255, 255, 255, 0, 107 }, { 255, 255, 255, 0, 104 }, { 255, 255, 255, 0, 101 }, { 255, 255, 255, 0, 99 }, { 255, 255, 255, 0, 96 }, { 255, 255, 255, 0, 93 }, { 255, 255, 255, 0, 90 }, { 255, 255, 255, 0, 88 }, { 255, 255, 255, 0, 85 }, { 255, 255, 255, 0, 83 }, { 255, 255, 255, 0, 81 }, { 255, 255, 255, 0, 78 }, { 255, 255, 255, 0, 76 }, { 255, 255, 255, 0, 74 }, { 255, 255, 255, 0, 72 }, { 255, 255, 255, 0, 70 }, { 255, 255, 255, 0, 68 }, { 255, 255, 255, 0, 66 }, { 255, 255, 255, 0, 64 }, { 255, 255, 248, 0, 64 }, { 255, 255, 241, 0, 64 }, { 255, 255, 234, 
0, 64 }, { 255, 255, 227, 0, 64 }, { 255, 255, 221, 0, 64 }, { 255, 255, 215, 0, 64 }, { 255, 255, 208, 0, 64 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 191, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 175, 0, 64 }, { 255, 255, 170, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 156, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 143, 0, 64 }, { 255, 255, 139, 0, 64 }, { 255, 255, 135, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 104, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 90, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 85, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 78, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 62, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 248, 60, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 241, 59, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 234, 57, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 227, 55, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 221, 54, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 215, 52, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 208, 51, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 203, 49, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 197, 48, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 191, 47, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 186, 45, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 181, 44, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 175, 43, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 170, 42, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 166, 40, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 161, 39, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 156, 38, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 152, 37, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 148, 36, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 143, 35, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 139, 34, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 135, 33, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 132, 32, 0, 64 }, { 255, 128, 32, 0, 64 } }; static struct bwn_txgain_entry txgain_r0[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 
7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r0[] = { { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 }, { 4, 10, 6, 0, 59 }, { 4, 10, 5, 0, 72 }, { 4, 10, 5, 0, 70 }, { 4, 10, 5, 0, 68 }, { 4, 10, 5, 0, 66 }, { 4, 10, 5, 0, 64 }, { 4, 10, 5, 0, 62 }, { 4, 10, 5, 0, 60 }, { 4, 10, 5, 0, 59 }, { 4, 9, 5, 0, 70 }, { 4, 9, 5, 0, 68 }, { 4, 9, 5, 0, 66 }, { 4, 9, 5, 0, 64 }, { 4, 9, 5, 0, 63 }, { 4, 9, 5, 0, 61 }, { 4, 9, 5, 0, 59 }, { 4, 9, 4, 0, 71 }, { 4, 9, 4, 0, 69 }, { 4, 9, 4, 0, 67 }, { 4, 9, 4, 0, 65 }, { 4, 9, 
4, 0, 63 }, { 4, 9, 4, 0, 62 }, { 4, 9, 4, 0, 60 }, { 4, 9, 4, 0, 58 }, { 4, 8, 4, 0, 70 }, { 4, 8, 4, 0, 68 }, { 4, 8, 4, 0, 66 }, { 4, 8, 4, 0, 65 }, { 4, 8, 4, 0, 63 }, { 4, 8, 4, 0, 61 }, { 4, 8, 4, 0, 59 }, { 4, 7, 4, 0, 68 }, { 4, 7, 4, 0, 66 }, { 4, 7, 4, 0, 64 }, { 4, 7, 4, 0, 62 }, { 4, 7, 4, 0, 61 }, { 4, 7, 4, 0, 59 }, { 4, 7, 3, 0, 67 }, { 4, 7, 3, 0, 65 }, { 4, 7, 3, 0, 63 }, { 4, 7, 3, 0, 62 }, { 4, 7, 3, 0, 60 }, { 4, 6, 3, 0, 65 }, { 4, 6, 3, 0, 63 }, { 4, 6, 3, 0, 61 }, { 4, 6, 3, 0, 60 }, { 4, 6, 3, 0, 58 }, { 4, 5, 3, 0, 68 }, { 4, 5, 3, 0, 66 }, { 4, 5, 3, 0, 64 }, { 4, 5, 3, 0, 62 }, { 4, 5, 3, 0, 60 }, { 4, 5, 3, 0, 59 }, { 4, 5, 3, 0, 57 }, { 4, 4, 2, 0, 83 }, { 4, 4, 2, 0, 81 }, { 4, 4, 2, 0, 78 }, { 4, 4, 2, 0, 76 }, { 4, 4, 2, 0, 74 }, { 4, 4, 2, 0, 72 } }; static struct bwn_txgain_entry txgain_5ghz_r0[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 
}, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_r1[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r1[] = { { 4, 15, 15, 0, 90 }, { 4, 15, 15, 0, 88 }, { 4, 15, 15, 0, 85 }, { 4, 15, 15, 0, 83 }, { 4, 15, 15, 0, 81 }, { 4, 15, 15, 0, 78 }, { 4, 15, 15, 0, 76 }, { 4, 15, 15, 0, 74 }, { 4, 15, 15, 0, 72 }, { 4, 15, 15, 0, 70 }, { 4, 15, 15, 0, 68 }, { 4, 15, 15, 0, 66 }, { 4, 15, 15, 0, 64 }, { 4, 15, 15, 0, 62 }, { 4, 15, 15, 0, 60 }, { 4, 15, 15, 0, 59 }, { 4, 15, 14, 0, 72 }, { 4, 15, 14, 0, 70 }, { 4, 15, 14, 0, 68 }, { 4, 15, 14, 0, 
66 }, { 4, 15, 14, 0, 64 }, { 4, 15, 14, 0, 62 }, { 4, 15, 14, 0, 60 }, { 4, 15, 14, 0, 59 }, { 4, 15, 13, 0, 72 }, { 4, 15, 13, 0, 70 }, { 4, 15, 13, 0, 68 }, { 4, 15, 13, 0, 66 }, { 4, 15, 13, 0, 64 }, { 4, 15, 13, 0, 62 }, { 4, 15, 13, 0, 60 }, { 4, 15, 13, 0, 59 }, { 4, 15, 12, 0, 72 }, { 4, 15, 12, 0, 70 }, { 4, 15, 12, 0, 68 }, { 4, 15, 12, 0, 66 }, { 4, 15, 12, 0, 64 }, { 4, 15, 12, 0, 62 }, { 4, 15, 12, 0, 60 }, { 4, 15, 12, 0, 59 }, { 4, 15, 11, 0, 72 }, { 4, 15, 11, 0, 70 }, { 4, 15, 11, 0, 68 }, { 4, 15, 11, 0, 66 }, { 4, 15, 11, 0, 64 }, { 4, 15, 11, 0, 62 }, { 4, 15, 11, 0, 60 }, { 4, 15, 11, 0, 59 }, { 4, 15, 10, 0, 72 }, { 4, 15, 10, 0, 70 }, { 4, 15, 10, 0, 68 }, { 4, 15, 10, 0, 66 }, { 4, 15, 10, 0, 64 }, { 4, 15, 10, 0, 62 }, { 4, 15, 10, 0, 60 }, { 4, 15, 10, 0, 59 }, { 4, 15, 9, 0, 72 }, { 4, 15, 9, 0, 70 }, { 4, 15, 9, 0, 68 }, { 4, 15, 9, 0, 66 }, { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_5ghz_r1[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 
7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 }, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; if (mac->mac_phy.rev != 0 && mac->mac_phy.rev != 1) { if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r2); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r2); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r2); return; } if (mac->mac_phy.rev == 0) { if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r0); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r0); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r0); return; } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r1); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r1); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r1); } static void bwn_tab_write(struct bwn_mac *mac, uint32_t typeoffset, uint32_t value) { uint32_t offset, type; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_16BIT: KASSERT(!(value & ~0xffff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static int 
bwn_phy_lp_loopback(struct bwn_mac *mac) { struct bwn_phy_lp_iq_est ie; int i, index = -1; uint32_t tmp; memset(&ie, 0, sizeof(ie)); bwn_phy_lp_set_trsw_over(mac, 1, 1); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 1); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x8); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x80); for (i = 0; i < 32; i++) { bwn_phy_lp_set_rxgain_idx(mac, i); bwn_phy_lp_ddfs_turnon(mac, 1, 1, 5, 5, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) continue; tmp = (ie.ie_ipwr + ie.ie_qpwr) / 1000; if ((tmp > 4000) && (tmp < 10000)) { index = i; break; } } bwn_phy_lp_ddfs_turnoff(mac); return (index); } static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *mac, uint16_t idx) { bwn_phy_lp_set_rxgain(mac, bwn_tab_read(mac, BWN_TAB_2(12, idx))); } static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *mac, int i_on, int q_on, int incr1, int incr2, int scale_idx) { bwn_phy_lp_ddfs_turnoff(mac); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0x80ff); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0xff80, incr1); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0x80ff, incr2 << 8); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff7, i_on << 3); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xffef, q_on << 4); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xff9f, scale_idx << 5); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_AFE_DDFS, 0x2); BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x20); } static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *mac, uint16_t sample, uint8_t time, struct bwn_phy_lp_iq_est *ie) { int i; BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfff7); BWN_PHY_WRITE(mac, BWN_PHY_IQ_NUM_SMPLS_ADDR, sample); BWN_PHY_SETMASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xff00, time); BWN_PHY_MASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xfeff); BWN_PHY_SET(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200); for (i = 0; i < 500; i++) { if (!(BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) break; DELAY(1000); } if ((BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 0; } ie->ie_iqprod = BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_HI_ADDR); ie->ie_iqprod <<= 16; ie->ie_iqprod |= BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_LO_ADDR); ie->ie_ipwr = BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_HI_ADDR); ie->ie_ipwr <<= 16; ie->ie_ipwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_LO_ADDR); ie->ie_qpwr = BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_HI_ADDR); ie->ie_qpwr <<= 16; ie->ie_qpwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_LO_ADDR); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 1; } static uint32_t bwn_tab_read(struct bwn_mac *mac, uint32_t typeoffset) { uint32_t offset, type, value; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff; break; case BWN_TAB_16BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI); 
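/* A 32-bit table entry spans both 16-bit data registers: the high word was just read above, and the low word is shifted in below. */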
value <<= 16; value |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); value = 0; } return (value); } static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *mac) { BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffd); BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0xffdf); } static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *mac, uint16_t dac) { uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0xc7f; ctl |= dac << 7; BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf000, ctl); } static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *mac, uint16_t gain) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0xe03f, gain << 6); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x80ff, gain << 8); } static void bwn_phy_lp_set_txgain_override(struct bwn_mac *mac) { if (mac->mac_phy.rev < 2) BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100); else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x4000); } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x40); } static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *mac) { return BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0x7f; } static uint8_t bwn_nbits(int32_t val) { uint32_t tmp; uint8_t nbits = 0; for (tmp = abs(val); tmp != 0; tmp >>= 1) nbits++; return (nbits); } static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *mac, int offset, int count, struct bwn_txgain_entry *table) { int i; for (i = offset; i < count; i++) bwn_phy_lp_gaintbl_write(mac, i, table[i]); } static void bwn_phy_lp_gaintbl_write(struct bwn_mac *mac, int offset, struct bwn_txgain_entry data) { if (mac->mac_phy.rev >= 2) bwn_phy_lp_gaintbl_write_r2(mac, offset, data); else bwn_phy_lp_gaintbl_write_r01(mac, offset, data); } static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__)); tmp = (te.te_pad << 16) | (te.te_pga << 8) | te.te_gm; if (mac->mac_phy.rev >= 3) { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x10 << 24) : (0x70 << 24)); } else { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x14 << 24) : (0x7f << 24)); } bwn_tab_write(mac, BWN_TAB_4(7, 0xc0 + offset), tmp); bwn_tab_write(mac, BWN_TAB_4(7, 0x140 + offset), te.te_bbmult << 20 | te.te_dac << 28); } static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write(mac, BWN_TAB_4(10, 0xc0 + offset), (te.te_pad << 11) | (te.te_pga << 7) | (te.te_gm << 4) | te.te_dac); bwn_tab_write(mac, BWN_TAB_4(10, 0x140 + offset), te.te_bbmult << 20); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. 
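(only sc_curmac is consulted below, so any additional MAC would get no sysctl nodes)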
*/ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); /* XXX backed by the rts counter */ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; static driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); Index: head/sys/dev/bxe/bxe.c =================================================================== --- head/sys/dev/bxe/bxe.c (revision 296271) +++ head/sys/dev/bxe/bxe.c (revision 296272) @@ -1,18626 +1,18626 @@ /*- * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #define BXE_DRIVER_VERSION "1.78.81" #include "bxe.h" #include "ecore_sp.h" #include "ecore_init.h" #include "ecore_init_ops.h" #include "57710_int_offsets.h" #include "57711_int_offsets.h" #include "57712_int_offsets.h" /* * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these * explicitly here for older kernels that don't include this changeset. */ #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #define sysctl_handle_64 sysctl_handle_quad #endif /* * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170.
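(these are the mbuf csum_flags bits requesting TCP and UDP checksum offload over IPv6)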
Define these * here as zero(0) for older kernels that don't include this changeset, * thereby masking the functionality. */ #ifndef CSUM_TCP_IPV6 #define CSUM_TCP_IPV6 0 #define CSUM_UDP_IPV6 0 #endif /* * pci_find_cap was added in r219865. Re-define this as pci_find_extcap * for older kernels that don't include this changeset. */ #if __FreeBSD_version < 900035 #define pci_find_cap pci_find_extcap #endif #define BXE_DEF_SB_ATT_IDX 0x0001 #define BXE_DEF_SB_IDX 0x0002 /* * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per * function HW initialization. */ #define FLR_WAIT_USEC 10000 /* 10 msecs */ #define FLR_WAIT_INTERVAL 50 /* usecs */ #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ struct pbf_pN_buf_regs { int pN; uint32_t init_crd; uint32_t crd; uint32_t crd_freed; }; struct pbf_pN_cmd_regs { int pN; uint32_t lines_occup; uint32_t lines_freed; }; /* * PCI Device ID Table used by bxe_probe(). */ #define BXE_DEVDESC_MAX 64 static struct bxe_device_type bxe_devs[] = { { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57710 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711E 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 MF 10GbE" }, { 0, 0, 0, 0, NULL } }; MALLOC_DECLARE(M_BXE_ILT); MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer"); /* * FreeBSD device entry points. */ static int bxe_probe(device_t); static int bxe_attach(device_t); static int bxe_detach(device_t); static int bxe_shutdown(device_t); /* * FreeBSD KLD module/device interface event handler methods.
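* (device_probe/attach/detach/shutdown are wired to the bxe_* entry points declared above)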
*/ static device_method_t bxe_methods[] = { /* Device interface (device_if.h) */ DEVMETHOD(device_probe, bxe_probe), DEVMETHOD(device_attach, bxe_attach), DEVMETHOD(device_detach, bxe_detach), DEVMETHOD(device_shutdown, bxe_shutdown), /* Bus interface (bus_if.h) */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), KOBJMETHOD_END }; /* * FreeBSD KLD Module data declaration */ static driver_t bxe_driver = { "bxe", /* module name */ bxe_methods, /* event handler */ sizeof(struct bxe_softc) /* extra data */ }; /* * FreeBSD dev class is needed to manage dev instances and * to associate with a bus type */ static devclass_t bxe_devclass; MODULE_DEPEND(bxe, pci, 1, 1, 1); MODULE_DEPEND(bxe, ether, 1, 1, 1); DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0); /* resources needed for unloading a previously loaded device */ #define BXE_PREV_WAIT_NEEDED 1 struct mtx bxe_prev_mtx; MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF); struct bxe_prev_list_node { LIST_ENTRY(bxe_prev_list_node) node; uint8_t bus; uint8_t slot; uint8_t path; uint8_t aer; /* XXX automatic error recovery */ uint8_t undi; }; static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list); static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ /* Tunable device values... */ SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters"); /* Debug */ unsigned long bxe_debug = 0; SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN, &bxe_debug, 0, "Debug logging mode"); /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ static int bxe_interrupt_mode = INTR_MODE_MSIX; SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN, &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode"); /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */ static int bxe_queue_count = 4; SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count, 0, "Multi-Queue queue count"); /* max number of buffers per queue (default RX_BD_USABLE) */ static int bxe_max_rx_bufs = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN, &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue"); /* Host interrupt coalescing RX tick timer (usecs) */ static int bxe_hc_rx_ticks = 25; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN, &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks"); /* Host interrupt coalescing TX tick timer (usecs) */ static int bxe_hc_tx_ticks = 50; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN, &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks"); /* Maximum number of Rx packets to process at a time */ static int bxe_rx_budget = 0xffffffff; SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN, &bxe_rx_budget, 0, "Rx processing budget"); /* Maximum LRO aggregation size */ static int bxe_max_aggregation_size = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN, &bxe_max_aggregation_size, 0, "max aggregation size"); /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */ static int bxe_mrrs = -1; SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, &bxe_mrrs, 0, "PCIe maximum read request size"); /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ static int bxe_autogreeen = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, &bxe_autogreeen, 0, "AutoGrEEEn support"); /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ static int bxe_udp_rss = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, 
CTLFLAG_RDTUN, &bxe_udp_rss, 0, "UDP RSS support"); #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */ #define STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_stats, stat_name) / 4) #define Q_STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_q_stats, stat_name) / 4) static const struct { uint32_t offset; uint32_t size; uint32_t flags; #define STATS_FLAGS_PORT 1 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */ #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) char string[STAT_NAME_LEN]; } bxe_eth_stats_arr[] = { { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" }, { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" }, { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" }, { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" }, { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" }, { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" }, { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, { 
STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" }, { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes"}, { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi"}, { STATS_OFFSET32(rx_calls), 4, STATS_FLAGS_FUNC, "rx_calls"}, { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts"}, { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"}, { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_bxe_service_rxsgl), 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"}, { STATS_OFFSET32(rx_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors"}, { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"}, { STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"}, { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, { STATS_OFFSET32(rx_budget_reached), 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, { STATS_OFFSET32(tx_pkts), 4, STATS_FLAGS_FUNC, "tx_pkts"}, { STATS_OFFSET32(tx_soft_errors), 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, { STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, { STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, { STATS_OFFSET32(tx_ofld_frames_lso), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, { STATS_OFFSET32(tx_encap_failures), 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, { STATS_OFFSET32(tx_hw_queue_full), 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, { STATS_OFFSET32(tx_hw_max_queue_depth), 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, { STATS_OFFSET32(tx_dma_mapping_failure), 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, { STATS_OFFSET32(tx_max_drbr_queue_depth), 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, { STATS_OFFSET32(tx_window_violation_std), 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, 
"tx_chain_lost_mbuf"}, { STATS_OFFSET32(tx_frames_deferred), 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, { STATS_OFFSET32(tx_queue_xoff), 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, { STATS_OFFSET32(mbuf_defrag_attempts), 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, { STATS_OFFSET32(mbuf_defrag_failures), 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, { STATS_OFFSET32(mbuf_alloc_tx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, { STATS_OFFSET32(mbuf_alloc_rx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, { STATS_OFFSET32(mbuf_alloc_sge), 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, { STATS_OFFSET32(mbuf_alloc_tpa), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"} }; static const struct { uint32_t offset; uint32_t size; char string[STAT_NAME_LEN]; } bxe_eth_q_stats_arr[] = { { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "rx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "rx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 8, "rx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 8, "rx_bcast_packets" }, { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "rx_discards" }, { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "tx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "tx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "tx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, "tx_bcast_packets" }, { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 8, "tpa_aggregations" }, { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, "tpa_aggregated_frames"}, { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "tpa_bytes"}, { Q_STATS_OFFSET32(rx_calls), 4, "rx_calls"}, { Q_STATS_OFFSET32(rx_pkts), 4, "rx_pkts"}, { Q_STATS_OFFSET32(rx_tpa_pkts), 4, "rx_tpa_pkts"}, { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, "rx_erroneous_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 4, "rx_bxe_service_rxsgl"}, { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 4, "rx_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_soft_errors), 4, "rx_soft_errors"}, { Q_STATS_OFFSET32(rx_hw_csum_errors), 4, "rx_hw_csum_errors"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, "rx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, "rx_ofld_frames_csum_tcp_udp"}, { Q_STATS_OFFSET32(rx_budget_reached), 4, "rx_budget_reached"}, { Q_STATS_OFFSET32(tx_pkts), 4, "tx_pkts"}, { Q_STATS_OFFSET32(tx_soft_errors), 4, "tx_soft_errors"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, "tx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, "tx_ofld_frames_csum_tcp"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, "tx_ofld_frames_csum_udp"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso), 4, "tx_ofld_frames_lso"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, "tx_ofld_frames_lso_hdr_splits"}, { Q_STATS_OFFSET32(tx_encap_failures), 4, "tx_encap_failures"}, { Q_STATS_OFFSET32(tx_hw_queue_full), 4, "tx_hw_queue_full"}, { 
Q_STATS_OFFSET32(tx_hw_max_queue_depth), 4, "tx_hw_max_queue_depth"}, { Q_STATS_OFFSET32(tx_dma_mapping_failure), 4, "tx_dma_mapping_failure"}, { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 4, "tx_max_drbr_queue_depth"}, { Q_STATS_OFFSET32(tx_window_violation_std), 4, "tx_window_violation_std"}, { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso"}, { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf"}, { Q_STATS_OFFSET32(tx_frames_deferred), 4, "tx_frames_deferred"}, { Q_STATS_OFFSET32(tx_queue_xoff), 4, "tx_queue_xoff"}, { Q_STATS_OFFSET32(mbuf_defrag_attempts), 4, "mbuf_defrag_attempts"}, { Q_STATS_OFFSET32(mbuf_defrag_failures), 4, "mbuf_defrag_failures"}, { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, "mbuf_rx_bd_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, "mbuf_rx_bd_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, "mbuf_rx_tpa_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, "mbuf_rx_tpa_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, "mbuf_rx_sge_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, "mbuf_rx_sge_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx"}, { Q_STATS_OFFSET32(mbuf_alloc_rx), 4, "mbuf_alloc_rx"}, { Q_STATS_OFFSET32(mbuf_alloc_sge), 4, "mbuf_alloc_sge"}, { Q_STATS_OFFSET32(mbuf_alloc_tpa), 4, "mbuf_alloc_tpa"} }; #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type); static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port); static void bxe_set_reset_global(struct bxe_softc *sc); static void bxe_set_reset_in_progress(struct bxe_softc *sc); static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine); static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print); static void bxe_int_disable(struct bxe_softc *sc); static int bxe_release_leader_lock(struct bxe_softc *sc); static void bxe_pf_disable(struct bxe_softc *sc); static void bxe_free_fp_buffers(struct bxe_softc *sc); static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod); static void bxe_link_report_locked(struct bxe_softc *sc); static void bxe_link_report(struct bxe_softc *sc); static void bxe_link_status_update(struct bxe_softc *sc); static void bxe_periodic_callout_func(void *xsc); static void bxe_periodic_start(struct bxe_softc *sc); static void bxe_periodic_stop(struct bxe_softc *sc); static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index); static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue); static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index); static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp); static void bxe_task_fp(struct bxe_fastpath *fp); static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents); static int bxe_alloc_mem(struct bxe_softc *sc); static void bxe_free_mem(struct bxe_softc *sc); static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); static void bxe_free_fw_stats_mem(struct bxe_softc *sc); static int bxe_interrupt_attach(struct bxe_softc *sc); static void bxe_interrupt_detach(struct bxe_softc 
*sc); static void bxe_set_rx_mode(struct bxe_softc *sc); static int bxe_init_locked(struct bxe_softc *sc); static int bxe_stop_locked(struct bxe_softc *sc); static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode); static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link); static void bxe_handle_sp_tq(void *context, int pending); static void bxe_handle_fp_tq(void *context, int pending); static int bxe_add_cdev(struct bxe_softc *sc); static void bxe_del_cdev(struct bxe_softc *sc); static int bxe_grc_dump(struct bxe_softc *sc); /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, uint32_t crc32_seed, uint8_t complement) { uint32_t byte = 0; uint32_t bit = 0; uint8_t msb = 0; uint32_t temp = 0; uint32_t shft = 0; uint8_t current_byte = 0; uint32_t crc32_result = crc32_seed; const uint32_t CRC32_POLY = 0x1edc6f41; if ((crc32_packet == NULL) || (crc32_length == 0) || ((crc32_length % 8) != 0)) { return (crc32_result); } for (byte = 0; byte < crc32_length; byte = byte + 1) { current_byte = crc32_packet[byte]; for (bit = 0; bit < 8; bit = bit + 1) { /* msb = crc32_result[31]; */ msb = (uint8_t)(crc32_result >> 31); crc32_result = crc32_result << 1; /* if (msb != current_byte[bit]) */ if (msb != (0x1 & (current_byte >> bit))) { crc32_result = crc32_result ^ CRC32_POLY; /* crc32_result[0] = 1 */ crc32_result |= 1; } } } /* Last step is to: * 1. "mirror" every bit * 2. swap the 4 bytes * 3. complement each bit */ /* Mirror */ temp = crc32_result; shft = sizeof(crc32_result) * 8 - 1; for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) { temp <<= 1; temp |= crc32_result & 1; shft--; } /* temp[31-bit] = crc32_result[bit] */ temp <<= shft; /* Swap */ /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */ { uint32_t t0, t1, t2, t3; t0 = (0x000000ff & (temp >> 24)); t1 = (0x0000ff00 & (temp >> 8)); t2 = (0x00ff0000 & (temp << 8)); t3 = (0xff000000 & (temp << 24)); crc32_result = t0 | t1 | t2 | t3; } /* Complement */ if (complement) { crc32_result = ~crc32_result; } return (crc32_result); } int bxe_test_bit(int nr, volatile unsigned long *addr) { return ((atomic_load_acq_long(addr) & (1 << nr)) != 0); } void bxe_set_bit(unsigned int nr, volatile unsigned long *addr) { atomic_set_acq_long(addr, (1 << nr)); } void bxe_clear_bit(int nr, volatile unsigned long *addr) { atomic_clear_acq_long(addr, (1 << nr)); } int bxe_test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_cmpxchg(volatile int *addr, int old, int new) { int x; do { x = *addr; } while (atomic_cmpset_acq_int(addr, old, new) == 0); return (x); }
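/*
 * A minimal usage sketch for calc_crc32() above (illustrative, not taken
 * from the driver itself): the buffer contents and seed are placeholders.
 * The length is in bytes and must be a multiple of 8, otherwise the seed
 * is returned unchanged.
 *
 *   uint8_t buf[16] = { 0 };
 *   uint32_t crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
 */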
/* * Get DMA memory from the OS. * * Validates that the OS has provided DMA buffers in response to a * bus_dmamap_load call and saves the physical address of those buffers. * When the callback is used the OS will return 0 for the mapping function * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any * failures back to the caller. * * Returns: * Nothing. */ static void bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bxe_dma *dma = arg; if (error) { dma->paddr = 0; dma->nseg = 0; BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error); } else { dma->paddr = segs->ds_addr; dma->nseg = nseg; } } /* * Allocate a block of memory and map it for DMA. No partial completions * are allowed; release any resources acquired if we can't acquire all of * them. * * Returns: * 0 = Success, !0 = Failure */ int bxe_dma_alloc(struct bxe_softc *sc, bus_size_t size, struct bxe_dma *dma, const char *msg) { int rc; if (dma->size > 0) { BLOGE(sc, "dma block '%s' already has size %lu\n", msg, (unsigned long)dma->size); return (1); } memset(dma, 0, sizeof(*dma)); /* sanity */ dma->sc = sc; dma->size = size; snprintf(dma->msg, sizeof(dma->msg), "%s", msg); rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ BCM_PAGE_SIZE, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ size, /* max map size */ 1, /* num discontinuous */ size, /* max seg size */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &dma->tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &dma->map); if (rc != 0) { BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, bxe_dma_map_addr, /* BLOGD in here */ dma, BUS_DMA_NOWAIT); if (rc != 0) { BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } return (0); } void bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma) { if (dma->size > 0) { DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL")); bus_dmamap_sync(dma->tag, dma->map, (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); } memset(dma, 0, sizeof(*dma)); }
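/*
 * A usage sketch for the pair above (illustrative; "tmp_dma" is a
 * hypothetical block used only for this example). bxe_dma_alloc()
 * rejects a block whose size is already non-zero, so start zeroed:
 *
 *   struct bxe_dma tmp_dma;
 *   memset(&tmp_dma, 0, sizeof(tmp_dma));
 *   if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &tmp_dma, "example") != 0) {
 *       return (ENOMEM);
 *   }
 *   ... use tmp_dma.vaddr / tmp_dma.paddr ...
 *   bxe_dma_free(sc, &tmp_dma);
 */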
/* * These indirect read and write routines are only used during init. * The locking is handled by the MCP. */ void bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); } uint32_t bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t addr) { uint32_t val; pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); return (val); } static int bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; int cnt; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is not already taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } /* try every 5ms for 5 seconds */ for (cnt = 0; cnt < 1000; cnt++) { REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (0); } DELAY(5000); } BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n", resource, resource_bit); return (-1); } static int bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is currently taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (!(lock_status & resource_bit)) { BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } REG_WR(sc, hw_lock_control_reg, resource_bit); return (0); } static void bxe_acquire_phy_lock(struct bxe_softc *sc) { BXE_PHY_LOCK(sc); bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); } static void bxe_release_phy_lock(struct bxe_softc *sc) { bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO); BXE_PHY_UNLOCK(sc); }
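/*
 * The two helpers above are used in acquire/work/release pairs around
 * registers shared between driver instances, e.g. (illustrative only):
 *
 *   if (bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO) == 0) {
 *       ... access the shared resource ...
 *       bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
 *   }
 */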
/* * Per pf misc lock must be acquired before the per port mcp lock. Otherwise, * had we done things the other way around, if two pfs from the same port * would attempt to access nvram at the same time, we could run into a * scenario such as: * pf A takes the port lock. * pf B succeeds in taking the same lock since they are from the same port. * pf A takes the per pf misc lock. Performs eeprom access. * pf A finishes. Unlocks the per pf misc lock. * pf B takes the lock and proceeds to perform its own access. * pf A unlocks the per port lock, while pf B is still working (!). * mcp takes the per port lock and corrupts pf B's access (and/or has its own * access corrupted by pf B). */ static int bxe_acquire_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* acquire HW lock: protect against other PFs in PF Direct Assignment */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* request access to nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { break; } DELAY(5); } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { BLOGE(sc, "Cannot get access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } return (0); } static int bxe_release_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* relinquish nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { break; } DELAY(5); } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { BLOGE(sc, "Cannot free access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } /* release HW lock: protect against other PFs in PF Direct Assignment */ bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); return (0); } static void bxe_enable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* enable both bits, even on read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN)); } static void bxe_disable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* disable both bits, even after read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN))); } static int bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val, uint32_t cmd_flags) { int count, i, rc; uint32_t val; /* build the command word */ cmd_flags |= MCPR_NVM_COMMAND_DOIT; /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* address of the NVRAM to read from */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue a read command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ *ret_val = 0; rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { val = REG_RD(sc, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order * but ethtool sees it as an array of bytes * converting to big-endian will do the work */ *ret_val = htobe32(val); rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram read timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } static int bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; int rc;
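/*
 * NVRAM transfers are framed with FIRST/LAST flags: the first dword of a
 * transfer carries MCPR_NVM_COMMAND_FIRST and the final dword carries
 * MCPR_NVM_COMMAND_LAST, as seen in the loop below.
 */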
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); /* read the first word(s) */ cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((buf_size > sizeof(uint32_t)) && (rc == 0)) { rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); /* advance to the next dword */ offset += sizeof(uint32_t); ret_buf += sizeof(uint32_t); buf_size -= sizeof(uint32_t); cmd_flags = 0; } if (rc == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val, uint32_t cmd_flags) { int count, i, rc; /* build the command word */ cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR); /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* write the data */ REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val); /* address of the NVRAM to write to */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue the write command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram write timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) static int bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t align_offset; uint32_t val; int rc; if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); align_offset = (offset & ~0x03); rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); if (rc == 0) { val &= ~(0xff << BYTE_OFFSET(offset)); val |= (*data_buf << BYTE_OFFSET(offset)); /* nvram data is returned as an array of bytes * convert it back to cpu order */ val = be32toh(val); rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; uint32_t written_so_far; int rc; if (buf_size == 1) { return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); } if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { BLOGE(sc, "Invalid 
parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if (buf_size == 0) { return (0); /* nothing to do */ } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); written_so_far = 0; cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((written_so_far < buf_size) && (rc == 0)) { if (written_so_far == (buf_size - sizeof(uint32_t))) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if ((offset % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_FIRST; } memcpy(&val, data_buf, 4); rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); /* advance to the next dword */ offset += sizeof(uint32_t); data_buf += sizeof(uint32_t); written_so_far += sizeof(uint32_t); cmd_flags = 0; } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } /* copy command into DMAE command memory and set DMAE command Go */ void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx) { uint32_t cmd_offset; int i; cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx)); for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) { REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); } REG_WR(sc, dmae_reg_go_c[idx], 1); } uint32_t bxe_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) { return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) | DMAE_CMD_C_TYPE_ENABLE)); } uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode) { return (opcode & ~DMAE_CMD_SRC_RESET); } uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type, uint8_t dst_type, uint8_t with_comp, uint8_t comp_type) { uint32_t opcode = 0; opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) | (dst_type << DMAE_CMD_DST_SHIFT)); opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) | (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT)); opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT); #ifdef __BIG_ENDIAN opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; #else opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; #endif if (with_comp) { opcode = bxe_dmae_opcode_add_comp(opcode, comp_type); } return (opcode); } static void bxe_prep_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae, uint8_t src_type, uint8_t dst_type) { memset(dmae, 0, sizeof(struct dmae_cmd)); /* set the opcode */ dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, TRUE, DMAE_COMP_PCI); /* fill in the completion parameters */ dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_val = DMAE_COMP_VAL; } /* issue a DMAE command over the init channel and wait for completion */ static int bxe_issue_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae) { uint32_t *wb_comp = BXE_SP(sc, wb_comp); int timeout = CHIP_REV_IS_SLOW(sc) ? 
400000 : 4000; BXE_DMAE_LOCK(sc); /* reset completion */ *wb_comp = 0; /* post the command on the channel used for initializations */ bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); /* wait for completion */ DELAY(5); while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!timeout || (sc->recovery_state != BXE_RECOVERY_DONE && sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_TIMEOUT); } timeout--; DELAY(50); } if (*wb_comp & DMAE_PCI_ERR_FLAG) { BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_PCI_ERROR); } BXE_DMAE_UNLOCK(sc); return (0); } void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32) { struct dmae_cmd dmae; uint32_t *data; int i, rc; DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); if (!sc->dmae_ready) { data = BXE_SP(sc, wb_data[0]); for (i = 0; i < len32; i++) { data[i] = (CHIP_IS_E1(sc)) ? bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : REG_RD(sc, (src_addr + (i * 4))); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); /* fill in addresses and len */ dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ dmae.src_addr_hi = 0; dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr, uint32_t len32) { struct dmae_cmd dmae; int rc; if (!sc->dmae_ready) { DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); if (CHIP_IS_E1(sc)) { ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } else { ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); /* fill in addresses and len */ dmae.src_addr_lo = U64_LO(dma_addr); dmae.src_addr_hi = U64_HI(dma_addr); dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ dmae.dst_addr_hi = 0; dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); int offset = 0; while (len > dmae_wr_max) { bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ dmae_wr_max); offset += (dmae_wr_max * 4); len -= dmae_wr_max; } bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ len); } void bxe_set_ctx_validation(struct bxe_softc *sc, struct eth_context *cxt, uint32_t cid) { /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); /* xcontext validation */ cxt->xstorm_ag_context.cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } static void bxe_storm_memset_hc_timeout(struct bxe_softc *sc, uint8_t port, uint8_t fw_sb_id, uint8_t sb_index, uint8_t ticks) { uint32_t addr =
(BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); REG_WR8(sc, addr, ticks); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d ticks %d\n", port, fw_sb_id, sb_index, ticks); } static void bxe_storm_memset_hc_disable(struct bxe_softc *sc, uint8_t port, uint16_t fw_sb_id, uint8_t sb_index, uint8_t disable) { uint32_t enable_flag = (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); uint8_t flags; /* clear and set */ flags = REG_RD8(sc, addr); flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; REG_WR8(sc, addr, flags); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); } void bxe_update_coalesce_sb_index(struct bxe_softc *sc, uint8_t fw_sb_id, uint8_t sb_index, uint8_t disable, uint16_t usec) { int port = SC_PORT(sc); uint8_t ticks = (usec / 4); /* XXX ??? */ bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); disable = (disable) ? 1 : ((usec) ? 0 : 1); bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); } void elink_cb_udelay(struct bxe_softc *sc, uint32_t usecs) { DELAY(usecs); } uint32_t elink_cb_reg_read(struct bxe_softc *sc, uint32_t reg_addr) { return (REG_RD(sc, reg_addr)); } void elink_cb_reg_write(struct bxe_softc *sc, uint32_t reg_addr, uint32_t val) { REG_WR(sc, reg_addr, val); } void elink_cb_reg_wb_write(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_WR_DMAE(sc, offset, wb_write, len); } void elink_cb_reg_wb_read(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_RD_DMAE(sc, offset, wb_write, len); } uint8_t elink_cb_path_id(struct bxe_softc *sc) { return (SC_PATH(sc)); } void elink_cb_event_log(struct bxe_softc *sc, const elink_log_id_t elink_log_id, ...) { /* XXX */ BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); } static int bxe_set_spio(struct bxe_softc *sc, int spio, uint32_t mode) { uint32_t spio_reg; /* Only 2 SPIOs are configurable */ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); switch (mode) { case MISC_SPIO_OUTPUT_LOW: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); /* clear FLOAT and set CLR */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_CLR_POS); break; case MISC_SPIO_OUTPUT_HIGH: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); /* clear FLOAT and set SET */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_SET_POS); break; case MISC_SPIO_INPUT_HI_Z: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); /* set FLOAT */ spio_reg |= (spio << MISC_SPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_SPIO, spio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); return (0); } static int bxe_gpio_read(struct bxe_softc *sc, int gpio_num, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, gpio_mask); return (-1); } /* read GPIO value */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); /* get the requested pin value */ return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; } static int bxe_gpio_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint32_t mode) { uint32_t gpio_reg; /* any port swapping should be handled by caller */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); /* set CLR */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); /* set SET */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); /* set FLOAT */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" " gpio_reg 0x%x\n", pins, mode, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (-1); } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_int_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, 
NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO int */ gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: BLOGD(sc, DBG_PHY, "Clear GPIO INT %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: BLOGD(sc, DBG_PHY, "Set GPIO INT %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } uint32_t elink_cb_gpio_read(struct bxe_softc *sc, uint16_t gpio_num, uint8_t port) { return (bxe_gpio_read(sc, gpio_num, port)); } uint8_t elink_cb_gpio_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_write(sc, gpio_num, mode, port)); } uint8_t elink_cb_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint8_t mode) /* 0=low 1=high */ { return (bxe_gpio_mult_write(sc, pins, mode)); } uint8_t elink_cb_gpio_int_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_int_write(sc, gpio_num, mode, port)); } void elink_cb_notify_link_changed(struct bxe_softc *sc) { REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + (SC_FUNC(sc) * sizeof(uint32_t))), 1); } /* send the MCP a request, block until there is a reply */ uint32_t elink_cb_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t seq; uint32_t rc = 0; uint32_t cnt = 1; uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; BXE_FWMB_LOCK(sc); seq = ++sc->fw_seq; SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); BLOGD(sc, DBG_PHY, "wrote command 0x%08x to FW MB param 0x%08x\n", (command | seq), param); /* Let the FW do its magic. Give it up to 5 seconds... */ do { DELAY(delay * 1000); rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); BLOGD(sc, DBG_PHY, "[after %d ms] read 0x%x seq 0x%x from FW MB\n", cnt*delay, rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { rc &= FW_MSG_CODE_MASK; } else { /* Ruh-roh! */ BLOGE(sc, "FW failed to respond!\n"); // XXX bxe_fw_dump(sc); rc = 0; } BXE_FWMB_UNLOCK(sc); return (rc); }
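/*
 * A usage sketch (illustrative; the command and parameter values are
 * placeholders): callers issue a mailbox command and test the masked
 * reply code, treating 0 as "no valid response from the MCP":
 *
 *   uint32_t resp = elink_cb_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 0);
 *   if (resp == 0) {
 *       ... handle an unresponsive MCP ...
 *   }
 */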
static uint32_t bxe_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { return (elink_cb_fw_command(sc, command, param)); } static void __storm_memset_dma_mapping(struct bxe_softc *sc, uint32_t addr, bus_addr_t mapping) { REG_WR(sc, addr, U64_LO(mapping)); REG_WR(sc, (addr + 4), U64_HI(mapping)); } static void storm_memset_spq_addr(struct bxe_softc *sc, bus_addr_t mapping, uint16_t abs_fid) { uint32_t addr = (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); __storm_memset_dma_mapping(sc, addr, mapping); } static void storm_memset_vf_to_pf(struct bxe_softc *sc, uint16_t abs_fid, uint16_t pf_id) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); } static void storm_memset_func_en(struct bxe_softc *sc, uint16_t abs_fid, uint8_t enable) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); } static void storm_memset_eq_data(struct bxe_softc *sc, struct event_ring_data *eq_data, uint16_t pfid) { uint32_t addr; size_t size; addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); size = sizeof(struct event_ring_data); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); } static void storm_memset_eq_prod(struct bxe_softc *sc, uint16_t eq_prod, uint16_t pfid) { uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); REG_WR16(sc, addr, eq_prod); } /* * Post a slowpath command. * * A slowpath command is used to propagate a configuration change through * the controller in a controlled manner, allowing each STORM processor and * other H/W blocks to phase in the change. The commands sent on the * slowpath are referred to as ramrods. Depending on the ramrod used the * completion of the ramrod will occur in different ways. Here's a * breakdown of ramrods and how they complete: * * RAMROD_CMD_ID_ETH_PORT_SETUP * Used to setup the leading connection on a port. Completes on the * Receive Completion Queue (RCQ) of that port (typically fp[0]). * * RAMROD_CMD_ID_ETH_CLIENT_SETUP * Used to setup an additional connection on a port. Completes on the * RCQ of the multi-queue/RSS connection being initialized. * * RAMROD_CMD_ID_ETH_STAT_QUERY * Used to force the storm processors to update the statistics database * in host memory. This ramrod is sent on the leading connection CID and * completes as an index increment of the CSTORM on the default status * block. * * RAMROD_CMD_ID_ETH_UPDATE * Used to update the state of the leading connection, usually to update * the RSS indirection table. Completes on the RCQ of the leading * connection. (Not currently used under FreeBSD until OS support becomes * available.) * * RAMROD_CMD_ID_ETH_HALT * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the multi-queue/RSS connection being torn down. Don't * use this on the leading connection. * * RAMROD_CMD_ID_ETH_SET_MAC * Sets the Unicast/Broadcast/Multicast used by the port. Completes on * the RCQ of the leading connection. * * RAMROD_CMD_ID_ETH_CFC_DEL * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the leading connection (since the current connection * has been completely removed from controller memory). * * RAMROD_CMD_ID_ETH_PORT_DEL * Used to tear down the leading connection prior to driver unload, * typically fp[0]. Completes as an index increment of the CSTORM on the * default status block. * * RAMROD_CMD_ID_ETH_FORWARD_SETUP * Used for connection offload. Completes on the RCQ of the multi-queue * RSS connection that is being offloaded. (Not currently used under * FreeBSD.) * * There can only be one command pending per function. * * Returns: * 0 = Success, !0 = Failure. */
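/*
 * An illustrative sketch of posting a ramrod with bxe_sp_post(), defined
 * below. A statistics query on the leading connection might look like:
 *
 *   bxe_sp_post(sc, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *               U64_HI(mapping), U64_LO(mapping), ETH_CONNECTION_TYPE);
 *
 * where "mapping" is a placeholder for the bus address of the command's
 * private data.
 */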
/* must be called under the spq lock */ static inline struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) { struct eth_spe *next_spe = sc->spq_prod_bd; if (sc->spq_prod_bd == sc->spq_last_bd) { /* wrap back to the first eth_spq */ sc->spq_prod_bd = sc->spq; sc->spq_prod_idx = 0; } else { sc->spq_prod_bd++; sc->spq_prod_idx++; } return (next_spe); } /* must be called under the spq lock */ static inline void bxe_sp_prod_update(struct bxe_softc *sc) { int func = SC_FUNC(sc); /* * Make sure that BD data is updated before writing the producer. * BD data is written to the memory, the producer is read from the * memory, thus we need a full memory barrier to ensure the ordering. */ mb(); REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), sc->spq_prod_idx); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); } /** * bxe_is_contextless_ramrod - check if the current command ends on EQ * * @cmd: command to check * @cmd_type: command type */ static inline int bxe_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { return (TRUE); } else { return (FALSE); } } /** * bxe_sp_post - place a single command on an SP ring * * @sc: driver handle * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) * @cid: SW CID the command is related to * @data_hi: command private data address (high 32 bits) * @data_lo: command private data address (low 32 bits) * @cmd_type: command type (e.g. NONE, ETH) * * SP data is handled as if it's always an address pair, thus data fields are * not swapped to little endian in upper functions. Instead this function swaps * data as if it's two uint32 fields.
*/ int bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi, uint32_t data_lo, int cmd_type) { struct eth_spe *spe; uint16_t type; int common; common = bxe_is_contextless_ramrod(command, cmd_type); BXE_SP_LOCK(sc); if (common) { if (!atomic_load_acq_long(&sc->eq_spq_left)) { BLOGE(sc, "EQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } else { if (!atomic_load_acq_long(&sc->cq_spq_left)) { BLOGE(sc, "SPQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } spe = bxe_sp_get_next(sc); /* CID needs port number to be encoded in it */ spe->hdr.conn_and_cmd_data = htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid)); type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE; /* TBD: Check if it works for VFs */ type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) & SPE_HDR_T_FUNCTION_ID); spe->hdr.type = htole16(type); spe->data.update_data_addr.hi = htole32(data_hi); spe->data.update_data_addr.lo = htole32(data_lo); /* * It's ok if the actual decrement is issued towards the memory * somewhere between the lock and unlock. Thus no more explicit * memory barrier is needed. */ if (common) { atomic_subtract_acq_long(&sc->eq_spq_left, 1); } else { atomic_subtract_acq_long(&sc->cq_spq_left, 1); } BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); BLOGD(sc, DBG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", sc->spq_prod_idx, (uint32_t)U64_HI(sc->spq_dma.paddr), (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), command, common, HW_CID(sc, cid), data_hi, data_lo, type, atomic_load_acq_long(&sc->cq_spq_left), atomic_load_acq_long(&sc->eq_spq_left)); bxe_sp_prod_update(sc); BXE_SP_UNLOCK(sc); return (0); } /** * bxe_debug_print_ind_table - prints the indirection table configuration. * * @sc: driver handle * @p: pointer to rss configuration */ /* * FreeBSD Device probe function. * * Compares the device found to the driver's list of supported devices and * reports back to the bsd loader whether this is the right driver for the device. * This is the driver entry function called from the "kldload" command. * * Returns: * BUS_PROBE_DEFAULT on success, positive value on failure. */ static int bxe_probe(device_t dev) { struct bxe_softc *sc; struct bxe_device_type *t; char *descbuf; uint16_t did, sdid, svid, vid; /* Find our device structure */ sc = device_get_softc(dev); sc->dev = dev; t = bxe_devs; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); BLOGD(sc, DBG_LOAD, "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); /* Look through the list of known devices for a match. */ while (t->bxe_name != NULL) { if ((vid == t->bxe_vid) && (did == t->bxe_did) && ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); if (descbuf == NULL) return (ENOMEM); /* Print out the device identity.
*/ snprintf(descbuf, BXE_DEVDESC_MAX, "%s (%c%d) BXE v:%s\n", t->bxe_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf), BXE_DRIVER_VERSION); device_set_desc_copy(dev, descbuf); free(descbuf, M_TEMP); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bxe_init_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), "bxe%d_core_lock", sc->unit); sx_init(&sc->core_sx, sc->core_sx_name); #else snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), "bxe%d_core_lock", sc->unit); mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); #endif snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), "bxe%d_sp_lock", sc->unit); mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), "bxe%d_dmae_lock", sc->unit); mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), "bxe%d_phy_lock", sc->unit); mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), "bxe%d_fwmb_lock", sc->unit); mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), "bxe%d_print_lock", sc->unit); mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), "bxe%d_stats_lock", sc->unit); mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), "bxe%d_mcast_lock", sc->unit); mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); } static void bxe_release_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX sx_destroy(&sc->core_sx); #else if (mtx_initialized(&sc->core_mtx)) { mtx_destroy(&sc->core_mtx); } #endif if (mtx_initialized(&sc->sp_mtx)) { mtx_destroy(&sc->sp_mtx); } if (mtx_initialized(&sc->dmae_mtx)) { mtx_destroy(&sc->dmae_mtx); } if (mtx_initialized(&sc->port.phy_mtx)) { mtx_destroy(&sc->port.phy_mtx); } if (mtx_initialized(&sc->fwmb_mtx)) { mtx_destroy(&sc->fwmb_mtx); } if (mtx_initialized(&sc->print_mtx)) { mtx_destroy(&sc->print_mtx); } if (mtx_initialized(&sc->stats_mtx)) { mtx_destroy(&sc->stats_mtx); } if (mtx_initialized(&sc->mcast_mtx)) { mtx_destroy(&sc->mcast_mtx); } } static void bxe_tx_disable(struct bxe_softc* sc) { if_t ifp = sc->ifp; /* tell the stack the driver is stopped and TX queue is full */ if (ifp != NULL) { if_setdrvflags(ifp, 0); } } static void bxe_drv_pulse(struct bxe_softc *sc) { SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, sc->fw_drv_pulse_wr_seq); } static inline uint16_t bxe_tx_avail(struct bxe_softc *sc, struct bxe_fastpath *fp) { int16_t used; uint16_t prod; uint16_t cons; prod = fp->tx_bd_prod; cons = fp->tx_bd_cons; used = SUB_S16(prod, cons); return (int16_t)(sc->tx_ring_size) - used; } static inline int bxe_tx_queue_has_work(struct bxe_fastpath *fp) { uint16_t hw_cons; mb(); /* status block fields can change */ hw_cons = le16toh(*fp->tx_cons_sb); return (hw_cons != fp->tx_pkt_cons); } static inline uint8_t bxe_has_tx_work(struct bxe_fastpath *fp) { /* expand this for multi-cos if ever supported */ return (bxe_tx_queue_has_work(fp)) ? 
TRUE : FALSE; } static inline int bxe_has_rx_work(struct bxe_fastpath *fp) { uint16_t rx_cq_cons_sb; mb(); /* status block fields can change */ rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) rx_cq_cons_sb++; return (fp->rx_cq_cons != rx_cq_cons_sb); } static void bxe_sp_event(struct bxe_softc *sc, struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe) { int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); drv_cmd = ECORE_Q_CMD_UPDATE; break; case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP; break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; break; case (RAMROD_CMD_ID_ETH_HALT): BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); drv_cmd = ECORE_Q_CMD_HALT; break; case (RAMROD_CMD_ID_ETH_TERMINATE): BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = ECORE_Q_CMD_TERMINATE; break; case (RAMROD_CMD_ID_ETH_EMPTY): BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); drv_cmd = ECORE_Q_CMD_EMPTY; break; default: BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", command, fp->index); return; } if ((drv_cmd != ECORE_Q_CMD_MAX) && q_obj->complete_cmd(sc, q_obj, drv_cmd)) { /* * q_obj->complete_cmd() failure means that this was * an unexpected completion. * * In this case we don't want to increase the sc->spq_left * because apparently we haven't sent this command in the first * place. */ // bxe_panic(sc, ("Unexpected SP completion\n")); return; } atomic_add_acq_long(&sc->cq_spq_left, 1); BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", atomic_load_acq_long(&sc->cq_spq_left)); } /* * The current mbuf is part of an aggregation. Move the mbuf into the TPA * aggregation queue, put an empty mbuf back onto the receive chain, and mark * the current aggregation queue as in-progress. */ static void bxe_tpa_start(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue, uint16_t cons, uint16_t prod, struct eth_fast_path_rx_cqe *cqe) { struct bxe_sw_rx_bd tmp_bd; struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; int max_agg_queues; struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; uint16_t index; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " "cons=%d prod=%d\n", fp->index, queue, cons, prod); max_agg_queues = MAX_AGG_QS(sc); KASSERT((queue < max_agg_queues), ("fp[%02d] invalid aggr queue (%d >= %d)!", fp->index, queue, max_agg_queues)); KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", fp->index, queue)); /* copy the existing mbuf and mapping from the TPA pool */ tmp_bd = tpa_info->bd; if (tmp_bd.m == NULL) { uint32_t *tmp; tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n", fp->index, queue, cons, prod); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); /* XXX Error handling?
*/ return; } /* change the TPA queue to the start state */ tpa_info->state = BXE_TPA_STATE_START; tpa_info->placement_offset = cqe->placement_offset; tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); tpa_info->vlan_tag = le16toh(cqe->vlan_tag); tpa_info->len_on_bd = le16toh(cqe->len_on_bd); fp->rx_tpa_queue_used |= (1 << queue); /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ index = (sc->max_rx_bufs != RX_BD_USABLE) ? prod : cons; /* move the received mbuf and mapping to TPA pool */ tpa_info->bd = fp->rx_mbuf_chain[cons]; /* release any existing RX BD mbuf mappings */ if (cons != index) { rx_buf = &fp->rx_mbuf_chain[cons]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We get here when the maximum number of rx buffers is less than * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL * it out here without concern of a memory leak. */ fp->rx_mbuf_chain[cons].m = NULL; } /* update the Rx SW BD with the mbuf info from the TPA pool */ fp->rx_mbuf_chain[index] = tmp_bd; /* update the Rx BD with the empty mbuf phys address from the TPA pool */ rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); } /* * When a TPA aggregation is completed, loop through the individual mbufs * of the aggregation, combining them into a single mbuf which will be sent * up the stack. Refill all freed SGEs with mbufs as we go along. */ static int bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct mbuf *m, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { struct mbuf *m_frag; uint32_t frag_len, frag_size, i; uint16_t sge_idx; int rc = 0; int j; frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", fp->index, queue, tpa_info->len_on_bd, frag_size, pages); /* make sure the aggregated frame is not too big to handle */ if (pages > 8 * PAGES_PER_SGE) { uint32_t *tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " "pkt_len=%d len_on_bd=%d frag_size=%d\n", fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), tpa_info->len_on_bd, frag_size); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); bxe_panic(sc, ("sge page count error\n")); return (EINVAL); } /* * Scan through the scatter gather list pulling individual mbufs into a * single mbuf for the host stack. */ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); /* * Firmware gives the indices of the SGE as if the ring is an array * (meaning that the "next" element will consume 2 indices). */ frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " "sge_idx=%d frag_size=%d frag_len=%d\n", fp->index, queue, i, j, sge_idx, frag_size, frag_len); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! 
*/ return (rc); } /* update the fragment length */ m_frag->m_len = frag_len; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); fp->eth_q_stats.mbuf_alloc_sge--; /* update the TPA mbuf size and remaining fragment size */ m->m_pkthdr.len += frag_len; frag_size -= frag_len; } BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", fp->index, queue, frag_size); return (rc); } static inline void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) { int i, j; for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; for (j = 0; j < 2; j++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); idx--; } } } static inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) { /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); /* * Clear the two last indices in the page to 1. These are the indices that * correspond to the "next" element, hence will never be indicated and * should be removed from the calculations. */ bxe_clear_sge_mask_next_elems(fp); } static inline void bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx) { uint16_t last_max = fp->last_max_sge; if (SUB_S16(idx, last_max) > 0) { fp->last_max_sge = idx; } } static inline void bxe_update_sge_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t sge_len, union eth_sgl_or_raw_data *cqe) { uint16_t last_max, last_elem, first_elem; uint16_t delta = 0; uint16_t i; if (!sge_len) { return; } /* first mark all used pages */ for (i = 0; i < sge_len; i++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, RX_SGE(le16toh(cqe->sgl[i]))); } BLOGD(sc, DBG_LRO, "fp[%02d] fp_cqe->sgl[%d] = %d\n", fp->index, sge_len - 1, le16toh(cqe->sgl[sge_len - 1])); /* assume that the last SGE index is the biggest */ bxe_update_last_max_sge(fp, le16toh(cqe->sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; /* if ring is not full */ if (last_elem + 1 != first_elem) { last_elem++; } /* now update the prod */ for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { if (__predict_true(fp->sge_mask[i])) { break; } fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; delta += BIT_VEC64_ELEM_SZ; } if (delta > 0) { fp->rx_sge_prod += delta; /* clear page-end entries */ bxe_clear_sge_mask_next_elems(fp); } BLOGD(sc, DBG_LRO, "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", fp->index, fp->last_max_sge, fp->rx_sge_prod); }
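/*
 * A short worked example of the mask bookkeeping above (illustrative,
 * assuming BIT_VEC64_ELEM_SZ bits per mask element): once every SGE
 * tracked by a mask element has been consumed, that element reads as 0,
 * so the producer loop advances rx_sge_prod by BIT_VEC64_ELEM_SZ and
 * re-marks the element as all ones for reuse.
 */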
/* * The aggregation on the current TPA queue has completed. Pull the individual * mbuf fragments together into a single mbuf, perform all necessary checksum * calculations, and send the resulting mbuf to the stack. */ static void bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { if_t ifp = sc->ifp; struct mbuf *m; int rc = 0; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", fp->index, queue, tpa_info->placement_offset, le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); m = tpa_info->bd.m; /* allocate a replacement before modifying existing mbuf */ rc = bxe_alloc_rx_tpa_mbuf(fp, queue); if (rc) { /* drop the frame and log an error */ fp->eth_q_stats.rx_soft_errors++; goto bxe_tpa_stop_exit; } /* we have a replacement, fixup the current mbuf */ m_adj(m, tpa_info->placement_offset); m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; /* mark the checksums valid (taken care of by the firmware) */ fp->eth_q_stats.rx_ofld_frames_csum_ip++; fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); /* aggregate all of the SGEs into a single mbuf */ rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); if (rc) { /* drop the packet and log an error */ fp->eth_q_stats.rx_soft_errors++; m_freem(m); } else { if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; m->m_flags |= M_VLANTAG; } /* assign packet to this interface */ if_setrcvif(m, ifp); #if __FreeBSD_version >= 800000 /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); fp->eth_q_stats.rx_tpa_pkts++; /* pass the frame to the stack */ if_input(ifp, m); } /* we passed an mbuf up the stack or dropped the frame */ fp->eth_q_stats.mbuf_alloc_tpa--; bxe_tpa_stop_exit: fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; fp->rx_tpa_queue_used &= ~(1 << queue); } static uint8_t bxe_service_rxsgl(struct bxe_fastpath *fp, uint16_t len, uint16_t lenonbd, struct mbuf *m, struct eth_fast_path_rx_cqe *cqe_fp) { struct mbuf *m_frag; uint16_t frags, frag_len; uint16_t sge_idx = 0; uint16_t j; uint8_t i, rc = 0; uint32_t frag_size; /* adjust the mbuf */ m->m_len = lenonbd; frag_size = len - lenonbd; frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); m_frag->m_len = frag_len; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring!
*/ return (rc); } fp->eth_q_stats.mbuf_alloc_sge--; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); frag_size -= frag_len; } bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); return rc; } static uint8_t bxe_rxeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; int rx_pkts = 0; int rc = 0; BXE_FP_RX_LOCK(fp); /* CQ "next element" is of the size of the regular element */ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { hw_cq_cons++; } bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_cq_cons = fp->rx_cq_cons; sw_cq_prod = fp->rx_cq_prod; /* * Memory barrier necessary as speculative reads of the rx * buffer can be ahead of the index in the status block */ rmb(); BLOGD(sc, DBG_RX, "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", fp->index, hw_cq_cons, sw_cq_cons); while (sw_cq_cons != hw_cq_cons) { struct bxe_sw_rx_bd *rx_buf = NULL; union eth_rx_cqe *cqe; struct eth_fast_path_rx_cqe *cqe_fp; uint8_t cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; uint16_t len, lenonbd, pad; struct mbuf *m = NULL; comp_ring_cons = RCQ(sw_cq_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); cqe = &fp->rcq_chain[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; BLOGD(sc, DBG_RX, "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " "BD prod=%d cons=%d CQE type=0x%x err=0x%x " "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", fp->index, hw_cq_cons, sw_cq_cons, bd_prod, bd_cons, CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32toh(cqe_fp->rss_hash_result), le16toh(cqe_fp->vlan_tag), le16toh(cqe_fp->pkt_len_or_gro_seg_len), le16toh(cqe_fp->len_on_bd)); /* is this a slowpath msg? */ if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { bxe_sp_event(sc, fp, cqe); goto next_cqe; } rx_buf = &fp->rx_mbuf_chain[bd_cons]; if (!CQE_TYPE_FAST(cqe_fp_type)) { struct bxe_sw_tpa_info *tpa_info; uint16_t frag_size, pages; uint8_t queue; if (CQE_TYPE_START(cqe_fp_type)) { bxe_tpa_start(sc, fp, cqe_fp->queue_index, bd_cons, bd_prod, cqe_fp); m = NULL; /* packet not ready yet */ goto next_rx; } KASSERT(CQE_TYPE_STOP(cqe_fp_type), ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->rx_tpa_info[queue]; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", fp->index, queue); frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - tpa_info->len_on_bd); pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; bxe_tpa_stop(sc, fp, tpa_info, queue, pages, &cqe->end_agg_cqe, comp_ring_cons); bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); goto next_cqe; } /* non TPA */ /* is this an error packet? */ if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); fp->eth_q_stats.rx_soft_errors++; goto next_rx; } len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); lenonbd = le16toh(cqe_fp->len_on_bd); pad = cqe_fp->placement_offset; m = rx_buf->m; if (__predict_false(m == NULL)) { BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", bd_cons, fp->index); goto next_rx; } /* XXX double copy if packet length under a threshold */ /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. 
Else if a maximum Rx
     * buffer limit is imposed then fill in the next producer index.
     */
    rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
                              (sc->max_rx_bufs != RX_BD_USABLE) ?
                                  bd_prod : bd_cons);
    if (rc != 0) {

        /* we simply reuse the received mbuf and don't post it to the stack */
        m = NULL;

        BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
              fp->index, rc);
        fp->eth_q_stats.rx_soft_errors++;

        if (sc->max_rx_bufs != RX_BD_USABLE) {
            /* copy this consumer index to the producer index */
            memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
                   sizeof(struct bxe_sw_rx_bd));
            memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
        }

        goto next_rx;
    }

    /* current mbuf was detached from the bd */
    fp->eth_q_stats.mbuf_alloc_rx--;

    /* we allocated a replacement mbuf, fixup the current one */
    m_adj(m, pad);
    m->m_pkthdr.len = m->m_len = len;

    if ((len > 60) && (len > lenonbd)) {
        fp->eth_q_stats.rx_bxe_service_rxsgl++;
        rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
        if (rc)
            break;
        fp->eth_q_stats.rx_jumbo_sge_pkts++;
    } else if (lenonbd < len) {
        fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
    }

    /* assign the packet to this interface */
    if_setrcvif(m, ifp);

    /* assume no hardware checksum has completed */
    m->m_pkthdr.csum_flags = 0;

    /* validate checksum if offload enabled */
    if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
        /* check for a valid IP frame */
        if (!(cqe->fast_path_cqe.status_flags &
              ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
            m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
            if (__predict_false(cqe_fp_flags &
                                ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
                fp->eth_q_stats.rx_hw_csum_errors++;
            } else {
                fp->eth_q_stats.rx_ofld_frames_csum_ip++;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
            }
        }

        /* check for a valid TCP/UDP frame */
        if (!(cqe->fast_path_cqe.status_flags &
              ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
            if (__predict_false(cqe_fp_flags &
                                ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
                fp->eth_q_stats.rx_hw_csum_errors++;
            } else {
                fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
                m->m_pkthdr.csum_data = 0xFFFF;
                m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
                                           CSUM_PSEUDO_HDR);
            }
        }
    }

    /* if there is a VLAN tag then flag that info */
    if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
        m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
        m->m_flags |= M_VLANTAG;
    }

#if __FreeBSD_version >= 800000
    /* specify what RSS queue was used for this flow */
    m->m_pkthdr.flowid = fp->index;
    M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
#endif

next_rx:

    bd_cons    = RX_BD_NEXT(bd_cons);
    bd_prod    = RX_BD_NEXT(bd_prod);
    bd_prod_fw = RX_BD_NEXT(bd_prod_fw);

    /* pass the frame to the stack */
    if (__predict_true(m != NULL)) {
        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        rx_pkts++;
        if_input(ifp, m);
    }

next_cqe:

    sw_cq_prod = RCQ_NEXT(sw_cq_prod);
    sw_cq_cons = RCQ_NEXT(sw_cq_cons);

    /* limit spinning on the queue */
    if (rc != 0)
        break;

    if (rx_pkts == sc->rx_budget) {
        fp->eth_q_stats.rx_budget_reached++;
        break;
    }
} /* while work to do */

    fp->rx_bd_cons = bd_cons;
    fp->rx_bd_prod = bd_prod_fw;
    fp->rx_cq_cons = sw_cq_cons;
    fp->rx_cq_prod = sw_cq_prod;

    /* Update producers */
    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);

    fp->eth_q_stats.rx_pkts += rx_pkts;
    fp->eth_q_stats.rx_calls++;

    BXE_FP_RX_UNLOCK(fp);

    return (sw_cq_cons != hw_cq_cons);
}

static uint16_t
bxe_free_tx_pkt(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint16_t            idx)
{
    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
    struct eth_tx_start_bd *tx_start_bd;
    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
    uint16_t new_cons;
    int nbd;

    /* unmap the mbuf from non-paged memory */
bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); tx_start_bd = &fp->tx_chain[bd_idx].start_bd; nbd = le16toh(tx_start_bd->nbd) - 1; new_cons = (tx_buf->first_bd + nbd); /* free the mbuf */ if (__predict_true(tx_buf->m != NULL)) { m_freem(tx_buf->m); fp->eth_q_stats.mbuf_alloc_tx--; } else { fp->eth_q_stats.tx_chain_lost_mbuf++; } tx_buf->m = NULL; tx_buf->first_bd = 0; return (new_cons); } /* transmit timeout watchdog */ static int bxe_watchdog(struct bxe_softc *sc, struct bxe_fastpath *fp) { BXE_FP_TX_LOCK(fp); if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { BXE_FP_TX_UNLOCK(fp); return (0); } BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); BXE_FP_TX_UNLOCK(fp); atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); return (-1); } /* processes transmit completions */ static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); bd_cons = fp->tx_bd_cons; hw_cons = le16toh(*fp->tx_cons_sb); sw_cons = fp->tx_pkt_cons; while (sw_cons != hw_cons) { pkt_cons = TX_BD(sw_cons); BLOGD(sc, DBG_TX, "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", fp->index, hw_cons, sw_cons, pkt_cons); bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); sw_cons++; } fp->tx_pkt_cons = sw_cons; fp->tx_bd_cons = bd_cons; BLOGD(sc, DBG_TX, "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); mb(); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); } else { if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } if (fp->tx_pkt_prod != fp->tx_pkt_cons) { /* reset the watchdog timer if there are pending transmits */ fp->watchdog_timer = BXE_TX_TIMEOUT; return (TRUE); } else { /* clear watchdog when there are no pending transmits */ fp->watchdog_timer = 0; return (FALSE); } } static void bxe_drain_tx_queues(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, count; /* wait until all TX fastpath tasks have completed */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; count = 1000; while (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); if (count == 0) { BLOGE(sc, "Timeout waiting for fp[%d] " "transmits to complete!\n", i); bxe_panic(sc, ("tx drain failure\n")); return; } count--; DELAY(1000); rmb(); } } return; } static int bxe_del_all_macs(struct bxe_softc *sc, struct ecore_vlan_mac_obj *mac_obj, int mac_type, uint8_t wait_for_comp) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; int rc; /* wait for completion of requested */ if (wait_for_comp) { bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); } /* Set the mac type of addresses we want to clear */ bxe_set_bit(mac_type, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n", rc, mac_type, wait_for_comp); } return (rc); } static int bxe_fill_accept_flags(struct bxe_softc *sc, uint32_t rx_mode, unsigned long *rx_accept_flags, unsigned long *tx_accept_flags) { /* Clear the flags first */ *rx_accept_flags = 0; *tx_accept_flags = 0; switch (rx_mode) { case BXE_RX_MODE_NONE: /* * 'drop all' supersedes any accept flags that may have been * passed to the function. 
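         * Leaving both accept-flag masks empty (they were cleared above) is
         * what implements the drop.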
*/ break; case BXE_RX_MODE_NORMAL: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_ALLMULTI: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_PROMISC: /* * According to deffinition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. */ bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); if (IS_MF_SI(sc)) { bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); } else { bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); } break; default: BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); return (-1); } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ if (rx_mode != BXE_RX_MODE_NONE) { bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); } return (0); } static int bxe_set_q_rx_mode(struct bxe_softc *sc, uint8_t cl_id, unsigned long rx_mode_flags, unsigned long rx_accept_flags, unsigned long tx_accept_flags, unsigned long ramrod_flags) { struct ecore_rx_mode_ramrod_params ramrod_param; int rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* Prepare ramrod parameters */ ramrod_param.cid = 0; ramrod_param.cl_id = cl_id; ramrod_param.rx_mode_obj = &sc->rx_mode_obj; ramrod_param.func_id = SC_FUNC(sc); ramrod_param.pstate = &sc->sp_state; ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); ramrod_param.ramrod_flags = ramrod_flags; ramrod_param.rx_mode_flags = rx_mode_flags; ramrod_param.rx_accept_flags = rx_accept_flags; ramrod_param.tx_accept_flags = tx_accept_flags; rc = ecore_config_rx_mode(sc, &ramrod_param); if (rc < 0) { BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " "rx_accept_flags 0x%x tx_accept_flags 0x%x " "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); return (rc); } return (0); } static int bxe_set_storm_rx_mode(struct bxe_softc *sc) { unsigned long rx_mode_flags = 0, ramrod_flags = 0; unsigned long rx_accept_flags = 0, tx_accept_flags = 0; int rc; rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, &tx_accept_flags); if (rc) { return (rc); } bxe_set_bit(RAMROD_RX, &ramrod_flags); bxe_set_bit(RAMROD_TX, &ramrod_flags); /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, rx_accept_flags, tx_accept_flags, ramrod_flags)); } 
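/*
 * The receive filter path above flows top-down: bxe_set_storm_rx_mode()
 * translates sc->rx_mode into accept flags via bxe_fill_accept_flags() and
 * posts them to the firmware as an rx_mode ramrod through bxe_set_q_rx_mode()
 * and ecore_config_rx_mode(). A minimal, non-compiled sketch of how a caller
 * would switch the filter mode, assuming the usual locking has been done:
 */
#if 0
static void
bxe_example_enter_promisc(struct bxe_softc *sc) /* hypothetical helper */
{
    /* latch the desired mode and push it to the chip */
    sc->rx_mode = BXE_RX_MODE_PROMISC;
    bxe_set_storm_rx_mode(sc);
}
#endif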
/* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_load_no_mcp(struct bxe_softc *sc) { int path = SC_PATH(sc); int port = SC_PORT(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]++; load_count[path][1 + port]++; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 1) { return (FW_MSG_CODE_DRV_LOAD_COMMON); } else if (load_count[path][1 + port] == 1) { return (FW_MSG_CODE_DRV_LOAD_PORT); } else { return (FW_MSG_CODE_DRV_LOAD_FUNCTION); } } /* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_unload_no_mcp(struct bxe_softc *sc) { int port = SC_PORT(sc); int path = SC_PATH(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]--; load_count[path][1 + port]--; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_COMMON); } else if (load_count[path][1 + port] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_PORT); } else { return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); } } /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ static uint32_t bxe_send_unload_req(struct bxe_softc *sc, int unload_mode) { uint32_t reset_code = 0; /* Select the UNLOAD request mode */ if (unload_mode == UNLOAD_NORMAL) { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } else { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } /* Send the request to the MCP */ if (!BXE_NOMCP(sc)) { reset_code = bxe_fw_command(sc, reset_code, 0); } else { reset_code = bxe_nic_unload_no_mcp(sc); } return (reset_code); } /* send UNLOAD_DONE command to the MCP */ static void bxe_send_unload_done(struct bxe_softc *sc, uint8_t keep_link) { uint32_t reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; /* Report UNLOAD_DONE to MCP */ if (!BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } } static int bxe_func_wait_started(struct bxe_softc *sc) { int tout = 50; if (!sc->port.pmf) { return (0); } /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB * 2. Sync SP queue - this guarantees us that attention handling started * 3. Wait, that TX disable/enable transaction completes * * 1+2 guarantee that if DCBX attention was scheduled it already changed * pending bit of transaction from STARTED-->TX_STOPPED, if we already * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ /* XXX make sure default SB ISR is done */ /* need a way to synchronize an irq (intr_mtx?) */ /* XXX flush any work queues */ while (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED && tout--) { DELAY(20000); } if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit. */ struct ecore_func_state_params func_params = { NULL }; BLOGE(sc, "Unexpected function state! 
" "Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &sc->func_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); /* STARTED-->TX_STOPPED */ func_params.cmd = ECORE_F_CMD_TX_STOP; ecore_func_state_change(sc, &func_params); /* TX_STOPPED-->STARTED */ func_params.cmd = ECORE_F_CMD_TX_START; return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_stop_queue(struct bxe_softc *sc, int index) { struct bxe_fastpath *fp = &sc->fp[index]; struct ecore_queue_state_params q_params = { NULL }; int rc; BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); q_params.q_obj = &sc->sp_objs[fp->index].q_obj; /* We want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* Stop the primary connection: */ /* ...halt the connection */ q_params.cmd = ECORE_Q_CMD_HALT; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...terminate the connection */ q_params.cmd = ECORE_Q_CMD_TERMINATE; memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...delete cfc entry */ q_params.cmd = ECORE_Q_CMD_CFC_DEL; memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; return (ecore_queue_state_change(sc, &q_params)); } /* wait for the outstanding SP commands */ static inline uint8_t bxe_wait_sp_comp(struct bxe_softc *sc, unsigned long mask) { unsigned long tmp; int tout = 5000; /* wait for 5 secs tops */ while (tout--) { mb(); if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { return (TRUE); } DELAY(1000); } mb(); tmp = atomic_load_acq_long(&sc->sp_state); if (tmp & mask) { BLOGE(sc, "Filtering completion timed out: " "sp_state 0x%lx, mask 0x%lx\n", tmp, mask); return (FALSE); } return (FALSE); } static int bxe_func_stop(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_STOP; /* * Try to stop the function the 'good way'. If it fails (in case * of a parity error during bxe_chip_cleanup()) and we are * not in a debug mode, perform a state transaction in order to * enable further HW_RESET transaction. */ rc = ecore_func_state_change(sc, &func_params); if (rc) { BLOGE(sc, "FUNC_STOP ramrod failed. " "Running a dry transaction (%d)\n", rc); bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_reset_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; /* Prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_RESET; func_params.params.hw_init.load_phase = load_code; return (ecore_func_state_change(sc, &func_params)); } static void bxe_int_disable_sync(struct bxe_softc *sc, int disable_hw) { if (disable_hw) { /* prevent the HW from sending interrupts */ bxe_int_disable(sc); } /* XXX need a way to synchronize ALL irqs (intr_mtx?) 
*/ /* make sure all ISRs are done */ /* XXX make sure sp_task is not running */ /* cancel and flush work queues */ } static void bxe_chip_cleanup(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { int port = SC_PORT(sc); struct ecore_mcast_ramrod_params rparam = { NULL }; uint32_t reset_code; int i, rc = 0; bxe_drain_tx_queues(sc); /* give HW time to discard old tx messages */ DELAY(1000); /* Clean all ETH MACs */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); } /* Clean up UC list */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); if (rc < 0) { BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); } /* Disable LLH */ if (!CHIP_IS_E1(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } /* Set "drop all" to stop Rx */ /* * We need to take the BXE_MCAST_LOCK() here in order to prevent * a race between the completion code and this code. */ BXE_MCAST_LOCK(sc); if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); } else { bxe_set_storm_rx_mode(sc); } /* Clean up multicast configuration */ rparam.mcast_obj = &sc->mcast_obj; rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } BXE_MCAST_UNLOCK(sc); // XXX bxe_iov_chip_cleanup(sc); /* * Send the UNLOAD_REQUEST to the MCP. This will return if * this function should perform FUNCTION, PORT, or COMMON HW * reset. */ reset_code = bxe_send_unload_req(sc, unload_mode); /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction */ rc = bxe_func_wait_started(sc); if (rc) { BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc); } /* * Close multi and leading connections * Completions for ramrods are collected in a synchronous way */ for (i = 0; i < sc->num_queues; i++) { if (bxe_stop_queue(sc, i)) { goto unload_error; } } /* * If SP settings didn't get completed so far - something * very wrong has happen. */ if (!bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc); } unload_error: rc = bxe_func_stop(sc); if (rc) { BLOGE(sc, "Function stop failed!(%d)\n", rc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Reset the chip */ rc = bxe_reset_hw(sc, reset_code); if (rc) { BLOGE(sc, "Hardware reset failed(%d)\n", rc); } /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, keep_link); } static void bxe_disable_close_the_gate(struct bxe_softc *sc) { uint32_t val; int port = SC_PORT(sc); BLOGD(sc, DBG_LOAD, "Disabling 'close the gates'\n"); if (CHIP_IS_E1(sc)) { uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; val = REG_RD(sc, addr); val &= ~(0x300); REG_WR(sc, addr, val); } else { val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); } } /* * Cleans the object that have internal lists without sending * ramrods. Should be run when interrutps are disabled. */ static void bxe_squeeze_objects(struct bxe_softc *sc) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; struct ecore_mcast_ramrod_params rparam = { NULL }; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; int rc; /* Cleanup MACs' object first... 
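     * (The order below is: the ETH primary MAC, then the UC list, and
     * finally the multicast object.)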
 */
    /* Wait for completion of requested commands */
    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

    /* Perform a dry cleanup */
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

    /* Clean ETH primary MAC */
    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
    }

    /* Cleanup UC list */
    vlan_mac_flags = 0;
    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
    }

    /* Now clean mcast object... */
    rparam.mcast_obj = &sc->mcast_obj;
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

    /* Add a DEL command... */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    /* now wait until all pending commands are cleared */

    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    while (rc != 0) {
        if (rc < 0) {
            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
            return;
        }

        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    }
}

/* stop the controller */
static __noinline int
bxe_nic_unload(struct bxe_softc *sc,
               uint32_t         unload_mode,
               uint8_t          keep_link)
{
    uint8_t global = FALSE;
    uint32_t val;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");

    /* mark driver as unloaded in shmem2 */
    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
    }

    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
        /*
         * We can get here if the driver has been unloaded
         * during parity error recovery and is either waiting for a
         * leader to complete or for other functions to unload and
         * then ifconfig down has been issued. In this case we want to
         * unload and let other functions complete a recovery
         * process.
         */
        sc->recovery_state = BXE_RECOVERY_DONE;
        sc->is_leader = 0;
        bxe_release_leader_lock(sc);
        mb();

        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
              " state = 0x%x\n", sc->recovery_state, sc->state);
        return (-1);
    }

    /*
     * Nothing to do during unload if the previous bxe_nic_load()
     * did not complete successfully - all resources are released.
     */
    if ((sc->state == BXE_STATE_CLOSED) ||
        (sc->state == BXE_STATE_ERROR)) {
        return (0);
    }

    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
    mb();

    /* stop tx */
    bxe_tx_disable(sc);

    sc->rx_mode = BXE_RX_MODE_NONE;
    /* XXX set rx mode ???
*/ if (IS_PF(sc) && !sc->grcdump_done) { /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); bxe_stats_handle(sc, STATS_EVENT_STOP); bxe_save_statistics(sc); } /* wait till consumers catch up with producers in all queues */ bxe_drain_tx_queues(sc); /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ if (IS_VF(sc)) { ; /* bxe_vfpf_close_vf(sc); */ } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip */ if (!sc->grcdump_done) bxe_chip_cleanup(sc, unload_mode, keep_link); } else { /* Send the UNLOAD_REQUEST to the MCP */ bxe_send_unload_req(sc, unload_mode); /* * Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global * attention once gloabl blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, FALSE); } /* * At this stage no more interrupts will arrive so we may safely clean * the queue'able objects here in case they failed to get cleaned so far. */ if (IS_PF(sc)) { bxe_squeeze_objects(sc); } /* There should be no more pending SP commands at this stage */ sc->sp_state = 0; sc->port.pmf = 0; bxe_free_fp_buffers(sc); if (IS_PF(sc)) { bxe_free_mem(sc); } bxe_free_fw_stats_mem(sc); sc->state = BXE_STATE_CLOSED; /* * Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. */ if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { bxe_set_reset_in_progress(sc); /* Set RESET_IS_GLOBAL if needed */ if (global) { bxe_set_reset_global(sc); } } /* * The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. */ if (IS_PF(sc) && !bxe_clear_pf_load(sc) && bxe_reset_is_done(sc, SC_PATH(sc))) { bxe_disable_close_the_gate(sc); } BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); return (0); } /* * Called by the OS to set various media options (i.e. link, speed, etc.) when * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". */ static int bxe_ifmedia_update(struct ifnet *ifp) { struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); struct ifmedia *ifm; ifm = &sc->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return (EINVAL); } switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); } /* * Called by the OS to get the current media status (i.e. link, speed, etc.). */ static void bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct bxe_softc *sc = if_getsoftc(ifp); /* Report link down if the driver isn't running. */ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { ifmr->ifm_active |= IFM_NONE; return; } /* Setup the default interface info. 
*/ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (sc->link_vars.link_up) { ifmr->ifm_status |= IFM_ACTIVE; } else { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_active |= sc->media; if (sc->link_vars.duplex == DUPLEX_FULL) { ifmr->ifm_active |= IFM_FDX; } else { ifmr->ifm_active |= IFM_HDX; } } static int bxe_ioctl_nvram(struct bxe_softc *sc, uint32_t priv_op, struct ifreq *ifr) { struct bxe_nvram_data nvdata_base; struct bxe_nvram_data *nvdata; int len; int error = 0; copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); len = (sizeof(struct bxe_nvram_data) + nvdata_base.len - sizeof(uint32_t)); if (len > sizeof(struct bxe_nvram_data)) { if ((nvdata = (struct bxe_nvram_data *) malloc(len, M_DEVBUF, (M_NOWAIT | M_ZERO))) == NULL) { BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed priv_op 0x%x " " len = 0x%x\n", priv_op, len); return (1); } memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); } else { nvdata = &nvdata_base; } if (priv_op == BXE_IOC_RD_NVRAM) { BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", nvdata->offset, nvdata->len); error = bxe_nvram_read(sc, nvdata->offset, (uint8_t *)nvdata->value, nvdata->len); copyout(nvdata, ifr->ifr_data, len); } else { /* BXE_IOC_WR_NVRAM */ BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", nvdata->offset, nvdata->len); copyin(ifr->ifr_data, nvdata, len); error = bxe_nvram_write(sc, nvdata->offset, (uint8_t *)nvdata->value, nvdata->len); } if (len > sizeof(struct bxe_nvram_data)) { free(nvdata, M_DEVBUF); } return (error); } static int bxe_ioctl_stats_show(struct bxe_softc *sc, uint32_t priv_op, struct ifreq *ifr) { const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); caddr_t p_tmp; uint32_t *offset; int i; switch (priv_op) { case BXE_IOC_STATS_SHOW_NUM: memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = BXE_NUM_ETH_STATS; ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = STAT_NAME_LEN; return (0); case BXE_IOC_STATS_SHOW_STR: memset(ifr->ifr_data, 0, str_size); p_tmp = ifr->ifr_data; for (i = 0; i < BXE_NUM_ETH_STATS; i++) { strcpy(p_tmp, bxe_eth_stats_arr[i].string); p_tmp += STAT_NAME_LEN; } return (0); case BXE_IOC_STATS_SHOW_CNT: memset(ifr->ifr_data, 0, stats_size); p_tmp = ifr->ifr_data; for (i = 0; i < BXE_NUM_ETH_STATS; i++) { offset = ((uint32_t *)&sc->eth_stats + bxe_eth_stats_arr[i].offset); switch (bxe_eth_stats_arr[i].size) { case 4: *((uint64_t *)p_tmp) = (uint64_t)*offset; break; case 8: *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); break; default: *((uint64_t *)p_tmp) = 0; } p_tmp += sizeof(uint64_t); } return (0); default: return (-1); } } static void bxe_handle_chip_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; long work = atomic_load_acq_long(&sc->chip_tq_flags); switch (work) { case CHIP_TQ_REINIT: if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { /* restart the interface */ BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } break; default: break; } } /* * Handles any IOCTL calls from the operating system. 
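 * SIOCSIFMTU, SIOCSIFFLAGS, SIOCADDMULTI/SIOCDELMULTI, SIOCSIFCAP, the
 * media ioctls and the private SIOCGPRIVATE_0 (NVRAM and statistics)
 * operations are handled here; anything else falls through to ether_ioctl().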
* * Returns: * 0 = Success, >0 Failure */ static int bxe_ioctl(if_t ifp, u_long command, caddr_t data) { struct bxe_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; struct bxe_nvram_data *nvdata; uint32_t priv_op; int mask = 0; int reinit = 0; int error = 0; int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); switch (command) { case SIOCSIFMTU: BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", ifr->ifr_mtu); if (sc->mtu == ifr->ifr_mtu) { /* nothing to change */ break; } if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", ifr->ifr_mtu, mtu_min, mtu_max); error = EINVAL; break; } atomic_store_rel_int((volatile unsigned int *)&sc->mtu, (unsigned long)ifr->ifr_mtu); /* atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), (unsigned long)ifr->ifr_mtu); XXX - Not sure why it needs to be atomic */ if_setmtu(ifp, ifr->ifr_mtu); reinit = 1; break; case SIOCSIFFLAGS: /* toggle the interface state up or down */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); BXE_CORE_LOCK(sc); /* check if the interface is up */ if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ bxe_set_rx_mode(sc); } else { bxe_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); bxe_stop_locked(sc); } } BXE_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* add/delete multicast addresses */ BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); /* check if the interface is up */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ BXE_CORE_LOCK(sc); bxe_set_rx_mode(sc); BXE_CORE_UNLOCK(sc); } break; case SIOCSIFCAP: /* find out which capabilities have changed */ mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", mask); /* toggle the LRO capabilites enable flag */ if (mask & IFCAP_LRO) { if_togglecapenable(ifp, IFCAP_LRO); BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); reinit = 1; } /* toggle the TXCSUM checksum capabilites enable flag */ if (mask & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle the RXCSUM checksum capabilities enable flag */ if (mask & IFCAP_RXCSUM) { if_togglecapenable(ifp, IFCAP_RXCSUM); BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle TSO4 capabilities enabled flag */ if (mask & IFCAP_TSO4) { if_togglecapenable(ifp, IFCAP_TSO4); BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); } /* toggle TSO6 capabilities enabled flag */ if (mask & IFCAP_TSO6) { if_togglecapenable(ifp, IFCAP_TSO6); BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", (if_getcapenable(ifp) & IFCAP_TSO6) ? 
"ON" : "OFF"); } /* toggle VLAN_HWTSO capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); } /* toggle VLAN_HWCSUM capabilities enabled flag */ if (mask & IFCAP_VLAN_HWCSUM) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); error = EINVAL; } /* toggle VLAN_MTU capabilities enable flag */ if (mask & IFCAP_VLAN_MTU) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWTAGGING capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTAGGING) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWFILTER capabilities enabled flag */ if (mask & IFCAP_VLAN_HWFILTER) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); error = EINVAL; } /* XXX not yet... * IFCAP_WOL_MAGIC */ break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* set/get interface media */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", (command & 0xff)); error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; case SIOCGPRIVATE_0: copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); switch (priv_op) { case BXE_IOC_RD_NVRAM: case BXE_IOC_WR_NVRAM: nvdata = (struct bxe_nvram_data *)ifr->ifr_data; BLOGD(sc, DBG_IOCTL, "Received Private NVRAM ioctl addr=0x%x size=%u\n", nvdata->offset, nvdata->len); error = bxe_ioctl_nvram(sc, priv_op, ifr); break; case BXE_IOC_STATS_SHOW_NUM: case BXE_IOC_STATS_SHOW_STR: case BXE_IOC_STATS_SHOW_CNT: BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", priv_op); error = bxe_ioctl_stats_show(sc, priv_op, ifr); break; default: BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); error = EINVAL; break; } break; default: BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", (command & 0xff)); error = ether_ioctl(ifp, command, data); break; } if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { BLOGD(sc, DBG_LOAD | DBG_IOCTL, "Re-initializing hardware from IOCTL change\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } return (error); } static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents) { char * type; int i = 0; if (!(sc->debug & DBG_MBUF)) { return; } if (m == NULL) { BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); return; } while (m) { BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, (int)m->m_pkthdr.csum_flags, CSUM_BITS); } if (m->m_flags & M_EXT) { switch (m->m_ext.ext_type) { case EXT_CLUSTER: type = "EXT_CLUSTER"; break; case EXT_SFBUF: type = "EXT_SFBUF"; break; case EXT_JUMBOP: type = "EXT_JUMBOP"; break; case EXT_JUMBO9: type = "EXT_JUMBO9"; break; case EXT_JUMBO16: type = "EXT_JUMBO16"; break; case EXT_PACKET: type = "EXT_PACKET"; break; case EXT_MBUF: type = "EXT_MBUF"; break; case EXT_NET_DRV: type = "EXT_NET_DRV"; break; case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; case EXT_EXTREF: type = "EXT_EXTREF"; break; default: type = "UNKNOWN"; break; } BLOGD(sc, DBG_MBUF, "%02d: - m_ext: %p ext_size=%d type=%s\n", 
              i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
        }

        if (contents) {
            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
        }

        m = m->m_next;
        i++;
    }
}

/*
 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
 * The window: the 3 reserved bds are 1 for the headers BD plus 2 for the
 * parse BD and last BD.
 * The headers come in a separate bd in FreeBSD so 13-3=10.
 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
 */
static int
bxe_chktso_window(struct bxe_softc  *sc,
                  int               nsegs,
                  bus_dma_segment_t *segs,
                  struct mbuf       *m)
{
    uint32_t num_wnds, wnd_size, wnd_sum;
    int32_t frag_idx, wnd_idx;
    unsigned short lso_mss;
    int defrag;

    defrag = 0;
    wnd_sum = 0;
    wnd_size = 10;
    num_wnds = nsegs - wnd_size;
    lso_mss = htole16(m->m_pkthdr.tso_segsz);

    /*
     * The total header length (Eth+IP+TCP) is in the first FreeBSD mbuf,
     * so calculate the first window sum of data while skipping the first
     * segment, assuming it is the header in FreeBSD.
     */
    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
        wnd_sum += htole16(segs[frag_idx].ds_len);
    }

    /* check the first 10 bd window size */
    if (wnd_sum < lso_mss) {
        return (1);
    }

    /* run through the windows */
    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
        /* subtract the first mbuf->m_len of the last wndw(-header) */
        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
        /* add the next mbuf len to the len of our new window */
        wnd_sum += htole16(segs[frag_idx].ds_len);

        if (wnd_sum < lso_mss) {
            return (1);
        }
    }

    return (0);
}

static uint8_t
bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
                    struct mbuf         *m,
                    uint32_t            *parsing_data)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip4 = NULL;
    struct ip6_hdr *ip6 = NULL;
    caddr_t ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen, ip_hlen, l4_off;
    uint16_t proto;

    if (m->m_pkthdr.csum_flags == CSUM_IP) {
        /* no L4 checksum offload needed */
        return (0);
    }

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        proto  = ntohs(eh->evl_proto);
    } else {
        e_hlen = ETHER_HDR_LEN;
        proto  = ntohs(eh->evl_encap_proto);
    }

    switch (proto) {
    case ETHERTYPE_IP:
        /* get the IP header, if mbuf len < 20 then header in next mbuf */
        ip4 = (m->m_len < sizeof(struct ip)) ?
                  (struct ip *)m->m_next->m_data :
                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is number of 32-bit words */
        ip_hlen = (ip4->ip_hl << 2);
        ip = (caddr_t)ip4;
        break;
    case ETHERTYPE_IPV6:
        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
                  (struct ip6_hdr *)m->m_next->m_data :
                  (struct ip6_hdr *)(m->m_data + e_hlen);
        /* XXX cannot support offload with IPv6 extensions */
        ip_hlen = sizeof(struct ip6_hdr);
        ip = (caddr_t)ip6;
        break;
    default:
        /* We can't offload in this case... */
        /* XXX error stat ???
*/ return (0); } /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ l4_off = (e_hlen + ip_hlen); *parsing_data |= (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; th = (struct tcphdr *)(ip + ip_hlen); /* th_off is number of 32-bit words */ *parsing_data |= ((th->th_off << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); return (l4_off + (th->th_off << 2)); /* entire header length */ } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; return (l4_off + sizeof(struct udphdr)); /* entire header length */ } else { /* XXX error stat ??? */ return (0); } } static uint8_t bxe_set_pbd_csum(struct bxe_fastpath *fp, struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; struct udphdr *uh = NULL; int e_hlen, ip_hlen; uint16_t proto; uint8_t hlen; uint16_t tmp_csum; uint32_t *tmp_uh; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 1); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = (sizeof(struct ip6_hdr) >> 1); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } hlen = (e_hlen >> 1); /* note that rest of global_data is indirectly zeroed here */ if (m->m_flags & M_VLANTAG) { pbd->global_data = htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); } else { pbd->global_data = htole16(hlen); } pbd->ip_hlen_w = ip_hlen; hlen += pbd->ip_hlen_w; /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { th = (struct tcphdr *)(ip + (ip_hlen << 1)); /* th_off is number of 32-bit words */ hlen += (uint16_t)(th->th_off << 1); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { uh = (struct udphdr *)(ip + (ip_hlen << 1)); hlen += (sizeof(struct udphdr) / 2); } else { /* valid case as only CSUM_IP was set */ return (0); } pbd->total_hlen_w = htole16(hlen); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; pbd->tcp_pseudo_csum = ntohs(th->th_sum); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; /* * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP * checksums and does not know anything about the UDP header and where * the checksum field is located. It only knows about TCP. 
Therefore * we "lie" to the hardware for outgoing UDP packets w/ checksum * offload. Since the checksum field offset for TCP is 16 bytes and * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 * bytes less than the start of the UDP header. This allows the * hardware to write the checksum in the correct spot. But the * hardware will compute a checksum which includes the last 10 bytes * of the IP header. To correct this we tweak the stack computed * pseudo checksum by folding in the calculation of the inverse * checksum for those final 10 bytes of the IP header. This allows * the correct checksum to be computed by the hardware. */ /* set pointer 10 bytes before UDP header */ tmp_uh = (uint32_t *)((uint8_t *)uh - 10); /* calculate a pseudo header checksum over the first 10 bytes */ tmp_csum = in_pseudo(*tmp_uh, *(tmp_uh + 1), *(uint16_t *)(tmp_uh + 2)); pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); } return (hlen * 2); /* entire header length, number of bytes */ } static void bxe_set_pbd_lso_e2(struct mbuf *m, uint32_t *parsing_data) { *parsing_data |= ((m->m_pkthdr.tso_segsz << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS); /* XXX test for IPv6 with extension header... */ } static void bxe_set_pbd_lso(struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip = NULL; struct tcphdr *th = NULL; int e_hlen; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; /* get the IP and TCP header, with LSO entire header in first mbuf */ /* XXX assuming IPv4 */ ip = (struct ip *)(m->m_data + e_hlen); th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); pbd->tcp_send_seq = ntohl(th->th_seq); pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); #if 1 /* XXX IPv4 */ pbd->ip_id = ntohs(ip->ip_id); pbd->tcp_pseudo_csum = ntohs(in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP))); #else /* XXX IPv6 */ pbd->tcp_pseudo_csum = ntohs(in_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htons(IPPROTO_TCP))); #endif pbd->global_data |= htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); } /* * Encapsulte an mbuf cluster into the tx bd chain and makes the memory * visible to the controller. * * If an mbuf is submitted to this routine and cannot be given to the * controller (e.g. it has too many fragments) then the function may free * the mbuf and return to the caller. * * Returns: * 0 = Success, !0 = Failure * Note the side effect that an mbuf may be freed if it causes a problem. 
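 * Callers therefore have to re-check *m_head on failure; it is set to NULL
 * once the mbuf has been freed here.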
*/ static int bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) { bus_dma_segment_t segs[32]; struct mbuf *m0; struct bxe_sw_tx_bd *tx_buf; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ struct eth_tx_bd *tx_data_bd; struct eth_tx_bd *tx_total_pkt_size_bd; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_prod, pkt_prod, total_pkt_size; uint8_t mac_type; int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; struct bxe_softc *sc; uint16_t tx_bd_avail; struct ether_vlan_header *eh; uint32_t pbd_e2_parsing_data = 0; uint8_t hlen = 0; int tmp_bd; int i; sc = fp->sc; M_ASSERTPKTHDR(*m_head); m0 = *m_head; rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; tx_start_bd = NULL; tx_data_bd = NULL; tx_total_pkt_size_bd = NULL; /* get the H/W pointer for packets and BDs */ pkt_prod = fp->tx_pkt_prod; bd_prod = fp->tx_bd_prod; mac_type = UNICAST_ADDRESS; /* map the mbuf into the next open DMAable memory */ tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* mapping errors */ if(__predict_false(error != 0)) { fp->eth_q_stats.tx_dma_mapping_failure++; if (error == ENOMEM) { /* resource issue, try again later */ rc = ENOMEM; } else if (error == EFBIG) { /* possibly recoverable with defragmentation */ fp->eth_q_stats.mbuf_defrag_attempts++; m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; rc = error; } } } else { /* unknown, unrecoverable mapping error */ BLOGE(sc, "Unknown TX mapping error rc=%d\n", error); bxe_dump_mbuf(sc, m0, FALSE); rc = error; } goto bxe_tx_encap_continue; } tx_bd_avail = bxe_tx_avail(sc, fp); /* make sure there is enough room in the send queue */ if (__predict_false(tx_bd_avail < (nsegs + 2))) { /* Recoverable, try again later. */ fp->eth_q_stats.tx_hw_queue_full++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); rc = ENOMEM; goto bxe_tx_encap_continue; } /* capture the current H/W TX chain high watermark */ if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < (TX_BD_USABLE - tx_bd_avail))) { fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); } /* make sure it fits in the packet window */ if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { /* * The mbuf may be to big for the controller to handle. If the frame * is a TSO frame we'll need to do an additional check. */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { goto bxe_tx_encap_continue; /* OK to send */ } else { fp->eth_q_stats.tx_window_violation_tso++; } } else { fp->eth_q_stats.tx_window_violation_std++; } /* lets try to defragment this mbuf and remap it */ fp->eth_q_stats.mbuf_defrag_attempts++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; /* Ugh, just drop the frame... :( */ rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; /* No sense in trying to defrag/copy chain, drop it. 
:( */ rc = error; } else { /* if the chain is still too long then drop it */ if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); rc = ENODEV; } } } } bxe_tx_encap_continue: /* Check for errors */ if (rc) { if (rc == ENOMEM) { /* recoverable try again later */ } else { fp->eth_q_stats.tx_soft_errors++; fp->eth_q_stats.mbuf_alloc_tx--; m_freem(*m_head); *m_head = NULL; } return (rc); } /* set flag according to packet type (UNICAST_ADDRESS is default) */ if (m0->m_flags & M_BCAST) { mac_type = BROADCAST_ADDRESS; } else if (m0->m_flags & M_MCAST) { mac_type = MULTICAST_ADDRESS; } /* store the mbuf into the mbuf ring */ tx_buf->m = m0; tx_buf->first_bd = fp->tx_bd_prod; tx_buf->flags = 0; /* prepare the first transmit (start) BD for the mbuf */ tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; BLOGD(sc, DBG_TX, "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); tx_start_bd->nbytes = htole16(segs[0].ds_len); total_pkt_size += tx_start_bd->nbytes; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); /* all frames have at least Start BD + Parsing BD */ nbds = nsegs + 1; tx_start_bd->nbd = htole16(nbds); if (m0->m_flags & M_VLANTAG) { tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); } else { /* vf tx, start bd must hold the ethertype for fw to enforce it */ if (IS_VF(sc)) { /* map ethernet header to find type and header length */ eh = mtod(m0, struct ether_vlan_header *); tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); } } /* * add a parsing BD from the chain. 
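     * (eth_tx_parse_bd_e2 on E2 and newer chips, eth_tx_parse_bd_e1x on
     * E1x; both cases are set up below.)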
The parsing BD is always added * though it is only used for TSO and chksum */ bd_prod = TX_BD_NEXT(bd_prod); if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_IP) { fp->eth_q_stats.tx_ofld_frames_csum_ip++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_L4_CSUM); } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_IS_UDP | ETH_TX_BD_FLAGS_L4_CSUM); } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | ETH_TX_BD_FLAGS_IS_UDP); } } if (!CHIP_IS_E1x(sc)) { pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); } SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { uint16_t global_data = 0; pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); } SET_FLAG(global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); pbd_e1x->global_data |= htole16(global_data); } /* setup the parsing BD with TSO specific info */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { fp->eth_q_stats.tx_ofld_frames_lso++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; if (__predict_false(tx_start_bd->nbytes > hlen)) { fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; /* split the first BD into header/data making the fw job easy */ nbds++; tx_start_bd->nbd = htole16(nbds); tx_start_bd->nbytes = htole16(hlen); bd_prod = TX_BD_NEXT(bd_prod); /* new transmit BD after the tx_parse_bd */ tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } BLOGD(sc, DBG_TX, "TSO split header size is %d (%x:%x) nbds %d\n", le16toh(tx_start_bd->nbytes), le32toh(tx_start_bd->addr_hi), le32toh(tx_start_bd->addr_lo), nbds); } if (!CHIP_IS_E1x(sc)) { bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); } else { bxe_set_pbd_lso(m0, pbd_e1x); } } if (pbd_e2_parsing_data) { pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); } /* prepare remaining BDs, start tx bd contains first seg/frag */ for (i = 1; i < nsegs ; i++) { bd_prod = TX_BD_NEXT(bd_prod); tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); tx_data_bd->nbytes = htole16(segs[i].ds_len); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } total_pkt_size += tx_data_bd->nbytes; } BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); if (tx_total_pkt_size_bd != NULL) { tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; } if (__predict_false(sc->debug & DBG_TX)) { tmp_bd = tx_buf->first_bd; for (i = 0; i < nbds; i++) { if (i == 0) { BLOGD(sc, DBG_TX, "TX Strt: %p bd=%d nbd=%d vlan=0x%x " "bd_flags=0x%x hdr_nbds=%d\n", tx_start_bd, tmp_bd, le16toh(tx_start_bd->nbd), 
le16toh(tx_start_bd->vlan_or_ethertype), tx_start_bd->bd_flags.as_bitfield, (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); } else if (i == 1) { if (pbd_e1x) { BLOGD(sc, DBG_TX, "-> Parse: %p bd=%d global=0x%x ip_hlen_w=%u " "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " "tcp_seq=%u total_hlen_w=%u\n", pbd_e1x, tmp_bd, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, le16toh(pbd_e1x->total_hlen_w)); } else { /* if (pbd_e2) */ BLOGD(sc, DBG_TX, "-> Parse: %p bd=%d dst=%02x:%02x:%02x " "src=%02x:%02x:%02x parsing_data=0x%x\n", pbd_e2, tmp_bd, pbd_e2->data.mac_addr.dst_hi, pbd_e2->data.mac_addr.dst_mid, pbd_e2->data.mac_addr.dst_lo, pbd_e2->data.mac_addr.src_hi, pbd_e2->data.mac_addr.src_mid, pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); } } if (i != 1) { /* skip parse bd as it doesn't hold data */ tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; BLOGD(sc, DBG_TX, "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo=0x%x\n", tx_data_bd, tmp_bd, le16toh(tx_data_bd->nbytes), le32toh(tx_data_bd->addr_hi), le32toh(tx_data_bd->addr_lo)); } tmp_bd = TX_BD_NEXT(tmp_bd); } } BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); /* update TX BD producer index value for next TX */ bd_prod = TX_BD_NEXT(bd_prod); /* * If the chain of tx_bd's describing this frame is adjacent to or spans * an eth_tx_next_bd element then we need to increment the nbds value. */ if (TX_BD_IDX(bd_prod) < nbds) { nbds++; } /* don't allow reordering of writes for nbd and packets */ mb(); fp->tx_db.data.prod += nbds; /* producer points to the next free tx_bd at this point */ fp->tx_pkt_prod++; fp->tx_bd_prod = bd_prod; DOORBELL(sc, fp->index, fp->tx_db.raw); fp->eth_q_stats.tx_pkts++; /* Prevent speculative reads from getting ahead of the status block. */ bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_READ); /* Prevent speculative reads from getting ahead of the doorbell. */ bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 0, 0, BUS_SPACE_BARRIER_READ); return (0); } static void bxe_tx_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp) { struct mbuf *m = NULL; int tx_count = 0; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); /* keep adding entries while there are frames to send */ while (!if_sendq_empty(ifp)) { /* * check for any frames to send * dequeue can still be NULL even if queue is not empty */ m = if_dequeue(ifp); if (__predict_false(m == NULL)) { break; } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */ if (__predict_false(bxe_tx_encap(fp, &m))) { fp->eth_q_stats.tx_encap_failures++; if (m != NULL) { /* mark the TX queue as full and return the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_queue_xoff++; } /* stop looking for more work */ break; } /* the frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners.
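The tap happens only after bxe_tx_encap() succeeds, so BPF listeners never see frames that were bounced back to the send queue.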
*/ if_etherbpfmtap(ifp, m); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } } /* Legacy (non-RSS) dispatch routine */ static void bxe_tx_start(if_t ifp) { struct bxe_softc *sc; struct bxe_fastpath *fp; sc = if_getsoftc(ifp); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BLOGW(sc, "Interface not running, ignoring transmit request\n"); return; } if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); return; } if (!sc->link_vars.link_up) { BLOGW(sc, "Interface link is down, ignoring transmit request\n"); return; } fp = &sc->fp[0]; BXE_FP_TX_LOCK(fp); bxe_tx_start_locked(sc, ifp, fp); BXE_FP_TX_UNLOCK(fp); } #if __FreeBSD_version >= 800000 static int bxe_tx_mq_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp, struct mbuf *m) { struct buf_ring *tx_br = fp->tx_br; struct mbuf *next; int depth, rc, tx_count; uint16_t tx_bd_avail; rc = tx_count = 0; BXE_FP_TX_LOCK_ASSERT(fp); if (!tx_br) { BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); return (EINVAL); } if (!sc->link_vars.link_up || (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { rc = drbr_enqueue_drv(ifp, tx_br, m); goto bxe_tx_mq_start_locked_exit; } /* fetch the depth of the driver queue */ depth = drbr_inuse_drv(ifp, tx_br); if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { fp->eth_q_stats.tx_max_drbr_queue_depth = depth; } if (m == NULL) { /* no new work, check for pending frames */ next = drbr_dequeue_drv(ifp, tx_br); } else if (drbr_needs_enqueue_drv(ifp, tx_br)) { /* have both new and pending work, maintain packet order */ rc = drbr_enqueue_drv(ifp, tx_br, m); if (rc != 0) { fp->eth_q_stats.tx_soft_errors++; goto bxe_tx_mq_start_locked_exit; } next = drbr_dequeue_drv(ifp, tx_br); } else { /* new work only and nothing pending */ next = m; } /* keep adding entries while there are frames to send */ while (next != NULL) { /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. 
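Note that unlike the legacy path above, a frame that fails here is re-enqueued at the tail of this queue's buf_ring rather than prepended, which can reorder it relative to frames enqueued in the meantime (see the XXX below).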
*/ rc = bxe_tx_encap(fp, &next); if (__predict_false(rc != 0)) { fp->eth_q_stats.tx_encap_failures++; if (next != NULL) { /* mark the TX queue as full and save the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); /* XXX this may reorder the frame */ rc = drbr_enqueue_drv(ifp, tx_br, next); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_frames_deferred++; } /* stop looking for more work */ break; } /* the transmit frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners */ if_etherbpfmtap(ifp, next); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } next = drbr_dequeue_drv(ifp, tx_br); } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } bxe_tx_mq_start_locked_exit: return (rc); } /* Multiqueue (TSS) dispatch routine. */ static int bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; int fp_index, rc; fp_index = 0; /* default is the first queue */ /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) fp_index = (m->m_pkthdr.flowid % sc->num_queues); fp = &sc->fp[fp_index]; if (BXE_FP_TX_TRYLOCK(fp)) { rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); BXE_FP_TX_UNLOCK(fp); } else rc = drbr_enqueue_drv(ifp, fp->tx_br, m); return (rc); } static void bxe_mq_flush(struct ifnet *ifp) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; struct mbuf *m; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->state != BXE_FP_STATE_OPEN) { BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", fp->index, fp->state); continue; } if (fp->tx_br != NULL) { BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { m_freem(m); } BXE_FP_TX_UNLOCK(fp); } } if_qflush(ifp); } #endif /* FreeBSD_version >= 800000 */ static uint16_t bxe_cid_ilt_lines(struct bxe_softc *sc) { if (IS_SRIOV(sc)) { return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); } return (L2_ILT_LINES(sc)); } static void bxe_ilt_set_info(struct bxe_softc *sc) { struct ilt_client_info *ilt_client; struct ecore_ilt *ilt = sc->ilt; uint16_t line = 0; ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); /* CDU */ ilt_client = &ilt->clients[ILT_CLIENT_CDU]; ilt_client->client_num = ILT_CLIENT_CDU; ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bxe_cid_ilt_lines(sc); if (CNIC_SUPPORT(sc)) { line += CNIC_ILT_LINES; } ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[CDU]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* QM */ if (QM_INIT(sc->qm_cid_count)) { ilt_client = &ilt->clients[ILT_CLIENT_QM]; ilt_client->client_num = ILT_CLIENT_QM; ilt_client->page_size = QM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; /* 4 bytes for each cid */ line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ); ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[QM]: start %d, end %d, " "psz 
0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } if (CNIC_SUPPORT(sc)) { /* SRC */ ilt_client = &ilt->clients[ILT_CLIENT_SRC]; ilt_client->client_num = ILT_CLIENT_SRC; ilt_client->page_size = SRC_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += SRC_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[SRC]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* TM */ ilt_client = &ilt->clients[ILT_CLIENT_TM]; ilt_client->client_num = ILT_CLIENT_TM; ilt_client->page_size = TM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += TM_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[TM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); } static void bxe_set_fp_rx_buf_size(struct bxe_softc *sc) { int i; uint32_t rx_buf_size; rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); for (i = 0; i < sc->num_queues; i++) { if(rx_buf_size <= MCLBYTES){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= MJUMPAGESIZE){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ sc->fp[i].rx_buf_size = MJUMPAGESIZE; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else { sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; } } } static int bxe_alloc_ilt_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt = (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static int bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt->lines = (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static void bxe_free_ilt_mem(struct bxe_softc *sc) { if (sc->ilt != NULL) { free(sc->ilt, M_BXE_ILT); sc->ilt = NULL; } } static void bxe_free_ilt_lines_mem(struct bxe_softc *sc) { if (sc->ilt->lines != NULL) { free(sc->ilt->lines, M_BXE_ILT); sc->ilt->lines = NULL; } } static void bxe_free_mem(struct bxe_softc *sc) { int i; for (i = 0; i < L2_ILT_LINES(sc); i++) { bxe_dma_free(sc, &sc->context[i].vcxt_dma); sc->context[i].vcxt = NULL; sc->context[i].size = 0; } ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); bxe_free_ilt_lines_mem(sc); } static int bxe_alloc_mem(struct bxe_softc *sc) { int context_size; int allocated; int i; /* * Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT * functions because CDU differs in few aspects: * 1. There can be multiple entities allocating memory for context - * regular L2, CNIC, and SRIOV drivers. Each separately controls * its own ILT lines. * 2. Since CDU page-size is not a single 4KB page (which is the case * for the other ILT clients), to be efficient we want to support * allocation of sub-page-size in the last entry. * 3. 
Context pointers are used by the driver to pass to FW / update * the context (for the other ILT clients the pointers are used just to * free the memory during unload). */ context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); for (i = 0, allocated = 0; allocated < context_size; i++) { sc->context[i].size = min(CDU_ILT_PAGE_SZ, (context_size - allocated)); if (bxe_dma_alloc(sc, sc->context[i].size, &sc->context[i].vcxt_dma, "cdu context") != 0) { bxe_free_mem(sc); return (-1); } sc->context[i].vcxt = (union cdu_context *)sc->context[i].vcxt_dma.vaddr; allocated += sc->context[i].size; } bxe_alloc_ilt_lines_mem(sc); BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", sc->ilt, sc->ilt->start_line, sc->ilt->lines); { for (i = 0; i < 4; i++) { BLOGD(sc, DBG_LOAD, "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", i, sc->ilt->clients[i].page_size, sc->ilt->clients[i].start, sc->ilt->clients[i].end, sc->ilt->clients[i].client_num, sc->ilt->clients[i].flags); } } if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); bxe_free_mem(sc); return (-1); } return (0); } static void bxe_free_rx_bd_chain(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i; sc = fp->sc; if (fp->rx_mbuf_tag == NULL) { return; } /* free all mbufs and unload all maps */ for (i = 0; i < RX_BD_TOTAL; i++) { if (fp->rx_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map); } if (fp->rx_mbuf_chain[i].m != NULL) { m_freem(fp->rx_mbuf_chain[i].m); fp->rx_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_rx--; } } } static void bxe_free_tpa_pool(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i, max_agg_queues; sc = fp->sc; if (fp->rx_mbuf_tag == NULL) { return; } max_agg_queues = MAX_AGG_QS(sc); /* release all mbufs and unload all DMA maps in the TPA pool */ for (i = 0; i < max_agg_queues; i++) { if (fp->rx_tpa_info[i].bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map); } if (fp->rx_tpa_info[i].bd.m != NULL) { m_freem(fp->rx_tpa_info[i].bd.m); fp->rx_tpa_info[i].bd.m = NULL; fp->eth_q_stats.mbuf_alloc_tpa--; } } } static void bxe_free_sge_chain(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i; sc = fp->sc; if (fp->rx_sge_mbuf_tag == NULL) { return; } /* free all mbufs and unload all maps */ for (i = 0; i < RX_SGE_TOTAL; i++) { if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map); } if (fp->rx_sge_mbuf_chain[i].m != NULL) { m_freem(fp->rx_sge_mbuf_chain[i].m); fp->rx_sge_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_sge--; } } } static void bxe_free_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; #if __FreeBSD_version >= 800000 if (fp->tx_br != NULL) { /* just in case bxe_mq_flush() wasn't called */ if (mtx_initialized(&fp->tx_mtx)) { struct mbuf *m; BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) m_freem(m); BXE_FP_TX_UNLOCK(fp); } buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } #endif /* free all RX buffers */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); if (fp->eth_q_stats.mbuf_alloc_rx != 0) { BLOGE(sc, "failed to claim 
all rx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_rx); } if (fp->eth_q_stats.mbuf_alloc_sge != 0) { BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_sge); } if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tpa); } if (fp->eth_q_stats.mbuf_alloc_tx != 0) { BLOGE(sc, "failed to release tx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tx); } /* XXX verify all mbufs were reclaimed */ if (mtx_initialized(&fp->tx_mtx)) { mtx_destroy(&fp->tx_mtx); } if (mtx_initialized(&fp->rx_mtx)) { mtx_destroy(&fp->rx_mtx); } } } static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index) { struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs, rc; rc = 0; /* allocate the new RX BD mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_rx++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_rx--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing RX BD mbuf mappings */ if (prev_index != index) { rx_buf = &fp->rx_mbuf_chain[prev_index]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We only get here from bxe_rxeof() when the maximum number * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already * holds the mbuf in the prev_index so it's OK to NULL it out * here without concern of a memory leak. */ fp->rx_mbuf_chain[prev_index].m = NULL; } rx_buf = &fp->rx_mbuf_chain[index]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = (prev_index != index) ?
fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; rx_buf->m_map = fp->rx_mbuf_spare_map; fp->rx_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_PREREAD); rx_buf->m = m; rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue) { struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate the new TPA mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_tpa++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; m_free(m); fp->eth_q_stats.mbuf_alloc_tpa--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing TPA mbuf mapping */ if (tpa_info->bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); } /* save the mbuf and mapping info for the TPA mbuf */ map = tpa_info->bd.m_map; tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; fp->rx_tpa_info_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_PREREAD); tpa_info->bd.m = m; tpa_info->seg = segs[0]; return (rc); } /* * Allocate an mbuf and assign it to the receive scatter gather chain. The * caller must take care to save a copy of the existing mbuf in the SG mbuf * chain. 
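As in the RX BD and TPA paths above, the new mbuf is first loaded into the spare DMA map and the maps are swapped only on success, so a failed allocation or mapping leaves the existing mbuf and its mapping intact.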
*/ static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index) { struct bxe_sw_rx_bd *sge_buf; struct eth_rx_sge *sge; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate a new SGE mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; return (ENOMEM); } fp->eth_q_stats.mbuf_alloc_sge++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; /* map the SGE mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_sge--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); sge_buf = &fp->rx_sge_mbuf_chain[index]; /* release any existing SGE mbuf mapping */ if (sge_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = sge_buf->m_map; sge_buf->m_map = fp->rx_sge_mbuf_spare_map; fp->rx_sge_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_PREREAD); sge_buf->m = m; sge = &fp->rx_sge_chain[index]; sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static __noinline int bxe_alloc_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, j, rc = 0; int ring_prod, cqe_ring_prod; int max_agg_queues; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; #if __FreeBSD_version >= 800000 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) { BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); goto bxe_alloc_fp_buffers_error; } #endif ring_prod = cqe_ring_prod = 0; fp->rx_bd_cons = 0; fp->rx_cq_cons = 0; /* allocate buffers for the RX BDs in RX BD chain */ for (j = 0; j < sc->max_rx_bufs; j++) { rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", i, rc); goto bxe_alloc_fp_buffers_error; } ring_prod = RX_BD_NEXT(ring_prod); cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); } fp->rx_bd_prod = ring_prod; fp->rx_cq_prod = cqe_ring_prod; fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; max_agg_queues = MAX_AGG_QS(sc); fp->tpa_enable = TRUE; /* fill the TPA pool */ for (j = 0; j < max_agg_queues; j++) { rc = bxe_alloc_rx_tpa_mbuf(fp, j); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", i, j); fp->tpa_enable = FALSE; goto bxe_alloc_fp_buffers_error; } fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; } if (fp->tpa_enable) { /* fill the RX SGE chain */ ring_prod = 0; for (j = 0; j < RX_SGE_USABLE; j++) { rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", i, ring_prod); fp->tpa_enable = FALSE; ring_prod = 0; goto bxe_alloc_fp_buffers_error; } ring_prod = RX_SGE_NEXT(ring_prod); } fp->rx_sge_prod = ring_prod; } } return (0); bxe_alloc_fp_buffers_error: /* unwind what was already allocated */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); return (ENOBUFS); } static void bxe_free_fw_stats_mem(struct bxe_softc *sc) { bxe_dma_free(sc, &sc->fw_stats_dma); sc->fw_stats_num = 0; 
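/* fw_stats_req and fw_stats_data are just shortcuts into the single fw_stats_dma block freed above, so there is nothing else to free; only the pointers and sizes are reset */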
sc->fw_stats_req_size = 0; sc->fw_stats_req = NULL; sc->fw_stats_req_mapping = 0; sc->fw_stats_data_size = 0; sc->fw_stats_data = NULL; sc->fw_stats_data_mapping = 0; } static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc) { uint8_t num_queue_stats; int num_groups; /* number of queues for statistics is number of eth queues */ num_queue_stats = BXE_NUM_ETH_QUEUES(sc); /* * Total number of FW statistics requests = * 1 for port stats + 1 for PF stats + num of queues */ sc->fw_stats_num = (2 + num_queue_stats); /* * Request is built from stats_query_header and an array of * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT * rules. The real number of requests is configured in the * stats_query_header. */ num_groups = ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", sc->fw_stats_num, num_groups); sc->fw_stats_req_size = (sizeof(struct stats_query_header) + (num_groups * sizeof(struct stats_query_cmd_group))); /* * Data for statistics requests + stats_counter. * stats_counter holds per-STORM counters that are incremented when * STORM has finished with the current request. Memory for FCoE * offloaded statistics is counted anyway, even if it will not be sent. * VF stats are not accounted for here as the data of VF stats is stored * in memory allocated by the VF, not here. */ sc->fw_stats_data_size = (sizeof(struct stats_counter) + sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + /* sizeof(struct fcoe_statistics_params) + */ (sizeof(struct per_queue_stats) * num_queue_stats)); if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), &sc->fw_stats_dma, "fw stats") != 0) { bxe_free_fw_stats_mem(sc); return (-1); } /* set up the shortcuts */ sc->fw_stats_req = (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; sc->fw_stats_data = (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + sc->fw_stats_req_size); sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + sc->fw_stats_req_size); BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", (uintmax_t)sc->fw_stats_req_mapping); BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", (uintmax_t)sc->fw_stats_data_mapping); return (0); } /* * Bits map: * 0-7 - Engine0 load counter. * 8-15 - Engine1 load counter. * 16 - Engine0 RESET_IN_PROGRESS bit. * 17 - Engine1 RESET_IN_PROGRESS bit. * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active * function on the engine * 19 - Engine1 ONE_IS_LOADED. * 20 - Chip reset flow bit. When set, a non-leader must wait for the leader * to complete on both engines (check both RESET_IN_PROGRESS bits, not * just the one belonging to its own engine).
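For example, under this layout a register value of 0x00050102 would decode as: engine0 load counter 2, engine1 load counter 1, engine0 RESET_IN_PROGRESS set, and engine0 ONE_IS_LOADED set.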
*/ #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff #define BXE_PATH0_LOAD_CNT_SHIFT 0 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 #define BXE_PATH1_LOAD_CNT_SHIFT 8 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 #define BXE_GLOBAL_RESET_BIT 0x00040000 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ static void bxe_set_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ static void bxe_clear_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ static uint8_t bxe_reset_is_global(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; } /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ static void bxe_set_reset_done(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Clear the bit */ val &= ~bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ static void bxe_set_reset_in_progress(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Set the bit */ val |= bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; /* return false if bit is set */ return (val & bit) ? FALSE : TRUE; } /* get the load status for an engine, should be run under rtnl lock */ static uint8_t bxe_get_load_status(struct bxe_softc *sc, int engine) { uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); val = ((val & mask) >> shift); BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); return (val != 0); } /* set pf load mark */ /* XXX needs to be under rtnl lock */ static void bxe_set_pf_load(struct bxe_softc *sc) { uint32_t val; uint32_t val1; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); /* get the current counter value */ val1 = ((val & mask) >> shift); /* set bit of this PF */ val1 |= (1 << SC_ABS_FUNC(sc)); /* clear the old value */ val &= ~mask; /* set the new one */ val |= ((val1 << shift) & mask); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* clear pf load mark */ /* XXX needs to be under rtnl lock */ static uint8_t bxe_clear_pf_load(struct bxe_softc *sc) { uint32_t val1, val; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; /* clear bit of that PF */ val1 &= ~(1 << SC_ABS_FUNC(sc)); /* clear the old value */ val &= ~mask; /* set the new one */ val |= ((val1 << shift) & mask); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); return (val1 != 0); } /* send load request to mcp and analyze response */ static int bxe_nic_load_request(struct bxe_softc *sc, uint32_t *load_code) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); /* get the current FW pulse sequence */ sc->fw_drv_pulse_wr_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & DRV_PULSE_SEQ_MASK); BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", sc->fw_drv_pulse_wr_seq); /* load request */ (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); /* if the MCP fails to respond we must abort */ if (!(*load_code)) { BLOGE(sc, "MCP response failure!\n"); return (-1); } /* if MCP refused then must abort */ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { BLOGE(sc, "MCP refused load request\n"); return (-1); } return (0); } /* * Check whether another PF has already loaded FW to chip. In virtualized * environments a pf from another VM may have already initialized the device * including loading FW. */ static int bxe_nic_load_analyze_req(struct bxe_softc *sc, uint32_t load_code) { uint32_t my_fw, loaded_fw; /* is another pf loaded on this engine?
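Any MCP response other than LOAD_COMMON or LOAD_COMMON_CHIP means some other function initialized the chip first, in which case the firmware already in the chip must exactly match the version this driver was built against.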
*/ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { /* build my FW version dword */ my_fw = (BCM_5710_FW_MAJOR_VERSION + (BCM_5710_FW_MINOR_VERSION << 8) + (BCM_5710_FW_REVISION_VERSION << 16) + (BCM_5710_FW_ENGINEERING_VERSION << 24)); /* read loaded FW from chip */ loaded_fw = REG_RD(sc, XSEM_REG_PRAM); BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", loaded_fw, my_fw); /* abort nic load if version mismatch */ if (my_fw != loaded_fw) { BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n", loaded_fw, my_fw); return (-1); } } return (0); } /* mark PMF if applicable */ static void bxe_nic_load_pmf(struct bxe_softc *sc, uint32_t load_code) { uint32_t ncsi_oem_data_addr; if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { /* * Barrier here for ordering between the writing to sc->port.pmf here * and reading it from the periodic task. */ sc->port.pmf = 1; mb(); } else { sc->port.pmf = 0; } BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); /* XXX needed? */ if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); if (ncsi_oem_data_addr) { REG_WR(sc, (ncsi_oem_data_addr + offsetof(struct glob_ncsi_oem_data, driver_version)), 0); } } } } static void bxe_read_mf_cfg(struct bxe_softc *sc) { int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); int abs_func; int vn; if (BXE_NOMCP(sc)) { return; /* what should be the default value in this case */ } /* * The formula for computing the absolute function number is... * For 2 port configuration (4 functions per port): * abs_func = 2 * vn + SC_PORT + SC_PATH * For 4 port configuration (2 functions per port): * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH */ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); if (abs_func >= E1H_FUNC_MAX) { break; } sc->devinfo.mf_info.mf_config[vn] = MFCFG_RD(sc, func_mf_config[abs_func].config); } if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; } else { BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; } } /* acquire split MCP access lock register */ static int bxe_acquire_alr(struct bxe_softc *sc) { uint32_t j, val; for (j = 0; j < 1000; j++) { val = (1UL << 31); REG_WR(sc, GRCBASE_MCP + 0x9c, val); val = REG_RD(sc, GRCBASE_MCP + 0x9c); if (val & (1L << 31)) break; DELAY(5000); } if (!(val & (1L << 31))) { BLOGE(sc, "Cannot acquire MCP access lock register\n"); return (-1); } return (0); } /* release split MCP access lock register */ static void bxe_release_alr(struct bxe_softc *sc) { REG_WR(sc, GRCBASE_MCP + 0x9c, 0); } static void bxe_fan_failure(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t ext_phy_config; /* mark the failure */ ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, ext_phy_config); /* log the failure */ BLOGW(sc, "Fan Failure has caused the driver to shut down " "the card to prevent permanent damage. 
" "Please contact OEM Support for assistance\n"); /* XXX */ #if 1 bxe_panic(sc, ("Schedule task to handle fan failure\n")); #else /* * Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); schedule_delayed_work(&sc->sp_rtnl_task, 0); #endif } /* this function is called upon a link interrupt */ static void bxe_link_attn(struct bxe_softc *sc) { uint32_t pause_enabled = 0; struct host_port_stats *pstats; int cmng_fns; /* Make sure that we are synced with the current statistics */ bxe_stats_handle(sc, STATS_EVENT_STOP); elink_link_update(&sc->link_params, &sc->link_vars); if (sc->link_vars.link_up) { /* dropless flow control */ if (!CHIP_IS_E1(sc) && sc->dropless_fc) { pause_enabled = 0; if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { pause_enabled = 1; } REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), pause_enabled); } if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { pstats = BXE_SP(sc, port_stats); /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } if (sc->state == BXE_STATE_OPEN) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } } if (sc->link_vars.link_up && sc->link_vars.line_speed) { cmng_fns = bxe_get_cmng_fns_mode(sc); if (cmng_fns != CMNG_FNS_NONE) { bxe_cmng_fns_init(sc, FALSE, cmng_fns); storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } else { /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); } } bxe_link_report_locked(sc); if (IS_MF(sc)) { ; // XXX bxe_link_sync_notify(sc); } } static void bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted) { int port = SC_PORT(sc); uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0; uint32_t aeu_mask; uint32_t nig_mask = 0; uint32_t reg_addr; uint32_t igu_acked; uint32_t cnt; if (sc->attn_state & asserted) { BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, aeu_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", aeu_mask, asserted); aeu_mask &= ~(asserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, aeu_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state |= asserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); if (asserted & ATTN_HARD_WIRED_MASK) { if (asserted & ATTN_NIG_FOR_FUNC) { bxe_acquire_phy_lock(sc); /* save nig interrupt mask */ nig_mask = REG_RD(sc, nig_int_mask_addr); /* If nig_mask is not set, no need to call the update function */ if (nig_mask) { REG_WR(sc, nig_int_mask_addr, 0); bxe_link_attn(sc); } /* handle unicore attn? 
*/ } if (asserted & ATTN_SW_TIMER_4_FUNC) { BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); } if (asserted & GPIO_2_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); } if (asserted & GPIO_3_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); } if (asserted & GPIO_4_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); } if (port == 0) { if (asserted & ATTN_GENERAL_ATTN_1) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); } if (asserted & ATTN_GENERAL_ATTN_2) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); } if (asserted & ATTN_GENERAL_ATTN_3) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); } } else { if (asserted & ATTN_GENERAL_ATTN_4) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); } if (asserted & ATTN_GENERAL_ATTN_5) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); } if (asserted & ATTN_GENERAL_ATTN_6) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); } } } /* hardwired */ if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); } BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", asserted, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, asserted); /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) { /* * Verify that IGU ack through BAR was written before restoring * NIG mask. This loop should exit after 2-3 iterations max. */ if (sc->devinfo.int_block != INT_BLOCK_HC) { cnt = 0; do { igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && (++cnt < MAX_IGU_ATTN_ACK_TO)); if (!igu_acked) { BLOGE(sc, "Failed to verify IGU ack on time\n"); } mb(); } REG_WR(sc, nig_int_mask_addr, nig_mask); bxe_release_phy_lock(sc); } } static void bxe_print_next_block(struct bxe_softc *sc, int idx, const char *blk) { BLOGI(sc, "%s%s", idx ? 
", " : "", blk); } static int bxe_check_blocks_with_parity0(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "BRB"); break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PARSER"); break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSDM"); break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "SEARCHER"); break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TCM"); break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSEMI"); break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XPB"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity1(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { int i = 0; uint32_t cur_bit = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PBF"); break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "QM"); break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TM"); break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSDM"); break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XCM"); break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSEMI"); break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DOORBELLQ"); break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "NIG"); break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DEBUG"); break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USDM"); break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UCM"); break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USEMI"); break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UPB"); break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSDM"); break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CCM"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity2(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSEMI"); break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 
if (print) bxe_print_next_block(sc, par_num++, "PXP"); break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CFC"); break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CDU"); break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DMAE"); break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "IGU"); break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "MISC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity3(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP ROM"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP RX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP TX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP SCPAD"); *global = TRUE; break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity4(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PGLUE_B"); break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "ATC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static uint8_t bxe_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print, uint32_t *sig) { int par_num = 0; if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { BLOGE(sc, "Parity error: HW block parity attention:\n" "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); if (print) BLOGI(sc, "Parity errors detected in blocks: "); par_num = bxe_check_blocks_with_parity0(sc, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); par_num = bxe_check_blocks_with_parity1(sc, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); par_num = bxe_check_blocks_with_parity2(sc, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bxe_check_blocks_with_parity3(sc, sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); par_num = bxe_check_blocks_with_parity4(sc, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) BLOGI(sc, "\n"); return (TRUE); } return (FALSE); } static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print) { struct attn_route attn = { {0} }; int port = SC_PORT(sc); attn.sig[0] 
= REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); /* * Since MCP attentions can't be disabled inside the block, we need to * read AEU registers to see whether they're currently disabled */ attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & MISC_AEU_ENABLE_MCP_PRTY_BITS) | ~MISC_AEU_ENABLE_MCP_PRTY_BITS); if (!CHIP_IS_E1x(sc)) attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); return (bxe_parity_attn(sc, global, print, attn.sig)); } static void bxe_attn_int_deasserted4(struct bxe_softc *sc, uint32_t attn) { uint32_t val; if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); } if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); BLOGE(sc, "ATC hw attention 0x%08x\n", val); if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); } if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { BLOGE(sc, "FATAL parity attention set4 0x%08x\n", (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); } } static void bxe_e1h_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); bxe_tx_disable(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } static void bxe_e1h_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); // XXX bxe_tx_enable(sc); } /* * called due to MCP event (on pmf): * reread new bandwidth configuration * configure FW * notify other functions
about the change */ static void bxe_config_mf_bw(struct bxe_softc *sc) { if (sc->link_vars.link_up) { bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); // XXX bxe_link_sync_notify(sc); } storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } static void bxe_set_mf_bw(struct bxe_softc *sc) { bxe_config_mf_bw(sc); bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } static void bxe_handle_eee_event(struct bxe_softc *sc) { BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 static void bxe_drv_info_ether_stat(struct bxe_softc *sc) { struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; strlcpy(ether_stat->version, BXE_DRIVER_VERSION, ETH_STAT_INFO_VERSION_LEN); /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, ether_stat->mac_local + MAC_PAD, MAC_PAD, ETH_ALEN); ether_stat->mtu_size = sc->mtu; ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; } // XXX ether_stat->feature_flags |= ???; ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; ether_stat->txq_size = sc->tx_ring_size; ether_stat->rxq_size = sc->rx_ring_size; } static void bxe_handle_drv_info_req(struct bxe_softc *sc) { enum drv_info_opcode op_code; uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT); memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); switch (op_code) { case ETH_STATS_OPCODE: bxe_drv_info_ether_stat(sc); break; case FCOE_STATS_OPCODE: case ISCSI_STATS_OPCODE: default: /* if op code isn't supported - send NACK */ bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } /* * If we got drv_info attn from MFW then these fields are defined in * shmem2 for sure */ SHMEM2_WR(sc, drv_info_host_addr_lo, U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); SHMEM2_WR(sc, drv_info_host_addr_hi, U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); } static void bxe_dcc_event(struct bxe_softc *sc, uint32_t dcc_event) { BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { /* * This is the only place besides the function initialization * where the sc->flags can change so it is done without any * locks */ if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; bxe_e1h_disable(sc); } else { BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; bxe_e1h_enable(sc); } dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; } if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { bxe_config_mf_bw(sc); dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; } /* Report results to MCP */ if (dcc_event) bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); else bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); } static void bxe_pmf_update(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; sc->port.pmf = 1; BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); /* * We need the mb() to 
ensure the ordering between the writing to * sc->port.pmf here and reading it from the bxe_periodic_task(). */ mb(); /* queue a periodic task */ // XXX schedule task... // XXX bxe_dcbx_pmf_update(sc); /* enable nig attention */ val = (0xff0f | (1 << (SC_VN(sc) + 4))); if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); } bxe_stats_handle(sc, STATS_EVENT_PMF); } static int bxe_mc_assert(struct bxe_softc *sc) { char last_idx; int i, rc = 0; uint32_t row0, row1, row2, row3; /* XSTORM */ last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* TSTORM */ last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* CSTORM */ last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* USTORM */ last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } return (rc); } static void bxe_attn_int_deasserted3(struct 
bxe_softc *sc, uint32_t attn) { int func = SC_FUNC(sc); uint32_t val; if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { if (attn & BXE_PMF_LINK_ASSERT(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bxe_read_mf_cfg(sc); sc->devinfo.mf_info.mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); if (val & DRV_STATUS_DCC_EVENT_MASK) bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); if (val & DRV_STATUS_SET_MF_BW) bxe_set_mf_bw(sc); if (val & DRV_STATUS_DRV_INFO_REQ) bxe_handle_drv_info_req(sc); if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) bxe_pmf_update(sc); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bxe_handle_eee_event(sc); if (sc->link_vars.periodic_flags & ELINK_PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ bxe_acquire_phy_lock(sc); sc->link_vars.periodic_flags &= ~ELINK_PERIODIC_FLAGS_LINK_EVENT; bxe_release_phy_lock(sc); if (IS_MF(sc)) ; // XXX bxe_link_sync_notify(sc); bxe_link_report(sc); } /* * Always call it here: bxe_link_report() will * prevent the link indication duplication. */ bxe_link_status_update(sc); } else if (attn & BXE_MC_ASSERT_BITS) { BLOGE(sc, "MC assert!\n"); bxe_mc_assert(sc); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); bxe_panic(sc, ("MC assert!\n")); } else if (attn & BXE_MCP_ASSERT) { BLOGE(sc, "MCP assert!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); // XXX bxe_fw_dump(sc); } else { BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); } } if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); if (attn & BXE_GRC_TIMEOUT) { val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); BLOGE(sc, "GRC time-out 0x%08x\n", val); } if (attn & BXE_GRC_RSV) { val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); BLOGE(sc, "GRC reserved 0x%08x\n", val); } REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); } } static void bxe_attn_int_deasserted2(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val0, mask0, val1, mask1; uint32_t val; if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); BLOGE(sc, "CFC hw attention 0x%08x\n", val); /* CFC error attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from CFC\n"); } } if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); /* RQ_USDMDP_FIFO_OVERFLOW */ if (val & 0x18000) { BLOGE(sc, "FATAL error from PXP\n"); } if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); } } #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT if (attn & AEU_PXP2_HW_INT_BIT) { /* CQ47854 workaround: do not panic on * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR */ if (!CHIP_IS_E1x(sc)) { mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); /* * If only PXP2_EOP_ERROR_BIT is set in * STS0 and STS1 - clear it * * we probably lose additional attentions between * STS0 and STS_CLR0; in this case the user will not * be notified about them */ if (val0 & mask0 & PXP2_EOP_ERROR_BIT && !(val1 & mask1)) val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); /* print the register, since no one can restore it */ BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); /* * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR * then notify */ if (val0 & PXP2_EOP_ERROR_BIT) { BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); /* * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is * set then clear attention from PXP2 block without panic */ if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && ((val1 & mask1) == 0)) attn &= ~AEU_PXP2_HW_INT_BIT; } } } if (attn & HW_INTERRUT_ASSERT_SET_2) { reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set2 0x%x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); bxe_panic(sc, ("HW block attention set2\n")); } } static void bxe_attn_int_deasserted1(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); BLOGE(sc, "DB hw attention 0x%08x\n", val); /* DORQ discard attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from DORQ\n"); } } if (attn & HW_INTERRUT_ASSERT_SET_1) { reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); bxe_panic(sc, ("HW block attention set1\n")); } } static void bxe_attn_int_deasserted0(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; reg_offset = (port) ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { val = REG_RD(sc, reg_offset); val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_offset, val); BLOGW(sc, "SPIO5 hw attention\n"); /* Fan failure attention */ elink_hw_reset_phy(&sc->link_params); bxe_fan_failure(sc); } if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_handle_module_detect_int(&sc->link_params); bxe_release_phy_lock(sc); } if (attn & HW_INTERRUT_ASSERT_SET_0) { val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); REG_WR(sc, reg_offset, val); bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", (attn & HW_INTERRUT_ASSERT_SET_0))); } } static void bxe_attn_int_deasserted(struct bxe_softc *sc, uint32_t deasserted) { struct attn_route attn; struct attn_route *group_mask; int port = SC_PORT(sc); int index; uint32_t reg_addr; uint32_t val; uint32_t aeu_mask; uint8_t global = FALSE; /* * Need to take HW lock because MCP or other port might also * try to handle this event. */ bxe_acquire_alr(sc); if (bxe_chk_parity_attn(sc, &global, TRUE)) { /* XXX * In case of parity errors don't handle attentions so that * other function would "see" parity errors. */ sc->recovery_state = BXE_RECOVERY_INIT; // XXX schedule a recovery task... /* disable HW interrupts */ bxe_int_disable(sc); bxe_release_alr(sc); return; } attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); if (!CHIP_IS_E1x(sc)) { attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); } else { attn.sig[4] = 0; } BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { if (deasserted & (1 << index)) { group_mask = &sc->attn_group[index]; BLOGD(sc, DBG_INTR, "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], group_mask->sig[4]); bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); } } bxe_release_alr(sc); if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); } val = ~deasserted; BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", val, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, val); if (~sc->attn_state & deasserted) { BLOGE(sc, "IGU error\n"); } reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, reg_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", aeu_mask, deasserted); aeu_mask |= (deasserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, reg_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state &= ~deasserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); } static void bxe_attn_int(struct bxe_softc *sc) { /* read local copy of bits */ uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); uint32_t attn_state = sc->attn_state; /* look for changed bits */ uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; uint32_t deasserted = ~attn_bits & attn_ack & attn_state; BLOGD(sc, DBG_INTR, "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", attn_bits, attn_ack, asserted, deasserted); if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { BLOGE(sc, "BAD attention state\n"); } /* handle bits that were raised */ if (asserted) { bxe_attn_int_asserted(sc, asserted); } if (deasserted) { bxe_attn_int_deasserted(sc, deasserted); } } static uint16_t bxe_update_dsb_idx(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; uint16_t rc = 0; mb(); /* status block is written to by the chip */ if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= BXE_DEF_SB_ATT_IDX; } if (sc->def_idx != def_sb->sp_sb.running_index) { sc->def_idx = def_sb->sp_sb.running_index; rc |= BXE_DEF_SB_IDX; } mb(); return (rc); } static inline struct ecore_queue_sp_obj * bxe_cid_to_q_obj(struct bxe_softc *sc, uint32_t cid) { BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); } static void bxe_handle_mcast_eqe(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam; int rc; memset(&rparam, 0, sizeof(rparam)); rparam.mcast_obj = &sc->mcast_obj; BXE_MCAST_LOCK(sc); /* clear pending state for the last command */ sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); /* if there are pending mcast commands - send them */ if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); if (rc < 0) { BLOGD(sc, DBG_SP, "ERROR: Failed to send pending mcast commands (%d)\n", rc); } } BXE_MCAST_UNLOCK(sc); } static void bxe_handle_classification_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { unsigned long ramrod_flags = 0; int rc = 0; uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; struct ecore_vlan_mac_obj *vlan_mac_obj; /* always push next commands out, don't wait here */ bit_set(&ramrod_flags, RAMROD_CONT); switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { case ECORE_FILTER_MAC_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); vlan_mac_obj = &sc->sp_objs[cid].mac_obj; break; case ECORE_FILTER_MCAST_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); /* * This is only relevant for 57710 where multicast MACs are * configured as unicast MACs using the same ramrod. 
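 *
 * XXX For reference, a sketch of how the echo word examined by this
 * function packs its two fields (using only the masks already in play
 * above):
 *
 *   cid  = echo & BXE_SWCID_MASK;             low bits: connection id
 *   type = le32toh(echo) >> BXE_SWCID_SHIFT;  high bits: pending
 *                                             command type
 *
 * One 32-bit completion cookie therefore names both the queue object
 * and the classification command that completed.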
*/ bxe_handle_mcast_eqe(sc); return; default: BLOGE(sc, "Unsupported classification command: %d\n", elem->message.data.eth_event.echo); return; } rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); } else if (rc > 0) { BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); } } static void bxe_handle_rx_mode_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); /* send rx_mode command again if was requested */ if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { bxe_set_storm_rx_mode(sc); } } static void bxe_update_eq_prod(struct bxe_softc *sc, uint16_t prod) { storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); wmb(); /* keep prod updates ordered */ } static void bxe_eq_int(struct bxe_softc *sc) { uint16_t hw_cons, sw_cons, sw_prod; union event_ring_elem *elem; uint8_t echo; uint32_t cid; uint8_t opcode; int spqe_cnt = 0; struct ecore_queue_sp_obj *q_obj; struct ecore_func_sp_obj *f_obj = &sc->func_obj; struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; hw_cons = le16toh(*sc->eq_cons_sb); /* * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. * when we get to the next-page we need to adjust so the loop * condition below will be met. The next element is the size of a * regular element and hence incrementing by 1 */ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { hw_cons++; } /* * This function may never run in parallel with itself for a * specific sc and no need for a read memory barrier here. */ sw_cons = sc->eq_cons; sw_prod = sc->eq_prod; BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { elem = &sc->eq[EQ_DESC(sw_cons)]; /* elem CID originates from FW, actually LE */ cid = SW_CID(elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: BLOGD(sc, DBG_SP, "got statistics completion event %d\n", sc->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; case EVENT_RING_OPCODE_CFC_DEL: /* handle according to cid range */ /* we may want to verify here that the sc state is HALTING */ BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); q_obj = bxe_cid_to_q_obj(sc, cid); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { break; } goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: echo = elem->message.data.function_update_event.echo; if (echo == SWITCH_UPDATE) { BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_SWITCH_UPDATE)) { break; } } else { BLOGD(sc, DBG_SP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); } goto next_spqe; case EVENT_RING_OPCODE_FORWARD_SETUP: q_obj = &bxe_fwd_sp_obj(sc, q_obj); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_SETUP_TX_ONLY)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: 
BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { break; } goto next_spqe; } switch (opcode | sc->state) { case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); rss_raw->clear_pending(rss_raw); break; case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); bxe_handle_classification_eqe(sc, elem); break; case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got mcast ramrod\n"); bxe_handle_mcast_eqe(sc); break; case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); bxe_handle_rx_mode_eqe(sc, elem); break; default: /* unknown event log error and continue */ BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", elem->message.opcode, sc->state); } next_spqe: spqe_cnt++; } /* for */ mb(); atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); sc->eq_cons = sw_cons; sc->eq_prod = sw_prod; /* make sure that above mem writes were issued towards the memory */ wmb(); /* update producer */ bxe_update_eq_prod(sc, sc->eq_prod); } static void bxe_handle_sp_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; uint16_t status; BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); /* what work needs to be performed? */ status = bxe_update_dsb_idx(sc); BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); /* HW attentions */ if (status & BXE_DEF_SB_ATT_IDX) { BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); bxe_attn_int(sc); status &= ~BXE_DEF_SB_ATT_IDX; } /* SP events: STAT_QUERY and others */ if (status & BXE_DEF_SB_IDX) { /* handle EQ completions */ BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); bxe_eq_int(sc); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, le16toh(sc->def_idx), IGU_INT_NOP, 1); status &= ~BXE_DEF_SB_IDX; } /* if status is non zero then something went wrong */ if (__predict_false(status)) { BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); } /* ack status block only if something was actually handled */ bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); /* * Must be called after the EQ processing (since eq leads to sriov * ramrod completion flows). * This flow may have been scheduled by the arrival of a ramrod * completion, or by the sriov code rescheduling itself. 
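 *
 * XXX On the taskqueue calls in the handlers below: the
 * taskqueue_enqueue_fast(9) wrapper has been retired. It had long been
 * just an alias for taskqueue_enqueue(9), and queues created with
 * taskqueue_create_fast() keep their spin-lock semantics, so the
 * converted call sites are not a functional change. A minimal sketch
 * of the pattern, mirroring bxe_interrupt_attach() below:
 *
 *   TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
 *   fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
 *                                  taskqueue_thread_enqueue, &fp->tq);
 *   taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s", fp->tq_name);
 *   ...
 *   taskqueue_enqueue(fp->tq, &fp->tq_task);  (formerly _enqueue_fast)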
*/ // XXX bxe_iov_sp_task(sc); } static void bxe_handle_fp_tq(void *context, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)context; struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); /* XXX * IFF_DRV_RUNNING state can't be checked here since we process * slowpath events on a client queue during setup. Instead * we need to add a "process/continue" flag here that the driver * can use to tell the task here not to do anything. */ #if 0 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { return; } #endif /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); more_tx = bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do */ - taskqueue_enqueue_fast(fp->tq, &fp->tq_task); + taskqueue_enqueue(fp->tq, &fp->tq_task); return; } bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } static void bxe_task_fp(struct bxe_fastpath *fp) { struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); more_tx = bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do, bail out of this ISR and process later */ - taskqueue_enqueue_fast(fp->tq, &fp->tq_task); + taskqueue_enqueue(fp->tq, &fp->tq_task); return; } /* * Here we write the fastpath index taken before doing any tx or rx work. * It is quite possible that other hw events occurred up to this point and * they were actually processed accordingly above. Since we're going to * write an older fastpath index, an interrupt is coming which we might * not do any work in. */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } /* * Legacy interrupt entry point. * * Verifies that the controller generated the interrupt and * then calls a separate routine to handle the various * interrupt causes: link, RX, and TX.
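 *
 * XXX For example (derived from the mask math in the handler): with no
 * CNIC support, status bit 0x0001 selects the slowpath (default)
 * status block and each ethernet queue i maps to bit (0x2 << i):
 *
 *   mask = 0x2 << (fp->index + CNIC_SUPPORT(sc));
 *   queue 0 -> 0x0002, queue 1 -> 0x0004, queue 2 -> 0x0008, ...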
*/ static void bxe_intr_legacy(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; struct bxe_fastpath *fp; uint16_t status, mask; int i; BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); /* * 0 for ustorm, 1 for cstorm * the bits returned from ack_int() are 0-15 * bit 0 = attention status block * bit 1 = fast path status block * a mask of 0x2 or more = tx/rx event * a mask of 1 = slow path event */ status = bxe_ack_int(sc); /* the interrupt is not for us */ if (__predict_false(status == 0)) { BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); return; } BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); if (status & mask) { /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); status &= ~mask; } } if (__predict_false(status & 0x1)) { /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ - taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); + taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); status &= ~0x1; } if (__predict_false(status)) { BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); } } /* slowpath interrupt entry point */ static void bxe_intr_sp(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ - taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); + taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); } /* fastpath interrupt entry point */ static void bxe_intr_fp(void *xfp) { struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; struct bxe_softc *sc = fp->sc; BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); BLOGD(sc, DBG_INTR, "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); } /* Release all interrupts allocated by the driver. */ static void bxe_interrupt_free(struct bxe_softc *sc) { int i; switch (sc->interrupt_mode) { case INTR_MODE_INTX: BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); if (sc->intr[0].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[0].rid, sc->intr[0].resource); } break; case INTR_MODE_MSI: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; case INTR_MODE_MSIX: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; default: /* nothing to do as initial allocation failed */ break; } } /* * This function determines and allocates the appropriate * interrupt based on system capabilities and user request.
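 *
 * In practice the three do/while blocks below implement a fallback
 * chain (sketch):
 *
 *   MSI-X: min(num_queues + 1, msix_count) vectors, one for the
 *          slowpath plus one per ethernet queue; else
 *   MSI:   a single vector shared by slowpath and fastpath; else
 *   INTx:  a single shared legacy vector, or fail outright.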
* * The user may force a particular interrupt mode, specify * the number of receive queues, specify the method for * distributing received frames to receive queues, or use * the default settings which will automatically select the * best supported combination. In addition, the OS may or * may not support certain combinations of these settings. * This routine attempts to reconcile the settings requested * by the user with the capabilities available from the system * to select the optimal combination of features. * * Returns: * 0 = Success, !0 = Failure. */ static int bxe_interrupt_alloc(struct bxe_softc *sc) { int msix_count = 0; int msi_count = 0; int num_requested = 0; int num_allocated = 0; int rid, i, j; int rc; /* get the number of available MSI/MSI-X interrupts from the OS */ if (sc->interrupt_mode > 0) { if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { msix_count = pci_msix_count(sc->dev); } if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { msi_count = pci_msi_count(sc->dev); } BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", msi_count, msix_count); } do { /* try allocating MSI-X interrupt resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSIX) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || (msix_count < 2)) { sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } /* ask for the necessary number of MSI-X vectors */ num_requested = min((sc->num_queues + 1), msix_count); BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } if (num_allocated < 2) { /* possible? */ BLOGE(sc, "MSI-X allocation less than 2!\n"); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated - 1; rid = 1; /* initial resource identifier */ /* allocate the MSI-X vectors */ for (i = 0; i < num_allocated; i++) { sc->intr[i].rid = (rid + i); if ((sc->intr[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[i].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", i, (rid + i)); for (j = (i - 1); j >= 0; j--) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[j].rid, sc->intr[j].resource); } sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); } } while (0); do { /* try allocating MSI vector resources (at least 1) */ if (sc->interrupt_mode != INTR_MODE_MSI) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || (msi_count < 1)) { sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } /* ask for a single MSI vector */ num_requested = 1; BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI alloc failed (%d)!\n", rc); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } if (num_allocated != 1) { /* possible?
*/ BLOGE(sc, "MSI allocation is not 1!\n"); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated; rid = 1; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); } while (0); do { /* try allocating INTx vector resources */ if (sc->interrupt_mode != INTR_MODE_INTX) { break; } BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); /* only one vector for INTx */ sc->intr_count = 1; sc->num_queues = 1; rid = 0; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, (RF_ACTIVE | RF_SHAREABLE))) == NULL) { BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = -1; /* Failed! */ break; } BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); } while (0); if (sc->interrupt_mode == -1) { BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); rc = 1; } else { BLOGD(sc, DBG_LOAD, "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", sc->interrupt_mode, sc->num_queues); rc = 0; } return (rc); } static void bxe_interrupt_detach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; /* release interrupt resources */ for (i = 0; i < sc->intr_count; i++) { if (sc->intr[i].resource && sc->intr[i].tag) { BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); } } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tq) { taskqueue_drain(fp->tq, &fp->tq_task); taskqueue_free(fp->tq); fp->tq = NULL; } } if (sc->sp_tq) { taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); taskqueue_free(sc->sp_tq); sc->sp_tq = NULL; } } /* * Enables interrupts and attach to the ISR. * * When using multiple MSI/MSI-X vectors the first vector * is used for slowpath operations while all remaining * vectors are used for fastpath operations. If only a * single MSI/MSI-X vector is used (SINGLE_ISR) then the * ISR must look for both slowpath and fastpath completions. */ static int bxe_interrupt_attach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int rc = 0; int i; snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), "bxe%d_sp_tq", sc->unit); TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->sp_tq); taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ "%s", sc->sp_tq_name); for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tq_name, sizeof(fp->tq_name), "bxe%d_fp%d_tq", sc->unit, i); TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->tq); taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ "%s", fp->tq_name); } /* setup interrupt handlers */ if (sc->interrupt_mode == INTR_MODE_MSIX) { BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); /* * Setup the interrupt handler. 
Note that we pass the driver instance * to the interrupt handler for the slowpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_sp, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[0].resource, sc->intr[0].tag, "sp"); /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ /* initialize the fastpath vectors (note the first was used for sp) */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); /* * Setup the interrupt handler. Note that we pass the * fastpath context to the interrupt handler in this * case. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_fp, fp, &sc->intr[i + 1].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", (i + 1), rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[i + 1].resource, sc->intr[i + 1].tag, "fp%02d", i); /* bind the fastpath instance to a cpu */ if (sc->num_queues > 1) { bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); } fp->state = BXE_FP_STATE_IRQ; } } else if (sc->interrupt_mode == INTR_MODE_MSI) { BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. 
*/ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); goto bxe_interrupt_attach_exit; } } bxe_interrupt_attach_exit: return (rc); } static int bxe_init_hw_common_chip(struct bxe_softc *sc); static int bxe_init_hw_common(struct bxe_softc *sc); static int bxe_init_hw_port(struct bxe_softc *sc); static int bxe_init_hw_func(struct bxe_softc *sc); static void bxe_reset_common(struct bxe_softc *sc); static void bxe_reset_port(struct bxe_softc *sc); static void bxe_reset_func(struct bxe_softc *sc); static int bxe_gunzip_init(struct bxe_softc *sc); static void bxe_gunzip_end(struct bxe_softc *sc); static int bxe_init_firmware(struct bxe_softc *sc); static void bxe_release_firmware(struct bxe_softc *sc); static struct ecore_func_sp_drv_ops bxe_func_sp_drv = { .init_hw_cmn_chip = bxe_init_hw_common_chip, .init_hw_cmn = bxe_init_hw_common, .init_hw_port = bxe_init_hw_port, .init_hw_func = bxe_init_hw_func, .reset_hw_cmn = bxe_reset_common, .reset_hw_port = bxe_reset_port, .reset_hw_func = bxe_reset_func, .gunzip_init = bxe_gunzip_init, .gunzip_end = bxe_gunzip_end, .init_fw = bxe_init_firmware, .release_fw = bxe_release_firmware, }; static void bxe_init_func_obj(struct bxe_softc *sc) { sc->dmae_ready = 0; ecore_init_func_obj(sc, &sc->func_obj, BXE_SP(sc, func_rdata), BXE_SP_MAPPING(sc, func_rdata), BXE_SP(sc, func_afex_rdata), BXE_SP_MAPPING(sc, func_afex_rdata), &bxe_func_sp_drv); } static int bxe_init_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare the parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_INIT; func_params.params.hw_init.load_phase = load_code; /* * Via a plethora of function pointers, we will eventually reach * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
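 *
 * XXX Presumably the dispatch runs through the bxe_func_sp_drv ops
 * table registered by bxe_init_func_obj() above, keyed off the
 * load_phase handed back by the MCP (sketch):
 *
 *   FW_MSG_CODE_DRV_LOAD_COMMON_CHIP -> .init_hw_cmn_chip
 *   FW_MSG_CODE_DRV_LOAD_COMMON      -> .init_hw_cmn
 *   FW_MSG_CODE_DRV_LOAD_PORT        -> .init_hw_port
 *   FW_MSG_CODE_DRV_LOAD_FUNCTION    -> .init_hw_func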
*/ rc = ecore_func_state_change(sc, &func_params); return (rc); } static void bxe_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len) { uint32_t i; if (!(len % 4) && !(addr % 4)) { for (i = 0; i < len; i += 4) { REG_WR(sc, (addr + i), fill); } } else { for (i = 0; i < len; i++) { REG_WR8(sc, (addr + i), fill); } } } /* writes FP SP data to FW - data_size in dwords */ static void bxe_wr_fp_sb_data(struct bxe_softc *sc, int fw_sb_id, uint32_t *sb_data_p, uint32_t data_size) { int index; for (index = 0; index < data_size; index++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + (sizeof(uint32_t) * index)), *(sb_data_p + index)); } } static void bxe_zero_fp_sb(struct bxe_softc *sc, int fw_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; uint32_t *sb_data_p; uint32_t data_size = 0; if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_DISABLED; sb_data_e2.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_DISABLED; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); } bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_SYNC_BLOCK_SIZE); } static void bxe_wr_sp_sb_data(struct bxe_softc *sc, struct hc_sp_status_block_data *sp_sb_data) { int i; for (i = 0; i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); i++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + (i * sizeof(uint32_t))), *((uint32_t *)sp_sb_data + i)); } } static void bxe_zero_sp_sb(struct bxe_softc *sc) { struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); sp_sb_data.state = SB_DISABLED; sp_sb_data.p_func.vf_valid = FALSE; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_SYNC_BLOCK_SIZE); } static void bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { hc_sm->igu_sb_id = igu_sb_id; hc_sm->igu_seg_id = igu_seg_id; hc_sm->timer_value = 0xFF; hc_sm->time_to_expire = 0xFFFFFFFF; } static void bxe_map_sb_state_machines(struct hc_index_data *index_data) { /* zero out state machine indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; /* map indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= (SM_TX_ID << 
HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); } static void bxe_init_sb(struct bxe_softc *sc, bus_addr_t busaddr, int vfid, uint8_t vf_valid, int fw_sb_id, int igu_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; struct hc_status_block_sm *hc_sm_p; uint32_t *sb_data_p; int igu_seg_id; int data_size; if (CHIP_INT_MODE_IS_BC(sc)) { igu_seg_id = HC_SEG_ACCESS_NORM; } else { igu_seg_id = IGU_SEG_ACCESS_NORM; } bxe_zero_fp_sb(sc, fw_sb_id); if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_ENABLED; sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); sb_data_e2.common.p_func.vf_id = vfid; sb_data_e2.common.p_func.vf_valid = vf_valid; sb_data_e2.common.p_func.vnic_id = SC_VN(sc); sb_data_e2.common.same_igu_sb_1b = TRUE; sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e2.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e2.index_data); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_ENABLED; sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); sb_data_e1x.common.p_func.vf_id = 0xff; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); sb_data_e1x.common.same_igu_sb_1b = TRUE; sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e1x.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e1x.index_data); } bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); /* write indices to HW - PCI guarantees endianity of regpairs */ bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); } static inline uint8_t bxe_fp_qzone_id(struct bxe_fastpath *fp) { if (CHIP_IS_E1x(fp->sc)) { return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); } else { return (fp->cl_id); } } static inline uint32_t bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, struct bxe_fastpath *fp) { uint32_t offset = BAR_USTRORM_INTMEM; if (!CHIP_IS_E1x(sc)) { offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); } else { offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); } return (offset); } static void bxe_init_eth_fp(struct bxe_softc *sc, int idx) { struct bxe_fastpath *fp = &sc->fp[idx]; uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; unsigned long q_type = 0; int cos; fp->sc = sc; fp->index = idx; snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "bxe%d_fp%d_tx_lock", sc->unit, idx); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), "bxe%d_fp%d_rx_lock", sc->unit, idx); mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); fp->cl_id = (CHIP_IS_E1x(sc)) ? 
(SC_L_ID(sc) + idx) : /* want client ID same as IGU SB ID for non-E1 */ fp->igu_sb_id; fp->cl_qzone_id = bxe_fp_qzone_id(fp); /* setup sb indices */ if (!CHIP_IS_E1x(sc)) { fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; } else { fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; } /* init shortcut */ fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; /* * XXX If multiple CoS is ever supported then each fastpath structure * will need to maintain tx producer/consumer/dma/etc values *per* CoS. */ for (cos = 0; cos < sc->max_cos; cos++) { cids[cos] = idx; } fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; /* nothing more for a VF to do */ if (IS_VF(sc)) { return; } bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, fp->fw_sb_id, fp->igu_sb_id); bxe_update_fp_sb_idx(fp); /* Configure Queue State object */ bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); ecore_init_queue_obj(sc, &sc->sp_objs[idx].q_obj, fp->cl_id, cids, sc->max_cos, SC_FUNC(sc), BXE_SP(sc, q_rdata), BXE_SP_MAPPING(sc, q_rdata), q_type); /* configure classification DBs */ ecore_init_mac_obj(sc, &sc->sp_objs[idx].mac_obj, fp->cl_id, idx, SC_FUNC(sc), BXE_SP(sc, mac_rdata), BXE_SP_MAPPING(sc, mac_rdata), ECORE_FILTER_MAC_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); } static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod) { struct ustorm_eth_rx_producers rx_prods = { 0 }; uint32_t i; /* update producers */ rx_prods.bd_prod = rx_bd_prod; rx_prods.cqe_prod = rx_cq_prod; rx_prods.sge_prod = rx_sge_prod; /* * Make sure that the BD and SGE data is updated before updating the * producers since FW might read the BD/SGE right after the producer * is updated. * This is only applicable for weak-ordered memory model archs such * as IA-64. The following barrier is also mandatory since FW will * assume BDs must have buffers. */ wmb(); for (i = 0; i < (sizeof(rx_prods) / 4); i++) { REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)), ((uint32_t *)&rx_prods)[i]); } wmb(); /* keep prod updates ordered */ BLOGD(sc, DBG_RX, "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n", fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); } static void bxe_init_rx_rings(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->rx_bd_cons = 0; /* * Activate the BD ring...
* Warning, this will generate an interrupt (to the TSTORM) * so this can only be done after the chip is initialized */ bxe_update_rx_prod(sc, fp, fp->rx_bd_prod, fp->rx_cq_prod, fp->rx_sge_prod); if (i != 0) { continue; } if (CHIP_IS_E1(sc)) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), U64_LO(fp->rcq_dma.paddr)); REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), U64_HI(fp->rcq_dma.paddr)); } } } static void bxe_init_tx_ring_one(struct bxe_fastpath *fp) { SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1); fp->tx_db.data.zero_fill1 = 0; fp->tx_db.data.prod = 0; fp->tx_pkt_prod = 0; fp->tx_pkt_cons = 0; fp->tx_bd_prod = 0; fp->tx_bd_cons = 0; fp->eth_q_stats.tx_pkts = 0; } static inline void bxe_init_tx_rings(struct bxe_softc *sc) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_tx_ring_one(&sc->fp[i]); } } static void bxe_init_def_sb(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; bus_addr_t mapping = sc->def_sb_dma.paddr; int igu_sp_sb_index; int igu_seg_id; int port = SC_PORT(sc); int func = SC_FUNC(sc); int reg_offset, reg_offset_en5; uint64_t section; int index, sindex; struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); if (CHIP_INT_MODE_IS_BC(sc)) { igu_sp_sb_index = DEF_SB_IGU_ID; igu_seg_id = HC_SEG_ACCESS_DEF; } else { igu_sp_sb_index = sc->igu_dsb_id; igu_seg_id = IGU_SEG_ACCESS_DEF; } /* attentions */ section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, atten_status_block)); def_sb->atten_status_block.status_block_id = igu_sp_sb_index; sc->attn_state = 0; reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { /* take care of sig[0]..sig[4] */ for (sindex = 0; sindex < 4; sindex++) { sc->attn_group[index].sig[sindex] = REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); } if (!CHIP_IS_E1x(sc)) { /* * enable5 is separate from the rest of the registers, * and the address skip is 4 and not 16 between the * different groups */ sc->attn_group[index].sig[4] = REG_RD(sc, (reg_offset_en5 + (0x4 * index))); } else { sc->attn_group[index].sig[4] = 0; } } if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_offset = (port) ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; REG_WR(sc, reg_offset, U64_LO(section)); REG_WR(sc, (reg_offset + 4), U64_HI(section)); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); } section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, sp_sb)); bxe_zero_sp_sb(sc); /* PCI guarantees endianity of regpair */ sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); sp_sb_data.igu_sb_id = igu_sp_sb_index; sp_sb_data.igu_seg_id = igu_seg_id; sp_sb_data.p_func.pf_id = func; sp_sb_data.p_func.vnic_id = SC_VN(sc); sp_sb_data.p_func.vf_id = 0xff; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); } static void bxe_init_sp_ring(struct bxe_softc *sc) { atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); sc->spq_prod_idx = 0; sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; sc->spq_prod_bd = sc->spq; sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); } static void bxe_init_eq_ring(struct bxe_softc *sc) { union event_ring_elem *elem; int i; for (i = 1; i <= NUM_EQ_PAGES; i++) { elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); } sc->eq_cons = 0; sc->eq_prod = NUM_EQ_DESC; sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; atomic_store_rel_long(&sc->eq_spq_left, (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), NUM_EQ_DESC) - 1)); } static void bxe_init_internal_common(struct bxe_softc *sc) { int i; if (IS_MF_SI(sc)) { /* * In switch independent mode, the TSTORM needs to accept * packets that failed classification, since approximate match * mac addresses aren't written to NIG LLH. */ REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 2); } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */ REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 0); } /* * Zero this manually as its initialization is currently missing * in the initTool. */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 0); } if (!CHIP_IS_E1x(sc)) { REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE); } } static void bxe_init_internal(struct bxe_softc *sc, uint32_t load_code) { switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON: case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: bxe_init_internal_common(sc); /* no break */ case FW_MSG_CODE_DRV_LOAD_PORT: /* nothing to do */ /* no break */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: /* internal memory per function is initialized inside bxe_pf_init */ break; default: BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); break; } } static void storm_memset_func_cfg(struct bxe_softc *sc, struct tstorm_eth_function_common_config *tcfg, uint16_t abs_fid) { uint32_t addr; size_t size; addr = (BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); size = sizeof(struct tstorm_eth_function_common_config); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); } static void bxe_func_init(struct bxe_softc *sc, struct bxe_func_init_params *p) { struct tstorm_eth_function_common_config tcfg = { 0 }; if (CHIP_IS_E1x(sc)) { storm_memset_func_cfg(sc, &tcfg, p->func_id); } /* Enable the function in the FW */ storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); storm_memset_func_en(sc, p->func_id, 1); /* spq */ if (p->func_flgs & FUNC_FLG_SPQ) { storm_memset_spq_addr(sc, p->spq_map, p->func_id); REG_WR(sc, (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); } } /* * Calculates the sum of vn_min_rates. * It's needed for further normalizing of the min_rates. * Returns: * sum of vn_min_rates. * or * 0 - if all the min_rates are 0. * In the latter case the fairness algorithm should be deactivated. * If all min rates are not zero then those that are zeroes will be set to 1. */ static void bxe_calc_vn_min(struct bxe_softc *sc, struct cmng_init_input *input) { uint32_t vn_cfg; uint32_t vn_min_rate; int all_zero = 1; int vn; for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { vn_cfg = sc->devinfo.mf_info.mf_config[vn]; vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100); if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { /* skip hidden VNs */ vn_min_rate = 0; } else if (!vn_min_rate) { /* If min rate is zero - set it to 100 */ vn_min_rate = DEF_MIN_RATE; } else { all_zero = 0; } input->vnic_min_rate[vn] = vn_min_rate; } /* if ETS or all min rates are zeros - disable fairness */ if (BXE_IS_ETS_ENABLED(sc)) { input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); } else if (all_zero) { input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; BLOGD(sc, DBG_LOAD, "Fairness disabled (all MIN values are zeroes)\n"); } else { input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } } static inline uint16_t bxe_extract_max_cfg(struct bxe_softc *sc, uint32_t mf_cfg) { uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); if (!max_cfg) { BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); max_cfg = 100; } return (max_cfg); } static void bxe_calc_vn_max(struct bxe_softc *sc, int vn, struct cmng_init_input *input) { uint16_t vn_max_rate; uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; uint32_t max_cfg; if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { vn_max_rate = 0; } else { max_cfg = bxe_extract_max_cfg(sc, vn_cfg); if (IS_MF_SI(sc)) { /* max_cfg in percents of linkspeed */ vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); } else { /* SD modes */ /* max_cfg is absolute in 100Mb units */ vn_max_rate = (max_cfg * 100); } } BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn,
vn_max_rate); input->vnic_max_rate[vn] = vn_max_rate; } static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type) { struct cmng_init_input input; int vn; memset(&input, 0, sizeof(struct cmng_init_input)); input.port_rate = sc->link_vars.line_speed; if (cmng_type == CMNG_FNS_MINMAX) { /* read mf conf from shmem */ if (read_cfg) { bxe_read_mf_cfg(sc); } /* get VN min rate and enable fairness if not 0 */ bxe_calc_vn_min(sc, &input); /* get VN max rate */ if (sc->port.pmf) { for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { bxe_calc_vn_max(sc, vn, &input); } } /* always enable rate shaping and fairness */ input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; ecore_init_cmng(&input, &sc->cmng); return; } /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); } static int bxe_get_cmng_fns_mode(struct bxe_softc *sc) { if (CHIP_REV_IS_SLOW(sc)) { return (CMNG_FNS_NONE); } if (IS_MF(sc)) { return (CMNG_FNS_MINMAX); } return (CMNG_FNS_NONE); } static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port) { int vn; int func; uint32_t addr; size_t size; addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); size = sizeof(struct cmng_struct_per_port); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { func = func_by_vn(sc, vn); addr = (BAR_XSTRORM_INTMEM + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); size = sizeof(struct rate_shaping_vars_per_vn); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); addr = (BAR_XSTRORM_INTMEM + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); size = sizeof(struct fairness_vars_per_vn); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); } } static void bxe_pf_init(struct bxe_softc *sc) { struct bxe_func_init_params func_init = { 0 }; struct event_ring_data eq_data = { { 0 } }; uint16_t flags; if (!CHIP_IS_E1x(sc)) { /* reset IGU PF statistics: MSIX + ATTN */ /* PF */ REG_WR(sc, (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (BXE_IGU_STAS_MSG_VF_CNT * 4) + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 0); /* ATTN */ REG_WR(sc, (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (BXE_IGU_STAS_MSG_VF_CNT * 4) + (BXE_IGU_STAS_MSG_PF_CNT * 4) + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 0); } /* function setup flags */ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); /* * This flag is relevant for E1x only. * E2 doesn't have a TPA configuration in a function level. */ flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; func_init.func_flgs = flags; func_init.pf_id = SC_FUNC(sc); func_init.func_id = SC_FUNC(sc); func_init.spq_map = sc->spq_dma.paddr; func_init.spq_prod = sc->spq_prod_idx; bxe_func_init(sc, &func_init); memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); /* * Congestion management values depend on the link rate. * There is no active link so initial link rate is set to 10Gbps. * When the link comes up the congestion management values are * re-calculated according to the actual link rate. 
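 *
 * XXX A worked example of the max-rate math in bxe_calc_vn_max()
 * above, assuming the initial 10000 Mbps rate set below: in
 * switch-independent mode a max_cfg of 50 (percent) yields
 * (10000 * 50) / 100 = 5000 Mbps; in the switch-dependent modes
 * max_cfg is absolute in 100 Mb units, so 50 yields 50 * 100 = 5000
 * as well.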
*/ sc->link_vars.line_speed = SPEED_10000; bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); /* Only the PMF sets the HW */ if (sc->port.pmf) { storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } /* init Event Queue - PCI bus guarantees correct endianity */ eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); eq_data.producer = sc->eq_prod; eq_data.index_id = HC_SP_INDEX_EQ_CONS; eq_data.sb_id = DEF_SB_ID; storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); } static void bxe_hc_int_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; uint32_t val = REG_RD(sc, addr); uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && (sc->intr_count == 1)) ? TRUE : FALSE; uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; if (msix) { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0); val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (single_msix) { val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; } } else if (msi) { val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } else { val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (!CHIP_IS_E1(sc)) { BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); REG_WR(sc, addr, val); val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; } } if (CHIP_IS_E1(sc)) { REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); } BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); REG_WR(sc, addr, val); /* ensure that HC_CONFIG is written before leading/trailing edge config */ mb(); if (!CHIP_IS_E1(sc)) { /* init leading/trailing edge */ if (IS_MF(sc)) { val = (0xee0f | (1 << (SC_VN(sc) + 4))); if (sc->port.pmf) { /* enable nig and gpio3 attention */ val |= 0x1100; } } else { val = 0xffff; } REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); } /* make sure that interrupts are indeed enabled from here on */ mb(); } static void bxe_igu_int_enable(struct bxe_softc *sc) { uint32_t val; uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && (sc->intr_count == 1)) ? TRUE : FALSE; uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); if (msix) { val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); if (single_msix) { val |= IGU_PF_CONF_SINGLE_ISR_EN; } } else if (msi) { val &= ~IGU_PF_CONF_INT_LINE_EN; val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } else { val &= ~IGU_PF_CONF_MSI_MSIX_EN; val |= (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } /* clean previous status - need to configure igu prior to ack */ if ((!msix) || single_msix) { REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); bxe_ack_int(sc); } val |= IGU_PF_CONF_FUNC_EN; BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", val, ((msix) ? "MSI-X" : ((msi) ?
"MSI" : "INTx"))); REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); mb(); /* init leading/trailing edge */ if (IS_MF(sc)) { val = (0xee0f | (1 << (SC_VN(sc) + 4))); if (sc->port.pmf) { /* enable nig and gpio3 attention */ val |= 0x1100; } } else { val = 0xffff; } REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); /* make sure that interrupts are indeed enabled from here on */ mb(); } static void bxe_int_enable(struct bxe_softc *sc) { if (sc->devinfo.int_block == INT_BLOCK_HC) { bxe_hc_int_enable(sc); } else { bxe_igu_int_enable(sc); } } static void bxe_hc_int_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; uint32_t val = REG_RD(sc, addr); /* * In E1 we must use only PCI configuration space to disable MSI/MSIX * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC * block */ if (CHIP_IS_E1(sc)) { /* * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register * to prevent from HC sending interrupts after we exit the function */ REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } else { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); /* flush all outstanding writes */ mb(); REG_WR(sc, addr, val); if (REG_RD(sc, addr) != val) { BLOGE(sc, "proper val not read from HC IGU!\n"); } } static void bxe_igu_int_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~(IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); /* flush all outstanding writes */ mb(); REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { BLOGE(sc, "proper val not read from IGU!\n"); } } static void bxe_int_disable(struct bxe_softc *sc) { if (sc->devinfo.int_block == INT_BLOCK_HC) { bxe_hc_int_disable(sc); } else { bxe_igu_int_disable(sc); } } static void bxe_nic_init(struct bxe_softc *sc, int load_code) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_eth_fp(sc, i); } rmb(); /* ensure status block indices were read */ bxe_init_rx_rings(sc); bxe_init_tx_rings(sc); if (IS_VF(sc)) { return; } /* initialize MOD_ABS interrupts */ elink_init_mod_abs_int(sc, &sc->link_vars, sc->devinfo.chip_id, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, SC_PORT(sc)); bxe_init_def_sb(sc); bxe_update_dsb_idx(sc); bxe_init_sp_ring(sc); bxe_init_eq_ring(sc); bxe_init_internal(sc, load_code); bxe_pf_init(sc); bxe_stats_init(sc); /* flush all before enabling interrupts */ mb(); bxe_int_enable(sc); /* check for SPIO5 */ bxe_attn_int_deasserted0(sc, REG_RD(sc, (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + SC_PORT(sc)*4)) & AEU_INPUTS_ATTN_BITS_SPIO5); } static inline void bxe_init_objs(struct bxe_softc *sc) { /* mcast rules must be added to tx if tx switching is enabled */ ecore_obj_type o_type = (sc->flags & BXE_TX_SWITCHING) ? 
ECORE_OBJ_TYPE_RX_TX : ECORE_OBJ_TYPE_RX; /* RX_MODE controlling object */ ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); /* multicast configuration controlling object */ ecore_init_mcast_obj(sc, &sc->mcast_obj, sc->fp[0].cl_id, sc->fp[0].index, SC_FUNC(sc), SC_FUNC(sc), BXE_SP(sc, mcast_rdata), BXE_SP_MAPPING(sc, mcast_rdata), ECORE_FILTER_MCAST_PENDING, &sc->sp_state, o_type); /* Setup CAM credit pools */ ecore_init_mac_credit_pool(sc, &sc->macs_pool, SC_FUNC(sc), CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : VNICS_PER_PATH(sc)); ecore_init_vlan_credit_pool(sc, &sc->vlans_pool, SC_ABS_FUNC(sc) >> 1, CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : VNICS_PER_PATH(sc)); /* RSS configuration object */ ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp[0].cl_id, sc->fp[0].index, SC_FUNC(sc), SC_FUNC(sc), BXE_SP(sc, rss_rdata), BXE_SP_MAPPING(sc, rss_rdata), ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX); } /* * Initialize the function. This must be called before sending CLIENT_SETUP * for the first client. */ static inline int bxe_func_start(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; struct ecore_func_start_params *start_params = &func_params.params.start; /* Prepare parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_START; /* Function parameters */ start_params->mf_mode = sc->devinfo.mf_info.mf_mode; start_params->sd_vlan_tag = OVLAN(sc); if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { start_params->network_cos_mode = STATIC_COS; } else { /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; } //start_params->gre_tunnel_mode = 0; //start_params->gre_tunnel_rss = 0; return (ecore_func_state_change(sc, &func_params)); } static int bxe_set_power_state(struct bxe_softc *sc, uint8_t state) { uint16_t pmcsr; /* If there is no power capability, silently succeed */ if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { BLOGW(sc, "No power capability\n"); return (0); } pmcsr = pci_read_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 2); switch (state) { case PCI_PM_D0: pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); if (pmcsr & PCIM_PSTAT_DMASK) { /* delay required during transition out of D3hot */ DELAY(20000); } break; case PCI_PM_D3hot: /* XXX if there are other clients above don't shut down the power */ /* don't shut down the power for emulation and FPGA */ if (CHIP_REV_IS_SLOW(sc)) { return (0); } pmcsr &= ~PCIM_PSTAT_DMASK; pmcsr |= PCIM_PSTAT_D3; if (sc->wol) { pmcsr |= PCIM_PSTAT_PMEENABLE; } pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), pmcsr, 4); /* * No more memory access after this point until device is brought back * to D0 state. 
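 *
 * Illustrative sketch (not part of the driver): assuming the standard PCI
 * power-management register layout (D-state field in bits 1:0, PME_En at
 * bit 8), the D3hot path above is three bit operations:
 *
 *   pmcsr &= ~PCIM_PSTAT_DMASK;      // clear the current D-state (bits 1:0)
 *   pmcsr |= PCIM_PSTAT_D3;          // select D3hot (0x3)
 *   pmcsr |= PCIM_PSTAT_PMEENABLE;   // arm PME only when WOL is configured
 *
 * e.g. a pmcsr of 0x0000 (D0, PME disarmed) becomes 0x0103.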
*/ break; default: BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n", state, pmcsr); return (-1); } return (0); } /* return true if we succeeded in acquiring the lock */ static uint8_t bxe_trylock_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGD(sc, DBG_LOAD, "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return (FALSE); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); } /* try to acquire the lock */ REG_WR(sc, hw_lock_control_reg + 4, resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (TRUE); } BLOGE(sc, "Failed to get a resource lock 0x%x func %d " "lock_status 0x%x resource_bit 0x%x\n", resource, func, lock_status, resource_bit); return (FALSE); } /* * Get the recovery leader resource id according to the engine this function * belongs to. Currently only 2 engines are supported. */ static int bxe_get_leader_lock_resource(struct bxe_softc *sc) { if (SC_PATH(sc)) { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); } else { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); } } /* try to acquire a leader lock for current engine */ static uint8_t bxe_trylock_leader_lock(struct bxe_softc *sc) { return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } static int bxe_release_leader_lock(struct bxe_softc *sc) { return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } /* close gates #2, #3 and #4 */ static void bxe_set_234_gates(struct bxe_softc *sc, uint8_t close) { uint32_t val; /* gates #2 and #4a are closed/opened for "not E1" only */ if (!CHIP_IS_E1(sc)) { /* #4 */ REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); /* #2 */ REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); } /* #3 */ if (CHIP_IS_E1x(sc)) { /* prevent interrupts from HC on both ports */ val = REG_RD(sc, HC_REG_CONFIG_1); REG_WR(sc, HC_REG_CONFIG_1, (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); val = REG_RD(sc, HC_REG_CONFIG_0); REG_WR(sc, HC_REG_CONFIG_0, (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { /* Prevent incoming interrupts in IGU */ val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, (!close) ? (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); wmb(); } /* poll for pending writes bit, it should get cleared in no more than 1s */ static int bxe_er_poll_igu_vq(struct bxe_softc *sc) { uint32_t cnt = 1000; uint32_t pend_bits = 0; do { pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); if (pend_bits == 0) { break; } DELAY(1000); } while (--cnt > 0); if (cnt == 0) { BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); return (-1); } return (0); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ static void bxe_clp_reset_prep(struct bxe_softc *sc, uint32_t *magic_val) { /* Do some magic...
*/ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); *magic_val = val & SHARED_MF_CLP_MAGIC; MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); } /* restore the value of the 'magic' bit */ static void bxe_clp_reset_done(struct bxe_softc *sc, uint32_t magic_val) { /* Restore the 'magic' bit value... */ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); MFCFG_WR(sc, shared_mf_config.clp_mb, (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); } /* prepare for MCP reset, takes care of CLP configurations */ static void bxe_reset_mcp_prep(struct bxe_softc *sc, uint32_t *magic_val) { uint32_t shmem; uint32_t validity_offset; /* set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_prep(sc, magic_val); } /* get shmem offset */ shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); validity_offset = offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); /* Clear validity map flags */ if (shmem > 0) { REG_WR(sc, shmem + validity_offset, 0); } } #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ #define MCP_ONE_TIMEOUT 100 /* 100 ms */ static void bxe_mcp_wait_one(struct bxe_softc *sc) { /* special handling for emulation and FPGA (10 times longer) */ if (CHIP_REV_IS_SLOW(sc)) { DELAY((MCP_ONE_TIMEOUT*10) * 1000); } else { DELAY((MCP_ONE_TIMEOUT) * 1000); } } /* initialize shmem_base and waits for validity signature to appear */ static int bxe_init_shmem(struct bxe_softc *sc) { int cnt = 0; uint32_t val = 0; do { sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); if (sc->devinfo.shmem_base) { val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if (val & SHR_MEM_VALIDITY_MB) return (0); } bxe_mcp_wait_one(sc); } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); BLOGE(sc, "BAD MCP validity signature\n"); return (-1); } static int bxe_reset_mcp_comp(struct bxe_softc *sc, uint32_t magic_val) { int rc = bxe_init_shmem(sc); /* Restore the `magic' bit value */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_done(sc, magic_val); } return (rc); } static void bxe_pxp_prep(struct bxe_softc *sc) { if (!CHIP_IS_E1(sc)) { REG_WR(sc, PXP2_REG_RD_START_INIT, 0); REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); wmb(); } } /* * Reset the whole chip except for: * - PCIE core * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) * - IGU * - MISC (including AEU) * - GRC * - RBCN, RBCP */ static void bxe_process_kill_chip_reset(struct bxe_softc *sc, uint8_t global) { uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; uint32_t global_bits2, stay_reset2; /* * Bits that have to be set in reset_mask2 if we want to reset 'global' * (per chip) blocks. */ global_bits2 = MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; /* * Don't reset the following blocks. * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be * reset, as in 4 port device they might still be owned * by the MCP (there is only one leader per path). 
not_reset_mask1 = MISC_REGISTERS_RESET_REG_1_RST_HC | MISC_REGISTERS_RESET_REG_1_RST_PXPV | MISC_REGISTERS_RESET_REG_1_RST_PXP; not_reset_mask2 = MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | MISC_REGISTERS_RESET_REG_2_RST_RBCN | MISC_REGISTERS_RESET_REG_2_RST_GRC | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | MISC_REGISTERS_RESET_REG_2_RST_ATC | MISC_REGISTERS_RESET_REG_2_PGLC | MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; /* * Keep the following blocks in reset: * - all xxMACs are handled by the elink code. */ stay_reset2 = MISC_REGISTERS_RESET_REG_2_XMAC | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; /* Full reset masks according to the chip */ reset_mask1 = 0xffffffff; if (CHIP_IS_E1(sc)) reset_mask2 = 0xffff; else if (CHIP_IS_E1H(sc)) reset_mask2 = 0x1ffff; else if (CHIP_IS_E2(sc)) reset_mask2 = 0xfffff; else /* CHIP_IS_E3 */ reset_mask2 = 0x3ffffff; /* Don't reset global blocks unless we need to */ if (!global) reset_mask2 &= ~global_bits2; /* * In case of attention in the QM, we need to reset PXP * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM * because otherwise QM reset would release 'close the gates' shortly * before resetting the PXP, then the PSWRQ would send a write * request to PGLUE. Then when PXP is reset, PGLUE would try to * read the payload data from PSWWR, but PSWWR would not * respond. The write queue in PGLUE would get stuck and DMAE commands * would not return. Therefore it's important to reset the second * reset register (containing the * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM * bit). */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, reset_mask2 & (~not_reset_mask2)); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, reset_mask1 & (~not_reset_mask1)); mb(); wmb(); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2)); mb(); wmb(); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); wmb(); } static int bxe_process_kill(struct bxe_softc *sc, uint8_t global) { int cnt = 1000; uint32_t val = 0; uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; uint32_t tags_63_32 = 0; /* Empty the Tetris buffer, wait for 1s */ do { sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); if (CHIP_IS_E3(sc)) { tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); } if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && ((port_is_idle_0 & 0x1) == 0x1) && ((port_is_idle_1 & 0x1) == 0x1) && (pgl_exp_rom2 == 0xffffffff) && (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) break; DELAY(1000); } while (cnt-- > 0); if (cnt <= 0) { BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " "are still outstanding read requests after 1s! 
" "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return (-1); } mb(); /* Close gates #2, #3 and #4 */ bxe_set_234_gates(sc, TRUE); /* Poll for IGU VQs for 57712 and newer chips */ if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { return (-1); } /* XXX indicate that "process kill" is in progress to MCP */ /* clear "unprepared" bit */ REG_WR(sc, MISC_REG_UNPREPARED, 0); mb(); /* Make sure all is written to the chip before the reset */ wmb(); /* * Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ DELAY(1000); /* Prepare to chip reset: */ /* MCP */ if (global) { bxe_reset_mcp_prep(sc, &val); } /* PXP */ bxe_pxp_prep(sc); mb(); /* reset the chip */ bxe_process_kill_chip_reset(sc, global); mb(); /* clear errors in PGB */ if (!CHIP_IS_E1(sc)) REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); /* Recover after reset: */ /* MCP */ if (global && bxe_reset_mcp_comp(sc, val)) { return (-1); } /* XXX add resetting the NO_MCP mode DB here */ /* Open the gates #2, #3 and #4 */ bxe_set_234_gates(sc, FALSE); /* XXX * IGU/AEU preparation bring back the AEU/IGU to a reset state * re-enable attentions */ return (0); } static int bxe_leader_reset(struct bxe_softc *sc) { int rc = 0; uint8_t global = bxe_reset_is_global(sc); uint32_t load_code; /* * If not going to reset MCP, load "fake" driver to reset HW while * driver is owner of the HW. */ if (!global && !BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset; } if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { BLOGE(sc, "MCP unexpected response, aborting\n"); rc = -1; goto exit_leader_reset2; } load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset2; } } /* try to recover after the failure */ if (bxe_process_kill(sc, global)) { BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); rc = -1; goto exit_leader_reset2; } /* * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver * state. */ bxe_set_reset_done(sc); if (global) { bxe_clear_reset_global(sc); } exit_leader_reset2: /* unload "fake driver" if it was loaded */ if (!global && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } exit_leader_reset: sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); return (rc); } /* * prepare INIT transition, parameters configured: * - HC configuration * - Queue's CDU context */ static void bxe_pf_q_prep_init(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_queue_init_params *init_params) { uint8_t cos; int cxt_index, cxt_offset; bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); /* HC rate */ init_params->rx.hc_rate = sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; init_params->tx.hc_rate = sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; /* FW SB ID */ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; /* CQ index among the SB indices */ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; /* set maximum number of COSs supported by this queue */ init_params->max_cos = sc->max_cos; BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", fp->index, init_params->max_cos); /* set the context pointers queue object */ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { /* XXX change index/cid here if ever support multiple tx CoS */ /* fp->txdata[cos]->cid */ cxt_index = fp->index / ILT_PAGE_CIDS; cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; } } /* set flags that are common for the Tx-only and not normal connections */ static unsigned long bxe_get_common_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t zero_stats) { unsigned long flags = 0; /* PF driver will always initialize the Queue to an ACTIVE state */ bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); /* * tx only connections collect statistics (on the same index as the * parent connection). The statistics are zeroed when the parent * connection is initialized. */ bxe_set_bit(ECORE_Q_FLG_STATS, &flags); if (zero_stats) { bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); } /* * tx only connections can support tx-switching, though their * CoS-ness doesn't survive the loopback */ if (sc->flags & BXE_TX_SWITCHING) { bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); } bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); return (flags); } static unsigned long bxe_get_q_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { unsigned long flags = 0; if (IS_MF_SD(sc)) { bxe_set_bit(ECORE_Q_FLG_OV, &flags); } if (if_getcapenable(sc->ifp) & IFCAP_LRO) { bxe_set_bit(ECORE_Q_FLG_TPA, &flags); bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); } if (leading) { bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); } bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); /* merge with common flags */ return (flags | bxe_get_common_flags(sc, fp, TRUE)); } static void bxe_pf_q_prep_general(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_general_setup_params *gen_init, uint8_t cos) { gen_init->stat_id = bxe_stats_id(fp); gen_init->spcl_id = fp->cl_id; gen_init->mtu = sc->mtu; gen_init->cos = cos; } static void bxe_pf_rx_q_prep(struct bxe_softc *sc, struct bxe_fastpath *fp, struct rxq_pause_params *pause, struct ecore_rxq_setup_params *rxq_init) { uint8_t max_sge = 0; uint16_t sge_sz = 0; uint16_t tpa_agg_size = 0; pause->sge_th_lo = SGE_TH_LO(sc); pause->sge_th_hi = SGE_TH_HI(sc); /* validate SGE ring has enough to cross high threshold */ if (sc->dropless_fc && (pause->sge_th_hi + FW_PREFETCH_CNT) > (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { BLOGW(sc, "sge ring threshold limit\n"); } /* minimum max_aggregation_size is 2*MTU (two full buffers) */ tpa_agg_size = (2 * sc->mtu); if (tpa_agg_size < sc->max_aggregation_size) { tpa_agg_size = sc->max_aggregation_size; } max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); /* pause - not for e1 */ if (!CHIP_IS_E1(sc)) { pause->bd_th_lo = BD_TH_LO(sc); pause->bd_th_hi = BD_TH_HI(sc); pause->rcq_th_lo = RCQ_TH_LO(sc); pause->rcq_th_hi = RCQ_TH_HI(sc); /* validate rings have enough 
entries to cross high thresholds */ if (sc->dropless_fc && pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { BLOGW(sc, "rx bd ring threshold limit\n"); } if (sc->dropless_fc && pause->rcq_th_hi + FW_PREFETCH_CNT > RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { BLOGW(sc, "rcq ring threshold limit\n"); } pause->pri_map = 1; } /* rxq setup */ rxq_init->dscr_map = fp->rx_dma.paddr; rxq_init->sge_map = fp->rx_sge_dma.paddr; rxq_init->rcq_map = fp->rcq_dma.paddr; rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); /* * This should be a maximum number of data bytes that may be * placed on the BD (not including paddings). */ rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; rxq_init->sge_buf_sz = sge_sz; rxq_init->max_sges_pkt = max_sge; rxq_init->rss_engine_id = SC_FUNC(sc); rxq_init->mcast_engine_id = SC_FUNC(sc); /* * Maximum number or simultaneous TPA aggregation for this Queue. * For PF Clients it should be the maximum available number. * VF driver(s) may want to define it to a smaller value. */ rxq_init->max_tpa_queues = MAX_AGG_QS(sc); rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; rxq_init->fw_sb_id = fp->fw_sb_id; rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; /* * configure silent vlan removal * if multi function mode is afex, then mask default vlan */ if (IS_MF_AFEX(sc)) { rxq_init->silent_removal_value = sc->devinfo.mf_info.afex_def_vlan_tag; rxq_init->silent_removal_mask = EVL_VLID_MASK; } } static void bxe_pf_tx_q_prep(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_txq_setup_params *txq_init, uint8_t cos) { /* * XXX If multiple CoS is ever supported then each fastpath structure * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
* fp->txdata[cos]->tx_dma.paddr; */ txq_init->dscr_map = fp->tx_dma.paddr; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; /* * set the TSS leading client id for TX classification to the * leading RSS client id */ txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); } /* * This function performs 2 steps in a queue state machine: * 1) RESET->INIT * 2) INIT->SETUP */ static int bxe_setup_queue(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { struct ecore_queue_state_params q_params = { NULL }; struct ecore_queue_setup_params *setup_params = &q_params.params.setup; int rc; BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; /* we want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* prepare the INIT parameters */ bxe_pf_q_prep_init(sc, fp, &q_params.params.init); /* Set the command */ q_params.cmd = ECORE_Q_CMD_INIT; /* Change the state to INIT */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); return (rc); } BLOGD(sc, DBG_LOAD, "init complete\n"); /* now move the Queue to the SETUP state */ memset(setup_params, 0, sizeof(*setup_params)); /* set Queue flags */ setup_params->flags = bxe_get_q_flags(sc, fp, leading); /* set general SETUP parameters */ bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, FIRST_TX_COS_INDEX); bxe_pf_rx_q_prep(sc, fp, &setup_params->pause_params, &setup_params->rxq_params); bxe_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); /* Set the command */ q_params.cmd = ECORE_Q_CMD_SETUP; /* change the state to SETUP */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); return (rc); } return (rc); } static int bxe_setup_leading(struct bxe_softc *sc) { return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); } static int bxe_config_rss_pf(struct bxe_softc *sc, struct ecore_rss_config_obj *rss_obj, uint8_t config_hash) { struct ecore_config_rss_params params = { NULL }; int i; /* * Although RSS is meaningless when there is a single HW queue we * still need it enabled in order to have HW Rx hash generated.
*/ params.rss_obj = rss_obj; bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); /* RSS configuration */ bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); if (rss_obj->udp_rss_v4) { bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); } if (rss_obj->udp_rss_v6) { bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); } /* Hash bits */ params.rss_result_mask = MULTI_MASK; memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); if (config_hash) { /* RSS keys */ for (i = 0; i < sizeof(params.rss_key) / 4; i++) { params.rss_key[i] = arc4random(); } bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); } return (ecore_config_rss(sc, &params)); } static int bxe_config_rss_eth(struct bxe_softc *sc, uint8_t config_hash) { return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); } static int bxe_init_rss_pf(struct bxe_softc *sc) { uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); int i; /* * Prepare the initial contents of the indirection table if * RSS is enabled */ for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { sc->rss_conf_obj.ind_table[i] = (sc->fp->cl_id + (i % num_eth_queues)); } if (sc->udp_rss) { sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; } /* * For 57710 and 57711 SEARCHER configuration (rss_keys) is * per-port, so if explicit configuration is needed, do it only * for a PMF. * * For 57712 and newer it's a per-function configuration. */ return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); } static int bxe_set_mac_one(struct bxe_softc *sc, uint8_t *mac, struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, unsigned long *ramrod_flags) { struct ecore_vlan_mac_ramrod_params ramrod_param; int rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* fill in general parameters */ ramrod_param.vlan_mac_obj = obj; ramrod_param.ramrod_flags = *ramrod_flags; /* fill a user request section if needed */ if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); /* Set the command: ADD or DEL */ ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL; } rc = ecore_config_vlan_mac(sc, &ramrod_param); if (rc == ECORE_EXISTS) { BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); /* do not treat adding same MAC as error */ rc = 0; } else if (rc < 0) { BLOGE(sc, "%s MAC failed (%d)\n", (set ?
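/*
 * Worked example (illustration only): bxe_init_rss_pf() above fills the
 * indirection table round-robin across the ethernet queues. With a
 * hypothetical num_eth_queues = 4 and fp[0].cl_id = 16:
 *
 *   ind_table[i] = 16 + (i % 4)  =>  16, 17, 18, 19, 16, 17, ...
 *
 * so RSS hash slot i steers packets to client (16 + (i % 4)), and the hash
 * key words are simply drawn from arc4random() when config_hash is set.
 */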
"Set" : "Delete"), rc); } return (rc); } static int bxe_set_eth_mac(struct bxe_softc *sc, uint8_t set) { unsigned long ramrod_flags = 0; BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ return (bxe_set_mac_one(sc, sc->link_params.mac_addr, &sc->sp_objs->mac_obj, set, ECORE_ETH_MAC, &ramrod_flags)); } static int bxe_get_cur_phy_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = 0; if (sc->link_params.num_phys <= 1) { return (ELINK_INT_PHY); } if (sc->link_vars.link_up) { sel_phy_idx = ELINK_EXT_PHY1; /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && (sc->link_params.phy[ELINK_EXT_PHY2].supported & ELINK_SUPPORTED_FIBRE)) sel_phy_idx = ELINK_EXT_PHY2; } else { switch (elink_phy_selection(&sc->link_params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY2; break; } } return (sel_phy_idx); } static int bxe_get_link_cfg_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); /* * The selected activated PHY is always after swapping (in case PHY * swapping is enabled). So when swapping is enabled, we need to reverse * the configuration */ if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { if (sel_phy_idx == ELINK_EXT_PHY1) sel_phy_idx = ELINK_EXT_PHY2; else if (sel_phy_idx == ELINK_EXT_PHY2) sel_phy_idx = ELINK_EXT_PHY1; } return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); } static void bxe_set_requested_fc(struct bxe_softc *sc) { /* * Initialize link parameters structure variables * It is recommended to turn off RX FC for jumbo frames * for better performance */ if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; } else { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; } } static void bxe_calc_fc_adv(struct bxe_softc *sc) { uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); switch (sc->link_vars.ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: default: sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; break; } } static uint16_t bxe_get_mf_speed(struct bxe_softc *sc) { uint16_t line_speed = sc->link_vars.line_speed; if (IS_MF(sc)) { uint16_t maxCfg = bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); /* calculate the current MAX line speed limit for the MF devices */ if (IS_MF_SI(sc)) { line_speed = (line_speed * maxCfg) / 100; } else { /* SD mode */ uint16_t vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) { line_speed = vn_max_rate; } } } return (line_speed); } static void bxe_fill_report_data(struct bxe_softc *sc, struct bxe_link_report_data *data) { uint16_t line_speed = bxe_get_mf_speed(sc); memset(data, 0, sizeof(*data)); /* fill the report data with the effective line speed */ data->line_speed = line_speed; /* Link is down */ if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, 
&data->link_report_flags); } /* Full DUPLEX */ if (sc->link_vars.duplex == DUPLEX_FULL) { bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); } /* Rx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); } /* Tx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); } } /* report link status to OS, should be called under phy_lock */ static void bxe_link_report_locked(struct bxe_softc *sc) { struct bxe_link_report_data cur_data; /* reread mf_cfg */ if (IS_PF(sc) && !CHIP_IS_E1(sc)) { bxe_read_mf_cfg(sc); } /* Read the current link report info */ bxe_fill_report_data(sc, &cur_data); /* Don't report link down or exactly the same link status twice */ if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &sc->last_reported_link.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags))) { return; } sc->link_cnt++; /* report new link params and remember the state for the next time */ memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags)) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); BLOGI(sc, "NIC Link is Down\n"); } else { const char *duplex; const char *flow; if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, &cur_data.link_report_flags)) { duplex = "full"; } else { duplex = "half"; } /* * Handle the FC at the end so that only these flags would be * possibly set. This way we may easily check if there is no FC * enabled. */ if (cur_data.link_report_flags) { if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive & transmit"; } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive"; } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - transmit"; } else { flow = "none"; /* possible? 
*/ } } else { flow = "none"; } if_link_state_change(sc->ifp, LINK_STATE_UP); BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", cur_data.line_speed, duplex, flow); } } static void bxe_link_report(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); bxe_link_report_locked(sc); bxe_release_phy_lock(sc); } static void bxe_link_status_update(struct bxe_softc *sc) { if (sc->state != BXE_STATE_OPEN) { return; } if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { elink_link_status_update(&sc->link_params, &sc->link_vars); } else { sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full | ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full | ELINK_SUPPORTED_1000baseT_Full | ELINK_SUPPORTED_2500baseX_Full | ELINK_SUPPORTED_10000baseT_Full | ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE | ELINK_SUPPORTED_Autoneg | ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause); sc->port.advertising[0] = sc->port.supported[0]; sc->link_params.sc = sc; sc->link_params.port = SC_PORT(sc); sc->link_params.req_duplex[0] = DUPLEX_FULL; sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; sc->link_params.req_line_speed[0] = SPEED_10000; sc->link_params.speed_cap_mask[0] = 0x7f0000; sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; if (CHIP_REV_IS_FPGA(sc)) { sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; sc->link_vars.line_speed = ELINK_SPEED_1000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); } else { sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; sc->link_vars.line_speed = ELINK_SPEED_10000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); } sc->link_vars.link_up = 1; sc->link_vars.duplex = DUPLEX_FULL; sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; if (IS_PF(sc)) { REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } else { bxe_stats_handle(sc, STATS_EVENT_STOP); } bxe_link_report(sc); } else { bxe_link_report(sc); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } } static int bxe_initial_phy_init(struct bxe_softc *sc, int load_mode) { int rc, cfg_idx = bxe_get_link_cfg_idx(sc); uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; struct elink_params *lp = &sc->link_params; bxe_set_requested_fc(sc); if (CHIP_REV_IS_SLOW(sc)) { uint32_t bond = CHIP_BOND_ID(sc); uint32_t feat = 0; if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } else if (bond & 0x4) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } } else if (bond & 0x8) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } } /* disable EMAC for E3 and above */ if (bond & 0x2) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } sc->link_params.feature_config_flags |= feat; } bxe_acquire_phy_lock(sc); if (load_mode == LOAD_DIAG) { lp->loopback_mode = ELINK_LOOPBACK_XGXS; /* Prefer doing PHY loopback at 10G speed, if possible */ if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { if (lp->speed_cap_mask[cfg_idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; } else { lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; } } } if (load_mode == LOAD_LOOPBACK_EXT) { lp->loopback_mode = ELINK_LOOPBACK_EXT; } rc = 
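/*
 * Illustrative sketch (not part of the driver): the suppression test in
 * bxe_link_report_locked() above is equivalent to this predicate, with a
 * hypothetical helper name; a report is pushed only when the state changed
 * and is not a repeated link-down:
 *
 *   static int
 *   link_report_needed(struct bxe_link_report_data *cur,
 *                      struct bxe_link_report_data *last)
 *   {
 *       if (memcmp(cur, last, sizeof(*cur)) == 0)
 *           return (0);   // identical to the previous report
 *       if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &last->link_report_flags) &&
 *           bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur->link_report_flags))
 *           return (0);   // still down, do not repeat
 *       return (1);
 *   }
 */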
elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } if (!CHIP_REV_IS_SLOW(sc)) { bxe_periodic_start(sc); } sc->link_params.req_line_speed[cfg_idx] = req_line_speed; return (rc); } /* must be called under IF_ADDR_LOCK */ static int bxe_set_mc_list(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam = { NULL }; int rc = 0; int mc_count = 0; int mcnt, i; struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; unsigned char *mta; if_t ifp = sc->ifp; mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */ if (!mc_count) return (0); mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count, M_DEVBUF, M_NOWAIT); if(mta == NULL) { BLOGE(sc, "Failed to allocate temp mcast list\n"); return (-1); } bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); mc_mac_start = mc_mac; if (!mc_mac) { free(mta, M_DEVBUF); BLOGE(sc, "Failed to allocate temp mcast list\n"); return (-1); } bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); /* mta and mcnt not expected to be different */ if_multiaddr_array(ifp, mta, &mcnt, mc_count); rparam.mcast_obj = &sc->mcast_obj; ECORE_LIST_INIT(&rparam.mcast_list); for(i=0; i< mcnt; i++) { mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); BLOGD(sc, DBG_LOAD, "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); mc_mac++; } rparam.mcast_list_len = mc_count; BXE_MCAST_LOCK(sc); /* first, clear all configured multicast MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); BXE_MCAST_UNLOCK(sc); free(mc_mac_start, M_DEVBUF); free(mta, M_DEVBUF); return (rc); } /* Now add the new MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); if (rc < 0) { BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); } BXE_MCAST_UNLOCK(sc); free(mc_mac_start, M_DEVBUF); free(mta, M_DEVBUF); return (rc); } static int bxe_set_uc_list(struct bxe_softc *sc) { if_t ifp = sc->ifp; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; struct ifaddr *ifa; unsigned long ramrod_flags = 0; int rc; #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_addr_rlock(ifp); #endif /* first schedule a cleanup up of old configuration */ rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_addr_runlock(ifp); #endif return (rc); } ifa = if_getifaddr(ifp); /* XXX Is this structure */ while (ifa) { if (ifa->ifa_addr->sa_family != AF_LINK) { ifa = TAILQ_NEXT(ifa, ifa_link); continue; } rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); if (rc == -EEXIST) { BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); /* do not treat adding same MAC as an error */ rc = 0; } else if (rc < 0) { BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_addr_runlock(ifp); #endif return (rc); } ifa = TAILQ_NEXT(ifa, ifa_link); } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_addr_runlock(ifp); #endif /* Execute 
the pending commands */ bit_set(&ramrod_flags, RAMROD_CONT); return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, ECORE_UC_LIST_MAC, &ramrod_flags)); } static void bxe_set_rx_mode(struct bxe_softc *sc) { if_t ifp = sc->ifp; uint32_t rx_mode = BXE_RX_MODE_NORMAL; if (sc->state != BXE_STATE_OPEN) { BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); return; } BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); if (if_getflags(ifp) & IFF_PROMISC) { rx_mode = BXE_RX_MODE_PROMISC; } else if ((if_getflags(ifp) & IFF_ALLMULTI) || ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && CHIP_IS_E1(sc))) { rx_mode = BXE_RX_MODE_ALLMULTI; } else { if (IS_PF(sc)) { /* some multicasts */ if (bxe_set_mc_list(sc) < 0) { rx_mode = BXE_RX_MODE_ALLMULTI; } if (bxe_set_uc_list(sc) < 0) { rx_mode = BXE_RX_MODE_PROMISC; } } } sc->rx_mode = rx_mode; /* schedule the rx_mode command */ if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); return; } if (IS_PF(sc)) { bxe_set_storm_rx_mode(sc); } } /* update flags in shmem */ static void bxe_update_drv_flags(struct bxe_softc *sc, uint32_t flags, uint32_t set) { uint32_t drv_flags; if (SHMEM2_HAS(sc, drv_flags)) { bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); drv_flags = SHMEM2_RD(sc, drv_flags); if (set) { SET_FLAGS(drv_flags, flags); } else { RESET_FLAGS(drv_flags, flags); } SHMEM2_WR(sc, drv_flags, drv_flags); BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); } } /* periodic timer callout routine, only runs when the interface is up */ static void bxe_periodic_callout_func(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; int i; if (!BXE_CORE_TRYLOCK(sc)) { /* just bail and try again next time */ if ((sc->state == BXE_STATE_OPEN) && (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } return; } if ((sc->state != BXE_STATE_OPEN) || (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); BXE_CORE_UNLOCK(sc); return; } /* Check for TX timeouts on any fastpath. */ FOR_EACH_QUEUE(sc, i) { if (bxe_watchdog(sc, &sc->fp[i]) != 0) { /* Ruh-Roh, chip was reset! */ break; } } if (!CHIP_REV_IS_SLOW(sc)) { /* * This barrier is needed to ensure the ordering between the writing * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and * the reading here. */ mb(); if (sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_period_func(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } } if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t drv_pulse; uint32_t mcp_pulse; ++sc->fw_drv_pulse_wr_seq; sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; drv_pulse = sc->fw_drv_pulse_wr_seq; bxe_drv_pulse(sc); mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); /* * The delta between driver pulse and mcp response should * be 1 (before mcp response) or 0 (after mcp response). */ if ((drv_pulse != mcp_pulse) && (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { /* someone lost a heartbeat... 
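 *
 * Worked example (illustration only), assuming both *_PULSE_SEQ_MASKs are
 * 0x7fff: the check above accepts exactly two cases, including wrap-around:
 *
 *   drv_pulse = 0x0005, mcp_pulse = 0x0005  ->  ok (MCP has caught up)
 *   drv_pulse = 0x0000, mcp_pulse = 0x7fff  ->  ok ((0x7fff + 1) & mask == 0)
 *   drv_pulse = 0x0007, mcp_pulse = 0x0005  ->  heartbeat lost, logged below
 *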
*/ BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse); } } /* state is BXE_STATE_OPEN */ bxe_stats_handle(sc, STATS_EVENT_UPDATE); BXE_CORE_UNLOCK(sc); if ((sc->state == BXE_STATE_OPEN) && (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } } static void bxe_periodic_start(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } static void bxe_periodic_stop(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); callout_drain(&sc->periodic_callout); } /* start the controller */ static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode) { uint32_t val; int load_code = 0; int i, rc = 0; BXE_CORE_LOCK_ASSERT(sc); BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); sc->state = BXE_STATE_OPENING_WAITING_LOAD; if (IS_PF(sc)) { /* must be called before memory allocation and HW init */ bxe_ilt_set_info(sc); } sc->last_reported_link_state = LINK_STATE_UNKNOWN; bxe_set_fp_rx_buf_size(sc); if (bxe_alloc_fp_buffers(sc) != 0) { BLOGE(sc, "Failed to allocate fastpath memory\n"); sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_fw_stats_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (IS_PF(sc)) { /* set pf load just before approaching the MCP */ bxe_set_pf_load(sc); /* if MCP exists send load request and analyze response */ if (!BXE_NOMCP(sc)) { /* attempt to load pf */ if (bxe_nic_load_request(sc, &load_code) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error1; } /* what did the MCP say? */ if (bxe_nic_load_analyze_req(sc, load_code) != 0) { bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } else { BLOGI(sc, "Device has no MCP!\n"); load_code = bxe_nic_load_no_mcp(sc); } /* mark PMF if applicable */ bxe_nic_load_pmf(sc, load_code); /* Init Function state controlling object */ bxe_init_func_obj(sc); /* Initialize HW */ if (bxe_init_hw(sc, load_code) != 0) { BLOGE(sc, "HW init failed\n"); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); sc->flags |= BXE_NO_PULSE; /* attach interrupts */ if (bxe_interrupt_attach(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } bxe_nic_init(sc, load_code); /* Init per-function objects */ if (IS_PF(sc)) { bxe_init_objs(sc); // XXX bxe_iov_nic_init(sc); /* set AFEX default VLAN tag to an invalid value */ sc->devinfo.mf_info.afex_def_vlan_tag = -1; // XXX bxe_nic_load_afex_dcc(sc, load_code); sc->state = BXE_STATE_OPENING_WAITING_PORT; rc = bxe_func_start(sc); if (rc) { BLOGE(sc, "Function start failed! 
rc = %d\n", rc); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } /* send LOAD_DONE command to MCP */ if (!BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); sc->state = BXE_STATE_ERROR; rc = ENXIO; goto bxe_nic_load_error3; } } rc = bxe_setup_leading(sc); if (rc) { BLOGE(sc, "Setup leading failed! rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); if (rc) { BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } rc = bxe_init_rss_pf(sc); if (rc) { BLOGE(sc, "PF RSS init failed\n"); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } /* XXX VF */ /* now when Clients are configured we are ready to work */ sc->state = BXE_STATE_OPEN; /* Configure a ucast MAC */ if (IS_PF(sc)) { rc = bxe_set_eth_mac(sc, TRUE); } if (rc) { BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } if (sc->port.pmf) { rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); if (rc) { sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; /* start fast path */ /* Initialize Rx filter */ bxe_set_rx_mode(sc); /* start the Tx */ switch (/* XXX load_mode */LOAD_OPEN) { case LOAD_NORMAL: case LOAD_OPEN: break; case LOAD_DIAG: case LOAD_LOOPBACK_EXT: sc->state = BXE_STATE_DIAG; break; default: break; } if (sc->port.pmf) { bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); } else { bxe_link_status_update(sc); } /* start the periodic timer callout */ bxe_periodic_start(sc); if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { /* mark driver is loaded in shmem2 */ val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], (val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2)); } /* wait for all pending SP commands to complete */ if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); bxe_periodic_stop(sc); bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); return (ENXIO); } /* Tell the stack the driver is running! */ if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); return (0); bxe_nic_load_error3: if (IS_PF(sc)) { bxe_int_disable_sync(sc, 1); /* clean out queued objects */ bxe_squeeze_objects(sc); } bxe_interrupt_detach(sc); bxe_nic_load_error2: if (IS_PF(sc) && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } sc->port.pmf = 0; bxe_nic_load_error1: /* clear pf_load status, as it was already set */ if (IS_PF(sc)) { bxe_clear_pf_load(sc); } bxe_nic_load_error0: bxe_free_fw_stats_mem(sc); bxe_free_fp_buffers(sc); bxe_free_mem(sc); return (rc); } static int bxe_init_locked(struct bxe_softc *sc) { int other_engine = SC_PATH(sc) ? 
0 : 1; uint8_t other_load_status, load_status; uint8_t global = FALSE; int rc; BXE_CORE_LOCK_ASSERT(sc); /* check if the driver is already running */ if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); return (0); } bxe_set_power_state(sc, PCI_PM_D0); /* * If parity occurred during the unload, then attentions and/or * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function * loaded on the current engine to complete the recovery. Parity recovery * is only relevant for the PF driver. */ if (IS_PF(sc)) { other_load_status = bxe_get_load_status(sc, other_engine); load_status = bxe_get_load_status(sc, SC_PATH(sc)); if (!bxe_reset_is_done(sc, SC_PATH(sc)) || bxe_chk_parity_attn(sc, &global, TRUE)) { do { /* * If there are attentions and they are in global blocks, set * the GLOBAL_RESET bit regardless of whether it will be this * function that will complete the recovery or not. */ if (global) { bxe_set_reset_global(sc); } /* * Only the first function on the current engine should try * to recover in open. In case of attentions in global blocks * only the first in the chip should try to recover. */ if ((!load_status && (!global || !other_load_status)) && bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { BLOGI(sc, "Recovered during init\n"); break; } /* recovery has failed... */ bxe_set_power_state(sc, PCI_PM_D3hot); sc->recovery_state = BXE_RECOVERY_FAILED; BLOGE(sc, "Recovery flow hasn't properly " "completed yet, try again later. " "If you still see this message after a " "few retries then power cycle is required.\n"); rc = ENXIO; goto bxe_init_locked_done; } while (0); } } sc->recovery_state = BXE_RECOVERY_DONE; rc = bxe_nic_load(sc, LOAD_OPEN); bxe_init_locked_done: if (rc) { /* Tell the stack the driver is NOT running! */ BLOGE(sc, "Initialization failed, " "stack notified driver is NOT running!\n"); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); } return (rc); } static int bxe_stop_locked(struct bxe_softc *sc) { BXE_CORE_LOCK_ASSERT(sc); return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); } /* * Handles controller initialization when called from an unlocked routine. * ifconfig calls this function. * * Returns: * void */ static void bxe_init(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BXE_CORE_LOCK(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } static int bxe_init_ifnet(struct bxe_softc *sc) { if_t ifp; int capabilities; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&sc->ifmedia, IFM_IMASK, bxe_ifmedia_update, bxe_ifmedia_status); /* set the default interface values */ ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ?
*/ /* allocate the ifnet structure */ if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { BLOGE(sc, "Interface allocation failed!\n"); return (ENXIO); } if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, bxe_ioctl); if_setstartfn(ifp, bxe_tx_start); if_setgetcounterfn(ifp, bxe_get_counter); #if __FreeBSD_version >= 800000 if_settransmitfn(ifp, bxe_tx_mq_start); if_setqflushfn(ifp, bxe_mq_flush); #endif #ifdef FreeBSD8_0 if_settimer(ifp, 0); #endif if_setinitfn(ifp, bxe_init); if_setmtu(ifp, sc->mtu); if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); capabilities = #if __FreeBSD_version < 700000 (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO); #else (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_WOL_MAGIC); #endif if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ if_setbaudrate(ifp, IF_Gbps(10)); /* XXX */ if_setsendqlen(ifp, sc->tx_ring_size); if_setsendqready(ifp); /* XXX */ sc->ifp = ifp; /* attach to the Ethernet interface list */ ether_ifattach(ifp, sc->link_params.mac_addr); return (0); } static void bxe_deallocate_bars(struct bxe_softc *sc) { int i; for (i = 0; i < MAX_BARS; i++) { if (sc->bar[i].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->bar[i].rid, sc->bar[i].resource); BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", i, PCIR_BAR(i)); } } } static int bxe_allocate_bars(struct bxe_softc *sc) { u_int flags; int i; memset(sc->bar, 0, sizeof(sc->bar)); for (i = 0; i < MAX_BARS; i++) { /* memory resources reside at BARs 0, 2, 4 */ /* Run `pciconf -lb` to see mappings */ if ((i != 0) && (i != 2) && (i != 4)) { continue; } sc->bar[i].rid = PCIR_BAR(i); flags = RF_ACTIVE; if (i == 0) { flags |= RF_SHAREABLE; } if ((sc->bar[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->bar[i].rid, flags)) == NULL) { return (0); } sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", i, PCIR_BAR(i), (void *)rman_get_start(sc->bar[i].resource), (void *)rman_get_end(sc->bar[i].resource), rman_get_size(sc->bar[i].resource), (void *)sc->bar[i].kva); } return (0); } static void bxe_get_function_num(struct bxe_softc *sc) { uint32_t val = 0; /* * Read the ME register to get the function number. The ME register * holds the relative-function number and absolute-function number. The * absolute-function number appears only in E2 and above. Before that * these bits always contained zero, therefore we cannot blindly use them. 
*/ val = REG_RD(sc, BAR_ME_REGISTER); sc->pfunc_rel = (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); sc->path_id = (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); } else { sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); } BLOGD(sc, DBG_LOAD, "Relative function %d, Absolute function %d, Path %d\n", sc->pfunc_rel, sc->pfunc_abs, sc->path_id); } static uint32_t bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) { uint32_t shmem2_size; uint32_t offset; uint32_t mf_cfg_offset_value; /* Non 57712 */ offset = (SHMEM_RD(sc, func_mb) + (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); /* 57712 plus */ if (sc->devinfo.shmem2_base != 0) { shmem2_size = SHMEM2_RD(sc, size); if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { offset = mf_cfg_offset_value; } } } return (offset); } static uint32_t bxe_pcie_capability_read(struct bxe_softc *sc, int reg, int width) { int pcie_reg; /* ensure PCIe capability is enabled */ if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { if (pcie_reg != 0) { BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); return (pci_read_config(sc->dev, (pcie_reg + reg), width)); } } BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); return (0); } static uint8_t bxe_is_pcie_pending(struct bxe_softc *sc) { return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & PCIM_EXP_STA_TRANSACTION_PND); } /* * Walk the PCI capabilities list for the device to find what features are * supported. These capabilities may be enabled/disabled by firmware so it's * best to walk the list rather than make assumptions. */ static void bxe_probe_pci_caps(struct bxe_softc *sc) { uint16_t link_status; int reg; /* check if PCI Power Management is enabled */ if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; } } link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); /* handle PCIe 2.0 workarounds for 57710 */ if (CHIP_IS_E1(sc)) { /* workaround for 57710 errata E4_57710_27462 */ sc->devinfo.pcie_link_speed = (REG_RD(sc, 0x3d04) & (1 << 24)) ?
2 : 1; /* workaround for 57710 errata E4_57710_27488 */ sc->devinfo.pcie_link_width = ((link_status & PCIM_LINK_STA_WIDTH) >> 4); if (sc->devinfo.pcie_link_speed > 1) { sc->devinfo.pcie_link_width = ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; } } else { sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); sc->devinfo.pcie_link_width = ((link_status & PCIM_LINK_STA_WIDTH) >> 4); } BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; /* check if MSI capability is enabled */ if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; } } /* check if MSI-X capability is enabled */ if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; } } } static int bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val; /* get the outer vlan if we're in switch-dependent mode */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); mf_info->ext_id = (uint16_t)val; mf_info->multi_vnics_mode = 1; if (!VALID_OVLAN(mf_info->ext_id)) { BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); return (1); } /* get the capabilities */ if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_ISCSI) { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_FCOE) { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; } else { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; } mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static uint32_t bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) { uint32_t retval = 0; uint32_t val; val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { retval |= MF_PROTO_SUPPORT_ETHERNET; } if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { retval |= MF_PROTO_SUPPORT_ISCSI; } if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { retval |= MF_PROTO_SUPPORT_FCOE; } } return (retval); } static int bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val; /* * There is no outer vlan if we're in switch-independent mode. * If the mac is valid then assume multi-function. */ val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
2 : 4; return (0); } static int bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t e1hov_tag; uint32_t func_config; uint32_t niv_config; mf_info->multi_vnics_mode = 1; e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); mf_info->ext_id = (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> FUNC_MF_CFG_E1HOV_TAG_SHIFT); mf_info->default_vlan = (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> FUNC_MF_CFG_AFEX_VLAN_SHIFT); mf_info->niv_allowed_priorities = (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); mf_info->niv_default_cos = (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); mf_info->afex_vlan_mode = ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); mf_info->niv_mba_enabled = ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static int bxe_check_valid_mf_cfg(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t mf_cfg1; uint32_t mf_cfg2; uint32_t ovlan1; uint32_t ovlan2; uint8_t i, j; BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", SC_PORT(sc)); BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", mf_info->mf_config[SC_VN(sc)]); BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", mf_info->multi_vnics_mode); BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", mf_info->vnics_per_port); BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", mf_info->ext_id); BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", mf_info->min_bw[0], mf_info->min_bw[1], mf_info->min_bw[2], mf_info->min_bw[3]); BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", mf_info->max_bw[0], mf_info->max_bw[1], mf_info->max_bw[2], mf_info->max_bw[3]); BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", sc->mac_addr_str); /* various MF mode sanity checks... */ if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { BLOGE(sc, "Enumerated function %d is marked as hidden\n", SC_PORT(sc)); return (1); } if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", mf_info->vnics_per_port, mf_info->multi_vnics_mode); return (1); } if (mf_info->mf_mode == MULTI_FUNCTION_SD) { /* vnic id > 0 must have valid ovlan in switch-dependent mode */ if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", SC_VN(sc), OVLAN(sc)); return (1); } if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", mf_info->multi_vnics_mode, OVLAN(sc)); return (1); } /* * Verify that all functions are in either MF or SF mode. If MF, make * sure that all non-hidden functions have a valid ovlan. If SF, * make sure that all non-hidden functions have an invalid ovlan.
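The scans below lean on this family's absolute-function numbering, in which the functions sharing a port sit two apart (port 0 hosts functions 0, 2, 4, 6 and port 1 hosts 1, 3, 5, 7); that is why the inner loop starts at i + 2 and steps by 2. A standalone sketch of the pairwise duplicate scan, taking that numbering as an assumption (names invented):

// Sketch: pairwise duplicate check across the functions of one port.
static int
ovlan_dup_example(const uint16_t *ovlan, int first_func, int max_func)
{
    int i, j;

    for (i = first_func; i < max_func; i += 2) {    // funcs on this port
        for (j = i + 2; j < max_func; j += 2) {     // later funcs, same port
            if (ovlan[i] == ovlan[j])
                return (1);                         // duplicate found
        }
    }
    return (0);
}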
*/ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { BLOGE(sc, "mf_mode=SD function %d MF config " "mismatch, multi_vnics_mode=%d ovlan=%d\n", i, mf_info->multi_vnics_mode, ovlan1); return (1); } } /* Verify all funcs on the same port each have a different ovlan. */ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); /* iterate from the next function on the port to the max func */ for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan1) && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan2) && (ovlan1 == ovlan2)) { BLOGE(sc, "mf_mode=SD functions %d and %d " "have the same ovlan (%d)\n", i, j, ovlan1); return (1); } } } } /* MULTI_FUNCTION_SD */ return (0); } static int bxe_get_mf_cfg_info(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val, mac_upper; uint8_t i, vnic; /* initialize mf_info defaults */ mf_info->vnics_per_port = 1; mf_info->multi_vnics_mode = FALSE; mf_info->path_has_ovlan = FALSE; mf_info->mf_mode = SINGLE_FUNCTION; if (!CHIP_IS_MF_CAP(sc)) { return (0); } if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { BLOGE(sc, "Invalid mf_cfg_base!\n"); return (1); } /* get the MF mode (switch dependent / independent / single-function) */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); /* check for legal upper mac bytes */ if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SI; } else { BLOGE(sc, "Invalid config for Switch Independent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: /* get outer vlan configuration */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SD; } else { BLOGE(sc, "Invalid config for Switch Dependent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ return (0); case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: /* * Mark MF mode as NIV if MCP version includes NPAR-SD support * and the MAC address is valid. */ mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); if ((SHMEM2_HAS(sc, afex_driver_support)) && (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { mf_info->mf_mode = MULTI_FUNCTION_AFEX; } else { BLOGE(sc, "Invalid config for AFEX mode\n"); } break; default: BLOGE(sc, "Unknown MF mode (0x%08x)\n", (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); return (1); } /* set path mf_mode (which could be different than function mf_mode) */ if (mf_info->mf_mode == MULTI_FUNCTION_SD) { mf_info->path_has_ovlan = TRUE; } else if (mf_info->mf_mode == SINGLE_FUNCTION) { /* * Decide on path multi vnics mode. 
If we're not in MF mode and in * 4-port mode, this is good enough to check vnic-0 of the other port * on the same path */ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { uint8_t other_port = !(PORT_ID(sc) & 1); uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0; } } if (mf_info->mf_mode == SINGLE_FUNCTION) { /* invalid MF config */ if (SC_VN(sc) >= 1) { BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); return (1); } return (0); } /* get the MF configuration */ mf_info->mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); switch (mf_info->mf_mode) { case MULTI_FUNCTION_SD: bxe_get_shmem_mf_cfg_info_sd(sc); break; case MULTI_FUNCTION_SI: bxe_get_shmem_mf_cfg_info_si(sc); break; case MULTI_FUNCTION_AFEX: bxe_get_shmem_mf_cfg_info_niv(sc); break; default: BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", mf_info->mf_mode); return (1); } /* get the congestion management parameters */ vnic = 0; FOREACH_ABS_FUNC_IN_PORT(sc, i) { /* get min/max bw */ val = MFCFG_RD(sc, func_mf_config[i].config); mf_info->min_bw[vnic] = ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); mf_info->max_bw[vnic] = ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); vnic++; } return (bxe_check_valid_mf_cfg(sc)); } static int bxe_get_shmem_info(struct bxe_softc *sc) { int port; uint32_t mac_hi, mac_lo, val; port = SC_PORT(sc); mac_hi = mac_lo = 0; sc->link_params.sc = sc; sc->link_params.port = port; /* get the hardware config info */ sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); sc->devinfo.hw_config2 = SHMEM_RD(sc, dev_info.shared_hw_config.config2); sc->link_params.hw_led_mode = ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT); /* get the port feature config */ sc->port.config = SHMEM_RD(sc, dev_info.port_feature_config[port].config); /* get the link params */ sc->link_params.speed_cap_mask[0] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); sc->link_params.speed_cap_mask[1] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); /* get the lane config */ sc->link_params.lane_config = SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); /* get the link config */ val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); sc->port.link_config[ELINK_INT_PHY] = val; sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); sc->port.link_config[ELINK_EXT_PHY1] = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); /* get the override preemphasis flag and enable it or turn it off */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } else { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } /* get the initial value of the link params */ sc->link_params.multi_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); /* get external phy info */ sc->port.ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); /* get the multifunction configuration */ bxe_get_mf_cfg_info(sc); /* get the mac address */ if (IS_MF(sc)) { mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); } else { mac_hi =
SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); } if ((mac_lo == 0) && (mac_hi == 0)) { *sc->mac_addr_str = 0; BLOGE(sc, "No Ethernet address programmed!\n"); } else { sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), "%02x:%02x:%02x:%02x:%02x:%02x", sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); } return (0); } static void bxe_get_tunable_params(struct bxe_softc *sc) { /* sanity checks */ if ((bxe_interrupt_mode != INTR_MODE_INTX) && (bxe_interrupt_mode != INTR_MODE_MSI) && (bxe_interrupt_mode != INTR_MODE_MSIX)) { BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); bxe_interrupt_mode = INTR_MODE_MSIX; } if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); bxe_queue_count = 0; } if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { if (bxe_max_rx_bufs == 0) { bxe_max_rx_bufs = RX_BD_USABLE; } else { BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); bxe_max_rx_bufs = 2048; } } if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); bxe_hc_rx_ticks = 25; } if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); bxe_hc_tx_ticks = 50; } if (bxe_max_aggregation_size == 0) { bxe_max_aggregation_size = TPA_AGG_SIZE; } if (bxe_max_aggregation_size > 0xffff) { BLOGW(sc, "invalid max_aggregation_size (%d)\n", bxe_max_aggregation_size); bxe_max_aggregation_size = TPA_AGG_SIZE; } if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); bxe_mrrs = -1; } if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); bxe_autogreeen = 0; } if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); bxe_udp_rss = 0; } /* pull in user settings */ sc->interrupt_mode = bxe_interrupt_mode; sc->max_rx_bufs = bxe_max_rx_bufs; sc->hc_rx_ticks = bxe_hc_rx_ticks; sc->hc_tx_ticks = bxe_hc_tx_ticks; sc->max_aggregation_size = bxe_max_aggregation_size; sc->mrrs = bxe_mrrs; sc->autogreeen = bxe_autogreeen; sc->udp_rss = bxe_udp_rss; if (bxe_interrupt_mode == INTR_MODE_INTX) { sc->num_queues = 1; } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ sc->num_queues = min((bxe_queue_count ? 
bxe_queue_count : mp_ncpus), MAX_RSS_CHAINS); if (sc->num_queues > mp_ncpus) { sc->num_queues = mp_ncpus; } } BLOGD(sc, DBG_LOAD, "User Config: " "debug=0x%lx " "interrupt_mode=%d " "queue_count=%d " "hc_rx_ticks=%d " "hc_tx_ticks=%d " "rx_budget=%d " "max_aggregation_size=%d " "mrrs=%d " "autogreeen=%d " "udp_rss=%d\n", bxe_debug, sc->interrupt_mode, sc->num_queues, sc->hc_rx_ticks, sc->hc_tx_ticks, bxe_rx_budget, sc->max_aggregation_size, sc->mrrs, sc->autogreeen, sc->udp_rss); } static void bxe_media_detect(struct bxe_softc *sc) { uint32_t phy_idx = bxe_get_cur_phy_idx(sc); switch (sc->link_params.phy[phy_idx].media_type) { case ELINK_ETH_PHY_SFPP_10G_FIBER: case ELINK_ETH_PHY_XFP_FIBER: BLOGI(sc, "Found 10Gb Fiber media.\n"); sc->media = IFM_10G_SR; break; case ELINK_ETH_PHY_SFP_1G_FIBER: BLOGI(sc, "Found 1Gb Fiber media.\n"); sc->media = IFM_1000_SX; break; case ELINK_ETH_PHY_KR: case ELINK_ETH_PHY_CX4: BLOGI(sc, "Found 10GBase-CX4 media.\n"); sc->media = IFM_10G_CX4; break; case ELINK_ETH_PHY_DA_TWINAX: BLOGI(sc, "Found 10Gb Twinax media.\n"); sc->media = IFM_10G_TWINAX; break; case ELINK_ETH_PHY_BASE_T: if (sc->link_params.speed_cap_mask[0] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { BLOGI(sc, "Found 10GBase-T media.\n"); sc->media = IFM_10G_T; } else { BLOGI(sc, "Found 1000Base-T media.\n"); sc->media = IFM_1000_T; } break; case ELINK_ETH_PHY_NOT_PRESENT: BLOGI(sc, "Media not present.\n"); sc->media = 0; break; case ELINK_ETH_PHY_UNSPECIFIED: default: BLOGI(sc, "Unknown media!\n"); sc->media = 0; break; } } #define GET_FIELD(value, fname) \ (((value) & (fname##_MASK)) >> (fname##_SHIFT)) #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) static int bxe_get_igu_cam_info(struct bxe_softc *sc) { int pfid = SC_FUNC(sc); int igu_sb_id; uint32_t val; uint8_t fid, igu_sb_cnt = 0; sc->igu_base_sb = 0xff; if (CHIP_INT_MODE_IS_BC(sc)) { int vn = SC_VN(sc); igu_sb_cnt = sc->igu_sb_cnt; sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * FP_SB_MAX_E1x); sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn)); return (0); } /* IGU in normal mode - read CAM */ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { continue; } fid = IGU_FID(val); if ((fid & IGU_FID_ENCODE_IS_PF)) { if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { continue; } if (IGU_VEC(val) == 0) { /* default status block */ sc->igu_dsb_id = igu_sb_id; } else { if (sc->igu_base_sb == 0xff) { sc->igu_base_sb = igu_sb_id; } igu_sb_cnt++; } } } /* * Due to the new PF resource allocation by MFW T7.4 and above, it is * possible that the number of CAM entries will not be equal to the value * advertised in PCI. The driver should use the minimal value of the two * as the actual status block count. */ sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); if (igu_sb_cnt == 0) { BLOGE(sc, "CAM configuration error\n"); return (-1); } return (0); } /* * Gather various information from the device config space, the device itself, * shmem, and the user input.
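One worked example of the GET_FIELD() helper defined above, which is all mask-and-shift (the field layout here is invented for the example; only the mechanics match what IGU_FID() and IGU_VEC() do to a CAM word):

// Sketch: GET_FIELD(value, F) expands to ((value) & F_MASK) >> F_SHIFT.
#define EX_FID_MASK   0x000000feU    // invented field occupying bits [7:1]
#define EX_FID_SHIFT  1
// GET_FIELD(0x86, EX_FID) -> (0x86 & 0xfe) >> 1 -> 0x43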
*/ static int bxe_get_device_info(struct bxe_softc *sc) { uint32_t val; int rc; /* Get the data for the device */ sc->devinfo.vendor_id = pci_get_vendor(sc->dev); sc->devinfo.device_id = pci_get_device(sc->dev); sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); /* get the chip revision (chip metal comes from pci config space) */ sc->devinfo.chip_id = sc->link_params.chip_id = (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); /* force 57811 according to MISC register */ if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { if (CHIP_IS_57810(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } else if (CHIP_IS_57810_MF(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } sc->devinfo.chip_id |= 0x1; } BLOGD(sc, DBG_LOAD, "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", sc->devinfo.chip_id, ((sc->devinfo.chip_id >> 16) & 0xffff), ((sc->devinfo.chip_id >> 12) & 0xf), ((sc->devinfo.chip_id >> 4) & 0xff), ((sc->devinfo.chip_id >> 0) & 0xf)); val = (REG_RD(sc, 0x2874) & 0x55); if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1(sc) && val) || (CHIP_IS_E1H(sc) && (val == 0x55))) { sc->flags |= BXE_ONE_PORT_FLAG; BLOGD(sc, DBG_LOAD, "single port device\n"); } /* set the doorbell size */ sc->doorbell_size = (1 << BXE_DB_SHIFT); /* determine whether the device is in 2 port or 4 port mode */ sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ if (CHIP_IS_E2E3(sc)) { /* * Read port4mode_en_ovwr[0]: * If 1, four port mode is in port4mode_en_ovwr[1]. * If 0, four port mode is in port4mode_en[0]. */ val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); if (val & 1) { val = ((val >> 1) & 1); } else { val = REG_RD(sc, MISC_REG_PORT4MODE_EN); } sc->devinfo.chip_port_mode = (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); } /* get the function and path info for the device */ bxe_get_function_num(sc); /* get the shared memory base address */ sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); sc->devinfo.shmem2_base = REG_RD(sc, (SC_PATH(sc) ? 
MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", sc->devinfo.shmem_base, sc->devinfo.shmem2_base); if (!sc->devinfo.shmem_base) { /* this should ONLY prevent upcoming shmem reads */ BLOGI(sc, "MCP not active\n"); sc->flags |= BXE_NO_MCP_FLAG; return (0); } /* make sure the shared memory contents are valid */ val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); return (0); } BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); /* get the bootcode version */ sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); snprintf(sc->devinfo.bc_ver_str, sizeof(sc->devinfo.bc_ver_str), "%d.%d.%d", ((sc->devinfo.bc_ver >> 24) & 0xff), ((sc->devinfo.bc_ver >> 16) & 0xff), ((sc->devinfo.bc_ver >> 8) & 0xff)); BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); /* get the bootcode shmem address */ sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); /* clean indirect addresses as they're not used */ pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); if (IS_PF(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); if (CHIP_IS_E1x(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); } /* * Enable internal target-read (in case we are probed after PF * FLR). Must be done prior to any BAR read access. Only for * 57712 and up */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); } } /* get the nvram size */ val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); sc->devinfo.flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); /* get PCI capabilities */ bxe_probe_pci_caps(sc); bxe_set_power_state(sc, PCI_PM_D0); /* get various configuration parameters from shmem */ bxe_get_shmem_info(sc); if (sc->devinfo.pcie_msix_cap_reg != 0) { val = pci_read_config(sc->dev, (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), 2); sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); } else { sc->igu_sb_cnt = 1; } sc->igu_base_addr = BAR_IGU_INTMEM; /* initialize IGU parameters */ if (CHIP_IS_E1x(sc)) { sc->devinfo.int_block = INT_BLOCK_HC; sc->igu_dsb_id = DEF_SB_IGU_ID; sc->igu_base_sb = 0; } else { sc->devinfo.int_block = INT_BLOCK_IGU; /* do not allow device reset during IGU info processing */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { int tout = 5000; BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { tout--; DELAY(1000); } if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); return (-1); } } if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; } else {
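/*
 * The reset-memories wait above is the usual bounded-poll idiom: cap the
 * iteration count, sleep a fixed quantum per try, and re-read once after
 * the loop so a success on the last try is not misreported as a timeout.
 * A generic sketch of that idiom follows (bxe_poll_reg_clear is an
 * invented helper, not driver API).
 */
#if 0
static int
bxe_poll_reg_clear(struct bxe_softc *sc, uint32_t reg, int tries)
{
    while (tries-- > 0) {
        if (REG_RD(sc, reg) == 0)
            return (0);         /* cleared in time */
        DELAY(1000);            /* 1 ms per attempt */
    }
    /* final re-read: the register may have cleared on the last try */
    return ((REG_RD(sc, reg) == 0) ? 0 : ETIMEDOUT);
}
#endif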
BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); } rc = bxe_get_igu_cam_info(sc); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); if (rc) { return (rc); } } /* * Get base FW non-default (fast path) status block ID. This value is * used to initialize the fw_sb_id saved on the fp/queue structure to * determine the id used by the FW. */ if (CHIP_IS_E1x(sc)) { sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); } else { /* * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of * the same queue are indicated on the same IGU SB). So we prefer * FW and IGU SBs to be the same value. */ sc->base_fw_ndsb = sc->igu_base_sb; } BLOGD(sc, DBG_LOAD, "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", sc->igu_dsb_id, sc->igu_base_sb, sc->igu_sb_cnt, sc->base_fw_ndsb); elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_settings_supported(struct bxe_softc *sc, uint32_t switch_cfg) { uint32_t cfg_size = 0; uint32_t idx; uint8_t port = SC_PORT(sc); /* aggregation of supported attributes of all external phys */ sc->port.supported[0] = 0; sc->port.supported[1] = 0; switch (sc->link_params.num_phys) { case 1: sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; cfg_size = 1; break; case 2: sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; cfg_size = 1; break; case 3: if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } else { sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } cfg_size = 2; break; } if (!(sc->port.supported[0] || sc->port.supported[1])) { BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config2)); return; } if (CHIP_IS_E3(sc)) sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); else { switch (switch_cfg) { case ELINK_SWITCH_CFG_1G: sc->port.phy_addr = REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); break; case ELINK_SWITCH_CFG_10G: sc->port.phy_addr = REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); break; default: BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", sc->port.link_config[0]); return; } } BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); /* mask what we support according to speed_cap_mask per configuration */ for (idx = 0; idx < cfg_size; idx++) { if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full; } if (!(sc->link_params.speed_cap_mask[idx] & 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; } } BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", sc->port.supported[0], sc->port.supported[1]); } static void bxe_link_settings_requested(struct bxe_softc *sc) { uint32_t link_config; uint32_t idx; uint32_t cfg_size = 0; sc->port.advertising[0] = 0; sc->port.advertising[1] = 0; switch (sc->link_params.num_phys) { case 1: case 2: cfg_size = 1; break; case 3: cfg_size = 2; break; } for (idx = 0; idx < cfg_size; idx++) { sc->link_params.req_duplex[idx] = DUPLEX_FULL; link_config = sc->port.link_config[idx]; switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_AUTO: if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] |= sc->port.supported[idx]; if (sc->link_params.phy[ELINK_EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) sc->port.advertising[idx] |= (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full); } else { /* force 10G, no AN */ sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); continue; } break; case PORT_FEATURE_LINK_SPEED_10M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_1G: if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_2_5G: if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 
sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10G_CX4: if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_20G: sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; break; default: BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] = sc->port.supported[idx]; break; } sc->link_params.req_flow_ctrl[idx] = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; } else { bxe_set_requested_fc(sc); } } BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " "req_flow_ctrl=0x%x advertising=0x%x\n", sc->link_params.req_line_speed[idx], sc->link_params.req_duplex[idx], sc->link_params.req_flow_ctrl[idx], sc->port.advertising[idx]); } } static void bxe_get_phy_info(struct bxe_softc *sc) { uint8_t port = SC_PORT(sc); uint32_t config = sc->port.config; uint32_t eee_mode; /* shmem data already read in bxe_get_shmem_info() */ BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " "link_config0=0x%08x\n", sc->link_params.lane_config, sc->link_params.speed_cap_mask[0], sc->port.link_config[0]); bxe_link_settings_supported(sc, sc->link_params.switch_cfg); bxe_link_settings_requested(sc); if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } /* configure link feature according to nvram value */ eee_mode = (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | ELINK_EEE_MODE_ENABLE_LPI | ELINK_EEE_MODE_OUTPUT_TIME); } else { sc->link_params.eee_mode = 0; } /* get the media type */ bxe_media_detect(sc); } static void bxe_get_params(struct bxe_softc *sc) { /* get user tunable params */ bxe_get_tunable_params(sc); /* select the RX and TX ring sizes */ sc->tx_ring_size = TX_BD_USABLE; sc->rx_ring_size = RX_BD_USABLE; /* XXX disable WoL */ sc->wol = 0; } static void bxe_set_modes_bitmap(struct bxe_softc *sc) { uint32_t flags = 0; if (CHIP_REV_IS_FPGA(sc)) { SET_FLAGS(flags, MODE_FPGA); } else if (CHIP_REV_IS_EMUL(sc)) { SET_FLAGS(flags, MODE_EMUL); } else { SET_FLAGS(flags, MODE_ASIC); } if (CHIP_IS_MODE_4_PORT(sc)) { SET_FLAGS(flags, MODE_PORT4); } else { SET_FLAGS(flags, MODE_PORT2); } if (CHIP_IS_E2(sc)) { SET_FLAGS(flags, MODE_E2); 
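/*
 * Each SET_FLAGS() call in this function contributes one orthogonal
 * property bit (ASIC/EMUL/FPGA, port count, chip family, MF flavor,
 * endianness) to the single mode word stored via INIT_MODE_FLAGS()
 * below. Testing a combination later reduces to one mask compare, as in
 * this sketch (helper name invented; the flag names are the ones set
 * here).
 */
#if 0
static inline int
mode_is_4port_e3(uint32_t mode)
{
    return ((mode & (MODE_PORT4 | MODE_E3)) == (MODE_PORT4 | MODE_E3));
}
#endif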
} else if (CHIP_IS_E3(sc)) { SET_FLAGS(flags, MODE_E3); if (CHIP_REV(sc) == CHIP_REV_Ax) { SET_FLAGS(flags, MODE_E3_A0); } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); } } if (IS_MF(sc)) { SET_FLAGS(flags, MODE_MF); switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: SET_FLAGS(flags, MODE_MF_SD); break; case MULTI_FUNCTION_SI: SET_FLAGS(flags, MODE_MF_SI); break; case MULTI_FUNCTION_AFEX: SET_FLAGS(flags, MODE_MF_AFEX); break; } } else { SET_FLAGS(flags, MODE_SF); } #if defined(__LITTLE_ENDIAN) SET_FLAGS(flags, MODE_LITTLE_ENDIAN); #else /* __BIG_ENDIAN */ SET_FLAGS(flags, MODE_BIG_ENDIAN); #endif INIT_MODE_FLAGS(sc) = flags; } static int bxe_alloc_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; bus_addr_t busaddr; int max_agg_queues; int max_segments; bus_size_t max_size; bus_size_t max_seg_size; char buf[32]; int rc; int i, j; /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ /* allocate the parent bus DMA tag */ rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BUS_SPACE_MAXSIZE_32BIT, /* max map size */ BUS_SPACE_UNRESTRICTED, /* num discontinuous */ BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &sc->parent_dma_tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); return (1); } /************************/ /* DEFAULT STATUS BLOCK */ /************************/ if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), &sc->def_sb_dma, "default status block") != 0) { /* XXX */ bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; /***************/ /* EVENT QUEUE */ /***************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->eq_dma, "event queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; /*************/ /* SLOW PATH */ /*************/ if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), &sc->sp_dma, "slow path") != 0) { /* XXX */ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; /*******************/ /* SLOW PATH QUEUE */ /*******************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->spq_dma, "slow path queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, "fw decompression buffer") != 0) { /* XXX */ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; if ((sc->gz_strm = malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { /* XXX */ bxe_dma_free(sc, &sc->gz_buf_dma); 
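/*
 * Every failure branch in this function repeats, in reverse order, the
 * frees for everything allocated so far. An equivalent and more compact
 * shape for such code is a goto unwind ladder; below is a sketch of the
 * first two allocations rewritten that way (behavior unchanged, function
 * and label names invented).
 */
#if 0
static int
alloc_unwind_example(struct bxe_softc *sc)
{
    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
                      &sc->def_sb_dma, "default status block") != 0)
        goto fail_parent;
    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->eq_dma, "event queue") != 0)
        goto fail_def_sb;
    return (0);

fail_def_sb:
    bxe_dma_free(sc, &sc->def_sb_dma);        /* undo in reverse order */
fail_parent:
    bus_dma_tag_destroy(sc->parent_dma_tag);
    return (1);
}
#endif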
sc->gz_buf = NULL; bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } /*************/ /* FASTPATHS */ /*************/ /* allocate DMA memory for each fastpath structure */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->sc = sc; fp->index = i; /*******************/ /* FP STATUS BLOCK */ /*******************/ snprintf(buf, sizeof(buf), "fp %d status block", i); if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), &fp->sb_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { if (CHIP_IS_E2E3(sc)) { fp->status_block.e2_sb = (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; } else { fp->status_block.e1x_sb = (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; } } /******************/ /* FP TX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), &fp->tx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; } /* link together the tx bd chain pages */ for (j = 1; j <= TX_BD_NUM_PAGES; j++) { /* index into the tx bd chain array to last entry per page */ struct eth_tx_next_bd *tx_next_bd = &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; /* point to the next page and wrap from last page */ busaddr = (fp->tx_dma.paddr + (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); } /******************/ /* FP RX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), &fp->rx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; } /* link together the rx bd chain pages */ for (j = 1; j <= RX_BD_NUM_PAGES; j++) { /* index into the rx bd chain array to last entry per page */ struct eth_rx_bd *rx_bd = &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_dma.paddr + (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); rx_bd->addr_hi = htole32(U64_HI(busaddr)); rx_bd->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX RCQ CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp %d rcq chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), &fp->rcq_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; } /* link together the rcq chain pages */ for (j = 1; j <= RCQ_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct eth_rx_cqe_next_page *rx_cqe_next = (struct eth_rx_cqe_next_page *) &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; /* point to the next page and wrap from last page */ busaddr = (fp->rcq_dma.paddr + (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX SGE CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp 
%d sge chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), &fp->rx_sge_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; } /* link together the sge chain pages */ for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct eth_rx_sge *rx_sge = &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_sge_dma.paddr + (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); rx_sge->addr_hi = htole32(U64_HI(busaddr)); rx_sge->addr_lo = htole32(U64_LO(busaddr)); } /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ /* set required sizes before mapping to conserve resources */ if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { max_size = BXE_TSO_MAX_SIZE; max_segments = BXE_TSO_MAX_SEGMENTS; max_seg_size = BXE_TSO_MAX_SEG_SIZE; } else { max_size = (MCLBYTES * BXE_MAX_SEGMENTS); max_segments = BXE_MAX_SEGMENTS; max_seg_size = MCLBYTES; } /* create a dma tag for the tx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ max_size, /* max map size */ max_segments, /* num discontinuous */ max_seg_size, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->tx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d tx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the tx mbuf clusters */ for (j = 0; j < TX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->tx_mbuf_tag, BUS_DMA_NOWAIT, &fp->tx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d tx mbuf %d' (%d)\n", i, j, rc); return (1); } } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ /* create a dma tag for the rx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ MJUM9BYTES, /* max map size */ 1, /* num discontinuous */ MJUM9BYTES, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the rx mbuf clusters */ for (j = 0; j < RX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ /* create a dma tag for 
the rx sge mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BCM_PAGE_SIZE, /* max map size */ 1, /* num discontinuous */ BCM_PAGE_SIZE, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_sge_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx sge mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for the rx sge mbuf clusters */ for (j = 0; j < RX_SGE_TOTAL; j++) { if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx sge mbuf cluster */ if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx sge mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ /* create dma maps for the rx tpa mbuf clusters */ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info[j].bd.m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx tpa mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); return (1); } bxe_init_sge_ring_bit_mask(fp); } return (0); } static void bxe_free_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; int max_agg_queues; int i, j; if (sc->parent_dma_tag == NULL) { return; /* assume nothing was allocated */ } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; /*******************/ /* FP STATUS BLOCK */ /*******************/ bxe_dma_free(sc, &fp->sb_dma); memset(&fp->status_block, 0, sizeof(fp->status_block)); /******************/ /* FP TX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->tx_dma); fp->tx_chain = NULL; /******************/ /* FP RX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->rx_dma); fp->rx_chain = NULL; /*******************/ /* FP RX RCQ CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rcq_dma); fp->rcq_chain = NULL; /*******************/ /* FP RX SGE CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rx_sge_dma); fp->rx_sge_chain = NULL; /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ if (fp->tx_mbuf_tag != NULL) { for (j = 0; j < TX_BD_TOTAL; j++) { if (fp->tx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); } } bus_dma_tag_destroy(fp->tx_mbuf_tag); fp->tx_mbuf_tag = NULL; } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ if (fp->rx_mbuf_tag != NULL) { for (j = 0; j < RX_BD_TOTAL; j++) { if (fp->rx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, 
fp->rx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_chain[j].m_map); } } if (fp->rx_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (fp->rx_tpa_info[j].bd.m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); } } if (fp->rx_tpa_info_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_mbuf_tag); fp->rx_mbuf_tag = NULL; } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ if (fp->rx_sge_mbuf_tag != NULL) { for (j = 0; j < RX_SGE_TOTAL; j++) { if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); } } if (fp->rx_sge_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); fp->rx_sge_mbuf_tag = NULL; } } /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ bxe_dma_free(sc, &sc->gz_buf_dma); sc->gz_buf = NULL; free(sc->gz_strm, M_DEVBUF); sc->gz_strm = NULL; /*******************/ /* SLOW PATH QUEUE */ /*******************/ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; /*************/ /* SLOW PATH */ /*************/ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; /***************/ /* EVENT QUEUE */ /***************/ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; /************************/ /* DEFAULT STATUS BLOCK */ /************************/ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); sc->parent_dma_tag = NULL; } /* * Previous driver DMAE transaction may have occurred when pre-boot stage * ended and boot began. This would invalidate the addresses of the * transaction, resulting in was-error bit set in the PCI causing all * hw-to-host PCIe transactions to timeout. 
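A brief aside on the mechanics (an assumption here: the _CLR register written below is write-1-to-clear, as its name suggests): acknowledging the latched per-function status takes a single write of that function's bit, with no read-modify-write race against the other PFs, along these lines:

// Sketch: W1C acknowledge, one bit per function; clr_reg is a
// stand-in register name for the example.
static void
w1c_ack_example(struct bxe_softc *sc, uint32_t clr_reg)
{
    REG_WR(sc, clr_reg, 1 << SC_FUNC(sc));    // zero bits stay untouched
}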
If this happened we want to clear * the interrupt which detected this from the pglueb and the was-done bit */ static void bxe_prev_interrupted_dmae(struct bxe_softc *sc) { uint32_t val; if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { BLOGD(sc, DBG_LOAD, "Clearing 'was-error' bit that was set in pglueb"); REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); } } } static int bxe_prev_mcp_done(struct bxe_softc *sc) { uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BLOGE(sc, "MCP response failure, aborting\n"); return (-1); } return (0); } static struct bxe_prev_list_node * bxe_prev_path_get_entry(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; LIST_FOREACH(tmp, &bxe_prev_list, node) { if ((sc->pcie_bus == tmp->bus) && (sc->pcie_device == tmp->slot) && (SC_PATH(sc) == tmp->path)) { return (tmp); } } return (NULL); } static uint8_t bxe_prev_is_path_marked(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; int rc = FALSE; mtx_lock(&bxe_prev_mtx); tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (tmp->aer) { BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was marked by AER\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { rc = TRUE; BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was already cleaned from previous drivers\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } } mtx_unlock(&bxe_prev_mtx); return (rc); } static int bxe_prev_mark_path(struct bxe_softc *sc, uint8_t after_undi) { struct bxe_prev_list_node *tmp; mtx_lock(&bxe_prev_mtx); /* Check whether the entry for this path already exists */ tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (!tmp->aer) { BLOGD(sc, DBG_LOAD, "Re-marking AER in path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { BLOGD(sc, DBG_LOAD, "Removing AER indication from path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); tmp->aer = 0; } mtx_unlock(&bxe_prev_mtx); return (0); } mtx_unlock(&bxe_prev_mtx); /* Create an entry for this path and add it */ tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, (M_NOWAIT | M_ZERO)); if (!tmp) { BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); return (-1); } tmp->bus = sc->pcie_bus; tmp->slot = sc->pcie_device; tmp->path = SC_PATH(sc); tmp->aer = 0; tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; mtx_lock(&bxe_prev_mtx); BLOGD(sc, DBG_LOAD, "Marked path %d/%d/%d - finished previous unload\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); mtx_unlock(&bxe_prev_mtx); return (0); } static int bxe_do_flr(struct bxe_softc *sc) { int i; /* only E2 and onwards support FLR */ if (CHIP_IS_E1x(sc)) { BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); return (-1); } /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", sc->devinfo.bc_ver); return (-1); } /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) { DELAY(((1 << (i - 1)) * 100) * 1000); } if (!bxe_is_pcie_pending(sc)) { goto clear; } } BLOGE(sc, "PCIE transaction is not cleared, " "proceeding with reset anyway\n"); clear: BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); return (0); } struct bxe_mac_vals { uint32_t xmac_addr; uint32_t xmac_val; uint32_t emac_addr; uint32_t emac_val; uint32_t umac_addr; uint32_t umac_val; uint32_t bmac_addr; uint32_t bmac_val[2]; }; static void bxe_prev_unload_close_mac(struct bxe_softc *sc, struct bxe_mac_vals *vals) { uint32_t val, base_addr, offset, mask, reset_reg; uint8_t mac_stopped = FALSE; uint8_t port = SC_PORT(sc); uint32_t wb_data[2]; /* reset addresses as they also mark which values were changed */ vals->bmac_addr = 0; vals->umac_addr = 0; vals->xmac_addr = 0; vals->emac_addr = 0; reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); if (!CHIP_IS_E3(sc)) { val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; if ((mask & reset_reg) && val) { BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL : BIGMAC_REGISTER_BMAC_CONTROL; /* * use rd/wr since we cannot use dmae. This is safe * since MCP won't access the bus due to the request * to unload, and no function on the path can be * loaded at this time. */ wb_data[0] = REG_RD(sc, base_addr + offset); wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); vals->bmac_addr = base_addr + offset; vals->bmac_val[0] = wb_data[0]; vals->bmac_val[1] = wb_data[1]; wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; REG_WR(sc, vals->bmac_addr, wb_data[0]); REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); } BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; vals->emac_val = REG_RD(sc, vals->emac_addr); REG_WR(sc, vals->emac_addr, 0); mac_stopped = TRUE; } else { if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); vals->xmac_addr = base_addr + XMAC_REG_CTRL; vals->xmac_val = REG_RD(sc, vals->xmac_addr); REG_WR(sc, vals->xmac_addr, 0); mac_stopped = TRUE; } mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; if (mask & reset_reg) { BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); base_addr = SC_PORT(sc) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; vals->umac_val = REG_RD(sc, vals->umac_addr); REG_WR(sc, vals->umac_addr, 0); mac_stopped = TRUE; } } if (mac_stopped) { DELAY(20000); } } #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) static void bxe_prev_unload_undi_inc(struct bxe_softc *sc, uint8_t port, uint8_t inc) { uint16_t rcq, bd; uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); BLOGD(sc, DBG_LOAD, "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", port, bd, rcq); } static int bxe_prev_unload_common(struct bxe_softc *sc) { uint32_t reset_reg, tmp_reg = 0, rc; uint8_t prev_undi = FALSE; struct bxe_mac_vals mac_vals; uint32_t timer_count = 1000; uint32_t prev_brb; /* * It is possible a previous function received 'common' answer, * but hasn't loaded yet, therefore creating a scenario of * multiple functions receiving 'common' on the same path. */ BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); memset(&mac_vals, 0, sizeof(mac_vals)); if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { /* Close the MAC Rx to prevent BRB from filling up */ bxe_prev_unload_close_mac(sc, &mac_vals); /* close LLH filters towards the BRB */ elink_set_rx_filter(&sc->link_params, 0); /* * Check if the UNDI driver was previously loaded. 
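* (UNDI here presumably refers to the pre-boot PXE network driver; if one
* was left running it still owns BRB buffers, which is why its producers
* are advanced manually further below while the BRB drains.)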
* UNDI driver initializes CID offset for normal bell to 0x7 */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); if (tmp_reg == 0x7) { BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); prev_undi = TRUE; /* clear the UNDI indication */ REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); /* clear possible idle check errors */ REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); } } /* wait until BRB is empty */ tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { prev_brb = tmp_reg; tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); if (!tmp_reg) { break; } BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); /* reset timer as long as BRB actually gets emptied */ if (prev_brb > tmp_reg) { timer_count = 1000; } else { timer_count--; } /* If UNDI resides in memory, manually increment it */ if (prev_undi) { bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); } DELAY(10); } if (!timer_count) { BLOGE(sc, "Failed to empty BRB\n"); } } /* No packets are in the pipeline, path is ready for reset */ bxe_reset_common(sc); if (mac_vals.xmac_addr) { REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); } if (mac_vals.umac_addr) { REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); } if (mac_vals.emac_addr) { REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); } if (mac_vals.bmac_addr) { REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); } rc = bxe_prev_mark_path(sc, prev_undi); if (rc) { bxe_prev_mcp_done(sc); return (rc); } return (bxe_prev_mcp_done(sc)); } static int bxe_prev_unload_uncommon(struct bxe_softc *sc) { int rc; BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); /* Test if previous unload process was already finished for this path */ if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); /* * If function has FLR capabilities, and existing FW version matches * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); if (!rc) { /* fw version is good */ BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); rc = bxe_do_flr(sc); } if (!rc) { /* FLR was performed */ BLOGD(sc, DBG_LOAD, "FLR successful\n"); return (0); } BLOGD(sc, DBG_LOAD, "Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bxe_prev_mcp_done(sc); if (!rc) { rc = BXE_PREV_WAIT_NEEDED; } return (rc); } static int bxe_prev_unload(struct bxe_softc *sc) { int time_counter = 10; uint32_t fw, hw_lock_reg, hw_lock_val; uint32_t rc = 0; /* * Clear HW from errors which may have resulted from an interrupted * DMAE transaction. */ bxe_prev_interrupted_dmae(sc); /* Release previously held locks */ hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
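/*
 * The per-function driver-control (HW lock) registers appear to be laid
 * out in two banks, 8 bytes per function: functions 0-5 starting at
 * MISC_REG_DRIVER_CONTROL_1 and functions 6-7 starting at
 * MISC_REG_DRIVER_CONTROL_7, hence the two-armed address computation.
 */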
(MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); hw_lock_val = (REG_RD(sc, hw_lock_reg)); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); } BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); REG_WR(sc, hw_lock_reg, 0xffffffff); } else { BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); } if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); } do { /* Lock MCP using an unload request */ fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); if (!fw) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; break; } if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { rc = bxe_prev_unload_common(sc); break; } /* non-common reply from MCP might require looping */ rc = bxe_prev_unload_uncommon(sc); if (rc != BXE_PREV_WAIT_NEEDED) { break; } DELAY(20000); } while (--time_counter); if (!time_counter || rc) { BLOGE(sc, "Failed to unload previous driver!" " time_counter %d rc %d\n", time_counter, rc); rc = -1; } return (rc); } void bxe_dcbx_set_state(struct bxe_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) { if (!CHIP_IS_E1x(sc)) { sc->dcb_state = dcb_on; sc->dcbx_enabled = dcbx_enabled; } else { sc->dcb_state = FALSE; sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; } BLOGD(sc, DBG_LOAD, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? "on-chip with negotiation" : "invalid"); } /* must be called after sriov-enable */ static int bxe_set_qm_cid_count(struct bxe_softc *sc) { int cid_count = BXE_L2_MAX_CID(sc); if (IS_SRIOV(sc)) { cid_count += BXE_VF_CIDS; } if (CNIC_SUPPORT(sc)) { cid_count += CNIC_CID_MAX; } return (roundup(cid_count, QM_CID_ROUND)); } static void bxe_init_multi_cos(struct bxe_softc *sc) { int pri, cos; uint32_t pri_map = 0; /* XXX change to user config */ for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); if (cos < sc->max_cos) { sc->prio_to_cos[pri] = cos; } else { BLOGW(sc, "Invalid COS %d for priority %d " "(max COS is %d), setting to 0\n", cos, pri, (sc->max_cos - 1)); sc->prio_to_cos[pri] = 0; } } } static int bxe_sysctl_state(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc; int error, result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) { return (error); } if (result == 1) { uint32_t temp; sc = (struct bxe_softc *)arg1; BLOGI(sc, "... dumping driver state ...\n"); temp = SHMEM2_RD(sc, temperature_in_half_celsius); BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); } return (error); } static int bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc; int error, result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) { return (error); } if (result == 1) { sc = (struct bxe_softc *)arg1; BLOGI(sc, "... grcdump start ...\n"); bxe_grc_dump(sc); BLOGI(sc, "... 
grcdump done ...\n"); } return (error); } static int bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; uint32_t *offset; uint64_t value = 0; int index = (int)arg2; if (index >= BXE_NUM_ETH_STATS) { BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_stats_arr[index].offset); switch (bxe_eth_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", index, bxe_eth_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static int bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats; uint32_t *offset; uint64_t value = 0; uint32_t q_stat = (uint32_t)arg2; uint32_t fp_index = ((q_stat >> 16) & 0xffff); uint32_t index = (q_stat & 0xffff); eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; if (index >= BXE_NUM_ETH_Q_STATS) { BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); switch (bxe_eth_q_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", index, bxe_eth_q_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static void bxe_add_sysctls(struct bxe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *queue_top, *queue; struct sysctl_oid_list *queue_top_children, *queue_children; char queue_num_buf[32]; uint32_t q_stat; int i, j; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", CTLFLAG_RD, BXE_DRIVER_VERSION, 0, "version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, "bootcode version"); snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, sc->fw_ver_str, 0, "firmware version"); snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : "Unknown")); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, "multifunction vnics per port"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, sc->mac_addr_str, 0, "mac address"); snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : (sc->devinfo.pcie_link_speed == 4) ? 
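/*
 * pcie_link_speed here is the driver's own encoding of the negotiated
 * link rate: 1, 2 and 4 presumably stand for Gen1 (2.5GT/s), Gen2
 * (5.0GT/s) and Gen3 (8.0GT/s); anything else is reported as unknown.
 */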
"8.0GT/s" : "???GT/s"), sc->devinfo.pcie_link_width); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, sc->pci_link_str, 0, "pci link status"); sc->debug = bxe_debug; SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, "debug logging mode"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, bxe_sysctl_trigger_grcdump, "IU", "set by driver when a grcdump is needed"); sc->grcdump_done = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", CTLFLAG_RW, &sc->grcdump_done, 0, "set by driver when grcdump is done"); sc->rx_budget = bxe_rx_budget; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", CTLFLAG_RW, &sc->rx_budget, 0, "rx processing budget"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, bxe_sysctl_state, "IU", "dump driver state"); for (i = 0; i < BXE_NUM_ETH_STATS; i++) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, bxe_eth_stats_arr[i].string, CTLTYPE_U64 | CTLFLAG_RD, sc, i, bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string); } /* add a new parent node for all queues "dev.bxe.#.queue" */ queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", CTLFLAG_RD, NULL, "queue"); queue_top_children = SYSCTL_CHILDREN(queue_top); for (i = 0; i < sc->num_queues; i++) { /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, queue_num_buf, CTLFLAG_RD, NULL, "single queue"); queue_children = SYSCTL_CHILDREN(queue); for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { q_stat = ((i << 16) | j); SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, bxe_eth_q_stats_arr[j].string, CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string); } } } /* * Device attach function. * * Allocates device resources, performs secondary chip identification, and * initializes driver instance variables. This function is called from driver * load after a successful probe. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_attach(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting attach...\n"); sc->state = BXE_STATE_CLOSED; sc->dev = dev; sc->unit = device_get_unit(dev); BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); sc->pcie_bus = pci_get_bus(dev); sc->pcie_device = pci_get_slot(dev); sc->pcie_func = pci_get_function(dev); /* enable bus master capability */ pci_enable_busmaster(dev); /* get the BARs */ if (bxe_allocate_bars(sc) != 0) { return (ENXIO); } /* initialize the mutexes */ bxe_init_mutexes(sc); /* prepare the periodic callout */ callout_init(&sc->periodic_callout, 0); /* prepare the chip taskqueue */ sc->chip_tq_flags = CHIP_TQ_NONE; snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), "bxe%d_chip_tq", sc->unit); TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->chip_tq); taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ "%s", sc->chip_tq_name); /* get device info and set params */ if (bxe_get_device_info(sc) != 0) { BLOGE(sc, "getting device info\n"); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* get final misc params */ bxe_get_params(sc); /* set the default MTU (changed via ifconfig) */ sc->mtu = ETHERMTU; bxe_set_modes_bitmap(sc); /* XXX * If in AFEX mode and the function is configured for FCoE * then bail... no L2 allowed. 
*/ /* get phy settings from shmem and 'and' against admin settings */ bxe_get_phy_info(sc); /* initialize the FreeBSD ifnet interface */ if (bxe_init_ifnet(sc) != 0) { bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } if (bxe_add_cdev(sc) != 0) { if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate device interrupts */ if (bxe_interrupt_alloc(sc) != 0) { bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate ilt */ if (bxe_alloc_ilt_mem(sc) != 0) { bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate the host hardware/software hsi structures */ if (bxe_alloc_hsi_mem(sc) != 0) { bxe_free_ilt_mem(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* need to reset chip if UNDI was active */ if (IS_PF(sc) && !BXE_NOMCP(sc)) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); bxe_prev_unload(sc); } #if 1 /* XXX */ bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); #else if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && SHMEM2_RD(sc, dcbx_lldp_params_offset) && SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); bxe_dcbx_init_params(sc); } else { bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); } #endif /* calculate qm_cid_count */ sc->qm_cid_count = bxe_set_qm_cid_count(sc); BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); sc->max_cos = 1; bxe_init_multi_cos(sc); bxe_add_sysctls(sc); return (0); } /* * Device detach function. * * Stops the controller, resets the controller, and releases resources. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_detach(device_t dev) { struct bxe_softc *sc; if_t ifp; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting detach...\n"); ifp = sc->ifp; if (ifp != NULL && if_vlantrunkinuse(ifp)) { BLOGE(sc, "Cannot detach while VLANs are in use.\n"); return(EBUSY); } bxe_del_cdev(sc); /* stop the periodic callout */ bxe_periodic_stop(sc); /* stop the chip taskqueue */ atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); if (sc->chip_tq) { taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); taskqueue_free(sc->chip_tq); sc->chip_tq = NULL; } /* stop and reset the controller if it was open */ if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); BXE_CORE_UNLOCK(sc); } /* release the network interface */ if (ifp != NULL) { ether_ifdetach(ifp); } ifmedia_removeall(&sc->ifmedia); /* XXX do the following based on driver state... 
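(as of now the teardown below runs unconditionally; it roughly mirrors,
in reverse, the allocation order used by bxe_attach: hsi memory, ILT,
interrupts, mutexes, BARs and finally the ifnet itself)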
*/ /* free the host hardware/software hsi structures */ bxe_free_hsi_mem(sc); /* free ilt */ bxe_free_ilt_mem(sc); /* release the interrupts */ bxe_interrupt_free(sc); /* Release the mutexes*/ bxe_release_mutexes(sc); /* Release the PCIe BAR mapped memory */ bxe_deallocate_bars(sc); /* Release the FreeBSD interface. */ if (sc->ifp != NULL) { if_free(sc->ifp); } pci_disable_busmaster(dev); return (0); } /* * Device shutdown function. * * Stops and resets the controller. * * Returns: * Nothing */ static int bxe_shutdown(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); /* stop the periodic callout */ bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); BXE_CORE_UNLOCK(sc); return (0); } void bxe_igu_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id, uint8_t segment, uint16_t index, uint8_t op, uint8_t update) { uint32_t igu_addr = sc->igu_base_addr; igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); } static void bxe_igu_clear_sb_gen(struct bxe_softc *sc, uint8_t func, uint8_t idu_sb_id, uint8_t is_pf) { uint32_t data, ctl, cnt = 100; uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; uint32_t sb_bit = 1 << (idu_sb_id%32); uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; /* Not supported in BC mode */ if (CHIP_INT_MODE_IS_BC(sc)) { return; } data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | (func_encode << IGU_CTRL_REG_FID_SHIFT) | (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(sc, igu_addr_data, data); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(sc, igu_addr_ctl, ctl); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); /* wait for clean up to finish */ while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { DELAY(20000); } if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { BLOGD(sc, DBG_LOAD, "Unable to finish IGU cleanup: " "idu_sb_id %d offset %d bit %d (cnt %d)\n", idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); } } static void bxe_igu_clear_sb(struct bxe_softc *sc, uint8_t idu_sb_id) { bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); } /*******************/ /* ECORE CALLBACKS */ /*******************/ static void bxe_reset_common(struct bxe_softc *sc) { uint32_t val = 0x1400; /* reset_common */ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); } static void bxe_common_init_phy(struct bxe_softc *sc) { uint32_t shmem_base[2]; uint32_t shmem2_base[2]; /* Avoid common init in case MFW supports LFA */ if (SHMEM2_RD(sc, size) > (uint32_t)offsetof(struct shmem2_region, lfa_host_addr[SC_PORT(sc)])) { return; } shmem_base[0] = sc->devinfo.shmem_base; shmem2_base[0] = sc->devinfo.shmem2_base; if 
(!CHIP_IS_E1x(sc)) { shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); } bxe_acquire_phy_lock(sc); elink_common_init_phy(sc, shmem_base, shmem2_base, sc->devinfo.chip_id, 0); bxe_release_phy_lock(sc); } static void bxe_pf_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~IGU_PF_CONF_FUNC_EN; REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); } static void bxe_init_pxp(struct bxe_softc *sc) { uint16_t devctl; int r_order, w_order; devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); if (sc->mrrs == -1) { r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); } else { BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); r_order = sc->mrrs; } ecore_init_pxp_arb(sc, r_order, w_order); } static uint32_t bxe_get_pretend_reg(struct bxe_softc *sc) { uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); return (base + (SC_ABS_FUNC(sc)) * stride); } /* * Called only on E1H or E2. * When pretending to be PF, the pretend value is the function number 0..7. * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID * combination. */ static int bxe_pretend_func(struct bxe_softc *sc, uint16_t pretend_func_val) { uint32_t pretend_reg; if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { return (-1); } /* get my own pretend register */ pretend_reg = bxe_get_pretend_reg(sc); REG_WR(sc, pretend_reg, pretend_func_val); REG_RD(sc, pretend_reg); return (0); } static void bxe_iov_init_dmae(struct bxe_softc *sc) { return; } static void bxe_iov_init_dq(struct bxe_softc *sc) { return; } /* send a NIG loopback debug packet */ static void bxe_lb_pckt(struct bxe_softc *sc) { uint32_t wb_write[3]; /* Ethernet source and destination addresses */ wb_write[0] = 0x55555555; wb_write[1] = 0x55555555; wb_write[2] = 0x20; /* SOP */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); /* NON-IP protocol */ wb_write[0] = 0x09000000; wb_write[1] = 0x55555555; wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); } /* * Some of the internal memories are not directly readable from the driver. * To test them we send debug packets. */ static int bxe_int_mem_test(struct bxe_softc *sc) { int factor; int count, i; uint32_t val = 0; if (CHIP_REV_IS_FPGA(sc)) { factor = 120; } else if (CHIP_REV_IS_EMUL(sc)) { factor = 200; } else { factor = 1; } /* disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send Ethernet packet */ bxe_lb_pckt(sc); /* TODO do i reset NIG statistic? 
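(the checks below implicitly assume the NIG octet counter starts at
zero right after the blocks came out of reset; if it did not, the
expected byte counts of 0x10 and 0xb0 would be off)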
*/ /* Wait until NIG register shows 1 packet of size 0x10 */ count = 1000 * factor; while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0x10) { break; } DELAY(10000); count--; } if (val != 0x10) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-1); } /* wait until PRS register shows 1 packet */ count = (1000 * factor); while (count) { val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val == 1) { break; } DELAY(10000); count--; } if (val != 0x1) { BLOGE(sc, "PRS timeout val=0x%x\n", val); return (-2); } /* Reset and init BRB, PRS */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); /* Disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send 10 Ethernet packets */ for (i = 0; i < 10; i++) { bxe_lb_pckt(sc); } /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ count = (1000 * factor); while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0xb0) { break; } DELAY(10000); count--; } if (val != 0xb0) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-3); } /* Wait until PRS register shows 2 packets */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 2) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* Write 1 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); /* Wait until PRS register shows 3 packets */ DELAY(10000 * factor); /* Check that the PRS register now shows 3 packets */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 3) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* clear NIG EOP FIFO */ for (i = 0; i < 11; i++) { REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); } val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); if (val != 1) { BLOGE(sc, "clear of NIG failed val=0x%x\n", val); return (-4); } /* Reset and init BRB, PRS, NIG */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); if (!CNIC_SUPPORT(sc)) { /* set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); } /* Enable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); REG_WR(sc, CFC_REG_DEBUG0, 0x0); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); return (0); } static void bxe_setup_fan_failure_detection(struct bxe_softc *sc) { int is_required; uint32_t val; int port; is_required = 0; val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & SHARED_HW_CFG_FAN_FAILURE_MASK); if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { is_required = 1; } /* * The fan failure mechanism is usually related to the PHY type since * the power consumption of the board is affected by the PHY. Currently, * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
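* When the shared configuration selects FAN_FAILURE_PHY_TYPE instead,
* each port's PHY is queried through elink_fan_failure_det_req() below,
* and a detected failure is signalled via SPIO5 as an active-low
* attention routed to the IGU.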
*/ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { for (port = PORT_0; port < PORT_MAX; port++) { is_required |= elink_fan_failure_det_req(sc, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, port); } } BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); if (is_required == 0) { return; } /* Fan failure is indicated by SPIO 5 */ bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); /* set to active low mode */ val = REG_RD(sc, MISC_REG_SPIO_INT); val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); REG_WR(sc, MISC_REG_SPIO_INT, val); /* enable interrupt to signal the IGU */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); val |= MISC_SPIO_SPIO5; REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); } static void bxe_enable_blocks_attention(struct bxe_softc *sc) { uint32_t val; REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); } else { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); } REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* * mask read length error interrupts in brb for parser * (parsing unit and 'checksum and crc' unit) * these errors are legal (PU reads fixed length and CAC can cause * read length error on truncated packets) */ REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); REG_WR(sc, QM_REG_QM_INT_MASK, 0); REG_WR(sc, TM_REG_TM_INT_MASK, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); if (!CHIP_IS_E1x(sc)) { val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); } REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ if (!CHIP_IS_E1x(sc)) { /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); } REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ } /** * bxe_init_hw_common - initialize the HW at the COMMON phase. 
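* (Runs once per path: the first PF loaded resets the chip, brings up the
* blocks shared by all functions on the path and applies the E2
* timers-block ILT workaround described further below.)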
* * @sc: driver handle */ static int bxe_init_hw_common(struct bxe_softc *sc) { uint8_t abs_func_id; uint32_t val; BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", SC_ABS_FUNC(sc)); /* * take the RESET lock to protect undi_unload flow from accessing * registers while we are resetting the chip */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); bxe_reset_common(sc); REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); val = 0xfffc; if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); BLOGD(sc, DBG_LOAD, "after misc block init\n"); if (!CHIP_IS_E1x(sc)) { /* * In 4-port or 2-port mode we need to turn off master-enable for * everyone. After that we turn it back on for self. So, we disregard * multi-function, and always disable all functions on the given path, * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 */ for (abs_func_id = SC_PATH(sc); abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { if (abs_func_id == SC_ABS_FUNC(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); continue; } bxe_pretend_func(sc, abs_func_id); /* clear pf enable */ bxe_pf_disable(sc); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); } } BLOGD(sc, DBG_LOAD, "after pf disable\n"); ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); if (CHIP_IS_E1(sc)) { /* * enable HW interrupt from PXP on USDM overflow * bit 16 on INT_MASK_0 */ REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); } ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); bxe_init_pxp(sc); #ifdef __BIG_ENDIAN REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); /* make sure this value is 0 */ REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); #endif ecore_ilt_init_page_size(sc, INITOP_SET); if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); } /* let the HW do its magic... */ DELAY(100000); /* finish PXP init */ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); if (val != 1) { BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", val); return (-1); } val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); if (val != 1) { BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); return (-1); } BLOGD(sc, DBG_LOAD, "after pxp init\n"); /* * Timer bug workaround for E2 only. We need to set the entire ILT to have * entries with value "0" and valid bit on. This needs to be done by the * first PF that is loaded in a path (i.e. common phase) */ if (!CHIP_IS_E1x(sc)) { /* * In E2 there is a bug in the timers block that can cause function 6 / 7 * (i.e. vnic3) to start even if it is marked as "scan-off". * This occurs when a different function (func2,3) is being marked * as "scan-off". Real-life scenario for example: if a driver is being * load-unloaded while func6,7 are down. This will cause the timer to access * the ilt, translate to a logical address and send a request to read/write. * Since the ilt for the function that is down is not valid, this will cause * a translation error which is unrecoverable.
* The Workaround is intended to make sure that when this happens nothing * fatal will occur. The workaround: * 1. First PF driver which loads on a path will: * a. After taking the chip out of reset, by using pretend, * it will write "0" to the following registers of * the other vnics. * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); * And for itself it will write '1' to * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable * dmae-operations (writing to pram for example.) * note: can be done for only function 6,7 but cleaner this * way. * b. Write zero+valid to the entire ILT. * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of * VNIC3 (of that port). The range allocated will be the * entire ILT. This is needed to prevent ILT range error. * 2. Any PF driver load flow: * a. ILT update with the physical addresses of the allocated * logical pages. * b. Wait 20msec. - note that this timeout is needed to make * sure there are no requests in one of the PXP internal * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. (could have * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: * a. Clear the Timers scan_en. * b. Polling for scan_on=0 for that PF. * c. Clear the PF enable bit in the PXP. * d. Clear the PF enable in the CFC (WEAK + STRONG) * e. Write zero+valid to all ILT entries (The valid bit must * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non-recoverable. * In the future there will be a recovery routine for this error. * Currently attention is masked. * Having an MCP lock on the load/unload process does not guarantee that * there is no Timer disable during Func6/7 enable. This is because the * Timers scan is currently being cleared by the MCP on FLR. * Step 2.d can be done only for PF6/7 and the driver can also check if * there is an error before clearing it. But the flow above is simpler and * more general. * All ILT entries are written by zero+valid and not just PF6/7 * ILT entries since in the future the ILT entries allocation for * PF-s might be dynamic. */ struct ilt_client_info ilt_cli; struct ecore_ilt ilt; memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); memset(&ilt, 0, sizeof(struct ecore_ilt)); /* initialize dummy TM client */ ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; /* * Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th * vnic (this code assumes existence of the vnic) * * both steps performed by call to ecore_ilt_client_init_op() * with dummy TM client * * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT * and his brother are split registers */ bxe_pretend_func(sc, (SC_PATH(sc) + 6)); ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); if (!CHIP_IS_E1x(sc)) { int factor = CHIP_REV_IS_EMUL(sc) ?
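/*
 * Scale the ATC init poll for slow platforms: up to 1000 extra
 * iterations on emulation and 400 on FPGA; on real silicon factor is 0
 * and the do/while below performs exactly one 200ms check.
 */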
1000 : (CHIP_REV_IS_FPGA(sc) ? 400 : 0); ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); /* let the HW do its magic... */ do { DELAY(200000); val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); } while (factor-- && (val != 1)); if (val != 1) { BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); return (-1); } } BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); bxe_iov_init_dmae(sc); /* clean the DMAE memory */ sc->dmae_ready = 1; ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); /* QM queues pointers table */ ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); /* soft reset pulse */ REG_WR(sc, QM_REG_SOFT_RESET, 1); REG_WR(sc, QM_REG_SOFT_RESET, 0); if (CNIC_SUPPORT(sc)) ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); if (!CHIP_REV_IS_SLOW(sc)) { /* enable hw interrupt from doorbell Q */ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); } ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); if (!CHIP_IS_E1(sc)) { REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); } if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * received in AFEX mode */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); } else { /* * Bit-map indicating which L2 hdrs may appear * after the basic Ethernet header */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ?
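/*
 * 7 vs 6: when the path carries an outer VLAN the low bit of the
 * header bitmap is set as well, permitting one additional header type
 * after the basic Ethernet header; the individual bit meanings are
 * hardware-defined.
 */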
7 : 6); } } ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { /* reset VFC memories */ REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); DELAY(20000); } ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); /* sync semi rtc */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * sent in AFEX mode */ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); } else { REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); } } REG_WR(sc, SRC_REG_SOFT_RST, 1); ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); if (CNIC_SUPPORT(sc)) { REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); } REG_WR(sc, SRC_REG_SOFT_RST, 0); if (sizeof(union cdu_context) != 1024) { /* we currently assume that a context is 1024 bytes */ BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", (long)sizeof(union cdu_context)); } ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); /* enable context validation interrupt from CFC */ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* set the thresholds to prevent CFC/CDU race */ REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); } ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); /* Reset PCIE errors for debug */ REG_WR(sc, 0x2814, 0xffffffff); REG_WR(sc, 0x3820, 0xffffffff); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); } ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); if (!CHIP_IS_E1(sc)) { /* in E3 this is done in the per-port section */ if (!CHIP_IS_E3(sc)) REG_WR(sc,
NIG_REG_LLH_MF_MODE, IS_MF(sc)); } if (CHIP_IS_E1H(sc)) { /* not applicable for E2 (and above ...) */ REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); } if (CHIP_REV_IS_SLOW(sc)) { DELAY(200000); } /* finish CFC init */ val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); return (-1); } REG_WR(sc, CFC_REG_DEBUG0, 0); if (CHIP_IS_E1(sc)) { /* read NIG statistic to see if this is our first up since powerup */ bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); /* do internal memory self test */ if ((val == 0) && bxe_int_mem_test(sc)) { BLOGE(sc, "internal mem self test failed val=0x%x\n", val); return (-1); } } bxe_setup_fan_failure_detection(sc); /* clear PXP2 attentions */ REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); bxe_enable_blocks_attention(sc); if (!CHIP_REV_IS_SLOW(sc)) { ecore_enable_blocks_parity(sc); } if (!BXE_NOMCP(sc)) { if (CHIP_IS_E1x(sc)) { bxe_common_init_phy(sc); } } return (0); } /** * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. * * @sc: driver handle */ static int bxe_init_hw_common_chip(struct bxe_softc *sc) { int rc = bxe_init_hw_common(sc); if (rc) { BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); return (rc); } /* In E2 2-PORT mode, same ext phy is used for the two paths */ if (!BXE_NOMCP(sc)) { bxe_common_init_phy(sc); } return (0); } static int bxe_init_hw_port(struct bxe_softc *sc) { int port = SC_PORT(sc); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; uint32_t low, high; uint32_t val; BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); /* * Timers bug workaround: disables the pf_master bit in pglue at * common phase, we need to enable it here before any dmae access is * attempted. Therefore we manually added the enable-master to the * port phase (it also happens in the function phase) */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); } ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); /* QM cid (connection) count */ ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_TM, init_phase); REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); } ecore_init_block(sc, BLOCK_DORQ, init_phase); ecore_init_block(sc, BLOCK_BRB1, init_phase); if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { if (IS_MF(sc)) { low = (BXE_ONE_PORT(sc) ? 160 : 246); } else if (sc->mtu > 4096) { if (BXE_ONE_PORT(sc)) { low = 160; } else { val = sc->mtu; /* (24*1024 + val*4)/256 */ low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); } } else { low = (BXE_ONE_PORT(sc) ?
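/*
 * These thresholds are in 256-byte BRB blocks. In the jumbo branch
 * above, low = 96 + ceil(mtu/64); e.g. an MTU of 9000 yields
 * 96 + 141 = 237 blocks, and high is always low + 56 blocks (14KB).
 */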
80 : 160); } high = (low + 56); /* 14*1024/256 */ REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); } if (CHIP_IS_MODE_4_PORT(sc)) { REG_WR(sc, SC_PORT(sc) ? BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0, 40); } ecore_init_block(sc, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* configure headers for AFEX mode */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); REG_WR(sc, SC_PORT(sc) ? PRS_REG_MUST_HAVE_HDRS_PORT_1 : PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); } else { /* Ovlan exists only if we are in multi-function + * switch-dependent mode, in switch-independent there * are no ovlan headers */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); } } ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (CHIP_IS_E1x(sc)) { /* configure PBF to work without PAUSE mtu 9000 */ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); /* update threshold */ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); /* update init credit */ REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); /* probe changes */ REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); DELAY(50); REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); } if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_SRC, init_phase); } ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (CHIP_IS_E1(sc)) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); ecore_init_block(sc, BLOCK_IGU, init_phase); ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: * - SF mode: bits 3-7 are masked. only bits 0-2 are in use * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(sc) ? 0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ val |= CHIP_IS_E1(sc) ? 0 : 0x10; REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); ecore_init_block(sc, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(sc)) { /* Bit-map indicating which L2 hdrs may appear after the * basic Ethernet header */ if (IS_MF_AFEX(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); } else { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, IS_MF_SD(sc) ? 7 : 6); } if (CHIP_IS_E3(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_LLH1_MF_MODE : NIG_REG_LLH_MF_MODE, IS_MF(sc)); } } if (!CHIP_IS_E3(sc)) { REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); } if (!CHIP_IS_E1(sc)) { /* 0x2 disable mf_ov, 0x1 enable */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, (IS_MF_SD(sc) ?
0x1 : 0x2)); if (!CHIP_IS_E1x(sc)) { val = 0; switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: val = 1; break; case MULTI_FUNCTION_SI: case MULTI_FUNCTION_AFEX: val = 2; break; } REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : NIG_REG_LLH0_CLS_TYPE), val); } REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); } /* If SPIO5 is set to generate interrupts, enable it for this port */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); if (val & MISC_SPIO_SPIO5) { uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(sc, reg_addr); val |= AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_addr, val); } return (0); } static uint32_t bxe_flr_clnup_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected, uint32_t poll_count) { uint32_t cur_cnt = poll_count; uint32_t val; while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); } return (val); } static int bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, uint32_t reg, char *msg, uint32_t poll_cnt) { uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); if (val != 0) { BLOGE(sc, "%s usage count=%d\n", msg, val); return (1); } return (0); } /* Common routines with VF FLR cleanup */ static uint32_t bxe_flr_clnup_poll_count(struct bxe_softc *sc) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(sc)) { return (FLR_POLL_CNT * 2000); } if (CHIP_REV_IS_FPGA(sc)) { return (FLR_POLL_CNT * 120); } return (FLR_POLL_CNT); } static int bxe_poll_hw_usage_counters(struct bxe_softc *sc, uint32_t poll_cnt) { /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bxe_flr_clnup_poll_hw_counter(sc, CFC_REG_NUM_LCIDS_INSIDE_PF, "CFC PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, DORQ_REG_PF_USAGE_CNT, "DQ PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), "QM PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), "Timers VNIC usage counter timed out", poll_cnt)) { return (1); } if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), "Timers NUM_SCANS usage counter timed out", poll_cnt)) { return (1); } /* Wait DMAE PF usage counter to zero */ if (bxe_flr_clnup_poll_hw_counter(sc, dmae_reg_go_c[INIT_DMAE_C(sc)], "DMAE command register timed out", poll_cnt)) { return (1); } return (0); } #define OP_GEN_PARAM(param) \ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) #define OP_GEN_TYPE(type) \ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) static int bxe_send_final_clnup(struct bxe_softc *sc, uint8_t clnup_func, uint32_t poll_cnt) { uint32_t op_gen_command = 0; uint32_t comp_addr = (BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); int ret = 0; if (REG_RD(sc, comp_addr)) { BLOGE(sc, "Cleanup complete was not 0 before sending\n"); return (1); } op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); op_gen_command |=
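/*
 * The final-cleanup command is assembled from the OP_GEN_* fields
 * defined above (completion parameter, completion type, aggregated
 * vector index of the function being cleaned, plus the vector-valid
 * bit just below) and is then fired through XSDM_REG_OPERATION_GEN,
 * with completion polled at comp_addr.
 */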
OP_GEN_AGG_VECT(clnup_func); op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { BLOGE(sc, "FW final cleanup did not succeed\n"); BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", (REG_RD(sc, comp_addr))); bxe_panic(sc, ("FLR cleanup failed\n")); return (1); } /* Zero completion for nxt FLR */ REG_WR(sc, comp_addr, 0); return (ret); } static void bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, struct pbf_pN_buf_regs *regs, uint32_t poll_count) { uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; uint32_t cur_cnt = poll_count; crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); crd = crd_start = REG_RD(sc, regs->crd); init_crd = REG_RD(sc, regs->init_crd); BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); while ((crd != init_crd) && ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < (init_crd - crd_start))) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); crd = REG_RD(sc, regs->crd); crd_freed = REG_RD(sc, regs->crd_freed); } else { BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, struct pbf_pN_cmd_regs *regs, uint32_t poll_count) { uint32_t occup, to_free, freed, freed_start; uint32_t cur_cnt = poll_count; occup = to_free = REG_RD(sc, regs->lines_occup); freed = freed_start = REG_RD(sc, regs->lines_freed); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); while (occup && ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); occup = REG_RD(sc, regs->lines_occup); freed = REG_RD(sc, regs->lines_freed); } else { BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT} }; struct pbf_pN_buf_regs buf_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD , (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, (CHIP_IS_E3B0(sc)) ? 
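/*
 * Note on the flush polls above: crd_freed/lines_freed are free-running
 * hardware counters that may wrap, so progress is measured as
 * (uint32_t)((int32_t)freed - (int32_t)freed_start), which stays correct
 * across a 32-bit wrap; e.g. (illustrative values) a start of 0xfffffff0
 * and a current reading of 0x10 yield a delta of 0x20.
 */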
PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, }; int i; /* Verify the command queues are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); } /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); } } static void bxe_hw_enable_status(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); val = REG_RD(sc, PBF_REG_DISABLE_PF); BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); } static int bxe_pf_flr_clnup(struct bxe_softc *sc) { uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); /* Re-enable PF target read access */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Poll HW usage counters */ BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { return (-1); } /* Zero the igu 'trailing edge' and 'leading edge' */ /* Send the FW cleanup command */ if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { return (-1); } /* ATC cleanup */ /* Verify TX hw is flushed */ bxe_tx_hw_flushed(sc, poll_cnt); /* Wait 100ms (not adjusted according to platform) */ DELAY(100000); /* Verify no pending pci transactions */ if (bxe_is_pcie_pending(sc)) { BLOGE(sc, "PCIE Transactions still pending\n"); } /* Debug */ bxe_hw_enable_status(sc); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); return (0); } static int bxe_init_hw_func(struct bxe_softc *sc) { int port = SC_PORT(sc); int func = SC_FUNC(sc); int init_phase = PHASE_PF0 + func; struct ecore_ilt *ilt = sc->ilt; uint16_t cdu_ilt_start; uint32_t addr, val; uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; int i, main_mem_width, rc; BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); /* FLR cleanup */ if (!CHIP_IS_E1x(sc)) { rc = bxe_pf_flr_clnup(sc); if (rc) { BLOGE(sc, "FLR cleanup failed!\n"); // XXX bxe_fw_dump(sc); // XXX bxe_idle_chk(sc); return (rc); } } /* set MSI reconfigure capability */ if (sc->devinfo.int_block == INT_BLOCK_HC) { addr = (port ? 
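/*
 * bxe_pf_flr_clnup() above follows the usual post-FLR ordering: re-enable
 * target reads, let the HW usage counters drain, request the FW final
 * cleanup, verify the PBF TX path is flushed, then wait 100ms and check
 * for pending PCIe transactions before re-enabling master. Unlike the
 * poll counts, the 100ms settle delay is not scaled for emulation or
 * FPGA platforms.
 */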
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); val = REG_RD(sc, addr); val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; REG_WR(sc, addr, val); } ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); ilt = sc->ilt; cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(sc); i++) { ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = sc->context[i].vcxt_dma.paddr; ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; } ecore_ilt_init_op(sc, INITOP_SET); /* Set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); if (!CHIP_IS_E1x(sc)) { uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; /* Turn on a single ISR mode in IGU if driver is going to use * INT#x or MSI */ if (sc->interrupt_mode != INTR_MODE_MSIX) { pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; } /* * Timers workaround bug: function init part. * Need to wait 20msec after initializing ILT, * needed to make sure there are no requests in * one of the PXP internal queues with "old" ILT addresses */ DELAY(20000); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function * init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); /* Enable the function in IGU */ REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); } sc->dmae_ready = 1; ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_NIG, init_phase); ecore_init_block(sc, BLOCK_SRC, init_phase); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, QM_REG_PF_EN, 1); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); } ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TM, init_phase); ecore_init_block(sc, BLOCK_DORQ, init_phase); bxe_iov_init_dq(sc); ecore_init_block(sc, BLOCK_BRB1, init_phase); ecore_init_block(sc, BLOCK_PRS, init_phase); ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PBF_REG_DISABLE_PF, 0); ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); } ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* HC init per function */ if (sc->devinfo.int_block == INT_BLOCK_HC) { if (CHIP_IS_E1H(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); REG_WR(sc, HC_REG_LEADING_EDGE_0 
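/*
 * ILT note for the CDU setup above: each L2 ILT line simply maps one
 * on-chip translation entry to the host page holding that connection
 * context, i.e. line (cdu_ilt_start + i) points at
 * sc->context[i].vcxt_dma.paddr, so the chip can reach context i by ILT
 * index alone.
 */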
+ port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); } else { int num_segs, sb_idx, prod_offset; REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } ecore_init_block(sc, BLOCK_IGU, init_phase); if (!CHIP_IS_E1x(sc)) { int dsb_idx = 0; /** * Producer memory: * E2 mode: address 0-135 match to the mapping memory; * 136 - PF0 default prod; 137 - PF1 default prod; * 138 - PF2 default prod; 139 - PF3 default prod; * 140 - PF0 attn prod; 141 - PF1 attn prod; * 142 - PF2 attn prod; 143 - PF3 attn prod; * 144-147 reserved. * * E1.5 mode - In backward compatible mode; * for non default SB; each even line in the memory * holds the U producer and each odd line hold * the C producer. The first 128 producers are for * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 * producers are for the DSB for each PF. * Each PF has five segments: (the order inside each * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; * 132-135 C prods; 136-139 X prods; 140-143 T prods; * 144-147 attn prods; */ /* non-default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { prod_offset = (sc->igu_base_sb + sb_idx) * num_segs; for (i = 0; i < num_segs; i++) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i) * 4; REG_WR(sc, addr, 0); } /* send consumer update with value 0 */ bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); } /* default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; if (CHIP_IS_MODE_4_PORT(sc)) dsb_idx = SC_FUNC(sc); else dsb_idx = SC_VN(sc); prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_BASE_DSB_PROD + dsb_idx : IGU_NORM_BASE_DSB_PROD + dsb_idx); /* * igu prods come in chunks of E1HVN_MAX (4) - * does not matters what is the current chip mode */ for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i)*4; REG_WR(sc, addr, 0); } /* send consumer update with 0 */ if (CHIP_INT_MODE_IS_BC(sc)) { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, CSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, XSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, TSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } else { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } bxe_igu_clear_sb(sc, sc->igu_dsb_id); /* !!! 
these should become driver const once rf-tool supports split-68 const */ REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); } } /* Reset PCIE errors for debug */ REG_WR(sc, 0x2114, 0xffffffff); REG_WR(sc, 0x2120, 0xffffffff); if (CHIP_IS_E1x(sc)) { main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ main_mem_base = HC_REG_MAIN_MEMORY + SC_PORT(sc) * (main_mem_size * 4); main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; main_mem_width = 8; val = REG_RD(sc, main_mem_prty_clr); if (val) { BLOGD(sc, DBG_LOAD, "Parity errors in HC block during function init (0x%x)!\n", val); } /* Clear "false" parity errors in MSI-X table */ for (i = main_mem_base; i < main_mem_base + main_mem_size * 4; i += main_mem_width) { bxe_read_dmae(sc, i, main_mem_width / 4); bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), i, main_mem_width / 4); } /* Clear HC parity attention */ REG_RD(sc, main_mem_prty_clr); } #if 1 /* Enable STORMs SP logging */ REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); #endif elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_reset(struct bxe_softc *sc) { if (!BXE_NOMCP(sc)) { bxe_acquire_phy_lock(sc); elink_lfa_reset(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } else { if (!CHIP_REV_IS_SLOW(sc)) { BLOGW(sc, "Bootcode is missing - cannot reset link\n"); } } } static void bxe_reset_port(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; /* reset physical Link */ bxe_link_reset(sc); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); /* Do not rcv packets to BRB */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); /* Do not direct rcv packets that are not for MCP to the BRB */ REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); /* Configure AEU */ REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); DELAY(100000); /* Check for BRB port occupancy */ val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); if (val) { BLOGD(sc, DBG_LOAD, "BRB1 is not empty, %d blocks are occupied\n", val); } /* TODO: Close Doorbell port? 
*/ } static void bxe_ilt_wr(struct bxe_softc *sc, uint32_t index, bus_addr_t addr) { int reg; uint32_t wb_write[2]; if (CHIP_IS_E1(sc)) { reg = PXP2_REG_RQ_ONCHIP_AT + index*8; } else { reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; } wb_write[0] = ONCHIP_ADDR1(addr); wb_write[1] = ONCHIP_ADDR2(addr); REG_WR_DMAE(sc, reg, wb_write, 2); } static void bxe_clear_func_ilt(struct bxe_softc *sc, uint32_t func) { uint32_t i, base = FUNC_ILT_BASE(func); for (i = base; i < base + ILT_PER_FUNC; i++) { bxe_ilt_wr(sc, i, 0); } } static void bxe_reset_func(struct bxe_softc *sc) { struct bxe_fastpath *fp; int port = SC_PORT(sc); int func = SC_FUNC(sc); int i; /* Disable the function in the FW */ REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); /* FP SBs */ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), SB_DISABLED); } /* SP SB */ REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); } /* Configure IGU */ if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } else { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } if (CNIC_LOADED(sc)) { /* Disable Timer scan */ REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); /* * Wait for at least 10ms and up to 2 second for the timers * scan to complete */ for (i = 0; i < 200; i++) { DELAY(10000); if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) break; } } /* Clear ILT */ bxe_clear_func_ilt(sc, func); /* * Timers workaround bug for E2: if this is vnic-3, * we need to set the entire ilt range for this timers. */ if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { struct ilt_client_info ilt_cli; /* use dummy TM client */ memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); } /* this assumes that reset_port() called before reset_func()*/ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } sc->dmae_ready = 0; } static int bxe_gunzip_init(struct bxe_softc *sc) { return (0); } static void bxe_gunzip_end(struct bxe_softc *sc) { return; } static int bxe_init_firmware(struct bxe_softc *sc) { if (CHIP_IS_E1(sc)) { ecore_init_e1_firmware(sc); sc->iro_array = e1_iro_arr; } else if (CHIP_IS_E1H(sc)) { ecore_init_e1h_firmware(sc); sc->iro_array = e1h_iro_arr; } else if (!CHIP_IS_E1x(sc)) { ecore_init_e2_firmware(sc); sc->iro_array = e2_iro_arr; } else { BLOGE(sc, "Unsupported chip revision\n"); return (-1); } return (0); } static void bxe_release_firmware(struct bxe_softc *sc) { /* Do nothing */ return; } static int ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len) { /* XXX : Implement... 
*/ BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); return (FALSE); } static void ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { bxe_reg_wr_ind(sc, addr, val); } static void ecore_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { bxe_write_dmae_phys_len(sc, phys_addr, addr, len); } void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, size_t size, uint32_t *data) { uint8_t i; for (i = 0; i < size/4; i++) { REG_WR(sc, addr + (i * 4), data[i]); } } /* * character device - ioctl interface definitions */ #include "bxe_dump.h" #include "bxe_ioctl.h" #include static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); static struct cdevsw bxe_cdevsw = { .d_version = D_VERSION, .d_ioctl = bxe_eioctl, .d_name = "bxecnic", }; #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1)) #define DUMP_ALL_PRESETS 0x1FFF #define DUMP_MAX_PRESETS 13 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) #define IS_REG_IN_PRESET(presets, idx) \ ((presets & (1 << (idx-1))) == (1 << (idx-1))) static int bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) { if (CHIP_IS_E1(sc)) return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(sc)) return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(sc)) return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(sc)) return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(sc)) return dump_num_registers[4][preset-1]; else return 0; } static int bxe_get_total_regs_len32(struct bxe_softc *sc) { uint32_t preset_idx; int regdump_len32 = 0; /* Calculate the total preset regs length */ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); } return regdump_len32; } static const uint32_t * __bxe_get_page_addr_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_vals_e2; else if (CHIP_IS_E3(sc)) return page_vals_e3; else return NULL; } static uint32_t __bxe_get_page_reg_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_MODE_VALUES_E2; else if (CHIP_IS_E3(sc)) return PAGE_MODE_VALUES_E3; else return 0; } static const uint32_t * __bxe_get_page_write_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_write_regs_e2; else if (CHIP_IS_E3(sc)) return page_write_regs_e3; else return NULL; } static uint32_t __bxe_get_page_write_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_WRITE_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_WRITE_REGS_E3; else return 0; } static const struct reg_addr * __bxe_get_page_read_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_read_regs_e2; else if (CHIP_IS_E3(sc)) return page_read_regs_e3; else return NULL; } static uint32_t __bxe_get_page_read_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_READ_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_READ_REGS_E3; else return 0; } static bool bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(reg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(reg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(reg_info->chips); else if (CHIP_IS_E3A0(sc)) return 
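/*
 * The preset test used throughout the dump code is a 1-based bit check:
 * IS_REG_IN_PRESET(presets, idx) tests bit (idx - 1), so e.g. preset 3
 * corresponds to mask 0x4. The chips field checked here works the same
 * way with the DUMP_CHIP_* bits, recording which chip families actually
 * implement a given register.
 */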
IS_E3A0_REG(reg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(reg_info->chips); else return 0; } static bool bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(wreg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(wreg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(wreg_info->chips); else if (CHIP_IS_E3A0(sc)) return IS_E3A0_REG(wreg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(wreg_info->chips); else return 0; } /** * bxe_read_pages_regs - read "paged" registers * * @bp device handle * @p output buffer * * Reads "paged" memories: memories that may only be read by first writing to a * specific address ("write address") and then reading from a specific address * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. */ static void bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, k, n; /* addresses of the paged registers */ const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); /* number of paged registers */ int num_pages = __bxe_get_page_reg_num(sc); /* write addresses */ const uint32_t *write_addr = __bxe_get_page_write_ar(sc); /* number of write addresses */ int write_num = __bxe_get_page_write_num(sc); /* read addresses info */ const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); /* number of read addresses */ int read_num = __bxe_get_page_read_num(sc); uint32_t addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(sc, write_addr[j], page_addr[i]); for (k = 0; k < read_num; k++) { if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { size = read_addr[k].size; for (n = 0; n < size; n++) { addr = read_addr[k].addr + n*4; *p++ = REG_RD(sc, addr); } } } } } return; } static int bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, addr; const struct wreg_addr *wreg_addr_p = NULL; if (CHIP_IS_E1(sc)) wreg_addr_p = &wreg_addr_e1; else if (CHIP_IS_E1H(sc)) wreg_addr_p = &wreg_addr_e1h; else if (CHIP_IS_E2(sc)) wreg_addr_p = &wreg_addr_e2; else if (CHIP_IS_E3A0(sc)) wreg_addr_p = &wreg_addr_e3; else if (CHIP_IS_E3B0(sc)) wreg_addr_p = &wreg_addr_e3b0; else return (-1); /* Read the idle_chk registers */ for (i = 0; i < IDLE_REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { for (j = 0; j < idle_reg_addrs[i].size; j++) *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); } } /* Read the regular registers */ for (i = 0; i < REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, ®_addrs[i]) && IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); } } /* Read the CAM registers */ if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { for (i = 0; i < wreg_addr_p->size; i++) { *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); /* In case of wreg_addr register, read additional registers from read_regs array */ for (j = 0; j < wreg_addr_p->read_regs_count; j++) { addr = *(wreg_addr_p->read_regs); *p++ = REG_RD(sc, addr + j*4); } } } /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { /* Read "paged" registers */ bxe_read_pages_regs(sc, p, preset); } return 0; } static int bxe_grc_dump(struct bxe_softc *sc) { int rval = 0; uint32_t preset_idx; uint8_t *buf; uint32_t size; struct dump_header *d_hdr; if 
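/*
 * Layout note: the dump assembled here is a struct dump_header followed
 * by the register blocks for presets 1..DUMP_MAX_PRESETS, with the IOR
 * presets (2, 5, 8 and 11) skipped; bxe_get_preset_regs_len() sizes each
 * block per chip family, which is also how bxe_add_cdev() sizes the
 * grc_dump buffer up front.
 */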
(sc->grcdump_done) return (rval); ecore_disable_blocks_parity(sc); buf = sc->grc_dump; d_hdr = sc->grc_dump; d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; d_hdr->version = BNX2X_DUMP_VERSION; d_hdr->preset = DUMP_ALL_PRESETS; if (CHIP_IS_E1(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1; } else if (CHIP_IS_E1H(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1H; } else if (CHIP_IS_E2(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E2 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3A0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3B0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } buf += sizeof(struct dump_header); for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { /* Skip presets with IOR */ if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || (preset_idx == 11)) continue; rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); if (rval) break; size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); buf += size; } ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); sc->grcdump_done = 1; return (rval); } static int bxe_add_cdev(struct bxe_softc *sc) { int grc_dump_size; grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); if (sc->grc_dump == NULL) return (-1); sc->ioctl_dev = make_dev(&bxe_cdevsw, sc->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(sc->ifp)); if (sc->ioctl_dev == NULL) { free(sc->grc_dump, M_DEVBUF); return (-1); } sc->ioctl_dev->si_drv1 = sc; return (0); } static void bxe_del_cdev(struct bxe_softc *sc) { if (sc->ioctl_dev != NULL) destroy_dev(sc->ioctl_dev); if (sc->grc_dump != NULL) free(sc->grc_dump, M_DEVBUF); return; } static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct bxe_softc *sc; int rval = 0; device_t pci_dev; bxe_grcdump_t *dump = NULL; int grc_dump_size; if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) return (ENXIO); pci_dev = sc->dev; dump = (bxe_grcdump_t *)data; switch (cmd) { case BXE_GRC_DUMP_SIZE: dump->pci_func = sc->pcie_func; dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); break; case BXE_GRC_DUMP: grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) || (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) { rval = EINVAL; break; } dump->grcdump_dwords = grc_dump_size >> 2; rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); sc->grcdump_done = 0; break; default: break; } return (rval); } Index: head/sys/dev/drm2/drm_fb_helper.c =================================================================== --- head/sys/dev/drm2/drm_fb_helper.c (revision 296271) +++ head/sys/dev/drm2/drm_fb_helper.c (revision 296272) @@ -1,1468 +1,1468 @@ /* * Copyright (c) 2006-2009 Red Hat Inc.
* Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie * * DRM framebuffer helper functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Dave Airlie * Jesse Barnes */ #include __FBSDID("$FreeBSD$"); #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_DESCRIPTION("DRM KMS helper"); MODULE_LICENSE("GPL and additional rights"); static DRM_LIST_HEAD(kernel_fb_helper_list); #include #include #include struct vt_kms_softc { struct drm_fb_helper *fb_helper; struct task fb_mode_task; }; /* Call restore out of vt(9) locks. */ static void vt_restore_fbdev_mode(void *arg, int pending) { struct drm_fb_helper *fb_helper; struct vt_kms_softc *sc; sc = (struct vt_kms_softc *)arg; fb_helper = sc->fb_helper; sx_xlock(&fb_helper->dev->mode_config.mutex); drm_fb_helper_restore_fbdev_mode(fb_helper); sx_xunlock(&fb_helper->dev->mode_config.mutex); } static int vt_kms_postswitch(void *arg) { struct vt_kms_softc *sc; sc = (struct vt_kms_softc *)arg; if (!kdb_active && panicstr == NULL) - taskqueue_enqueue_fast(taskqueue_thread, &sc->fb_mode_task); + taskqueue_enqueue(taskqueue_thread, &sc->fb_mode_task); else drm_fb_helper_restore_fbdev_mode(sc->fb_helper); return (0); } struct fb_info * framebuffer_alloc() { struct fb_info *info; struct vt_kms_softc *sc; info = malloc(sizeof(*info), DRM_MEM_KMS, M_WAITOK | M_ZERO); sc = malloc(sizeof(*sc), DRM_MEM_KMS, M_WAITOK | M_ZERO); TASK_INIT(&sc->fb_mode_task, 0, vt_restore_fbdev_mode, sc); info->fb_priv = sc; info->enter = &vt_kms_postswitch; return (info); } void framebuffer_release(struct fb_info *info) { free(info->fb_priv, DRM_MEM_KMS); free(info, DRM_MEM_KMS); } static int fb_get_options(const char *connector_name, char **option) { char tunable[64]; /* * A user may use loader tunables to set a specific mode for the * console. Tunables are read in the following order: * 1. kern.vt.fb.modes.$connector_name * 2. 
kern.vt.fb.default_mode * * Example of a mode specific to the LVDS connector: * kern.vt.fb.modes.LVDS="1024x768" * * Example of a mode applied to all connectors not having a * connector-specific mode: * kern.vt.fb.default_mode="640x480" */ snprintf(tunable, sizeof(tunable), "kern.vt.fb.modes.%s", connector_name); DRM_INFO("Connector %s: get mode from tunables:\n", connector_name); DRM_INFO("  - %s\n", tunable); DRM_INFO("  - kern.vt.fb.default_mode\n"); *option = kern_getenv(tunable); if (*option == NULL) *option = kern_getenv("kern.vt.fb.default_mode"); return (*option != NULL ? 0 : -ENOENT); } /** * DOC: fbdev helpers * * The fb helper functions are useful to provide an fbdev on top of a drm kernel * mode setting driver. They can be used mostly independently of the crtc * helper functions used by many drivers to implement the kernel mode setting * interfaces. */ /* simple single crtc case helper function */ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; int i; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct drm_fb_helper_connector *fb_helper_connector; fb_helper_connector = malloc(sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper_connector) goto fail; fb_helper_connector->connector = connector; fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; } return 0; fail: for (i = 0; i < fb_helper->connector_count; i++) { free(fb_helper->connector_info[i], DRM_MEM_KMS); fb_helper->connector_info[i] = NULL; } fb_helper->connector_count = 0; return -ENOMEM; } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) { struct drm_fb_helper_connector *fb_helper_conn; int i; for (i = 0; i < fb_helper->connector_count; i++) { struct drm_cmdline_mode *mode; struct drm_connector *connector; char *option = NULL; fb_helper_conn = fb_helper->connector_info[i]; connector = fb_helper_conn->connector; mode = &fb_helper_conn->cmdline_mode; /* do something on return - turn off connector maybe */ if (fb_get_options(drm_get_connector_name(connector), &option)) continue; if (drm_mode_parse_command_line_for_connector(option, connector, mode)) { if (mode->force) { const char *s; switch (mode->force) { case DRM_FORCE_OFF: s = "OFF"; break; case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; default: case DRM_FORCE_ON: s = "ON"; break; } DRM_INFO("forcing %s connector %s\n", drm_get_connector_name(connector), s); connector->force = mode->force; } DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", drm_get_connector_name(connector), mode->xres, mode->yres, mode->refresh_specified ? mode->refresh : 60, mode->rb ? " reduced blanking" : "", mode->margins ? " with margins" : "", mode->interlace ?
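/*
 * Usage sketch for the tunables consumed by fb_get_options() above
 * (connector names illustrative), set from loader.conf:
 *
 *   kern.vt.fb.modes.LVDS="1024x768"
 *   kern.vt.fb.default_mode="800x600"
 *
 * The connector-specific tunable wins; the default is the fallback for
 * every connector without one.
 */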
" interlaced" : ""); } freeenv(option); } return 0; } #if 0 && defined(FREEBSD_NOTYET) static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) { uint16_t *r_base, *g_base, *b_base; int i; r_base = crtc->gamma_store; g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; for (i = 0; i < crtc->gamma_size; i++) helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i); } static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) { uint16_t *r_base, *g_base, *b_base; if (crtc->funcs->gamma_set == NULL) return; r_base = crtc->gamma_store; g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); } int drm_fb_helper_debug_enter(struct fb_info *info) { struct drm_fb_helper *helper = info->par; struct drm_crtc_helper_funcs *funcs; int i; if (list_empty(&kernel_fb_helper_list)) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; if (!mode_set->crtc->enabled) continue; funcs = mode_set->crtc->helper_private; drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, mode_set->y, ENTER_ATOMIC_MODE_SET); } } return 0; } EXPORT_SYMBOL(drm_fb_helper_debug_enter); /* Find the real fb for a given fb helper CRTC */ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_crtc *c; list_for_each_entry(c, &dev->mode_config.crtc_list, head) { if (crtc->base.id == c->base.id) return c->fb; } return NULL; } int drm_fb_helper_debug_leave(struct fb_info *info) { struct drm_fb_helper *helper = info->par; struct drm_crtc *crtc; struct drm_crtc_helper_funcs *funcs; struct drm_framebuffer *fb; int i; for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; crtc = mode_set->crtc; funcs = crtc->helper_private; fb = drm_mode_config_fb(crtc); if (!crtc->enabled) continue; if (!fb) { DRM_ERROR("no fb to restore??\n"); continue; } drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, crtc->y, LEAVE_ATOMIC_MODE_SET); } return 0; } EXPORT_SYMBOL(drm_fb_helper_debug_leave); #endif /* FREEBSD_NOTYET */ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) { bool error = false; int i, ret; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; ret = mode_set->crtc->funcs->set_config(mode_set); if (ret) error = true; } return error; } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); static bool drm_fb_helper_force_kernel_mode(void) { bool ret, error = false; struct drm_fb_helper *helper; if (list_empty(&kernel_fb_helper_list)) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) continue; ret = drm_fb_helper_restore_fbdev_mode(helper); if (ret) error = true; } return error; } #if 0 && defined(FREEBSD_NOTYET) int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, void *panic_str) { /* * It's a waste of time and effort to switch back to text console * if the kernel should reboot before panic messages can be seen. 
*/ if (panic_timeout < 0) return 0; pr_err("panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); } EXPORT_SYMBOL(drm_fb_helper_panic); static struct notifier_block paniced = { .notifier_call = drm_fb_helper_panic, }; #endif /* FREEBSD_NOTYET */ /** * drm_fb_helper_restore - restore the framebuffer console (kernel) config * * Restores the kernel's fbcon mode; used for the lastclose and panic paths. */ void drm_fb_helper_restore(void) { bool ret; ret = drm_fb_helper_force_kernel_mode(); if (ret == true) DRM_ERROR("Failed to restore crtc configuration\n"); } EXPORT_SYMBOL(drm_fb_helper_restore); #ifdef __linux__ #ifdef CONFIG_MAGIC_SYSRQ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) { drm_fb_helper_restore(); } static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); static void drm_fb_helper_sysrq(int dummy1) { schedule_work(&drm_fb_helper_restore_work); } static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { .handler = drm_fb_helper_sysrq, .help_msg = "force-fb(V)", .action_msg = "Restore framebuffer console", }; #else static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; #endif #endif #if 0 && defined(FREEBSD_NOTYET) static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; struct drm_connector *connector; int i, j; /* * For each CRTC in this fb, turn the connectors on/off. */ sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; if (!crtc->enabled) continue; /* Walk the connectors & encoders on this fb turning them on/off */ for (j = 0; j < fb_helper->connector_count; j++) { connector = fb_helper->connector_info[j]->connector; connector->funcs->dpms(connector, dpms_mode); drm_object_property_set_value(&connector->base, dev->mode_config.dpms_property, dpms_mode); } } sx_xunlock(&dev->mode_config.mutex); } int drm_fb_helper_blank(int blank, struct fb_info *info) { switch (blank) { /* Display: On; HSync: On, VSync: On */ case FB_BLANK_UNBLANK: drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON); break; /* Display: Off; HSync: On, VSync: On */ case FB_BLANK_NORMAL: drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); break; /* Display: Off; HSync: Off, VSync: On */ case FB_BLANK_HSYNC_SUSPEND: drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); break; /* Display: Off; HSync: On, VSync: Off */ case FB_BLANK_VSYNC_SUSPEND: drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND); break; /* Display: Off; HSync: Off, VSync: Off */ case FB_BLANK_POWERDOWN: drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF); break; } return 0; } EXPORT_SYMBOL(drm_fb_helper_blank); #endif /* FREEBSD_NOTYET */ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) { int i; for (i = 0; i < helper->connector_count; i++) free(helper->connector_info[i], DRM_MEM_KMS); free(helper->connector_info, DRM_MEM_KMS); for (i = 0; i < helper->crtc_count; i++) { free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS); if (helper->crtc_info[i].mode_set.mode) drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); } free(helper->crtc_info, DRM_MEM_KMS); } int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *fb_helper, int crtc_count, int max_conn_count) { struct drm_crtc *crtc; int i; fb_helper->dev = dev; INIT_LIST_HEAD(&fb_helper->kernel_fb_list); fb_helper->crtc_info = malloc(crtc_count * sizeof(struct drm_fb_helper_crtc),
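/*
 * Allocation scheme here: one crtc_info slot per CRTC, a connector_info
 * pointer array sized for num_connector, and a per-CRTC connectors array
 * of max_conn_count entries; any allocation failure unwinds through
 * drm_fb_helper_crtc_free().
 */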
DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper->crtc_info) return -ENOMEM; fb_helper->crtc_count = crtc_count; fb_helper->connector_info = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper->connector_info) { free(fb_helper->crtc_info, DRM_MEM_KMS); return -ENOMEM; } fb_helper->connector_count = 0; for (i = 0; i < crtc_count; i++) { fb_helper->crtc_info[i].mode_set.connectors = malloc(max_conn_count * sizeof(struct drm_connector *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!fb_helper->crtc_info[i].mode_set.connectors) goto out_free; fb_helper->crtc_info[i].mode_set.num_connectors = 0; } i = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { fb_helper->crtc_info[i].mode_set.crtc = crtc; i++; } return 0; out_free: drm_fb_helper_crtc_free(fb_helper); return -ENOMEM; } EXPORT_SYMBOL(drm_fb_helper_init); void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) { if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); #if 0 && defined(FREEBSD_NOTYET) if (list_empty(&kernel_fb_helper_list)) { pr_info("drm: unregistered panic notifier\n"); atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } #endif /* FREEBSD_NOTYET */ } drm_fb_helper_crtc_free(fb_helper); } EXPORT_SYMBOL(drm_fb_helper_fini); #if 0 && defined(FREEBSD_NOTYET) static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int pindex; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *palette; u32 value; /* place color in psuedopalette */ if (regno > 16) return -EINVAL; palette = (u32 *)info->pseudo_palette; red >>= (16 - info->var.red.length); green >>= (16 - info->var.green.length); blue >>= (16 - info->var.blue.length); value = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset); if (info->var.transp.length > 0) { u32 mask = (1 << info->var.transp.length) - 1; mask <<= info->var.transp.offset; value |= mask; } palette[regno] = value; return 0; } pindex = regno; if (fb->bits_per_pixel == 16) { pindex = regno << 3; if (fb->depth == 16 && regno > 63) return -EINVAL; if (fb->depth == 15 && regno > 31) return -EINVAL; if (fb->depth == 16) { u16 r, g, b; int i; if (regno < 32) { for (i = 0; i < 8; i++) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex + i); } fb_helper->funcs->gamma_get(crtc, &r, &g, &b, pindex >> 1); for (i = 0; i < 4; i++) fb_helper->funcs->gamma_set(crtc, r, green, b, (pindex >> 1) + i); } } if (fb->depth != 16) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); return 0; } int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; int i, j, rc = 0; int start; for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; crtc_funcs = crtc->helper_private; red = cmap->red; green = cmap->green; blue = cmap->blue; transp = cmap->transp; start = cmap->start; for (j = 0; j < cmap->len; j++) { u16 hred, hgreen, hblue, htransp = 0xffff; hred = *red++; hgreen = *green++; hblue = *blue++; if (transp) htransp = *transp++; rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); if (rc) return rc; } crtc_funcs->load_lut(crtc); } return rc; } 
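/*
 * For truecolor visuals, setcolreg() above packs the requested color into
 * the software pseudo-palette rather than a hardware LUT: each 16-bit
 * component is first downshifted to the channel width and then placed at
 * the channel offset, so with an (illustrative) XRGB8888 layout a
 * mid-grey entry becomes
 *
 *   value = (0x80 << 16) | (0x80 << 8) | 0x80;
 */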
EXPORT_SYMBOL(drm_fb_helper_setcmap); int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int depth; if (var->pixclock != 0 || in_dbg_master()) return -EINVAL; /* Need to resize the fb object !!! */ if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual, fb->width, fb->height, fb->bits_per_pixel); return -EINVAL; } switch (var->bits_per_pixel) { case 16: depth = (var->green.length == 6) ? 16 : 15; break; case 32: depth = (var->transp.length > 0) ? 32 : 24; break; default: depth = var->bits_per_pixel; break; } switch (depth) { case 8: var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 15: var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 5; var->blue.length = 5; var->transp.length = 1; var->transp.offset = 15; break; case 16: var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; var->transp.length = 0; var->transp.offset = 0; break; case 24: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 32: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 8; var->transp.offset = 24; break; default: return -EINVAL; } return 0; } EXPORT_SYMBOL(drm_fb_helper_check_var); /* this will let fbcon do the mode init */ int drm_fb_helper_set_par(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct fb_var_screeninfo *var = &info->var; struct drm_crtc *crtc; int ret; int i; if (var->pixclock != 0) { DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; } sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); if (ret) { sx_xunlock(&dev->mode_config.mutex); return ret; } } sx_xunlock(&dev->mode_config.mutex); if (fb_helper->delayed_hotplug) { fb_helper->delayed_hotplug = false; drm_fb_helper_hotplug_event(fb_helper); } return 0; } EXPORT_SYMBOL(drm_fb_helper_set_par); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; struct drm_mode_set *modeset; struct drm_crtc *crtc; int ret = 0; int i; sx_xlock(&dev->mode_config.mutex); for (i = 0; i < fb_helper->crtc_count; i++) { crtc = fb_helper->crtc_info[i].mode_set.crtc; modeset = &fb_helper->crtc_info[i].mode_set; modeset->x = var->xoffset; modeset->y = var->yoffset; if (modeset->num_connectors) { ret = crtc->funcs->set_config(modeset); if (!ret) { info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; } } } sx_xunlock(&dev->mode_config.mutex); return ret; } EXPORT_SYMBOL(drm_fb_helper_pan_display); 
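/*
 * drm_fb_helper_check_var() above deduces depth from bpp plus the channel
 * lengths: 16 bpp is RGB565 (depth 16) when green.length == 6 and
 * XRGB1555 (depth 15) otherwise, while 32 bpp is ARGB8888 (depth 32) only
 * when transp.length > 0, else XRGB8888 (depth 24); the switch on depth
 * then rewrites the per-channel offsets/lengths to match, e.g. offsets
 * 11/5/0 with lengths 5/6/5 for RGB565.
 */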
#endif /* FREEBSD_NOTYET */ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, int preferred_bpp) { int new_fb = 0; int crtc_count = 0; int i; struct fb_info *info; struct drm_fb_helper_surface_size sizes; int gamma_size = 0; #if defined(__FreeBSD__) device_t kdev; #endif memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); sizes.surface_depth = 24; sizes.surface_bpp = 32; sizes.fb_width = (unsigned)-1; sizes.fb_height = (unsigned)-1; /* if driver picks 8 or 16 by default use that for both depth/bpp */ if (preferred_bpp != sizes.surface_bpp) sizes.surface_depth = sizes.surface_bpp = preferred_bpp; /* first up get a count of crtcs now in use and new min/maxes width/heights */ for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; struct drm_cmdline_mode *cmdline_mode; cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->bpp_specified) { switch (cmdline_mode->bpp) { case 8: sizes.surface_depth = sizes.surface_bpp = 8; break; case 15: sizes.surface_depth = 15; sizes.surface_bpp = 16; break; case 16: sizes.surface_depth = sizes.surface_bpp = 16; break; case 24: sizes.surface_depth = sizes.surface_bpp = 24; break; case 32: sizes.surface_depth = 24; sizes.surface_bpp = 32; break; } break; } } crtc_count = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_display_mode *desired_mode; desired_mode = fb_helper->crtc_info[i].desired_mode; if (desired_mode) { if (gamma_size == 0) gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; if (desired_mode->hdisplay < sizes.fb_width) sizes.fb_width = desired_mode->hdisplay; if (desired_mode->vdisplay < sizes.fb_height) sizes.fb_height = desired_mode->vdisplay; if (desired_mode->hdisplay > sizes.surface_width) sizes.surface_width = desired_mode->hdisplay; if (desired_mode->vdisplay > sizes.surface_height) sizes.surface_height = desired_mode->vdisplay; crtc_count++; } } if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { /* hmm everyone went away - assume VGA cable just fell out and will come back later. 
*/ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); sizes.fb_width = sizes.surface_width = 1024; sizes.fb_height = sizes.surface_height = 768; } /* push down into drivers */ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); if (new_fb < 0) return new_fb; info = fb_helper->fbdev; kdev = fb_helper->dev->dev; info->fb_video_dev = device_get_parent(kdev); /* set the fb pointer */ for (i = 0; i < fb_helper->crtc_count; i++) fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; #if defined(__FreeBSD__) if (new_fb) { int ret; info->fb_fbd_dev = device_add_child(kdev, "fbd", device_get_unit(kdev)); if (info->fb_fbd_dev != NULL) ret = device_probe_and_attach(info->fb_fbd_dev); else ret = ENODEV; #ifdef DEV_VT if (ret != 0) DRM_ERROR("Failed to attach fbd device: %d\n", ret); #endif } #else if (new_fb) { info->var.pixclock = 0; if (register_framebuffer(info) < 0) return -EINVAL; dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", info->node, info->fix.id); } else { drm_fb_helper_set_par(info); } #endif #if 0 && defined(FREEBSD_NOTYET) /* Switch back to kernel console on panic */ /* multi card linked list maybe */ if (list_empty(&kernel_fb_helper_list)) { dev_info(fb_helper->dev->dev, "registered panic notifier\n"); atomic_notifier_chain_register(&panic_notifier_list, &paniced); register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); } #endif /* FREEBSD_NOTYET */ if (new_fb) list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); return 0; } EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, uint32_t depth) { info->fb_stride = pitch; return; } EXPORT_SYMBOL(drm_fb_helper_fill_fix); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) { struct drm_framebuffer *fb = fb_helper->fb; struct vt_kms_softc *sc; info->fb_name = device_get_nameunit(fb_helper->dev->dev); info->fb_width = fb->width; info->fb_height = fb->height; info->fb_depth = fb->bits_per_pixel; sc = (struct vt_kms_softc *)info->fb_priv; sc->fb_helper = fb_helper; } EXPORT_SYMBOL(drm_fb_helper_fill_var); static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper, uint32_t maxX, uint32_t maxY) { struct drm_connector *connector; int count = 0; int i; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; count += connector->funcs->fill_modes(connector, maxX, maxY); } return count; } static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) { struct drm_display_mode *mode; list_for_each_entry(mode, &fb_connector->connector->modes, head) { if (drm_mode_width(mode) > width || drm_mode_height(mode) > height) continue; if (mode->type & DRM_MODE_TYPE_PREFERRED) return mode; } return NULL; } static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) { struct drm_cmdline_mode *cmdline_mode; cmdline_mode = &fb_connector->cmdline_mode; return cmdline_mode->specified; } static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, int width, int height) { struct drm_cmdline_mode *cmdline_mode; struct drm_display_mode *mode = NULL; cmdline_mode = &fb_helper_conn->cmdline_mode; if (cmdline_mode->specified == false) return mode; /* attempt to find a matching mode in the list of modes * we have gotten so far, if not add a CVT mode that conforms */ if (cmdline_mode->rb || cmdline_mode->margins) goto 
create_mode; list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { /* check width/height */ if (mode->hdisplay != cmdline_mode->xres || mode->vdisplay != cmdline_mode->yres) continue; if (cmdline_mode->refresh_specified) { if (mode->vrefresh != cmdline_mode->refresh) continue; } if (cmdline_mode->interlace) { if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) continue; } return mode; } create_mode: mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev, cmdline_mode); list_add(&mode->head, &fb_helper_conn->connector->modes); return mode; } static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { bool enable; if (strict) enable = connector->status == connector_status_connected; else enable = connector->status != connector_status_disconnected; return enable; } static void drm_enable_connectors(struct drm_fb_helper *fb_helper, bool *enabled) { bool any_enabled = false; struct drm_connector *connector; int i = 0; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, true); DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, enabled[i] ? "yes" : "no"); any_enabled |= enabled[i]; } if (any_enabled) return; for (i = 0; i < fb_helper->connector_count; i++) { connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, false); } } static bool drm_target_cloned(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, bool *enabled, int width, int height) { int count, i, j; bool can_clone = false; struct drm_fb_helper_connector *fb_helper_conn; struct drm_display_mode *dmt_mode, *mode; /* only contemplate cloning in the single crtc case */ if (fb_helper->crtc_count > 1) return false; count = 0; for (i = 0; i < fb_helper->connector_count; i++) { if (enabled[i]) count++; } /* only contemplate cloning if more than one connector is enabled */ if (count <= 1) return false; /* check the command line or if nothing common pick 1024x768 */ can_clone = true; for (i = 0; i < fb_helper->connector_count; i++) { if (!enabled[i]) continue; fb_helper_conn = fb_helper->connector_info[i]; modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { can_clone = false; break; } for (j = 0; j < i; j++) { if (!enabled[j]) continue; if (!drm_mode_equal(modes[j], modes[i])) can_clone = false; } } if (can_clone) { DRM_DEBUG_KMS("can clone using command line\n"); return true; } /* try and find a 1024x768 mode on each connector */ can_clone = true; dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false); for (i = 0; i < fb_helper->connector_count; i++) { if (!enabled[i]) continue; fb_helper_conn = fb_helper->connector_info[i]; list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { if (drm_mode_equal(mode, dmt_mode)) modes[i] = mode; } if (!modes[i]) can_clone = false; } if (can_clone) { DRM_DEBUG_KMS("can clone using 1024x768\n"); return true; } DRM_INFO("kms: can't enable cloning when we probably wanted to.\n"); return false; } static bool drm_target_preferred(struct drm_fb_helper *fb_helper, struct drm_display_mode **modes, bool *enabled, int width, int height) { struct drm_fb_helper_connector *fb_helper_conn; int i; for (i = 0; i < fb_helper->connector_count; i++) { fb_helper_conn = fb_helper->connector_info[i]; if (enabled[i] == false) continue; DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", fb_helper_conn->connector->base.id); /* got for command line mode first 
*/ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); if (!modes[i]) { DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", fb_helper_conn->connector->base.id); modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); } /* No preferred modes, pick one off the list */ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) { list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head) break; } DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : "none"); } return true; } static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **best_crtcs, struct drm_display_mode **modes, int n, int width, int height) { int c, o; struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; struct drm_connector_helper_funcs *connector_funcs; struct drm_encoder *encoder; struct drm_fb_helper_crtc *best_crtc; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; struct drm_fb_helper_connector *fb_helper_conn; if (n == fb_helper->connector_count) return 0; fb_helper_conn = fb_helper->connector_info[n]; connector = fb_helper_conn->connector; best_crtcs[n] = NULL; best_crtc = NULL; best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height); if (modes[n] == NULL) return best_score; crtcs = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!crtcs) return best_score; my_score = 1; if (connector->status == connector_status_connected) my_score++; if (drm_has_cmdline_mode(fb_helper_conn)) my_score++; if (drm_has_preferred_mode(fb_helper_conn, width, height)) my_score++; connector_funcs = connector->helper_private; encoder = connector_funcs->best_encoder(connector); if (!encoder) goto out; /* select a crtc for this connector and then attempt to configure remaining connectors */ for (c = 0; c < fb_helper->crtc_count; c++) { crtc = &fb_helper->crtc_info[c]; if ((encoder->possible_crtcs & (1 << c)) == 0) continue; for (o = 0; o < n; o++) if (best_crtcs[o] == crtc) break; if (o < n) { /* ignore cloning unless only a single crtc */ if (fb_helper->crtc_count > 1) continue; if (!drm_mode_equal(modes[o], modes[n])) continue; } crtcs[n] = crtc; memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *)); score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1, width, height); if (score > best_score) { best_crtc = crtc; best_score = score; memcpy(best_crtcs, crtcs, dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *)); } } out: free(crtcs, DRM_MEM_KMS); return best_score; } static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_fb_helper_crtc **crtcs; struct drm_display_mode **modes; struct drm_mode_set *modeset; bool *enabled; int width, height; int i, ret; DRM_DEBUG_KMS("\n"); width = dev->mode_config.max_width; height = dev->mode_config.max_height; crtcs = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); modes = malloc(dev->mode_config.num_connector * sizeof(struct drm_display_mode *), DRM_MEM_KMS, M_NOWAIT | M_ZERO); enabled = malloc(dev->mode_config.num_connector * sizeof(bool), DRM_MEM_KMS, M_NOWAIT | M_ZERO); if (!crtcs || !modes || !enabled) { DRM_ERROR("Memory allocation failed\n"); goto out; } drm_enable_connectors(fb_helper, enabled); ret = drm_target_cloned(fb_helper, modes, enabled, width, height); if (!ret) { ret = drm_target_preferred(fb_helper, modes, enabled, 
width, height);
		if (!ret)
			DRM_ERROR("Unable to find initial modes\n");
	}

	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);

	drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);

	/* need to set the modesets up here for use later */
	/* fill out the connector<->crtc mappings into the modesets */
	for (i = 0; i < fb_helper->crtc_count; i++) {
		modeset = &fb_helper->crtc_info[i].mode_set;
		modeset->num_connectors = 0;
	}

	for (i = 0; i < fb_helper->connector_count; i++) {
		struct drm_display_mode *mode = modes[i];
		struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
		modeset = &fb_crtc->mode_set;

		if (mode && fb_crtc) {
			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
			    mode->name, fb_crtc->mode_set.crtc->base.id);
			fb_crtc->desired_mode = mode;
			if (modeset->mode)
				drm_mode_destroy(dev, modeset->mode);
			modeset->mode = drm_mode_duplicate(dev,
			    fb_crtc->desired_mode);
			modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
		}
	}

out:
	free(crtcs, DRM_MEM_KMS);
	free(modes, DRM_MEM_KMS);
	free(enabled, DRM_MEM_KMS);
}

/**
 * drm_fb_helper_initial_config - setup a sane initial connector configuration
 * @fb_helper: fb_helper device struct
 * @bpp_sel: bpp value to use for the framebuffer configuration
 *
 * LOCKING:
 * Called at init time by the driver to set up the @fb_helper initial
 * configuration, must take the mode config lock.
 *
 * Scans the CRTCs and connectors and tries to put together an initial setup.
 * At the moment, this is a cloned configuration across all heads with
 * a new framebuffer object as the backing store.
 *
 * RETURNS:
 * Zero if everything went ok, nonzero otherwise.
 */
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
	struct drm_device *dev = fb_helper->dev;
	int count = 0;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(fb_helper->dev);

	drm_fb_helper_parse_command_line(fb_helper);

	count = drm_fb_helper_probe_connector_modes(fb_helper,
	    dev->mode_config.max_width, dev->mode_config.max_height);
	/*
	 * we shouldn't end up with no modes here.
	 */
	if (count == 0)
		dev_info(fb_helper->dev->dev,
		    "No connectors reported connected with modes\n");
	drm_setup_crtcs(fb_helper);

	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);

/**
 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
 *                               probing all the outputs attached to the fb
 * @fb_helper: the drm_fb_helper
 *
 * LOCKING:
 * Called at runtime, must take mode config lock.
 *
 * Scan the connectors attached to the fb_helper and try to put together a
 * setup after notification of a change in output configuration.
 *
 * RETURNS:
 * 0 on success and a non-zero error code otherwise.
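 *
 * (editor's aside, not part of this commit: drm_pick_crtcs() above is a
 * depth-first search over connector -> CRTC assignments; each placeable
 * connector adds a score, already-used CRTCs are skipped, and the best
 * complete assignment wins.  A toy userland model of that recursion that
 * only tracks the best score; all names and scores are hypothetical:)
 */

#include <stdio.h>

#define NCONN 3			/* connectors to place */
#define NCRTC 2			/* available CRTCs */

/* possible_crtcs[i] is a bitmask of CRTCs connector i may use */
static const unsigned possible_crtcs[NCONN] = { 0x1, 0x3, 0x2 };
/* per-connector score when successfully assigned */
static const int score[NCONN] = { 2, 3, 1 };

static int
best_score(int n, int used)
{
	int best, c, s;

	if (n == NCONN)
		return (0);

	/* option 1: leave connector n unassigned */
	best = best_score(n + 1, used);

	/* option 2: try each CRTC it can reach that is still free */
	for (c = 0; c < NCRTC; c++) {
		if (!(possible_crtcs[n] & (1u << c)) || (used & (1 << c)))
			continue;
		s = score[n] + best_score(n + 1, used | (1 << c));
		if (s > best)
			best = s;
	}
	return (best);
}

int
main(void)
{
	printf("best achievable score: %d\n", best_score(0, 0));
	return (0);
}

/*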
*/ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; int bound = 0, crtcs_bound = 0; struct drm_crtc *crtc; if (!fb_helper->fb) return 0; sx_xlock(&dev->mode_config.mutex); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound++; if (crtc->fb == fb_helper->fb) bound++; } if (bound < crtcs_bound) { fb_helper->delayed_hotplug = true; sx_xunlock(&dev->mode_config.mutex); return 0; } DRM_DEBUG_KMS("\n"); max_width = fb_helper->fb->width; max_height = fb_helper->fb->height; bpp_sel = fb_helper->fb->bits_per_pixel; count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); sx_xunlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); Index: head/sys/dev/hyperv/vmbus/hv_connection.c =================================================================== --- head/sys/dev/hyperv/vmbus/hv_connection.c (revision 296271) +++ head/sys/dev/hyperv/vmbus/hv_connection.c (revision 296272) @@ -1,410 +1,410 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include "hv_vmbus_priv.h" /* * Globals */ hv_vmbus_connection hv_vmbus_g_connection = { .connect_state = HV_DISCONNECTED, .next_gpadl_handle = 0xE1E10, }; uint32_t hv_vmbus_protocal_version = HV_VMBUS_VERSION_WS2008; static uint32_t hv_vmbus_get_next_version(uint32_t current_ver) { switch (current_ver) { case (HV_VMBUS_VERSION_WIN7): return(HV_VMBUS_VERSION_WS2008); case (HV_VMBUS_VERSION_WIN8): return(HV_VMBUS_VERSION_WIN7); case (HV_VMBUS_VERSION_WIN8_1): return(HV_VMBUS_VERSION_WIN8); case (HV_VMBUS_VERSION_WS2008): default: return(HV_VMBUS_VERSION_INVALID); } } /** * Negotiate the highest supported hypervisor version. 
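 *
 * (editor's aside, not part of this commit: hv_vmbus_get_next_version()
 * above defines a downgrade chain, WIN8_1 -> WIN8 -> WIN7 -> WS2008 ->
 * invalid, and the loop in hv_vmbus_connect() below walks it until the
 * host accepts a version.  A standalone model of that walk; the enum
 * values and the host stand-in are hypothetical:)
 */

#include <stdbool.h>
#include <stdio.h>

enum ver { V_INVALID, V_WS2008, V_WIN7, V_WIN8, V_WIN8_1 };

static enum ver
next_version(enum ver v)
{
	switch (v) {
	case V_WIN8_1:	return (V_WIN8);
	case V_WIN8:	return (V_WIN7);
	case V_WIN7:	return (V_WS2008);
	default:	return (V_INVALID);
	}
}

/* stand-in for the host: pretend only WIN7 and older are accepted */
static bool
host_accepts(enum ver v)
{
	return (v <= V_WIN7);
}

int
main(void)
{
	enum ver v;

	for (v = V_WIN8_1; v != V_INVALID; v = next_version(v)) {
		if (host_accepts(v)) {
			printf("negotiated version %d\n", v);
			return (0);
		}
	}
	printf("no common version\n");
	return (1);
}

/*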
*/ static int hv_vmbus_negotiate_version(hv_vmbus_channel_msg_info *msg_info, uint32_t version) { int ret = 0; hv_vmbus_channel_initiate_contact *msg; sema_init(&msg_info->wait_sema, 0, "Msg Info Sema"); msg = (hv_vmbus_channel_initiate_contact*) msg_info->msg; msg->header.message_type = HV_CHANNEL_MESSAGE_INITIATED_CONTACT; msg->vmbus_version_requested = version; msg->interrupt_page = hv_get_phys_addr( hv_vmbus_g_connection.interrupt_page); msg->monitor_page_1 = hv_get_phys_addr( hv_vmbus_g_connection.monitor_page_1); msg->monitor_page_2 = hv_get_phys_addr( hv_vmbus_g_connection.monitor_page_2); /** * Add to list before we send the request since we may receive the * response before returning from this routine */ mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_INSERT_TAIL( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); ret = hv_vmbus_post_message( msg, sizeof(hv_vmbus_channel_initiate_contact)); if (ret != 0) { mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); return (ret); } /** * Wait for the connection response */ ret = sema_timedwait(&msg_info->wait_sema, 5 * hz); /* KYS 5 seconds */ mtx_lock_spin(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock_spin(&hv_vmbus_g_connection.channel_msg_lock); /** * Check if successful */ if (msg_info->response.version_response.version_supported) { hv_vmbus_g_connection.connect_state = HV_CONNECTED; } else { ret = ECONNREFUSED; } return (ret); } /** * Send a connect request on the partition service connection */ int hv_vmbus_connect(void) { int ret = 0; uint32_t version; hv_vmbus_channel_msg_info* msg_info = NULL; /** * Make sure we are not connecting or connected */ if (hv_vmbus_g_connection.connect_state != HV_DISCONNECTED) { return (-1); } /** * Initialize the vmbus connection */ hv_vmbus_g_connection.connect_state = HV_CONNECTING; TAILQ_INIT(&hv_vmbus_g_connection.channel_msg_anchor); mtx_init(&hv_vmbus_g_connection.channel_msg_lock, "vmbus channel msg", NULL, MTX_SPIN); TAILQ_INIT(&hv_vmbus_g_connection.channel_anchor); mtx_init(&hv_vmbus_g_connection.channel_lock, "vmbus channel", NULL, MTX_DEF); /** * Setup the vmbus event connection for channel interrupt abstraction * stuff */ hv_vmbus_g_connection.interrupt_page = malloc( PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); hv_vmbus_g_connection.recv_interrupt_page = hv_vmbus_g_connection.interrupt_page; hv_vmbus_g_connection.send_interrupt_page = ((uint8_t *) hv_vmbus_g_connection.interrupt_page + (PAGE_SIZE >> 1)); /** * Set up the monitor notification facility. The 1st page for * parent->child and the 2nd page for child->parent */ hv_vmbus_g_connection.monitor_page_1 = malloc( PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); hv_vmbus_g_connection.monitor_page_2 = malloc( PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); msg_info = (hv_vmbus_channel_msg_info*) malloc(sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_initiate_contact), M_DEVBUF, M_WAITOK | M_ZERO); hv_vmbus_g_connection.channels = malloc(sizeof(hv_vmbus_channel*) * HV_CHANNEL_MAX_COUNT, M_DEVBUF, M_WAITOK | M_ZERO); /* * Find the highest vmbus version number we can support. */ version = HV_VMBUS_VERSION_CURRENT; do { ret = hv_vmbus_negotiate_version(msg_info, version); if (ret == EWOULDBLOCK) { /* * We timed out. 
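 *
 * (editor's aside, not part of this commit: the pattern in
 * hv_vmbus_negotiate_version() above -- link the waiter onto the pending
 * list *before* posting the request, block on a semaphore with a timeout,
 * then unlink no matter what -- avoids losing a response that arrives
 * before the sender returns.  A POSIX model of the wait half; the
 * function names are hypothetical, sem_timedwait(3) is real:)
 */

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static int
wait_response(sem_t *sem, int timeout_sec)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;
	while (sem_timedwait(sem, &ts) != 0) {
		if (errno == EINTR)
			continue;	/* retry on signal */
		return (errno);		/* ETIMEDOUT on timeout */
	}
	return (0);
}

int
main(void)
{
	sem_t sem;

	sem_init(&sem, 0, 0);
	/* 1. would link a waiter record onto the pending list here */
	/* 2. would post the request to the peer here */
	sem_post(&sem);			/* simulate the response arriving */
	printf("wait_response: %d\n", wait_response(&sem, 5));
	/* 3. would unlink the waiter record here, success or not */
	sem_destroy(&sem);
	return (0);
}

/*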
*/ goto cleanup; } if (hv_vmbus_g_connection.connect_state == HV_CONNECTED) break; version = hv_vmbus_get_next_version(version); } while (version != HV_VMBUS_VERSION_INVALID); hv_vmbus_protocal_version = version; if (bootverbose) printf("VMBUS: Protocol Version: %d.%d\n", version >> 16, version & 0xFFFF); sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); return (0); /* * Cleanup after failure! */ cleanup: hv_vmbus_g_connection.connect_state = HV_DISCONNECTED; mtx_destroy(&hv_vmbus_g_connection.channel_lock); mtx_destroy(&hv_vmbus_g_connection.channel_msg_lock); if (hv_vmbus_g_connection.interrupt_page != NULL) { free(hv_vmbus_g_connection.interrupt_page, M_DEVBUF); hv_vmbus_g_connection.interrupt_page = NULL; } free(hv_vmbus_g_connection.monitor_page_1, M_DEVBUF); free(hv_vmbus_g_connection.monitor_page_2, M_DEVBUF); if (msg_info) { sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); } free(hv_vmbus_g_connection.channels, M_DEVBUF); return (ret); } /** * Send a disconnect request on the partition service connection */ int hv_vmbus_disconnect(void) { int ret = 0; hv_vmbus_channel_unload msg; msg.message_type = HV_CHANNEL_MESSAGE_UNLOAD; ret = hv_vmbus_post_message(&msg, sizeof(hv_vmbus_channel_unload)); free(hv_vmbus_g_connection.interrupt_page, M_DEVBUF); mtx_destroy(&hv_vmbus_g_connection.channel_msg_lock); free(hv_vmbus_g_connection.channels, M_DEVBUF); hv_vmbus_g_connection.connect_state = HV_DISCONNECTED; return (ret); } /** * Handler for events */ void hv_vmbus_on_events(int cpu) { int bit; int dword; void *page_addr; uint32_t* recv_interrupt_page = NULL; int rel_id; int maxdword; hv_vmbus_synic_event_flags *event; /* int maxdword = PAGE_SIZE >> 3; */ KASSERT(cpu <= mp_maxid, ("VMBUS: hv_vmbus_on_events: " "cpu out of range!")); if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) || (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) { maxdword = HV_MAX_NUM_CHANNELS_SUPPORTED >> 5; /* * receive size is 1/2 page and divide that by 4 bytes */ recv_interrupt_page = hv_vmbus_g_connection.recv_interrupt_page; } else { /* * On Host with Win8 or above, the event page can be * checked directly to get the id of the channel * that has the pending interrupt. */ maxdword = HV_EVENT_FLAGS_DWORD_COUNT; page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu]; event = (hv_vmbus_synic_event_flags *) page_addr + HV_VMBUS_MESSAGE_SINT; recv_interrupt_page = event->flags32; } /* * Check events */ if (recv_interrupt_page != NULL) { for (dword = 0; dword < maxdword; dword++) { if (recv_interrupt_page[dword]) { for (bit = 0; bit < HV_CHANNEL_DWORD_LEN; bit++) { if (synch_test_and_clear_bit(bit, (uint32_t *) &recv_interrupt_page[dword])) { rel_id = (dword << 5) + bit; if (rel_id == 0) { /* * Special case - * vmbus channel protocol msg. 
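 *
 * (editor's aside, not part of this commit: the scan in
 * hv_vmbus_on_events() around this point turns a page of event-flag
 * words into channel relids with rel_id = (dword << 5) + bit, clearing
 * each flag as it is consumed.  A userland model using a plain
 * test-and-clear; the kernel uses an atomic op, and all names here are
 * hypothetical:)
 */

#include <stdint.h>
#include <stdio.h>

#define NDWORDS 4

static int
test_and_clear_bit(int bit, uint32_t *word)
{
	uint32_t mask = 1u << bit;
	int was_set = (*word & mask) != 0;

	*word &= ~mask;		/* synch_test_and_clear_bit() is atomic */
	return (was_set);
}

int
main(void)
{
	uint32_t flags[NDWORDS] = { 0x0, 0x22, 0x0, 0x1 };
	int dword, bit;

	for (dword = 0; dword < NDWORDS; dword++) {
		if (flags[dword] == 0)
			continue;
		for (bit = 0; bit < 32; bit++) {
			if (test_and_clear_bit(bit, &flags[dword])) {
				int rel_id = (dword << 5) + bit;

				if (rel_id == 0)
					continue; /* protocol message slot */
				printf("event on channel %d\n", rel_id);
			}
		}
	}
	return (0);
}

/*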
*/ continue; } else { hv_vmbus_channel * channel = hv_vmbus_g_connection.channels[rel_id]; /* if channel is closed or closing */ if (channel == NULL || channel->rxq == NULL) continue; if (channel->batched_reading) hv_ring_buffer_read_begin(&channel->inbound); - taskqueue_enqueue_fast(channel->rxq, &channel->channel_task); + taskqueue_enqueue(channel->rxq, &channel->channel_task); } } } } } } return; } /** * Send a msg on the vmbus's message connection */ int hv_vmbus_post_message(void *buffer, size_t bufferLen) { int ret = 0; hv_vmbus_connection_id connId; unsigned retries = 0; /* NetScaler delays from previous code were consolidated here */ static int delayAmount[] = {100, 100, 100, 500, 500, 5000, 5000, 5000}; /* for(each entry in delayAmount) try to post message, * delay a little bit before retrying */ for (retries = 0; retries < sizeof(delayAmount)/sizeof(delayAmount[0]); retries++) { connId.as_uint32_t = 0; connId.u.id = HV_VMBUS_MESSAGE_CONNECTION_ID; ret = hv_vmbus_post_msg_via_msg_ipc(connId, 1, buffer, bufferLen); if (ret != HV_STATUS_INSUFFICIENT_BUFFERS) break; /* TODO: KYS We should use a blocking wait call */ DELAY(delayAmount[retries]); } KASSERT(ret == 0, ("Error VMBUS: Message Post Failed\n")); return (ret); } /** * Send an event notification to the parent */ int hv_vmbus_set_event(hv_vmbus_channel *channel) { int ret = 0; uint32_t child_rel_id = channel->offer_msg.child_rel_id; /* Each uint32_t represents 32 channels */ synch_set_bit(child_rel_id & 31, (((uint32_t *)hv_vmbus_g_connection.send_interrupt_page + (child_rel_id >> 5)))); ret = hv_vmbus_signal_event(channel->signal_event_param); return (ret); } Index: head/sys/dev/malo/if_malo.c =================================================================== --- head/sys/dev/malo/if_malo.c (revision 296271) +++ head/sys/dev/malo/if_malo.c (revision 296272) @@ -1,2181 +1,2181 @@ /*- * Copyright (c) 2008 Weongyo Jeong * Copyright (c) 2007 Marvell Semiconductor, Inc. * Copyright (c) 2007 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
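 *
 * (editor's aside before the malo driver below, not part of this commit:
 * hv_vmbus_post_message() above retries a transiently failing post with
 * a fixed table of escalating delays rather than a computed backoff.
 * A userland sketch of the same shape; the failing operation and all
 * names are hypothetical:)
 */

#include <stdio.h>
#include <unistd.h>

#define EBUSYISH 1

static int
try_post(int attempt)
{
	/* pretend the first three attempts hit a full buffer */
	return (attempt < 3 ? EBUSYISH : 0);
}

int
main(void)
{
	/* microseconds, mirroring the consolidated delay table above */
	static const int delay_us[] = {
		100, 100, 100, 500, 500, 5000, 5000, 5000
	};
	unsigned i;
	int ret = EBUSYISH;

	for (i = 0; i < sizeof(delay_us) / sizeof(delay_us[0]); i++) {
		ret = try_post(i);
		if (ret != EBUSYISH)
			break;
		usleep(delay_us[i]);	/* DELAY() in the kernel */
	}
	printf("post result: %d\n", ret);
	return (ret);
}

/*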
*/ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif #include "opt_malo.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD, 0, "Marvell 88w8335 driver parameters"); static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/ SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &malo_txcoalesce, 0, "tx buffers to send at once"); static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &malo_rxbuf, 0, "rx buffers allocated"); static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RWTUN, &malo_rxquota, 0, "max rx buffers to process per interrupt"); static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RWTUN, &malo_txbuf, 0, "tx buffers allocated"); #ifdef MALO_DEBUG static int malo_debug = 0; SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RWTUN, &malo_debug, 0, "control debugging printfs"); enum { MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MALO_DEBUG_RECV = 0x00000004, /* basic recv operation */ MALO_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MALO_DEBUG_RESET = 0x00000010, /* reset processing */ MALO_DEBUG_INTR = 0x00000040, /* ISR */ MALO_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MALO_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ MALO_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ MALO_DEBUG_NODE = 0x00000800, /* node management */ MALO_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MALO_DEBUG_FW = 0x00008000, /* firmware */ MALO_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | \ IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ (((sc->malo_debug & MALO_DEBUG_RECV) && \ ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh)))) #define IFF_DUMPPKTS_XMIT(sc) \ (sc->malo_debug & MALO_DEBUG_XMIT) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->malo_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif static MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers"); static struct ieee80211vap *malo_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void malo_vap_delete(struct ieee80211vap *); static int malo_dma_setup(struct malo_softc *); static int malo_setup_hwdma(struct malo_softc *); static void malo_txq_init(struct malo_softc *, struct malo_txq *, int); static void malo_tx_cleanupq(struct malo_softc *, struct malo_txq *); static void malo_parent(struct ieee80211com *); static int malo_transmit(struct ieee80211com *, struct mbuf *); static void malo_start(struct malo_softc *); static void malo_watchdog(void *); static void malo_updateslot(struct ieee80211com *); static int malo_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void malo_scan_start(struct ieee80211com *); static void malo_scan_end(struct ieee80211com *); static void malo_set_channel(struct ieee80211com *); static int malo_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void malo_sysctlattach(struct malo_softc *); static void malo_announce(struct malo_softc *); static void malo_dma_cleanup(struct malo_softc *); static void malo_stop(struct malo_softc *); static int malo_chan_set(struct malo_softc *, struct ieee80211_channel *); static int malo_mode_init(struct malo_softc *); static void malo_tx_proc(void *, int); static void malo_rx_proc(void *, int); static void malo_init(void *); /* * Read/Write shorthands for accesses to BAR 0. Note that all BAR 1 * operations are done in the "hal" except getting H/W MAC address at * malo_attach and there should be no reference to them here. */ static uint32_t malo_bar0_read4(struct malo_softc *sc, bus_size_t off) { return bus_space_read_4(sc->malo_io0t, sc->malo_io0h, off); } static void malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val) { DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n", __func__, (uintmax_t)off, val); bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val); } int malo_attach(uint16_t devid, struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh; int error; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; MALO_LOCK_INIT(sc); callout_init_mtx(&sc->malo_watchdog_timer, &sc->malo_mtx, 0); mbufq_init(&sc->malo_snd, ifqmaxlen); mh = malo_hal_attach(sc->malo_dev, devid, sc->malo_io1h, sc->malo_io1t, sc->malo_dmat); if (mh == NULL) { device_printf(sc->malo_dev, "unable to attach HAL\n"); error = EIO; goto bad; } sc->malo_mh = mh; /* * Load firmware so we can get setup. We arbitrarily pick station * firmware; we'll re-load firmware as needed so setting up * the wrong mode isn't a big deal. */ error = malo_hal_fwload(mh, "malo8335-h", "malo8335-m"); if (error != 0) { device_printf(sc->malo_dev, "unable to setup firmware\n"); goto bad1; } /* XXX gethwspecs() extracts correct informations? not maybe! 
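 *
 * (editor's aside, not part of this commit: the DPRINTF()/malo_debug
 * scheme above gates each debug printf on a per-category bit, so single
 * subsystems can be traced at runtime via the sysctl added below.  A
 * self-contained model of the idiom; all names are hypothetical:)
 */

#include <stdio.h>

enum {
	DBG_XMIT = 0x1,
	DBG_RECV = 0x2,
	DBG_INTR = 0x4,
};

static unsigned debug_mask = DBG_XMIT | DBG_INTR;

#define DPRINTF(m, ...) do {					\
	if (debug_mask & (m))					\
		printf(__VA_ARGS__);				\
} while (0)

int
main(void)
{
	DPRINTF(DBG_XMIT, "xmit: queued %d frames\n", 8);	/* printed */
	DPRINTF(DBG_RECV, "recv: %d buffers\n", 4);		/* suppressed */
	return (0);
}

/*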
*/ error = malo_hal_gethwspecs(mh, &sc->malo_hwspecs); if (error != 0) { device_printf(sc->malo_dev, "unable to fetch h/w specs\n"); goto bad1; } DPRINTF(sc, MALO_DEBUG_FW, "malo_hal_gethwspecs: hwversion 0x%x hostif 0x%x" "maxnum_wcb 0x%x maxnum_mcaddr 0x%x maxnum_tx_wcb 0x%x" "regioncode 0x%x num_antenna 0x%x fw_releasenum 0x%x" "wcbbase0 0x%x rxdesc_read 0x%x rxdesc_write 0x%x" "ul_fw_awakecookie 0x%x w[4] = %x %x %x %x", sc->malo_hwspecs.hwversion, sc->malo_hwspecs.hostinterface, sc->malo_hwspecs.maxnum_wcb, sc->malo_hwspecs.maxnum_mcaddr, sc->malo_hwspecs.maxnum_tx_wcb, sc->malo_hwspecs.regioncode, sc->malo_hwspecs.num_antenna, sc->malo_hwspecs.fw_releasenum, sc->malo_hwspecs.wcbbase0, sc->malo_hwspecs.rxdesc_read, sc->malo_hwspecs.rxdesc_write, sc->malo_hwspecs.ul_fw_awakecookie, sc->malo_hwspecs.wcbbase[0], sc->malo_hwspecs.wcbbase[1], sc->malo_hwspecs.wcbbase[2], sc->malo_hwspecs.wcbbase[3]); /* NB: firmware looks that it does not export regdomain info API. */ memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, bands); sc->malo_txantenna = 0x2; /* h/w default */ sc->malo_rxantenna = 0xffff; /* h/w default */ /* * Allocate tx + rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. */ error = malo_dma_setup(sc); if (error != 0) { device_printf(sc->malo_dev, "failed to setup descriptors: %d\n", error); goto bad1; } error = malo_setup_hwdma(sc); /* push to firmware */ if (error != 0) /* NB: malo_setupdma prints msg */ goto bad2; sc->malo_tq = taskqueue_create_fast("malo_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->malo_tq); taskqueue_start_threads(&sc->malo_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->malo_dev)); TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc); TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->malo_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ ; IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr); /* * Transmit requires space in the packet for a special format transmit * record and optional padding between this record and the payload. * Ask the net80211 layer to arrange this when encapsulating * packets so we can add it efficiently. */ ic->ic_headroom = sizeof(struct malo_txrec) - sizeof(struct ieee80211_frame); /* call MI attach routine. */ ieee80211_ifattach(ic); /* override default methods */ ic->ic_vap_create = malo_vap_create; ic->ic_vap_delete = malo_vap_delete; ic->ic_raw_xmit = malo_raw_xmit; ic->ic_updateslot = malo_updateslot; ic->ic_scan_start = malo_scan_start; ic->ic_scan_end = malo_scan_end; ic->ic_set_channel = malo_set_channel; ic->ic_parent = malo_parent; ic->ic_transmit = malo_transmit; sc->malo_invalid = 0; /* ready to go, enable int handling */ ieee80211_radiotap_attach(ic, &sc->malo_tx_th.wt_ihdr, sizeof(sc->malo_tx_th), MALO_TX_RADIOTAP_PRESENT, &sc->malo_rx_th.wr_ihdr, sizeof(sc->malo_rx_th), MALO_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's. 
*/ malo_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); malo_announce(sc); return 0; bad2: malo_dma_cleanup(sc); bad1: malo_hal_detach(mh); bad: sc->malo_invalid = 1; return error; } static struct ieee80211vap * malo_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct malo_softc *sc = ic->ic_softc; struct malo_vap *mvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->malo_dev, "multiple vaps not supported\n"); return NULL; } switch (opmode) { case IEEE80211_M_STA: if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; /* fall thru... */ case IEEE80211_M_MONITOR: break; default: device_printf(sc->malo_dev, "%s mode not supported\n", ieee80211_opmode_name[opmode]); return NULL; /* unsupported */ } mvp = malloc(sizeof(struct malo_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &mvp->malo_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ mvp->malo_newstate = vap->iv_newstate; vap->iv_newstate = malo_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void malo_vap_delete(struct ieee80211vap *vap) { struct malo_vap *mvp = MALO_VAP(vap); ieee80211_vap_detach(vap); free(mvp, M_80211_VAP); } int malo_intr(void *arg) { struct malo_softc *sc = arg; struct malo_hal *mh = sc->malo_mh; uint32_t status; if (sc->malo_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return (FILTER_STRAY); } /* * Figure out the reason(s) for the interrupt. */ malo_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return (FILTER_STRAY); DPRINTF(sc, MALO_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->malo_imask); if (status & MALO_A2HRIC_BIT_RX_RDY) - taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_rxtask); + taskqueue_enqueue(sc->malo_tq, &sc->malo_rxtask); if (status & MALO_A2HRIC_BIT_TX_DONE) - taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_txtask); + taskqueue_enqueue(sc->malo_tq, &sc->malo_txtask); if (status & MALO_A2HRIC_BIT_OPC_DONE) malo_hal_cmddone(mh); if (status & MALO_A2HRIC_BIT_MAC_EVENT) ; if (status & MALO_A2HRIC_BIT_RX_PROBLEM) ; if (status & MALO_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->malo_stats.mst_rx_badtkipicv++; } #ifdef MALO_DEBUG if (((status | sc->malo_imask) ^ sc->malo_imask) != 0) DPRINTF(sc, MALO_DEBUG_INTR, "%s: can't handle interrupt status 0x%x\n", __func__, status); #endif return (FILTER_HANDLED); } static void malo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } static int malo_desc_setup(struct malo_softc *sc, const char *name, struct malo_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { int error; uint8_t *ds; DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. 
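 *
 * (editor's aside before the DMA setup below, not part of this commit:
 * the malo_intr() hunks above are the point of this change --
 * taskqueue_enqueue_fast(9) is retired and fast queues are now enqueued
 * with plain taskqueue_enqueue(9).  A minimal hypothetical module
 * showing the post-change idiom; the taskqueue(9) and module(9) calls
 * are the real API, everything named "example_*" is made up:)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue *example_tq;
static struct task example_task;

static void
example_task_fn(void *arg, int pending)
{
	printf("example task ran (pending %d)\n", pending);
}

static int
example_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		TASK_INIT(&example_task, 0, example_task_fn, NULL);
		example_tq = taskqueue_create_fast("example_tq", M_WAITOK,
		    taskqueue_thread_enqueue, &example_tq);
		taskqueue_start_threads(&example_tq, 1, PI_NET, "example tq");
		/* formerly taskqueue_enqueue_fast(example_tq, ...) */
		taskqueue_enqueue(example_tq, &example_task);
		return (0);
	case MOD_UNLOAD:
		taskqueue_drain(example_tq, &example_task);
		taskqueue_free(example_tq);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_mod = {
	"example_taskq", example_modevent, NULL
};
DECLARE_MODULE(example_taskq, example_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);

/*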
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->malo_dev),/* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { device_printf(sc->malo_dev, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { device_printf(sc->malo_dev, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, malo_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->malo_dev, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int malo_rxdma_setup(struct malo_softc *sc) { int error, bsize, i; struct malo_rxbuf *bf; struct malo_rxdesc *ds; error = malo_desc_setup(sc, "rx", &sc->malo_rxdma, malo_rxbuf, sizeof(struct malo_rxbuf), 1, sizeof(struct malo_rxdesc)); if (error != 0) return error; /* * Allocate rx buffers and set them up. 
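 *
 * (editor's aside, not part of this commit: malo_desc_setup() above is
 * the classic three-stage acquire -- create the tag, allocate the
 * memory, load the map -- where each failure unwinds exactly the stages
 * already completed via the fail1/fail2 labels.  A portable model of
 * the goto-unwind idiom; the resources here are hypothetical:)
 */

#include <stdio.h>
#include <stdlib.h>

static int
setup(void)
{
	char *tag, *mem, *map;

	if ((tag = malloc(16)) == NULL)		/* stage 1 */
		return (-1);
	if ((mem = malloc(16)) == NULL)		/* stage 2 */
		goto fail1;
	if ((map = malloc(16)) == NULL)		/* stage 3 */
		goto fail2;

	printf("all three stages acquired\n");
	free(map);
	free(mem);
	free(tag);
	return (0);

fail2:
	free(mem);	/* undo stage 2 */
fail1:
	free(tag);	/* undo stage 1 */
	return (-1);
}

int
main(void)
{
	return (setup());
}

/*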
*/ bsize = malo_rxbuf * sizeof(struct malo_rxbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->malo_dev, "malloc of %u rx buffers failed\n", bsize); return error; } sc->malo_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->malo_rxbuf); ds = sc->malo_rxdma.dd_desc; for (i = 0; i < malo_rxbuf; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->malo_rxdma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->malo_dev, "%s: unable to dmamap for rx buffer, error %d\n", __func__, error); return error; } /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->malo_rxbuf, bf, bf_list); } return 0; } static int malo_txdma_setup(struct malo_softc *sc, struct malo_txq *txq) { int error, bsize, i; struct malo_txbuf *bf; struct malo_txdesc *ds; error = malo_desc_setup(sc, "tx", &txq->dma, malo_txbuf, sizeof(struct malo_txbuf), MALO_TXDESC, sizeof(struct malo_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = malo_txbuf * sizeof(struct malo_txbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->malo_dev, "malloc of %u tx buffers failed\n", malo_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; STAILQ_INIT(&txq->free); txq->nfree = 0; ds = txq->dma.dd_desc; for (i = 0; i < malo_txbuf; i++, bf++, ds += MALO_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->malo_dev, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; } return 0; } static void malo_desc_cleanup(struct malo_softc *sc, struct malo_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } static void malo_rxdma_cleanup(struct malo_softc *sc) { struct malo_rxbuf *bf; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&sc->malo_rxbuf); if (sc->malo_rxdma.dd_bufptr != NULL) { free(sc->malo_rxdma.dd_bufptr, M_MALODEV); sc->malo_rxdma.dd_bufptr = NULL; } if (sc->malo_rxdma.dd_desc_len != 0) malo_desc_cleanup(sc, &sc->malo_rxdma); } static void malo_txdma_cleanup(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct ieee80211_node *ni; STAILQ_FOREACH(bf, &txq->free, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MALODEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) malo_desc_cleanup(sc, &txq->dma); } static void malo_dma_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_txdma_cleanup(sc, &sc->malo_txq[i]); malo_rxdma_cleanup(sc); } static int malo_dma_setup(struct malo_softc *sc) { int error, i; /* rxdma initializing. */ error = malo_rxdma_setup(sc); if (error != 0) return error; /* NB: we just have 1 tx queue now. 
*/ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { error = malo_txdma_setup(sc, &sc->malo_txq[i]); if (error != 0) { malo_dma_cleanup(sc); return error; } malo_txq_init(sc, &sc->malo_txq[i], i); } return 0; } static void malo_hal_set_rxtxdma(struct malo_softc *sc) { int i; malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, sc->malo_hwdma.rxdesc_read); malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_write, sc->malo_hwdma.rxdesc_read); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { malo_bar0_write4(sc, sc->malo_hwspecs.wcbbase[i], sc->malo_hwdma.wcbbase[i]); } } /* * Inform firmware of our tx/rx dma setup. The BAR 0 writes below are * for compatibility with older firmware. For current firmware we send * this information with a cmd block via malo_hal_sethwdma. */ static int malo_setup_hwdma(struct malo_softc *sc) { int i; struct malo_txq *txq; sc->malo_hwdma.rxdesc_read = sc->malo_rxdma.dd_desc_paddr; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { txq = &sc->malo_txq[i]; sc->malo_hwdma.wcbbase[i] = txq->dma.dd_desc_paddr; } sc->malo_hwdma.maxnum_txwcb = malo_txbuf; sc->malo_hwdma.maxnum_wcb = MALO_NUM_TX_QUEUES; malo_hal_set_rxtxdma(sc); return 0; } static void malo_txq_init(struct malo_softc *sc, struct malo_txq *txq, int qnum) { struct malo_txbuf *bf, *bn; struct malo_txdesc *ds; MALO_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->physnext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * Reclaim resources for a setup queue. */ static void malo_tx_cleanupq(struct malo_softc *sc, struct malo_txq *txq) { /* XXX hal work? */ MALO_TXQ_LOCK_DESTROY(txq); } /* * Allocate a tx buffer for sending a frame. */ static struct malo_txbuf * malo_getbuf(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MALO_TXQ_UNLOCK(txq); if (bf == NULL) { DPRINTF(sc, MALO_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); sc->malo_stats.mst_tx_qstop++; } return bf; } static int malo_tx_dmasetup(struct malo_softc *sc, struct malo_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This also calculates * the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MALO_TXDESC + 1; } else if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that require too many * TX descriptors. We try to convert the latter to a cluster. 
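 *
 * (editor's aside, not part of this commit: malo_txq_init() further
 * above links every descriptor's physnext to its successor and wraps
 * the last one back to the first, so the hardware sees one endless
 * ring.  A tiny model of that wrap-around link step; the types are
 * hypothetical:)
 */

#include <stdio.h>

#define NDESC 4

struct desc {
	int id;
	struct desc *next;	/* physnext in the real descriptor */
};

int
main(void)
{
	struct desc ring[NDESC];
	int i;

	for (i = 0; i < NDESC; i++) {
		ring[i].id = i;
		/* successor, or wrap to the head for the last entry */
		ring[i].next = &ring[(i + 1) % NDESC];
	}

	/* walk the ring once to show the closure */
	for (i = 0; i < NDESC; i++)
		printf("desc %d -> desc %d\n", ring[i].id, ring[i].next->id);
	return (0);
}

/*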
*/ if (error == EFBIG) { /* too many desc's, linearize */ sc->malo_stats.mst_tx_linear++; m = m_defrag(m0, M_NOWAIT); if (m == NULL) { m_freem(m0); sc->malo_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MALO_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->malo_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MALO_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } #ifdef MALO_DEBUG static void malo_printrxbuf(const struct malo_rxbuf *bf, u_int ix) { const struct malo_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x SNR:%02x NF:%02x CHAN:%02x" " RATE:%02x QOS:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->physnext), le32toh(ds->physbuffdata), ds->rxcontrol, ds->rxcontrol != MALO_RXD_CTRL_DRIVER_OWN ? "" : (status & MALO_RXD_STATUS_OK) ? " *" : " !", ds->status, le16toh(ds->pktlen), ds->snr, ds->nf, ds->channel, ds->rate, le16toh(ds->qosctrl)); } static void malo_printtxbuf(const struct malo_txbuf *bf, u_int qnum, u_int ix) { const struct malo_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->physnext), le32toh(ds->pktptr), le16toh(ds->pktlen), status, status & MALO_TXD_STATUS_USED ? "" : (status & 3) != 0 ? " *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->datarate, ds->txpriority, le16toh(ds->qosctrl), le32toh(ds->sap_pktinfo), le16toh(ds->format)); #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct malo_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MALO_DEBUG */ static __inline void malo_updatetxrate(struct ieee80211_node *ni, int rix) { static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 96, 108 }; if (rix < nitems(ieeerates)) ni->ni_txrate = ieeerates[rix]; } static int malo_fix2rate(int fix_rate) { static const int rates[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 96, 108 }; return (fix_rate < nitems(rates) ? rates[fix_rate] : 0); } /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ #define MS(v,x) (((v) & x) >> x##_S) #define SM(v,x) (((v) << x##_S) & x) /* * Process completed xmit descriptors from the specified queue. 
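 *
 * (editor's aside, not part of this commit: malo_tx_processq() below
 * reaps completions in order and stops at the first descriptor still
 * flagged FW_OWNED, since the firmware completes strictly in ring
 * order.  A userland model of that stop-at-owner loop; the flag and
 * array are hypothetical:)
 */

#include <stdio.h>

#define FW_OWNED 0x1
#define NDESC    5

int
main(void)
{
	/* first three completed, the rest still owned by "firmware" */
	unsigned status[NDESC] = { 0, 0, 0, FW_OWNED, FW_OWNED };
	int i, nreaped = 0;

	for (i = 0; i < NDESC; i++) {
		if (status[i] & FW_OWNED)
			break;		/* later entries cannot be done */
		nreaped++;
	}
	printf("reaped %d of %d descriptors\n", nreaped, NDESC);
	return (0);
}

/*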
*/ static int malo_tx_processq(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct malo_txdesc *ds; struct ieee80211_node *ni; int nreaped; uint32_t status; DPRINTF(sc, MALO_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->status & htole32(MALO_TXD_STATUS_FW_OWNED)) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_XMIT_DESC) malo_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { status = le32toh(ds->status); if (status & MALO_TXD_STATUS_OK) { uint16_t format = le16toh(ds->format); uint8_t txant = MS(format, MALO_TXD_ANTENNA); sc->malo_stats.mst_ant_tx[txant]++; if (status & MALO_TXD_STATUS_OK_RETRY) sc->malo_stats.mst_tx_retries++; if (status & MALO_TXD_STATUS_OK_MORE_RETRY) sc->malo_stats.mst_tx_mretries++; malo_updatetxrate(ni, ds->datarate); sc->malo_stats.mst_tx_rate = ds->datarate; } else { if (status & MALO_TXD_STATUS_FAILED_LINK_ERROR) sc->malo_stats.mst_tx_linkerror++; if (status & MALO_TXD_STATUS_FAILED_XRETRY) sc->malo_stats.mst_tx_xretries++; if (status & MALO_TXD_STATUS_FAILED_AGING) sc->malo_stats.mst_tx_aging++; } /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_tx_complete(ni, bf->bf_m, (status & MALO_TXD_STATUS_OK) == 0); } else m_freem(bf->bf_m); ds->status = htole32(MALO_TXD_STATUS_IDLE); ds->pktlen = htole32(0); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } return nreaped; } /* * Deferred processing of transmit interrupt. */ static void malo_tx_proc(void *arg, int npending) { struct malo_softc *sc = arg; int i, nreaped; /* * Process each active queue. */ nreaped = 0; MALO_LOCK(sc); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { if (!STAILQ_EMPTY(&sc->malo_txq[i].active)) nreaped += malo_tx_processq(sc, &sc->malo_txq[i]); } if (nreaped != 0) { sc->malo_timer = 0; malo_start(sc); } MALO_UNLOCK(sc); } static int malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni, struct malo_txbuf *bf, struct mbuf *m0) { #define IS_DATA_FRAME(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == IEEE80211_FC0_TYPE_DATA) int error, ismcast, iswep; int copyhdrlen, hdrlen, pktlen; struct ieee80211_frame *wh; struct ieee80211com *ic = &sc->malo_ic; struct ieee80211vap *vap = ni->ni_vap; struct malo_txdesc *ds; struct malo_txrec *tr; struct malo_txq *txq; uint16_t qos; wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); copyhdrlen = hdrlen = ieee80211_anyhdrsize(wh); pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_IS_DSTODS(wh)) { qos = *(uint16_t *) (((struct ieee80211_qosframe_addr4 *) wh)->i_qos); copyhdrlen -= sizeof(qos); } else qos = *(uint16_t *) (((struct ieee80211_qosframe *) wh)->i_qos); } else qos = 0; if (iswep) { struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. 
* * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. */ pktlen = m0->m_pkthdr.len; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->malo_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->malo_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; sc->malo_tx_th.wt_txpower = ni->ni_txpower; sc->malo_tx_th.wt_antenna = sc->malo_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * malo_attach. */ if (hdrlen < sizeof(struct malo_txrec)) { const int space = sizeof(struct malo_txrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->malo_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); /* XXX stat */ return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct malo_txrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length of the fully * formed "802.11 payload". That is, it's everything except for * the 802.11 header. In particular this includes all crypto * material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = malo_tx_dmasetup(sc, bf, m0); if (error != 0) return error; bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct malo_txrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->qosctrl = qos; /* NB: already little-endian */ ds->pktptr = htole32(bf->bf_segs[0].ds_addr); ds->pktlen = htole16(bf->bf_segs[0].ds_len); /* NB: pPhysNext setup once, don't touch */ ds->datarate = IS_DATA_FRAME(wh) ? 1 : 0; ds->sap_pktinfo = 0; ds->format = 0; /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->malo_stats.mst_tx_mgmt++; /* fall thru... 
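 *
 * (editor's aside, not part of this commit: the headroom dance above --
 * reserve space in front of the payload at allocation time, via
 * ic_headroom, so a device header can be prepended in place without
 * copying the body -- is a common driver pattern.  A flat-buffer model;
 * the sizes and names are hypothetical:)
 */

#include <stdio.h>
#include <string.h>

#define HEADROOM 8
#define BODYLEN  5

int
main(void)
{
	char buf[HEADROOM + BODYLEN + 1];
	char *data = buf + HEADROOM;	/* payload starts after headroom */
	const char hdr[4] = "HDR:";

	memcpy(data, "hello", BODYLEN);

	/* "prepend" by stepping the data pointer back, no memmove needed */
	data -= sizeof(hdr);
	memcpy(data, hdr, sizeof(hdr));

	data[sizeof(hdr) + BODYLEN] = '\0';
	printf("frame: %s\n", data);
	return (0);
}

/*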
*/ case IEEE80211_FC0_TYPE_CTL: ds->txpriority = 1; break; case IEEE80211_FC0_TYPE_DATA: ds->txpriority = txq->qnum; break; default: device_printf(sc->malo_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ m_freem(m0); return EIO; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->datarate, -1); #endif MALO_TXQ_LOCK(txq); if (!IS_DATA_FRAME(wh)) ds->status |= htole32(1); ds->status |= htole32(MALO_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->malo_timer = 5; MALO_TXQ_UNLOCK(txq); return 0; } static int malo_transmit(struct ieee80211com *ic, struct mbuf *m) { struct malo_softc *sc = ic->ic_softc; int error; MALO_LOCK(sc); if (!sc->malo_running) { MALO_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->malo_snd, m); if (error) { MALO_UNLOCK(sc); return (error); } malo_start(sc); MALO_UNLOCK(sc); return (0); } static void malo_start(struct malo_softc *sc) { struct ieee80211_node *ni; struct malo_txq *txq = &sc->malo_txq[0]; struct malo_txbuf *bf = NULL; struct mbuf *m; int nqueued = 0; MALO_LOCK_ASSERT(sc); if (!sc->malo_running || sc->malo_invalid) return; while ((m = mbufq_dequeue(&sc->malo_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; bf = malo_getbuf(sc, txq); if (bf == NULL) { mbufq_prepend(&sc->malo_snd, m); sc->malo_stats.mst_tx_qstop++; break; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m)) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); if (bf != NULL) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); MALO_TXQ_UNLOCK(txq); } ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= malo_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * malo_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } static void malo_watchdog(void *arg) { struct malo_softc *sc = arg; callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); if (sc->malo_timer == 0 || --sc->malo_timer > 0) return; if (sc->malo_running && !sc->malo_invalid) { device_printf(sc->malo_dev, "watchdog timeout\n"); /* XXX no way to reset h/w. now */ counter_u64_add(sc->malo_ic.ic_oerrors, 1); sc->malo_stats.mst_watchdog++; } } static int malo_hal_reset(struct malo_softc *sc) { static int first = 0; struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh = sc->malo_mh; if (first == 0) { /* * NB: when the device firstly is initialized, sometimes * firmware could override rx/tx dma registers so we re-set * these values once. 
*/ malo_hal_set_rxtxdma(sc); first = 1; } malo_hal_setantenna(mh, MHA_ANTENNATYPE_RX, sc->malo_rxantenna); malo_hal_setantenna(mh, MHA_ANTENNATYPE_TX, sc->malo_txantenna); malo_hal_setradio(mh, 1, MHP_AUTO_PREAMBLE); malo_chan_set(sc, ic->ic_curchan); /* XXX needs other stuffs? */ return 1; } static __inline struct mbuf * malo_getrxmbuf(struct malo_softc *sc, struct malo_rxbuf *bf) { struct mbuf *m; bus_addr_t paddr; int error; /* XXX don't need mbuf, just dma buffer */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { sc->malo_stats.mst_rx_nombuf++; /* XXX */ return NULL; } error = bus_dmamap_load(sc->malo_dmat, bf->bf_dmamap, mtod(m, caddr_t), MJUMPAGESIZE, malo_load_cb, &paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->malo_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); return NULL; } bf->bf_data = paddr; bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); return m; } static int malo_rxbuf_init(struct malo_softc *sc, struct malo_rxbuf *bf) { struct malo_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_m == NULL) { bf->bf_m = malo_getrxmbuf(sc, bf); if (bf->bf_m == NULL) { /* mark descriptor to be skipped */ ds->rxcontrol = MALO_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); return ENOMEM; } } /* * Setup descriptor. */ ds->qosctrl = 0; ds->snr = 0; ds->status = MALO_RXD_STATUS_IDLE; ds->channel = 0; ds->pktlen = htole16(MALO_RXSIZE); ds->nf = 0; ds->physbuffdata = htole32(bf->bf_data); /* NB: don't touch pPhysNext, set once */ ds->rxcontrol = MALO_RXD_CTRL_DRIVER_OWN; MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } /* * Setup the rx data structures. This should only be done once or we may get * out of sync with the firmware. */ static int malo_startrecv(struct malo_softc *sc) { struct malo_rxbuf *bf, *prev; struct malo_rxdesc *ds; if (sc->malo_recvsetup == 1) { malo_mode_init(sc); /* set filters, etc. */ return 0; } prev = NULL; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { int error = malo_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MALO_DEBUG_RECV, "%s: malo_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(STAILQ_FIRST(&sc->malo_rxbuf)->bf_daddr); } sc->malo_recvsetup = 1; malo_mode_init(sc); /* set filters, etc. */ return 0; } static void malo_init_locked(struct malo_softc *sc) { struct malo_hal *mh = sc->malo_mh; int error; MALO_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe whether this is * the first time through or not. */ malo_stop(sc); /* * Push state to the firmware. */ if (!malo_hal_reset(sc)) { device_printf(sc->malo_dev, "%s: unable to reset hardware\n", __func__); return; } /* * Setup recv (once); transmit is already good to go. */ error = malo_startrecv(sc); if (error != 0) { device_printf(sc->malo_dev, "%s: unable to start recv logic, error %d\n", __func__, error); return; } /* * Enable interrupts. 
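 *
 * (editor's aside, not part of this commit: malo_rxbuf_init() above
 * hands a descriptor to the hardware only once its buffer is in place;
 * on allocation failure the slot is marked so the device skips it
 * instead of DMA-ing into nowhere.  A compact model of that ownership
 * handoff; all names are hypothetical:)
 */

#include <stdio.h>
#include <stdlib.h>

enum owner { OWN_OS, OWN_DEVICE };

struct rxdesc {
	void *buf;
	enum owner control;
};

static int
rxdesc_refill(struct rxdesc *d, size_t bufsize)
{
	if (d->buf == NULL)
		d->buf = malloc(bufsize);
	if (d->buf == NULL) {
		d->control = OWN_OS;	/* device must skip this slot */
		return (-1);
	}
	d->control = OWN_DEVICE;	/* safe to receive into */
	return (0);
}

int
main(void)
{
	struct rxdesc d = { NULL, OWN_OS };

	printf("refill: %d, owner: %s\n", rxdesc_refill(&d, 2048),
	    d.control == OWN_DEVICE ? "device" : "os");
	free(d.buf);
	return (0);
}

/*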
*/ sc->malo_imask = MALO_A2HRIC_BIT_RX_RDY | MALO_A2HRIC_BIT_TX_DONE | MALO_A2HRIC_BIT_OPC_DONE | MALO_A2HRIC_BIT_MAC_EVENT | MALO_A2HRIC_BIT_RX_PROBLEM | MALO_A2HRIC_BIT_ICV_ERROR | MALO_A2HRIC_BIT_RADAR_DETECT | MALO_A2HRIC_BIT_CHAN_SWITCH; sc->malo_running = 1; malo_hal_intrset(mh, sc->malo_imask); callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); } static void malo_init(void *arg) { struct malo_softc *sc = (struct malo_softc *) arg; struct ieee80211com *ic = &sc->malo_ic; MALO_LOCK(sc); malo_init_locked(sc); MALO_UNLOCK(sc); if (sc->malo_running) ieee80211_start_all(ic); /* start all vap's */ } /* * Set the multicast filter contents into the hardware. */ static void malo_setmcastfilter(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct ieee80211vap *vap; uint8_t macs[IEEE80211_ADDR_LEN * MALO_HAL_MCAST_MAX]; uint8_t *mp; int nmc; mp = macs; nmc = 0; if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_allmulti > 0 || ic->ic_promisc > 0) goto all; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { struct ifnet *ifp; struct ifmultiaddr *ifma; ifp = vap->iv_ifp; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (nmc == MALO_HAL_MCAST_MAX) { ifp->if_flags |= IFF_ALLMULTI; if_maddr_runlock(ifp); goto all; } IEEE80211_ADDR_COPY(mp, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); mp += IEEE80211_ADDR_LEN, nmc++; } if_maddr_runlock(ifp); } malo_hal_setmcast(sc->malo_mh, nmc, macs); all: /* * XXX we don't know how to set the f/w for supporting * IFF_ALLMULTI | IFF_PROMISC cases */ return; } static int malo_mode_init(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh = sc->malo_mh; /* * NB: Ignore promisc in hostap mode; it's set by the * bridge. This is wrong but we have no way to * identify internal requests (from the bridge) * versus external requests such as for tcpdump. */ malo_hal_setpromisc(mh, ic->ic_promisc > 0 && ic->ic_opmode != IEEE80211_M_HOSTAP); malo_setmcastfilter(sc); return ENXIO; } static void malo_tx_draintxq(struct malo_softc *sc, struct malo_txq *txq) { struct ieee80211_node *ni; struct malo_txbuf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block malo_tx_tasklet */ for (ix = 0;; ix++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RESET) { struct ieee80211com *ic = &sc->malo_ic; const struct malo_txrec *tr = mtod(bf->bf_m, const struct malo_txrec *); malo_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MALO_DEBUG */ bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. 
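 *
 * (editor's aside, not part of this commit: malo_setmcastfilter() above
 * copies link-level multicast addresses into a fixed firmware table and
 * falls back to "receive all multicast" the moment the table would
 * overflow.  A standalone model of the harvest-or-fallback logic; the
 * address source and table size are hypothetical:)
 */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ADDR_LEN  6
#define TABLE_MAX 2		/* tiny on purpose to force the fallback */

int
main(void)
{
	static const unsigned char wanted[3][ADDR_LEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
		{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
	};
	unsigned char table[TABLE_MAX][ADDR_LEN];
	bool allmulti = false;
	int i, nmc = 0;

	for (i = 0; i < 3; i++) {
		if (nmc == TABLE_MAX) {
			allmulti = true;	/* table full: take them all */
			break;
		}
		memcpy(table[nmc++], wanted[i], ADDR_LEN);
	}
	printf("programmed %d filters, allmulti=%d\n", allmulti ? 0 : nmc,
	    allmulti);
	return (0);
}

/*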
*/ ieee80211_free_node(ni); } m_freem(bf->bf_m); bf->bf_m = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } } static void malo_stop(struct malo_softc *sc) { struct malo_hal *mh = sc->malo_mh; int i; DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid %u running %u\n", __func__, sc->malo_invalid, sc->malo_running); MALO_LOCK_ASSERT(sc); if (!sc->malo_running) return; /* * Shutdown the hardware and driver: * disable interrupts * turn off the radio * drain and release tx queues * * Note that some of this work is not possible if the hardware * is gone (invalid). */ sc->malo_running = 0; callout_stop(&sc->malo_watchdog_timer); sc->malo_timer = 0; /* disable interrupt. */ malo_hal_intrset(mh, 0); /* turn off the radio. */ malo_hal_setradio(mh, 0, MHP_AUTO_PREAMBLE); /* drain and release tx queues. */ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_draintxq(sc, &sc->malo_txq[i]); } static void malo_parent(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; int startall = 0; MALO_LOCK(sc); if (ic->ic_nrunning > 0) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->malo_running && !sc->malo_invalid) { malo_init(sc); startall = 1; } /* * To avoid rescanning another access point, * do not call malo_init() here. Instead, * only reflect promisc mode settings. */ malo_mode_init(sc); } else if (sc->malo_running) malo_stop(sc); MALO_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void malo_updateslot(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; struct malo_hal *mh = sc->malo_mh; int error; /* NB: can be called early; suppress needless cmds */ if (!sc->malo_running) return; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags); if (ic->ic_flags & IEEE80211_F_SHSLOT) error = malo_hal_set_slot(mh, 1); else error = malo_hal_set_slot(mh, 0); if (error != 0) device_printf(sc->malo_dev, "setting %s slot failed\n", ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long"); } static int malo_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct malo_softc *sc = ic->ic_softc; struct malo_hal *mh = sc->malo_mh; int error; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); /* * Invoke the net80211 layer first so iv_bss is setup. 
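* MALO_VAP(vap)->malo_newstate is the previously installed net80211 handler; it has to run first so vap->iv_bss below is valid when entering RUN state.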
*/ error = MALO_VAP(vap)->malo_newstate(vap, nstate, arg); if (error != 0) return error; if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode = ieee80211_chan2mode(ni->ni_chan); const struct ieee80211_txparam *tp = &vap->iv_txparms[mode]; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d associd 0x%x mode %d rate %d\n", vap->iv_ifp->if_xname, __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan), ni->ni_associd, mode, tp->ucastrate); malo_hal_setradio(mh, 1, (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? MHP_SHORT_PREAMBLE : MHP_LONG_PREAMBLE); malo_hal_setassocid(sc->malo_mh, ni->ni_bssid, ni->ni_associd); malo_hal_set_rate(mh, mode, tp->ucastrate == IEEE80211_FIXED_RATE_NONE ? 0 : malo_fix2rate(tp->ucastrate)); } return 0; } static int malo_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct malo_softc *sc = ic->ic_softc; struct malo_txbuf *bf; struct malo_txq *txq; if (!sc->malo_running || sc->malo_invalid) { m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. Note that we depend * on the classification by the 802.11 layer to get to the right h/w * queue. Management frames must ALWAYS go on queue 1 but we * cannot just force that here because we may receive non-mgt frames. */ txq = &sc->malo_txq[0]; bf = malo_getbuf(sc, txq); if (bf == NULL) { m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m) != 0) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because this just * prods the firmware to check the transmit descriptors. The firmware * will also start fetching descriptors by itself if it notices * new ones are present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing it's ok. * Delivering the kick here rather than in malo_tx_start is * an optimization to avoid poking the firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); return 0; } static void malo_sysctlattach(struct malo_softc *sc) { #ifdef MALO_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->malo_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->malo_dev); sc->malo_debug = malo_debug; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->malo_debug, 0, "control debugging printfs"); #endif } static void malo_announce(struct malo_softc *sc) { device_printf(sc->malo_dev, "versions [hw %d fw %d.%d.%d.%d] (regioncode %d)\n", sc->malo_hwspecs.hwversion, (sc->malo_hwspecs.fw_releasenum >> 24) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 16) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 8) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 0) & 0xff, sc->malo_hwspecs.regioncode); if (bootverbose || malo_rxbuf != MALO_RXBUF) device_printf(sc->malo_dev, "using %u rx buffers\n", malo_rxbuf); if (bootverbose || malo_txbuf != MALO_TXBUF) device_printf(sc->malo_dev, "using %u tx buffers\n", malo_txbuf); } /* * Convert net80211 channel to a HAL channel. 
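* Only the IEEE channel number and, for 2.4GHz channels, the band flag carry over; the HAL apparently needs none of the other net80211 channel state.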
*/ static void malo_mapchan(struct malo_hal_channel *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->flags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->flags.freqband = MALO_FREQ_BAND_2DOT4GHZ; } /* * Set/change channels. If the channel is really being changed, * it's done by resetting the chip. To accomplish this we must * first clean up any pending DMA, then restart things a la * malo_init. */ static int malo_chan_set(struct malo_softc *sc, struct ieee80211_channel *chan) { struct malo_hal *mh = sc->malo_mh; struct malo_hal_channel hchan; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with the flags constrained * to reflect the current operating mode. */ malo_mapchan(&hchan, chan); malo_hal_intrset(mh, 0); /* disable interrupts */ malo_hal_setchannel(mh, &hchan); malo_hal_settxpower(mh, &hchan); /* * Update internal state. */ sc->malo_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->malo_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->malo_curchan = hchan; malo_hal_intrset(mh, sc->malo_imask); return 0; } static void malo_scan_start(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_scan_end(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_set_channel(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; (void) malo_chan_set(sc, ic->ic_curchan); } static void malo_rx_proc(void *arg, int npending) { struct malo_softc *sc = arg; struct ieee80211com *ic = &sc->malo_ic; struct malo_rxbuf *bf; struct malo_rxdesc *ds; struct mbuf *m, *mnew; struct ieee80211_qosframe *wh; struct ieee80211_qosframe_addr4 *wh4; struct ieee80211_node *ni; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; uint32_t readptr, writeptr; DPRINTF(sc, MALO_DEBUG_RX_PROC, "%s: pending %u rdptr(0x%x) 0x%x wrptr(0x%x) 0x%x\n", __func__, npending, sc->malo_hwspecs.rxdesc_read, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read), sc->malo_hwspecs.rxdesc_write, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write)); readptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read); writeptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write); if (readptr == writeptr) return; bf = sc->malo_rxnext; for (ntodo = malo_rxquota; ntodo > 0 && readptr != writeptr; ntodo--) { if (bf == NULL) { bf = STAILQ_FIRST(&sc->malo_rxbuf); break; } ds = bf->bf_desc; if (bf->bf_m == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. 
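* (malo_rxbuf_init() below retries the allocation; on failure the descriptor stays marked MALO_RXD_CTRL_OS_OWN and is skipped.)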
*/ DPRINTF(sc, MALO_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void)malo_rxbuf_init(sc, bf); break; } MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->rxcontrol != MALO_RXD_CTRL_DMA_OWN) break; readptr = le32toh(ds->physnext); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RECV_DESC) malo_printrxbuf(bf, 0); #endif status = ds->status; if (status & MALO_RXD_STATUS_DECRYPT_ERR_MASK) { counter_u64_add(ic->ic_ierrors, 1); goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->pktlen); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ m = bf->bf_m; data = mtod(m, uint8_t *); hdrlen = ieee80211_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* * Calculate RSSI. XXX wrong */ rssi = 2 * ((int) ds->snr - ds->nf); /* NB: .5 dBm */ if (rssi > 100) rssi = 100; pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address frame at * the front. Hence there's no need to vet the packet length. * If the frame in fact is too small it should be discarded * at the net80211 layer. */ /* XXX don't need mbuf, just dma buffer */ mnew = malo_getrxmbuf(sc, bf); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto rx_next; } /* * Attach the dma buffer to the mbuf; malo_rxbuf_init will * re-setup the rx descriptor using the replacement dma * buffer we just installed above. */ bf->bf_m = mnew; m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_IS_DSTODS(wh)) { wh4 = mtod(m, struct ieee80211_qosframe_addr4*); *(uint16_t *)wh4->i_qos = ds->qosctrl; } else { *(uint16_t *)wh->i_qos = ds->qosctrl; } } if (ieee80211_radiotap_active(ic)) { sc->malo_rx_th.wr_flags = 0; sc->malo_rx_th.wr_rate = ds->rate; sc->malo_rx_th.wr_antsignal = rssi; sc->malo_rx_th.wr_antnoise = ds->nf; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->rate, rssi); } #endif /* dispatch */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, ds->nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, ds->nf); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) malo_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, readptr); sc->malo_rxnext = bf; if (mbufq_first(&sc->malo_snd) != NULL) malo_start(sc); } /* * Reclaim all tx queue resources. 
*/ static void malo_tx_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_cleanupq(sc, &sc->malo_txq[i]); } int malo_detach(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; malo_stop(sc); if (sc->malo_tq != NULL) { taskqueue_drain(sc->malo_tq, &sc->malo_rxtask); taskqueue_drain(sc->malo_tq, &sc->malo_txtask); taskqueue_free(sc->malo_tq); sc->malo_tq = NULL; } /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ieee80211_ifdetach(ic); callout_drain(&sc->malo_watchdog_timer); malo_dma_cleanup(sc); malo_tx_cleanup(sc); malo_hal_detach(sc->malo_mh); mbufq_drain(&sc->malo_snd); MALO_LOCK_DESTROY(sc); return 0; } void malo_shutdown(struct malo_softc *sc) { malo_stop(sc); } void malo_suspend(struct malo_softc *sc) { malo_stop(sc); } void malo_resume(struct malo_softc *sc) { if (sc->malo_ic.ic_nrunning > 0) malo_init(sc); } Index: head/sys/dev/nfe/if_nfe.c =================================================================== --- head/sys/dev/nfe/if_nfe.c (revision 296271) +++ head/sys/dev/nfe/if_nfe.c (revision 296272) @@ -1,3413 +1,3413 @@ /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ /*- * Copyright (c) 2006 Shigeaki Tagashira * Copyright (c) 2006 Damien Bergamini * Copyright (c) 2005, 2006 Jonathan Gray * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(nfe, pci, 1, 1, 1); MODULE_DEPEND(nfe, ether, 1, 1, 1); MODULE_DEPEND(nfe, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
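* (miibus_if.h is generated from the miibus_if.m interface description at build time; the MODULE_DEPEND on miibus above covers loading as a module.)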
*/ #include "miibus_if.h" static int nfe_probe(device_t); static int nfe_attach(device_t); static int nfe_detach(device_t); static int nfe_suspend(device_t); static int nfe_resume(device_t); static int nfe_shutdown(device_t); static int nfe_can_use_msix(struct nfe_softc *); static int nfe_detect_msik9(struct nfe_softc *); static void nfe_power(struct nfe_softc *); static int nfe_miibus_readreg(device_t, int, int); static int nfe_miibus_writereg(device_t, int, int, int); static void nfe_miibus_statchg(device_t); static void nfe_mac_config(struct nfe_softc *, struct mii_data *); static void nfe_set_intr(struct nfe_softc *); static __inline void nfe_enable_intr(struct nfe_softc *); static __inline void nfe_disable_intr(struct nfe_softc *); static int nfe_ioctl(if_t, u_long, caddr_t); static void nfe_alloc_msix(struct nfe_softc *, int); static int nfe_intr(void *); static void nfe_int_task(void *, int); static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); static int nfe_newbuf(struct nfe_softc *, int); static int nfe_jnewbuf(struct nfe_softc *, int); static int nfe_rxeof(struct nfe_softc *, int, int *); static int nfe_jrxeof(struct nfe_softc *, int, int *); static void nfe_txeof(struct nfe_softc *); static int nfe_encap(struct nfe_softc *, struct mbuf **); static void nfe_setmulti(struct nfe_softc *); static void nfe_start(if_t); static void nfe_start_locked(if_t); static void nfe_watchdog(if_t); static void nfe_init(void *); static void nfe_init_locked(void *); static void nfe_stop(if_t); static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static int nfe_ifmedia_upd(if_t); static void nfe_ifmedia_sts(if_t, struct ifmediareq *); static void nfe_tick(void *); static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); static void nfe_sysctl_node(struct nfe_softc *); static void nfe_stats_clear(struct nfe_softc *); static void nfe_stats_update(struct nfe_softc *); static void nfe_set_linkspeed(struct nfe_softc *); static void nfe_set_wol(struct nfe_softc *); #ifdef NFE_DEBUG static int nfedebug = 0; #define DPRINTF(sc, ...) do { \ if (nfedebug) \ device_printf((sc)->nfe_dev, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, ...) do { \ if (nfedebug >= (n)) \ device_printf((sc)->nfe_dev, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, ...) #define DPRINTFN(sc, n, ...) #endif #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) /* Tunables. 
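* hw.nfe.msi_disable, hw.nfe.msix_disable and hw.nfe.jumbo_disable may be set from loader.conf(5): the first two force INTx interrupts, the last skips the jumbo Rx ring.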
*/ static int msi_disable = 0; static int msix_disable = 0; static int jumbo_disable = 0; TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); static device_method_t nfe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nfe_probe), DEVMETHOD(device_attach, nfe_attach), DEVMETHOD(device_detach, nfe_detach), DEVMETHOD(device_suspend, nfe_suspend), DEVMETHOD(device_resume, nfe_resume), DEVMETHOD(device_shutdown, nfe_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, nfe_miibus_readreg), DEVMETHOD(miibus_writereg, nfe_miibus_writereg), DEVMETHOD(miibus_statchg, nfe_miibus_statchg), DEVMETHOD_END }; static driver_t nfe_driver = { "nfe", nfe_methods, sizeof(struct nfe_softc) }; static devclass_t nfe_devclass; DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); static struct nfe_type nfe_devs[] = { {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, "NVIDIA nForce MCP Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, "NVIDIA nForce2 MCP2 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, "NVIDIA nForce2 400 MCP4 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, "NVIDIA nForce2 400 MCP5 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, "NVIDIA nForce3 MCP3 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, "NVIDIA nForce3 250 MCP6 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, "NVIDIA nForce3 MCP7 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, "NVIDIA nForce 430 MCP12 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, "NVIDIA nForce 430 MCP13 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, 
PCI_PRODUCT_NVIDIA_MCP67_LAN4, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, "NVIDIA nForce MCP79 Networking Adapter"}, {0, 0, NULL} }; /* Probe for supported hardware ID's */ static int nfe_probe(device_t dev) { struct nfe_type *t; t = nfe_devs; /* Check for matching PCI DEVICE ID's */ while (t->name != NULL) { if ((pci_get_vendor(dev) == t->vid_id) && (pci_get_device(dev) == t->dev_id)) { device_set_desc(dev, t->name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void nfe_alloc_msix(struct nfe_softc *sc, int count) { int rid; rid = PCIR_BAR(2); sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_msix_res == NULL) { device_printf(sc->nfe_dev, "couldn't allocate MSIX table resource\n"); return; } rid = PCIR_BAR(3); sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_msix_pba_res == NULL) { device_printf(sc->nfe_dev, "couldn't allocate MSIX PBA resource\n"); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_res = NULL; return; } if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { if (count == NFE_MSI_MESSAGES) { if (bootverbose) device_printf(sc->nfe_dev, "Using %d MSIX messages\n", count); sc->nfe_msix = 1; } else { if (bootverbose) device_printf(sc->nfe_dev, "couldn't allocate MSIX\n"); pci_release_msi(sc->nfe_dev); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(3), sc->nfe_msix_pba_res); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_pba_res = NULL; sc->nfe_msix_res = NULL; } } } static int nfe_detect_msik9(struct nfe_softc *sc) { static const char *maker = "MSI"; static const char *product = "K9N6PGM2-V2 (MS-7309)"; char *m, *p; int found; found = 0; m = kern_getenv("smbios.planar.maker"); p = kern_getenv("smbios.planar.product"); if (m != NULL && p != NULL) { if (strcmp(m, maker) == 0 && strcmp(p, product) == 0) found = 1; } if (m != NULL) freeenv(m); if (p != NULL) freeenv(p); return (found); } static int nfe_attach(device_t dev) { struct nfe_softc *sc; if_t ifp; bus_addr_t dma_addr_max; int error = 0, i, msic, phyloc, reg, rid; sc = device_get_softc(dev); sc->nfe_dev = dev; mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
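/* rid is PCIR_BAR(0), the NIC register window */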
&rid, RF_ACTIVE); if (sc->nfe_res[0] == NULL) { device_printf(dev, "couldn't map memory resources\n"); mtx_destroy(&sc->nfe_mtx); return (ENXIO); } if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { uint16_t v, width; v = pci_read_config(dev, reg + 0x08, 2); /* Change max. read request size to 4096. */ v &= ~(7 << 12); v |= (5 << 12); pci_write_config(dev, reg + 0x08, v, 2); v = pci_read_config(dev, reg + 0x0c, 2); /* link capability */ v = (v >> 4) & 0x0f; width = pci_read_config(dev, reg + 0x12, 2); /* negotiated link width */ width = (width >> 4) & 0x3f; if (v != width) device_printf(sc->nfe_dev, "warning, negotiated width of link(x%d) != " "max. width of link(x%d)\n", width, v); } if (nfe_can_use_msix(sc) == 0) { device_printf(sc->nfe_dev, "MSI/MSI-X capability black-listed, will use INTx\n"); msix_disable = 1; msi_disable = 1; } /* Allocate interrupt */ if (msix_disable == 0 || msi_disable == 0) { if (msix_disable == 0 && (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) nfe_alloc_msix(sc, msic); if (msi_disable == 0 && sc->nfe_msix == 0 && (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && pci_alloc_msi(dev, &msic) == 0) { if (msic == NFE_MSI_MESSAGES) { if (bootverbose) device_printf(dev, "Using %d MSI messages\n", msic); sc->nfe_msi = 1; } else pci_release_msi(dev); } } if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { rid = 0; sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->nfe_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { sc->nfe_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->nfe_irq[i] == NULL) { device_printf(dev, "couldn't allocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } /* Map interrupts to vector 0. */ if (sc->nfe_msix != 0) { NFE_WRITE(sc, NFE_MSIX_MAP0, 0); NFE_WRITE(sc, NFE_MSIX_MAP1, 0); } else if (sc->nfe_msi != 0) { NFE_WRITE(sc, NFE_MSI_MAP0, 0); NFE_WRITE(sc, NFE_MSI_MAP1, 0); } } /* Set IRQ status/mask register. 
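* MSI-X and MSI parts use different status/mask register offsets and enable bits, selected below; plain INTx keeps the NFE_IRQ_* defaults.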
*/ sc->nfe_irq_status = NFE_IRQ_STATUS; sc->nfe_irq_mask = NFE_IRQ_MASK; sc->nfe_intrs = NFE_IRQ_WANTED; sc->nfe_nointrs = 0; if (sc->nfe_msix != 0) { sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; sc->nfe_nointrs = NFE_IRQ_WANTED; } else if (sc->nfe_msi != 0) { sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; } sc->nfe_devid = pci_get_device(dev); sc->nfe_revid = pci_get_revid(dev); sc->nfe_flags = 0; switch (sc->nfe_devid) { case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; break; case PCI_PRODUCT_NVIDIA_MCP51_LAN1: case PCI_PRODUCT_NVIDIA_MCP51_LAN2: sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; break; case PCI_PRODUCT_NVIDIA_CK804_LAN1: case PCI_PRODUCT_NVIDIA_CK804_LAN2: case PCI_PRODUCT_NVIDIA_MCP04_LAN1: case PCI_PRODUCT_NVIDIA_MCP04_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_MIB_V1; break; case PCI_PRODUCT_NVIDIA_MCP55_LAN1: case PCI_PRODUCT_NVIDIA_MCP55_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; case PCI_PRODUCT_NVIDIA_MCP61_LAN1: case PCI_PRODUCT_NVIDIA_MCP61_LAN2: case PCI_PRODUCT_NVIDIA_MCP61_LAN3: case PCI_PRODUCT_NVIDIA_MCP61_LAN4: case PCI_PRODUCT_NVIDIA_MCP67_LAN1: case PCI_PRODUCT_NVIDIA_MCP67_LAN2: case PCI_PRODUCT_NVIDIA_MCP67_LAN3: case PCI_PRODUCT_NVIDIA_MCP67_LAN4: case PCI_PRODUCT_NVIDIA_MCP73_LAN1: case PCI_PRODUCT_NVIDIA_MCP73_LAN2: case PCI_PRODUCT_NVIDIA_MCP73_LAN3: case PCI_PRODUCT_NVIDIA_MCP73_LAN4: sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; case PCI_PRODUCT_NVIDIA_MCP77_LAN1: case PCI_PRODUCT_NVIDIA_MCP77_LAN2: case PCI_PRODUCT_NVIDIA_MCP77_LAN3: case PCI_PRODUCT_NVIDIA_MCP77_LAN4: /* XXX flow control */ sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; break; case PCI_PRODUCT_NVIDIA_MCP79_LAN1: case PCI_PRODUCT_NVIDIA_MCP79_LAN2: case PCI_PRODUCT_NVIDIA_MCP79_LAN3: case PCI_PRODUCT_NVIDIA_MCP79_LAN4: /* XXX flow control */ sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; break; case PCI_PRODUCT_NVIDIA_MCP65_LAN1: case PCI_PRODUCT_NVIDIA_MCP65_LAN2: case PCI_PRODUCT_NVIDIA_MCP65_LAN3: case PCI_PRODUCT_NVIDIA_MCP65_LAN4: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; } nfe_power(sc); /* Check for reversed ethernet address */ if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) sc->nfe_flags |= NFE_CORRECT_MACADDR; nfe_get_macaddr(sc, sc->eaddr); /* * Allocate the parent bus DMA tag appropriate for PCI. */ dma_addr_max = BUS_SPACE_MAXADDR_32BIT; if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) dma_addr_max = NFE_DMA_MAXADDR; error = bus_dma_tag_create( bus_get_dma_tag(sc->nfe_dev), /* parent */ 1, 0, /* alignment, boundary */ dma_addr_max, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->nfe_parent_tag); if (error) goto fail; ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_gethandle()\n"); error = ENOSPC; goto fail; } /* * Allocate Tx and Rx rings. 
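* NB: nfe_alloc_jrx_ring() returns void; on failure it just disables jumbo frame support instead of failing the attach.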
*/ if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) goto fail; if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) goto fail; nfe_alloc_jrx_ring(sc, &sc->jrxq); /* Create sysctl node. */ nfe_sysctl_node(sc); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, nfe_ioctl); if_setstartfn(ifp, nfe_start); if_sethwassist(ifp, 0); if_setcapabilities(ifp, 0); if_setinitfn(ifp, nfe_init); if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1); if_setsendqready(ifp); if (sc->nfe_flags & NFE_HW_CSUM) { if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0); if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0); } if_setcapenable(ifp, if_getcapabilities(ifp)); sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS; /* VLAN capability setup. */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0) if_setcapabilitiesbit(ifp, (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0); } if (pci_find_cap(dev, PCIY_PMG, ®) == 0) if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); #ifdef DEVICE_POLLING if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); #endif /* Do MII setup */ phyloc = MII_PHY_ANY; if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) { if (nfe_detect_msik9(sc) != 0) phyloc = 0; } error = mii_attach(dev, &sc->nfe_miibus, ifp, (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts, BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ether_ifattach(ifp, sc->eaddr); TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->nfe_tq); taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->nfe_dev)); error = 0; if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { error = bus_setup_intr(dev, sc->nfe_irq[0], INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, &sc->nfe_intrhand[0]); } else { for (i = 0; i < NFE_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, sc->nfe_irq[i], INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, &sc->nfe_intrhand[i]); if (error != 0) break; } } if (error) { device_printf(dev, "couldn't set up irq\n"); taskqueue_free(sc->nfe_tq); sc->nfe_tq = NULL; ether_ifdetach(ifp); goto fail; } fail: if (error) nfe_detach(dev); return (error); } static int nfe_detach(device_t dev) { struct nfe_softc *sc; if_t ifp; uint8_t eaddr[ETHER_ADDR_LEN]; int i, rid; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); ifp = sc->nfe_ifp; #ifdef DEVICE_POLLING if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { NFE_LOCK(sc); nfe_stop(ifp); if_setflagbits(ifp, 0, IFF_UP); NFE_UNLOCK(sc); callout_drain(&sc->nfe_stat_ch); ether_ifdetach(ifp); } if (ifp) { /* restore ethernet address */ if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { for (i = 0; i < 
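/* these parts store the address byte-reversed; swap before writing back */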
ETHER_ADDR_LEN; i++) { eaddr[i] = sc->eaddr[5 - i]; } } else bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); nfe_set_macaddr(sc, eaddr); if_free(ifp); } if (sc->nfe_miibus) device_delete_child(dev, sc->nfe_miibus); bus_generic_detach(dev); if (sc->nfe_tq != NULL) { taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); taskqueue_free(sc->nfe_tq); sc->nfe_tq = NULL; } for (i = 0; i < NFE_MSI_MESSAGES; i++) { if (sc->nfe_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->nfe_irq[i], sc->nfe_intrhand[i]); sc->nfe_intrhand[i] = NULL; } } if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { if (sc->nfe_irq[0] != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq[0]); } else { for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { if (sc->nfe_irq[i] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->nfe_irq[i]); sc->nfe_irq[i] = NULL; } } pci_release_msi(dev); } if (sc->nfe_msix_pba_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), sc->nfe_msix_pba_res); sc->nfe_msix_pba_res = NULL; } if (sc->nfe_msix_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_res = NULL; } if (sc->nfe_res[0] != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->nfe_res[0]); sc->nfe_res[0] = NULL; } nfe_free_tx_ring(sc, &sc->txq); nfe_free_rx_ring(sc, &sc->rxq); nfe_free_jrx_ring(sc, &sc->jrxq); if (sc->nfe_parent_tag) { bus_dma_tag_destroy(sc->nfe_parent_tag); sc->nfe_parent_tag = NULL; } mtx_destroy(&sc->nfe_mtx); return (0); } static int nfe_suspend(device_t dev) { struct nfe_softc *sc; sc = device_get_softc(dev); NFE_LOCK(sc); nfe_stop(sc->nfe_ifp); nfe_set_wol(sc); sc->nfe_suspended = 1; NFE_UNLOCK(sc); return (0); } static int nfe_resume(device_t dev) { struct nfe_softc *sc; if_t ifp; sc = device_get_softc(dev); NFE_LOCK(sc); nfe_power(sc); ifp = sc->nfe_ifp; if (if_getflags(ifp) & IFF_UP) nfe_init_locked(sc); sc->nfe_suspended = 0; NFE_UNLOCK(sc); return (0); } static int nfe_can_use_msix(struct nfe_softc *sc) { static struct msix_blacklist { char *maker; char *product; } msix_blacklists[] = { { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" } }; struct msix_blacklist *mblp; char *maker, *product; int count, n, use_msix; /* * Search base board manufacturer and product name table * to see if this system has a known MSI/MSI-X issue. 
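* Boards in msix_blacklists[] below fall back to INTx interrupts.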
maker = kern_getenv("smbios.planar.maker"); product = kern_getenv("smbios.planar.product"); use_msix = 1; if (maker != NULL && product != NULL) { count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]); mblp = msix_blacklists; for (n = 0; n < count; n++) { if (strcmp(maker, mblp->maker) == 0 && strcmp(product, mblp->product) == 0) { use_msix = 0; break; } mblp++; } } if (maker != NULL) freeenv(maker); if (product != NULL) freeenv(product); return (use_msix); } /* Take PHY/NIC out of powerdown, from Linux */ static void nfe_power(struct nfe_softc *sc) { uint32_t pwr; if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) return; NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); DELAY(100); NFE_WRITE(sc, NFE_MAC_RESET, 0); DELAY(100); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); pwr = NFE_READ(sc, NFE_PWR2_CTL); pwr &= ~NFE_PWR2_WAKEUP_MASK; if (sc->nfe_revid >= 0xa3 && (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) pwr |= NFE_PWR2_REVA3; NFE_WRITE(sc, NFE_PWR2_CTL, pwr); } static void nfe_miibus_statchg(device_t dev) { struct nfe_softc *sc; struct mii_data *mii; if_t ifp; uint32_t rxctl, txctl; sc = device_get_softc(dev); mii = device_get_softc(sc->nfe_miibus); ifp = sc->nfe_ifp; sc->nfe_link = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: case IFM_1000_T: sc->nfe_link = 1; break; default: break; } } nfe_mac_config(sc, mii); txctl = NFE_READ(sc, NFE_TX_CTL); rxctl = NFE_READ(sc, NFE_RX_CTL); if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { txctl |= NFE_TX_START; rxctl |= NFE_RX_START; } else { txctl &= ~NFE_TX_START; rxctl &= ~NFE_RX_START; } NFE_WRITE(sc, NFE_TX_CTL, txctl); NFE_WRITE(sc, NFE_RX_CTL, rxctl); } static void nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii) { uint32_t link, misc, phy, seed; uint32_t val; NFE_LOCK_ASSERT(sc); phy = NFE_READ(sc, NFE_PHY_IFACE); phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); seed = NFE_READ(sc, NFE_RNDSEED); seed &= ~NFE_SEED_MASK; misc = NFE_MISC1_MAGIC; link = NFE_MEDIA_SET; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) { phy |= NFE_PHY_HDX; /* half-duplex */ misc |= NFE_MISC1_HDX; } switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: /* full-duplex only */ link |= NFE_MEDIA_1000T; seed |= NFE_SEED_1000T; phy |= NFE_PHY_1000T; break; case IFM_100_TX: link |= NFE_MEDIA_100TX; seed |= NFE_SEED_100TX; phy |= NFE_PHY_100TX; break; case IFM_10_T: link |= NFE_MEDIA_10T; seed |= NFE_SEED_10T; break; } if ((phy & 0x10000000) != 0) { if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) val = NFE_R1_MAGIC_1000; else val = NFE_R1_MAGIC_10_100; } else val = NFE_R1_MAGIC_DEFAULT; NFE_WRITE(sc, NFE_SETUP_R1, val); NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ NFE_WRITE(sc, NFE_PHY_IFACE, phy); NFE_WRITE(sc, NFE_MISC1, misc); NFE_WRITE(sc, NFE_LINKSPEED, link); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { /* It seems all hardware supports Rx pause frames. 
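* Tx pause additionally requires NFE_TX_FLOW_CTRL (checked below), so Rx pause is the only side handled for every part.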
*/ val = NFE_READ(sc, NFE_RXFILTER); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= NFE_PFF_RX_PAUSE; else val &= ~NFE_PFF_RX_PAUSE; NFE_WRITE(sc, NFE_RXFILTER, val); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { val = NFE_READ(sc, NFE_MISC1); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) { NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_ENABLE); val |= NFE_MISC1_TX_PAUSE; } else { val &= ~NFE_MISC1_TX_PAUSE; NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); } NFE_WRITE(sc, NFE_MISC1, val); } } else { /* disable rx/tx pause frames */ val = NFE_READ(sc, NFE_RXFILTER); val &= ~NFE_PFF_RX_PAUSE; NFE_WRITE(sc, NFE_RXFILTER, val); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); val = NFE_READ(sc, NFE_MISC1); val &= ~NFE_MISC1_TX_PAUSE; NFE_WRITE(sc, NFE_MISC1, val); } } } static int nfe_miibus_readreg(device_t dev, int phy, int reg) { struct nfe_softc *sc = device_get_softc(dev); uint32_t val; int ntries; NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); DELAY(100); } NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { DELAY(100); if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } if (ntries == NFE_TIMEOUT) { DPRINTFN(sc, 2, "timeout waiting for PHY\n"); return 0; } if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { DPRINTFN(sc, 2, "could not read PHY\n"); return 0; } val = NFE_READ(sc, NFE_PHY_DATA); if (val != 0xffffffff && val != 0) sc->mii_phyaddr = phy; DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); return (val); } static int nfe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct nfe_softc *sc = device_get_softc(dev); uint32_t ctl; int ntries; NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); DELAY(100); } NFE_WRITE(sc, NFE_PHY_DATA, val); ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; NFE_WRITE(sc, NFE_PHY_CTL, ctl); for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { DELAY(100); if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } #ifdef NFE_DEBUG if (nfedebug >= 2 && ntries == NFE_TIMEOUT) device_printf(sc->nfe_dev, "could not write to PHY\n"); #endif return (0); } struct nfe_dmamap_arg { bus_addr_t nfe_busaddr; }; static int nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_dmamap_arg ctx; struct nfe_rx_data *data; void *desc; int i, error, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->rx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); goto fail; } /* allocate memory to desc */ error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA map\n"); goto fail; } if (sc->nfe_flags & 
NFE_40BIT_ADDR) ring->desc64 = desc; else ring->desc32 = desc; /* map desc to device visible address space */ ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load desc DMA map\n"); goto fail; } ring->physaddr = ctx.nfe_busaddr; error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->rx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); goto fail; } error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA spare map\n"); goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. */ for (i = 0; i < NFE_RX_RING_COUNT; i++) { data = &sc->rxq.data[i]; data->rx_data_map = NULL; data->m = NULL; error = bus_dmamap_create(ring->rx_data_tag, 0, &data->rx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA map\n"); goto fail; } } fail: return (error); } static void nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { struct nfe_dmamap_arg ctx; struct nfe_rx_data *data; void *desc; int i, error, descsize; if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) return; if (jumbo_disable != 0) { device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); sc->nfe_jumbo_disable = 1; return; } if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } ring->jcur = ring->jnext = 0; /* Create DMA tag for jumbo Rx ring. */ error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1, /* nsegments */ NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->jrx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo ring DMA tag\n"); goto fail; } /* Create DMA tag for jumbo Rx buffers. */ error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->jrx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx buffer DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
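* NB: bus_dmamem_alloc() returns the kernel virtual address in desc; the bus address arrives via the nfe_dma_map_segs() callback in ctx.nfe_busaddr.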
*/ error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not allocate DMA'able memory for jumbo Rx ring\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->jdesc64 = desc; else ring->jdesc32 = desc; ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load DMA'able memory for jumbo Rx ring\n"); goto fail; } ring->jphysaddr = ctx.nfe_busaddr; /* Create DMA maps for jumbo Rx buffers. */ error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx DMA spare map\n"); goto fail; } for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { data = &sc->jrxq.jdata[i]; data->rx_data_map = NULL; data->m = NULL; error = bus_dmamap_create(ring->jrx_data_tag, 0, &data->rx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx DMA map\n"); goto fail; } } return; fail: /* * Running without jumbo frame support is ok for most cases * so don't fail on creating dma tag/map for jumbo frame. */ nfe_free_jrx_ring(sc, ring); device_printf(sc->nfe_dev, "disabling jumbo frame support due to " "resource shortage\n"); sc->nfe_jumbo_disable = 1; } static int nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { void *desc; size_t descsize; int i; ring->cur = ring->next = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_RX_RING_COUNT); for (i = 0; i < NFE_RX_RING_COUNT; i++) { if (nfe_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static int nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { void *desc; size_t descsize; int i; ring->jcur = ring->jnext = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { if (nfe_jnewbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_rx_data *data; void *desc; int i; if (sc->nfe_flags & NFE_40BIT_ADDR) desc = ring->desc64; else desc = ring->desc32; for (i = 0; i < NFE_RX_RING_COUNT; i++) { data = &ring->data[i]; if (data->rx_data_map != NULL) { bus_dmamap_destroy(ring->rx_data_tag, data->rx_data_map); data->rx_data_map = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } if (ring->rx_data_tag != NULL) { if (ring->rx_spare_map != NULL) { bus_dmamap_destroy(ring->rx_data_tag, ring->rx_spare_map); ring->rx_spare_map = NULL; } bus_dma_tag_destroy(ring->rx_data_tag); ring->rx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); ring->desc64 = NULL; ring->desc32 = NULL; } if (ring->rx_desc_tag != NULL) { bus_dma_tag_destroy(ring->rx_desc_tag); ring->rx_desc_tag = NULL; } } static void 
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { struct nfe_rx_data *data; void *desc; int i, descsize; if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) return; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { data = &ring->jdata[i]; if (data->rx_data_map != NULL) { bus_dmamap_destroy(ring->jrx_data_tag, data->rx_data_map); data->rx_data_map = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } if (ring->jrx_data_tag != NULL) { if (ring->jrx_spare_map != NULL) { bus_dmamap_destroy(ring->jrx_data_tag, ring->jrx_spare_map); ring->jrx_spare_map = NULL; } bus_dma_tag_destroy(ring->jrx_data_tag); ring->jrx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); ring->jdesc64 = NULL; ring->jdesc32 = NULL; } if (ring->jrx_desc_tag != NULL) { bus_dma_tag_destroy(ring->jrx_desc_tag); ring->jrx_desc_tag = NULL; } } static int nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { struct nfe_dmamap_arg ctx; int i, error; void *desc; int descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } ring->queued = 0; ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->tx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA map\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->desc64 = desc; else ring->desc32 = desc; ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load desc DMA map\n"); goto fail; } ring->physaddr = ctx.nfe_busaddr; error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, NFE_TSO_MAXSIZE, NFE_MAX_SCATTER, NFE_TSO_MAXSGSIZE, 0, NULL, NULL, &ring->tx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); goto fail; } for (i = 0; i < NFE_TX_RING_COUNT; i++) { error = bus_dmamap_create(ring->tx_data_tag, 0, &ring->data[i].tx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Tx DMA map\n"); goto fail; } } fail: return (error); } static void nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { void *desc; size_t descsize; sc->nfe_force_tx = 0; ring->queued = 0; ring->cur = ring->next = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_TX_RING_COUNT); bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void 
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { struct nfe_tx_data *data; void *desc; int i, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_TX_RING_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); m_freem(data->m); data->m = NULL; } if (data->tx_data_map != NULL) { bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map); data->tx_data_map = NULL; } } if (ring->tx_data_tag != NULL) { bus_dma_tag_destroy(ring->tx_data_tag); ring->tx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); ring->desc64 = NULL; ring->desc32 = NULL; bus_dma_tag_destroy(ring->tx_desc_tag); ring->tx_desc_tag = NULL; } } #ifdef DEVICE_POLLING static poll_handler_t nfe_poll; static int nfe_poll(if_t ifp, enum poll_cmd cmd, int count) { struct nfe_softc *sc = if_getsoftc(ifp); uint32_t r; int rx_npkts = 0; NFE_LOCK(sc); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { NFE_UNLOCK(sc); return (rx_npkts); } if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) rx_npkts = nfe_jrxeof(sc, count, &rx_npkts); else rx_npkts = nfe_rxeof(sc, count, &rx_npkts); nfe_txeof(sc); if (!if_sendq_empty(ifp)) nfe_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { NFE_UNLOCK(sc); return (rx_npkts); } NFE_WRITE(sc, sc->nfe_irq_status, r); if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); DPRINTF(sc, "link state changed\n"); } } NFE_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static void nfe_set_intr(struct nfe_softc *sc) { if (sc->nfe_msi != 0) NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); } /* In MSIX, a write to mask registers behaves as XOR. */ static __inline void nfe_enable_intr(struct nfe_softc *sc) { if (sc->nfe_msix != 0) { /* XXX Should have a better way to enable interrupts! */ if (NFE_READ(sc, sc->nfe_irq_mask) == 0) NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); } else NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); } static __inline void nfe_disable_intr(struct nfe_softc *sc) { if (sc->nfe_msix != 0) { /* XXX Should have a better way to disable interrupts! 
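* Same XOR caveat as nfe_enable_intr(): writing nfe_nointrs (NFE_IRQ_WANTED in MSI-X mode) toggles the currently enabled bits off, so only write while the mask reads non-zero.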
*/ if (NFE_READ(sc, sc->nfe_irq_mask) != 0) NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); } else NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); } static int nfe_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct nfe_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, init, mask; sc = if_getsoftc(ifp); ifr = (struct ifreq *) data; error = 0; init = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) error = EINVAL; else if (if_getmtu(ifp) != ifr->ifr_mtu) { if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || (sc->nfe_jumbo_disable != 0)) && ifr->ifr_mtu > ETHERMTU) error = EINVAL; else { NFE_LOCK(sc); if_setmtu(ifp, ifr->ifr_mtu); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); nfe_init_locked(sc); } NFE_UNLOCK(sc); } } break; case SIOCSIFFLAGS: NFE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { /* * If only the PROMISC or ALLMULTI flag changes, then * don't do a full re-init of the chip, just update * the Rx filter. */ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) && ((if_getflags(ifp) ^ sc->nfe_if_flags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) nfe_setmulti(sc); else nfe_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) nfe_stop(ifp); } sc->nfe_if_flags = if_getflags(ifp); NFE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { NFE_LOCK(sc); nfe_setmulti(sc); NFE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->nfe_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(nfe_poll, ifp); if (error) break; NFE_LOCK(sc); nfe_disable_intr(sc); if_setcapenablebit(ifp, IFCAP_POLLING, 0); NFE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ NFE_LOCK(sc); nfe_enable_intr(sc); if_setcapenablebit(ifp, 0, IFCAP_POLLING); NFE_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_WOL_MAGIC) != 0 && (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) if_togglecapenable(ifp, IFCAP_WOL_MAGIC); if ((mask & IFCAP_TXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0); else if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES); } if ((mask & IFCAP_RXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_RXCSUM); init++; } if ((mask & IFCAP_TSO4) != 0 && (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { if_togglecapenable(ifp, IFCAP_TSO4); if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0) if_sethwassistbits(ifp, CSUM_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_TSO); } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); init++; } /* * XXX * It seems that VLAN stripping requires Rx checksum offload. * Unfortunately FreeBSD has no way to disable only Rx side * VLAN stripping. 
So when we know Rx checksum offload is * disabled turn entire hardware VLAN assist off. */ if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) { if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) init++; if_setcapenablebit(ifp, 0, (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO)); } if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); nfe_init(sc); } if_vlancap(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static int nfe_intr(void *arg) { struct nfe_softc *sc; uint32_t status; sc = (struct nfe_softc *)arg; status = NFE_READ(sc, sc->nfe_irq_status); if (status == 0 || status == 0xffffffff) return (FILTER_STRAY); nfe_disable_intr(sc); - taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); + taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task); return (FILTER_HANDLED); } static void nfe_int_task(void *arg, int pending) { struct nfe_softc *sc = arg; if_t ifp = sc->nfe_ifp; uint32_t r; int domore; NFE_LOCK(sc); if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { nfe_enable_intr(sc); NFE_UNLOCK(sc); return; /* not for us */ } NFE_WRITE(sc, sc->nfe_irq_status, r); DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) { NFE_UNLOCK(sc); return; } #endif if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); DPRINTF(sc, "link state changed\n"); } if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { NFE_UNLOCK(sc); nfe_disable_intr(sc); return; } domore = 0; /* check Rx ring */ if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); else domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); /* check Tx ring */ nfe_txeof(sc); if (!if_sendq_empty(ifp)) nfe_start_locked(ifp); NFE_UNLOCK(sc); if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { - taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); + taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task); return; } /* Reenable interrupts. */ nfe_enable_intr(sc); } static __inline void nfe_discard_rxbuf(struct nfe_softc *sc, int idx) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; data = &sc->rxq.data[idx]; m = data->m; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[idx]; /* VLAN packet may have overwritten it. */ desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); desc64->length = htole16(m->m_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[idx]; desc32->length = htole16(m->m_len); desc32->flags = htole16(NFE_RX_READY); } } static __inline void nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; data = &sc->jrxq.jdata[idx]; m = data->m; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[idx]; /* VLAN packet may have overwritten it. 
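 * (On NFE_40BIT_ADDR chips the second physaddr word doubles as the Rx
 * VLAN-tag field: nfe_rxeof() and nfe_jrxeof() read the tag back from
 * le32toh(desc64->physaddr[1]), so the buffer address must be
 * rewritten before the descriptor is handed back to the chip.)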
*/ desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); desc64->length = htole16(m->m_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->jrxq.jdesc32[idx]; desc32->length = htole16(m->m_len); desc32->flags = htole16(NFE_RX_READY); } } static int nfe_newbuf(struct nfe_softc *sc, int idx) { struct nfe_rx_data *data; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); data = &sc->rxq.data[idx]; if (data->m != NULL) { bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); } map = data->rx_data_map; data->rx_data_map = sc->rxq.rx_spare_map; sc->rxq.rx_spare_map = map; bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, BUS_DMASYNC_PREREAD); data->paddr = segs[0].ds_addr; data->m = m; /* update mapping address in h/w descriptor */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[idx]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc64->length = htole16(segs[0].ds_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[idx]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc32->length = htole16(segs[0].ds_len); desc32->flags = htole16(NFE_RX_READY); } return (0); } static int nfe_jnewbuf(struct nfe_softc *sc, int idx) { struct nfe_rx_data *data; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = MJUM9BYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); data = &sc->jrxq.jdata[idx]; if (data->m != NULL) { bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); } map = data->rx_data_map; data->rx_data_map = sc->jrxq.jrx_spare_map; sc->jrxq.jrx_spare_map = map; bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, BUS_DMASYNC_PREREAD); data->paddr = segs[0].ds_addr; data->m = m; /* update mapping address in h/w descriptor */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[idx]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc64->length = htole16(segs[0].ds_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->jrxq.jdesc32[idx]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc32->length = htole16(segs[0].ds_len); desc32->flags = htole16(NFE_RX_READY); } return (0); } static int nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) { if_t ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; uint16_t flags; int len, prog, rx_npkts; uint32_t vtag = 
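/*
 * nfe_newbuf() and nfe_jnewbuf() above use the spare-map refill
 * idiom: the replacement mbuf is loaded into a preallocated spare
 * DMA map first, and the maps are swapped only once that load has
 * succeeded.  If allocation or mapping fails, the old mbuf and its
 * mapping are left untouched and the descriptor is simply recycled,
 * so the ring never loses a buffer.  A minimal sketch (tag, slot
 * and spare_map are hypothetical names):
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	if (bus_dmamap_load_mbuf_sg(tag, spare_map, m, segs, &nsegs,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	bus_dmamap_unload(tag, slot->map);
 *	tmp = slot->map;
 *	slot->map = spare_map;
 *	spare_map = tmp;
 *	slot->m = m;
 */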
0; rx_npkts = 0; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_POSTREAD); for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; data = &sc->rxq.data[sc->rxq.cur]; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[sc->rxq.cur]; vtag = le32toh(desc64->physaddr[1]); flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->rxq.desc32[sc->rxq.cur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; prog++; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if (!(flags & NFE_RX_VALID_V1)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } else { if (!(flags & NFE_RX_VALID_V2)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } if (flags & NFE_RX_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } m = data->m; if (nfe_newbuf(sc, sc->rxq.cur) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((vtag & NFE_RX_VTAG) != 0 && (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = vtag & 0xffff; m->m_flags |= M_VLANTAG; } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { if ((flags & NFE_RX_IP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((flags & NFE_RX_TCP_CSUMOK) != 0 || (flags & NFE_RX_UDP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); NFE_UNLOCK(sc); if_input(ifp, m); NFE_LOCK(sc); rx_npkts++; } if (prog > 0) bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; return (count > 0 ? 
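/*
 * The return value below distinguishes "ring drained" (0) from
 * "stopped because the count budget ran out" (EAGAIN).
 * nfe_int_task() uses that as its domore flag:
 *
 *	domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
 *	...
 *	if (domore || NFE_READ(sc, sc->nfe_irq_status) != 0)
 *		taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
 *	else
 *		nfe_enable_intr(sc);
 *
 * i.e. while work is still pending the task re-enqueues itself
 * instead of re-enabling the interrupt.
 */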
0 : EAGAIN); } static int nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) { if_t ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; uint16_t flags; int len, prog, rx_npkts; uint32_t vtag = 0; rx_npkts = 0; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_POSTREAD); for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; data = &sc->jrxq.jdata[sc->jrxq.jcur]; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; vtag = le32toh(desc64->physaddr[1]); flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; prog++; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if (!(flags & NFE_RX_VALID_V1)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } else { if (!(flags & NFE_RX_VALID_V2)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } if (flags & NFE_RX_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } m = data->m; if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((vtag & NFE_RX_VTAG) != 0 && (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = vtag & 0xffff; m->m_flags |= M_VLANTAG; } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { if ((flags & NFE_RX_IP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((flags & NFE_RX_TCP_CSUMOK) != 0 || (flags & NFE_RX_UDP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); NFE_UNLOCK(sc); if_input(ifp, m); NFE_LOCK(sc); rx_npkts++; } if (prog > 0) bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; return (count > 0 ? 
0 : EAGAIN); } static void nfe_txeof(struct nfe_softc *sc) { if_t ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_tx_data *data = NULL; uint16_t flags; int cons, prog; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_POSTREAD); prog = 0; for (cons = sc->txq.next; cons != sc->txq.cur; NFE_INC(cons, NFE_TX_RING_COUNT)) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[cons]; flags = le16toh(desc64->flags); } else { desc32 = &sc->txq.desc32[cons]; flags = le16toh(desc32->flags); } if (flags & NFE_TX_VALID) break; prog++; sc->txq.queued--; data = &sc->txq.data[cons]; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if ((flags & NFE_TX_LASTFRAG_V1) == 0) continue; if ((flags & NFE_TX_ERROR_V1) != 0) { device_printf(sc->nfe_dev, "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } else { if ((flags & NFE_TX_LASTFRAG_V2) == 0) continue; if ((flags & NFE_TX_ERROR_V2) != 0) { device_printf(sc->nfe_dev, "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } /* last fragment of the mbuf chain transmitted */ KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); m_freem(data->m); data->m = NULL; } if (prog > 0) { sc->nfe_force_tx = 0; sc->txq.next = cons; if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (sc->txq.queued == 0) sc->nfe_watchdog_timer = 0; } } static int nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) { struct nfe_desc32 *desc32 = NULL; struct nfe_desc64 *desc64 = NULL; bus_dmamap_t map; bus_dma_segment_t segs[NFE_MAX_SCATTER]; int error, i, nsegs, prod, si; uint32_t tsosegsz; uint16_t cflags, flags; struct mbuf *m; prod = si = sc->txq.cur; map = sc->txq.data[prod].tx_data_map; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { bus_dmamap_unload(sc->txq.tx_data_tag, map); return (ENOBUFS); } m = *m_head; cflags = flags = 0; tsosegsz = 0; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz << NFE_TX_TSO_SHIFT; cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); cflags |= NFE_TX_TSO; } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) cflags |= NFE_TX_IP_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) cflags |= NFE_TX_TCP_UDP_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) cflags |= NFE_TX_TCP_UDP_CSUM; } for (i = 0; i < nsegs; i++) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[prod]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[i].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[i].ds_addr)); desc64->vtag = 0; desc64->length = htole16(segs[i].ds_len - 1); desc64->flags = htole16(flags); } else { desc32 = 
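/*
 * EFBIG from the load in nfe_encap() above means the chain has more
 * than NFE_MAX_SCATTER fragments.  m_collapse() is tried exactly
 * once: it coalesces adjacent mbufs where it can, which is cheaper
 * than the full-copy m_defrag(); if the chain still does not fit,
 * the packet is dropped.
 */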
&sc->txq.desc32[prod]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[i].ds_addr)); desc32->length = htole16(segs[i].ds_len - 1); desc32->flags = htole16(flags); } /* * Setting of the valid bit in the first descriptor is * deferred until the whole chain is fully setup. */ flags |= NFE_TX_VALID; sc->txq.queued++; NFE_INC(prod, NFE_TX_RING_COUNT); } /* * the whole mbuf chain has been DMA mapped, fix last/first descriptor. * csum flags, vtag and TSO belong to the first fragment only. */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); desc64 = &sc->txq.desc64[si]; if ((m->m_flags & M_VLANTAG) != 0) desc64->vtag = htole32(NFE_TX_VTAG | m->m_pkthdr.ether_vtag); if (tsosegsz != 0) { /* * XXX * The following indicates the descriptor element * is a 32bit quantity. */ desc64->length |= htole16((uint16_t)tsosegsz); desc64->flags |= htole16(tsosegsz >> 16); } /* * finally, set the valid/checksum/TSO bit in the first * descriptor. */ desc64->flags |= htole16(NFE_TX_VALID | cflags); } else { if (sc->nfe_flags & NFE_JUMBO_SUP) desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); else desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); desc32 = &sc->txq.desc32[si]; if (tsosegsz != 0) { /* * XXX * The following indicates the descriptor element * is a 32bit quantity. */ desc32->length |= htole16((uint16_t)tsosegsz); desc32->flags |= htole16(tsosegsz >> 16); } /* * finally, set the valid/checksum/TSO bit in the first * descriptor. */ desc32->flags |= htole16(NFE_TX_VALID | cflags); } sc->txq.cur = prod; prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; sc->txq.data[prod].tx_data_map = map; sc->txq.data[prod].m = m; bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); return (0); } static void nfe_setmulti(struct nfe_softc *sc) { if_t ifp = sc->nfe_ifp; int i, mc_count, mcnt; uint32_t filter; uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; uint8_t *mta; NFE_LOCK_ASSERT(sc); if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { bzero(addr, ETHER_ADDR_LEN); bzero(mask, ETHER_ADDR_LEN); goto done; } bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); mc_count = if_multiaddr_count(ifp, -1); mta = malloc(sizeof(uint8_t) * ETHER_ADDR_LEN * mc_count, M_DEVBUF, M_NOWAIT); /* Unable to get memory - process without filtering */ if (mta == NULL) { device_printf(sc->nfe_dev, "nfe_setmulti: failed to allocate" "temp multicast buffer!\n"); bzero(addr, ETHER_ADDR_LEN); bzero(mask, ETHER_ADDR_LEN); goto done; }; if_multiaddr_array(ifp, mta, &mcnt, mc_count); for (i = 0; i < mcnt; i++) { uint8_t *addrp; int j; addrp = mta + (i * ETHER_ADDR_LEN); for (j = 0; j < ETHER_ADDR_LEN; j++) { u_int8_t mcaddr = addrp[j]; addr[j] &= mcaddr; mask[j] &= ~mcaddr; } } free(mta, M_DEVBUF); for (i = 0; i < ETHER_ADDR_LEN; i++) { mask[i] |= addr[i]; } done: addr[0] |= 0x01; /* make sure multicast bit is set */ NFE_WRITE(sc, NFE_MULTIADDR_HI, addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); NFE_WRITE(sc, NFE_MULTIADDR_LO, addr[5] << 8 | addr[4]); NFE_WRITE(sc, NFE_MULTIMASK_HI, mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); NFE_WRITE(sc, NFE_MULTIMASK_LO, mask[5] << 8 | mask[4]); filter = NFE_READ(sc, NFE_RXFILTER); filter &= NFE_PFF_RX_PAUSE; filter |= NFE_RXFILTER_MAGIC; filter |= (if_getflags(ifp) & IFF_PROMISC) ? 
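/*
 * The address/mask computation above reduces all subscribed groups
 * to a single pattern: addr keeps the bits common to every multicast
 * address, mask clears each bit on which any two of them differ,
 * and the final pass folds addr back into mask.  The chip then
 * accepts frames whose destination matches addr on the bits set in
 * mask, a superset of the subscribed groups.  Worked example for
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:05:
 *
 *	addr = 01:00:5e:00:00:01   (common bits)
 *	mask = ff:ff:ff:ff:ff:fb   (bit 0x04 of the last octet differs)
 *
 * Filtering out the extra groups this lets through is left to the
 * network stack.
 */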
NFE_PFF_PROMISC : NFE_PFF_U2M; NFE_WRITE(sc, NFE_RXFILTER, filter); } static void nfe_start(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); NFE_LOCK(sc); nfe_start_locked(ifp); NFE_UNLOCK(sc); } static void nfe_start_locked(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); struct mbuf *m0; int enq = 0; NFE_LOCK_ASSERT(sc); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->nfe_link == 0) return; while (!if_sendq_empty(ifp)) { m0 = if_dequeue(ifp); if (m0 == NULL) break; if (nfe_encap(sc, &m0) != 0) { if (m0 == NULL) break; if_sendq_prepend(ifp, m0); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } enq++; if_etherbpfmtap(ifp, m0); } if (enq > 0) { bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* kick Tx */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); /* * Set a timeout in case the chip goes out to lunch. */ sc->nfe_watchdog_timer = 5; } } static void nfe_watchdog(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) return; /* Check if we've lost Tx completion interrupt. */ nfe_txeof(sc); if (sc->txq.queued == 0) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!if_sendq_empty(ifp)) nfe_start_locked(ifp); return; } /* Check if we've lost start Tx command. */ sc->nfe_force_tx++; if (sc->nfe_force_tx <= 3) { /* * If this is the case for watchdog timeout, the following * code should go to nfe_txeof(). */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); return; } sc->nfe_force_tx = 0; if_printf(ifp, "watchdog timeout\n"); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); nfe_init_locked(sc); } static void nfe_init(void *xsc) { struct nfe_softc *sc = xsc; NFE_LOCK(sc); nfe_init_locked(sc); NFE_UNLOCK(sc); } static void nfe_init_locked(void *xsc) { struct nfe_softc *sc = xsc; if_t ifp = sc->nfe_ifp; struct mii_data *mii; uint32_t val; int error; NFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->nfe_miibus); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; nfe_stop(ifp); sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS; nfe_init_tx_ring(sc, &sc->txq); if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) error = nfe_init_jrx_ring(sc, &sc->jrxq); else error = nfe_init_rx_ring(sc, &sc->rxq); if (error != 0) { device_printf(sc->nfe_dev, "initialization failed: no memory for rx buffers\n"); nfe_stop(ifp); return; } val = 0; if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) val |= NFE_MAC_ADDR_INORDER; NFE_WRITE(sc, NFE_TX_UNK, val); NFE_WRITE(sc, NFE_STATUS, 0); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); sc->rxtxctl = NFE_RXTX_BIT2; if (sc->nfe_flags & NFE_40BIT_ADDR) sc->rxtxctl |= NFE_RXTX_V3MAGIC; else if (sc->nfe_flags & NFE_JUMBO_SUP) sc->rxtxctl |= NFE_RXTX_V2MAGIC; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) sc->rxtxctl |= NFE_RXTX_RXCSUM; if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); else NFE_WRITE(sc, NFE_VTAG_CTL, 0); NFE_WRITE(sc, NFE_SETUP_R6, 0); /* set MAC address */ nfe_set_macaddr(sc, if_getlladdr(ifp)); /* tell MAC where rings are in memory */ if (sc->nfe_framesize > MCLBYTES - 
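/*
 * nfe_watchdog() above recovers in stages: first it assumes only the
 * Tx completion interrupt was lost and reclaims with nfe_txeof();
 * then, for up to three ticks (nfe_force_tx), it assumes the Tx kick
 * itself was lost and rewrites NFE_RXTX_KICKTX; only after that does
 * it declare a real timeout and take the expensive path of a full
 * nfe_init_locked() reinitialization.
 */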
ETHER_HDR_LEN) { NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, NFE_ADDR_HI(sc->jrxq.jphysaddr)); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->jrxq.jphysaddr)); } else { NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, NFE_ADDR_HI(sc->rxq.physaddr)); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); } NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); NFE_WRITE(sc, NFE_RING_SIZE, (NFE_RX_RING_COUNT - 1) << 16 | (NFE_TX_RING_COUNT - 1)); NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); /* force MAC to wakeup */ val = NFE_READ(sc, NFE_PWR_STATE); if ((val & NFE_PWR_WAKEUP) == 0) NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); DELAY(10); val = NFE_READ(sc, NFE_PWR_STATE); NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); #if 1 /* configure interrupts coalescing/mitigation */ NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); #else /* no interrupt mitigation: one interrupt per packet */ NFE_WRITE(sc, NFE_IMTIMER, 970); #endif NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); /* Disable WOL. */ NFE_WRITE(sc, NFE_WOL_CTL, 0); sc->rxtxctl &= ~NFE_RXTX_BIT2; NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); /* set Rx filter */ nfe_setmulti(sc); /* enable Rx */ NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); /* enable Tx */ NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); /* Clear hardware stats. */ nfe_stats_clear(sc); #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) nfe_disable_intr(sc); else #endif nfe_set_intr(sc); nfe_enable_intr(sc); /* enable interrupts */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); sc->nfe_link = 0; mii_mediachg(mii); callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); } static void nfe_stop(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); struct nfe_rx_ring *rx_ring; struct nfe_jrx_ring *jrx_ring; struct nfe_tx_ring *tx_ring; struct nfe_rx_data *rdata; struct nfe_tx_data *tdata; int i; NFE_LOCK_ASSERT(sc); sc->nfe_watchdog_timer = 0; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); callout_stop(&sc->nfe_stat_ch); /* abort Tx */ NFE_WRITE(sc, NFE_TX_CTL, 0); /* disable Rx */ NFE_WRITE(sc, NFE_RX_CTL, 0); /* disable interrupts */ nfe_disable_intr(sc); sc->nfe_link = 0; /* free Rx and Tx mbufs still in the queues. 
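 * Each map is synced and unloaded before its mbuf is freed so busdma
 * bounce buffers stay consistent.  Note that the Tx loop below runs
 * to NFE_RX_RING_COUNT even though it walks the Tx ring; that is
 * harmless only while the two ring sizes happen to match, and
 * NFE_TX_RING_COUNT looks like the intended bound.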
*/ rx_ring = &sc->rxq; for (i = 0; i < NFE_RX_RING_COUNT; i++) { rdata = &rx_ring->data[i]; if (rdata->m != NULL) { bus_dmamap_sync(rx_ring->rx_data_tag, rdata->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx_ring->rx_data_tag, rdata->rx_data_map); m_freem(rdata->m); rdata->m = NULL; } } if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { jrx_ring = &sc->jrxq; for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { rdata = &jrx_ring->jdata[i]; if (rdata->m != NULL) { bus_dmamap_sync(jrx_ring->jrx_data_tag, rdata->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(jrx_ring->jrx_data_tag, rdata->rx_data_map); m_freem(rdata->m); rdata->m = NULL; } } } tx_ring = &sc->txq; for (i = 0; i < NFE_RX_RING_COUNT; i++) { tdata = &tx_ring->data[i]; if (tdata->m != NULL) { bus_dmamap_sync(tx_ring->tx_data_tag, tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(tx_ring->tx_data_tag, tdata->tx_data_map); m_freem(tdata->m); tdata->m = NULL; } } /* Update hardware stats. */ nfe_stats_update(sc); } static int nfe_ifmedia_upd(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); struct mii_data *mii; NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); mii_mediachg(mii); NFE_UNLOCK(sc); return (0); } static void nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct nfe_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; NFE_UNLOCK(sc); } void nfe_tick(void *xsc) { struct nfe_softc *sc; struct mii_data *mii; if_t ifp; sc = (struct nfe_softc *)xsc; NFE_LOCK_ASSERT(sc); ifp = sc->nfe_ifp; mii = device_get_softc(sc->nfe_miibus); mii_tick(mii); nfe_stats_update(sc); nfe_watchdog(ifp); callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); } static int nfe_shutdown(device_t dev) { return (nfe_suspend(dev)); } static void nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) { uint32_t val; if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { val = NFE_READ(sc, NFE_MACADDR_LO); addr[0] = (val >> 8) & 0xff; addr[1] = (val & 0xff); val = NFE_READ(sc, NFE_MACADDR_HI); addr[2] = (val >> 24) & 0xff; addr[3] = (val >> 16) & 0xff; addr[4] = (val >> 8) & 0xff; addr[5] = (val & 0xff); } else { val = NFE_READ(sc, NFE_MACADDR_LO); addr[5] = (val >> 8) & 0xff; addr[4] = (val & 0xff); val = NFE_READ(sc, NFE_MACADDR_HI); addr[3] = (val >> 24) & 0xff; addr[2] = (val >> 16) & 0xff; addr[1] = (val >> 8) & 0xff; addr[0] = (val & 0xff); } } static void nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) { NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); } /* * Map a single buffer address. 
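 * This is the bus_dmamap_load() callback used when loading the rings
 * above; the tags are created with nsegments = 1, so it only has to
 * record segs[0].ds_addr in the caller's nfe_dmamap_arg.  A load
 * error leaves nfe_busaddr untouched, which is why callers zero it
 * before the load.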
*/ static void nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct nfe_dmamap_arg *ctx; if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); ctx = (struct nfe_dmamap_arg *)arg; ctx->nfe_busaddr = segs[0].ds_addr; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, NFE_PROC_MAX)); } #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void nfe_sysctl_node(struct nfe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct nfe_hw_stats *stats; int error; stats = &sc->nfe_stats; ctx = device_get_sysctl_ctx(sc->nfe_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", "max number of Rx events to process"); sc->nfe_process_limit = NFE_PROC_DEFAULT; error = resource_int_value(device_get_name(sc->nfe_dev), device_get_unit(sc->nfe_dev), "process_limit", &sc->nfe_process_limit); if (error == 0) { if (sc->nfe_process_limit < NFE_PROC_MIN || sc->nfe_process_limit > NFE_PROC_MAX) { device_printf(sc->nfe_dev, "process_limit value out of range; " "using default: %d\n", NFE_PROC_DEFAULT); sc->nfe_process_limit = NFE_PROC_DEFAULT; } } if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) return; tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "NFE statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx statistics. */ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", &stats->rx_frame_errors, "Framing Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", &stats->rx_extra_bytes, "Extra Bytes"); NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", &stats->rx_late_cols, "Late Collisions"); NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", &stats->rx_runts, "Runts"); NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", &stats->rx_jumbos, "Jumbos"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", &stats->rx_fifo_overuns, "FIFO Overruns"); NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", &stats->rx_crc_errors, "CRC Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", &stats->rx_fae, "Frame Alignment Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", &stats->rx_len_errors, "Length Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", &stats->rx_unicast, "Unicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", &stats->rx_multicast, "Multicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", &stats->rx_broadcast, "Broadcast Frames"); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", &stats->rx_octets, "Octets"); NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", &stats->rx_pause, "Pause frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", &stats->rx_drops, "Drop frames"); } /* Tx statistics. 
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", &stats->tx_octets, "Octets"); NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", &stats->tx_zero_rexmits, "Zero Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", &stats->tx_one_rexmits, "One Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", &stats->tx_multi_rexmits, "Multiple Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", &stats->tx_late_cols, "Late Collisions"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", &stats->tx_fifo_underuns, "FIFO Underruns"); NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", &stats->tx_carrier_losts, "Carrier Losts"); NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", &stats->tx_excess_deferals, "Excess Deferrals"); NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", &stats->tx_retry_errors, "Retry Errors"); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", &stats->tx_deferals, "Deferrals"); NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", &stats->tx_frames, "Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", &stats->tx_pause, "Pause Frames"); } if ((sc->nfe_flags & NFE_MIB_V3) != 0) { NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", &stats->tx_deferals, "Unicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", &stats->tx_frames, "Multicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", &stats->tx_pause, "Broadcast Frames"); } } #undef NFE_SYSCTL_STAT_ADD32 #undef NFE_SYSCTL_STAT_ADD64 static void nfe_stats_clear(struct nfe_softc *sc) { int i, mib_cnt; if ((sc->nfe_flags & NFE_MIB_V1) != 0) mib_cnt = NFE_NUM_MIB_STATV1; else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) mib_cnt = NFE_NUM_MIB_STATV2; else return; for (i = 0; i < mib_cnt; i++) NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t)); if ((sc->nfe_flags & NFE_MIB_V3) != 0) { NFE_READ(sc, NFE_TX_UNICAST); NFE_READ(sc, NFE_TX_MULTICAST); NFE_READ(sc, NFE_TX_BROADCAST); } } static void nfe_stats_update(struct nfe_softc *sc) { struct nfe_hw_stats *stats; NFE_LOCK_ASSERT(sc); if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) return; stats = &sc->nfe_stats; stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); stats->tx_frames += 
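/*
 * Note: the NFE_MIB_V3 block in nfe_sysctl_node() above attaches the
 * Tx "unicast", "multicast" and "broadcast" nodes to tx_deferals,
 * tx_frames and tx_pause, while the V3 update path below accumulates
 * those hardware counters into tx_unicast, tx_multicast and
 * tx_broadcast.  Those sysctls therefore report the wrong fields;
 * tx_unicast/tx_multicast/tx_broadcast look like the intended
 * targets.
 */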
NFE_READ(sc, NFE_TX_FRAME); stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); } if ((sc->nfe_flags & NFE_MIB_V3) != 0) { stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); } } static void nfe_set_linkspeed(struct nfe_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int aneg, i, phyno; NFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->nfe_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } miisc = LIST_FIRST(&mii->mii_phys); phyno = miisc->mii_phy; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0); nfe_miibus_writereg(sc->nfe_dev, phyno, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); nfe_miibus_writereg(sc->nfe_dev, phyno, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until nfe(4) get a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: nfe_mac_config(sc, mii); return; default: break; } } NFE_UNLOCK(sc); pause("nfelnk", hz); NFE_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->nfe_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. * This is the last resort and may/may not work. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; nfe_mac_config(sc, mii); } static void nfe_set_wol(struct nfe_softc *sc) { if_t ifp; uint32_t wolctl; int pmc; uint16_t pmstat; NFE_LOCK_ASSERT(sc); if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->nfe_ifp; if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) wolctl = NFE_WOL_MAGIC; else wolctl = 0; NFE_WRITE(sc, NFE_WOL_CTL, wolctl); if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) { nfe_set_linkspeed(sc); if ((sc->nfe_flags & NFE_PWR_MGMT) != 0) NFE_WRITE(sc, NFE_PWR2_CTL, NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS); /* Enable RX. */ NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0); NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) | NFE_RX_START); } /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } Index: head/sys/dev/oce/oce_if.c =================================================================== --- head/sys/dev/oce/oce_if.c (revision 296271) +++ head/sys/dev/oce/oce_if.c (revision 296272) @@ -1,2359 +1,2359 @@ /*- * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "opt_inet6.h" #include "opt_inet.h" #include "oce_if.h" /* UE Status Low CSR */ static char *ue_status_low_desc[] = { "CEV", "CTX", "DBUF", "ERX", "Host", "MPU", "NDMA", "PTC ", "RDMA ", "RXF ", "RXIPS ", "RXULP0 ", "RXULP1 ", "RXULP2 ", "TIM ", "TPOST ", "TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ", "UC ", "WDMA ", "TXULP2 ", "HOST1 ", "P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ", "AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND" }; /* UE Status High CSR */ static char *ue_status_hi_desc[] = { "LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM", "PCS1ONLINE", "PCTL0", "PCTL1", "PMEM", "RR", "TXPB", "RXPP", "XAUI", "TXP", "ARM", "IPC", "HOST2", "HOST3", "HOST4", "HOST5", "HOST6", "HOST7", "HOST8", "HOST9", "NETC", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown" }; /* Driver entry points prototypes */ static int oce_probe(device_t dev); static int oce_attach(device_t dev); static int oce_detach(device_t dev); static int oce_shutdown(device_t dev); static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void oce_init(void *xsc); static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m); static void oce_multiq_flush(struct ifnet *ifp); /* Driver interrupt routines protypes */ static void oce_intr(void *arg, int pending); static int oce_setup_intr(POCE_SOFTC sc); static int oce_fast_isr(void *arg); static int oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending)); /* Media callbacks prototypes */ static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req); static int oce_media_change(struct ifnet *ifp); /* Transmit routines prototypes */ static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index); static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq); static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status); static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq); /* Receive routines prototypes */ static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe); static int 
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe); static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe); static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe); /* Helper function prototypes in this file */ static int oce_attach_ifp(POCE_SOFTC sc); static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag); static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag); static int oce_vid_config(POCE_SOFTC sc); static void oce_mac_addr_set(POCE_SOFTC sc); static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data); static void oce_local_timer(void *arg); static void oce_if_deactivate(POCE_SOFTC sc); static void oce_if_activate(POCE_SOFTC sc); static void setup_max_queues_want(POCE_SOFTC sc); static void update_queues_got(POCE_SOFTC sc); static void process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe); static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m); static void oce_get_config(POCE_SOFTC sc); static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete); /* IP specific */ #if defined(INET6) || defined(INET) static int oce_init_lro(POCE_SOFTC sc); static void oce_rx_flush_lro(struct oce_rq *rq); static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp); #endif static device_method_t oce_dispatch[] = { DEVMETHOD(device_probe, oce_probe), DEVMETHOD(device_attach, oce_attach), DEVMETHOD(device_detach, oce_detach), DEVMETHOD(device_shutdown, oce_shutdown), DEVMETHOD_END }; static driver_t oce_driver = { "oce", oce_dispatch, sizeof(OCE_SOFTC) }; static devclass_t oce_devclass; DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0); MODULE_DEPEND(oce, pci, 1, 1, 1); MODULE_DEPEND(oce, ether, 1, 1, 1); MODULE_VERSION(oce, 1); /* global vars */ const char component_revision[32] = {"///" COMPONENT_REVISION "///"}; /* Module capabilites and parameters */ uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED; uint32_t oce_enable_rss = OCE_MODCAP_RSS; TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled); TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss); /* Supported devices table */ static uint32_t supportedDevices[] = { (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2, (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH }; /***************************************************************************** * Driver entry points functions * *****************************************************************************/ static int oce_probe(device_t dev) { uint16_t vendor = 0; uint16_t device = 0; int i = 0; char str[256] = {0}; POCE_SOFTC sc; sc = device_get_softc(dev); bzero(sc, sizeof(OCE_SOFTC)); sc->dev = dev; vendor = pci_get_vendor(dev); device = pci_get_device(dev); for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) { if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) { if (device == (supportedDevices[i] & 0xffff)) { sprintf(str, "%s:%s", "Emulex CNA NIC function", component_revision); device_set_desc_copy(dev, str); switch (device) { case PCI_PRODUCT_BE2: sc->flags |= OCE_FLAGS_BE2; break; case PCI_PRODUCT_BE3: sc->flags |= OCE_FLAGS_BE3; break; case PCI_PRODUCT_XE201: case PCI_PRODUCT_XE201_VF: sc->flags |= OCE_FLAGS_XE201; break; case PCI_PRODUCT_SH: sc->flags |= OCE_FLAGS_SH; break; default: return ENXIO; } return 
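/*
 * Each supportedDevices entry above packs (vendor << 16) | device
 * into one uint32_t, which is what the probe loop unpacks with
 * shifts and masks.  A hypothetical helper using the same packing:
 *
 *	static int
 *	id_matches(uint32_t entry, uint16_t vendor, uint16_t device)
 *	{
 *		return (vendor == ((entry >> 16) & 0xffff) &&
 *		    device == (entry & 0xffff));
 *	}
 */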
BUS_PROBE_DEFAULT; } } } return ENXIO; } static int oce_attach(device_t dev) { POCE_SOFTC sc; int rc = 0; sc = device_get_softc(dev); rc = oce_hw_pci_alloc(sc); if (rc) return rc; sc->tx_ring_size = OCE_TX_RING_SIZE; sc->rx_ring_size = OCE_RX_RING_SIZE; sc->rq_frag_size = OCE_RQ_BUF_SIZE; sc->flow_control = OCE_DEFAULT_FLOW_CONTROL; sc->promisc = OCE_DEFAULT_PROMISCUOUS; LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock"); LOCK_CREATE(&sc->dev_lock, "Device_lock"); /* initialise the hardware */ rc = oce_hw_init(sc); if (rc) goto pci_res_free; oce_get_config(sc); setup_max_queues_want(sc); rc = oce_setup_intr(sc); if (rc) goto mbox_free; rc = oce_queue_init_all(sc); if (rc) goto intr_free; rc = oce_attach_ifp(sc); if (rc) goto queues_free; #if defined(INET6) || defined(INET) rc = oce_init_lro(sc); if (rc) goto ifp_free; #endif rc = oce_hw_start(sc); if (rc) goto lro_free; sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST); rc = oce_stats_init(sc); if (rc) goto vlan_free; oce_add_sysctls(sc); callout_init(&sc->timer, 1); rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc); if (rc) goto stats_free; return 0; stats_free: callout_drain(&sc->timer); oce_stats_free(sc); vlan_free: if (sc->vlan_attach) EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); if (sc->vlan_detach) EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); oce_hw_intr_disable(sc); lro_free: #if defined(INET6) || defined(INET) oce_free_lro(sc); ifp_free: #endif ether_ifdetach(sc->ifp); if_free(sc->ifp); queues_free: oce_queue_release_all(sc); intr_free: oce_intr_free(sc); mbox_free: oce_dma_free(sc, &sc->bsmbx); pci_res_free: oce_hw_pci_free(sc); LOCK_DESTROY(&sc->dev_lock); LOCK_DESTROY(&sc->bmbx_lock); return rc; } static int oce_detach(device_t dev) { POCE_SOFTC sc = device_get_softc(dev); LOCK(&sc->dev_lock); oce_if_deactivate(sc); UNLOCK(&sc->dev_lock); callout_drain(&sc->timer); if (sc->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); if (sc->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); ether_ifdetach(sc->ifp); if_free(sc->ifp); oce_hw_shutdown(sc); bus_generic_detach(dev); return 0; } static int oce_shutdown(device_t dev) { int rc; rc = oce_detach(dev); return rc; } static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; POCE_SOFTC sc = ifp->if_softc; int rc = 0; uint32_t u; switch (command) { case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; case SIOCSIFMTU: if (ifr->ifr_mtu > OCE_MAX_MTU) rc = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; oce_init(sc); } device_printf(sc->dev, "Interface Up\n"); } else { LOCK(&sc->dev_lock); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); oce_if_deactivate(sc); UNLOCK(&sc->dev_lock); device_printf(sc->dev, "Interface Down\n"); } if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) { if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1)))) sc->promisc = TRUE; } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) { if (!oce_rxf_set_promiscuous(sc, 0)) sc->promisc = FALSE; } break; case SIOCADDMULTI: case SIOCDELMULTI: rc = oce_hw_update_multicast(sc); if (rc) device_printf(sc->dev, "Update multicast address failed\n"); break; case SIOCSIFCAP: u = 
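/*
 * oce_attach() above uses the usual goto-ladder unwind: the error
 * labels (stats_free ... pci_res_free) appear in reverse order of
 * acquisition, so a failure at any step jumps to the label that
 * releases the most recently acquired resource and falls through
 * the rest.  Schematically (step_a/step_b/step_c are placeholders):
 *
 *	if ((rc = step_a(sc)) != 0) return (rc);
 *	if ((rc = step_b(sc)) != 0) goto undo_a;
 *	if ((rc = step_c(sc)) != 0) goto undo_b;
 *	return (0);
 * undo_b:
 *	teardown_b(sc);
 * undo_a:
 *	teardown_a(sc);
 *	return (rc);
 */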
ifr->ifr_reqcap ^ ifp->if_capenable; if (u & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "TSO disabled due to -txcsum.\n"); } } if (u & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (u & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (IFCAP_TSO & ifp->if_capenable) { if (IFCAP_TXCSUM & ifp->if_capenable) ifp->if_hwassist |= CSUM_TSO; else { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "Enable txcsum first.\n"); rc = EAGAIN; } } else ifp->if_hwassist &= ~CSUM_TSO; } if (u & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (u & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; oce_vid_config(sc); } #if defined(INET6) || defined(INET) if (u & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; #endif break; case SIOCGPRIVATE_0: rc = oce_handle_passthrough(ifp, data); break; default: rc = ether_ioctl(ifp, command, data); break; } return rc; } static void oce_init(void *arg) { POCE_SOFTC sc = arg; LOCK(&sc->dev_lock); if (sc->ifp->if_flags & IFF_UP) { oce_if_deactivate(sc); oce_if_activate(sc); } UNLOCK(&sc->dev_lock); } static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m) { POCE_SOFTC sc = ifp->if_softc; struct oce_wq *wq = NULL; int queue_index = 0; int status = 0; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) queue_index = m->m_pkthdr.flowid % sc->nwqs; wq = sc->wq[queue_index]; LOCK(&wq->tx_lock); status = oce_multiq_transmit(ifp, m, wq); UNLOCK(&wq->tx_lock); return status; } static void oce_multiq_flush(struct ifnet *ifp) { POCE_SOFTC sc = ifp->if_softc; struct mbuf *m; int i = 0; for (i = 0; i < sc->nwqs; i++) { while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL) m_freem(m); } if_qflush(ifp); } /***************************************************************************** * Driver interrupt routines functions * *****************************************************************************/ static void oce_intr(void *arg, int pending) { POCE_INTR_INFO ii = (POCE_INTR_INFO) arg; POCE_SOFTC sc = ii->sc; struct oce_eq *eq = ii->eq; struct oce_eqe *eqe; struct oce_cq *cq = NULL; int i, num_eqes = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); do { eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe); if (eqe->evnt == 0) break; eqe->evnt = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); RING_GET(eq->ring, 1); num_eqes++; } while (TRUE); if (!num_eqes) goto eq_arm; /* Spurious */ /* Clear EQ entries, but dont arm */ oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE); /* Process TX, RX and MCC. 
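 * The EQ entries were cleared above without re-arming, and each CQ
 * handler drains its queue before the CQs (and finally the EQ) are
 * armed again, so a completion arriving mid-drain is not lost.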
But dont arm CQ*/ for (i = 0; i < eq->cq_valid; i++) { cq = eq->cq[i]; (*cq->cq_handler)(cq->cb_arg); } /* Arm all cqs connected to this EQ */ for (i = 0; i < eq->cq_valid; i++) { cq = eq->cq[i]; oce_arm_cq(sc, cq->cq_id, 0, TRUE); } eq_arm: oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE); return; } static int oce_setup_intr(POCE_SOFTC sc) { int rc = 0, use_intx = 0; int vector = 0, req_vectors = 0; if (is_rss_enabled(sc)) req_vectors = MAX((sc->nrqs - 1), sc->nwqs); else req_vectors = 1; if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) { sc->intr_count = req_vectors; rc = pci_alloc_msix(sc->dev, &sc->intr_count); if (rc != 0) { use_intx = 1; pci_release_msi(sc->dev); } else sc->flags |= OCE_FLAGS_USING_MSIX; } else use_intx = 1; if (use_intx) sc->intr_count = 1; /* Scale number of queues based on intr we got */ update_queues_got(sc); if (use_intx) { device_printf(sc->dev, "Using legacy interrupt\n"); rc = oce_alloc_intr(sc, vector, oce_intr); if (rc) goto error; } else { for (; vector < sc->intr_count; vector++) { rc = oce_alloc_intr(sc, vector, oce_intr); if (rc) goto error; } } return 0; error: oce_intr_free(sc); return rc; } static int oce_fast_isr(void *arg) { POCE_INTR_INFO ii = (POCE_INTR_INFO) arg; POCE_SOFTC sc = ii->sc; if (ii->eq == NULL) return FILTER_STRAY; oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE); - taskqueue_enqueue_fast(ii->tq, &ii->task); + taskqueue_enqueue(ii->tq, &ii->task); ii->eq->intr++; return FILTER_HANDLED; } static int oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending)) { POCE_INTR_INFO ii = &sc->intrs[vector]; int rc = 0, rr; if (vector >= OCE_MAX_EQ) return (EINVAL); /* Set the resource id for the interrupt. * MSIx is vector + 1 for the resource id, * INTx is 0 for the resource id. */ if (sc->flags & OCE_FLAGS_USING_MSIX) rr = vector + 1; else rr = 0; ii->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rr, RF_ACTIVE|RF_SHAREABLE); ii->irq_rr = rr; if (ii->intr_res == NULL) { device_printf(sc->dev, "Could not allocate interrupt\n"); rc = ENXIO; return rc; } TASK_INIT(&ii->task, 0, isr, ii); ii->vector = vector; sprintf(ii->task_name, "oce_task[%d]", ii->vector); ii->tq = taskqueue_create_fast(ii->task_name, M_NOWAIT, taskqueue_thread_enqueue, &ii->tq); taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); ii->sc = sc; rc = bus_setup_intr(sc->dev, ii->intr_res, INTR_TYPE_NET, oce_fast_isr, NULL, ii, &ii->tag); return rc; } void oce_intr_free(POCE_SOFTC sc) { int i = 0; for (i = 0; i < sc->intr_count; i++) { if (sc->intrs[i].tag != NULL) bus_teardown_intr(sc->dev, sc->intrs[i].intr_res, sc->intrs[i].tag); if (sc->intrs[i].tq != NULL) taskqueue_free(sc->intrs[i].tq); if (sc->intrs[i].intr_res != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intrs[i].irq_rr, sc->intrs[i].intr_res); sc->intrs[i].tag = NULL; sc->intrs[i].intr_res = NULL; } if (sc->flags & OCE_FLAGS_USING_MSIX) pci_release_msi(sc->dev); } /****************************************************************************** * Media callbacks functions * ******************************************************************************/ static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req) { POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc; req->ifm_status = IFM_AVALID; req->ifm_active = IFM_ETHER; if (sc->link_status == 1) req->ifm_status |= IFM_ACTIVE; else return; switch (sc->link_speed) { case 1: /* 10 Mbps */ req->ifm_active |= IFM_10_T | IFM_FDX; sc->speed = 10; break; case 2: /* 100 Mbps */ req->ifm_active |= IFM_100_TX | 
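/*
 * oce_fast_isr() and oce_alloc_intr() above show the pattern this
 * change touches throughout the tree: a filter interrupt handler
 * does the minimum (quiesce the EQ) and defers the real work to a
 * fast taskqueue.  With taskqueue_enqueue_fast() retired, plain
 * taskqueue_enqueue() is used on a queue created with
 * taskqueue_create_fast().  Minimal sketch (xx_softc, xx_mask_intr
 * and xx_task_fn are hypothetical names):
 *
 *	static int
 *	xx_filter(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		xx_mask_intr(sc);
 *		taskqueue_enqueue(sc->tq, &sc->task);
 *		return (FILTER_HANDLED);
 *	}
 *
 *	TASK_INIT(&sc->task, 0, xx_task_fn, sc);
 *	sc->tq = taskqueue_create_fast("xx_tq", M_NOWAIT,
 *	    taskqueue_thread_enqueue, &sc->tq);
 *	taskqueue_start_threads(&sc->tq, 1, PI_NET, "xx taskq");
 */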
IFM_FDX; sc->speed = 100; break; case 3: /* 1 Gbps */ req->ifm_active |= IFM_1000_T | IFM_FDX; sc->speed = 1000; break; case 4: /* 10 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 10000; break; case 5: /* 20 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 20000; break; case 6: /* 25 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 25000; break; case 7: /* 40 Gbps */ req->ifm_active |= IFM_40G_SR4 | IFM_FDX; sc->speed = 40000; break; default: sc->speed = 0; break; } return; } int oce_media_change(struct ifnet *ifp) { return 0; } /***************************************************************************** * Transmit routines functions * *****************************************************************************/ static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index) { int rc = 0, i, retry_cnt = 0; bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS]; struct mbuf *m, *m_temp; struct oce_wq *wq = sc->wq[wq_index]; struct oce_packet_desc *pd; struct oce_nic_hdr_wqe *nichdr; struct oce_nic_frag_wqe *nicfrag; int num_wqes; uint32_t reg_value; boolean_t complete = TRUE; m = *mpp; if (!m) return EINVAL; if (!(m->m_flags & M_PKTHDR)) { rc = ENXIO; goto free_ret; } if(oce_tx_asic_stall_verify(sc, m)) { m = oce_insert_vlan_tag(sc, m, &complete); if(!m) { device_printf(sc->dev, "Insertion unsuccessful\n"); return 0; } } if (m->m_pkthdr.csum_flags & CSUM_TSO) { /* consolidate packet buffers for TSO/LSO segment offload */ #if defined(INET6) || defined(INET) m = oce_tso_setup(sc, mpp); #else m = NULL; #endif if (m == NULL) { rc = ENXIO; goto free_ret; } } pd = &wq->pckts[wq->pkt_desc_head]; retry: rc = bus_dmamap_load_mbuf_sg(wq->tag, pd->map, m, segs, &pd->nsegs, BUS_DMA_NOWAIT); if (rc == 0) { num_wqes = pd->nsegs + 1; if (IS_BE(sc) || IS_SH(sc)) { /*Dummy required only for BE3.*/ if (num_wqes & 1) num_wqes++; } if (num_wqes >= RING_NUM_FREE(wq->ring)) { bus_dmamap_unload(wq->tag, pd->map); return EBUSY; } atomic_store_rel_int(&wq->pkt_desc_head, (wq->pkt_desc_head + 1) % \ OCE_WQ_PACKET_ARRAY_SIZE); bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE); pd->mbuf = m; nichdr = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe); nichdr->u0.dw[0] = 0; nichdr->u0.dw[1] = 0; nichdr->u0.dw[2] = 0; nichdr->u0.dw[3] = 0; nichdr->u0.s.complete = complete; nichdr->u0.s.event = 1; nichdr->u0.s.crc = 1; nichdr->u0.s.forward = 0; nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0; nichdr->u0.s.udpcs = (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0; nichdr->u0.s.tcpcs = (m->m_pkthdr.csum_flags & CSUM_TCP) ? 
1 : 0; nichdr->u0.s.num_wqe = num_wqes; nichdr->u0.s.total_length = m->m_pkthdr.len; if (m->m_flags & M_VLANTAG) { nichdr->u0.s.vlan = 1; /*Vlan present*/ nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag; } if (m->m_pkthdr.csum_flags & CSUM_TSO) { if (m->m_pkthdr.tso_segsz) { nichdr->u0.s.lso = 1; nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz; } if (!IS_BE(sc) || !IS_SH(sc)) nichdr->u0.s.ipcs = 1; } RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); for (i = 0; i < pd->nsegs; i++) { nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_frag_wqe); nicfrag->u0.s.rsvd0 = 0; nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr); nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr); nicfrag->u0.s.frag_len = segs[i].ds_len; pd->wqe_idx = wq->ring->pidx; RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); } if (num_wqes > (pd->nsegs + 1)) { nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_frag_wqe); nicfrag->u0.dw[0] = 0; nicfrag->u0.dw[1] = 0; nicfrag->u0.dw[2] = 0; nicfrag->u0.dw[3] = 0; pd->wqe_idx = wq->ring->pidx; RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); pd->nsegs++; } if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); wq->tx_stats.tx_reqs++; wq->tx_stats.tx_wrbs += num_wqes; wq->tx_stats.tx_bytes += m->m_pkthdr.len; wq->tx_stats.tx_pkts++; bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); reg_value = (num_wqes << 16) | wq->wq_id; OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value); } else if (rc == EFBIG) { if (retry_cnt == 0) { m_temp = m_defrag(m, M_NOWAIT); if (m_temp == NULL) goto free_ret; m = m_temp; *mpp = m_temp; retry_cnt = retry_cnt + 1; goto retry; } else goto free_ret; } else if (rc == ENOMEM) return rc; else goto free_ret; return 0; free_ret: m_freem(*mpp); *mpp = NULL; return rc; } static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status) { struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) wq->parent; struct mbuf *m; pd = &wq->pckts[wq->pkt_desc_tail]; atomic_store_rel_int(&wq->pkt_desc_tail, (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE); atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1); bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(wq->tag, pd->map); m = pd->mbuf; m_freem(m); pd->mbuf = NULL; if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) { if (wq->ring->num_used < (wq->ring->num_items / 2)) { sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE); oce_tx_restart(sc, wq); } } } static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq) { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING) return; #if __FreeBSD_version >= 800000 if (!drbr_empty(sc->ifp, wq->br)) #else if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd)) #endif - taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask); + taskqueue_enqueue(taskqueue_swi, &wq->txtask); } #if defined(INET6) || defined(INET) static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp) { struct mbuf *m; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif struct ether_vlan_header *eh; struct tcphdr *th; uint16_t etype; int total_len = 0, ehdrlen = 0; m = *mpp; if (M_WRITABLE(m) == 0) { m = m_dup(*mpp, M_NOWAIT); if (!m) return NULL; m_freem(*mpp); *mpp = m; } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case 
ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehdrlen); if (ip->ip_p != IPPROTO_TCP) return NULL; th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2); break; #endif #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen); if (ip6->ip6_nxt != IPPROTO_TCP) return NULL; th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2); break; #endif default: return NULL; } m = m_pullup(m, total_len); if (!m) return NULL; *mpp = m; return m; } #endif /* INET6 || INET */ void oce_tx_task(void *arg, int npending) { struct oce_wq *wq = arg; POCE_SOFTC sc = wq->parent; struct ifnet *ifp = sc->ifp; int rc = 0; #if __FreeBSD_version >= 800000 LOCK(&wq->tx_lock); rc = oce_multiq_transmit(ifp, NULL, wq); if (rc) { device_printf(sc->dev, "TX[%d] restart failed\n", wq->queue_index); } UNLOCK(&wq->tx_lock); #else oce_start(ifp); #endif } void oce_start(struct ifnet *ifp) { POCE_SOFTC sc = ifp->if_softc; struct mbuf *m; int rc = 0; int def_q = 0; /* Defualt tx queue is 0*/ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!sc->link_status) return; do { IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) break; LOCK(&sc->wq[def_q]->tx_lock); rc = oce_tx(sc, &m, def_q); UNLOCK(&sc->wq[def_q]->tx_lock); if (rc) { if (m != NULL) { sc->wq[def_q]->tx_stats.tx_stops ++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m); m = NULL; } break; } if (m != NULL) ETHER_BPF_MTAP(ifp, m); } while (TRUE); return; } /* Handle the Completion Queue for transmit */ uint16_t oce_wq_handler(void *arg) { struct oce_wq *wq = (struct oce_wq *)arg; POCE_SOFTC sc = wq->parent; struct oce_cq *cq = wq->cq; struct oce_nic_tx_cqe *cqe; int num_cqes = 0; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); while (cqe->u0.dw[3]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe)); wq->ring->cidx = cqe->u0.s.wqe_index + 1; if (wq->ring->cidx >= wq->ring->num_items) wq->ring->cidx -= wq->ring->num_items; oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status); wq->tx_stats.tx_compl++; cqe->u0.dw[3] = 0; RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); num_cqes++; } if (num_cqes) oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); return 0; } static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq) { POCE_SOFTC sc = ifp->if_softc; int status = 0, queue_index = 0; struct mbuf *next = NULL; struct buf_ring *br = NULL; br = wq->br; queue_index = wq->queue_index; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { if (m != NULL) status = drbr_enqueue(ifp, br, m); return status; } if (m != NULL) { if ((status = drbr_enqueue(ifp, br, m)) != 0) return status; } while ((next = drbr_peek(ifp, br)) != NULL) { if (oce_tx(sc, &next, queue_index)) { if (next == NULL) { drbr_advance(ifp, br); } else { drbr_putback(ifp, br, next); wq->tx_stats.tx_stops ++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; } break; } drbr_advance(ifp, br); if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); if (next->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); ETHER_BPF_MTAP(ifp, next); } return 0; } /***************************************************************************** * Receive routines 
functions * *****************************************************************************/ static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe) { uint32_t out; struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int i, len, frag_len; struct mbuf *m = NULL, *tail = NULL; uint16_t vtag; len = cqe->u0.s.pkt_size; if (!len) { /*partial DMA workaround for Lancer*/ oce_discard_rx_comp(rq, cqe); goto exit; } /* Get vlan_tag value */ if(IS_BE(sc) || IS_SH(sc)) vtag = BSWAP_16(cqe->u0.s.vlan_tag); else vtag = cqe->u0.s.vlan_tag; for (i = 0; i < cqe->u0.s.num_fragments; i++) { if (rq->packets_out == rq->packets_in) { device_printf(sc->dev, "RQ transmit descriptor missing\n"); } out = rq->packets_out + 1; if (out == OCE_RQ_PACKET_ARRAY_SIZE) out = 0; pd = &rq->pckts[rq->packets_out]; rq->packets_out = out; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(rq->tag, pd->map); rq->pending--; frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len; pd->mbuf->m_len = frag_len; if (tail != NULL) { /* additional fragments */ pd->mbuf->m_flags &= ~M_PKTHDR; tail->m_next = pd->mbuf; tail = pd->mbuf; } else { /* first fragment, fill out much of the packet header */ pd->mbuf->m_pkthdr.len = len; pd->mbuf->m_pkthdr.csum_flags = 0; if (IF_CSUM_ENABLED(sc)) { if (cqe->u0.s.l4_cksum_pass) { pd->mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); pd->mbuf->m_pkthdr.csum_data = 0xffff; } if (cqe->u0.s.ip_cksum_pass) { if (!cqe->u0.s.ip_ver) { /* IPV4 */ pd->mbuf->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID); } } } m = tail = pd->mbuf; } pd->mbuf = NULL; len -= frag_len; } if (m) { if (!oce_cqe_portid_valid(sc, cqe)) { m_freem(m); goto exit; } m->m_pkthdr.rcvif = sc->ifp; #if __FreeBSD_version >= 800000 if (rq->queue_index) m->m_pkthdr.flowid = (rq->queue_index - 1); else m->m_pkthdr.flowid = rq->queue_index; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); #endif /* This deternies if vlan tag is Valid */ if (oce_cqe_vtp_valid(sc, cqe)) { if (sc->function_mode & FNM_FLEX10_MODE) { /* FLEX10. If QnQ is not set, neglect VLAN */ if (cqe->u0.s.qnq) { m->m_pkthdr.ether_vtag = vtag; m->m_flags |= M_VLANTAG; } } else if (sc->pvid != (vtag & VLAN_VID_MASK)) { /* In UMC mode generally pvid will be striped by hw. But in some cases we have seen it comes with pvid. So if pvid == vlan, neglect vlan. 
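(An aside on the checksum bookkeeping just above: translating the controller's verdicts into mbuf metadata follows a fixed mbuf(9) convention, restated here as a hypothetical helper. csum_data = 0xffff is the "pseudo-header already verified" marker the stack expects:

#include <sys/param.h>
#include <sys/mbuf.h>

static void
rx_csum_ok(struct mbuf *m, int l3_ok, int l4_ok)
{
    m->m_pkthdr.csum_flags = 0;
    if (l4_ok) {    /* TCP/UDP checksum verified by hardware */
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        m->m_pkthdr.csum_data = 0xffff;
    }
    if (l3_ok)      /* IPv4 header checksum verified */
        m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
}

The driver applies the IPv4 flags only when the CQE reports a v4 packet, as the code above shows.)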
*/ m->m_pkthdr.ether_vtag = vtag; m->m_flags |= M_VLANTAG; } } if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); #if defined(INET6) || defined(INET) /* Try to queue to LRO */ if (IF_LRO_ENABLED(sc) && (cqe->u0.s.ip_cksum_pass) && (cqe->u0.s.l4_cksum_pass) && (!cqe->u0.s.ip_ver) && (rq->lro.lro_cnt != 0)) { if (tcp_lro_rx(&rq->lro, m, 0) == 0) { rq->lro_pkts_queued ++; goto post_done; } /* If LRO posting fails then try to post to STACK */ } #endif (*sc->ifp->if_input) (sc->ifp, m); #if defined(INET6) || defined(INET) post_done: #endif /* Update rx stats per queue */ rq->rx_stats.rx_pkts++; rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size; rq->rx_stats.rx_frags += cqe->u0.s.num_fragments; if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET) rq->rx_stats.rx_mcast_pkts++; if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET) rq->rx_stats.rx_ucast_pkts++; } exit: return; } static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) { uint32_t out, i = 0; struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int num_frags = cqe->u0.s.num_fragments; for (i = 0; i < num_frags; i++) { if (rq->packets_out == rq->packets_in) { device_printf(sc->dev, "RQ transmit descriptor missing\n"); } out = rq->packets_out + 1; if (out == OCE_RQ_PACKET_ARRAY_SIZE) out = 0; pd = &rq->pckts[rq->packets_out]; rq->packets_out = out; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(rq->tag, pd->map); rq->pending--; m_freem(pd->mbuf); } } static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) { struct oce_nic_rx_cqe_v1 *cqe_v1; int vtp = 0; if (sc->be3_native) { cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; vtp = cqe_v1->u0.s.vlan_tag_present; } else vtp = cqe->u0.s.vlan_tag_present; return vtp; } static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) { struct oce_nic_rx_cqe_v1 *cqe_v1; int port_id = 0; if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) { cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; port_id = cqe_v1->u0.s.port; if (sc->port_id != port_id) return 0; } else ;/* For BE3 legacy and Lancer this is dummy */ return 1; } #if defined(INET6) || defined(INET) static void oce_rx_flush_lro(struct oce_rq *rq) { struct lro_ctrl *lro = &rq->lro; struct lro_entry *queued; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; if (!IF_LRO_ENABLED(sc)) return; while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } rq->lro_pkts_queued = 0; return; } static int oce_init_lro(POCE_SOFTC sc) { struct lro_ctrl *lro = NULL; int i = 0, rc = 0; for (i = 0; i < sc->nrqs; i++) { lro = &sc->rq[i]->lro; rc = tcp_lro_init(lro); if (rc != 0) { device_printf(sc->dev, "LRO init failed\n"); return rc; } lro->ifp = sc->ifp; } return rc; } void oce_free_lro(POCE_SOFTC sc) { struct lro_ctrl *lro = NULL; int i = 0; for (i = 0; i < sc->nrqs; i++) { lro = &sc->rq[i]->lro; if (lro) tcp_lro_free(lro); } } #endif int oce_alloc_rx_bufs(struct oce_rq *rq, int count) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int i, in, rc; struct oce_packet_desc *pd; bus_dma_segment_t segs[6]; int nsegs, added = 0; struct oce_nic_rqe *rqe; pd_rxulp_db_t rxdb_reg; bzero(&rxdb_reg, sizeof(pd_rxulp_db_t)); for (i = 0; i < count; i++) { in = rq->packets_in + 1; if (in == OCE_RQ_PACKET_ARRAY_SIZE) in = 0; if (in == rq->packets_out) break; /* no more room */ pd = &rq->pckts[rq->packets_in]; pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (pd->mbuf == NULL) break; pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES; rc = 
bus_dmamap_load_mbuf_sg(rq->tag, pd->map, pd->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (rc) { m_free(pd->mbuf); break; } if (nsegs != 1) { i--; continue; } rq->packets_in = in; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD); rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe); rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr); rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr); DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe)); RING_PUT(rq->ring, 1); added++; rq->pending++; } if (added != 0) { for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) { rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS; rxdb_reg.bits.qid = rq->rq_id; OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); added -= OCE_MAX_RQ_POSTS; } if (added > 0) { rxdb_reg.bits.qid = rq->rq_id; rxdb_reg.bits.num_posted = added; OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); } } return 0; } /* Handle the Completion Queue for receive */ uint16_t oce_rq_handler(void *arg) { struct oce_rq *rq = (struct oce_rq *)arg; struct oce_cq *cq = rq->cq; POCE_SOFTC sc = rq->parent; struct oce_nic_rx_cqe *cqe; int num_cqes = 0, rq_buffers_used = 0; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); while (cqe->u0.dw[2]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe)); RING_GET(rq->ring, 1); if (cqe->u0.s.error == 0) { oce_rx(rq, cqe->u0.s.frag_index, cqe); } else { rq->rx_stats.rxcp_err++; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* Post L3/L4 errors to stack.*/ oce_rx(rq, cqe->u0.s.frag_index, cqe); } rq->rx_stats.rx_compl++; cqe->u0.dw[2] = 0; #if defined(INET6) || defined(INET) if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) { oce_rx_flush_lro(rq); } #endif RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); num_cqes++; if (num_cqes >= (IS_XE201(sc) ? 
8 : oce_max_rsp_handled)) break; } #if defined(INET6) || defined(INET) if (IF_LRO_ENABLED(sc)) oce_rx_flush_lro(rq); #endif if (num_cqes) { oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending; if (rq_buffers_used > 1) oce_alloc_rx_bufs(rq, (rq_buffers_used - 1)); } return 0; } /***************************************************************************** * Helper function prototypes in this file * *****************************************************************************/ static int oce_attach_ifp(POCE_SOFTC sc) { sc->ifp = if_alloc(IFT_ETHER); if (!sc->ifp) return ENOMEM; ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST; sc->ifp->if_ioctl = oce_ioctl; sc->ifp->if_start = oce_start; sc->ifp->if_init = oce_init; sc->ifp->if_mtu = ETHERMTU; sc->ifp->if_softc = sc; #if __FreeBSD_version >= 800000 sc->ifp->if_transmit = oce_multiq_start; sc->ifp->if_qflush = oce_multiq_flush; #endif if_initname(sc->ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1; IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&sc->ifp->if_snd); sc->ifp->if_hwassist = OCE_IF_HWASSIST; sc->ifp->if_hwassist |= CSUM_TSO; sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP); sc->ifp->if_capabilities = OCE_IF_CAPABILITIES; sc->ifp->if_capabilities |= IFCAP_HWCSUM; sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; #if defined(INET6) || defined(INET) sc->ifp->if_capabilities |= IFCAP_TSO; sc->ifp->if_capabilities |= IFCAP_LRO; sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO; #endif sc->ifp->if_capenable = sc->ifp->if_capabilities; sc->ifp->if_baudrate = IF_Gbps(10); #if __FreeBSD_version >= 1000000 sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS; sc->ifp->if_hw_tsomaxsegsize = 4096; #endif ether_ifattach(sc->ifp, sc->macaddr.mac_addr); return 0; } static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag) { POCE_SOFTC sc = ifp->if_softc; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) return; sc->vlan_tag[vtag] = 1; sc->vlans_added++; if (sc->vlans_added <= (sc->max_vlans + 1)) oce_vid_config(sc); } static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag) { POCE_SOFTC sc = ifp->if_softc; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) return; sc->vlan_tag[vtag] = 0; sc->vlans_added--; oce_vid_config(sc); } /* * A max of 64 vlans can be configured in BE. If the user configures * more, place the card in vlan promiscuous mode. 
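Before oce_vid_config() itself, a compact restatement of the policy it implements. The helper name and the overflow convention are illustrative; the 64-entry limit mirrors MAX_VLANFILTER_SIZE as described above:

/* Collect configured VLAN IDs into a filter table. A negative return
 * means the 64-entry hardware filter would overflow and the caller
 * should fall back to VLAN promiscuous mode instead. */
static int
collect_vlan_ids(const uint8_t vlan_tag[4096], uint16_t ids[64])
{
    int vid, n = 0;

    for (vid = 0; vid < 4096; vid++) {
        if (vlan_tag[vid] == 0)
            continue;
        if (n == 64)
            return (-1);
        ids[n++] = vid;
    }
    return (n);
}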
*/ static int oce_vid_config(POCE_SOFTC sc) { struct normal_vlan vtags[MAX_VLANFILTER_SIZE]; uint16_t ntags = 0, i; int status = 0; if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) && (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) { for (i = 0; i < MAX_VLANS; i++) { if (sc->vlan_tag[i]) { vtags[ntags].vtag = i; ntags++; } } if (ntags) status = oce_config_vlan(sc, (uint8_t) sc->if_id, vtags, ntags, 1, 0); } else status = oce_config_vlan(sc, (uint8_t) sc->if_id, NULL, 0, 1, 1); return status; } static void oce_mac_addr_set(POCE_SOFTC sc) { uint32_t old_pmac_id = sc->pmac_id; int status = 0; status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr, sc->macaddr.size_of_struct); if (!status) return; status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)), sc->if_id, &sc->pmac_id); if (!status) { status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id); bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr, sc->macaddr.size_of_struct); } if (status) device_printf(sc->dev, "Failed update macaddress\n"); } static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data) { POCE_SOFTC sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int rc = ENXIO; char cookie[32] = {0}; void *priv_data = (void *)ifr->ifr_data; void *ioctl_ptr; uint32_t req_size; struct mbx_hdr req; OCE_DMA_MEM dma_mem; struct mbx_common_get_cntl_attr *fw_cmd; if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE))) return EFAULT; if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE))) return EINVAL; ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE); if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr))) return EFAULT; req_size = le32toh(req.u0.req.request_length); if (req_size > 65536) return EINVAL; req_size += sizeof(struct mbx_hdr); rc = oce_dma_alloc(sc, req_size, &dma_mem, 0); if (rc) return ENOMEM; if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) { rc = EFAULT; goto dma_free; } rc = oce_pass_through_mbox(sc, &dma_mem, req_size); if (rc) { rc = EIO; goto dma_free; } if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size)) rc = EFAULT; /* firmware is filling all the attributes for this ioctl except the driver version..so fill it */ if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) { fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr; strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str, COMPONENT_REVISION, strlen(COMPONENT_REVISION)); } dma_free: oce_dma_free(sc, &dma_mem); return rc; } static void oce_eqd_set_periodic(POCE_SOFTC sc) { struct oce_set_eqd set_eqd[OCE_MAX_EQ]; struct oce_aic_obj *aic; struct oce_eq *eqo; uint64_t now = 0, delta; int eqd, i, num = 0; uint32_t ips = 0; int tps; for (i = 0 ; i < sc->neqs; i++) { eqo = sc->eq[i]; aic = &sc->aic_obj[i]; /* When setting the static eq delay from the user space */ if (!aic->enable) { eqd = aic->et_eqd; goto modify_eqd; } now = ticks; /* Over flow check */ if ((now < aic->ticks) || (eqo->intr < aic->intr_prev)) goto done; delta = now - aic->ticks; tps = delta/hz; /* Interrupt rate based on elapsed ticks */ if(tps) ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps; if (ips > INTR_RATE_HWM) eqd = aic->cur_eqd + 20; else if (ips < INTR_RATE_LWM) eqd = aic->cur_eqd / 2; else goto done; if (eqd < 10) eqd = 0; /* Make sure that the eq delay is in the known range */ eqd = min(eqd, aic->max_eqd); eqd = max(eqd, aic->min_eqd); modify_eqd: if (eqd != aic->cur_eqd) { set_eqd[num].delay_multiplier = (eqd * 65)/100; set_eqd[num].eq_id = eqo->eq_id; aic->cur_eqd = eqd; num++; } done: aic->intr_prev = eqo->intr; aic->ticks = now; } /* 
Is there at least one eq that needs to be modified? */ if(num) oce_mbox_eqd_modify_periodic(sc, set_eqd, num); } static void oce_detect_hw_error(POCE_SOFTC sc) { uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0; uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; uint32_t i; if (sc->hw_error) return; if (IS_XE201(sc)) { sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET); sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET); } } else { ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW); ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH); ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK); ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK); ue_low = (ue_low & ~ue_low_mask); ue_high = (ue_high & ~ue_high_mask); } /* On certain platforms BE hardware can indicate spurious UEs. * Allow the h/w to stop working completely in case of a real UE. * Hence not setting the hw_error for UE detection. */ if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sc->hw_error = TRUE; device_printf(sc->dev, "Error detected in the card\n"); } if (sliport_status & SLIPORT_STATUS_ERR_MASK) { device_printf(sc->dev, "ERR: sliport status 0x%x\n", sliport_status); device_printf(sc->dev, "ERR: sliport error1 0x%x\n", sliport_err1); device_printf(sc->dev, "ERR: sliport error2 0x%x\n", sliport_err2); } if (ue_low) { for (i = 0; ue_low; ue_low >>= 1, i++) { if (ue_low & 1) device_printf(sc->dev, "UE: %s bit set\n", ue_status_low_desc[i]); } } if (ue_high) { for (i = 0; ue_high; ue_high >>= 1, i++) { if (ue_high & 1) device_printf(sc->dev, "UE: %s bit set\n", ue_status_hi_desc[i]); } } } static void oce_local_timer(void *arg) { POCE_SOFTC sc = arg; int i = 0; oce_detect_hw_error(sc); oce_refresh_nic_stats(sc); oce_refresh_queue_stats(sc); oce_mac_addr_set(sc); /* TX watchdog */ for (i = 0; i < sc->nwqs; i++) oce_tx_restart(sc, sc->wq[i]); /* calculate and set the eq delay for optimal interrupt rate */ if (IS_BE(sc) || IS_SH(sc)) oce_eqd_set_periodic(sc); callout_reset(&sc->timer, hz, oce_local_timer, sc); } /* NOTE : This should only be called holding * DEVICE_LOCK. */ static void oce_if_deactivate(POCE_SOFTC sc) { int i, mtime = 0; int wait_req = 0; struct oce_rq *rq; struct oce_wq *wq; struct oce_eq *eq; sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* Wait for a max of 400ms for TX completions to be done */ while (mtime < 400) { wait_req = 0; for_all_wq_queues(sc, wq, i) { if (wq->ring->num_used) { wait_req = 1; DELAY(1); break; } } mtime += 1; if (!wait_req) break; } /* Stop intrs and finish any bottom halves pending */ oce_hw_intr_disable(sc); /* Since taskqueue_drain takes a Giant lock, we should not acquire any other lock. So unlock the device lock and reacquire it after completing taskqueue_drain. */ UNLOCK(&sc->dev_lock); for (i = 0; i < sc->intr_count; i++) { if (sc->intrs[i].tq != NULL) { taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task); } } LOCK(&sc->dev_lock); /* Delete RX queue in card with flush param */ oce_stop_rx(sc); /* Invalidate any pending cq and eq entries */ for_all_evnt_queues(sc, eq, i) oce_drain_eq(eq); for_all_rq_queues(sc, rq, i) oce_drain_rq_cq(rq); for_all_wq_queues(sc, wq, i) oce_drain_wq_cq(wq); /* But still we need to get MCC async events.
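(Stepping back to oce_eqd_set_periodic() above: the adaptive moderation decision reduces to a few comparisons. A sketch with the rate watermarks passed in as parameters, since the actual INTR_RATE_HWM/INTR_RATE_LWM values are defined elsewhere in the driver:

#include <stdint.h>

static int
pick_eq_delay(uint32_t ips, int cur_eqd, int min_eqd, int max_eqd,
    uint32_t hwm, uint32_t lwm)
{
    int eqd;

    if (ips > hwm)              /* too many interrupts/s: back off */
        eqd = cur_eqd + 20;
    else if (ips < lwm)         /* too few: halve the delay */
        eqd = cur_eqd / 2;
    else
        return (cur_eqd);       /* in band: leave it alone */
    if (eqd < 10)
        eqd = 0;
    if (eqd > max_eqd)          /* clamp to the known-good range */
        eqd = max_eqd;
    if (eqd < min_eqd)
        eqd = min_eqd;
    return (eqd);
}

The firmware then receives the result scaled by 65/100, as the delay_multiplier computation above shows.)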
So enable intrs and also arm first EQ */ oce_hw_intr_enable(sc); oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE); DELAY(10); } static void oce_if_activate(POCE_SOFTC sc) { struct oce_eq *eq; struct oce_rq *rq; struct oce_wq *wq; int i, rc = 0; sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; oce_hw_intr_disable(sc); oce_start_rx(sc); for_all_rq_queues(sc, rq, i) { rc = oce_start_rq(rq); if (rc) device_printf(sc->dev, "Unable to start RX\n"); } for_all_wq_queues(sc, wq, i) { rc = oce_start_wq(wq); if (rc) device_printf(sc->dev, "Unable to start TX\n"); } for_all_evnt_queues(sc, eq, i) oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE); oce_hw_intr_enable(sc); } static void process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe) { /* Update Link status */ if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) == ASYNC_EVENT_LINK_UP) { sc->link_status = ASYNC_EVENT_LINK_UP; if_link_state_change(sc->ifp, LINK_STATE_UP); } else { sc->link_status = ASYNC_EVENT_LINK_DOWN; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } } /* Handle the Completion Queue for the Mailbox/Async notifications */ uint16_t oce_mq_handler(void *arg) { struct oce_mq *mq = (struct oce_mq *)arg; POCE_SOFTC sc = mq->parent; struct oce_cq *cq = mq->cq; int num_cqes = 0, evt_type = 0, optype = 0; struct oce_mq_cqe *cqe; struct oce_async_cqe_link_state *acqe; struct oce_async_event_grp5_pvid_state *gcqe; struct oce_async_event_qnq *dbgcqe; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); while (cqe->u0.dw[3]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe)); if (cqe->u0.s.async_event) { evt_type = cqe->u0.s.event_type; optype = cqe->u0.s.async_type; if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) { /* Link status evt */ acqe = (struct oce_async_cqe_link_state *)cqe; process_link_state(sc, acqe); } else if ((evt_type == ASYNC_EVENT_GRP5) && (optype == ASYNC_EVENT_PVID_STATE)) { /* GRP5 PVID */ gcqe = (struct oce_async_event_grp5_pvid_state *)cqe; if (gcqe->enabled) sc->pvid = gcqe->tag & VLAN_VID_MASK; else sc->pvid = 0; } else if(evt_type == ASYNC_EVENT_CODE_DEBUG && optype == ASYNC_EVENT_DEBUG_QNQ) { dbgcqe = (struct oce_async_event_qnq *)cqe; if(dbgcqe->valid) sc->qnqid = dbgcqe->vlan_tag; sc->qnq_debug_event = TRUE; } } cqe->u0.dw[3] = 0; RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); num_cqes++; } if (num_cqes) oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); return 0; } static void setup_max_queues_want(POCE_SOFTC sc) { /* Check if it is FLEX machine. 
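(A compressed restatement of update_queues_got() from just below, as a hypothetical helper: with RSS, one WQ per interrupt vector plus one extra RQ for the default, non-RSS ring; without RSS, a single pair.

static void
size_queues(int nvectors, int rss_enabled, int *nrqs, int *nwqs)
{
    if (rss_enabled) {
        *nrqs = nvectors + 1;   /* +1 for the default receive queue */
        *nwqs = nvectors;
    } else {
        *nrqs = 1;
        *nwqs = 1;
    }
}

BE2 parts are further capped to a single WQ, as the code notes.)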
Is so dont use RSS */ if ((sc->function_mode & FNM_FLEX10_MODE) || (sc->function_mode & FNM_UMC_MODE) || (sc->function_mode & FNM_VNIC_MODE) || (!is_rss_enabled(sc)) || IS_BE2(sc)) { sc->nrqs = 1; sc->nwqs = 1; } else { sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1; sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs); } if (IS_BE2(sc) && is_rss_enabled(sc)) sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1; } static void update_queues_got(POCE_SOFTC sc) { if (is_rss_enabled(sc)) { sc->nrqs = sc->intr_count + 1; sc->nwqs = sc->intr_count; } else { sc->nrqs = 1; sc->nwqs = 1; } if (IS_BE2(sc)) sc->nwqs = 1; } static int oce_check_ipv6_ext_hdr(struct mbuf *m) { struct ether_header *eh = mtod(m, struct ether_header *); caddr_t m_datatemp = m->m_data; if (eh->ether_type == htons(ETHERTYPE_IPV6)) { m->m_data += sizeof(struct ether_header); struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); if((ip6->ip6_nxt != IPPROTO_TCP) && \ (ip6->ip6_nxt != IPPROTO_UDP)){ struct ip6_ext *ip6e = NULL; m->m_data += sizeof(struct ip6_hdr); ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *); if(ip6e->ip6e_len == 0xff) { m->m_data = m_datatemp; return TRUE; } } m->m_data = m_datatemp; } return FALSE; } static int is_be3_a1(POCE_SOFTC sc) { if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) { return TRUE; } return FALSE; } static struct mbuf * oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete) { uint16_t vlan_tag = 0; if(!M_WRITABLE(m)) return NULL; /* Embed vlan tag in the packet if it is not part of it */ if(m->m_flags & M_VLANTAG) { vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag); m->m_flags &= ~M_VLANTAG; } /* if UMC, ignore vlan tag insertion and instead insert pvid */ if(sc->pvid) { if(!vlan_tag) vlan_tag = sc->pvid; *complete = FALSE; } if(vlan_tag) { m = ether_vlanencap(m, vlan_tag); } if(sc->qnqid) { m = ether_vlanencap(m, sc->qnqid); *complete = FALSE; } return m; } static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m) { if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \ oce_check_ipv6_ext_hdr(m)) { return TRUE; } return FALSE; } static void oce_get_config(POCE_SOFTC sc) { int rc = 0; uint32_t max_rss = 0; if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native)) max_rss = OCE_LEGACY_MODE_RSS; else max_rss = OCE_MAX_RSS; if (!IS_BE(sc)) { rc = oce_get_profile_config(sc, max_rss); if (rc) { sc->nwqs = OCE_MAX_WQ; sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; } } else { /* For BE3 don't rely on fw for determining the resources */ sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; sc->nwqs = OCE_MAX_WQ; sc->max_vlans = MAX_VLANFILTER_SIZE; } } Index: head/sys/dev/re/if_re.c =================================================================== --- head/sys/dev/re/if_re.c (revision 296271) +++ head/sys/dev/re/if_re.c (revision 296272) @@ -1,4074 +1,4074 @@ /*- * Copyright (c) 1997, 1998-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. * * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. 
It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7440, so the max MTU possible with this * driver is 7422 bytes. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Tunables. */ static int intr_filter = 0; TUNABLE_INT("hw.re.intr_filter", &intr_filter); static int msi_disable = 0; TUNABLE_INT("hw.re.msi_disable", &msi_disable); static int msix_disable = 0; TUNABLE_INT("hw.re.msix_disable", &msix_disable); static int prefer_iomap = 0; TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static const struct rl_type re_devs[] = { { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0, "D-Link DGE-530(T) Gigabit Ethernet Adapter" }, { RT_VENDORID, RT_DEVICEID_8139, 0, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8101E, 0, "RealTek 810xE PCIe 10/100baseTX" }, { RT_VENDORID, RT_DEVICEID_8168, 0, "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, 0, "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169SC, 0, "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, { USR_VENDORID, USR_DEVICEID_997902, 0, "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } }; static const struct rl_hwrev re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "", RL_MTU }, { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, { RL_HWREV_8401E, RL_8169, 
"8401E", RL_MTU }, { RL_HWREV_8402, RL_8169, "8402", RL_MTU }, { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU }, { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K}, { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K}, { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K}, { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K}, { RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K}, { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K}, { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K}, { 0, 0, NULL, 0 } }; static int re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf **); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int re_allocmem (device_t, struct rl_softc *); static __inline void re_discard_rxbuf (struct rl_softc *, int); static int re_newbuf (struct rl_softc *, int); static int re_jumbo_newbuf (struct rl_softc *, int); static int re_rx_list_init (struct rl_softc *); static int re_jrx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); #ifdef RE_FIXUP_RX static __inline void re_fixup_rx (struct mbuf *); #endif static int re_rxeof (struct rl_softc *, int *); static void re_txeof (struct rl_softc *); #ifdef DEVICE_POLLING static int re_poll (struct ifnet *, enum poll_cmd, int); static int re_poll_locked (struct ifnet *, enum poll_cmd, int); #endif static int re_intr (void *); static void re_intr_msi (void *); static void re_tick (void *); static void re_int_task (void *, int); static void re_start (struct ifnet *); static void re_start_locked (struct ifnet *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_init_locked (struct rl_softc *); static void re_stop (struct rl_softc *); static void re_watchdog (struct rl_softc *); static int re_suspend (device_t); static int re_resume (device_t); static int re_shutdown (device_t); static int re_ifmedia_upd (struct ifnet *); static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static void re_miibus_statchg (device_t); static void re_set_jumbo (struct rl_softc *, int); static void re_set_rxmode (struct rl_softc *); static void re_reset (struct rl_softc *); static void re_setwol (struct rl_softc *); static void re_clrwol (struct rl_softc *); static void re_set_linkspeed (struct rl_softc *); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include MODULE_DEPEND(re, netmap, 
1, 1, 1); #endif /* !DEV_NETMAP */ #ifdef RE_DIAG static int re_diag (struct rl_softc *); #endif static void re_add_sysctls (struct rl_softc *); static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), DEVMETHOD_END }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void re_eeprom_putbyte(struct rl_softc *sc, int addr) { int d, i; d = addr | (RL_9346_READ << sc->rl_eewidth); /* * Feed in each bit and strobe the clock. */ for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) { int i; u_int16_t word = 0, *ptr; CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); DELAY(100); for (i = 0; i < cnt; i++) { CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); re_eeprom_getword(sc, off + i, &word); CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); ptr = (u_int16_t *)(dest + (i * 2)); *ptr = word; } CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); } static int re_gmii_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return (rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY read failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. 
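(Both GMII accessors here share the same bounded busy-wait shape; factored out as a hypothetical helper it reads:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

/* Poll until (reg & mask) reaches the wanted state, bounded by 'tries'
 * iterations with 'us' microseconds between reads. The register read
 * is abstracted so the sketch stays self-contained. */
static int
wait_bit(uint32_t (*read_reg)(void *), void *ctx, uint32_t mask,
    int want_set, int tries, int us)
{
    int i;

    for (i = 0; i < tries; i++) {
        uint32_t v = read_reg(ctx);

        if (((v & mask) != 0) == (want_set != 0))
            return (0);
        DELAY(us);
    }
    return (ETIMEDOUT);
}

re_gmii_readreg() waits for RL_PHYAR_BUSY to set, re_gmii_writereg() for it to clear, both with RL_PHY_TIMEOUT tries and 25us steps.)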
*/ DELAY(20); return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY write failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. */ DELAY(20); return (0); } static int re_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } rval = CSR_READ_2(sc, re8139_reg); if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { /* 8139C+ has different bit layout. */ rval &= ~(BMCR_LOOP | BMCR_ISO); } return (rval); } static int re_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; if (sc->rl_type == RL_8139CPLUS) { /* 8139C+ has different bit layout. */ data &= ~(BMCR_LOOP | BMCR_ISO); } break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, re8139_reg, data); return (0); } static void re_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; case IFM_1000_T: if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) break; sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers do not provide any interface to the RX/TX * MACs for resolved speed, duplex and flow-control parameters. */ } /* * Set the RX configuration and 64-bit multicast hash filter. 
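The hash computation at the heart of re_set_rxmode() below, restated on its own: the standard 8139-family scheme takes the top six bits of the big-endian CRC of the link-level address to pick one of 64 filter bits. A minimal sketch, hypothetical helper name:

#include <sys/types.h>
#include <net/ethernet.h>

static void
hash_maddr_sketch(const uint8_t lladdr[ETHER_ADDR_LEN], uint32_t hashes[2])
{
    uint32_t h;

    /* Top 6 CRC bits select one of 64 filter positions. */
    h = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 26;
    if (h < 32)
        hashes[0] |= 1U << h;
    else
        hashes[1] |= 1U << (h - 32);
}

On PCIe parts the two 32-bit halves are programmed byte-swapped and in reverse order, a quirk re_set_rxmode() handles explicitly.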
*/ static void re_set_rxmode(struct rl_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t hashes[2] = { 0, 0 }; uint32_t h, rxfilt; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0) rxfilt |= RL_RXCFG_EARLYOFF; else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) rxfilt |= RL_RXCFG_EARLYOFFV2; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= RL_RXCFG_RX_ALLPHYS; /* * Unlike other hardwares, we have to explicitly set * RL_RXCFG_RX_MULTI to receive multicast frames in * promiscuous mode. */ rxfilt |= RL_RXCFG_RX_MULTI; hashes[0] = hashes[1] = 0xffffffff; goto done; } if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } if_maddr_runlock(ifp); if (hashes[0] != 0 || hashes[1] != 0) { /* * For some unfathomable reason, RealTek decided to * reverse the order of the multicast hash registers * in the PCI Express parts. This means we have to * write the hash pattern in reverse order for those * devices. */ if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; } rxfilt |= RL_RXCFG_RX_MULTI; } if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) { /* Disable multicast filtering due to silicon bug. */ hashes[0] = 0xffffffff; hashes[1] = 0xffffffff; } done: CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); CSR_WRITE_4(sc, RL_RXCFG, rxfilt); } static void re_reset(struct rl_softc *sc) { int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) CSR_WRITE_1(sc, 0x82, 1); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); } #ifdef RE_DIAG /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. * * There's no way to work around this (short of talking a soldering * iron to the board), however we can detect it. The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. 
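A sketch of the pass/fail check at the core of re_diag() below: the looped-back frame must come back byte-identical, and anything else is taken as the 64-bit-slot DMA defect described above. Hypothetical helper:

#include <sys/param.h>
#include <sys/systm.h>
#include <net/ethernet.h>

static int
loopback_frame_ok(const struct ether_header *eh, const uint8_t *dst,
    const uint8_t *src)
{
    /* Compare destination, source, and ethertype against what was sent. */
    return (bcmp(eh->ether_dhost, dst, ETHER_ADDR_LEN) == 0 &&
        bcmp(eh->ether_shost, src, ETHER_ADDR_LEN) == 0 &&
        eh->ether_type == htons(ETHERTYPE_IP));
}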
*/ static int re_diag(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0, phyaddr; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 == NULL) return (ENOBUFS); RL_LOCK(sc); /* * Initialize the NIC in test mode. This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); sc->rl_flags |= RL_FLAG_LINK; if (sc->rl_type == RL_8169) phyaddr = 1; else phyaddr = 0; re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); for (i = 0; i < RL_TIMEOUT; i++) { status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); if (!(status & BMCR_RESET)) break; } re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); CSR_WRITE_2(sc, RL_ISR, RL_INTRS); DELAY(100000); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us. */ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); RL_UNLOCK(sc); /* XXX: re_diag must not be called when in ALTQ mode */ IF_HANDOFF(&ifp->if_snd, m0, ifp); RL_LOCK(sc); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { device_printf(sc->rl_dev, "diagnostic failed, failed to receive packet in" " loopback mode\n"); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap); m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { device_printf(sc->rl_dev, "diagnostic failed, received short packet\n"); error = EIO; goto done; } /* Test that the received packet data matches what we sent. 
*/ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", dst, ":", src, ":", ETHERTYPE_IP); device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); device_printf(sc->rl_dev, "You may have a defective 32-bit " "NIC plugged into a 64-bit PCI slot.\n"); device_printf(sc->rl_dev, "Please re-install the NIC in a " "32-bit slot for proper operation.\n"); device_printf(sc->rl_dev, "Read the re(4) man page for more " "details.\n"); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; sc->rl_flags &= ~RL_FLAG_LINK; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); RL_UNLOCK(sc); return (error); } #endif /* * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(device_t dev) { const struct rl_type *t; uint16_t devid, vendor; uint16_t revid, sdevid; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); sdevid = pci_get_subdevice(dev); if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { /* * Only attach to rev. 3 of the Linksys EG1032 adapter. * Rev. 2 is supported by sk(4). */ return (ENXIO); } } if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid != 0x20) { /* 8139, let rl(4) take care of this device. */ return (ENXIO); } } t = re_devs; for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } /* * Map a single buffer address. */ static void re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int re_allocmem(device_t dev, struct rl_softc *sc) { bus_addr_t lowaddr; bus_size_t rx_list_size, tx_list_size; int error; int i; rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); /* * Allocate the parent bus DMA tag appropriate for PCI. * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD * register should be set. However some RealTek chips are known * to be buggy on DAC handling, therefore disable DAC by limiting * DMA address space to 32bit. PCIe variants of RealTek chips * may not have the limitation. */ lowaddr = BUS_SPACE_MAXADDR; if ((sc->rl_flags & RL_FLAG_PCIE) == 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->rl_parent_tag); if (error) { device_printf(dev, "could not allocate parent DMA tag\n"); return (error); } /* * Allocate map for TX mbufs. */ error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, NULL, NULL, &sc->rl_ldata.rl_tx_mtag); if (error) { device_printf(dev, "could not allocate TX DMA tag\n"); return (error); } /* * Allocate map for RX mbufs. 
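The bus_dma_tag_create() calls in re_allocmem() take many positional arguments; annotated as a hypothetical wrapper for the descriptor-ring case (driver types and macros from if_rlreg.h assumed in scope), the parameters read:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static int
create_ring_tag(struct rl_softc *sc, bus_size_t size, bus_dma_tag_t *tag)
{
    return (bus_dma_tag_create(
        sc->rl_parent_tag,          /* parent tag */
        RL_RING_ALIGN, 0,           /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr: ring kept below 4 GB */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg (unused) */
        size, 1,                    /* maxsize; nsegments: 1 contiguous run */
        size, 0,                    /* maxsegsz, flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        tag));
}

The mbuf tags differ mainly in alignment and in allowing RL_NTXSEGS segments for TX.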
	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
		    &sc->rl_ldata.rl_jrx_mtag);
		if (error) {
			device_printf(dev,
			    "could not allocate jumbo RX DMA tag\n");
			return (error);
		}
	}
	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    tx_list_size, 1, tx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring\n");
		return (error);
	}

	/* Load the map for the TX ring. */
	sc->rl_ldata.rl_tx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    tx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
		device_printf(dev, "could not load TX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for TX\n");
			return (error);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    rx_list_size, 1, rx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not create RX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate RX DMA ring\n");
		return (error);
	}

	/* Load the map for the RX ring. */
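	/*
	 * The RX ring follows the same three-step busdma pattern used
	 * for the TX ring above: create a tag, allocate coherent zeroed
	 * memory, then load the map to obtain the bus address that is
	 * programmed into the chip.
	 */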
	sc->rl_ldata.rl_rx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    rx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
		device_printf(dev, "could not load RX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */
	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
		    &sc->rl_ldata.rl_jrx_sparemap);
		if (error) {
			device_printf(dev,
			    "could not create spare DMA map for jumbo RX\n");
			return (error);
		}
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
			if (error) {
				device_printf(dev,
				    "could not create DMA map for jumbo RX\n");
				return (error);
			}
		}
	}
	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
	    &sc->rl_ldata.rl_rx_sparemap);
	if (error) {
		device_printf(dev, "could not create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for RX\n");
			return (error);
		}
	}

	/* Create DMA map for statistics. */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0,
	    NULL, NULL, &sc->rl_ldata.rl_stag);
	if (error) {
		device_printf(dev, "could not create statistics DMA tag\n");
		return (error);
	}
	/* Allocate DMA'able memory for statistics. */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
	    (void **)&sc->rl_ldata.rl_stats,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_smap);
	if (error) {
		device_printf(dev,
		    "could not allocate statistics DMA memory\n");
		return (error);
	}
	/* Load the map for statistics. */
	sc->rl_ldata.rl_stats_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
	    &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
		device_printf(dev, "could not load statistics DMA memory\n");
		return (ENOMEM);
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int16_t		as[ETHER_ADDR_LEN / 2];
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	const struct rl_hwrev	*hw_rev;
	int			capmask, error = 0, hwrev, i, msic, msixc,
				phy, reg, rid;
	u_int32_t		cap, ctl;
	u_int16_t		devid, re_did = 0;
	uint8_t			cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over I/O space.  Because
	 * the RTL8169SC does not seem to work when memory mapping is
	 * used, always activate I/O mapping for that chip.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
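		/*
		 * If the preferred mapping cannot be allocated, the code
		 * below retries with I/O port space (BAR 0), so attach
		 * can still proceed on systems where the memory BAR is
		 * unusable.
		 */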
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = pci_msi_count(dev);
	msixc = pci_msix_count(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rl_flags |= RL_FLAG_PCIE;
		sc->rl_expcap = reg;
	}
	if (bootverbose) {
		device_printf(dev, "MSI count : %d\n", msic);
		device_printf(dev, "MSI-X count : %d\n", msixc);
	}
	if (msix_disable > 0)
		msixc = 0;
	if (msi_disable > 0)
		msic = 0;
	/* Prefer MSI-X to MSI. */
	if (msixc > 0) {
		msixc = RL_MSI_MESSAGES;
		rid = PCIR_BAR(4);
		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->rl_res_pba == NULL) {
			device_printf(sc->rl_dev,
			    "could not allocate MSI-X PBA resource\n");
		}
		if (sc->rl_res_pba != NULL &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI-X message\n",
				    msixc);
				sc->rl_flags |= RL_FLAG_MSIX;
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
			if (sc->rl_res_pba != NULL)
				bus_release_resource(dev, SYS_RES_MEMORY, rid,
				    sc->rl_res_pba);
			sc->rl_res_pba = NULL;
			msixc = 0;
		}
	}
	/* Prefer MSI to INTx. */
	if (msixc == 0 && msic > 0) {
		msic = RL_MSI_MESSAGES;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
			msic = 0;
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	/* Disable ASPM L0S/L1. */
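	/*
	 * ASPM power-state transitions have been reported to cause lost
	 * interrupts and hangs on some RealTek parts, so the L0s/L1
	 * link control bits are cleared here when the capability is
	 * present.
	 */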
	if (sc->rl_expcap != 0) {
		cap = pci_read_config(dev, sc->rl_expcap +
		    PCIER_LINK_CAP, 2);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = pci_read_config(dev, sc->rl_expcap +
			    PCIER_LINK_CTL, 2);
			if ((ctl & PCIEM_LINK_CTL_ASPMC) != 0) {
				ctl &= ~PCIEM_LINK_CTL_ASPMC;
				pci_write_config(dev, sc->rl_expcap +
				    PCIER_LINK_CTL, ctl, 2);
				device_printf(dev, "ASPM disabled\n");
			}
		} else
			device_printf(dev, "no ASPM capability\n");
	}

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		sc->rl_macrev = hwrev & 0x00700000;
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if (sc->rl_macrev == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E_VL:
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168G:
	case RL_HWREV_8411B:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
		    RL_FLAG_8168G_PLUS;
		break;
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
		if (pci_get_device(dev) == RT_DEVICEID_8101E) {
			/* RTL8106E(US), RTL8107E */
			sc->rl_flags |= RL_FLAG_FASTETHER;
		} else
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_8168G_PLUS;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, sc->rl_cfg1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
	cfg = CSR_READ_1(sc, sc->rl_cfg5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;
	re_add_sysctls(sc);

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
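	/*
	 * The PHY must be awake before mii_attach() probes it below.
	 * The 0xD1 access for the 8401E appears to be an undocumented
	 * vendor register poke.
	 */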
	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (hw_rev->rl_rev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	/*
	 * RTL8168/8111C generates frames with a wrong IP checksum if
	 * the packet has IP options, so disable TX checksum offloading
	 * on those revisions.
	 */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4;
	} else {
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	}
	ifp->if_hwassist |= CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_init = re_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

#define	RE_PHYAD_INTERNAL	0

	/* Do MII setup. */
	phy = RE_PHYAD_INTERNAL;
	if (sc->rl_type == RL_8169)
		phy = 1;
	capmask = BMSR_DEFCAPMASK;
	if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
		capmask &= ~BMSR_EXTSTAT;
	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
	    re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (ifp->if_capabilities & IFCAP_HWCSUM)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	/* Enable WOL if PM is supported. */
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
	/*
	 * Don't enable TSO by default.  It is known to generate
	 * corrupted TCP segments (bad TCP options) under certain
	 * circumstances.
	 */
	ifp->if_hwassist &= ~CSUM_TSO;
	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEV_NETMAP
	re_netmap_attach(sc);
#endif /* DEV_NETMAP */

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */
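	/*
	 * re_diag() (see above) pushes a loopback frame through the
	 * chip and verifies the payload; a failure here is treated as
	 * fatal and aborts the attach.
	 */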
	if (hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			device_printf(dev,
			    "attach aborted due to hardware diag failure\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}
#endif

#ifdef RE_TX_MODERATION
	intr_filter = 1;
#endif
	/* Hook interrupt last to avoid having to lock softc */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
	    intr_filter == 0) {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
		    &sc->rl_intrhand[0]);
	} else {
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
		    &sc->rl_intrhand[0]);
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		re_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
re_detach(device_t dev)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	int			i, rid;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;
	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif
		RL_LOCK(sc);
#if 0
		sc->suspended = 1;
#endif
		re_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to re_ioctl(),
		 * which will try to call re_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the re_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/*
	 * The rest is resource deallocation, so we should already be
	 * stopped here.
	 */
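	/*
	 * Teardown mirrors attach in reverse order: interrupt handler
	 * first, then the ifnet, IRQ/MSI resources, the register BAR,
	 * and finally every DMA map and tag that re_allocmem() created.
	 */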
	if (sc->rl_intrhand[0] != NULL) {
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
		sc->rl_intrhand[0] = NULL;
	}
	if (ifp != NULL) {
#ifdef DEV_NETMAP
		netmap_detach(ifp);
#endif /* DEV_NETMAP */
		if_free(ifp);
	}
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		rid = 0;
	else
		rid = 1;
	if (sc->rl_irq[0] != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
		sc->rl_irq[0] = NULL;
	}
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	if (sc->rl_res_pba) {
		rid = PCIR_BAR(4);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
	}
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	/* Unload and free the RX DMA ring memory and map */
	if (sc->rl_ldata.rl_rx_list_tag) {
		if (sc->rl_ldata.rl_rx_list_addr)
			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list_map);
		if (sc->rl_ldata.rl_rx_list)
			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list,
			    sc->rl_ldata.rl_rx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */
	if (sc->rl_ldata.rl_tx_list_tag) {
		if (sc->rl_ldata.rl_tx_list_addr)
			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list_map);
		if (sc->rl_ldata.rl_tx_list)
			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list,
			    sc->rl_ldata.rl_tx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */
	if (sc->rl_ldata.rl_tx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		}
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
	}
	if (sc->rl_ldata.rl_rx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_rx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
			    sc->rl_ldata.rl_rx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
	}
	if (sc->rl_ldata.rl_jrx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_jrx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
			    sc->rl_ldata.rl_jrx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
	}

	/* Unload and free the stats buffer and map */
	if (sc->rl_ldata.rl_stag) {
		if (sc->rl_ldata.rl_stats_addr)
			bus_dmamap_unload(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_smap);
		if (sc->rl_ldata.rl_stats)
			bus_dmamem_free(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}

static __inline void
re_discard_rxbuf(struct rl_softc *sc, int idx)
{
	struct rl_desc		*desc;
	struct rl_rxdesc	*rxd;
	uint32_t		cmdstat;

	if (sc->rl_ifp->if_mtu > RL_MTU &&
	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
	else
		rxd = &sc->rl_ldata.rl_rx_desc[idx];
	desc = &sc->rl_ldata.rl_rx_list[idx];
	desc->rl_vlanctl = 0;
	cmdstat = rxd->rx_size;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
}

static int
re_newbuf(struct rl_softc *sc, int idx)
{
	struct mbuf		*m;
	struct rl_rxdesc	*rxd;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct rl_desc		*desc;
	uint32_t		cmdstat;
	int			error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifdef RE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The RealTek chip requires RX buffers to be aligned on 64-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back six bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m_adj(m, RE_ETHER_ALIGN);
#endif
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->rl_ldata.rl_rx_desc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
	}

	rxd->rx_m = m;
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
	rxd->rx_size = segs[0].ds_len;
	sc->rl_ldata.rl_rx_sparemap = map;
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	desc = &sc->rl_ldata.rl_rx_list[idx];
	desc->rl_vlanctl = 0;
	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
	cmdstat = segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);

	return (0);
}

static int
re_jumbo_newbuf(struct rl_softc *sc, int idx)
{
	struct mbuf		*m;
	struct rl_rxdesc	*rxd;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	struct rl_desc		*desc;
	uint32_t		cmdstat;
	int			error, nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
#ifdef RE_FIXUP_RX
	m_adj(m, RE_ETHER_ALIGN);
#endif
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
	}

	rxd->rx_m = m;
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
	rxd->rx_size = segs[0].ds_len;
	sc->rl_ldata.rl_jrx_sparemap = map;
	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	desc = &sc->rl_ldata.rl_rx_list[idx];
	desc->rl_vlanctl = 0;
	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
	cmdstat = segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);

	return (0);
}

#ifdef RE_FIXUP_RX
static __inline void
re_fixup_rx(struct mbuf *m)
{
	int			i;
	uint16_t		*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
}
#endif

static int
re_tx_list_init(struct rl_softc *sc)
{
	struct rl_desc		*desc;
	int			i;

	RL_LOCK_ASSERT(sc);
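	/*
	 * Reset the TX ring to a known-empty state: clear all
	 * descriptors, drop any stale mbuf pointers, and mark the last
	 * descriptor EOR so the chip wraps back to the start.
	 */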
	bzero(sc->rl_ldata.rl_tx_list,
	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
#ifdef DEV_NETMAP
	re_netmap_tx_init(sc);
#endif /* DEV_NETMAP */
	/* Set EOR. */
	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_tx_prodidx = 0;
	sc->rl_ldata.rl_tx_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;

	return (0);
}

static int
re_rx_list_init(struct rl_softc *sc)
{
	int			error, i;

	bzero(sc->rl_ldata.rl_rx_list,
	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
		if ((error = re_newbuf(sc, i)) != 0)
			return (error);
	}
#ifdef DEV_NETMAP
	re_netmap_rx_init(sc);
#endif /* DEV_NETMAP */

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;
	sc->rl_int_rx_act = 0;

	return (0);
}

static int
re_jrx_list_init(struct rl_softc *sc)
{
	int			error, i;

	bzero(sc->rl_ldata.rl_rx_list,
	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
		if ((error = re_jumbo_newbuf(sc, i)) != 0)
			return (error);
	}

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;
	sc->rl_int_rx_act = 0;

	return (0);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static int
re_rxeof(struct rl_softc *sc, int *rx_npktsp)
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, rxerr, total_len;
	struct rl_desc		*cur_rx;
	u_int32_t		rxstat, rxvlan;
	int			jumbo, maxpkt = 16, rx_npkts = 0;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
#ifdef DEV_NETMAP
	if (netmap_rx_irq(ifp, 0, &rx_npkts))
		return 0;
#endif /* DEV_NETMAP */
	if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		jumbo = 1;
	else
		jumbo = 0;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
	    i = RL_RX_DESC_NXT(sc, i)) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		rxstat = le32toh(cur_rx->rl_cmdstat);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxvlan = le32toh(cur_rx->rl_vlanctl);
		if (jumbo != 0)
			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
		else
			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
			/*
			 * RTL8168C or later controllers do not
			 * support multi-fragment packet.
			 */
			re_discard_rxbuf(sc, i);
			continue;
		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
			if (re_newbuf(sc, i) != 0) {
				/*
				 * If this is part of a multi-fragment packet,
				 * discard all the pieces.
				 */
				if (sc->rl_head != NULL) {
					m_freem(sc->rl_head);
					sc->rl_head = sc->rl_tail = NULL;
				}
				re_discard_rxbuf(sc, i);
				continue;
			}
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
			rxerr = 1;
			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
			    total_len > 8191 &&
			    (rxstat & RL_RDESC_STAT_ERRS) ==
			    RL_RDESC_STAT_GIANT)
				rxerr = 0;
			if (rxerr != 0) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				/*
				 * If this is part of a multi-fragment packet,
				 * discard all the pieces.
				 */
				if (sc->rl_head != NULL) {
					m_freem(sc->rl_head);
					sc->rl_head = sc->rl_tail = NULL;
				}
				re_discard_rxbuf(sc, i);
				continue;
			}
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (jumbo != 0)
			rxerr = re_jumbo_newbuf(sc, i);
		else
			rxerr = re_newbuf(sc, i);
		if (rxerr != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rl_head != NULL) {
			if (jumbo != 0)
				m->m_len = total_len;
			else {
				m->m_len = total_len % RE_RX_DESC_BUFLEN;
				if (m->m_len == 0)
					m->m_len = RE_RX_DESC_BUFLEN;
			}
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef RE_FIXUP_RX
		re_fixup_rx(m);
#endif
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
				/* Check IP header checksum */
				if (rxstat & RL_RDESC_STAT_PROTOID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;

				/* Check TCP/UDP checksum */
				if ((RL_TCPPKT(rxstat) &&
				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
				    (RL_UDPPKT(rxstat) &&
				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			} else {
				/*
				 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP
				 */
				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
				    (rxvlan & RL_RDESC_IPV4))
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
				    (rxvlan & RL_RDESC_IPV4))
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if (((rxstat & RL_RDESC_STAT_TCP) &&
				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
				    ((rxstat & RL_RDESC_STAT_UDP) &&
				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		maxpkt--;
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
		rx_npkts++;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	if (maxpkt)
		return (EAGAIN);

	return (0);
}

static void
re_txeof(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	struct rl_txdesc	*txd;
	u_int32_t		txstat;
	int			cons;

	cons = sc->rl_ldata.rl_tx_considx;
	if (cons == sc->rl_ldata.rl_tx_prodidx)
		return;

	ifp = sc->rl_ifp;
#ifdef DEV_NETMAP
	if (netmap_tx_irq(ifp, 0))
		return;
#endif /* DEV_NETMAP */
	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (; cons != sc->rl_ldata.rl_tx_prodidx;
	    cons = RL_TX_DESC_NXT(sc, cons)) {
		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
		if (txstat & RL_TDESC_STAT_OWN)
			break;
		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */
		if (txstat & RL_TDESC_CMD_EOF) {
			txd = &sc->rl_ldata.rl_tx_desc[cons];
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap);
			KASSERT(txd->tx_m != NULL,
			    ("%s: freeing NULL mbufs!", __func__));
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			else
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		sc->rl_ldata.rl_tx_free++;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	sc->rl_ldata.rl_tx_considx = cons;

	/* No changes made to the TX ring, so no flush needed */

	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
#ifdef RE_TX_MODERATION
		/*
		 * If not all descriptors have been reaped yet, reload
		 * the timer so that we will eventually get another
		 * interrupt that will cause us to re-enter this routine.
		 * This is done in case the transmitter has gone idle.
		 */
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
	} else
		sc->rl_watchdog_timer = 0;
}

static void
re_tick(void *xsc)
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = xsc;

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		re_miibus_statchg(sc->rl_dev);
	/*
	 * Reclaim transmitted frames here. Technically it is not
	 * necessary to do here but it ensures periodic reclamation
	 * regardless of Tx completion interrupt which seems to be
	 * lost on PCIe based controllers under certain situations.
	 */
	re_txeof(sc);
	re_watchdog(sc);
	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}

#ifdef DEVICE_POLLING
static int
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	RL_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rx_npkts = re_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
	return (rx_npkts);
}

static int
re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;
	int rx_npkts;

	RL_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	re_rxeof(sc, &rx_npkts);
	re_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		re_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RL_ISR);
		if (status == 0xffff)
			return (rx_npkts);
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
		    (sc->rl_flags & RL_FLAG_PCIE))
			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

		/*
		 * XXX check behaviour on receiver stalls.
		 */
		if (status & RL_ISR_SYSTEM_ERR) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			re_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
re_intr(void *arg)
{
	struct rl_softc		*sc;
	uint16_t		status;

	sc = arg;
	status = CSR_READ_2(sc, RL_ISR);
	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
		return (FILTER_STRAY);
	CSR_WRITE_2(sc, RL_IMR, 0);

-	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
+	taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);

	return (FILTER_HANDLED);
}

static void
re_int_task(void *arg, int npending)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			rval = 0;

	sc = arg;
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	status = CSR_READ_2(sc, RL_ISR);
	CSR_WRITE_2(sc, RL_ISR, status);

	if (sc->suspended ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		RL_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		RL_UNLOCK(sc);
		return;
	}
#endif

	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
		rval = re_rxeof(sc, NULL);

	/*
	 * Some chips will ignore a second TX request issued
	 * while an existing transmission is in progress. If
	 * the transmitter goes idle but there are still
	 * packets waiting to be sent, we need to restart the
	 * channel here to flush them out. This only seems to
	 * be required with the PCIe devices.
	 */
	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
	    (sc->rl_flags & RL_FLAG_PCIE))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	if (status & (
#ifdef RE_TX_MODERATION
	    RL_ISR_TIMEOUT_EXPIRED|
#else
	    RL_ISR_TX_OK|
#endif
	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
		re_txeof(sc);

	if (status & RL_ISR_SYSTEM_ERR) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		re_init_locked(sc);
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		re_start_locked(ifp);

	RL_UNLOCK(sc);

	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
-		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
+		taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
		return;
	}

	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
}

static void
re_intr_msi(void *xsc)
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	uint16_t		intrs, status;

	sc = xsc;
	RL_LOCK(sc);
	ifp = sc->rl_ifp;
#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		RL_UNLOCK(sc);
		return;
	}
#endif
	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		RL_UNLOCK(sc);
		return;
	}

	intrs = RL_INTRS_CPLUS;
	status = CSR_READ_2(sc, RL_ISR);
	CSR_WRITE_2(sc, RL_ISR, status);
	if (sc->rl_int_rx_act > 0) {
		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN);
		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
		    RL_ISR_RX_OVERRUN);
	}

	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
		re_rxeof(sc, NULL);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			if (sc->rl_int_rx_mod != 0 &&
			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
				/* Rearm one-shot timer. */
				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
				sc->rl_int_rx_act = 1;
			} else {
				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
				sc->rl_int_rx_act = 0;
			}
		}
	}

	/*
	 * Some chips will ignore a second TX request issued
	 * while an existing transmission is in progress. If
	 * the transmitter goes idle but there are still
	 * packets waiting to be sent, we need to restart the
	 * channel here to flush them out. This only seems to
	 * be required with the PCIe devices.
	 */
	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
	    (sc->rl_flags & RL_FLAG_PCIE))
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
		re_txeof(sc);

	if (status & RL_ISR_SYSTEM_ERR) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		re_init_locked(sc);
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			re_start_locked(ifp);
		CSR_WRITE_2(sc, RL_IMR, intrs);
	}
	RL_UNLOCK(sc);
}

static int
re_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct rl_txdesc	*txd, *txd_last;
	bus_dma_segment_t	segs[RL_NTXSEGS];
	bus_dmamap_t		map;
	struct mbuf		*m_new;
	struct rl_desc		*desc;
	int			nsegs, prod;
	int			i, error, ei, si;
	int			padlen;
	uint32_t		cmdstat, csum_flags, vlanctl;

	RL_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	/*
	 * With some of the RealTek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX IP checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 */
	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m_new = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m_new == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m_new;
		}
		if ((*m_head)->m_next != NULL ||
		    M_TRAILINGSPACE(*m_head) < padlen) {
			m_new = m_defrag(*m_head, M_NOWAIT);
			if (m_new == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		} else
			m_new = *m_head;

		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
		m_new->m_pkthdr.len += padlen;
		m_new->m_len = m_new->m_pkthdr.len;
		*m_head = m_new;
	}

	prod = sc->rl_ldata.rl_tx_prodidx;
	txd = &sc->rl_ldata.rl_tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
		if (m_new == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m_new;
		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check for number of available descriptors. */
	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */
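	/*
	 * Note: on DESCV2 controllers the checksum and TSO request bits
	 * live in the vlanctl word, while older controllers carry them
	 * in the command/status word; both branches below encode the
	 * same logical requests.
	 */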
	vlanctl = 0;
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
			csum_flags |= RL_TDESC_CMD_LGSEND;
			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
			    RL_TDESC_CMD_MSSVALV2_SHIFT);
		} else {
			csum_flags |= RL_TDESC_CMD_LGSEND |
			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
			    RL_TDESC_CMD_MSSVAL_SHIFT);
		}
	} else {
		/*
		 * Unconditionally enable IP checksum if TCP or UDP
		 * checksum is required. Otherwise, TCP/UDP checksum
		 * offload has no effect.
		 */
		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
				csum_flags |= RL_TDESC_CMD_IPCSUM;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_TCP) != 0)
					csum_flags |= RL_TDESC_CMD_TCPCSUM;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_UDP) != 0)
					csum_flags |= RL_TDESC_CMD_UDPCSUM;
			} else {
				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_TCP) != 0)
					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_UDP) != 0)
					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
			}
		}
	}

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
	 */
	if ((*m_head)->m_flags & M_VLANTAG)
		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG;

	si = prod;
	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
		desc = &sc->rl_ldata.rl_tx_list[prod];
		desc->rl_vlanctl = htole32(vlanctl);
		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
		cmdstat = segs[i].ds_len;
		if (i != 0)
			cmdstat |= RL_TDESC_CMD_OWN;
		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;
		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
		sc->rl_ldata.rl_tx_free--;
	}
	/* Update producer index. */
	sc->rl_ldata.rl_tx_prodidx = prod;

	/* Set EOF on the last descriptor. */
	ei = RL_TX_DESC_PRV(sc, prod);
	desc = &sc->rl_ldata.rl_tx_list[ei];
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);

	desc = &sc->rl_ldata.rl_tx_list[si];
	/* Set SOF and transfer ownership of packet to the chip. */
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain. (Swap last and first dmamaps.)
	 */
	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
	map = txd->tx_dmamap;
	txd->tx_dmamap = txd_last->tx_dmamap;
	txd_last->tx_dmamap = map;
	txd_last->tx_m = *m_head;

	return (0);
}

static void
re_start(struct ifnet *ifp)
{
	struct rl_softc		*sc;

	sc = ifp->if_softc;
	RL_LOCK(sc);
	re_start_locked(ifp);
	RL_UNLOCK(sc);
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */
static void
re_start_locked(struct ifnet *ifp)
{
	struct rl_softc		*sc;
	struct mbuf		*m_head;
	int			queued;

	sc = ifp->if_softc;

#ifdef DEV_NETMAP
	/* XXX is this necessary ? */
	if (ifp->if_capenable & IFCAP_NETMAP) {
		struct netmap_kring *kring = &NA(ifp)->tx_rings[0];
		if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
			/* kick the tx unit */
			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
#ifdef RE_TX_MODERATION
			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
			sc->rl_watchdog_timer = 5;
		}
		return;
	}
#endif /* DEV_NETMAP */

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->rl_ldata.rl_tx_free > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (re_encap(sc, &m_head) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		queued++;
	}

	if (queued == 0) {
#ifdef RE_TX_MODERATION
		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
		return;
	}

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

#ifdef RE_TX_MODERATION
	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->rl_watchdog_timer = 5;
}

static void
re_set_jumbo(struct rl_softc *sc, int jumbo)
{

	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
		pci_set_max_read_req(sc->rl_dev, 4096);
		return;
	}

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	if (jumbo != 0) {
		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
		    RL_CFG3_JUMBO_EN0);
		switch (sc->rl_hwrev->rl_rev) {
		case RL_HWREV_8168DP:
			break;
		case RL_HWREV_8168E:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
			break;
		default:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
		}
	} else {
		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
		    ~RL_CFG3_JUMBO_EN0);
		switch (sc->rl_hwrev->rl_rev) {
		case RL_HWREV_8168DP:
			break;
		case RL_HWREV_8168E:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
			break;
		default:
			CSR_WRITE_1(sc, sc->rl_cfg4,
			    CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
		}
	}
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	switch (sc->rl_hwrev->rl_rev) {
	case RL_HWREV_8168DP:
		pci_set_max_read_req(sc->rl_dev, 4096);
		break;
	default:
		if (jumbo != 0)
			pci_set_max_read_req(sc->rl_dev, 512);
		else
			pci_set_max_read_req(sc->rl_dev, 4096);
	}
}

static void
re_init(void *xsc)
{
	struct rl_softc		*sc = xsc;

	RL_LOCK(sc);
	re_init_locked(sc);
	RL_UNLOCK(sc);
}

static void
re_init_locked(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	struct mii_data		*mii;
	uint32_t		reg;
	uint16_t		cfg;
	union {
		uint32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(sc);

	/* Put controller into known state. */
	re_reset(sc);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
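	/*
	 * When a jumbo MTU is configured on a JUMBOV2 controller, the
	 * jumbo RX list is used and hardware checksum/TSO offload is
	 * switched off below, since offloading is not usable with
	 * jumbo frames on these parts.
	 */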
	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		if (ifp->if_mtu > RL_MTU) {
			if (re_jrx_list_init(sc) != 0) {
				device_printf(sc->rl_dev,
				    "no memory for jumbo RX buffers\n");
				re_stop(sc);
				return;
			}
			/* Disable checksum offloading for jumbo frames. */
			ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
			ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
		} else {
			if (re_rx_list_init(sc) != 0) {
				device_printf(sc->rl_dev,
				    "no memory for RX buffers\n");
				re_stop(sc);
				return;
			}
		}
		re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
	} else {
		if (re_rx_list_init(sc) != 0) {
			device_printf(sc->rl_dev, "no memory for RX buffers\n");
			re_stop(sc);
			return;
		}
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
			if (ifp->if_mtu > RL_MTU)
				pci_set_max_read_req(sc->rl_dev, 512);
			else
				pci_set_max_read_req(sc->rl_dev, 4096);
		}
	}
	re_tx_list_init(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	cfg = RL_CPLUSCMD_PCI_MRW;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= RL_CPLUSCMD_VLANSTRIP;
	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
		/* XXX magic. */
		cfg |= 0x0001;
	} else
		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
		reg = 0x000fff00;
		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
			reg |= 0x000000ff;
		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
			reg |= 0x00f00000;
		CSR_WRITE_4(sc, 0x7c, reg);
		/* Disable interrupt mitigation. */
		CSR_WRITE_2(sc, 0xe2, 0);
	}
	/*
	 * Disable TSO if interface MTU size is greater than MSS
	 * allowed in controller.
	 */
	if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
		ifp->if_capenable &= ~IFCAP_TSO4;
		ifp->if_hwassist &= ~CSUM_TSO;
	}

	/*
	 * Init our MAC address.  Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	/* Copy MAC address on stack to align. */
	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));

	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
		/* Disable RXDV gate. */
		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
		    ~0x00080000);
	}

	/*
	 * Enable transmit and receive for pre-RTL8168G controllers.
	 * RX/TX MACs should be enabled before RX/TX configuration.
	 */
	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);

	/*
	 * Set the initial TX configuration.
	 */
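	/*
	 * In test (diagnostic) mode the transmitter is placed in
	 * internal loopback so that re_diag() can receive its own
	 * frames; normal operation programs the standard TX config.
	 */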
*/ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Set the initial RX configuration. */ re_set_rxmode(sc); /* Configure interrupt moderation. */ if (sc->rl_type == RL_8169) { /* Magic from vendor. */ CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); } /* * Enable transmit and receive for RTL8168G and later controllers. * RX/TX MACs should be enabled after RX/TX configuration. */ if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. */ #ifdef RE_TX_MODERATION /* * Use timer interrupt register to moderate TX interrupt * moderation, which dramatically improves TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); #else /* * Use timer interrupt register to moderate RX interrupt * moderation. */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(sc->rl_int_rx_mod)); } else { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); } #endif /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. */ if (sc->rl_type == RL_8169) { if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { /* * For controllers that use new jumbo frame scheme, * set maximum size of jumbo frame depending on * controller revisions. */ if (ifp->if_mtu > RL_MTU) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, sc->rl_hwrev->rl_max_mtu + ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + ETHER_CRC_LEN); else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && sc->rl_hwrev->rl_max_mtu == RL_MTU) { /* RTL810x has no jumbo frame support. */ CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); } if (sc->rl_testmode) return; CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | RL_CFG1_DRVLOAD); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->rl_flags &= ~RL_FLAG_LINK; mii_mediachg(mii); sc->rl_watchdog_timer = 0; callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } /* * Set media options. */ static int re_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc; struct mii_data *mii; int error; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); error = mii_mediachg(mii); RL_UNLOCK(sc); return (error); } /* * Report current media status. 
*/ static void re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; RL_UNLOCK(sc); } static int re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu || ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 && ifr->ifr_mtu > RL_MTU)) { error = EINVAL; break; } RL_LOCK(sc); if (ifp->if_mtu != ifr->ifr_mtu) { ifp->if_mtu = ifr->ifr_mtu; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); ifp->if_hwassist &= ~CSUM_TSO; } VLAN_CAPABILITIES(ifp); } RL_UNLOCK(sc); break; case SIOCSIFFLAGS: RL_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->rl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) re_set_rxmode(sc); } else re_init_locked(sc); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_stop(sc); } sc->rl_if_flags = ifp->if_flags; RL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_set_rxmode(sc); RL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: { int mask, reinit; mask = ifr->ifr_reqcap ^ ifp->if_capenable; reinit = 0; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(re_poll, ifp); if (error) return (error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ RL_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= RE_CSUM_FEATURES; else ifp->if_hwassist &= ~RE_CSUM_FEATURES; reinit = 1; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit = 1; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; /* TSO over VLAN requires VLAN hardware tagging. 
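 */

/*
 * Minimal sketch of the SIOCSIFCAP toggle logic above: XORing the
 * requested and current capability sets yields exactly the bits that
 * changed, and each supported bit is then toggled individually. The
 * IFCAP_* values below are illustrative, not the real net/if.h ones.
 */
#include <stdio.h>

#define IFCAP_TXCSUM	0x0001	/* illustrative values only */
#define IFCAP_RXCSUM	0x0002
#define IFCAP_TSO4	0x0004

int
main(void)
{
	int capenable = IFCAP_TXCSUM | IFCAP_RXCSUM;	/* current state */
	int reqcap = IFCAP_RXCSUM | IFCAP_TSO4;		/* userland request */
	int mask = reqcap ^ capenable;			/* bits to toggle */

	if (mask & IFCAP_TXCSUM)
		capenable ^= IFCAP_TXCSUM;
	if (mask & IFCAP_TSO4)
		capenable ^= IFCAP_TSO4;
	printf("capenable now 0x%x\n", capenable);	/* prints 0x6 */
	return (0);
}

/*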
*/ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; reinit = 1; } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWTSO)) != 0) reinit = 1; if ((mask & IFCAP_WOL) != 0 && (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } RL_UNLOCK(sc); VLAN_CAPABILITIES(ifp); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void re_watchdog(struct rl_softc *sc) { struct ifnet *ifp; RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) return; ifp = sc->rl_ifp; re_txeof(sc); if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); return; } if_printf(ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); re_rxeof(sc, NULL); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void re_stop(struct rl_softc *sc) { int i; struct ifnet *ifp; struct rl_txdesc *txd; struct rl_rxdesc *rxd; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* * Disable accepting frames to put RX MAC into idle state. * Otherwise it's possible to get frames while stop command * execution is in progress and controller can DMA the frame * to already freed RX buffer during that period. */ CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | RL_RXCFG_RX_BROAD)); if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Enable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) | 0x00080000); } if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_1(sc, sc->rl_txstart) & RL_TXSTART_START) == 0) break; DELAY(20); } if (i == 0) device_printf(sc->rl_dev, "stopping TX poll timed out!\n"); CSR_WRITE_1(sc, RL_COMMAND, 0x00); } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | RL_CMD_RX_ENB); if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_QUEUE_EMPTY) != 0) break; DELAY(100); } if (i == 0) device_printf(sc->rl_dev, "stopping TXQ timed out!\n"); } } else CSR_WRITE_1(sc, RL_COMMAND, 0x00); DELAY(1000); CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_ISR, 0xFFFF); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { txd = &sc->rl_ldata.rl_tx_desc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* Free the RX list buffers. 
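 */

/*
 * The RL_TIMEOUT loops in re_stop() above follow a common bounded-poll
 * pattern: spin a fixed number of times, break as soon as the chip goes
 * idle, and report a timeout if the bound is exhausted. A standalone
 * sketch with a simulated busy register:
 */
#include <stdio.h>

#define FAKE_TIMEOUT	1000	/* iteration bound, like RL_TIMEOUT */

static int busy_polls = 3;	/* pretend the chip stays busy for 3 reads */

static int
fake_reg_busy(void)
{
	return (busy_polls-- > 0);
}

int
main(void)
{
	int i;

	for (i = FAKE_TIMEOUT; i > 0; i--) {
		if (!fake_reg_busy())
			break;
		/* A real driver would DELAY() between polls here. */
	}
	if (i == 0)
		printf("timed out waiting for idle\n");
	else
		printf("went idle with %d polls to spare\n", i);
	return (0);
}

/*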
*/ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_rx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_jrx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int re_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); re_setwol(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int re_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = sc->rl_ifp; /* Take controller out of sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); } /* * Clear WOL matching such that normal Rx filtering * wouldn't interfere with WOL patterns. */ re_clrwol(sc); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int re_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); /* * Mark interface as down since otherwise we will panic if * an interrupt comes in later on, which can happen in some * cases. */ sc->rl_ifp->if_flags &= ~IFF_UP; re_setwol(sc); RL_UNLOCK(sc); return (0); } static void re_set_linkspeed(struct rl_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int aneg, i, phyno; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } miisc = LIST_FIRST(&mii->mii_phys); phyno = miisc->mii_phy; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); re_miibus_writereg(sc->rl_dev, phyno, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); re_miibus_writereg(sc->rl_dev, phyno, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until re(4) gets a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: return; default: break; } } RL_UNLOCK(sc); pause("relnk", hz); RL_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->rl_dev, "establishing a link failed, WOL may not work!\n"); } /* * No link, force MAC to have 100Mbps, full-duplex link.
* MAC does not require reprogramming on resolved speed/duplex, * so this is just for completeness. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; } static void re_setwol(struct rl_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->rl_ifp; /* Put controller into sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } if ((ifp->if_capenable & IFCAP_WOL) != 0) { if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Disable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & ~0x00080000); } re_set_rxmode(sc); if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) re_set_linkspeed(sc); if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); } /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); /* Enable PME. */ v = CSR_READ_1(sc, sc->rl_cfg1); v &= ~RL_CFG1_PME; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, v); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= RL_CFG3_WOL_MAGIC; CSR_WRITE_1(sc, sc->rl_cfg3, v); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | RL_CFG5_WOL_LANWAKE); if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) v |= RL_CFG5_WOL_UCAST; if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((ifp->if_capenable & IFCAP_WOL) == 0 && (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); /* * It seems that hardware resets its link speed to 100Mbps in * power down mode so switching to 100Mbps in driver is not * needed. */ /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void re_clrwol(struct rl_softc *sc) { int pmc; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); CSR_WRITE_1(sc, sc->rl_cfg3, v); /* Config register write done. 
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); } static void re_add_sysctls(struct rl_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int error; ctx = device_get_sysctl_ctx(sc->rl_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", "Statistics Information"); if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) return; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I", "re RX interrupt moderation"); /* Pull in device tunables. */ sc->rl_int_rx_mod = RL_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->rl_dev), device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); if (error == 0) { if (sc->rl_int_rx_mod < RL_TIMER_MIN || sc->rl_int_rx_mod > RL_TIMER_MAX) { device_printf(sc->rl_dev, "int_rx_mod value out of " "range; using default: %d\n", RL_TIMER_DEFAULT); sc->rl_int_rx_mod = RL_TIMER_DEFAULT; } } } static int re_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct rl_softc *sc; struct rl_stats *stats; int error, i, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || req->newptr == NULL) return (error); if (result == 1) { sc = (struct rl_softc *)arg1; RL_LOCK(sc); if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); goto done; } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, RL_DUMPSTATS_HI, RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | RL_DUMPSTATS_START)); for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & RL_DUMPSTATS_START) == 0) break; DELAY(1000); } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); RL_UNLOCK(sc); if (i == 0) { device_printf(sc->rl_dev, "DUMP statistics request timed out\n"); return (ETIMEDOUT); } done: stats = sc->rl_ldata.rl_stats; printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); printf("Tx frames : %ju\n", (uintmax_t)le64toh(stats->rl_tx_pkts)); printf("Rx frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_pkts)); printf("Tx errors : %ju\n", (uintmax_t)le64toh(stats->rl_tx_errs)); printf("Rx errors : %u\n", le32toh(stats->rl_rx_errs)); printf("Rx missed frames : %u\n", (uint32_t)le16toh(stats->rl_missed_pkts)); printf("Rx frame alignment errs : %u\n", (uint32_t)le16toh(stats->rl_rx_framealign_errs)); printf("Tx single collisions : %u\n", le32toh(stats->rl_tx_onecoll)); printf("Tx multiple collisions : %u\n", le32toh(stats->rl_tx_multicolls)); printf("Rx unicast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_ucasts)); printf("Rx broadcast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_bcasts)); printf("Rx multicast frames : %u\n", le32toh(stats->rl_rx_mcasts)); printf("Tx aborts : %u\n", (uint32_t)le16toh(stats->rl_tx_aborts)); printf("Tx underruns : %u\n", (uint32_t)le16toh(stats->rl_rx_underruns)); } return (error); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if 
(value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, RL_TIMER_MAX)); } Index: head/sys/dev/smc/if_smc.c =================================================================== --- head/sys/dev/smc/if_smc.c (revision 296271) +++ head/sys/dev/smc/if_smc.c (revision 296272) @@ -1,1328 +1,1328 @@ /*- * Copyright (c) 2008 Benno Rice. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for SMSC LAN91C111, may work for older variants. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx) #define SMC_UNLOCK(sc) mtx_unlock(&(sc)->smc_mtx) #define SMC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->smc_mtx, MA_OWNED) #define SMC_INTR_PRIORITY 0 #define SMC_RX_PRIORITY 5 #define SMC_TX_PRIORITY 10 devclass_t smc_devclass; static const char *smc_chip_ids[16] = { NULL, NULL, NULL, /* 3 */ "SMSC LAN91C90 or LAN91C92", /* 4 */ "SMSC LAN91C94", /* 5 */ "SMSC LAN91C95", /* 6 */ "SMSC LAN91C96", /* 7 */ "SMSC LAN91C100", /* 8 */ "SMSC LAN91C100FD", /* 9 */ "SMSC LAN91C110FD or LAN91C111FD", NULL, NULL, NULL, NULL, NULL, NULL }; static void smc_init(void *); static void smc_start(struct ifnet *); static void smc_stop(struct smc_softc *); static int smc_ioctl(struct ifnet *, u_long, caddr_t); static void smc_init_locked(struct smc_softc *); static void smc_start_locked(struct ifnet *); static void smc_reset(struct smc_softc *); static int smc_mii_ifmedia_upd(struct ifnet *); static void smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void smc_mii_tick(void *); static void smc_mii_mediachg(struct smc_softc *); static int smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long); static void smc_task_intr(void *, int); static void smc_task_rx(void *, int); static void smc_task_tx(void *, int); static driver_filter_t smc_intr; static timeout_t smc_watchdog; #ifdef DEVICE_POLLING static poll_handler_t smc_poll; #endif /* * MII bit-bang glue */ static uint32_t smc_mii_bitbang_read(device_t); static void smc_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops smc_mii_bitbang_ops = { smc_mii_bitbang_read, smc_mii_bitbang_write, { MGMT_MDO, /* MII_BIT_MDO */ MGMT_MDI, /* MII_BIT_MDI */ MGMT_MCLK, /* MII_BIT_MDC */ MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static __inline void smc_select_bank(struct smc_softc *sc, uint16_t bank) { bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK); bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* Never call this when not in bank 2. 
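 */

/*
 * Sketch of the banked-register discipline that callers of
 * smc_select_bank() must follow, and that the KASSERT in smc_mmu_wait()
 * below checks at run time: the MMU command register is only visible in
 * bank 2. The mask value and register model here are simplified
 * stand-ins for the real BSR bits.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BSR_BANK_MASK	0x0007	/* illustrative mask */

static uint16_t bsr;	/* stand-in for the chip's bank select register */

static void
select_bank(uint16_t bank)
{
	bsr = bank & BSR_BANK_MASK;
}

static void
mmu_command(void)
{
	/* Mirrors the driver's KASSERT: MMU access requires bank 2. */
	assert((bsr & BSR_BANK_MASK) == 2);
	printf("MMU command issued in bank %u\n", bsr);
}

int
main(void)
{
	select_bank(2);
	mmu_command();
	return (0);
}

/*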
*/ static __inline void smc_mmu_wait(struct smc_softc *sc) { KASSERT((bus_read_2(sc->smc_reg, BSR) & BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2", device_get_nameunit(sc->smc_dev))); while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY) ; } static __inline uint8_t smc_read_1(struct smc_softc *sc, bus_size_t offset) { return (bus_read_1(sc->smc_reg, offset)); } static __inline void smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val) { bus_write_1(sc->smc_reg, offset, val); } static __inline uint16_t smc_read_2(struct smc_softc *sc, bus_size_t offset) { return (bus_read_2(sc->smc_reg, offset)); } static __inline void smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val) { bus_write_2(sc->smc_reg, offset, val); } static __inline void smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_read_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_write_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length, int flags) { bus_barrier(sc->smc_reg, offset, length, flags); } int smc_probe(device_t dev) { int rid, type, error; uint16_t val; struct smc_softc *sc; struct resource *reg; sc = device_get_softc(dev); rid = 0; type = SYS_RES_IOPORT; error = 0; if (sc->smc_usemem) type = SYS_RES_MEMORY; reg = bus_alloc_resource_anywhere(dev, type, &rid, 16, RF_ACTIVE); if (reg == NULL) { if (bootverbose) device_printf(dev, "could not allocate I/O resource for probe\n"); return (ENXIO); } /* Check for the identification value in the BSR. */ val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR\n"); error = ENXIO; goto done; } /* * Try switching banks and make sure we still get the identification * value. */ bus_write_2(reg, BSR, 0); val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR after write\n"); error = ENXIO; goto done; } #if 0 /* Check the BAR. */ bus_write_2(reg, BSR, 1); val = bus_read_2(reg, BAR); val = BAR_ADDRESS(val); if (rman_get_start(reg) != val) { if (bootverbose) device_printf(dev, "BAR address %x does not match " "I/O resource address %lx\n", val, rman_get_start(reg)); error = ENXIO; goto done; } #endif /* Compare REV against known chip revisions. */ bus_write_2(reg, BSR, 3); val = bus_read_2(reg, REV); val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; if (smc_chip_ids[val] == NULL) { if (bootverbose) device_printf(dev, "Unknown chip revision: %d\n", val); error = ENXIO; goto done; } device_set_desc(dev, smc_chip_ids[val]); done: bus_release_resource(dev, type, rid, reg); return (error); } int smc_attach(device_t dev) { int type, error; uint16_t val; u_char eaddr[ETHER_ADDR_LEN]; struct smc_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); error = 0; sc->smc_dev = dev; ifp = sc->smc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { error = ENOSPC; goto done; } mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Set up watchdog callout. 
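 */

/*
 * smc_probe() and smc_attach() extract the chip field from the REV
 * register and index smc_chip_ids[] with it. The same extract-and-lookup
 * pattern in standalone form; mask, shift, and table contents are
 * illustrative, the driver's register header is authoritative.
 */
#include <stdint.h>
#include <stdio.h>

#define REV_CHIP_MASK	0x00f0	/* assumed field position */
#define REV_CHIP_SHIFT	4

static const char *chip_ids[16] = {
	[7] = "SMSC LAN91C100",			/* subset of the table */
	[9] = "SMSC LAN91C110FD or LAN91C111FD",
};

int
main(void)
{
	uint16_t rev = 0x0091;	/* made-up REV register value */
	unsigned idx = (rev & REV_CHIP_MASK) >> REV_CHIP_SHIFT;

	printf("chip %u: %s\n", idx,
	    chip_ids[idx] != NULL ? chip_ids[idx] : "unknown");
	return (0);
}

/*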
*/ callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0); type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; sc->smc_reg_rid = 0; sc->smc_reg = bus_alloc_resource_anywhere(dev, type, &sc->smc_reg_rid, 16, RF_ACTIVE); if (sc->smc_reg == NULL) { error = ENXIO; goto done; } sc->smc_irq = bus_alloc_resource_anywhere(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 1, RF_ACTIVE | RF_SHAREABLE); if (sc->smc_irq == NULL) { error = ENXIO; goto done; } SMC_LOCK(sc); smc_reset(sc); SMC_UNLOCK(sc); smc_select_bank(sc, 3); val = smc_read_2(sc, REV); sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; sc->smc_rev = (val * REV_REV_MASK) >> REV_REV_SHIFT; if (bootverbose) device_printf(dev, "revision %x\n", sc->smc_rev); callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx, CALLOUT_RETURNUNLOCKED); if (sc->smc_chip >= REV_CHIP_91110FD) { (void)mii_attach(dev, &sc->smc_miibus, ifp, smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (sc->smc_miibus != NULL) { sc->smc_mii_tick = smc_mii_tick; sc->smc_mii_mediachg = smc_mii_mediachg; sc->smc_mii_mediaioctl = smc_mii_mediaioctl; } } smc_select_bank(sc, 1); eaddr[0] = smc_read_1(sc, IAR0); eaddr[1] = smc_read_1(sc, IAR1); eaddr[2] = smc_read_1(sc, IAR2); eaddr[3] = smc_read_1(sc, IAR3); eaddr[4] = smc_read_1(sc, IAR4); eaddr[5] = smc_read_1(sc, IAR5); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = smc_init; ifp->if_ioctl = smc_ioctl; ifp->if_start = smc_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = ifp->if_capenable = 0; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ether_ifattach(ifp, eaddr); /* Set up taskqueue */ TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp); TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp); TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp); sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->smc_tq); taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->smc_dev)); /* Mask all interrupts. 
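 */

/*
 * Minimal kernel-module sketch of the taskqueue pattern used in
 * smc_attach() above, spelled with plain taskqueue_enqueue() as this
 * change introduces; the module name, task priority, and thread
 * priority are arbitrary.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue *demo_tq;
static struct task demo_task;

static void
demo_task_fn(void *arg, int pending)
{
	printf("demo task ran (pending %d)\n", pending);
}

static int
demo_modevent(module_t mod, int type, void *arg)
{
	switch (type) {
	case MOD_LOAD:
		TASK_INIT(&demo_task, 0, demo_task_fn, NULL);
		demo_tq = taskqueue_create_fast("demo_tq", M_NOWAIT,
		    taskqueue_thread_enqueue, &demo_tq);
		taskqueue_start_threads(&demo_tq, 1, PI_NET, "demo taskq");
		/* Post-r296272 spelling; formerly taskqueue_enqueue_fast(). */
		taskqueue_enqueue(demo_tq, &demo_task);
		return (0);
	case MOD_UNLOAD:
		taskqueue_drain(demo_tq, &demo_task);
		taskqueue_free(demo_tq);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t demo_mod = { "tq_demo", demo_modevent, NULL };
DECLARE_MODULE(tq_demo, demo_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);

/*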
*/ sc->smc_mask = 0; smc_write_1(sc, MSK, 0); /* Wire up interrupt */ error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih); if (error != 0) goto done; done: if (error != 0) smc_detach(dev); return (error); } int smc_detach(device_t dev) { int type; struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); if (sc->smc_ifp != NULL) { ether_ifdetach(sc->smc_ifp); } callout_drain(&sc->smc_watchdog); callout_drain(&sc->smc_mii_tick_ch); #ifdef DEVICE_POLLING if (sc->smc_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->smc_ifp); #endif if (sc->smc_ih != NULL) bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih); if (sc->smc_tq != NULL) { taskqueue_drain(sc->smc_tq, &sc->smc_intr); taskqueue_drain(sc->smc_tq, &sc->smc_rx); taskqueue_drain(sc->smc_tq, &sc->smc_tx); taskqueue_free(sc->smc_tq); sc->smc_tq = NULL; } if (sc->smc_ifp != NULL) { if_free(sc->smc_ifp); } if (sc->smc_miibus != NULL) { device_delete_child(sc->smc_dev, sc->smc_miibus); bus_generic_detach(sc->smc_dev); } if (sc->smc_reg != NULL) { type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid, sc->smc_reg); } if (sc->smc_irq != NULL) bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid, sc->smc_irq); if (mtx_initialized(&sc->smc_mtx)) mtx_destroy(&sc->smc_mtx); return (0); } static void smc_start(struct ifnet *ifp) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); smc_start_locked(ifp); SMC_UNLOCK(sc); } static void smc_start_locked(struct ifnet *ifp) { struct smc_softc *sc; struct mbuf *m; u_int len, npages, spin_count; sc = ifp->if_softc; SMC_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (IFQ_IS_EMPTY(&ifp->if_snd)) return; /* * Grab the next packet. If it's too big, drop it. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); len = m_length(m, NULL); len += (len & 1); if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); return; /* XXX readcheck? */ } /* * Flag that we're busy. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->smc_pending = m; /* * Work out how many 256 byte "pages" we need. We have to include the * control data for the packet in this calculation. */ npages = (len + PKT_CTRL_DATA_LEN) >> 8; if (npages == 0) npages = 1; /* * Request memory. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages); /* * Spin briefly to see if the allocation succeeds. */ spin_count = TX_ALLOC_WAIT_TIME; do { if (smc_read_1(sc, IST) & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); break; } } while (--spin_count); /* * If the allocation is taking too long, unmask the alloc interrupt * and wait. */ if (spin_count == 0) { sc->smc_mask |= ALLOC_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); return; } - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); + taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } static void smc_task_tx(void *context, int pending) { struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *m0; u_int packet, len; int last_len; uint8_t *data; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); if (sc->smc_pending == NULL) { SMC_UNLOCK(sc); goto next_packet; } m = m0 = sc->smc_pending; sc->smc_pending = NULL; smc_select_bank(sc, 2); /* * Check the allocation result. 
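 */

/*
 * The 256-byte page computation from smc_start_locked() above, as a
 * standalone sketch. The PKT_CTRL_DATA_LEN value is an assumption here;
 * the driver's register header is authoritative.
 */
#include <stdio.h>

#define PKT_CTRL_DATA_LEN	6	/* assumed status+length+control overhead */

int
main(void)
{
	unsigned len, npages;

	for (len = 60; len <= 1514; len += 700) {
		npages = (len + PKT_CTRL_DATA_LEN) >> 8;	/* 256-byte pages */
		if (npages == 0)
			npages = 1;
		printf("len %4u -> %u page(s)\n", len, npages);
	}
	return (0);
}

/*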
packet = smc_read_1(sc, ARR); /* * If the allocation failed, requeue the packet and retry. */ if (packet & ARR_FAILED) { IFQ_DRV_PREPEND(&ifp->if_snd, m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); SMC_UNLOCK(sc); return; } /* * Tell the device to write to our packet number. */ smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR); /* * Tell the device how long the packet is (including control data). */ len = m_length(m, 0); len += PKT_CTRL_DATA_LEN; smc_write_2(sc, DATA0, 0); smc_write_2(sc, DATA0, len); /* * Push the data out to the device. */ data = NULL; last_len = 0; for (; m != NULL; m = m->m_next) { data = mtod(m, uint8_t *); smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2); last_len = m->m_len; } /* * Push out the control byte and the odd byte if needed. */ if ((len & 1) != 0 && data != NULL) smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]); else smc_write_2(sc, DATA0, 0); /* * Unmask the TX empty interrupt. */ sc->smc_mask |= TX_EMPTY_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); /* * Enqueue the packet. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE); callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc); /* * Finish up. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; SMC_UNLOCK(sc); BPF_MTAP(ifp, m0); m_freem(m0); next_packet: /* * See if there's anything else to do. */ smc_start(ifp); } static void smc_task_rx(void *context, int pending) { u_int packet, status, len; uint8_t *data; struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *mhead, *mtail; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; mhead = mtail = NULL; SMC_LOCK(sc); packet = smc_read_1(sc, FIFO_RX); while ((packet & FIFO_EMPTY) == 0) { /* * Grab an mbuf and attach a cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { break; } if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); break; } /* * Point to the start of the packet. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); /* * Grab status and packet length. */ status = smc_read_2(sc, DATA0); len = smc_read_2(sc, DATA0) & RX_LEN_MASK; len -= 6; if (status & RX_ODDFRM) len += 1; /* * Check for errors. */ if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) { smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); break; } /* * Set the mbuf up the way we want it. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */ m_adj(m, ETHER_ALIGN); /* * Pull the packet out of the device. Make sure we're in the * right bank first as things may have changed while we were * allocating our mbuf. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); data = mtod(m, uint8_t *); smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1); if (len & 1) { data += len & ~1; *data = smc_read_1(sc, DATA0); } /* * Tell the device we're done.
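 */

/*
 * Sketch of the final 16-bit word written above: the control byte is
 * packed into the high half, with a trailing odd data byte (if any) in
 * the low half. The CTRL_ODD value is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define CTRL_ODD	0x20	/* illustrative; see the driver's header */

int
main(void)
{
	uint8_t payload[] = { 0xde, 0xad, 0xbe };	/* odd length: 3 */
	unsigned len = sizeof(payload);
	uint16_t last;

	if (len & 1)
		last = (CTRL_ODD << 8) | payload[len - 1];
	else
		last = 0;
	printf("final 16-bit word: 0x%04x\n", last);	/* 0x20be */
	return (0);
}

/*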
*/ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if (m == NULL) { break; } if (mhead == NULL) { mhead = mtail = m; m->m_next = NULL; } else { mtail->m_next = m; mtail = m; } packet = smc_read_1(sc, FIFO_RX); } sc->smc_mask |= RCV_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); while (mhead != NULL) { m = mhead; mhead = mhead->m_next; m->m_next = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (*ifp->if_input)(ifp, m); } } #ifdef DEVICE_POLLING static void smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SMC_UNLOCK(sc); return; } SMC_UNLOCK(sc); if (cmd == POLL_AND_CHECK_STATUS) - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); + taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); } #endif static int smc_intr(void *context) { struct smc_softc *sc; uint32_t curbank; sc = (struct smc_softc *)context; /* * Save the current bank and restore it later in this function. */ curbank = (smc_read_2(sc, BSR) & BSR_BANK_MASK); /* * Block interrupts in order to let smc_task_intr kick in. */ smc_select_bank(sc, 2); smc_write_1(sc, MSK, 0); /* Restore bank */ smc_select_bank(sc, curbank); - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); + taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); return (FILTER_HANDLED); } static void smc_task_intr(void *context, int pending) { struct smc_softc *sc; struct ifnet *ifp; u_int status, packet, counter, tcr; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); smc_select_bank(sc, 2); /* * Find out what interrupts are flagged. */ status = smc_read_1(sc, IST) & sc->smc_mask; /* * Transmit error */ if (status & TX_INT) { /* * Kill off the packet if there is one and re-enable transmit. */ packet = smc_read_1(sc, FIFO_TX); if ((packet & FIFO_EMPTY) == 0) { callout_stop(&sc->smc_watchdog); smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_AUTO_INCR); smc_select_bank(sc, 0); tcr = smc_read_2(sc, EPHSR); #if 0 if ((tcr & EPHSR_TX_SUC) == 0) device_printf(sc->smc_dev, "bad packet\n"); #endif smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); tcr |= TCR_TXENA | TCR_PAD_EN; smc_write_2(sc, TCR, tcr); smc_select_bank(sc, 2); - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); + taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Ack the interrupt. */ smc_write_1(sc, ACK, TX_INT); } /* * Receive */ if (status & RCV_INT) { smc_write_1(sc, ACK, RCV_INT); sc->smc_mask &= ~RCV_INT; - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx); + taskqueue_enqueue(sc->smc_tq, &sc->smc_rx); } /* * Allocation */ if (status & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); sc->smc_mask &= ~ALLOC_INT; - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); + taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Receive overrun */ if (status & RX_OVRN_INT) { smc_write_1(sc, ACK, RX_OVRN_INT); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* * Transmit empty */ if (status & TX_EMPTY_INT) { smc_write_1(sc, ACK, TX_EMPTY_INT); sc->smc_mask &= ~TX_EMPTY_INT; callout_stop(&sc->smc_watchdog); /* * Update collision stats.
*/ smc_select_bank(sc, 0); counter = smc_read_2(sc, ECR); smc_select_bank(sc, 2); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, ((counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) + ((counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT)); /* * See if there are any packets to transmit. */ - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); + taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Update the interrupt mask. */ smc_select_bank(sc, 2); if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); } static uint32_t smc_mii_bitbang_read(device_t dev) { struct smc_softc *sc; uint32_t val; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_read called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); val = smc_read_2(sc, MGMT); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } static void smc_mii_bitbang_write(device_t dev, uint32_t val) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_write called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); smc_write_2(sc, MGMT, val); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } int smc_miibus_readreg(device_t dev, int phy, int reg) { struct smc_softc *sc; int val; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg); SMC_UNLOCK(sc); return (val); } int smc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data); SMC_UNLOCK(sc); return (0); } void smc_miibus_statchg(device_t dev) { struct smc_softc *sc; struct mii_data *mii; uint16_t tcr; sc = device_get_softc(dev); mii = device_get_softc(sc->smc_miibus); SMC_LOCK(sc); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) tcr |= TCR_SWFDUP; else tcr &= ~TCR_SWFDUP; smc_write_2(sc, TCR, tcr); SMC_UNLOCK(sc); } static int smc_mii_ifmedia_upd(struct ifnet *ifp) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return (ENXIO); mii = device_get_softc(sc->smc_miibus); return (mii_mediachg(mii)); } static void smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return; mii = device_get_softc(sc->smc_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void smc_mii_tick(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; if (sc->smc_miibus == NULL) return; SMC_UNLOCK(sc); mii_tick(device_get_softc(sc->smc_miibus)); callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc); } static void smc_mii_mediachg(struct smc_softc *sc) { if (sc->smc_miibus == NULL) return; mii_mediachg(device_get_softc(sc->smc_miibus)); } static int smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command) { struct mii_data *mii; if (sc->smc_miibus == NULL) return (EINVAL); mii = device_get_softc(sc->smc_miibus); return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command)); } static void smc_reset(struct smc_softc *sc) { u_int ctr; 
SMC_ASSERT_LOCKED(sc); smc_select_bank(sc, 2); /* * Mask all interrupts. */ smc_write_1(sc, MSK, 0); /* * Tell the device to reset. */ smc_select_bank(sc, 0); smc_write_2(sc, RCR, RCR_SOFT_RST); /* * Set up the configuration register. */ smc_select_bank(sc, 1); smc_write_2(sc, CR, CR_EPH_POWER_EN); DELAY(1); /* * Turn off transmit and receive. */ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); /* * Set up the control register. */ smc_select_bank(sc, 1); ctr = smc_read_2(sc, CTR); ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE; smc_write_2(sc, CTR, ctr); /* * Reset the MMU. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET); } static void smc_enable(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; /* * Set up the receive/PHY control register. */ smc_select_bank(sc, 0); smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT) | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT)); /* * Set up the transmit and receive control registers. */ smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN); smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC); /* * Set up the interrupt mask. */ smc_select_bank(sc, 2); sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT; if ((ifp->if_capenable & IFCAP_POLLING) != 0) smc_write_1(sc, MSK, sc->smc_mask); } static void smc_stop(struct smc_softc *sc) { SMC_ASSERT_LOCKED(sc); /* * Turn off callouts. */ callout_stop(&sc->smc_watchdog); callout_stop(&sc->smc_mii_tick_ch); /* * Mask all interrupts. */ smc_select_bank(sc, 2); sc->smc_mask = 0; smc_write_1(sc, MSK, 0); #ifdef DEVICE_POLLING ether_poll_deregister(sc->smc_ifp); sc->smc_ifp->if_capenable &= ~IFCAP_POLLING; sc->smc_ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT; #endif /* * Disable transmit and receive. 
*/ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } static void smc_watchdog(void *arg) { struct smc_softc *sc; sc = (struct smc_softc *)arg; device_printf(sc->smc_dev, "watchdog timeout\n"); - taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); + taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); } static void smc_init(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; SMC_LOCK(sc); smc_init_locked(sc); SMC_UNLOCK(sc); } static void smc_init_locked(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; smc_reset(sc); smc_enable(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); if (sc->smc_mii_tick != NULL) callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc); #ifdef DEVICE_POLLING SMC_UNLOCK(sc); ether_poll_register(smc_poll, ifp); SMC_LOCK(sc); ifp->if_capenable |= IFCAP_POLLING; ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; #endif } static int smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct smc_softc *sc; int error; sc = ifp->if_softc; error = 0; switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); } else { smc_init(sc); if (sc->smc_mii_mediachg != NULL) sc->smc_mii_mediachg(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX SMC_LOCK(sc); smc_setmcast(sc); SMC_UNLOCK(sc); */ error = EINVAL; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->smc_mii_mediaioctl == NULL) { error = EINVAL; break; } sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } Index: head/sys/dev/vr/if_vr.c =================================================================== --- head/sys/dev/vr/if_vr.c (revision 296271) +++ head/sys/dev/vr/if_vr.c (revision 296272) @@ -1,2654 +1,2654 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * VIA Rhine fast ethernet PCI NIC driver * * Supports various network adapters based on the VIA Rhine * and Rhine II PCI controllers, including the D-Link DFE530TX. * Datasheets are available at http://www.via.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The VIA Rhine controllers are similar in some respects to the * DEC tulip chips, though less complicated. The controller * uses an MII bus and an external physical layer interface. The * receiver has a one entry perfect filter and a 64-bit hash table * multicast filter. Transmit and receive descriptors are similar * to the tulip. * * Some Rhine chips have a serious flaw in their transmit DMA mechanism: * transmit buffers must be longword aligned. Unfortunately, * FreeBSD doesn't guarantee that mbufs will be filled in starting * at longword boundaries, so we have to do a buffer copy before * transmission. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" MODULE_DEPEND(vr, pci, 1, 1, 1); MODULE_DEPEND(vr, ether, 1, 1, 1); MODULE_DEPEND(vr, miibus, 1, 1, 1); /* Define to show Rx/Tx error status. */ #undef VR_SHOW_ERRORS #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types, their names & quirks.
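 */

/*
 * The vr_devs[] table below is scanned by vr_match() further down. The
 * same vendor/device table-walk pattern in standalone form; the two PCI
 * IDs shown are believed correct for VIA parts but serve only as
 * illustration here.
 */
#include <stdint.h>
#include <stdio.h>

struct dev_type {
	uint16_t	vid;
	uint16_t	did;
	const char	*name;
};

static const struct dev_type devs[] = {
	{ 0x1106, 0x3043, "VIA VT3043 Rhine I 10/100BaseTX" },
	{ 0x1106, 0x3065, "VIA VT6102 Rhine II 10/100BaseTX" },
	{ 0, 0, NULL }
};

static const char *
match(uint16_t vid, uint16_t did)
{
	const struct dev_type *t;

	/* Walk the table until the NULL sentinel entry. */
	for (t = devs; t->name != NULL; t++)
		if (t->vid == vid && t->did == did)
			return (t->name);
	return (NULL);
}

int
main(void)
{
	const char *name = match(0x1106, 0x3065);

	printf("%s\n", name != NULL ? name : "no match");
	return (0);
}

/*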
*/ #define VR_Q_NEEDALIGN (1<<0) #define VR_Q_CSUM (1<<1) #define VR_Q_CAM (1<<2) static const struct vr_type { u_int16_t vr_vid; u_int16_t vr_did; int vr_quirks; const char *vr_name; } vr_devs[] = { { VIA_VENDORID, VIA_DEVICEID_RHINE, VR_Q_NEEDALIGN, "VIA VT3043 Rhine I 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "VIA VT86C100A Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 0, "VIA VT6102 Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 0, "VIA VT6105 Rhine III 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, VR_Q_CSUM, "VIA VT6105M Rhine III 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "Delta Electronics Rhine II 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "Addtron Technology Rhine II 10/100BaseTX" }, { 0, 0, 0, NULL } }; static int vr_probe(device_t); static int vr_attach(device_t); static int vr_detach(device_t); static int vr_shutdown(device_t); static int vr_suspend(device_t); static int vr_resume(device_t); static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int vr_dma_alloc(struct vr_softc *); static void vr_dma_free(struct vr_softc *); static __inline void vr_discard_rxbuf(struct vr_rxdesc *); static int vr_newbuf(struct vr_softc *, int); #ifndef __NO_STRICT_ALIGNMENT static __inline void vr_fixup_rx(struct mbuf *); #endif static int vr_rxeof(struct vr_softc *); static void vr_txeof(struct vr_softc *); static void vr_tick(void *); static int vr_error(struct vr_softc *, uint16_t); static void vr_tx_underrun(struct vr_softc *); static int vr_intr(void *); static void vr_int_task(void *, int); static void vr_start(struct ifnet *); static void vr_start_locked(struct ifnet *); static int vr_encap(struct vr_softc *, struct mbuf **); static int vr_ioctl(struct ifnet *, u_long, caddr_t); static void vr_init(void *); static void vr_init_locked(struct vr_softc *); static void vr_tx_start(struct vr_softc *); static void vr_rx_start(struct vr_softc *); static int vr_tx_stop(struct vr_softc *); static int vr_rx_stop(struct vr_softc *); static void vr_stop(struct vr_softc *); static void vr_watchdog(struct vr_softc *); static int vr_ifmedia_upd(struct ifnet *); static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int vr_miibus_readreg(device_t, int, int); static int vr_miibus_writereg(device_t, int, int, int); static void vr_miibus_statchg(device_t); static void vr_cam_mask(struct vr_softc *, uint32_t, int); static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); static void vr_set_filter(struct vr_softc *); static void vr_reset(const struct vr_softc *); static int vr_tx_ring_init(struct vr_softc *); static int vr_rx_ring_init(struct vr_softc *); static void vr_setwol(struct vr_softc *); static void vr_clrwol(struct vr_softc *); static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); static const struct vr_tx_threshold_table { int tx_cfg; int bcr_cfg; int value; } vr_tx_threshold_tables[] = { { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } }; static device_method_t vr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vr_probe), DEVMETHOD(device_attach, vr_attach), DEVMETHOD(device_detach, vr_detach), 
DEVMETHOD(device_shutdown, vr_shutdown), DEVMETHOD(device_suspend, vr_suspend), DEVMETHOD(device_resume, vr_resume), /* MII interface */ DEVMETHOD(miibus_readreg, vr_miibus_readreg), DEVMETHOD(miibus_writereg, vr_miibus_writereg), DEVMETHOD(miibus_statchg, vr_miibus_statchg), DEVMETHOD_END }; static driver_t vr_driver = { "vr", vr_methods, sizeof(struct vr_softc) }; static devclass_t vr_devclass; DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); static int vr_miibus_readreg(device_t dev, int phy, int reg) { struct vr_softc *sc; int i; sc = device_get_softc(dev); /* Set the register address. */ CSR_WRITE_1(sc, VR_MIIADDR, reg); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); for (i = 0; i < VR_MII_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) break; } if (i == VR_MII_TIMEOUT) device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); return (CSR_READ_2(sc, VR_MIIDATA)); } static int vr_miibus_writereg(device_t dev, int phy, int reg, int data) { struct vr_softc *sc; int i; sc = device_get_softc(dev); /* Set the register address and data to write. */ CSR_WRITE_1(sc, VR_MIIADDR, reg); CSR_WRITE_2(sc, VR_MIIDATA, data); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); for (i = 0; i < VR_MII_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) break; } if (i == VR_MII_TIMEOUT) device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, reg); return (0); } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. */ static void vr_miibus_statchg(device_t dev) { struct vr_softc *sc; struct mii_data *mii; struct ifnet *ifp; int lfdx, mfdx; uint8_t cr0, cr1, fc; sc = device_get_softc(dev); mii = device_get_softc(sc->vr_miibus); ifp = sc->vr_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->vr_flags |= VR_F_LINK; break; default: break; } } if ((sc->vr_flags & VR_F_LINK) != 0) { cr0 = CSR_READ_1(sc, VR_CR0); cr1 = CSR_READ_1(sc, VR_CR1); mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; if (mfdx != lfdx) { if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx/Rx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; VR_UNLOCK(sc); return; } } if (lfdx) cr1 |= VR_CR1_FULLDUPLEX; else cr1 &= ~VR_CR1_FULLDUPLEX; CSR_WRITE_1(sc, VR_CR1, cr1); } fc = 0; /* Configure flow-control. */ if (sc->vr_revid >= REV_ID_VT6105_A0) { fc = CSR_READ_1(sc, VR_FLOWCR1); fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) fc |= VR_FLOWCR1_RXPAUSE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) { fc |= VR_FLOWCR1_TXPAUSE; sc->vr_flags |= VR_F_TXPAUSE; } CSR_WRITE_1(sc, VR_FLOWCR1, fc); } else if (sc->vr_revid >= REV_ID_VT6102_A) { /* No Tx pause capability available for Rhine II.
*/ fc = CSR_READ_1(sc, VR_MISC_CR0); fc &= ~VR_MISCCR0_RXPAUSE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) fc |= VR_MISCCR0_RXPAUSE; CSR_WRITE_1(sc, VR_MISC_CR0, fc); } vr_rx_start(sc); vr_tx_start(sc); } else { if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx/Rx shutdown error -- resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; } } } static void vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) { if (type == VR_MCAST_CAM) CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); else CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); CSR_WRITE_4(sc, VR_CAMMASK, mask); CSR_WRITE_1(sc, VR_CAMCTL, 0); } static int vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) { int i; if (type == VR_MCAST_CAM) { if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) return (EINVAL); CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); } else CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); /* Set CAM entry address. */ CSR_WRITE_1(sc, VR_CAMADDR, idx); /* Set CAM entry data. */ if (type == VR_MCAST_CAM) { for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); } else { CSR_WRITE_1(sc, VR_VCAM0, mac[0]); CSR_WRITE_1(sc, VR_VCAM1, mac[1]); } DELAY(10); /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); for (i = 0; i < VR_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) break; } if (i == VR_TIMEOUT) device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", __func__); CSR_WRITE_1(sc, VR_CAMCTL, 0); return (i == VR_TIMEOUT ? ETIMEDOUT : 0); } /* * Program the 64-bit multicast hash filter. */ static void vr_set_filter(struct vr_softc *sc) { struct ifnet *ifp; int h; uint32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; uint8_t rxfilt; int error, mcnt; uint32_t cam_mask; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; rxfilt = CSR_READ_1(sc, VR_RXCFG); rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI); if (ifp->if_flags & IFF_BROADCAST) rxfilt |= VR_RXCFG_RX_BROAD; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= VR_RXCFG_RX_MULTI; if (ifp->if_flags & IFF_PROMISC) rxfilt |= VR_RXCFG_RX_PROMISC; CSR_WRITE_1(sc, VR_RXCFG, rxfilt); CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); return; } /* Now program new ones. */ error = 0; mcnt = 0; if_maddr_rlock(ifp); if ((sc->vr_quirks & VR_Q_CAM) != 0) { /* * For hardware that has CAM capability, use the * 32-entry multicast perfect filter. */ cam_mask = 0; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; error = vr_cam_data(sc, VR_MCAST_CAM, mcnt, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (error != 0) { cam_mask = 0; break; } cam_mask |= 1 << mcnt; mcnt++; } vr_cam_mask(sc, VR_MCAST_CAM, cam_mask); } if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { /* * If there are too many multicast addresses or * setting the multicast CAM filter failed, fall back to * hash table based filtering.
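 */

/*
 * Sketch of the hash fallback below: each address hashes to a 6-bit
 * bucket (in the driver, ether_crc32_be() >> 26), which selects one bit
 * across the two 32-bit MAR registers. The bucket values here are made
 * up in place of real CRC results.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t hashes[2] = { 0, 0 };
	int buckets[] = { 3, 31, 32, 63 };	/* pretend CRC>>26 results */
	unsigned i;
	int h;

	for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++) {
		h = buckets[i];
		if (h < 32)
			hashes[0] |= 1U << h;		/* low word: MAR0 */
		else
			hashes[1] |= 1U << (h - 32);	/* high word: MAR1 */
	}
	printf("MAR0 0x%08x MAR1 0x%08x\n", hashes[0], hashes[1]);
	return (0);
}

/*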
*/ mcnt = 0; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } } if_maddr_runlock(ifp); if (mcnt > 0) rxfilt |= VR_RXCFG_RX_MULTI; CSR_WRITE_4(sc, VR_MAR0, hashes[0]); CSR_WRITE_4(sc, VR_MAR1, hashes[1]); CSR_WRITE_1(sc, VR_RXCFG, rxfilt); } static void vr_reset(const struct vr_softc *sc) { int i; /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); if (sc->vr_revid < REV_ID_VT6102_A) { /* VT86C100A needs more delay after reset. */ DELAY(100); } for (i = 0; i < VR_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) break; } if (i == VR_TIMEOUT) { if (sc->vr_revid < REV_ID_VT6102_A) device_printf(sc->vr_dev, "reset never completed!\n"); else { /* Use newer force reset command. */ device_printf(sc->vr_dev, "Using force reset command.\n"); VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); /* * Wait a little while for the chip to get its brains * in order. */ DELAY(2000); } } } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a match or NULL */ static const struct vr_type * vr_match(device_t dev) { const struct vr_type *t = vr_devs; for (t = vr_devs; t->vr_name != NULL; t++) if ((pci_get_vendor(dev) == t->vr_vid) && (pci_get_device(dev) == t->vr_did)) return (t); return (NULL); } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int vr_probe(device_t dev) { const struct vr_type *t; t = vr_match(dev); if (t != NULL) { device_set_desc(dev, t->vr_name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vr_attach(device_t dev) { struct vr_softc *sc; struct ifnet *ifp; const struct vr_type *t; uint8_t eaddr[ETHER_ADDR_LEN]; int error, rid; int i, phy, pmc; sc = device_get_softc(dev); sc->vr_dev = dev; t = vr_match(dev); KASSERT(t != NULL, ("Lost if_vr device match")); sc->vr_quirks = t->vr_quirks; device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, vr_sysctl_stats, "I", "Statistics"); error = 0; /* * Map control/status registers. */ pci_enable_busmaster(dev); sc->vr_revid = pci_get_revid(dev); device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); sc->vr_res_id = PCIR_BAR(0); sc->vr_res_type = SYS_RES_IOPORT; sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, &sc->vr_res_id, RF_ACTIVE); if (sc->vr_res == NULL) { device_printf(dev, "couldn't map ports\n"); error = ENXIO; goto fail; } /* Allocate interrupt. */ rid = 0; sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->vr_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Allocate ifnet structure. 
*/ ifp = sc->vr_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "couldn't allocate ifnet structure\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vr_ioctl; ifp->if_start = vr_start; ifp->if_init = vr_init; IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; IFQ_SET_READY(&ifp->if_snd); TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc); /* Configure Tx FIFO threshold. */ sc->vr_txthresh = VR_TXTHRESH_MIN; if (sc->vr_revid < REV_ID_VT6105_A0) { /* * Use store and forward mode for Rhine I/II. * Otherwise they produce a lot of Tx underruns and * it would take a while to get working FIFO threshold * value. */ sc->vr_txthresh = VR_TXTHRESH_MAX; } if ((sc->vr_quirks & VR_Q_CSUM) != 0) { ifp->if_hwassist = VR_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM; /* * To update checksum field the hardware may need to * store entire frames into FIFO before transmitting. */ sc->vr_txthresh = VR_TXTHRESH_MAX; } if (sc->vr_revid >= REV_ID_VT6102_A && pci_find_cap(dev, PCIY_PMG, &pmc) == 0) ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; /* Rhine supports oversized VLAN frame. */ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Windows may put the chip in suspend mode when it * shuts down. Be sure to kick it in the head to wake it * up again. */ if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); /* * Get station address. The way the Rhine chips work, * you're not allowed to directly access the EEPROM once * they've been programmed a special way. Consequently, * we need to read the node address from the PAR0 and PAR1 * registers. * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, * VR_CFGC and VR_CFGD such that memory mapped IO configured * by driver is reset to default state. */ VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(1); if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) break; } if (i == 0) device_printf(dev, "Reloading EEPROM timeout!\n"); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); /* Reset the adapter. */ vr_reset(sc); /* Ack intr & disable further interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, 0); if (sc->vr_revid >= REV_ID_VT6102_A) CSR_WRITE_2(sc, VR_MII_IMR, 0); if (sc->vr_revid < REV_ID_VT6102_A) { pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MODE10T, 1); } else { /* Report error instead of retrying forever. */ pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_PCEROPT, 1); /* Detect MII coding error. */ pci_write_config(dev, VR_PCI_MODE3, pci_read_config(dev, VR_PCI_MODE3, 1) | VR_MODE3_MIION, 1); if (sc->vr_revid >= REV_ID_VT6105_LOM && sc->vr_revid < REV_ID_VT6105M_A0) pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MODE10T, 1); /* Enable Memory-Read-Multiple. */ if (sc->vr_revid >= REV_ID_VT6107_A1 && sc->vr_revid < REV_ID_VT6105M_A0) pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MRDPL, 1); } /* Disable MII AUTOPOLL. */ VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); if (vr_dma_alloc(sc) != 0) { error = ENXIO; goto fail; } /* Do MII setup. 
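* On Rhine III (VT6105 and newer) the internal PHY is hardwired at * address 1; older chips report the PHY address in the low bits of * VR_PHYADDR, which is what the code below reads.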
*/ if (sc->vr_revid >= REV_ID_VT6105_A0) phy = 1; else phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd, vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Hook interrupt last to avoid having to lock softc. */ error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, vr_intr, NULL, sc, &sc->vr_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) vr_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int vr_detach(device_t dev) { struct vr_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->vr_ifp; KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded. */ if (device_is_attached(dev)) { VR_LOCK(sc); sc->vr_flags |= VR_F_DETACHED; vr_stop(sc); VR_UNLOCK(sc); callout_drain(&sc->vr_stat_callout); taskqueue_drain(taskqueue_fast, &sc->vr_inttask); ether_ifdetach(ifp); } if (sc->vr_miibus) device_delete_child(dev, sc->vr_miibus); bus_generic_detach(dev); if (sc->vr_intrhand) bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); if (sc->vr_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); if (sc->vr_res) bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, sc->vr_res); if (ifp) if_free(ifp); vr_dma_free(sc); mtx_destroy(&sc->vr_mtx); return (0); } struct vr_dmamap_arg { bus_addr_t vr_busaddr; }; static void vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct vr_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->vr_busaddr = segs[0].ds_addr; } static int vr_dma_alloc(struct vr_softc *sc) { struct vr_dmamap_arg ctx; struct vr_txdesc *txd; struct vr_rxdesc *rxd; bus_size_t tx_alignment; int error, i; /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->vr_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_parent_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. 
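* (This tag and the ones below are children of the parent tag created * above, so they inherit its 32-bit address limit; the descriptors * only carry 32-bit pointers via VR_ADDR_LO.)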
*/ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VR_TX_RING_SIZE, /* maxsize */ 1, /* nsegments */ VR_TX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_tx_ring_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VR_RX_RING_SIZE, /* maxsize */ 1, /* nsegments */ VR_RX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_rx_ring_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); goto fail; } if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) tx_alignment = sizeof(uint32_t); else tx_alignment = 1; /* Create tag for Tx buffers. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ tx_alignment, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * VR_MAXFRAGS, /* maxsize */ VR_MAXFRAGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_tx_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RX_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_rx_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); if (error != 0) { device_printf(sc->vr_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.vr_busaddr = 0; error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); if (error != 0 || ctx.vr_busaddr == 0) { device_printf(sc->vr_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); if (error != 0) { device_printf(sc->vr_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.vr_busaddr = 0; error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); if (error != 0 || ctx.vr_busaddr == 0) { device_printf(sc->vr_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; /* Create DMA maps for Tx buffers. 
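* One map is created per descriptor; the Rx side below also creates a * spare map so that vr_newbuf() can load a fresh cluster first and * swap maps only on success, leaving the old mbuf usable on failure.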
*/ for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, &sc->vr_cdata.vr_rx_sparemap)) != 0) { device_printf(sc->vr_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void vr_dma_free(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_rxdesc *rxd; int i; /* Tx ring. */ if (sc->vr_cdata.vr_tx_ring_tag) { if (sc->vr_rdata.vr_tx_ring_paddr) bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map); if (sc->vr_rdata.vr_tx_ring) bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, sc->vr_rdata.vr_tx_ring, sc->vr_cdata.vr_tx_ring_map); sc->vr_rdata.vr_tx_ring = NULL; sc->vr_rdata.vr_tx_ring_paddr = 0; bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); sc->vr_cdata.vr_tx_ring_tag = NULL; } /* Rx ring. */ if (sc->vr_cdata.vr_rx_ring_tag) { if (sc->vr_rdata.vr_rx_ring_paddr) bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map); if (sc->vr_rdata.vr_rx_ring) bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, sc->vr_rdata.vr_rx_ring, sc->vr_cdata.vr_rx_ring_map); sc->vr_rdata.vr_rx_ring = NULL; sc->vr_rdata.vr_rx_ring_paddr = 0; bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); sc->vr_cdata.vr_rx_ring_tag = NULL; } /* Tx buffers. */ if (sc->vr_cdata.vr_tx_tag) { for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); sc->vr_cdata.vr_tx_tag = NULL; } /* Rx buffers. */ if (sc->vr_cdata.vr_rx_tag) { for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->vr_cdata.vr_rx_sparemap) { bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, sc->vr_cdata.vr_rx_sparemap); sc->vr_cdata.vr_rx_sparemap = 0; } bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); sc->vr_cdata.vr_rx_tag = NULL; } if (sc->vr_cdata.vr_parent_tag) { bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); sc->vr_cdata.vr_parent_tag = NULL; } } /* * Initialize the transmit descriptors. */ static int vr_tx_ring_init(struct vr_softc *sc) { struct vr_ring_data *rd; struct vr_txdesc *txd; bus_addr_t addr; int i; sc->vr_cdata.vr_tx_prod = 0; sc->vr_cdata.vr_tx_cons = 0; sc->vr_cdata.vr_tx_cnt = 0; sc->vr_cdata.vr_tx_pkts = 0; rd = &sc->vr_rdata; bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); for (i = 0; i < VR_TX_RING_CNT; i++) { if (i == VR_TX_RING_CNT - 1) addr = VR_TX_RING_ADDR(sc, 0); else addr = VR_TX_RING_ADDR(sc, i + 1); rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); txd = &sc->vr_cdata.vr_txdesc[i]; txd->tx_m = NULL; } bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. 
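* (As in vr_tx_ring_init() above, each descriptor's vr_nextphys field * is pre-loaded with the bus address of its successor.)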
Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int vr_rx_ring_init(struct vr_softc *sc) { struct vr_ring_data *rd; struct vr_rxdesc *rxd; bus_addr_t addr; int i; sc->vr_cdata.vr_rx_cons = 0; rd = &sc->vr_rdata; bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; rxd->rx_m = NULL; rxd->desc = &rd->vr_rx_ring[i]; if (i == VR_RX_RING_CNT - 1) addr = VR_RX_RING_ADDR(sc, 0); else addr = VR_RX_RING_ADDR(sc, i + 1); rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); if (vr_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static __inline void vr_discard_rxbuf(struct vr_rxdesc *rxd) { struct vr_desc *desc; desc = rxd->desc; desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); desc->vr_status = htole32(VR_RXSTAT_OWN); } /* * Initialize an RX descriptor and attach an MBUF cluster. * Note: the length fields are only 11 bits wide, which means the * largest size we can specify is 2047. This is important because * MCLBYTES is 2048, so we have to subtract one otherwise we'll * overflow the field and make a mess. */ static int vr_newbuf(struct vr_softc *sc, int idx) { struct vr_desc *desc; struct vr_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, sizeof(uint64_t)); if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->vr_cdata.vr_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; sc->vr_cdata.vr_rx_sparemap = map; bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; desc = rxd->desc; desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); desc->vr_status = htole32(VR_RXSTAT_OWN); return (0); } #ifndef __NO_STRICT_ALIGNMENT static __inline void vr_fixup_rx(struct mbuf *m) { uint16_t *src, *dst; int i; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= ETHER_ALIGN; } #endif /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ static int vr_rxeof(struct vr_softc *sc) { struct vr_rxdesc *rxd; struct mbuf *m; struct ifnet *ifp; struct vr_desc *cur_rx; int cons, prog, total_len, rx_npkts; uint32_t rxstat, rxctl; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; cons = sc->vr_cdata.vr_rx_cons; rx_npkts = 0; bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; rxstat = le32toh(cur_rx->vr_status); rxctl = le32toh(cur_rx->vr_ctl); if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) break; prog++; rxd = &sc->vr_cdata.vr_rxdesc[cons]; m = rxd->rx_m; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. * We don't support SG in the Rx path yet, so discard * partial frames. */ if ((rxstat & VR_RXSTAT_RX_OK) == 0 || (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); sc->vr_stat.rx_errors++; if (rxstat & VR_RXSTAT_CRCERR) sc->vr_stat.rx_crc_errors++; if (rxstat & VR_RXSTAT_FRAMEALIGNERR) sc->vr_stat.rx_alignment++; if (rxstat & VR_RXSTAT_FIFOOFLOW) sc->vr_stat.rx_fifo_overflows++; if (rxstat & VR_RXSTAT_GIANT) sc->vr_stat.rx_giants++; if (rxstat & VR_RXSTAT_RUNT) sc->vr_stat.rx_runts++; if (rxstat & VR_RXSTAT_BUFFERR) sc->vr_stat.rx_no_buffers++; #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); #endif vr_discard_rxbuf(rxd); continue; } if (vr_newbuf(sc, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); sc->vr_stat.rx_errors++; sc->vr_stat.rx_no_mbufs++; vr_discard_rxbuf(rxd); continue; } /* * XXX The VIA Rhine chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len = VR_RXBYTES(rxstat); total_len -= ETHER_CRC_LEN; m->m_pkthdr.len = m->m_len = total_len; #ifndef __NO_STRICT_ALIGNMENT /* * RX buffers must be 32-bit aligned. * Ignore the alignment problems on non-strict alignment * platforms. The performance hit incurred due to unaligned * accesses is much smaller than the hit produced by forcing * buffer copies all the time. */ vr_fixup_rx(m); #endif m->m_pkthdr.rcvif = ifp; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); sc->vr_stat.rx_ok++; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && (rxstat & VR_RXSTAT_FRAG) == 0 && (rxctl & VR_RXCTL_IP) != 0) { /* Checksum is valid for non-fragmented IP packets. */ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) m->m_pkthdr.csum_data = 0xffff; } } } VR_UNLOCK(sc); (*ifp->if_input)(ifp, m); VR_LOCK(sc); rx_npkts++; } if (prog > 0) { /* * Let the controller know how many RX buffers * have been posted, but avoid the expensive register * access if Tx pause capability was not negotiated * with the link partner.
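* Writing the number of descriptors replenished in this call to * VR_FLOWCR0 credits the controller with that many newly posted Rx * buffers, which it factors into its pause/XON decisions; see the * residue-counter description in vr_init_locked().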
*/ if ((sc->vr_flags & VR_F_TXPAUSE) != 0) { if (prog >= VR_RX_RING_CNT) prog = VR_RX_RING_CNT - 1; CSR_WRITE_1(sc, VR_FLOWCR0, prog); } sc->vr_cdata.vr_rx_cons = cons; bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } return (rx_npkts); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void vr_txeof(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_desc *cur_tx; struct ifnet *ifp; uint32_t txctl, txstat; int cons, prod; VR_LOCK_ASSERT(sc); cons = sc->vr_cdata.vr_tx_cons; prod = sc->vr_cdata.vr_tx_prod; if (cons == prod) return; bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ifp = sc->vr_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; txctl = le32toh(cur_tx->vr_ctl); txstat = le32toh(cur_tx->vr_status); if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) break; sc->vr_cdata.vr_tx_cnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Only the first descriptor in the chain is valid. */ if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) continue; txd = &sc->vr_cdata.vr_txdesc[cons]; KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", __func__)); if ((txstat & VR_TXSTAT_ERRSUM) != 0) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc->vr_stat.tx_errors++; if ((txstat & VR_TXSTAT_ABRT) != 0) { /* Give up and restart Tx. */ sc->vr_stat.tx_abort++; bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; VR_INC(cons, VR_TX_RING_CNT); sc->vr_cdata.vr_tx_cons = cons; if (vr_tx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; return; } vr_tx_start(sc); break; } if ((sc->vr_revid < REV_ID_VT3071_A && (txstat & VR_TXSTAT_UNDERRUN)) || (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { sc->vr_stat.tx_underrun++; /* Retry and restart Tx. 
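* The descriptor is handed back to the chip for another attempt: * vr_tx_cnt is restored, the consumer index is left at the failed * descriptor, VR_TXSTAT_OWN is set again, and vr_tx_underrun() raises * the Tx threshold before restarting the transmitter.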
*/ sc->vr_cdata.vr_tx_cnt++; sc->vr_cdata.vr_tx_cons = cons; cur_tx->vr_status = htole32(VR_TXSTAT_OWN); bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); vr_tx_underrun(sc); return; } if ((txstat & VR_TXSTAT_DEFER) != 0) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); sc->vr_stat.tx_collisions++; } if ((txstat & VR_TXSTAT_LATECOLL) != 0) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); sc->vr_stat.tx_late_collisions++; } } else { sc->vr_stat.tx_ok++; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); if (sc->vr_revid < REV_ID_VT3071_A) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & VR_TXSTAT_COLLCNT) >> 3); sc->vr_stat.tx_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3; } else { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f)); sc->vr_stat.tx_collisions += (txstat & 0x0f); } m_freem(txd->tx_m); txd->tx_m = NULL; } sc->vr_cdata.vr_tx_cons = cons; if (sc->vr_cdata.vr_tx_cnt == 0) sc->vr_watchdog_timer = 0; } static void vr_tick(void *xsc) { struct vr_softc *sc; struct mii_data *mii; sc = (struct vr_softc *)xsc; VR_LOCK_ASSERT(sc); if ((sc->vr_flags & VR_F_RESTART) != 0) { device_printf(sc->vr_dev, "restarting\n"); sc->vr_stat.num_restart++; sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vr_init_locked(sc); sc->vr_flags &= ~VR_F_RESTART; } mii = device_get_softc(sc->vr_miibus); mii_tick(mii); if ((sc->vr_flags & VR_F_LINK) == 0) vr_miibus_statchg(sc->vr_dev); vr_watchdog(sc); callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); } #ifdef DEVICE_POLLING static poll_handler_t vr_poll; static poll_handler_t vr_poll_locked; static int vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc; int rx_npkts; sc = ifp->if_softc; rx_npkts = 0; VR_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) rx_npkts = vr_poll_locked(ifp, cmd, count); VR_UNLOCK(sc); return (rx_npkts); } static int vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc; int rx_npkts; sc = ifp->if_softc; VR_LOCK_ASSERT(sc); sc->rxcycles = count; rx_npkts = vr_rxeof(sc); vr_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* Also check status register. */ status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) return (rx_npkts); if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW)) != 0) { if (vr_error(sc, status) != 0) return (rx_npkts); } if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", __func__, status, VR_ISR_ERR_BITS); #endif vr_rx_start(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ /* Back off the transmit threshold. 
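* Each underrun steps vr_txthresh one entry up * vr_tx_threshold_tables[]; once VR_TXTHRESH_MAX is reached the chip * runs in store-and-forward mode, after which underruns should no * longer occur.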
*/ static void vr_tx_underrun(struct vr_softc *sc) { int thresh; device_printf(sc->vr_dev, "Tx underrun -- "); if (sc->vr_txthresh < VR_TXTHRESH_MAX) { thresh = sc->vr_txthresh; sc->vr_txthresh++; if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { sc->vr_txthresh = VR_TXTHRESH_MAX; printf("using store and forward mode\n"); } else printf("increasing Tx threshold(%d -> %d)\n", vr_tx_threshold_tables[thresh].value, vr_tx_threshold_tables[thresh + 1].value); } else printf("\n"); sc->vr_stat.tx_underrun++; if (vr_tx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; return; } vr_tx_start(sc); } static int vr_intr(void *arg) { struct vr_softc *sc; uint16_t status; sc = (struct vr_softc *)arg; status = CSR_READ_2(sc, VR_ISR); if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) return (FILTER_STRAY); /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); - taskqueue_enqueue_fast(taskqueue_fast, &sc->vr_inttask); + taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask); return (FILTER_HANDLED); } static void vr_int_task(void *arg, int npending) { struct vr_softc *sc; struct ifnet *ifp; uint16_t status; sc = (struct vr_softc *)arg; VR_LOCK(sc); if ((sc->vr_flags & VR_F_SUSPENDED) != 0) goto done_locked; status = CSR_READ_2(sc, VR_ISR); ifp = sc->vr_ifp; #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) goto done_locked; #endif /* Suppress unwanted interrupts. */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (sc->vr_flags & VR_F_RESTART) != 0) { CSR_WRITE_2(sc, VR_IMR, 0); CSR_WRITE_2(sc, VR_ISR, status); goto done_locked; } for (; (status & VR_INTRS) != 0;) { CSR_WRITE_2(sc, VR_ISR, status); if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW)) != 0) { if (vr_error(sc, status) != 0) { VR_UNLOCK(sc); return; } } vr_rxeof(sc); if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", __func__, status, VR_ISR_ERR_BITS); #endif /* Restart Rx if RxDMA SM was stopped. */ vr_rx_start(sc); } vr_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); status = CSR_READ_2(sc, VR_ISR); } /* Re-enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); done_locked: VR_UNLOCK(sc); } static int vr_error(struct vr_softc *sc, uint16_t status) { uint16_t pcis; status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; if ((status & VR_ISR_BUSERR) != 0) { status &= ~VR_ISR_BUSERR; sc->vr_stat.bus_errors++; /* Disable further interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0); pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " "resetting\n", pcis); pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); sc->vr_flags |= VR_F_RESTART; return (EAGAIN); } if ((status & VR_ISR_LINKSTAT2) != 0) { /* Link state change, duplex changes etc. */ status &= ~VR_ISR_LINKSTAT2; } if ((status & VR_ISR_STATSOFLOW) != 0) { status &= ~VR_ISR_STATSOFLOW; if (sc->vr_revid >= REV_ID_VT6105M_A0) { /* Update MIB counters. */ } } if (status != 0) device_printf(sc->vr_dev, "unhandled interrupt, status = 0x%04x\n", status); return (0); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. 
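* Roughly: defragment the chain when the chip requires longword * alignment, manually pad runts out to VR_MIN_FRAMELEN, then load the * mbuf chain and turn each DMA segment into one Tx descriptor.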
*/ static int vr_encap(struct vr_softc *sc, struct mbuf **m_head) { struct vr_txdesc *txd; struct vr_desc *desc; struct mbuf *m; bus_dma_segment_t txsegs[VR_MAXFRAGS]; uint32_t csum_flags, txctl; int error, i, nsegs, prod, si; int padlen; VR_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * Some VIA Rhine chips want packet buffers to be longword * aligned, but very often our mbufs aren't. Rather than * waste time trying to decide when to copy and when not * to copy, just do it all the time. */ if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } /* * The Rhine chip doesn't auto-pad, so we have to make * sure to pad short frames out to the minimum frame length * ourselves. */ if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { m = *m_head; padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { m = m_defrag(m, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } /* * Manually pad short frames, and zero the pad space * to avoid leaking data. */ bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); m->m_pkthdr.len += padlen; m->m_len = m->m_pkthdr.len; *m_head = m; } prod = sc->vr_cdata.vr_tx_prod; txd = &sc->vr_cdata.vr_txdesc[prod]; error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); return (ENOBUFS); } txd->tx_m = *m_head; bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* Set checksum offload. */ csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= VR_TXCTL_IPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) csum_flags |= VR_TXCTL_TCPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) csum_flags |= VR_TXCTL_UDPCSUM; } /* * Quite contrary to the datasheet for the VIA Rhine, the VR_TXCTL_TLINK bit * is required for all descriptors regardless of single or * multiple buffers. Also the VR_TXSTAT_OWN bit is valid only for * the first descriptor of a multi-fragment frame. Without * it the VIA Rhine chip generates Tx underrun interrupts and can't * send any frames. */ si = prod; for (i = 0; i < nsegs; i++) { desc = &sc->vr_rdata.vr_tx_ring[prod]; desc->vr_status = 0; txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; if (i == 0) txctl |= VR_TXCTL_FIRSTFRAG; desc->vr_ctl = htole32(txctl); desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); sc->vr_cdata.vr_tx_cnt++; VR_INC(prod, VR_TX_RING_CNT); } /* Update producer index.
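* prod now points one slot past the last descriptor just filled, so * stepping back by one modulo the ring size, (prod + VR_TX_RING_CNT - * 1) % VR_TX_RING_CNT, recovers that last descriptor for the EOP and * interrupt-request bits.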
*/ sc->vr_cdata.vr_tx_prod = prod; prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; desc = &sc->vr_rdata.vr_tx_ring[prod]; /* * Set EOP on the last descriptor and request a Tx completion * interrupt for every VR_TX_INTR_THRESH-th frame. */ VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); if (sc->vr_cdata.vr_tx_pkts == 0) desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); else desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); /* Lastly, hand ownership of the first descriptor to the hardware. */ desc = &sc->vr_rdata.vr_tx_ring[si]; desc->vr_status |= htole32(VR_TXSTAT_OWN); /* Sync descriptors. */ bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void vr_start(struct ifnet *ifp) { struct vr_softc *sc; sc = ifp->if_softc; VR_LOCK(sc); vr_start_locked(ifp); VR_UNLOCK(sc); } static void vr_start_locked(struct ifnet *ifp) { struct vr_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; VR_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (vr_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Tell the chip to start transmitting. */ VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); /* Set a timeout in case the chip goes out to lunch. */ sc->vr_watchdog_timer = 5; } } static void vr_init(void *xsc) { struct vr_softc *sc; sc = (struct vr_softc *)xsc; VR_LOCK(sc); vr_init_locked(sc); VR_UNLOCK(sc); } static void vr_init_locked(struct vr_softc *sc) { struct ifnet *ifp; struct mii_data *mii; bus_addr_t addr; int i; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; mii = device_get_softc(sc->vr_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* Cancel pending I/O and free all RX/TX buffers. */ vr_stop(sc); vr_reset(sc); /* Set our station address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]); /* Set DMA size. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); /* * BCR0 and BCR1 can override the RXCFG and TXCFG registers, * so we must set both. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg); VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg); /* Init circular RX list. */ if (vr_rx_ring_init(sc) != 0) { device_printf(sc->vr_dev, "initialization failed: no memory for rx buffers\n"); vr_stop(sc); return; } /* Init tx descriptors. */ vr_tx_ring_init(sc); if ((sc->vr_quirks & VR_Q_CAM) != 0) { uint8_t vcam[2] = { 0, 0 }; /* Disable VLAN hardware tag insertion/stripping. */ VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL); /* Disable VLAN hardware filtering.
*/ VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB); /* Disable all CAM entries. */ vr_cam_mask(sc, VR_MCAST_CAM, 0); vr_cam_mask(sc, VR_VLAN_CAM, 0); /* Enable the first VLAN CAM. */ vr_cam_data(sc, VR_VLAN_CAM, 0, vcam); vr_cam_mask(sc, VR_VLAN_CAM, 1); } /* * Set up receive filter. */ vr_set_filter(sc); /* * Load the address of the RX ring. */ addr = VR_RX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); /* * Load the address of the TX ring. */ addr = VR_TX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); /* Default : full-duplex, no Tx poll. */ CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); /* Set flow-control parameters for Rhine III. */ if (sc->vr_revid >= REV_ID_VT6105_A0) { /* * Configure the Rx buffer count available for incoming * packets. * Even though the data sheet says almost nothing about * this register, it should be updated * whenever the driver adds new RX buffers to the controller. * Otherwise, the XON frame is not sent to the link partner * even if the controller has enough RX buffers, and you * would be isolated from the network. * The controller is not smart enough to know the number * of available RX buffers, so the driver has to let the * controller know how many RX buffers are posted. * In other words, this register works like a residue * counter for RX buffers and should be initialized * to the total number of RX buffers - 1 before * enabling the RX MAC. Note, this register is 8 bits wide, so * it effectively limits the maximum number of RX * buffers the controller can be configured with to 255. */ CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1); /* * Tx pause low threshold : 8 free receive buffers * Tx pause XON high threshold : 24 free receive buffers */ CSR_WRITE_1(sc, VR_FLOWCR1, VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF); /* Set Tx pause timer. */ CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); } /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, VR_CR0, VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); CSR_WRITE_2(sc, VR_ISR, 0xFFFF); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, VR_IMR, 0); else #endif /* * Enable interrupts and disable MII intrs. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (sc->vr_revid > REV_ID_VT6102_A) CSR_WRITE_2(sc, VR_MII_IMR, 0); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); mii_mediachg(mii); callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); } /* * Set media options. */ static int vr_ifmedia_upd(struct ifnet *ifp) { struct vr_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); error = mii_mediachg(mii); VR_UNLOCK(sc); return (error); } /* * Report current media status.
*/ static void vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vr_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vr_miibus); VR_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { VR_UNLOCK(sc); return; } mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; VR_UNLOCK(sc); } static int vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct vr_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (command) { case SIOCSIFFLAGS: VR_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->vr_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) vr_set_filter(sc); } else { if ((sc->vr_flags & VR_F_DETACHED) == 0) vr_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vr_stop(sc); } sc->vr_if_flags = ifp->if_flags; VR_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: VR_LOCK(sc); vr_set_filter(sc); VR_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vr_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(vr_poll, ifp); if (error != 0) break; VR_LOCK(sc); /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; VR_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ VR_LOCK(sc); CSR_WRITE_2(sc, VR_IMR, VR_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; VR_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_TXCSUM) != 0 && (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) ifp->if_hwassist |= VR_CSUM_FEATURES; else ifp->if_hwassist &= ~VR_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (IFCAP_RXCSUM & ifp->if_capabilities) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_WOL_UCAST) != 0 && (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void vr_watchdog(struct vr_softc *sc) { struct ifnet *ifp; VR_LOCK_ASSERT(sc); if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) return; ifp = sc->vr_ifp; /* * Reclaim first, as we don't request an interrupt for every packet.
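* Only if descriptors remain outstanding after the reclaim is the * timeout treated as real; when no link is up it is reported as a * missed-link timeout before the interface is reinitialized.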
*/ vr_txeof(sc); if (sc->vr_cdata.vr_tx_cnt == 0) return; if ((sc->vr_flags & VR_F_LINK) == 0) { if (bootverbose) if_printf(sc->vr_ifp, "watchdog timeout " "(missed link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vr_init_locked(sc); return; } if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vr_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); } static void vr_tx_start(struct vr_softc *sc) { bus_addr_t addr; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) == 0) { addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); cmd |= VR_CR0_TX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); } if (sc->vr_cdata.vr_tx_cnt != 0) { sc->vr_watchdog_timer = 5; VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); } } static void vr_rx_start(struct vr_softc *sc) { bus_addr_t addr; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) == 0) { addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); cmd |= VR_CR0_RX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); } CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); } static int vr_tx_stop(struct vr_softc *sc) { int i; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) != 0) { cmd &= ~VR_CR0_TX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(5); cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) == 0) break; } if (i == 0) return (ETIMEDOUT); } return (0); } static int vr_rx_stop(struct vr_softc *sc) { int i; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) != 0) { cmd &= ~VR_CR0_RX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(5); cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) == 0) break; } if (i == 0) return (ETIMEDOUT); } return (0); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void vr_stop(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_rxdesc *rxd; struct ifnet *ifp; int i; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; sc->vr_watchdog_timer = 0; callout_stop(&sc->vr_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); if (vr_rx_stop(sc) != 0) device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); if (vr_tx_stop(sc) != 0) device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); /* Clear pending interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, 0x0000); CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int vr_shutdown(device_t dev) { return (vr_suspend(dev)); } static int vr_suspend(device_t dev) { struct vr_softc *sc; sc = device_get_softc(dev); VR_LOCK(sc); vr_stop(sc); vr_setwol(sc); sc->vr_flags |= VR_F_SUSPENDED; VR_UNLOCK(sc); return (0); } static int vr_resume(device_t dev) { struct vr_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); VR_LOCK(sc); ifp = sc->vr_ifp; vr_clrwol(sc); vr_reset(sc); if (ifp->if_flags & IFF_UP) vr_init_locked(sc); sc->vr_flags &= ~VR_F_SUSPENDED; VR_UNLOCK(sc); return (0); } static void vr_setwol(struct vr_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; VR_LOCK_ASSERT(sc); if (sc->vr_revid < REV_ID_VT6102_A || pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->vr_ifp; /* Clear WOL configuration. */ CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); if (sc->vr_revid > REV_ID_VT6105_B0) { /* Newer Rhine III supports two additional patterns. */ CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); } if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); /* * It seems that multicast wakeup frames require programming the pattern * registers and a valid CRC as well as a pattern mask for each pattern. * While it's possible to set up such a pattern, it would complicate * the WOL configuration, so ignore multicast wakeup frames. */ if ((ifp->if_capenable & IFCAP_WOL) != 0) { CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); v = CSR_READ_1(sc, VR_STICKHW); CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); } /* Put the hardware to sleep. */ v = CSR_READ_1(sc, VR_STICKHW); v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; CSR_WRITE_1(sc, VR_STICKHW, v); /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void vr_clrwol(struct vr_softc *sc) { uint8_t v; VR_LOCK_ASSERT(sc); if (sc->vr_revid < REV_ID_VT6102_A) return; /* Take the hardware out of sleep. */ v = CSR_READ_1(sc, VR_STICKHW); v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); CSR_WRITE_1(sc, VR_STICKHW, v); /* Clear the WOL configuration, as WOL may interfere with normal operation. */ CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); if (sc->vr_revid > REV_ID_VT6105_B0) { /* Newer Rhine III supports two additional patterns.
*/ CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); } } static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct vr_softc *sc; struct vr_statistics *stat; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (result == 1) { sc = (struct vr_softc *)arg1; stat = &sc->vr_stat; printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); printf("Outbound good frames : %ju\n", (uintmax_t)stat->tx_ok); printf("Inbound good frames : %ju\n", (uintmax_t)stat->rx_ok); printf("Outbound errors : %u\n", stat->tx_errors); printf("Inbound errors : %u\n", stat->rx_errors); printf("Inbound no buffers : %u\n", stat->rx_no_buffers); printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs); printf("Inbound FIFO overflows : %d\n", stat->rx_fifo_overflows); printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); printf("Inbound frame alignment errors : %u\n", stat->rx_alignment); printf("Inbound giant frames : %u\n", stat->rx_giants); printf("Inbound runt frames : %u\n", stat->rx_runts); printf("Outbound aborted with excessive collisions : %u\n", stat->tx_abort); printf("Outbound collisions : %u\n", stat->tx_collisions); printf("Outbound late collisions : %u\n", stat->tx_late_collisions); printf("Outbound underrun : %u\n", stat->tx_underrun); printf("PCI bus errors : %u\n", stat->bus_errors); printf("driver restarted due to Rx/Tx shutdown failure : %u\n", stat->num_restart); } return (error); } Index: head/sys/dev/xen/netfront/netfront.c =================================================================== --- head/sys/dev/xen/netfront/netfront.c (revision 296271) +++ head/sys/dev/xen/netfront/netfront.c (revision 296272) @@ -1,2349 +1,2349 @@ /*- * Copyright (c) 2004-2006 Kip Macy * Copyright (c) 2015 Wei Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_if.h" /* Features supported by all backends. TSO and LRO can be negotiated */ #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) /* * Should the driver do LRO on the RX end * this can be toggled on the fly, but the * interface must be reset (down/up) for it * to take effect. */ static int xn_enable_lro = 1; TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro); /* * Number of pairs of queues. */ static unsigned long xn_num_queues = 4; TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues); /** * \brief The maximum allowed data fragments in a single transmit * request. * * This limit is imposed by the backend driver. We assume here that * we are dealing with a Linux driver domain and have set our limit * to mirror the Linux MAX_SKB_FRAGS constant. */ #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2) #define RX_COPY_THRESHOLD 256 #define net_ratelimit() 0 struct netfront_rxq; struct netfront_txq; struct netfront_info; struct netfront_rx_info; static void xn_txeof(struct netfront_txq *); static void xn_rxeof(struct netfront_rxq *); static void xn_alloc_rx_buffers(struct netfront_rxq *); static void xn_release_rx_bufs(struct netfront_rxq *); static void xn_release_tx_bufs(struct netfront_txq *); static void xn_rxq_intr(void *); static void xn_txq_intr(void *); static int xn_intr(void *); static inline int xn_count_frags(struct mbuf *m); static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *); static int xn_ioctl(struct ifnet *, u_long, caddr_t); static void xn_ifinit_locked(struct netfront_info *); static void xn_ifinit(void *); static void xn_stop(struct netfront_info *); static void xn_query_features(struct netfront_info *np); static int xn_configure_features(struct netfront_info *np); static void netif_free(struct netfront_info *info); static int netfront_detach(device_t dev); static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *); static int xn_txq_mq_start(struct ifnet *, struct mbuf *); static int talk_to_backend(device_t dev, struct netfront_info *info); static int create_netdev(device_t dev); static void netif_disconnect_backend(struct netfront_info *info); static int setup_device(device_t dev, struct netfront_info *info, unsigned long); static int xn_ifmedia_upd(struct ifnet *ifp); static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); int xn_connect(struct netfront_info *); static int xn_get_responses(struct netfront_rxq *, struct netfront_rx_info *, RING_IDX, RING_IDX *, struct mbuf **); #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT) #define INVALID_P2M_ENTRY (~0UL) struct xn_rx_stats { u_long rx_packets; /* total packets received */ u_long rx_bytes; /* total bytes received */ u_long rx_errors; /* bad packets received */ }; struct xn_tx_stats { u_long tx_packets; /* total packets transmitted */ u_long tx_bytes; /* total bytes transmitted */ u_long tx_errors; /* packet transmit problems */ }; #define XN_QUEUE_NAME_LEN 8 /* xn{t,r}x_%u, allow for two digits */ struct netfront_rxq { struct netfront_info *info; u_int id; char 
name[XN_QUEUE_NAME_LEN]; struct mtx lock; int ring_ref; netif_rx_front_ring_t ring; xen_intr_handle_t xen_intr_handle; grant_ref_t gref_head; grant_ref_t grant_ref[NET_TX_RING_SIZE + 1]; struct mbuf *mbufs[NET_RX_RING_SIZE + 1]; struct mbufq batch; /* batch queue */ int target; xen_pfn_t pfn_array[NET_RX_RING_SIZE]; struct lro_ctrl lro; struct taskqueue *tq; struct task intrtask; struct xn_rx_stats stats; }; struct netfront_txq { struct netfront_info *info; u_int id; char name[XN_QUEUE_NAME_LEN]; struct mtx lock; int ring_ref; netif_tx_front_ring_t ring; xen_intr_handle_t xen_intr_handle; grant_ref_t gref_head; grant_ref_t grant_ref[NET_TX_RING_SIZE + 1]; struct mbuf *mbufs[NET_TX_RING_SIZE + 1]; int mbufs_cnt; struct buf_ring *br; struct taskqueue *tq; struct task intrtask; struct task defrtask; bool full; struct xn_tx_stats stats; }; struct netfront_info { struct ifnet *xn_ifp; struct mtx sc_lock; u_int num_queues; struct netfront_rxq *rxq; struct netfront_txq *txq; u_int carrier; u_int maxfrags; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 32 #define RX_MAX_TARGET NET_RX_RING_SIZE int rx_min_target; int rx_max_target; device_t xbdev; uint8_t mac[ETHER_ADDR_LEN]; int xn_if_flags; struct ifmedia sc_media; bool xn_resume; }; struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; #define XN_RX_LOCK(_q) mtx_lock(&(_q)->lock) #define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->lock) #define XN_TX_LOCK(_q) mtx_lock(&(_q)->lock) #define XN_TX_TRYLOCK(_q) mtx_trylock(&(_q)->lock) #define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->lock) #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock); #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock); #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED); #define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED); #define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED); #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* Access macros for acquiring freeing slots in xn_free_{tx,rx}_idxs[]. */ static inline void add_id_to_freelist(struct mbuf **list, uintptr_t id) { KASSERT(id != 0, ("%s: the head item (0) must always be free.", __func__)); list[id] = list[0]; list[0] = (struct mbuf *)id; } static inline unsigned short get_id_from_freelist(struct mbuf **list) { uintptr_t id; id = (uintptr_t)list[0]; KASSERT(id != 0, ("%s: the head item (0) must always remain free.", __func__)); list[0] = list[id]; return (id); } static inline int xn_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct mbuf * xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri) { int i; struct mbuf *m; i = xn_rxidx(ri); m = rxq->mbufs[i]; rxq->mbufs[i] = NULL; return (m); } static inline grant_ref_t xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri) { int i = xn_rxidx(ri); grant_ref_t ref = rxq->grant_ref[i]; KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n")); rxq->grant_ref[i] = GRANT_REF_INVALID; return (ref); } #define IPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) #ifdef INVARIANTS #define WPRINTK(fmt, args...) \ printf("[XEN] " fmt, ##args) #else #define WPRINTK(fmt, args...) #endif #ifdef DEBUG #define DPRINTK(fmt, args...) \ printf("[XEN] %s: " fmt, __func__, ##args) #else #define DPRINTK(fmt, args...) 
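The free-list helpers above reuse the mbufs[] pointer array itself as a singly linked list of free slot indices: slot 0 is the permanent list head, and any "pointer" whose numeric value is at most the ring size is really the index of the next free slot. A standalone model of the same encoding, using a hypothetical 4-slot table in userland C:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4

/* Slot 0 is the free-list head; values <= NSLOTS encode "next free index". */
static void *slots[NSLOTS + 1];

static void
put_id(uintptr_t id)
{
	assert(id != 0);
	slots[id] = slots[0];
	slots[0] = (void *)id;
}

static uintptr_t
get_id(void)
{
	uintptr_t id = (uintptr_t)slots[0];

	assert(id != 0);
	slots[0] = slots[id];
	return (id);
}

int
main(void)
{
	/* Chain 1 -> 2 -> ... -> NSLOTS -> 0, as setup_txqs() does. */
	for (uintptr_t i = 0; i < NSLOTS; i++)
		slots[i] = (void *)(i + 1);
	slots[NSLOTS] = 0;

	printf("got %ju\n", (uintmax_t)get_id());	/* 1 */
	printf("got %ju\n", (uintmax_t)get_id());	/* 2 */
	put_id(1);
	printf("got %ju\n", (uintmax_t)get_id());	/* 1 again */
	return (0);
}

This encoding is also why the release path can treat any table entry whose value is <= NET_TX_RING_SIZE as a free-list link rather than a real mbuf pointer.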
#endif /** * Read the 'mac' node at the given device's node in the store, and parse that * as colon-separated octets, placing the result in the given mac array. mac must be * a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h). * Return 0 on success, or errno on error. */ static int xen_net_read_mac(device_t dev, uint8_t mac[]) { int error, i; char *s, *e, *macstr; const char *path; path = xenbus_get_node(dev); error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); if (error == ENOENT) { /* * Deal with missing mac XenStore nodes on devices with * HVM emulation (the 'ioemu' configuration attribute) * enabled. * * The HVM emulator may execute in a stub device model * domain which lacks the permission, only given to Dom0, * to update the guest's XenStore tree. For this reason, * the HVM emulator doesn't even attempt to write the * front-side mac node, even when operating in Dom0. * However, there should always be a mac listed in the * backend tree. Fall back to this version if our query * of the front side XenStore location doesn't find * anything. */ path = xenbus_get_otherend_path(dev); error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); } if (error != 0) { xenbus_dev_fatal(dev, error, "parsing %s/mac", path); return (error); } s = macstr; for (i = 0; i < ETHER_ADDR_LEN; i++) { mac[i] = strtoul(s, &e, 16); if (s == e || (e[0] != ':' && e[0] != 0)) { free(macstr, M_XENBUS); return (ENOENT); } s = &e[1]; } free(macstr, M_XENBUS); return (0); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Connected state. */ static int netfront_probe(device_t dev) { if (xen_hvm_domain() && xen_disable_pv_nics != 0) return (ENXIO); if (!strcmp(xenbus_get_type(dev), "vif")) { device_set_desc(dev, "Virtual Network Interface"); return (0); } return (ENXIO); } static int netfront_attach(device_t dev) { int err; err = create_netdev(dev); if (err != 0) { xenbus_dev_fatal(dev, err, "creating netdev"); return (err); } SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_lro", CTLFLAG_RW, &xn_enable_lro, 0, "Large Receive Offload"); SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_queues", CTLFLAG_RD, &xn_num_queues, "Number of pairs of queues"); return (0); } static int netfront_suspend(device_t dev) { struct netfront_info *np = device_get_softc(dev); u_int i; for (i = 0; i < np->num_queues; i++) { XN_RX_LOCK(&np->rxq[i]); XN_TX_LOCK(&np->txq[i]); } netfront_carrier_off(np); for (i = 0; i < np->num_queues; i++) { XN_RX_UNLOCK(&np->rxq[i]); XN_TX_UNLOCK(&np->txq[i]); } return (0); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel.
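The octet loop in xen_net_read_mac() rejects empty octets (s == e) and any separator other than ':' or the terminating NUL. The same parse, lifted into a self-contained userland program for clarity (the sample address is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETHER_ADDR_LEN 6

/* Same algorithm as xen_net_read_mac(): six hex octets split by ':'. */
static int
parse_mac(const char *macstr, uint8_t mac[ETHER_ADDR_LEN])
{
	char *e;
	const char *s = macstr;

	for (int i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		/* Reject empty octets and any separator other than ':'. */
		if (s == e || (e[0] != ':' && e[0] != '\0'))
			return (-1);
		s = e + 1;
	}
	return (0);
}

int
main(void)
{
	uint8_t mac[ETHER_ADDR_LEN];

	if (parse_mac("00:16:3e:5e:6c:00", mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return (0);
}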
*/ static int netfront_resume(device_t dev) { struct netfront_info *info = device_get_softc(dev); info->xn_resume = true; netif_disconnect_backend(info); return (0); } static int write_queue_xenstore_keys(device_t dev, struct netfront_rxq *rxq, struct netfront_txq *txq, struct xs_transaction *xst, bool hierarchy) { int err; const char *message; const char *node = xenbus_get_node(dev); char *path; size_t path_size; KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids")); /* Split event channel support is not yet there. */ KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle, ("Split event channels are not supported")); if (hierarchy) { path_size = strlen(node) + 10; path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO); snprintf(path, path_size, "%s/queue-%u", node, rxq->id); } else { path_size = strlen(node) + 1; path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO); snprintf(path, path_size, "%s", node); } err = xs_printf(*xst, path, "tx-ring-ref","%u", txq->ring_ref); if (err != 0) { message = "writing tx ring-ref"; goto error; } err = xs_printf(*xst, path, "rx-ring-ref","%u", rxq->ring_ref); if (err != 0) { message = "writing rx ring-ref"; goto error; } err = xs_printf(*xst, path, "event-channel", "%u", xen_intr_port(rxq->xen_intr_handle)); if (err != 0) { message = "writing event-channel"; goto error; } free(path, M_DEVBUF); return (0); error: free(path, M_DEVBUF); xenbus_dev_fatal(dev, err, "%s", message); return (err); } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(device_t dev, struct netfront_info *info) { const char *message; struct xs_transaction xst; const char *node = xenbus_get_node(dev); int err; unsigned long num_queues, max_queues = 0; unsigned int i; err = xen_net_read_mac(dev, info->mac); if (err != 0) { xenbus_dev_fatal(dev, err, "parsing %s/mac", node); goto out; } err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev), "multi-queue-max-queues", NULL, "%lu", &max_queues); if (err != 0) max_queues = 1; num_queues = xn_num_queues; if (num_queues > max_queues) num_queues = max_queues; err = setup_device(dev, info, num_queues); if (err != 0) goto out; again: err = xs_transaction_start(&xst); if (err != 0) { xenbus_dev_fatal(dev, err, "starting transaction"); goto free; } if (info->num_queues == 1) { err = write_queue_xenstore_keys(dev, &info->rxq[0], &info->txq[0], &xst, false); if (err != 0) goto abort_transaction_no_def_error; } else { err = xs_printf(xst, node, "multi-queue-num-queues", "%u", info->num_queues); if (err != 0) { message = "writing multi-queue-num-queues"; goto abort_transaction; } for (i = 0; i < info->num_queues; i++) { err = write_queue_xenstore_keys(dev, &info->rxq[i], &info->txq[i], &xst, true); if (err != 0) goto abort_transaction_no_def_error; } } err = xs_printf(xst, node, "request-rx-copy", "%u", 1); if (err != 0) { message = "writing request-rx-copy"; goto abort_transaction; } err = xs_printf(xst, node, "feature-rx-notify", "%d", 1); if (err != 0) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xs_printf(xst, node, "feature-sg", "%d", 1); if (err != 0) { message = "writing feature-sg"; goto abort_transaction; } err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1); if (err != 0) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xs_transaction_end(xst, 0); if (err != 0) { if (err == EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free; } return 0; abort_transaction: xenbus_dev_fatal(dev, err, "%s", 
message); abort_transaction_no_def_error: xs_transaction_end(xst, 1); free: netif_free(info); out: return (err); } static void xn_rxq_tq_intr(void *xrxq, int pending) { struct netfront_rxq *rxq = xrxq; XN_RX_LOCK(rxq); xn_rxeof(rxq); XN_RX_UNLOCK(rxq); } static void xn_txq_start(struct netfront_txq *txq) { struct netfront_info *np = txq->info; struct ifnet *ifp = np->xn_ifp; XN_TX_LOCK_ASSERT(txq); if (!drbr_empty(ifp, txq->br)) xn_txq_mq_start_locked(txq, NULL); } static void xn_txq_tq_intr(void *xtxq, int pending) { struct netfront_txq *txq = xtxq; XN_TX_LOCK(txq); if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring)) xn_txeof(txq); xn_txq_start(txq); XN_TX_UNLOCK(txq); } static void xn_txq_tq_deferred(void *xtxq, int pending) { struct netfront_txq *txq = xtxq; XN_TX_LOCK(txq); xn_txq_start(txq); XN_TX_UNLOCK(txq); } static void disconnect_rxq(struct netfront_rxq *rxq) { xn_release_rx_bufs(rxq); gnttab_free_grant_references(rxq->gref_head); gnttab_end_foreign_access_ref(rxq->ring_ref); /* * No split event channel support at the moment, handle will * be unbound in tx. So no need to call xen_intr_unbind here, * but we do want to reset the handler to 0. */ rxq->xen_intr_handle = 0; } static void destroy_rxq(struct netfront_rxq *rxq) { free(rxq->ring.sring, M_DEVBUF); taskqueue_drain_all(rxq->tq); taskqueue_free(rxq->tq); } static void destroy_rxqs(struct netfront_info *np) { int i; for (i = 0; i < np->num_queues; i++) destroy_rxq(&np->rxq[i]); free(np->rxq, M_DEVBUF); np->rxq = NULL; } static int setup_rxqs(device_t dev, struct netfront_info *info, unsigned long num_queues) { int q, i; int error; netif_rx_sring_t *rxs; struct netfront_rxq *rxq; info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues, M_DEVBUF, M_WAITOK|M_ZERO); for (q = 0; q < num_queues; q++) { rxq = &info->rxq[q]; rxq->id = q; rxq->info = info; rxq->target = RX_MIN_TARGET; rxq->ring_ref = GRANT_REF_INVALID; rxq->ring.sring = NULL; snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q); mtx_init(&rxq->lock, rxq->name, "netfront receive lock", MTX_DEF); for (i = 0; i <= NET_RX_RING_SIZE; i++) { rxq->mbufs[i] = NULL; rxq->grant_ref[i] = GRANT_REF_INVALID; } mbufq_init(&rxq->batch, INT_MAX); /* Start resources allocation */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &rxq->gref_head) != 0) { device_printf(dev, "allocating rx gref"); error = ENOMEM; goto fail; } rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); SHARED_RING_INIT(rxs); FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &rxq->ring_ref); if (error != 0) { device_printf(dev, "granting rx ring page"); goto fail_grant_ring; } TASK_INIT(&rxq->intrtask, 0, xn_rxq_tq_intr, rxq); rxq->tq = taskqueue_create_fast(rxq->name, M_WAITOK, taskqueue_thread_enqueue, &rxq->tq); error = taskqueue_start_threads(&rxq->tq, 1, PI_NET, "%s rxq %d", device_get_nameunit(dev), rxq->id); if (error != 0) { device_printf(dev, "failed to start rx taskq %d\n", rxq->id); goto fail_start_thread; } } return (0); fail_start_thread: gnttab_end_foreign_access_ref(rxq->ring_ref); taskqueue_drain_all(rxq->tq); taskqueue_free(rxq->tq); fail_grant_ring: gnttab_free_grant_references(rxq->gref_head); free(rxq->ring.sring, M_DEVBUF); fail: for (; q >= 0; q--) { disconnect_rxq(&info->rxq[q]); destroy_rxq(&info->rxq[q]); } free(info->rxq, M_DEVBUF); return (error); } static void disconnect_txq(struct netfront_txq *txq) { xn_release_tx_bufs(txq); gnttab_free_grant_references(txq->gref_head); gnttab_end_foreign_access_ref(txq->ring_ref); 
xen_intr_unbind(&txq->xen_intr_handle); } static void destroy_txq(struct netfront_txq *txq) { free(txq->ring.sring, M_DEVBUF); buf_ring_free(txq->br, M_DEVBUF); taskqueue_drain_all(txq->tq); taskqueue_free(txq->tq); } static void destroy_txqs(struct netfront_info *np) { int i; for (i = 0; i < np->num_queues; i++) destroy_txq(&np->txq[i]); free(np->txq, M_DEVBUF); np->txq = NULL; } static int setup_txqs(device_t dev, struct netfront_info *info, unsigned long num_queues) { int q, i; int error; netif_tx_sring_t *txs; struct netfront_txq *txq; info->txq = malloc(sizeof(struct netfront_txq) * num_queues, M_DEVBUF, M_WAITOK|M_ZERO); for (q = 0; q < num_queues; q++) { txq = &info->txq[q]; txq->id = q; txq->info = info; txq->ring_ref = GRANT_REF_INVALID; txq->ring.sring = NULL; snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q); mtx_init(&txq->lock, txq->name, "netfront transmit lock", MTX_DEF); for (i = 0; i <= NET_TX_RING_SIZE; i++) { txq->mbufs[i] = (void *) ((u_long) i+1); txq->grant_ref[i] = GRANT_REF_INVALID; } txq->mbufs[NET_TX_RING_SIZE] = (void *)0; /* Start resources allocation. */ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &txq->gref_head) != 0) { device_printf(dev, "failed to allocate tx grant refs\n"); error = ENOMEM; goto fail; } txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); SHARED_RING_INIT(txs); FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE); error = xenbus_grant_ring(dev, virt_to_mfn(txs), &txq->ring_ref); if (error != 0) { device_printf(dev, "failed to grant tx ring\n"); goto fail_grant_ring; } txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF, M_WAITOK, &txq->lock); TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq); TASK_INIT(&txq->intrtask, 0, xn_txq_tq_intr, txq); txq->tq = taskqueue_create_fast(txq->name, M_WAITOK, taskqueue_thread_enqueue, &txq->tq); error = taskqueue_start_threads(&txq->tq, 1, PI_NET, "%s txq %d", device_get_nameunit(dev), txq->id); if (error != 0) { device_printf(dev, "failed to start tx taskq %d\n", txq->id); goto fail_start_thread; } error = xen_intr_alloc_and_bind_local_port(dev, xenbus_get_otherend_id(dev), xn_intr, /* handler */ NULL, &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &txq->xen_intr_handle); if (error != 0) { device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n"); goto fail_bind_port; } } return (0); fail_bind_port: taskqueue_drain_all(txq->tq); fail_start_thread: buf_ring_free(txq->br, M_DEVBUF); taskqueue_free(txq->tq); gnttab_end_foreign_access_ref(txq->ring_ref); fail_grant_ring: gnttab_free_grant_references(txq->gref_head); free(txq->ring.sring, M_DEVBUF); fail: for (; q >= 0; q--) { disconnect_txq(&info->txq[q]); destroy_txq(&info->txq[q]); } free(info->txq, M_DEVBUF); return (error); } static int setup_device(device_t dev, struct netfront_info *info, unsigned long num_queues) { int error; int q; if (info->txq) destroy_txqs(info); if (info->rxq) destroy_rxqs(info); info->num_queues = 0; error = setup_rxqs(dev, info, num_queues); if (error != 0) goto out; error = setup_txqs(dev, info, num_queues); if (error != 0) goto out; info->num_queues = num_queues; /* No split event channel at the moment. */ for (q = 0; q < num_queues; q++) info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle; return (0); out: KASSERT(error != 0, ("Error path taken without providing an error code")); return (error); } #ifdef INET /** * If this interface has an ipv4 address, send an arp for it. This * helps to get the network going again after migrating hosts. 
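setup_txqs() above is a textbook example of staged goto unwinding: each acquisition gets its own failure label, a failure at stage N jumps to the label that releases stages N-1 and below in reverse order, and the final fail: loop additionally tears down the queues fully constructed in earlier iterations. The skeleton of the idiom, reduced to two hypothetical resources standing in for the ring page, buf_ring, and so on:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Skeleton of the staged unwind used by setup_txqs()/setup_rxqs(). */
static int
setup(void **ringp, void **brp)
{
	void *ring, *br;
	int error;

	ring = malloc(4096);		/* stage 1 */
	if (ring == NULL) {
		error = ENOMEM;
		goto fail;
	}
	br = malloc(512);		/* stage 2 */
	if (br == NULL) {
		error = ENOMEM;
		goto fail_ring;		/* undo stage 1 only */
	}
	*ringp = ring;
	*brp = br;
	return (0);

fail_ring:
	free(ring);			/* release in reverse acquisition order */
fail:
	return (error);
}

int
main(void)
{
	void *ring, *br;

	if (setup(&ring, &br) == 0) {
		printf("setup ok\n");
		free(br);
		free(ring);
	}
	return (0);
}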
*/ static void netfront_send_fake_arp(device_t dev, struct netfront_info *info) { struct ifnet *ifp; struct ifaddr *ifa; ifp = info->xn_ifp; TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family == AF_INET) { arp_ifinit(ifp, ifa); } } } #endif /** * Callback received when the backend's state changes. */ static void netfront_backend_changed(device_t dev, XenbusState newstate) { struct netfront_info *sc = device_get_softc(dev); DPRINTK("newstate=%d\n", newstate); switch (newstate) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateUnknown: case XenbusStateClosed: case XenbusStateReconfigured: case XenbusStateReconfiguring: break; case XenbusStateInitWait: if (xenbus_get_state(dev) != XenbusStateInitialising) break; if (xn_connect(sc) != 0) break; xenbus_set_state(dev, XenbusStateConnected); break; case XenbusStateClosing: xenbus_set_state(dev, XenbusStateClosed); break; case XenbusStateConnected: #ifdef INET netfront_send_fake_arp(dev, sc); #endif break; } } /** * \brief Verify that there is sufficient space in the Tx ring * buffer for a maximally sized request to be enqueued. * * A transmit request requires a transmit descriptor for each packet * fragment, plus up to 2 entries for "options" (e.g. TSO). */ static inline int xn_tx_slot_available(struct netfront_txq *txq) { return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2)); } static void xn_release_tx_bufs(struct netfront_txq *txq) { int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { struct mbuf *m; m = txq->mbufs[i]; /* * We assume that no kernel addresses are * less than NET_TX_RING_SIZE. Any entry * in the table that is below this number * must be an index from free-list tracking. */ if (((uintptr_t)m) <= NET_TX_RING_SIZE) continue; gnttab_end_foreign_access_ref(txq->grant_ref[i]); gnttab_release_grant_reference(&txq->gref_head, txq->grant_ref[i]); txq->grant_ref[i] = GRANT_REF_INVALID; add_id_to_freelist(txq->mbufs, i); txq->mbufs_cnt--; if (txq->mbufs_cnt < 0) { panic("%s: tx_chain_cnt must be >= 0", __func__); } m_free(m); } } static void xn_alloc_rx_buffers(struct netfront_rxq *rxq) { struct netfront_info *np = rxq->info; int otherend_id = xenbus_get_otherend_id(np->xbdev); unsigned short id; struct mbuf *m_new; int i, batch_target, notify; RING_IDX req_prod; grant_ref_t ref; netif_rx_request_t *req; vm_offset_t vaddr; u_long pfn; req_prod = rxq->ring.req_prod_pvt; if (__predict_false(np->carrier == 0)) return; /* * Allocate mbufs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, and so should reduce the chance of failed allocation * requests both for ourself and for other kernel subsystems. * * Here we attempt to maintain rx_target buffers in flight, counting * buffers that we have yet to process in the receive ring. */ batch_target = rxq->target - (req_prod - rxq->ring.rsp_cons); for (i = mbufq_len(&rxq->batch); i < batch_target; i++) { m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m_new == NULL) { if (i != 0) goto refill; /* XXX set timer */ break; } m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; /* queue the mbufs allocated */ mbufq_enqueue(&rxq->batch, m_new); } /* * If we've allocated at least half of our target number of entries, * submit them to the backend - we have enough to make the overhead * of submission worthwhile. Otherwise wait for more mbufs and * request entries to become available. 
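To make the refill heuristic concrete: with the initial target of RX_MIN_TARGET (32), if the ring already holds 10 posted-but-unconsumed buffers, the loop tries to batch 32 - 10 = 22 new mbufs, and the batch is only pushed to the backend once at least target/2 = 16 have been queued. A small calculation mirroring that logic (hypothetical ring indices, and assuming every allocation succeeds):

#include <stdio.h>

/* Hypothetical snapshot of the xn_alloc_rx_buffers() arithmetic. */
int
main(void)
{
	int target = 32;			/* RX_MIN_TARGET */
	int req_prod = 110, rsp_cons = 100;	/* made-up ring indices */
	int batch_target = target - (req_prod - rsp_cons);

	printf("try to batch %d mbufs\n", batch_target);	/* 22 */
	/* i == mbufs actually batched; assume all allocations succeeded. */
	int i = batch_target;
	printf("%s\n", i < target / 2 ?
	    "too few: wait for more mbufs" : "push refill to the backend");
	return (0);
}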
*/ if (i < (rxq->target/2)) { if (req_prod > rxq->ring.sring->req_prod) goto push; return; } /* * Double floating fill target if we risked having the backend * run out of empty buffers for receive traffic. We define "running * low" as having less than a fourth of our target buffers free * at the time we refilled the queue. */ if ((req_prod - rxq->ring.sring->rsp_prod) < (rxq->target / 4)) { rxq->target *= 2; if (rxq->target > np->rx_max_target) rxq->target = np->rx_max_target; } refill: for (i = 0; ; i++) { if ((m_new = mbufq_dequeue(&rxq->batch)) == NULL) break; m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)( vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT); id = xn_rxidx(req_prod + i); KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain")); rxq->mbufs[id] = m_new; ref = gnttab_claim_grant_reference(&rxq->gref_head); KASSERT(ref != GNTTAB_LIST_END, ("reserved grant references exhuasted")); rxq->grant_ref[id] = ref; vaddr = mtod(m_new, vm_offset_t); pfn = vtophys(vaddr) >> PAGE_SHIFT; req = RING_GET_REQUEST(&rxq->ring, req_prod + i); gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0); req->id = id; req->gref = ref; rxq->pfn_array[i] = vtophys(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT; } KASSERT(i, ("no mbufs processed")); /* should have returned earlier */ KASSERT(mbufq_len(&rxq->batch) == 0, ("not all mbufs processed")); /* * We may have allocated buffers which have entries outstanding * in the page * update queue -- make sure we flush those first! */ wmb(); /* Above is a suitable barrier to ensure backend will see requests. */ rxq->ring.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify); if (notify) xen_intr_signal(rxq->xen_intr_handle); } static void xn_release_rx_bufs(struct netfront_rxq *rxq) { int i, ref; struct mbuf *m; for (i = 0; i < NET_RX_RING_SIZE; i++) { m = rxq->mbufs[i]; if (m == NULL) continue; ref = rxq->grant_ref[i]; if (ref == GRANT_REF_INVALID) continue; gnttab_end_foreign_access_ref(ref); gnttab_release_grant_reference(&rxq->gref_head, ref); rxq->mbufs[i] = NULL; rxq->grant_ref[i] = GRANT_REF_INVALID; m_freem(m); } } static void xn_rxeof(struct netfront_rxq *rxq) { struct ifnet *ifp; struct netfront_info *np = rxq->info; #if (defined(INET) || defined(INET6)) struct lro_ctrl *lro = &rxq->lro; struct lro_entry *queued; #endif struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct mbuf *m; struct mbufq mbufq_rxq, mbufq_errq; int err, work_to_do; do { XN_RX_LOCK_ASSERT(rxq); if (!netfront_carrier_ok(np)) return; /* XXX: there should be some sane limit. */ mbufq_init(&mbufq_errq, INT_MAX); mbufq_init(&mbufq_rxq, INT_MAX); ifp = np->xn_ifp; rp = rxq->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
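The wmb() before publishing req_prod_pvt, and the rmb() after reading rsp_prod above, are the two halves of the classic single-producer/single-consumer ring protocol: write the payload, barrier, then advance the index; read the index, barrier, then read the payload. In portable C11 the same contract can be expressed with release/acquire atomics, as in this sketch (a model of the idea, not the Xen ring macros themselves):

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 4	/* must be a power of two, like NET_RX_RING_SIZE */

static int slots[RING_SIZE];
static atomic_uint prod;

/* Producer: fill the slot, then publish the new index with release. */
static void
push(unsigned idx, int v)
{
	slots[idx & (RING_SIZE - 1)] = v;
	atomic_store_explicit(&prod, idx + 1, memory_order_release);
}

/* Consumer: read the index with acquire; the slots are then safe to read. */
static int
pop(unsigned idx)
{
	while (atomic_load_explicit(&prod, memory_order_acquire) <= idx)
		;	/* spin until the producer publishes */
	return (slots[idx & (RING_SIZE - 1)]);
}

int
main(void)
{
	push(0, 42);
	printf("%d\n", pop(0));
	return (0);
}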
*/ i = rxq->ring.rsp_cons; while ((i != rp)) { memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); m = NULL; err = xn_get_responses(rxq, &rinfo, rp, &i, &m); if (__predict_false(err)) { if (m) (void )mbufq_enqueue(&mbufq_errq, m); rxq->stats.rx_errors++; continue; } m->m_pkthdr.rcvif = ifp; if ( rx->flags & NETRXF_data_validated ) { /* Tell the stack the checksums are okay */ /* * XXX this isn't necessarily the case - need to add * check */ m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } rxq->stats.rx_packets++; rxq->stats.rx_bytes += m->m_pkthdr.len; (void )mbufq_enqueue(&mbufq_rxq, m); rxq->ring.rsp_cons = i; } mbufq_drain(&mbufq_errq); /* * Process all the mbufs after the remapping is complete. * Break the mbuf chain first though. */ while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* XXX: Do we really need to drop the rx lock? */ XN_RX_UNLOCK(rxq); #if (defined(INET) || defined(INET6)) /* Use LRO if possible */ if ((ifp->if_capenable & IFCAP_LRO) == 0 || lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { /* * If LRO fails, pass up to the stack * directly. */ (*ifp->if_input)(ifp, m); } #else (*ifp->if_input)(ifp, m); #endif XN_RX_LOCK(rxq); } rxq->ring.rsp_cons = i; #if (defined(INET) || defined(INET6)) /* * Flush any outstanding LRO work */ while (!SLIST_EMPTY(&lro->lro_active)) { queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif xn_alloc_rx_buffers(rxq); RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do); } while (work_to_do); } static void xn_txeof(struct netfront_txq *txq) { RING_IDX i, prod; unsigned short id; struct ifnet *ifp; netif_tx_response_t *txr; struct mbuf *m; struct netfront_info *np = txq->info; XN_TX_LOCK_ASSERT(txq); if (!netfront_carrier_ok(np)) return; ifp = np->xn_ifp; do { prod = txq->ring.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (i = txq->ring.rsp_cons; i != prod; i++) { txr = RING_GET_RESPONSE(&txq->ring, i); if (txr->status == NETIF_RSP_NULL) continue; if (txr->status != NETIF_RSP_OKAY) { printf("%s: WARNING: response is %d!\n", __func__, txr->status); } id = txr->id; m = txq->mbufs[id]; KASSERT(m != NULL, ("mbuf not found in chain")); KASSERT((uintptr_t)m > NET_TX_RING_SIZE, ("mbuf already on the free list, but we're " "trying to free it again!")); M_ASSERTVALID(m); /* * Increment packet count if this is the last * mbuf of the chain. */ if (!m->m_next) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (__predict_false(gnttab_query_foreign_access( txq->grant_ref[id]) != 0)) { panic("%s: grant id %u still in use by the " "backend", __func__, id); } gnttab_end_foreign_access_ref(txq->grant_ref[id]); gnttab_release_grant_reference( &txq->gref_head, txq->grant_ref[id]); txq->grant_ref[id] = GRANT_REF_INVALID; txq->mbufs[id] = NULL; add_id_to_freelist(txq->mbufs, id); txq->mbufs_cnt--; m_free(m); /* Only mark the txq active if we've freed up at least one slot to try */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } txq->ring.rsp_cons = prod; /* * Set a new event, then check for race with update of * tx_cons. Note that it is essential to schedule a * callback, no matter how few buffers are pending. Even if * there is space in the transmit ring, higher layers may * be blocked because too much data is outstanding: in such * cases notification from Xen is likely to be the only kick * that we'll get. 
*/ txq->ring.sring->rsp_event = prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1; mb(); } while (prod != txq->ring.sring->rsp_prod); if (txq->full && ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) { txq->full = false; taskqueue_enqueue(txq->tq, &txq->intrtask); } } static void xn_rxq_intr(void *xrxq) { struct netfront_rxq *rxq = xrxq; - taskqueue_enqueue_fast(rxq->tq, &rxq->intrtask); + taskqueue_enqueue(rxq->tq, &rxq->intrtask); } static void xn_txq_intr(void *xtxq) { struct netfront_txq *txq = xtxq; - taskqueue_enqueue_fast(txq->tq, &txq->intrtask); + taskqueue_enqueue(txq->tq, &txq->intrtask); } static int xn_intr(void *xsc) { struct netfront_txq *txq = xsc; struct netfront_info *np = txq->info; struct netfront_rxq *rxq = &np->rxq[txq->id]; /* kick both tx and rx */ xn_rxq_intr(rxq); xn_txq_intr(txq); return (FILTER_HANDLED); } static void xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m, grant_ref_t ref) { int new = xn_rxidx(rxq->ring.req_prod_pvt); KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL")); rxq->mbufs[new] = m; rxq->grant_ref[new] = ref; RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new; RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref; rxq->ring.req_prod_pvt++; } static int xn_get_extras(struct netfront_rxq *rxq, struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons) { struct netif_extra_info *extra; int err = 0; do { struct mbuf *m; grant_ref_t ref; if (__predict_false(*cons + 1 == rp)) { err = EINVAL; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&rxq->ring, ++(*cons)); if (__predict_false(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { err = EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } m = xn_get_rx_mbuf(rxq, *cons); ref = xn_get_rx_ref(rxq, *cons); xn_move_rx_slot(rxq, m, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); return err; } static int xn_get_responses(struct netfront_rxq *rxq, struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons, struct mbuf **list) { struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; struct mbuf *m, *m0, *m_prev; grant_ref_t ref = xn_get_rx_ref(rxq, *cons); RING_IDX ref_cons = *cons; int frags = 1; int err = 0; u_long ret; m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons); if (rx->flags & NETRXF_extra_info) { err = xn_get_extras(rxq, extras, rp, cons); } if (m0 != NULL) { m0->m_pkthdr.len = 0; m0->m_next = NULL; } for (;;) { #if 0 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n", rx->status, rx->offset, frags); #endif if (__predict_false(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { xn_move_rx_slot(rxq, m, ref); if (m0 == m) m0 = NULL; m = NULL; err = EINVAL; goto next_skip_queue; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backend. */
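The rsp_event assignment at the top of this hunk is the interrupt-moderation rule the comment before it describes: request the next completion event only after about half of the still-outstanding requests have been consumed. With hypothetical indices prod = 100 and req_prod = 108, rsp_event becomes 100 + (8 >> 1) + 1 = 105:

#include <stdio.h>

/* Worked example of the tx completion event threshold in xn_txeof(). */
int
main(void)
{
	unsigned prod = 100;		/* responses consumed so far */
	unsigned req_prod = 108;	/* requests submitted so far */
	unsigned rsp_event = prod + ((req_prod - prod) >> 1) + 1;

	printf("next event at response %u\n", rsp_event);	/* 105 */
	return (0);
}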
*/ if (ref == GRANT_REF_INVALID) { printf("%s: Bad rx response id %d.\n", __func__, rx->id); err = EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref); KASSERT(ret, ("Unable to end access to grant references")); gnttab_release_grant_reference(&rxq->gref_head, ref); next: if (m == NULL) break; m->m_len = rx->status; m->m_data += rx->offset; m0->m_pkthdr.len += rx->status; next_skip_queue: if (!(rx->flags & NETRXF_more_data)) break; if (*cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = ENOENT; printf("%s: cons %u frags %u rp %u, not enough frags\n", __func__, *cons, frags, rp); break; } /* * Note that m can be NULL, if rx->status < 0 or if * rx->offset + rx->status > PAGE_SIZE above. */ m_prev = m; rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags); m = xn_get_rx_mbuf(rxq, *cons + frags); /* * m_prev == NULL can happen if rx->status < 0 or if * rx->offset + * rx->status > PAGE_SIZE above. */ if (m_prev != NULL) m_prev->m_next = m; /* * m0 can be NULL if rx->status < 0 or if * rx->offset + * rx->status > PAGE_SIZE above. */ if (m0 == NULL) m0 = m; m->m_next = NULL; ref = xn_get_rx_ref(rxq, *cons + frags); ref_cons = *cons + frags; frags++; } *list = m0; *cons += frags; return (err); } /** * \brief Count the number of fragments in an mbuf chain. * * Surprisingly, there isn't an M* macro for this. */ static inline int xn_count_frags(struct mbuf *m) { int nfrags; for (nfrags = 0; m != NULL; m = m->m_next) nfrags++; return (nfrags); } /** * Given an mbuf chain, make sure we have enough room and then push * it onto the transmit ring. */ static int xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head) { struct mbuf *m; struct netfront_info *np = txq->info; struct ifnet *ifp = np->xn_ifp; u_int nfrags; int otherend_id; /** * Defragment the mbuf if necessary. */ nfrags = xn_count_frags(m_head); /* * Check to see whether this request is longer than netback * can handle, and try to defrag it. */ /** * It is a bit lame, but the netback driver in Linux can't * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of * the Linux network stack. */ if (nfrags > np->maxfrags) { m = m_defrag(m_head, M_NOWAIT); if (!m) { /* * Defrag failed, so free the mbuf and * therefore drop the packet. */ m_freem(m_head); return (EMSGSIZE); } m_head = m; } /* Determine how many fragments now exist */ nfrags = xn_count_frags(m_head); /* * Check to see whether the defragmented packet has too many * segments for the Linux netback driver. */ /** * The FreeBSD TCP stack, with TSO enabled, can produce a chain * of mbufs longer than Linux can handle. Make sure we don't * pass a too-long chain over to the other side by dropping the * packet. It doesn't look like there is currently a way to * tell the TCP stack to generate a shorter chain of packets. */ if (nfrags > MAX_TX_REQ_FRAGS) { #ifdef DEBUG printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback " "won't be able to handle it, dropping\n", __func__, nfrags, MAX_TX_REQ_FRAGS); #endif m_freem(m_head); return (EMSGSIZE); } /* * This check should be redundant. We've already verified that we * have enough slots in the ring to handle a packet of maximum * size, and that our packet is less than the maximum size. Keep * it in here as an assert for now just to make certain that * chain_cnt is accurate. 
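The two-stage length check in xn_assemble_tx_request() is easier to follow with numbers: on a system with 4 KB pages, MAX_TX_REQ_FRAGS = 65536 / 4096 + 2 = 18. A chain longer than the negotiated maxfrags is first m_defrag()'d; if even the defragmented chain still exceeds 18 fragments it is dropped, since netback cannot accept it. A toy mirror of that decision:

#include <stdio.h>

#define PAGE_SIZE 4096
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)	/* 18 on 4 KB pages */

/* Mirror of the drop/defrag decision in xn_assemble_tx_request(). */
static const char *
tx_verdict(int nfrags, int maxfrags)
{
	if (nfrags > maxfrags)
		return ("m_defrag() first");
	if (nfrags > MAX_TX_REQ_FRAGS)
		return ("drop: netback cannot handle it");
	return ("send as-is");
}

int
main(void)
{
	printf("MAX_TX_REQ_FRAGS = %d\n", MAX_TX_REQ_FRAGS);
	printf("nfrags=24: %s\n", tx_verdict(24, MAX_TX_REQ_FRAGS));
	printf("nfrags=12: %s\n", tx_verdict(12, MAX_TX_REQ_FRAGS));
	return (0);
}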
*/ KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE, ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE " "(%d)!", __func__, (int) txq->mbufs_cnt, (int) nfrags, (int) NET_TX_RING_SIZE)); /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; otherend_id = xenbus_get_otherend_id(np->xbdev); for (m = m_head; m; m = m->m_next) { netif_tx_request_t *tx; uintptr_t id; grant_ref_t ref; u_long mfn; /* XXX Wrong type? */ tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt); id = get_id_from_freelist(txq->mbufs); if (id == 0) panic("%s: was allocated the freelist head!\n", __func__); txq->mbufs_cnt++; if (txq->mbufs_cnt > NET_TX_RING_SIZE) panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n", __func__); txq->mbufs[id] = m; tx->id = id; ref = gnttab_claim_grant_reference(&txq->gref_head); KASSERT((short)ref >= 0, ("Negative ref")); mfn = virt_to_mfn(mtod(m, vm_offset_t)); gnttab_grant_foreign_access_ref(ref, otherend_id, mfn, GNTMAP_readonly); tx->gref = txq->grant_ref[id] = ref; tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); tx->flags = 0; if (m == m_head) { /* * The first fragment has the entire packet * size, subsequent fragments have just the * fragment size. The backend works out the * true size of the first fragment by * subtracting the sizes of the other * fragments. */ tx->size = m->m_pkthdr.len; /* * The first fragment contains the checksum flags * and is optionally followed by extra data for * TSO etc. */ /** * CSUM_TSO requires checksum offloading. * Some versions of FreeBSD fail to * set CSUM_TCP in the CSUM_TSO case, * so we have to test for CSUM_TSO * explicitly. */ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) { tx->flags |= (NETTXF_csum_blank | NETTXF_data_validated); } if (m->m_pkthdr.csum_flags & CSUM_TSO) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&txq->ring, ++txq->ring.req_prod_pvt); tx->flags |= NETTXF_extra_info; gso->u.gso.size = m->m_pkthdr.tso_segsz; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } } else { tx->size = m->m_len; } if (m->m_next) tx->flags |= NETTXF_more_data; txq->ring.req_prod_pvt++; } BPF_MTAP(ifp, m_head); xn_txeof(txq); txq->stats.tx_bytes += m_head->m_pkthdr.len; txq->stats.tx_packets++; return (0); } /* equivalent of network_open() in Linux */ static void xn_ifinit_locked(struct netfront_info *np) { struct ifnet *ifp; int i; struct netfront_rxq *rxq; XN_LOCK_ASSERT(np); ifp = np->xn_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; xn_stop(np); for (i = 0; i < np->num_queues; i++) { rxq = &np->rxq[i]; xn_alloc_rx_buffers(rxq); rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1; } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_UP); } static void xn_ifinit(void *xsc) { struct netfront_info *sc = xsc; XN_LOCK(sc); xn_ifinit_locked(sc); XN_UNLOCK(sc); } static int xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct netfront_info *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif int mask, error = 0; switch(cmd) { case SIOCSIFADDR: #ifdef INET XN_LOCK(sc); if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) xn_ifinit_locked(sc); arp_ifinit(ifp, ifa); XN_UNLOCK(sc); } else { 
XN_UNLOCK(sc); #endif error = ether_ioctl(ifp, cmd, data); #ifdef INET } #endif break; case SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xn_ifinit(sc); break; case SIOCSIFFLAGS: XN_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. */ xn_ifinit_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { xn_stop(sc); } } sc->xn_if_flags = ifp->if_flags; XN_UNLOCK(sc); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO); } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); } } if (mask & IFCAP_RXCSUM) { ifp->if_capenable ^= IFCAP_RXCSUM; } if (mask & IFCAP_TSO4) { if (IFCAP_TSO4 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } else if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } else { IPRINTK("Xen requires tx checksum offload" " be enabled to use TSO\n"); error = EINVAL; } } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; } break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); } return (error); } static void xn_stop(struct netfront_info *sc) { struct ifnet *ifp; XN_LOCK_ASSERT(sc); ifp = sc->xn_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); } static void xn_rebuild_rx_bufs(struct netfront_rxq *rxq) { int requeue_idx, i; grant_ref_t ref; netif_rx_request_t *req; for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { struct mbuf *m; u_long pfn; if (rxq->mbufs[i] == NULL) continue; m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i); ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i); req = RING_GET_REQUEST(&rxq->ring, requeue_idx); pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; gnttab_grant_foreign_access_ref(ref, xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0); req->gref = ref; req->id = requeue_idx; requeue_idx++; } rxq->ring.req_prod_pvt = requeue_idx; } /* START of Xenolinux helper functions adapted to FreeBSD */ int xn_connect(struct netfront_info *np) { int i, error; u_int feature_rx_copy; struct netfront_rxq *rxq; struct netfront_txq *txq; error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-rx-copy", NULL, "%u", &feature_rx_copy); if (error != 0) feature_rx_copy = 0; /* We only support rx copy. */ if (!feature_rx_copy) return (EPROTONOSUPPORT); /* Recovery procedure: */ error = talk_to_backend(np->xbdev, np); if (error != 0) return (error); /* Step 1: Reinitialise variables. */ xn_query_features(np); xn_configure_features(np); /* Step 2: Release TX buffer */ for (i = 0; i < np->num_queues; i++) { txq = &np->txq[i]; xn_release_tx_bufs(txq); } /* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */ for (i = 0; i < np->num_queues; i++) { rxq = &np->rxq[i]; xn_rebuild_rx_bufs(rxq); } /* Step 4: All public and private state should now be sane. 
Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); for (i = 0; i < np->num_queues; i++) { txq = &np->txq[i]; xen_intr_signal(txq->xen_intr_handle); XN_TX_LOCK(txq); xn_txeof(txq); XN_TX_UNLOCK(txq); xn_alloc_rx_buffers(rxq); } return (0); } static void xn_query_features(struct netfront_info *np) { int val; device_printf(np->xbdev, "backend features:"); if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-sg", NULL, "%d", &val) < 0) val = 0; np->maxfrags = 1; if (val) { np->maxfrags = MAX_TX_REQ_FRAGS; printf(" feature-sg"); } if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), "feature-gso-tcpv4", NULL, "%d", &val) < 0) val = 0; np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); if (val) { np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; printf(" feature-gso-tcp4"); } printf("\n"); } static int xn_configure_features(struct netfront_info *np) { int err, cap_enabled; #if (defined(INET) || defined(INET6)) int i; #endif err = 0; if (np->xn_resume && ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities) == np->xn_ifp->if_capenable)) { /* Current options are available, no need to do anything. */ return (0); } /* Try to preserve as many options as possible. */ if (np->xn_resume) cap_enabled = np->xn_ifp->if_capenable; else cap_enabled = UINT_MAX; #if (defined(INET) || defined(INET6)) for (i = 0; i < np->num_queues; i++) if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO)) tcp_lro_free(&np->rxq[i].lro); #endif np->xn_ifp->if_capenable = np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled; np->xn_ifp->if_hwassist &= ~CSUM_TSO; #if (defined(INET) || defined(INET6)) for (i = 0; i < np->num_queues; i++) { if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) == (cap_enabled & IFCAP_LRO)) { err = tcp_lro_init(&np->rxq[i].lro); if (err != 0) { device_printf(np->xbdev, "LRO initialization failed\n"); } else { np->rxq[i].lro.ifp = np->xn_ifp; np->xn_ifp->if_capenable |= IFCAP_LRO; } } } if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) == (cap_enabled & IFCAP_TSO4)) { np->xn_ifp->if_capenable |= IFCAP_TSO4; np->xn_ifp->if_hwassist |= CSUM_TSO; } #endif return (err); } static int xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m) { struct netfront_info *np; struct ifnet *ifp; struct buf_ring *br; int error, notify; np = txq->info; br = txq->br; ifp = np->xn_ifp; error = 0; XN_TX_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !netfront_carrier_ok(np)) { if (m != NULL) error = drbr_enqueue(ifp, br, m); return (error); } if (m != NULL) { error = drbr_enqueue(ifp, br, m); if (error != 0) return (error); } while ((m = drbr_peek(ifp, br)) != NULL) { if (!xn_tx_slot_available(txq)) { drbr_putback(ifp, br, m); break; } error = xn_assemble_tx_request(txq, m); /* xn_assemble_tx_request always consumes the mbuf*/ if (error != 0) { drbr_advance(ifp, br); break; } RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify); if (notify) xen_intr_signal(txq->xen_intr_handle); drbr_advance(ifp, br); } if (RING_FULL(&txq->ring)) txq->full = true; return (0); } static int xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m) { struct netfront_info *np; struct netfront_txq *txq; int i, npairs, error; np = ifp->if_softc; npairs = np->num_queues; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % npairs; else i = curcpu % npairs; txq = &np->txq[i]; if 
(XN_TX_TRYLOCK(txq) != 0) { error = xn_txq_mq_start_locked(txq, m); XN_TX_UNLOCK(txq); } else { error = drbr_enqueue(ifp, txq->br, m); taskqueue_enqueue(txq->tq, &txq->defrtask); } return (error); } static void xn_qflush(struct ifnet *ifp) { struct netfront_info *np; struct netfront_txq *txq; struct mbuf *m; int i; np = ifp->if_softc; for (i = 0; i < np->num_queues; i++) { txq = &np->txq[i]; XN_TX_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) m_freem(m); XN_TX_UNLOCK(txq); } if_qflush(ifp); } /** * Create a network device. * @param dev Newbus device representing this virtual NIC. */ int create_netdev(device_t dev) { struct netfront_info *np; int err; struct ifnet *ifp; np = device_get_softc(dev); np->xbdev = dev; mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF); ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); np->rx_min_target = RX_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; err = xen_net_read_mac(dev, np->mac); if (err != 0) goto error; /* Set up ifnet structure */ ifp = np->xn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = np; if_initname(ifp, "xn", device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xn_ioctl; ifp->if_transmit = xn_txq_mq_start; ifp->if_qflush = xn_qflush; ifp->if_init = xn_ifinit; ifp->if_hwassist = XN_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; ether_ifattach(ifp, np->mac); netfront_carrier_off(np); return (0); error: KASSERT(err != 0, ("Error path with no error code specified")); return (err); } static int netfront_detach(device_t dev) { struct netfront_info *info = device_get_softc(dev); DPRINTK("%s\n", xenbus_get_node(dev)); netif_free(info); return 0; } static void netif_free(struct netfront_info *np) { XN_LOCK(np); xn_stop(np); XN_UNLOCK(np); netif_disconnect_backend(np); free(np->rxq, M_DEVBUF); free(np->txq, M_DEVBUF); ether_ifdetach(np->xn_ifp); if_free(np->xn_ifp); np->xn_ifp = NULL; ifmedia_removeall(&np->sc_media); } static void netif_disconnect_backend(struct netfront_info *np) { u_int i; for (i = 0; i < np->num_queues; i++) { XN_RX_LOCK(&np->rxq[i]); XN_TX_LOCK(&np->txq[i]); } netfront_carrier_off(np); for (i = 0; i < np->num_queues; i++) { XN_RX_UNLOCK(&np->rxq[i]); XN_TX_UNLOCK(&np->txq[i]); } for (i = 0; i < np->num_queues; i++) { disconnect_rxq(&np->rxq[i]); disconnect_txq(&np->txq[i]); } } static int xn_ifmedia_upd(struct ifnet *ifp) { return (0); } static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* ** Driver registration ** */ static device_method_t netfront_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netfront_probe), DEVMETHOD(device_attach, netfront_attach), DEVMETHOD(device_detach, netfront_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, netfront_suspend), DEVMETHOD(device_resume, netfront_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed), DEVMETHOD_END }; static driver_t netfront_driver = { "xn", netfront_methods, sizeof(struct netfront_info), }; devclass_t netfront_devclass; DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL, NULL); Index: 
head/sys/kern/subr_taskqueue.c =================================================================== --- head/sys/kern/subr_taskqueue.c (revision 296271) +++ head/sys/kern/subr_taskqueue.c (revision 296272) @@ -1,781 +1,774 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues"); static void *taskqueue_giant_ih; static void *taskqueue_ih; static void taskqueue_fast_enqueue(void *); static void taskqueue_swi_enqueue(void *); static void taskqueue_swi_giant_enqueue(void *); struct taskqueue_busy { struct task *tb_running; TAILQ_ENTRY(taskqueue_busy) tb_link; }; struct task * const TB_DRAIN_WAITER = (struct task *)0x1; struct taskqueue { STAILQ_HEAD(, task) tq_queue; taskqueue_enqueue_fn tq_enqueue; void *tq_context; TAILQ_HEAD(, taskqueue_busy) tq_active; struct mtx tq_mutex; struct thread **tq_threads; int tq_tcount; int tq_spin; int tq_flags; int tq_callouts; taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS]; void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS]; }; #define TQ_FLAGS_ACTIVE (1 << 0) #define TQ_FLAGS_BLOCKED (1 << 1) #define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2) #define DT_CALLOUT_ARMED (1 << 0) #define TQ_LOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_lock_spin(&(tq)->tq_mutex); \ else \ mtx_lock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED) #define TQ_UNLOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_unlock_spin(&(tq)->tq_mutex); \ else \ mtx_unlock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED) void _timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task, int priority, task_fn_t func, void *context) { TASK_INIT(&timeout_task->t, priority, func, context); callout_init_mtx(&timeout_task->c, &queue->tq_mutex, CALLOUT_RETURNUNLOCKED); timeout_task->q = queue; timeout_task->f = 0; } static __inline int TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm, int t) { if (tq->tq_spin) return (msleep_spin(p, m, wm, t)); return (msleep(p, m, pri, wm, t)); } static 
struct taskqueue * _taskqueue_create(const char *name __unused, int mflags, taskqueue_enqueue_fn enqueue, void *context, int mtxflags, const char *mtxname) { struct taskqueue *queue; queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO); if (!queue) return NULL; STAILQ_INIT(&queue->tq_queue); TAILQ_INIT(&queue->tq_active); queue->tq_enqueue = enqueue; queue->tq_context = context; queue->tq_spin = (mtxflags & MTX_SPIN) != 0; queue->tq_flags |= TQ_FLAGS_ACTIVE; if (enqueue == taskqueue_fast_enqueue || enqueue == taskqueue_swi_enqueue || enqueue == taskqueue_swi_giant_enqueue || enqueue == taskqueue_thread_enqueue) queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE; mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags); return queue; } struct taskqueue * taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _taskqueue_create(name, mflags, enqueue, context, MTX_DEF, "taskqueue"); } void taskqueue_set_callback(struct taskqueue *queue, enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback, void *context) { KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) && (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)), ("Callback type %d not valid, must be %d-%d", cb_type, TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX)); KASSERT((queue->tq_callbacks[cb_type] == NULL), ("Re-initialization of taskqueue callback?")); queue->tq_callbacks[cb_type] = callback; queue->tq_cb_contexts[cb_type] = context; } /* * Signal a taskqueue thread to terminate. */ static void taskqueue_terminate(struct thread **pp, struct taskqueue *tq) { while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { wakeup(tq); TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0); } } void taskqueue_free(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_ACTIVE; taskqueue_terminate(queue->tq_threads, queue); KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?")); KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); mtx_destroy(&queue->tq_mutex); free(queue->tq_threads, M_TASKQUEUE); free(queue, M_TASKQUEUE); } static int taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task) { struct task *ins; struct task *prev; /* * Count multiple enqueues. */ if (task->ta_pending) { if (task->ta_pending < USHRT_MAX) task->ta_pending++; TQ_UNLOCK(queue); return (0); } /* * Optimise the case when all tasks have the same priority. */ prev = STAILQ_LAST(&queue->tq_queue, task, ta_link); if (!prev || prev->ta_priority >= task->ta_priority) { STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link); } else { prev = NULL; for (ins = STAILQ_FIRST(&queue->tq_queue); ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link)) if (ins->ta_priority < task->ta_priority) break; if (prev) STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link); else STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link); } task->ta_pending = 1; if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0) TQ_UNLOCK(queue); if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) queue->tq_enqueue(queue->tq_context); if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0) TQ_UNLOCK(queue); /* Return with lock released. */ return (0); } int taskqueue_enqueue(struct taskqueue *queue, struct task *task) { int res; TQ_LOCK(queue); res = taskqueue_enqueue_locked(queue, task); /* The lock is released inside. 
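taskqueue_enqueue_locked() above keeps tq_queue sorted by descending ta_priority and takes an O(1) tail-append fast path whenever the new task does not outrank the current tail, which is the common all-one-priority case; equal priorities stay FIFO. The same insertion policy on a flat array, as a quick standalone check (not the STAILQ code itself):

#include <stdio.h>

#define MAXT 8

static int q[MAXT];	/* priorities, kept in descending order */
static int qlen;

/* Same policy as taskqueue_enqueue_locked(): append when possible. */
static void
enqueue(int pri)
{
	int i;

	if (qlen == 0 || q[qlen - 1] >= pri) {
		q[qlen++] = pri;	/* O(1) common case */
		return;
	}
	for (i = 0; i < qlen && q[i] >= pri; i++)
		;			/* insert after equal priorities */
	for (int j = qlen; j > i; j--)
		q[j] = q[j - 1];
	q[i] = pri;
	qlen++;
}

int
main(void)
{
	int pris[] = { 0, 0, 5, 0 }, i;

	for (i = 0; i < 4; i++)
		enqueue(pris[i]);
	for (i = 0; i < qlen; i++)
		printf("%d ", q[i]);	/* prints: 5 0 0 0 */
	printf("\n");
	return (0);
}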
*/ return (res); } static void taskqueue_timeout_func(void *arg) { struct taskqueue *queue; struct timeout_task *timeout_task; timeout_task = arg; queue = timeout_task->q; KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout")); timeout_task->f &= ~DT_CALLOUT_ARMED; queue->tq_callouts--; taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t); /* The lock is released inside. */ } int taskqueue_enqueue_timeout(struct taskqueue *queue, struct timeout_task *timeout_task, int ticks) { int res; TQ_LOCK(queue); KASSERT(timeout_task->q == NULL || timeout_task->q == queue, ("Migrated queue")); KASSERT(!queue->tq_spin, ("Timeout for spin-queue")); timeout_task->q = queue; res = timeout_task->t.ta_pending; if (ticks == 0) { taskqueue_enqueue_locked(queue, &timeout_task->t); /* The lock is released inside. */ } else { if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) { res++; } else { queue->tq_callouts++; timeout_task->f |= DT_CALLOUT_ARMED; if (ticks < 0) ticks = -ticks; /* Ignore overflow. */ } if (ticks > 0) { callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func, timeout_task); } TQ_UNLOCK(queue); } return (res); } static void taskqueue_task_nop_fn(void *context, int pending) { } /* * Block until all currently queued tasks in this taskqueue * have begun execution. Tasks queued during execution of * this function are ignored. */ static void taskqueue_drain_tq_queue(struct taskqueue *queue) { struct task t_barrier; if (STAILQ_EMPTY(&queue->tq_queue)) return; /* * Enqueue our barrier after all current tasks, but with * the highest priority so that newly queued tasks cannot * pass it. Because of the high priority, we can not use * taskqueue_enqueue_locked directly (which drops the lock * anyway) so just insert it at tail while we have the * queue lock. */ TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier); STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link); t_barrier.ta_pending = 1; /* * Once the barrier has executed, all previously queued tasks * have completed or are currently executing. */ while (t_barrier.ta_pending != 0) TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0); } /* * Block until all currently executing tasks for this taskqueue * complete. Tasks that begin execution during the execution * of this function are ignored. */ static void taskqueue_drain_tq_active(struct taskqueue *queue) { struct taskqueue_busy tb_marker, *tb_first; if (TAILQ_EMPTY(&queue->tq_active)) return; /* Block taskq_terminate().*/ queue->tq_callouts++; /* * Wait for all currently executing taskqueue threads * to go idle. */ tb_marker.tb_running = TB_DRAIN_WAITER; TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link); while (TAILQ_FIRST(&queue->tq_active) != &tb_marker) TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0); TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link); /* * Wakeup any other drain waiter that happened to queue up * without any intervening active thread. */ tb_first = TAILQ_FIRST(&queue->tq_active); if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER) wakeup(tb_first); /* Release taskqueue_terminate(). 
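taskqueue_drain_tq_queue() above implements draining with a barrier task: a USHRT_MAX-priority nop is appended under the queue lock, so nothing newly enqueued can pass it, and the drainer sleeps until the barrier's ta_pending drops to zero. A toy pthread analogue of that handshake (hypothetical, showing only the sleep/wakeup shape, not the kernel code):

#include <pthread.h>
#include <stdio.h>

/* main() is the drainer sleeping on the barrier's pending flag;
 * the worker thread "runs" the nop barrier task. */
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int barrier_pending = 1;		/* like t_barrier.ta_pending */

static void *
worker(void *arg)
{
	/* ... all previously queued tasks would run first ... */
	pthread_mutex_lock(&mtx);
	barrier_pending = 0;		/* the nop barrier task "ran" */
	pthread_cond_broadcast(&cv);	/* like wakeup(task) */
	pthread_mutex_unlock(&mtx);
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, worker, NULL);
	pthread_mutex_lock(&mtx);
	while (barrier_pending)		/* like TQ_SLEEP(...) */
		pthread_cond_wait(&cv, &mtx);
	pthread_mutex_unlock(&mtx);
	pthread_join(td, NULL);
	printf("drained\n");
	return (0);
}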
*/ queue->tq_callouts--; if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0) wakeup_one(queue->tq_threads); } void taskqueue_block(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags |= TQ_FLAGS_BLOCKED; TQ_UNLOCK(queue); } void taskqueue_unblock(struct taskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_BLOCKED; if (!STAILQ_EMPTY(&queue->tq_queue)) queue->tq_enqueue(queue->tq_context); TQ_UNLOCK(queue); } static void taskqueue_run_locked(struct taskqueue *queue) { struct taskqueue_busy tb; struct taskqueue_busy *tb_first; struct task *task; int pending; TQ_ASSERT_LOCKED(queue); tb.tb_running = NULL; while (STAILQ_FIRST(&queue->tq_queue)) { TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link); /* * Carefully remove the first task from the queue and * zero its pending count. */ task = STAILQ_FIRST(&queue->tq_queue); STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); pending = task->ta_pending; task->ta_pending = 0; tb.tb_running = task; TQ_UNLOCK(queue); task->ta_func(task->ta_context, pending); TQ_LOCK(queue); tb.tb_running = NULL; wakeup(task); TAILQ_REMOVE(&queue->tq_active, &tb, tb_link); tb_first = TAILQ_FIRST(&queue->tq_active); if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER) wakeup(tb_first); } } void taskqueue_run(struct taskqueue *queue) { TQ_LOCK(queue); taskqueue_run_locked(queue); TQ_UNLOCK(queue); } static int task_is_running(struct taskqueue *queue, struct task *task) { struct taskqueue_busy *tb; TQ_ASSERT_LOCKED(queue); TAILQ_FOREACH(tb, &queue->tq_active, tb_link) { if (tb->tb_running == task) return (1); } return (0); } static int taskqueue_cancel_locked(struct taskqueue *queue, struct task *task, u_int *pendp) { if (task->ta_pending > 0) STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link); if (pendp != NULL) *pendp = task->ta_pending; task->ta_pending = 0; return (task_is_running(queue, task) ? 
EBUSY : 0); } int taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp) { int error; TQ_LOCK(queue); error = taskqueue_cancel_locked(queue, task, pendp); TQ_UNLOCK(queue); return (error); } int taskqueue_cancel_timeout(struct taskqueue *queue, struct timeout_task *timeout_task, u_int *pendp) { u_int pending, pending1; int error; TQ_LOCK(queue); pending = !!(callout_stop(&timeout_task->c) > 0); error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1); if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) { timeout_task->f &= ~DT_CALLOUT_ARMED; queue->tq_callouts--; } TQ_UNLOCK(queue); if (pendp != NULL) *pendp = pending + pending1; return (error); } void taskqueue_drain(struct taskqueue *queue, struct task *task) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); while (task->ta_pending != 0 || task_is_running(queue, task)) TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0); TQ_UNLOCK(queue); } void taskqueue_drain_all(struct taskqueue *queue) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); taskqueue_drain_tq_queue(queue); taskqueue_drain_tq_active(queue); TQ_UNLOCK(queue); } void taskqueue_drain_timeout(struct taskqueue *queue, struct timeout_task *timeout_task) { callout_drain(&timeout_task->c); taskqueue_drain(queue, &timeout_task->t); } static void taskqueue_swi_enqueue(void *context) { swi_sched(taskqueue_ih, 0); } static void taskqueue_swi_run(void *dummy) { taskqueue_run(taskqueue_swi); } static void taskqueue_swi_giant_enqueue(void *context) { swi_sched(taskqueue_giant_ih, 0); } static void taskqueue_swi_giant_run(void *dummy) { taskqueue_run(taskqueue_swi_giant); } static int _taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, va_list ap) { char ktname[MAXCOMLEN + 1]; struct thread *td; struct taskqueue *tq; int i, error; if (count <= 0) return (EINVAL); vsnprintf(ktname, sizeof(ktname), name, ap); tq = *tqp; tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE, M_NOWAIT | M_ZERO); if (tq->tq_threads == NULL) { printf("%s: no memory for %s threads\n", __func__, ktname); return (ENOMEM); } for (i = 0; i < count; i++) { if (count == 1) error = kthread_add(taskqueue_thread_loop, tqp, NULL, &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname); else error = kthread_add(taskqueue_thread_loop, tqp, NULL, &tq->tq_threads[i], RFSTOPPED, 0, "%s_%d", ktname, i); if (error) { /* should be ok to continue, taskqueue_free will dtrt */ printf("%s: kthread_add(%s): error %d", __func__, ktname, error); tq->tq_threads[i] = NULL; /* paranoid */ } else tq->tq_tcount++; } for (i = 0; i < count; i++) { if (tq->tq_threads[i] == NULL) continue; td = tq->tq_threads[i]; if (mask) { error = cpuset_setthread(td->td_tid, mask); /* * Failing to pin is rarely an actual fatal error; * it'll just affect performance. */ if (error) printf("%s: curthread=%llu: can't pin; " "error=%d\n", __func__, (unsigned long long) td->td_tid, error); } thread_lock(td); sched_prio(td, pri); sched_add(td, SRQ_BORING); thread_unlock(td); } return (0); } int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap); va_end(ap); return (error); } int taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, ...) 
{ va_list ap; int error; va_start(ap, name); error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap); va_end(ap); return (error); } static inline void taskqueue_run_callback(struct taskqueue *tq, enum taskqueue_callback_type cb_type) { taskqueue_callback_fn tq_callback; TQ_ASSERT_UNLOCKED(tq); tq_callback = tq->tq_callbacks[cb_type]; if (tq_callback != NULL) tq_callback(tq->tq_cb_contexts[cb_type]); } void taskqueue_thread_loop(void *arg) { struct taskqueue **tqp, *tq; tqp = arg; tq = *tqp; taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT); TQ_LOCK(tq); while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) { taskqueue_run_locked(tq); /* * Because taskqueue_run() can drop tq_mutex, we need to * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the * meantime, which means we missed a wakeup. */ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0) break; TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0); } taskqueue_run_locked(tq); /* * This thread is on its way out, so just drop the lock temporarily * in order to call the shutdown callback. This allows the callback * to look at the taskqueue, even just before it dies. */ TQ_UNLOCK(tq); taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN); TQ_LOCK(tq); /* rendezvous with thread that asked us to terminate */ tq->tq_tcount--; wakeup_one(tq->tq_threads); TQ_UNLOCK(tq); kthread_exit(); } void taskqueue_thread_enqueue(void *context) { struct taskqueue **tqp, *tq; tqp = context; tq = *tqp; wakeup_one(tq); } TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL, swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, INTR_MPSAFE, &taskqueue_ih)); TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL, swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run, NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih)); TASKQUEUE_DEFINE_THREAD(thread); struct taskqueue * taskqueue_create_fast(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _taskqueue_create(name, mflags, enqueue, context, MTX_SPIN, "fast_taskqueue"); } -/* NB: for backwards compatibility */ -int -taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task) -{ - return taskqueue_enqueue(queue, task); -} - static void *taskqueue_fast_ih; static void taskqueue_fast_enqueue(void *context) { swi_sched(taskqueue_fast_ih, 0); } static void taskqueue_fast_run(void *dummy) { taskqueue_run(taskqueue_fast); } TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL, swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL, SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih)); int taskqueue_member(struct taskqueue *queue, struct thread *td) { int i, j, ret = 0; for (i = 0, j = 0; ; i++) { if (queue->tq_threads[i] == NULL) continue; if (queue->tq_threads[i] == td) { ret = 1; break; } if (++j >= queue->tq_tcount) break; } return (ret); } Index: head/sys/netpfil/ipfw/ip_dummynet.c =================================================================== --- head/sys/netpfil/ipfw/ip_dummynet.c (revision 296271) +++ head/sys/netpfil/ipfw/ip_dummynet.c (revision 296272) @@ -1,2321 +1,2321 @@ /*- * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa * Portions Copyright (c) 2000 Akamba Corp. * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* * Configuration and internal object management for dummynet. */
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/taskqueue.h>
#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <netinet/in.h>
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#include <netpfil/ipfw/dn_sched.h>
/* which objects to copy */ #define DN_C_LINK 0x01 #define DN_C_SCH 0x02 #define DN_C_FLOW 0x04 #define DN_C_FS 0x08 #define DN_C_QUEUE 0x10 /* we use this argument in case of a schk_new */ struct schk_new_arg { struct dn_alg *fp; struct dn_sch *sch; }; /*---- callout hooks. ----*/ static struct callout dn_timeout; static int dn_gone; static struct task dn_task; static struct taskqueue *dn_tq = NULL; static void dummynet(void *arg) { (void)arg; /* UNUSED */
- taskqueue_enqueue_fast(dn_tq, &dn_task);
+ taskqueue_enqueue(dn_tq, &dn_task);
} void dn_reschedule(void) { if (dn_gone != 0) return; callout_reset_sbt(&dn_timeout, tick_sbt, 0, dummynet, NULL, C_HARDCLOCK | C_DIRECT_EXEC); } /*----- end of callout hooks -----*/ /* Return a scheduler descriptor given the type or name. */ static struct dn_alg * find_sched_type(int type, char *name) { struct dn_alg *d; SLIST_FOREACH(d, &dn_cfg.schedlist, next) { if (d->type == type || (name && !strcasecmp(d->name, name))) return d; } return NULL; /* not found */ } int ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg) { int oldv = *v; const char *op = NULL; if (dflt < lo) dflt = lo; if (dflt > hi) dflt = hi; if (oldv < lo) { *v = dflt; op = "Bump"; } else if (oldv > hi) { *v = hi; op = "Clamp"; } else return *v; if (op && msg) printf("%s %s to %d (was %d)\n", op, msg, *v, oldv); return *v; } /*---- flow_id mask, hash and compare functions ---*/ /* * The flow_id includes the 5-tuple, the queue/pipe number * which we store in the extra area in host order, * and for ipv6 also the flow_id6.
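* Folding the queue/pipe number into 'extra' keeps otherwise identical * 5-tuples that traverse different pipes from being treated as one flow.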
* XXX see if we want the tos byte (can store in 'flags') */ static struct ipfw_flow_id * flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id) { int is_v6 = IS_IP6_FLOW_ID(id); id->dst_port &= mask->dst_port; id->src_port &= mask->src_port; id->proto &= mask->proto; id->extra &= mask->extra; if (is_v6) { APPLY_MASK(&id->dst_ip6, &mask->dst_ip6); APPLY_MASK(&id->src_ip6, &mask->src_ip6); id->flow_id6 &= mask->flow_id6; } else { id->dst_ip &= mask->dst_ip; id->src_ip &= mask->src_ip; } return id; } /* computes an OR of two masks, result in dst and also returned */ static struct ipfw_flow_id * flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst) { int is_v6 = IS_IP6_FLOW_ID(dst); dst->dst_port |= src->dst_port; dst->src_port |= src->src_port; dst->proto |= src->proto; dst->extra |= src->extra; if (is_v6) { #define OR_MASK(_d, _s) \ (_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \ (_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \ (_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \ (_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3]; OR_MASK(&dst->dst_ip6, &src->dst_ip6); OR_MASK(&dst->src_ip6, &src->src_ip6); #undef OR_MASK dst->flow_id6 |= src->flow_id6; } else { dst->dst_ip |= src->dst_ip; dst->src_ip |= src->src_ip; } return dst; } static int nonzero_mask(struct ipfw_flow_id *m) { if (m->dst_port || m->src_port || m->proto || m->extra) return 1; if (IS_IP6_FLOW_ID(m)) { return m->dst_ip6.__u6_addr.__u6_addr32[0] || m->dst_ip6.__u6_addr.__u6_addr32[1] || m->dst_ip6.__u6_addr.__u6_addr32[2] || m->dst_ip6.__u6_addr.__u6_addr32[3] || m->src_ip6.__u6_addr.__u6_addr32[0] || m->src_ip6.__u6_addr.__u6_addr32[1] || m->src_ip6.__u6_addr.__u6_addr32[2] || m->src_ip6.__u6_addr.__u6_addr32[3] || m->flow_id6; } else { return m->dst_ip || m->src_ip; } } /* XXX we may want a better hash function */ static uint32_t flow_id_hash(struct ipfw_flow_id *id) { uint32_t i; if (IS_IP6_FLOW_ID(id)) { uint32_t *d = (uint32_t *)&id->dst_ip6; uint32_t *s = (uint32_t *)&id->src_ip6; i = (d[0] ) ^ (d[1]) ^ (d[2] ) ^ (d[3]) ^ (d[0] >> 15) ^ (d[1] >> 15) ^ (d[2] >> 15) ^ (d[3] >> 15) ^ (s[0] << 1) ^ (s[1] << 1) ^ (s[2] << 1) ^ (s[3] << 1) ^ (s[0] << 16) ^ (s[1] << 16) ^ (s[2] << 16) ^ (s[3] << 16) ^ (id->dst_port << 1) ^ (id->src_port) ^ (id->extra) ^ (id->proto ) ^ (id->flow_id6); } else { i = (id->dst_ip) ^ (id->dst_ip >> 15) ^ (id->src_ip << 1) ^ (id->src_ip >> 16) ^ (id->extra) ^ (id->dst_port << 1) ^ (id->src_port) ^ (id->proto); } return i; } /* Like bcmp, returns 0 if ids match, 1 otherwise. */ static int flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2) { int is_v6 = IS_IP6_FLOW_ID(id1); if (!is_v6) { if (IS_IP6_FLOW_ID(id2)) return 1; /* different address families */ return (id1->dst_ip == id2->dst_ip && id1->src_ip == id2->src_ip && id1->dst_port == id2->dst_port && id1->src_port == id2->src_port && id1->proto == id2->proto && id1->extra == id2->extra) ? 0 : 1; } /* the ipv6 case */ return ( !bcmp(&id1->dst_ip6,&id2->dst_ip6, sizeof(id1->dst_ip6)) && !bcmp(&id1->src_ip6,&id2->src_ip6, sizeof(id1->src_ip6)) && id1->dst_port == id2->dst_port && id1->src_port == id2->src_port && id1->proto == id2->proto && id1->extra == id2->extra && id1->flow_id6 == id2->flow_id6) ? 
0 : 1; } /*--------- end of flow-id mask, hash and compare ---------*/ /*--- support functions for the qht hashtable ---- * Entries are hashed by flow-id */ static uint32_t q_hash(uintptr_t key, int flags, void *arg) { /* compute the hash slot from the flow id */ struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ? &((struct dn_queue *)key)->ni.fid : (struct ipfw_flow_id *)key; return flow_id_hash(id); } static int q_match(void *obj, uintptr_t key, int flags, void *arg) { struct dn_queue *o = (struct dn_queue *)obj; struct ipfw_flow_id *id2; if (flags & DNHT_KEY_IS_OBJ) { /* compare pointers */ id2 = &((struct dn_queue *)key)->ni.fid; } else { id2 = (struct ipfw_flow_id *)key; } return (0 == flow_id_cmp(&o->ni.fid, id2)); } /* * create a new queue instance for the given 'key'. */ static void * q_new(uintptr_t key, int flags, void *arg) { struct dn_queue *q, *template = arg; struct dn_fsk *fs = template->fs; int size = sizeof(*q) + fs->sched->fp->q_datalen; q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO); if (q == NULL) { D("no memory for new queue"); return NULL; } set_oid(&q->ni.oid, DN_QUEUE, size); if (fs->fs.flags & DN_QHT_HASH) q->ni.fid = *(struct ipfw_flow_id *)key; q->fs = fs; q->_si = template->_si; q->_si->q_count++; if (fs->sched->fp->new_queue) fs->sched->fp->new_queue(q); dn_cfg.queue_count++; return q; } /* * Notify schedulers that a queue is going away. * If (flags & DN_DESTROY), also free the packets. * The version for callbacks is called q_delete_cb(). */ static void dn_delete_queue(struct dn_queue *q, int flags) { struct dn_fsk *fs = q->fs; // D("fs %p si %p\n", fs, q->_si); /* notify the parent scheduler that the queue is going away */ if (fs && fs->sched->fp->free_queue) fs->sched->fp->free_queue(q); q->_si->q_count--; q->_si = NULL; if (flags & DN_DESTROY) { if (q->mq.head) dn_free_pkts(q->mq.head); bzero(q, sizeof(*q)); // safety free(q, M_DUMMYNET); dn_cfg.queue_count--; } } static int q_delete_cb(void *q, void *arg) { int flags = (int)(uintptr_t)arg; dn_delete_queue(q, flags); return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0; } /* * calls dn_delete_queue/q_delete_cb on all queues, * which notifies the parent scheduler and possibly drains packets. * flags & DN_DESTROY: drains queues and destroy qht; */ static void qht_delete(struct dn_fsk *fs, int flags) { ND("fs %d start flags %d qht %p", fs->fs.fs_nr, flags, fs->qht); if (!fs->qht) return; if (fs->fs.flags & DN_QHT_HASH) { dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags); if (flags & DN_DESTROY) { dn_ht_free(fs->qht, 0); fs->qht = NULL; } } else { dn_delete_queue((struct dn_queue *)(fs->qht), flags); if (flags & DN_DESTROY) fs->qht = NULL; } } /* * Find and possibly create the queue for a MULTIQUEUE scheduler. * We never call it for !MULTIQUEUE (the queue is in the sch_inst). 
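* (For !MULTIQUEUE schedulers the single queue is allocated immediately * after the dn_sch_inst itself, which is why drain_scheduler_cb() can * reach it as (si+1).)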
*/ struct dn_queue * ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si, struct ipfw_flow_id *id) { struct dn_queue template; template._si = si; template.fs = fs; if (fs->fs.flags & DN_QHT_HASH) { struct ipfw_flow_id masked_id; if (fs->qht == NULL) { fs->qht = dn_ht_init(NULL, fs->fs.buckets, offsetof(struct dn_queue, q_next), q_hash, q_match, q_new); if (fs->qht == NULL) return NULL; } masked_id = *id; flow_id_mask(&fs->fsk_mask, &masked_id); return dn_ht_find(fs->qht, (uintptr_t)&masked_id, DNHT_INSERT, &template); } else { if (fs->qht == NULL) fs->qht = q_new(0, 0, &template); return (struct dn_queue *)fs->qht; } } /*--- end of queue hash table ---*/ /*--- support functions for the sch_inst hashtable ---- * * These are hashed by flow-id */ static uint32_t si_hash(uintptr_t key, int flags, void *arg) { /* compute the hash slot from the flow id */ struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ? &((struct dn_sch_inst *)key)->ni.fid : (struct ipfw_flow_id *)key; return flow_id_hash(id); } static int si_match(void *obj, uintptr_t key, int flags, void *arg) { struct dn_sch_inst *o = obj; struct ipfw_flow_id *id2; id2 = (flags & DNHT_KEY_IS_OBJ) ? &((struct dn_sch_inst *)key)->ni.fid : (struct ipfw_flow_id *)key; return flow_id_cmp(&o->ni.fid, id2) == 0; } /* * create a new instance for the given 'key' * Allocate memory for instance, delay line and scheduler private data. */ static void * si_new(uintptr_t key, int flags, void *arg) { struct dn_schk *s = arg; struct dn_sch_inst *si; int l = sizeof(*si) + s->fp->si_datalen; si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO); if (si == NULL) goto error; /* Set length only for the part passed up to userland. */ set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow)); set_oid(&(si->dline.oid), DN_DELAY_LINE, sizeof(struct delay_line)); /* mark si and dline as outside the event queue */ si->ni.oid.id = si->dline.oid.id = -1; si->sched = s; si->dline.si = si; if (s->fp->new_sched && s->fp->new_sched(si)) { D("new_sched error"); goto error; } if (s->sch.flags & DN_HAVE_MASK) si->ni.fid = *(struct ipfw_flow_id *)key; dn_cfg.si_count++; return si; error: if (si) { bzero(si, sizeof(*si)); // safety free(si, M_DUMMYNET); } return NULL; } /* * Callback from siht to delete all scheduler instances. Remove * si and delay line from the system heap, destroy all queues. * We assume that all flowset have been notified and do not * point to us anymore. */ static int si_destroy(void *_si, void *arg) { struct dn_sch_inst *si = _si; struct dn_schk *s = si->sched; struct delay_line *dl = &si->dline; if (dl->oid.subtype) /* remove delay line from event heap */ heap_extract(&dn_cfg.evheap, dl); dn_free_pkts(dl->mq.head); /* drain delay line */ if (si->kflags & DN_ACTIVE) /* remove si from event heap */ heap_extract(&dn_cfg.evheap, si); if (s->fp->free_sched) s->fp->free_sched(si); bzero(si, sizeof(*si)); /* safety */ free(si, M_DUMMYNET); dn_cfg.si_count--; return DNHT_SCAN_DEL; } /* * Find the scheduler instance for this packet. If we need to apply * a mask, do on a local copy of the flow_id to preserve the original. * Assume siht is always initialized if we have a mask. 
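* (The assumption holds by construction: schk_new() creates siht whenever * DN_HAVE_MASK is set, and fails the whole allocation if dn_ht_init() fails.)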
*/ struct dn_sch_inst * ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id) { if (s->sch.flags & DN_HAVE_MASK) { struct ipfw_flow_id id_t = *id; flow_id_mask(&s->sch.sched_mask, &id_t); return dn_ht_find(s->siht, (uintptr_t)&id_t, DNHT_INSERT, s); } if (!s->siht) s->siht = si_new(0, 0, s); return (struct dn_sch_inst *)s->siht; } /* callback to flush credit for the scheduler instance */ static int si_reset_credit(void *_si, void *arg) { struct dn_sch_inst *si = _si; struct dn_link *p = &si->sched->link; si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0); return 0; } static void schk_reset_credit(struct dn_schk *s) { if (s->sch.flags & DN_HAVE_MASK) dn_ht_scan(s->siht, si_reset_credit, NULL); else if (s->siht) si_reset_credit(s->siht, NULL); } /*---- end of sch_inst hashtable ---------------------*/ /*------------------------------------------------------- * flowset hash (fshash) support. Entries are hashed by fs_nr. * New allocations are put in the fsunlinked list, from which * they are removed when they point to a specific scheduler. */ static uint32_t fsk_hash(uintptr_t key, int flags, void *arg) { uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_fsk *)key)->fs.fs_nr; return ( (i>>8)^(i>>4)^i ); } static int fsk_match(void *obj, uintptr_t key, int flags, void *arg) { struct dn_fsk *fs = obj; int i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_fsk *)key)->fs.fs_nr; return (fs->fs.fs_nr == i); } static void * fsk_new(uintptr_t key, int flags, void *arg) { struct dn_fsk *fs; fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO); if (fs) { set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs)); dn_cfg.fsk_count++; fs->drain_bucket = 0; SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain); } return fs; } /* * detach flowset from its current scheduler. Flags as follows: * DN_DETACH removes from the fsk_list * DN_DESTROY deletes individual queues * DN_DELETE_FS destroys the flowset (otherwise goes in unlinked). */ static void fsk_detach(struct dn_fsk *fs, int flags) { if (flags & DN_DELETE_FS) flags |= DN_DESTROY; ND("fs %d from sched %d flags %s %s %s", fs->fs.fs_nr, fs->fs.sched_nr, (flags & DN_DELETE_FS) ? "DEL_FS":"", (flags & DN_DESTROY) ? "DEL":"", (flags & DN_DETACH) ? "DET":""); if (flags & DN_DETACH) { /* detach from the list */ struct dn_fsk_head *h; h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu; SLIST_REMOVE(h, fs, dn_fsk, sch_chain); } /* Free the RED parameters, they will be recomputed on * subsequent attach if needed. */ if (fs->w_q_lookup) free(fs->w_q_lookup, M_DUMMYNET); fs->w_q_lookup = NULL; qht_delete(fs, flags); if (fs->sched && fs->sched->fp->free_fsk) fs->sched->fp->free_fsk(fs); fs->sched = NULL; if (flags & DN_DELETE_FS) { bzero(fs, sizeof(*fs)); /* safety */ free(fs, M_DUMMYNET); dn_cfg.fsk_count--; } else { SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain); } } /* * Detach or destroy all flowsets in a list. * flags specifies what to do: * DN_DESTROY: flush all queues * DN_DELETE_FS: DN_DESTROY + destroy flowset * DN_DELETE_FS implies DN_DESTROY */ static void fsk_detach_list(struct dn_fsk_head *h, int flags) { struct dn_fsk *fs; int n = 0; /* only for stats */ ND("head %p flags %x", h, flags); while ((fs = SLIST_FIRST(h))) { SLIST_REMOVE_HEAD(h, sch_chain); n++; fsk_detach(fs, flags); } ND("done %d flowsets", n); } /* * called on 'queue X delete' -- removes the flowset from fshash, * deletes all queues for the flowset, and removes the flowset. 
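* Pass locked != 0 when the caller already holds the dummynet writer lock * (e.g. delete_schk() deleting the internal flowset).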
*/ static int delete_fs(int i, int locked) { struct dn_fsk *fs; int err = 0; if (!locked) DN_BH_WLOCK(); fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL); ND("fs %d found %p", i, fs); if (fs) { fsk_detach(fs, DN_DETACH | DN_DELETE_FS); err = 0; } else err = EINVAL; if (!locked) DN_BH_WUNLOCK(); return err; } /*----- end of flowset hashtable support -------------*/ /*------------------------------------------------------------ * Scheduler hash. When searching by index we pass sched_nr, * otherwise we pass struct dn_sch * which is the first field in * struct dn_schk so we can cast between the two. We use this trick * in the create phase (but it should be fixed). */ static uint32_t schk_hash(uintptr_t key, int flags, void *_arg) { uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_schk *)key)->sch.sched_nr; return ( (i>>8)^(i>>4)^i ); } static int schk_match(void *obj, uintptr_t key, int flags, void *_arg) { struct dn_schk *s = (struct dn_schk *)obj; int i = !(flags & DNHT_KEY_IS_OBJ) ? key : ((struct dn_schk *)key)->sch.sched_nr; return (s->sch.sched_nr == i); } /* * Create the entry and initialize with the sched hash if needed. * Leave s->fp unset so we can tell whether a dn_ht_find() returns * a new object or a previously existing one. */ static void * schk_new(uintptr_t key, int flags, void *arg) { struct schk_new_arg *a = arg; struct dn_schk *s; int l = sizeof(*s) +a->fp->schk_datalen; s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO); if (s == NULL) return NULL; set_oid(&s->link.oid, DN_LINK, sizeof(s->link)); s->sch = *a->sch; // copy initial values s->link.link_nr = s->sch.sched_nr; SLIST_INIT(&s->fsk_list); /* initialize the hash table or create the single instance */ s->fp = a->fp; /* si_new needs this */ s->drain_bucket = 0; if (s->sch.flags & DN_HAVE_MASK) { s->siht = dn_ht_init(NULL, s->sch.buckets, offsetof(struct dn_sch_inst, si_next), si_hash, si_match, si_new); if (s->siht == NULL) { free(s, M_DUMMYNET); return NULL; } } s->fp = NULL; /* mark as a new scheduler */ dn_cfg.schk_count++; return s; } /* * Callback for sched delete. Notify all attached flowsets to * detach from the scheduler, destroy the internal flowset, and * all instances. The scheduler goes away too. * arg is 0 (only detach flowsets and destroy instances) * DN_DESTROY (detach & delete queues, delete schk) * or DN_DELETE_FS (delete queues and flowsets, delete schk) */ static int schk_delete_cb(void *obj, void *arg) { struct dn_schk *s = obj; #if 0 int a = (int)arg; ND("sched %d arg %s%s", s->sch.sched_nr, a&DN_DESTROY ? "DEL ":"", a&DN_DELETE_FS ? "DEL_FS":""); #endif fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0); /* no more flowsets pointing to us now */ if (s->sch.flags & DN_HAVE_MASK) { dn_ht_scan(s->siht, si_destroy, NULL); dn_ht_free(s->siht, 0); } else if (s->siht) si_destroy(s->siht, NULL); if (s->profile) { free(s->profile, M_DUMMYNET); s->profile = NULL; } s->siht = NULL; if (s->fp->destroy) s->fp->destroy(s); bzero(s, sizeof(*s)); // safety free(obj, M_DUMMYNET); dn_cfg.schk_count--; return DNHT_SCAN_DEL; } /* * called on a 'sched X delete' command. Deletes a single scheduler. * This is done by removing from the schedhash, unlinking all * flowsets and deleting their traffic.
*/ static int delete_schk(int i) { struct dn_schk *s; s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL); ND("%d %p", i, s); if (!s) return EINVAL; delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */ /* then detach flowsets, delete traffic */ schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY); return 0; } /*--- end of schk hashtable support ---*/ static int copy_obj(char **start, char *end, void *_o, const char *msg, int i) { struct dn_id *o = _o; int have = end - *start; if (have < o->len || o->len == 0 || o->type == 0) { D("(WARN) type %d %s %d have %d need %d", o->type, msg, i, have, o->len); return 1; } ND("type %d %s %d len %d", o->type, msg, i, o->len); bcopy(_o, *start, o->len); if (o->type == DN_LINK) { /* Adjust burst parameter for link */ struct dn_link *l = (struct dn_link *)*start; l->burst = div64(l->burst, 8 * hz); l->delay = l->delay * 1000 / hz; } else if (o->type == DN_SCH) { /* Set id->id to the number of instances */ struct dn_schk *s = _o; struct dn_id *id = (struct dn_id *)(*start); id->id = (s->sch.flags & DN_HAVE_MASK) ? dn_ht_entries(s->siht) : (s->siht ? 1 : 0); } *start += o->len; return 0; } /* Specific function to copy a queue. * Copies only the user-visible part of a queue (which is in * a struct dn_flow), and sets len accordingly. */ static int copy_obj_q(char **start, char *end, void *_o, const char *msg, int i) { struct dn_id *o = _o; int have = end - *start; int len = sizeof(struct dn_flow); /* see above comment */ if (have < len || o->len == 0 || o->type != DN_QUEUE) { D("ERROR type %d %s %d have %d need %d", o->type, msg, i, have, len); return 1; } ND("type %d %s %d len %d", o->type, msg, i, len); bcopy(_o, *start, len); ((struct dn_id*)(*start))->len = len; *start += len; return 0; } static int copy_q_cb(void *obj, void *arg) { struct dn_queue *q = obj; struct copy_args *a = arg; struct dn_flow *ni = (struct dn_flow *)(*a->start); if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1)) return DNHT_SCAN_END; ni->oid.type = DN_FLOW; /* override the DN_QUEUE */ ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL); return 0; } static int copy_q(struct copy_args *a, struct dn_fsk *fs, int flags) { if (!fs->qht) return 0; if (fs->fs.flags & DN_QHT_HASH) dn_ht_scan(fs->qht, copy_q_cb, a); else copy_q_cb(fs->qht, a); return 0; } /* * This routine only copies the initial part of a profile ? XXX */ static int copy_profile(struct copy_args *a, struct dn_profile *p) { int have = a->end - *a->start; /* XXX here we check for max length */ int profile_len = sizeof(struct dn_profile) - ED_MAX_SAMPLES_NO*sizeof(int); if (p == NULL) return 0; if (have < profile_len) { D("error have %d need %d", have, profile_len); return 1; } bcopy(p, *a->start, profile_len); ((struct dn_id *)(*a->start))->len = profile_len; *a->start += profile_len; return 0; } static int copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags) { struct dn_fs *ufs = (struct dn_fs *)(*a->start); if (!fs) return 0; ND("flowset %d", fs->fs.fs_nr); if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr)) return DNHT_SCAN_END; ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ? dn_ht_entries(fs->qht) : (fs->qht ? 
1 : 0); if (flags) { /* copy queues */ copy_q(a, fs, 0); } return 0; } static int copy_si_cb(void *obj, void *arg) { struct dn_sch_inst *si = obj; struct copy_args *a = arg; struct dn_flow *ni = (struct dn_flow *)(*a->start); if (copy_obj(a->start, a->end, &si->ni, "inst", si->sched->sch.sched_nr)) return DNHT_SCAN_END; ni->oid.type = DN_FLOW; /* override the DN_SCH_I */ ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL); return 0; } static int copy_si(struct copy_args *a, struct dn_schk *s, int flags) { if (s->sch.flags & DN_HAVE_MASK) dn_ht_scan(s->siht, copy_si_cb, a); else if (s->siht) copy_si_cb(s->siht, a); return 0; } /* * compute a list of children of a scheduler and copy up */ static int copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags) { struct dn_fsk *fs; struct dn_id *o; uint32_t *p; int n = 0, space = sizeof(*o); SLIST_FOREACH(fs, &s->fsk_list, sch_chain) { if (fs->fs.fs_nr < DN_MAX_ID) n++; } space += n * sizeof(uint32_t); DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n); if (a->end - *(a->start) < space) return DNHT_SCAN_END; o = (struct dn_id *)(*(a->start)); o->len = space; *a->start += o->len; o->type = DN_TEXT; p = (uint32_t *)(o+1); SLIST_FOREACH(fs, &s->fsk_list, sch_chain) if (fs->fs.fs_nr < DN_MAX_ID) *p++ = fs->fs.fs_nr; return 0; } static int copy_data_helper(void *_o, void *_arg) { struct copy_args *a = _arg; uint32_t *r = a->extra->r; /* start of first range */ uint32_t *lim; /* first invalid pointer */ int n; lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len); if (a->type == DN_LINK || a->type == DN_SCH) { /* pipe|sched show, we receive a dn_schk */ struct dn_schk *s = _o; n = s->sch.sched_nr; if (a->type == DN_SCH && n >= DN_MAX_ID) return 0; /* not a scheduler */ if (a->type == DN_LINK && n <= DN_MAX_ID) return 0; /* not a pipe */ /* see if the object is within one of our ranges */ for (;r < lim; r += 2) { if (n < r[0] || n > r[1]) continue; /* Found a valid entry, copy and we are done */ if (a->flags & DN_C_LINK) { if (copy_obj(a->start, a->end, &s->link, "link", n)) return DNHT_SCAN_END; if (copy_profile(a, s->profile)) return DNHT_SCAN_END; if (copy_flowset(a, s->fs, 0)) return DNHT_SCAN_END; } if (a->flags & DN_C_SCH) { if (copy_obj(a->start, a->end, &s->sch, "sched", n)) return DNHT_SCAN_END; /* list all attached flowsets */ if (copy_fsk_list(a, s, 0)) return DNHT_SCAN_END; } if (a->flags & DN_C_FLOW) copy_si(a, s, 0); break; } } else if (a->type == DN_FS) { /* queue show, skip internal flowsets */ struct dn_fsk *fs = _o; n = fs->fs.fs_nr; if (n >= DN_MAX_ID) return 0; /* see if the object is within one of our ranges */ for (;r < lim; r += 2) { if (n < r[0] || n > r[1]) continue; if (copy_flowset(a, fs, 0)) return DNHT_SCAN_END; copy_q(a, fs, 0); break; /* we are done */ } } return 0; } static inline struct dn_schk * locate_scheduler(int i) { return dn_ht_find(dn_cfg.schedhash, i, 0, NULL); } /* * red parameters are in fixed point arithmetic. */ static int config_red(struct dn_fsk *fs) { int64_t s, idle, weight, w0; int t, i; fs->w_q = fs->fs.w_q; fs->max_p = fs->fs.max_p; ND("called"); /* Doing stuff that was in userland */ i = fs->sched->link.bandwidth; s = (i <= 0) ? 
0 : hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i; idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */ fs->lookup_step = div64(idle , dn_cfg.red_lookup_depth); /* fs->lookup_step not scaled, */ if (!fs->lookup_step) fs->lookup_step = 1; w0 = weight = SCALE(1) - fs->w_q; //fs->w_q scaled for (t = fs->lookup_step; t > 1; --t) weight = SCALE_MUL(weight, w0); fs->lookup_weight = (int)(weight); // scaled /* Now doing stuff that was in kerneland */ fs->min_th = SCALE(fs->fs.min_th); fs->max_th = SCALE(fs->fs.max_th); if (fs->fs.max_th == fs->fs.min_th) fs->c_1 = fs->max_p; else fs->c_1 = SCALE((int64_t)(fs->max_p)) / (fs->fs.max_th - fs->fs.min_th); fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th)); if (fs->fs.flags & DN_IS_GENTLE_RED) { fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th; fs->c_4 = SCALE(1) - 2 * fs->max_p; } /* If the lookup table already exists, free and create it again. */ if (fs->w_q_lookup) { free(fs->w_q_lookup, M_DUMMYNET); fs->w_q_lookup = NULL; } if (dn_cfg.red_lookup_depth == 0) { printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth" " must be > 0\n"); fs->fs.flags &= ~DN_IS_RED; fs->fs.flags &= ~DN_IS_GENTLE_RED; return (EINVAL); } fs->lookup_depth = dn_cfg.red_lookup_depth; fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int), M_DUMMYNET, M_NOWAIT); if (fs->w_q_lookup == NULL) { printf("dummynet: sorry, cannot allocate red lookup table\n"); fs->fs.flags &= ~DN_IS_RED; fs->fs.flags &= ~DN_IS_GENTLE_RED; return(ENOSPC); } /* Fill the lookup table with (1 - w_q)^x */ fs->w_q_lookup[0] = SCALE(1) - fs->w_q; for (i = 1; i < fs->lookup_depth; i++) fs->w_q_lookup[i] = SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight); if (dn_cfg.red_avg_pkt_size < 1) dn_cfg.red_avg_pkt_size = 512; fs->avg_pkt_size = dn_cfg.red_avg_pkt_size; if (dn_cfg.red_max_pkt_size < 1) dn_cfg.red_max_pkt_size = 1500; fs->max_pkt_size = dn_cfg.red_max_pkt_size; ND("exit"); return 0; } /* Scan all flowsets attached to this scheduler and update red */ static void update_red(struct dn_schk *s) { struct dn_fsk *fs; SLIST_FOREACH(fs, &s->fsk_list, sch_chain) { if (fs && (fs->fs.flags & DN_IS_RED)) config_red(fs); } } /* attach flowset to scheduler s, possibly requeue */ static void fsk_attach(struct dn_fsk *fs, struct dn_schk *s) { ND("remove fs %d from fsunlinked, link to sched %d", fs->fs.fs_nr, s->sch.sched_nr); SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain); fs->sched = s; SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain); if (s->fp->new_fsk) s->fp->new_fsk(fs); /* XXX compute fsk_mask */ fs->fsk_mask = fs->fs.flow_mask; if (fs->sched->sch.flags & DN_HAVE_MASK) flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask); if (fs->qht) { /* * we must drain qht according to the old * type, and reinsert according to the new one. * The requeue is complex -- in general we need to * reclassify every single packet. * For the time being, let's hope qht is never set * when we reach this point. */ D("XXX TODO requeue from fs %d to sch %d", fs->fs.fs_nr, s->sch.sched_nr); fs->qht = NULL; } /* set the new type for qht */ if (nonzero_mask(&fs->fsk_mask)) fs->fs.flags |= DN_QHT_HASH; else fs->fs.flags &= ~DN_QHT_HASH; /* XXX config_red() can fail...
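on failure it clears DN_IS_RED/DN_IS_GENTLE_RED, so the flowset silently degrades to a plain taildrop queue; the error is not propagated from here.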
*/ if (fs->fs.flags & DN_IS_RED) config_red(fs); } /* update all flowsets which may refer to this scheduler */ static void update_fs(struct dn_schk *s) { struct dn_fsk *fs, *tmp; SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) { if (s->sch.sched_nr != fs->fs.sched_nr) { D("fs %d for sch %d not %d still unlinked", fs->fs.fs_nr, fs->fs.sched_nr, s->sch.sched_nr); continue; } fsk_attach(fs, s); } }
/*
 * Configuration -- to preserve backward compatibility we use
 * the following scheme (N is 65536)
 *	NUMBER		SCHED	LINK	FLOWSET
 *	1 .. N-1	(1)WFQ	(2)WFQ	(3)queue
 *	N+1 .. 2N-1	(4)FIFO	(5)FIFO	(6)FIFO for sched 1..N-1
 *	2N+1 .. 3N-1	--	--	(7)FIFO for sched N+1..2N-1
 *
 * "pipe i config" configures #1, #2 and #3
 * "sched i config" configures #1 and possibly #6
 * "queue i config" configures #3
 * #1 is configured with 'pipe i config' or 'sched i config'
 * #2 is configured with 'pipe i config', and created if not
 *	existing with 'sched i config'
 * #3 is configured with 'queue i config'
 * #4 is automatically configured after #1, can only be FIFO
 * #5 is automatically configured after #2
 * #6 is automatically created when #1 is !MULTIQUEUE,
 *	and can be updated.
 * #7 is automatically configured after #2
 */
/* * configure a link (and its FIFO instance) */ static int config_link(struct dn_link *p, struct dn_id *arg) { int i; if (p->oid.len != sizeof(*p)) { D("invalid pipe len %d", p->oid.len); return EINVAL; } i = p->link_nr; if (i <= 0 || i >= DN_MAX_ID) return EINVAL; /* * The config program passes parameters as follows: * bw = bits/second (0 means no limits), * delay = ms, must be translated into ticks. * qsize = slots/bytes * burst ??? */ p->delay = (p->delay * hz) / 1000; /* Scale burst size: bytes -> bits * hz */ p->burst *= 8 * hz; DN_BH_WLOCK(); /* do it twice, base link and FIFO link */ for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) { struct dn_schk *s = locate_scheduler(i); if (s == NULL) { DN_BH_WUNLOCK(); D("sched %d not found", i); return EINVAL; } /* remove profile if exists */ if (s->profile) { free(s->profile, M_DUMMYNET); s->profile = NULL; } /* copy all parameters */ s->link.oid = p->oid; s->link.link_nr = i; s->link.delay = p->delay; if (s->link.bandwidth != p->bandwidth) { /* XXX bandwidth changes, need to update red params */ s->link.bandwidth = p->bandwidth; update_red(s); } s->link.burst = p->burst; schk_reset_credit(s); } dn_cfg.id++; DN_BH_WUNLOCK(); return 0; } /* * configure a flowset. Can be called from inside with locked=1. */ static struct dn_fsk * config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked) { int i; struct dn_fsk *fs; if (nfs->oid.len != sizeof(*nfs)) { D("invalid flowset len %d", nfs->oid.len); return NULL; } i = nfs->fs_nr; if (i <= 0 || i >= 3*DN_MAX_ID) return NULL; ND("flowset %d", i); /* XXX other sanity checks */ if (nfs->flags & DN_QSIZE_BYTES) { ipdn_bound_var(&nfs->qsize, 16384, 1500, dn_cfg.byte_limit, NULL); // "queue byte size"); } else { ipdn_bound_var(&nfs->qsize, 50, 1, dn_cfg.slot_limit, NULL); // "queue slot size"); } if (nfs->flags & DN_HAVE_MASK) { /* make sure we have some buckets */ ipdn_bound_var((int *)&nfs->buckets, dn_cfg.hash_size, 1, dn_cfg.max_hash_size, "flowset buckets"); } else { nfs->buckets = 1; /* we only need 1 */ } if (!locked) DN_BH_WLOCK(); do { /* exit with break when done */ struct dn_schk *s; int flags = nfs->sched_nr ?
DNHT_INSERT : 0; int j; int oldc = dn_cfg.fsk_count; fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL); if (fs == NULL) { D("missing sched for flowset %d", i); break; } /* grab some defaults from the existing one */ if (nfs->sched_nr == 0) /* reuse */ nfs->sched_nr = fs->fs.sched_nr; for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) { if (nfs->par[j] == -1) /* reuse */ nfs->par[j] = fs->fs.par[j]; } if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) { ND("flowset %d unchanged", i); break; /* no change, nothing to do */ } if (oldc != dn_cfg.fsk_count) /* new item */ dn_cfg.id++; s = locate_scheduler(nfs->sched_nr); /* detach from old scheduler if needed, preserving * queues if we need to reattach. Then update the * configuration, and possibly attach to the new sched. */ DX(2, "fs %d changed sched %d@%p to %d@%p", fs->fs.fs_nr, fs->fs.sched_nr, fs->sched, nfs->sched_nr, s); if (fs->sched) { int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY); flags |= DN_DESTROY; /* XXX temporary */ fsk_detach(fs, flags); } fs->fs = *nfs; /* copy configuration */ if (s != NULL) fsk_attach(fs, s); } while (0); if (!locked) DN_BH_WUNLOCK(); return fs; } /* * config/reconfig a scheduler and its FIFO variant. * For !MULTIQUEUE schedulers, also set up the flowset. * * On reconfigurations (detected because s->fp is set), * detach existing flowsets preserving traffic, preserve link, * and delete the old scheduler creating a new one. */ static int config_sched(struct dn_sch *_nsch, struct dn_id *arg) { struct dn_schk *s; struct schk_new_arg a; /* argument for schk_new */ int i; struct dn_link p; /* copy of oldlink */ struct dn_profile *pf = NULL; /* copy of old link profile */ /* Used to preserv mask parameter */ struct ipfw_flow_id new_mask; int new_buckets = 0; int new_flags = 0; int pipe_cmd; int err = ENOMEM; a.sch = _nsch; if (a.sch->oid.len != sizeof(*a.sch)) { D("bad sched len %d", a.sch->oid.len); return EINVAL; } i = a.sch->sched_nr; if (i <= 0 || i >= DN_MAX_ID) return EINVAL; /* make sure we have some buckets */ if (a.sch->flags & DN_HAVE_MASK) ipdn_bound_var((int *)&a.sch->buckets, dn_cfg.hash_size, 1, dn_cfg.max_hash_size, "sched buckets"); /* XXX other sanity checks */ bzero(&p, sizeof(p)); pipe_cmd = a.sch->flags & DN_PIPE_CMD; a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if is not set? if (pipe_cmd) { /* Copy mask parameter */ new_mask = a.sch->sched_mask; new_buckets = a.sch->buckets; new_flags = a.sch->flags; } DN_BH_WLOCK(); again: /* run twice, for wfq and fifo */ /* * lookup the type. If not supplied, use the previous one * or default to WF2Q+. Otherwise, return an error. */ dn_cfg.id++; a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name); if (a.fp != NULL) { /* found. Lookup or create entry */ s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a); } else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) { /* No type. search existing s* or retry with WF2Q+ */ s = dn_ht_find(dn_cfg.schedhash, i, 0, &a); if (s != NULL) { a.fp = s->fp; /* Scheduler exists, skip to FIFO scheduler * if command was pipe config... */ if (pipe_cmd) goto next; } else { /* New scheduler, create a wf2q+ with no mask * if command was pipe config... 
*/ if (pipe_cmd) { /* clear mask parameter */ bzero(&a.sch->sched_mask, sizeof(new_mask)); a.sch->buckets = 0; a.sch->flags &= ~DN_HAVE_MASK; } a.sch->oid.subtype = DN_SCHED_WF2QP; goto again; } } else { D("invalid scheduler type %d %s", a.sch->oid.subtype, a.sch->name); err = EINVAL; goto error; } /* normalize name and subtype */ a.sch->oid.subtype = a.fp->type; bzero(a.sch->name, sizeof(a.sch->name)); strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name)); if (s == NULL) { D("cannot allocate scheduler %d", i); goto error; } /* restore existing link if any */ if (p.link_nr) { s->link = p; if (!pf || pf->link_nr != p.link_nr) { /* no saved value */ s->profile = NULL; /* XXX maybe not needed */ } else { s->profile = malloc(sizeof(struct dn_profile), M_DUMMYNET, M_NOWAIT | M_ZERO); if (s->profile == NULL) { D("cannot allocate profile"); goto error; //XXX } bcopy(pf, s->profile, sizeof(*pf)); } } p.link_nr = 0; if (s->fp == NULL) { DX(2, "sched %d new type %s", i, a.fp->name); } else if (s->fp != a.fp || bcmp(a.sch, &s->sch, sizeof(*a.sch)) ) { /* already existing. */ DX(2, "sched %d type changed from %s to %s", i, s->fp->name, a.fp->name); DX(4, " type/sub %d/%d -> %d/%d", s->sch.oid.type, s->sch.oid.subtype, a.sch->oid.type, a.sch->oid.subtype); if (s->link.link_nr == 0) D("XXX WARNING link 0 for sched %d", i); p = s->link; /* preserve link */ if (s->profile) {/* preserve profile */ if (!pf) pf = malloc(sizeof(*pf), M_DUMMYNET, M_NOWAIT | M_ZERO); if (pf) /* XXX should issue a warning otherwise */ bcopy(s->profile, pf, sizeof(*pf)); } /* remove from the hash */ dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL); /* Detach flowsets, preserve queues. */ // schk_delete_cb(s, NULL); // XXX temporarily, kill queues schk_delete_cb(s, (void *)DN_DESTROY); goto again; } else { DX(4, "sched %d unchanged type %s", i, a.fp->name); } /* complete initialization */ s->sch = *a.sch; s->fp = a.fp; s->cfg = arg; // XXX schk_reset_credit(s); /* create the internal flowset if needed, * trying to reuse existing ones if available */ if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) { s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL); if (!s->fs) { struct dn_fs fs; bzero(&fs, sizeof(fs)); set_oid(&fs.oid, DN_FS, sizeof(fs)); fs.fs_nr = i + DN_MAX_ID; fs.sched_nr = i; s->fs = config_fs(&fs, NULL, 1 /* locked */); } if (!s->fs) { schk_delete_cb(s, (void *)DN_DESTROY); D("error creating internal fs for %d", i); goto error; } } /* call init function after the flowset is created */ if (s->fp->config) s->fp->config(s); update_fs(s); next: if (i < DN_MAX_ID) { /* now configure the FIFO instance */ i += DN_MAX_ID; if (pipe_cmd) { /* Restore mask parameter for FIFO */ a.sch->sched_mask = new_mask; a.sch->buckets = new_buckets; a.sch->flags = new_flags; } else { /* sched config shouldn't modify the FIFO scheduler */ if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) { /* FIFO already exist, don't touch it */ err = 0; /* and this is not an error */ goto error; } } a.sch->sched_nr = i; a.sch->oid.subtype = DN_SCHED_FIFO; bzero(a.sch->name, sizeof(a.sch->name)); goto again; } err = 0; error: DN_BH_WUNLOCK(); if (pf) free(pf, M_DUMMYNET); return err; } /* * attach a profile to a link */ static int config_profile(struct dn_profile *pf, struct dn_id *arg) { struct dn_schk *s; int i, olen, err = 0; if (pf->oid.len < sizeof(*pf)) { D("short profile len %d", pf->oid.len); return EINVAL; } i = pf->link_nr; if (i <= 0 || i >= DN_MAX_ID) return EINVAL; /* XXX other sanity checks */ DN_BH_WLOCK(); for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) { s 
= locate_scheduler(i); if (s == NULL) { err = EINVAL; break; } dn_cfg.id++; /* * If we had a profile and the new one does not fit, * or it is deleted, then we need to free memory. */ if (s->profile && (pf->samples_no == 0 || s->profile->oid.len < pf->oid.len)) { free(s->profile, M_DUMMYNET); s->profile = NULL; } if (pf->samples_no == 0) continue; /* * new profile, possibly allocate memory * and copy data. */ if (s->profile == NULL) s->profile = malloc(pf->oid.len, M_DUMMYNET, M_NOWAIT | M_ZERO); if (s->profile == NULL) { D("no memory for profile %d", i); err = ENOMEM; break; } /* preserve larger length XXX double check */ olen = s->profile->oid.len; if (olen < pf->oid.len) olen = pf->oid.len; bcopy(pf, s->profile, pf->oid.len); s->profile->oid.len = olen; } DN_BH_WUNLOCK(); return err; } /* * Delete all objects: */ static void dummynet_flush(void) { /* delete all schedulers and related links/queues/flowsets */ dn_ht_scan(dn_cfg.schedhash, schk_delete_cb, (void *)(uintptr_t)DN_DELETE_FS); /* delete all remaining (unlinked) flowsets */ DX(4, "still %d unlinked fs", dn_cfg.fsk_count); dn_ht_free(dn_cfg.fshash, DNHT_REMOVE); fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS); /* Reinitialize system heap... */ heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id)); } /* * Main handler for configuration. We are guaranteed to be called * with an oid which is at least a dn_id. * - the first object is the command (config, delete, flush, ...) * - config_link must be issued after the corresponding config_sched * - parameters (DN_TXT) for an object must precede the object * processed on a config_sched. */ int do_config(void *p, int l) { struct dn_id *next, *o; int err = 0, err2 = 0; struct dn_id *arg = NULL; uintptr_t *a; o = p; if (o->id != DN_API_VERSION) { D("invalid api version got %d need %d", o->id, DN_API_VERSION); return EINVAL; } for (; l >= sizeof(*o); o = next) { struct dn_id *prev = arg; if (o->len < sizeof(*o) || l < o->len) { D("bad len o->len %d len %d", o->len, l); err = EINVAL; break; } l -= o->len; next = (struct dn_id *)((char *)o + o->len); err = 0; switch (o->type) { default: D("cmd %d not implemented", o->type); break; #ifdef EMULATE_SYSCTL /* sysctl emulation. * if we recognize the command, jump to the correct * handler and return */ case DN_SYSCTL_SET: err = kesysctl_emu_set(p, l); return err; #endif case DN_CMD_CONFIG: /* simply a header */ break; case DN_CMD_DELETE: /* the argument is in the first uintptr_t after o */ a = (uintptr_t *)(o+1); if (o->len < sizeof(*o) + sizeof(*a)) { err = EINVAL; break; } switch (o->subtype) { case DN_LINK: /* delete base and derived schedulers */ DN_BH_WLOCK(); err = delete_schk(*a); err2 = delete_schk(*a + DN_MAX_ID); DN_BH_WUNLOCK(); if (!err) err = err2; break; default: D("invalid delete type %d", o->subtype); err = EINVAL; break; case DN_FS: err = (*a <1 || *a >= DN_MAX_ID) ?
EINVAL : delete_fs(*a, 0) ; break; } break; case DN_CMD_FLUSH: DN_BH_WLOCK(); dummynet_flush(); DN_BH_WUNLOCK(); break; case DN_TEXT: /* store argument the next block */ prev = NULL; arg = o; break; case DN_LINK: err = config_link((struct dn_link *)o, arg); break; case DN_PROFILE: err = config_profile((struct dn_profile *)o, arg); break; case DN_SCH: err = config_sched((struct dn_sch *)o, arg); break; case DN_FS: err = (NULL==config_fs((struct dn_fs *)o, arg, 0)); break; } if (prev) arg = NULL; if (err != 0) break; } return err; } static int compute_space(struct dn_id *cmd, struct copy_args *a) { int x = 0, need = 0; int profile_size = sizeof(struct dn_profile) - ED_MAX_SAMPLES_NO*sizeof(int); /* NOTE about compute space: * NP = dn_cfg.schk_count * NSI = dn_cfg.si_count * NF = dn_cfg.fsk_count * NQ = dn_cfg.queue_count * - ipfw pipe show * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler * link, scheduler template, flowset * integrated in scheduler and header * for flowset list * (NSI)*(dn_flow) all scheduler instance (includes * the queue instance) * - ipfw sched show * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler * link, scheduler template, flowset * integrated in scheduler and header * for flowset list * (NSI * dn_flow) all scheduler instances * (NF * sizeof(uint_32)) space for flowset list linked to scheduler * (NQ * dn_queue) all queue [XXXfor now not listed] * - ipfw queue show * (NF * dn_fs) all flowset * (NQ * dn_queue) all queues */ switch (cmd->subtype) { default: return -1; /* XXX where do LINK and SCH differ ? */ /* 'ipfw sched show' could list all queues associated to * a scheduler. This feature for now is disabled */ case DN_LINK: /* pipe show */ x = DN_C_LINK | DN_C_SCH | DN_C_FLOW; need += dn_cfg.schk_count * (sizeof(struct dn_fs) + profile_size) / 2; need += dn_cfg.fsk_count * sizeof(uint32_t); break; case DN_SCH: /* sched show */ need += dn_cfg.schk_count * (sizeof(struct dn_fs) + profile_size) / 2; need += dn_cfg.fsk_count * sizeof(uint32_t); x = DN_C_SCH | DN_C_LINK | DN_C_FLOW; break; case DN_FS: /* queue show */ x = DN_C_FS | DN_C_QUEUE; break; case DN_GET_COMPAT: /* compatibility mode */ need = dn_compat_calc_size(); break; } a->flags = x; if (x & DN_C_SCH) { need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2; /* NOT also, each fs might be attached to a sched */ need += dn_cfg.schk_count * sizeof(struct dn_id) / 2; } if (x & DN_C_FS) need += dn_cfg.fsk_count * sizeof(struct dn_fs); if (x & DN_C_LINK) { need += dn_cfg.schk_count * sizeof(struct dn_link) / 2; } /* * When exporting a queue to userland, only pass up the * struct dn_flow, which is the only visible part. */ if (x & DN_C_QUEUE) need += dn_cfg.queue_count * sizeof(struct dn_flow); if (x & DN_C_FLOW) need += dn_cfg.si_count * (sizeof(struct dn_flow)); return need; } /* * If compat != NULL dummynet_get is called in compatibility mode. 
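* (Compatibility mode is entered from ip_dummynet_compat() when one of the * legacy sockopts such as IP_DUMMYNET_GET is used.)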
* *compat will be the pointer to the buffer to pass to ipfw */ int dummynet_get(struct sockopt *sopt, void **compat) { int have, i, need, error; char *start = NULL, *buf; size_t sopt_valsize; struct dn_id *cmd; struct copy_args a; struct copy_range r; int l = sizeof(struct dn_id); bzero(&a, sizeof(a)); bzero(&r, sizeof(r)); /* save and restore original sopt_valsize around copyin */ sopt_valsize = sopt->sopt_valsize; cmd = &r.o; if (!compat) { /* copy at least an oid, and possibly a full object */ error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd)); sopt->sopt_valsize = sopt_valsize; if (error) goto done; l = cmd->len; #ifdef EMULATE_SYSCTL /* sysctl emulation. */ if (cmd->type == DN_SYSCTL_GET) return kesysctl_emu_get(sopt); #endif if (l > sizeof(r)) { /* request larger than default, allocate buffer */ cmd = malloc(l, M_DUMMYNET, M_WAITOK); error = sooptcopyin(sopt, cmd, l, l); sopt->sopt_valsize = sopt_valsize; if (error) goto done; } } else { /* compatibility */ error = 0; cmd->type = DN_CMD_GET; cmd->len = sizeof(struct dn_id); cmd->subtype = DN_GET_COMPAT; // cmd->id = sopt_valsize; D("compatibility mode"); } a.extra = (struct copy_range *)cmd; if (cmd->len == sizeof(*cmd)) { /* no range, create a default */ uint32_t *rp = (uint32_t *)(cmd + 1); cmd->len += 2* sizeof(uint32_t); rp[0] = 1; rp[1] = DN_MAX_ID - 1; if (cmd->subtype == DN_LINK) { rp[0] += DN_MAX_ID; rp[1] += DN_MAX_ID; } } /* Count space (under lock) and allocate (outside lock). * Exit with lock held if we manage to get enough buffer. * Try a few times then give up. */ for (have = 0, i = 0; i < 10; i++) { DN_BH_WLOCK(); need = compute_space(cmd, &a); /* if there is a range, ignore value from compute_space() */ if (l > sizeof(*cmd)) need = sopt_valsize - sizeof(*cmd); if (need < 0) { DN_BH_WUNLOCK(); error = EINVAL; goto done; } need += sizeof(*cmd); cmd->id = need; if (have >= need) break; DN_BH_WUNLOCK(); if (start) free(start, M_DUMMYNET); start = NULL; if (need > sopt_valsize) break; have = need; start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO); } if (start == NULL) { if (compat) { *compat = NULL; error = 1; // XXX } else { error = sooptcopyout(sopt, cmd, sizeof(*cmd)); } goto done; } ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, " "%d:%d si %d, %d:%d queues %d", dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH, dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK, dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS, dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I, dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE); sopt->sopt_valsize = sopt_valsize; a.type = cmd->subtype; if (compat == NULL) { bcopy(cmd, start, sizeof(*cmd)); ((struct dn_id*)(start))->len = sizeof(struct dn_id); buf = start + sizeof(*cmd); } else buf = start; a.start = &buf; a.end = start + have; /* start copying other objects */ if (compat) { a.type = DN_COMPAT_PIPE; dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a); a.type = DN_COMPAT_QUEUE; dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a); } else if (a.type == DN_FS) { dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a); } else { dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a); } DN_BH_WUNLOCK(); if (compat) { *compat = start; sopt->sopt_valsize = buf - start; /* free() is done by ip_dummynet_compat() */ start = NULL; //XXX hack } else { error = sooptcopyout(sopt, start, buf - start); } done: if (cmd && cmd != &r.o) free(cmd, M_DUMMYNET); if (start) free(start, M_DUMMYNET); return error; } /* Callback called on scheduler instance to delete it if idle */ static int 
drain_scheduler_cb(void *_si, void *arg) { struct dn_sch_inst *si = _si; if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL) return 0; if (si->sched->fp->flags & DN_MULTIQUEUE) { if (si->q_count == 0) return si_destroy(si, NULL); else return 0; } else { /* !DN_MULTIQUEUE */ if ((si+1)->ni.length == 0) return si_destroy(si, NULL); else return 0; } return 0; /* unreachable */ } /* Callback called on scheduler to check if it has instances */ static int drain_scheduler_sch_cb(void *_s, void *arg) { struct dn_schk *s = _s; if (s->sch.flags & DN_HAVE_MASK) { dn_ht_scan_bucket(s->siht, &s->drain_bucket, drain_scheduler_cb, NULL); s->drain_bucket++; } else { if (s->siht) { if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL) s->siht = NULL; } } return 0; } /* Called every tick, try to delete a 'bucket' of scheduler */ void dn_drain_scheduler(void) { dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch, drain_scheduler_sch_cb, NULL); dn_cfg.drain_sch++; } /* Callback called on queue to delete if it is idle */ static int drain_queue_cb(void *_q, void *arg) { struct dn_queue *q = _q; if (q->ni.length == 0) { dn_delete_queue(q, DN_DESTROY); return DNHT_SCAN_DEL; /* queue is deleted */ } return 0; /* queue isn't deleted */ } /* Callback called on flowset used to check if it has queues */ static int drain_queue_fs_cb(void *_fs, void *arg) { struct dn_fsk *fs = _fs; if (fs->fs.flags & DN_QHT_HASH) { /* Flowset has a hash table for queues */ dn_ht_scan_bucket(fs->qht, &fs->drain_bucket, drain_queue_cb, NULL); fs->drain_bucket++; } else { /* No hash table for this flowset, null the pointer * if the queue is deleted */ if (fs->qht) { if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL) fs->qht = NULL; } } return 0; } /* Called every tick, try to delete a 'bucket' of queue */ void dn_drain_queue(void) { /* scan a bucket of flowset */ dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs, drain_queue_fs_cb, NULL); dn_cfg.drain_fs++; } /* * Handler for the various dummynet socket options */ static int ip_dn_ctl(struct sockopt *sopt) { void *p = NULL; int error, l; error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET); if (error) return (error); /* Disallow sets in really-really secure mode. */ if (sopt->sopt_dir == SOPT_SET) { error = securelevel_ge(sopt->sopt_td->td_ucred, 3); if (error) return (error); } switch (sopt->sopt_name) { default : D("dummynet: unknown option %d", sopt->sopt_name); error = EINVAL; break; case IP_DUMMYNET_FLUSH: case IP_DUMMYNET_CONFIGURE: case IP_DUMMYNET_DEL: /* remove a pipe or queue */ case IP_DUMMYNET_GET: D("dummynet: compat option %d", sopt->sopt_name); error = ip_dummynet_compat(sopt); break; case IP_DUMMYNET3 : if (sopt->sopt_dir == SOPT_GET) { error = dummynet_get(sopt, NULL); break; } l = sopt->sopt_valsize; if (l < sizeof(struct dn_id) || l > 12000) { D("argument len %d invalid", l); break; } p = malloc(l, M_TEMP, M_WAITOK); // XXX can it fail ? error = sooptcopyin(sopt, p, l, l); if (error) break ; error = do_config(p, l); break; } if (p != NULL) free(p, M_TEMP); return error ; } static void ip_dn_init(void) { if (dn_cfg.init_done) return; printf("DUMMYNET %p with IPv6 initialized (100409)\n", curvnet); dn_cfg.init_done = 1; /* Set defaults here. MSVC does not accept initializers, * and this is also useful for vimages */ /* queue limits */ dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. 
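config_fs() clamps any slot-based qsize request to this value through ipdn_bound_var(), so a careless 'queue N slots' setting cannot pin down arbitrary memory unless this limit is raised first.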
*/ dn_cfg.byte_limit = 1024 * 1024; dn_cfg.expire = 1; /* RED parameters */ dn_cfg.red_lookup_depth = 256; /* default lookup table depth */ dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */ dn_cfg.red_max_pkt_size = 1500; /* default max packet size */ /* hash tables */ dn_cfg.max_hash_size = 65536; /* max in the hash tables */ dn_cfg.hash_size = 64; /* default hash size */ /* create hash tables for schedulers and flowsets. * In both we search by key and by pointer. */ dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size, offsetof(struct dn_schk, schk_next), schk_hash, schk_match, schk_new); dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size, offsetof(struct dn_fsk, fsk_next), fsk_hash, fsk_match, fsk_new); /* bucket index to drain object */ dn_cfg.drain_fs = 0; dn_cfg.drain_sch = 0; heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id)); SLIST_INIT(&dn_cfg.fsu); SLIST_INIT(&dn_cfg.schedlist); DN_LOCK_INIT(); TASK_INIT(&dn_task, 0, dummynet_task, curvnet); dn_tq = taskqueue_create_fast("dummynet", M_WAITOK, taskqueue_thread_enqueue, &dn_tq); taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet"); callout_init(&dn_timeout, 1); dn_reschedule(); /* Initialize curr_time adjustment mechanics. */ getmicrouptime(&dn_cfg.prev_t); } static void ip_dn_destroy(int last) { DN_BH_WLOCK(); /* ensure no more callouts are started */ dn_gone = 1; /* check for last */ if (last) { ND("removing last instance\n"); ip_dn_ctl_ptr = NULL; ip_dn_io_ptr = NULL; } dummynet_flush(); DN_BH_WUNLOCK(); callout_drain(&dn_timeout); taskqueue_drain(dn_tq, &dn_task); taskqueue_free(dn_tq); dn_ht_free(dn_cfg.schedhash, 0); dn_ht_free(dn_cfg.fshash, 0); heap_free(&dn_cfg.evheap); DN_LOCK_DESTROY(); } static int dummynet_modevent(module_t mod, int type, void *data) { if (type == MOD_LOAD) { if (ip_dn_io_ptr) { printf("DUMMYNET already loaded\n"); return EEXIST ; } ip_dn_init(); ip_dn_ctl_ptr = ip_dn_ctl; ip_dn_io_ptr = dummynet_io; return 0; } else if (type == MOD_UNLOAD) { ip_dn_destroy(1 /* last */); return 0; } else return EOPNOTSUPP; } /* modevent helpers for the modules */ static int load_dn_sched(struct dn_alg *d) { struct dn_alg *s; if (d == NULL) return 1; /* error */ ip_dn_init(); /* just in case, we need the lock */ /* Check that mandatory funcs exists */ if (d->enqueue == NULL || d->dequeue == NULL) { D("missing enqueue or dequeue for %s", d->name); return 1; } /* Search if scheduler already exists */ DN_BH_WLOCK(); SLIST_FOREACH(s, &dn_cfg.schedlist, next) { if (strcmp(s->name, d->name) == 0) { D("%s already loaded", d->name); break; /* scheduler already exists */ } } if (s == NULL) SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next); DN_BH_WUNLOCK(); D("dn_sched %s %sloaded", d->name, s ? "not ":""); return s ? 1 : 0; } static int unload_dn_sched(struct dn_alg *s) { struct dn_alg *tmp, *r; int err = EINVAL; ND("called for %s", s->name); DN_BH_WLOCK(); SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) { if (strcmp(s->name, r->name) != 0) continue; ND("ref_count = %d", r->ref_count); err = (r->ref_count != 0) ? EBUSY : 0; if (err == 0) SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next); break; } DN_BH_WUNLOCK(); D("dn_sched %s %sunloaded", s->name, err ? 
"not ":""); return err; } int dn_sched_modevent(module_t mod, int cmd, void *arg) { struct dn_alg *sch = arg; if (cmd == MOD_LOAD) return load_dn_sched(sch); else if (cmd == MOD_UNLOAD) return unload_dn_sched(sch); else return EINVAL; } static moduledata_t dummynet_mod = { "dummynet", dummynet_modevent, NULL }; #define DN_SI_SUB SI_SUB_PROTO_IFATTACHDOMAIN #define DN_MODEV_ORD (SI_ORDER_ANY - 128) /* after ipfw */ DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD); MODULE_DEPEND(dummynet, ipfw, 3, 3, 3); MODULE_VERSION(dummynet, 3); /* * Starting up. Done in order after dummynet_modevent() has been called. * VNET_SYSINIT is also called for each existing vnet and each new vnet. */ //VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL); /* * Shutdown handlers up shop. These are done in REVERSE ORDER, but still * after dummynet_modevent() has been called. Not called on reboot. * VNET_SYSUNINIT is also called for each exiting vnet as it exits. * or when the module is unloaded. */ //VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL); /* end of file */ Index: head/sys/sys/taskqueue.h =================================================================== --- head/sys/sys/taskqueue.h (revision 296271) +++ head/sys/sys/taskqueue.h (revision 296272) @@ -1,206 +1,205 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_TASKQUEUE_H_ #define _SYS_TASKQUEUE_H_ #ifndef _KERNEL #error "no user-servicable parts inside" #endif #include #include #include #include struct taskqueue; struct thread; struct timeout_task { struct taskqueue *q; struct task t; struct callout c; int f; }; enum taskqueue_callback_type { TASKQUEUE_CALLBACK_TYPE_INIT, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN, }; #define TASKQUEUE_CALLBACK_TYPE_MIN TASKQUEUE_CALLBACK_TYPE_INIT #define TASKQUEUE_CALLBACK_TYPE_MAX TASKQUEUE_CALLBACK_TYPE_SHUTDOWN #define TASKQUEUE_NUM_CALLBACKS TASKQUEUE_CALLBACK_TYPE_MAX + 1 typedef void (*taskqueue_callback_fn)(void *context); /* * A notification callback function which is called from * taskqueue_enqueue(). The context argument is given in the call to * taskqueue_create(). 
This function would normally be used to allow the * queue to arrange to run itself later (e.g., by scheduling a software * interrupt or waking a kernel thread). */ typedef void (*taskqueue_enqueue_fn)(void *context); struct taskqueue *taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context); int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, const char *name, ...) __printflike(4, 5); int taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, ...) __printflike(5, 6); int taskqueue_enqueue(struct taskqueue *queue, struct task *task); int taskqueue_enqueue_timeout(struct taskqueue *queue, struct timeout_task *timeout_task, int ticks); int taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp); int taskqueue_cancel_timeout(struct taskqueue *queue, struct timeout_task *timeout_task, u_int *pendp); void taskqueue_drain(struct taskqueue *queue, struct task *task); void taskqueue_drain_timeout(struct taskqueue *queue, struct timeout_task *timeout_task); void taskqueue_drain_all(struct taskqueue *queue); void taskqueue_free(struct taskqueue *queue); void taskqueue_run(struct taskqueue *queue); void taskqueue_block(struct taskqueue *queue); void taskqueue_unblock(struct taskqueue *queue); int taskqueue_member(struct taskqueue *queue, struct thread *td); void taskqueue_set_callback(struct taskqueue *queue, enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback, void *context); #define TASK_INITIALIZER(priority, func, context) \ { .ta_pending = 0, \ .ta_priority = (priority), \ .ta_func = (func), \ .ta_context = (context) } /* * Functions for dedicated thread taskqueues */ void taskqueue_thread_loop(void *arg); void taskqueue_thread_enqueue(void *context); /* * Initialise a task structure. */ #define TASK_INIT(task, priority, func, context) do { \ (task)->ta_pending = 0; \ (task)->ta_priority = (priority); \ (task)->ta_func = (func); \ (task)->ta_context = (context); \ } while (0) void _timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task, int priority, task_fn_t func, void *context); #define TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) \ _timeout_task_init(queue, timeout_task, priority, func, context); /* * Declare a reference to a taskqueue. */ #define TASKQUEUE_DECLARE(name) \ extern struct taskqueue *taskqueue_##name /* * Define and initialise a global taskqueue that uses sleep mutexes. */ #define TASKQUEUE_DEFINE(name, enqueue, context, init) \ \ struct taskqueue *taskqueue_##name; \ \ static void \ taskqueue_define_##name(void *arg) \ { \ taskqueue_##name = \ taskqueue_create(#name, M_WAITOK, (enqueue), (context)); \ init; \ } \ \ SYSINIT(taskqueue_##name, SI_SUB_CONFIGURE, SI_ORDER_SECOND, \ taskqueue_define_##name, NULL); \ \ struct __hack #define TASKQUEUE_DEFINE_THREAD(name) \ TASKQUEUE_DEFINE(name, taskqueue_thread_enqueue, &taskqueue_##name, \ taskqueue_start_threads(&taskqueue_##name, 1, PWAIT, \ "%s taskq", #name)) /* * Define and initialise a global taskqueue that uses spin mutexes. 
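 * With taskqueue_enqueue_fast() removed by this change, such a queue
 * is driven the same way as a thread-backed one: taskqueue_thread_enqueue
 * (paired with a pointer to the queue variable itself, as in the
 * macros here) wakes the service thread, and callers submit work with
 * plain taskqueue_enqueue().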
*/ #define TASKQUEUE_FAST_DEFINE(name, enqueue, context, init) \ \ struct taskqueue *taskqueue_##name; \ \ static void \ taskqueue_define_##name(void *arg) \ { \ taskqueue_##name = \ taskqueue_create_fast(#name, M_WAITOK, (enqueue), \ (context)); \ init; \ } \ \ SYSINIT(taskqueue_##name, SI_SUB_CONFIGURE, SI_ORDER_SECOND, \ taskqueue_define_##name, NULL); \ \ struct __hack #define TASKQUEUE_FAST_DEFINE_THREAD(name) \ TASKQUEUE_FAST_DEFINE(name, taskqueue_thread_enqueue, \ &taskqueue_##name, taskqueue_start_threads(&taskqueue_##name, \ 1, PWAIT, "%s taskq", #name)) /* * These queues are serviced by software interrupt handlers. To enqueue * a task, call taskqueue_enqueue(taskqueue_swi, &task) or * taskqueue_enqueue(taskqueue_swi_giant, &task). */ TASKQUEUE_DECLARE(swi_giant); TASKQUEUE_DECLARE(swi); /* * This queue is serviced by a kernel thread. To enqueue a task, call * taskqueue_enqueue(taskqueue_thread, &task). */ TASKQUEUE_DECLARE(thread); /* * Queue for swi handlers dispatched from fast interrupt handlers. * These are necessarily different from the above because the queue * must be locked with spinlocks since sleep mutexes cannot be used * from a fast interrupt handler context. */ TASKQUEUE_DECLARE(fast); -int taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task); struct taskqueue *taskqueue_create_fast(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context); #endif /* !_SYS_TASKQUEUE_H_ */ Index: head/sys/x86/iommu/intel_fault.c =================================================================== --- head/sys/x86/iommu/intel_fault.c (revision 296271) +++ head/sys/x86/iommu/intel_fault.c (revision 296272) @@ -1,328 +1,328 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Fault interrupt handling for DMARs. If advanced fault logging is * not implemented by hardware, the code emulates it.
The fast interrupt * handler flushes the fault registers into a circular buffer at * unit->fault_log, and schedules a task. * * The fast handler is used since faults usually come in bursts, and * the number of fault log registers is limited, e.g. down to one for 5400 * MCH. We are trying to reduce the latency for clearing the fault * register file. The task is usually long-running, since printf() is * slow, but this is not problematic because bursts are rare. * * For the same reason, each translation unit task is executed in its * own thread. * * XXXKIB It seems there is no hardware available which implements * advanced fault logging, so the code to handle AFL is not written. */ static int dmar_fault_next(struct dmar_unit *unit, int faultp) { faultp += 2; if (faultp == unit->fault_log_size) faultp = 0; return (faultp); } static void dmar_fault_intr_clear(struct dmar_unit *unit, uint32_t fsts) { uint32_t clear; clear = 0; if ((fsts & DMAR_FSTS_ITE) != 0) { printf("DMAR%d: Invalidation timed out\n", unit->unit); clear |= DMAR_FSTS_ITE; } if ((fsts & DMAR_FSTS_ICE) != 0) { printf("DMAR%d: Invalidation completion error\n", unit->unit); clear |= DMAR_FSTS_ICE; } if ((fsts & DMAR_FSTS_IQE) != 0) { printf("DMAR%d: Invalidation queue error\n", unit->unit); clear |= DMAR_FSTS_IQE; } if ((fsts & DMAR_FSTS_APF) != 0) { printf("DMAR%d: Advanced pending fault\n", unit->unit); clear |= DMAR_FSTS_APF; } if ((fsts & DMAR_FSTS_AFO) != 0) { printf("DMAR%d: Advanced fault overflow\n", unit->unit); clear |= DMAR_FSTS_AFO; } if (clear != 0) dmar_write4(unit, DMAR_FSTS_REG, clear); } int dmar_fault_intr(void *arg) { struct dmar_unit *unit; uint64_t fault_rec[2]; uint32_t fsts; int fri, frir, faultp; bool enqueue; unit = arg; enqueue = false; fsts = dmar_read4(unit, DMAR_FSTS_REG); dmar_fault_intr_clear(unit, fsts); if ((fsts & DMAR_FSTS_PPF) == 0) goto done; fri = DMAR_FSTS_FRI(fsts); for (;;) { frir = (DMAR_CAP_FRO(unit->hw_cap) + fri) * 16; fault_rec[1] = dmar_read8(unit, frir + 8); if ((fault_rec[1] & DMAR_FRCD2_F) == 0) break; fault_rec[0] = dmar_read8(unit, frir); dmar_write4(unit, frir + 12, DMAR_FRCD2_F32); DMAR_FAULT_LOCK(unit); faultp = unit->fault_log_head; if (dmar_fault_next(unit, faultp) == unit->fault_log_tail) { /* XXXKIB log overflow */ } else { unit->fault_log[faultp] = fault_rec[0]; unit->fault_log[faultp + 1] = fault_rec[1]; unit->fault_log_head = dmar_fault_next(unit, faultp); enqueue = true; } DMAR_FAULT_UNLOCK(unit); fri += 1; if (fri >= DMAR_CAP_NFR(unit->hw_cap)) fri = 0; } done: /* * On SandyBridge, due to errata BJ124, IvyBridge errata * BV100, and Haswell errata HSD40, "Spurious Intel VT-d * Interrupts May Occur When the PFO Bit is Set". Handle the * cases by clearing the overflow bit even if no fault is * reported. * * On IvyBridge, errata BV30 states that clearing an * already-clear DMAR_FRCD2_F bit in the fault register causes a * spurious interrupt. Do nothing.
* */ if ((fsts & DMAR_FSTS_PFO) != 0) { printf("DMAR%d: Fault Overflow\n", unit->unit); dmar_write4(unit, DMAR_FSTS_REG, DMAR_FSTS_PFO); } if (enqueue) { - taskqueue_enqueue_fast(unit->fault_taskqueue, + taskqueue_enqueue(unit->fault_taskqueue, &unit->fault_task); } return (FILTER_HANDLED); } static void dmar_fault_task(void *arg, int pending __unused) { struct dmar_unit *unit; struct dmar_ctx *ctx; uint64_t fault_rec[2]; int sid, bus, slot, func, faultp; unit = arg; DMAR_FAULT_LOCK(unit); for (;;) { faultp = unit->fault_log_tail; if (faultp == unit->fault_log_head) break; fault_rec[0] = unit->fault_log[faultp]; fault_rec[1] = unit->fault_log[faultp + 1]; unit->fault_log_tail = dmar_fault_next(unit, faultp); DMAR_FAULT_UNLOCK(unit); sid = DMAR_FRCD2_SID(fault_rec[1]); printf("DMAR%d: ", unit->unit); DMAR_LOCK(unit); ctx = dmar_find_ctx_locked(unit, sid); if (ctx == NULL) { printf(":"); /* * Note that the slot and function will not be correct * if ARI is in use, but without a ctx entry we have * no way of knowing whether ARI is in use or not. */ bus = PCI_RID2BUS(sid); slot = PCI_RID2SLOT(sid); func = PCI_RID2FUNC(sid); } else { ctx->flags |= DMAR_CTX_FAULTED; ctx->last_fault_rec[0] = fault_rec[0]; ctx->last_fault_rec[1] = fault_rec[1]; device_print_prettyname(ctx->ctx_tag.owner); bus = pci_get_bus(ctx->ctx_tag.owner); slot = pci_get_slot(ctx->ctx_tag.owner); func = pci_get_function(ctx->ctx_tag.owner); } DMAR_UNLOCK(unit); printf( "pci%d:%d:%d sid %x fault acc %x adt 0x%x reason 0x%x " "addr %jx\n", bus, slot, func, sid, DMAR_FRCD2_T(fault_rec[1]), DMAR_FRCD2_AT(fault_rec[1]), DMAR_FRCD2_FR(fault_rec[1]), (uintmax_t)fault_rec[0]); DMAR_FAULT_LOCK(unit); } DMAR_FAULT_UNLOCK(unit); } static void dmar_clear_faults(struct dmar_unit *unit) { uint32_t frec, frir, fsts; int i; for (i = 0; i < DMAR_CAP_NFR(unit->hw_cap); i++) { frir = (DMAR_CAP_FRO(unit->hw_cap) + i) * 16; frec = dmar_read4(unit, frir + 12); if ((frec & DMAR_FRCD2_F32) == 0) continue; dmar_write4(unit, frir + 12, DMAR_FRCD2_F32); } fsts = dmar_read4(unit, DMAR_FSTS_REG); dmar_write4(unit, DMAR_FSTS_REG, fsts); } int dmar_init_fault_log(struct dmar_unit *unit) { mtx_init(&unit->fault_lock, "dmarflt", NULL, MTX_SPIN); unit->fault_log_size = 256; /* 128 fault log entries */ TUNABLE_INT_FETCH("hw.dmar.fault_log_size", &unit->fault_log_size); if (unit->fault_log_size % 2 != 0) panic("hw.dmar_fault_log_size must be even"); unit->fault_log = malloc(sizeof(uint64_t) * unit->fault_log_size, M_DEVBUF, M_WAITOK | M_ZERO); TASK_INIT(&unit->fault_task, 0, dmar_fault_task, unit); unit->fault_taskqueue = taskqueue_create_fast("dmar", M_WAITOK, taskqueue_thread_enqueue, &unit->fault_taskqueue); taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV, "dmar%d fault taskq", unit->unit); DMAR_LOCK(unit); dmar_disable_fault_intr(unit); dmar_clear_faults(unit); dmar_enable_fault_intr(unit); DMAR_UNLOCK(unit); return (0); } void dmar_fini_fault_log(struct dmar_unit *unit) { DMAR_LOCK(unit); dmar_disable_fault_intr(unit); DMAR_UNLOCK(unit); if (unit->fault_taskqueue == NULL) return; taskqueue_drain(unit->fault_taskqueue, &unit->fault_task); taskqueue_free(unit->fault_taskqueue); unit->fault_taskqueue = NULL; mtx_destroy(&unit->fault_lock); free(unit->fault_log, M_DEVBUF); unit->fault_log = NULL; unit->fault_log_head = unit->fault_log_tail = 0; } void dmar_enable_fault_intr(struct dmar_unit *unit) { uint32_t fectl; DMAR_ASSERT_LOCKED(unit); fectl = dmar_read4(unit, DMAR_FECTL_REG); fectl &= ~DMAR_FECTL_IM; dmar_write4(unit, DMAR_FECTL_REG, fectl); } 
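/*
 * Disabling works through the same interrupt-mask bit, DMAR_FECTL_IM:
 * dmar_enable_fault_intr() above clears it to unmask the fault event,
 * and this routine sets it again.
 */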
void dmar_disable_fault_intr(struct dmar_unit *unit) { uint32_t fectl; DMAR_ASSERT_LOCKED(unit); fectl = dmar_read4(unit, DMAR_FECTL_REG); dmar_write4(unit, DMAR_FECTL_REG, fectl | DMAR_FECTL_IM); } Index: head/sys/x86/iommu/intel_qi.c =================================================================== --- head/sys/x86/iommu/intel_qi.c (revision 296271) +++ head/sys/x86/iommu/intel_qi.c (revision 296272) @@ -1,471 +1,471 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static bool dmar_qi_seq_processed(const struct dmar_unit *unit, const struct dmar_qi_genseq *pseq) { return (pseq->gen < unit->inv_waitd_gen || (pseq->gen == unit->inv_waitd_gen && pseq->seq <= unit->inv_waitd_seq_hw)); } static int dmar_enable_qi(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd |= DMAR_GCMD_QIE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) == 0) cpu_spinwait(); return (0); } static int dmar_disable_qi(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); unit->hw_gcmd &= ~DMAR_GCMD_QIE; dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd); /* XXXKIB should have a timeout */ while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) != 0) cpu_spinwait(); return (0); } static void dmar_qi_advance_tail(struct dmar_unit *unit) { DMAR_ASSERT_LOCKED(unit); dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail); } static void dmar_qi_ensure(struct dmar_unit *unit, int descr_count) { uint32_t head; int bytes; DMAR_ASSERT_LOCKED(unit); bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT; for (;;) { if (bytes <= unit->inv_queue_avail) break; /* refill */ head = dmar_read4(unit, DMAR_IQH_REG); head &= DMAR_IQH_MASK; unit->inv_queue_avail = head - unit->inv_queue_tail - DMAR_IQ_DESCR_SZ; if (head <= unit->inv_queue_tail) unit->inv_queue_avail += unit->inv_queue_size; if (bytes <= unit->inv_queue_avail) break; /* * No space in the queue, do a busy wait. Hardware must * make progress. But first advance the tail to * inform the descriptor streamer about entries we * might have already filled, otherwise they could * clog the whole queue. */ dmar_qi_advance_tail(unit); unit->inv_queue_full++; cpu_spinwait(); } unit->inv_queue_avail -= bytes; } static void dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2) { DMAR_ASSERT_LOCKED(unit); *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data1; unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2; KASSERT(unit->inv_queue_tail <= unit->inv_queue_size, ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail, (uintmax_t)unit->inv_queue_size)); unit->inv_queue_tail &= unit->inv_queue_size - 1; *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data2; unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2; KASSERT(unit->inv_queue_tail <= unit->inv_queue_size, ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail, (uintmax_t)unit->inv_queue_size)); unit->inv_queue_tail &= unit->inv_queue_size - 1; } static void dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr, bool memw, bool fence) { DMAR_ASSERT_LOCKED(unit); dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID | (intr ? DMAR_IQ_DESCR_WAIT_IF : 0) | (memw ? DMAR_IQ_DESCR_WAIT_SW : 0) | (fence ? DMAR_IQ_DESCR_WAIT_FN : 0) | (memw ? DMAR_IQ_DESCR_WAIT_SD(seq) : 0), memw ?
unit->inv_waitd_seq_hw_phys : 0); } static void dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq) { struct dmar_qi_genseq gsec; uint32_t seq; KASSERT(pseq != NULL, ("wait descriptor with no place for seq")); DMAR_ASSERT_LOCKED(unit); if (unit->inv_waitd_seq == 0xffffffff) { gsec.gen = unit->inv_waitd_gen; gsec.seq = unit->inv_waitd_seq; dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_descr(unit, gsec.seq, false, true, false); dmar_qi_advance_tail(unit); while (!dmar_qi_seq_processed(unit, &gsec)) cpu_spinwait(); unit->inv_waitd_gen++; unit->inv_waitd_seq = 1; } seq = unit->inv_waitd_seq++; pseq->gen = unit->inv_waitd_gen; pseq->seq = seq; dmar_qi_emit_wait_descr(unit, seq, true, true, false); } static void dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq, bool nowait) { DMAR_ASSERT_LOCKED(unit); unit->inv_seq_waiters++; while (!dmar_qi_seq_processed(unit, gseq)) { if (cold || nowait) { cpu_spinwait(); } else { msleep(&unit->inv_seq_waiters, &unit->lock, 0, "dmarse", hz); } } unit->inv_seq_waiters--; } void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size, struct dmar_qi_genseq *pseq) { struct dmar_unit *unit; dmar_gaddr_t isize; int am; unit = domain->dmar; DMAR_ASSERT_LOCKED(unit); for (; size > 0; base += isize, size -= isize) { am = calc_am(unit, base, size, &isize); dmar_qi_ensure(unit, 1); dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR | DMAR_IQ_DESCR_IOTLB_DID(domain->domain), base | am); } if (pseq != NULL) { dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, pseq); } dmar_qi_advance_tail(unit); } void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; DMAR_ASSERT_LOCKED(unit); dmar_qi_ensure(unit, 2); dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); } void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; DMAR_ASSERT_LOCKED(unit); dmar_qi_ensure(unit, 2); dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_GLOB | DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); } void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; DMAR_ASSERT_LOCKED(unit); dmar_qi_ensure(unit, 2); dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); } void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt) { struct dmar_qi_genseq gseq; u_int c, l; DMAR_ASSERT_LOCKED(unit); KASSERT(start < unit->irte_cnt && start < start + cnt && start + cnt <= unit->irte_cnt, ("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt)); for (; cnt > 0; cnt -= c, start += c) { l = ffs(start | cnt) - 1; c = 1 << l; dmar_qi_ensure(unit, 1); dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV | DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) | DMAR_IQ_DESCR_IEC_IM(l), 0); } dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); /* * The caller of the function, in particular, * dmar_ir_program_irte(), may be called from the context * where the sleeping is forbidden (in fact, the * intr_table_lock mutex may be held, locked from * intr_shuffle_irqs()). 
Wait for the invalidation completion * using the busy wait. * * The impact on the interrupt input setup code is small, the * expected overhead is comparable with the chipset register * read. It is more harmful for the parallel DMA operations, * since we own the dmar unit lock until whole invalidation * queue is processed, which includes requests possibly issued * before our request. */ dmar_qi_wait_for_seq(unit, &gseq, true); } int dmar_qi_intr(void *arg) { struct dmar_unit *unit; unit = arg; KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit)); - taskqueue_enqueue_fast(unit->qi_taskqueue, &unit->qi_task); + taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task); return (FILTER_HANDLED); } static void dmar_qi_task(void *arg, int pending __unused) { struct dmar_unit *unit; struct dmar_map_entry *entry; uint32_t ics; unit = arg; DMAR_LOCK(unit); for (;;) { entry = TAILQ_FIRST(&unit->tlb_flush_entries); if (entry == NULL) break; if ((entry->gseq.gen == 0 && entry->gseq.seq == 0) || !dmar_qi_seq_processed(unit, &entry->gseq)) break; TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link); DMAR_UNLOCK(unit); dmar_domain_free_entry(entry, (entry->flags & DMAR_MAP_ENTRY_QI_NF) == 0); DMAR_LOCK(unit); } ics = dmar_read4(unit, DMAR_ICS_REG); if ((ics & DMAR_ICS_IWC) != 0) { ics = DMAR_ICS_IWC; dmar_write4(unit, DMAR_ICS_REG, ics); } if (unit->inv_seq_waiters > 0) wakeup(&unit->inv_seq_waiters); DMAR_UNLOCK(unit); } int dmar_init_qi(struct dmar_unit *unit) { uint64_t iqa; uint32_t ics; int qi_sz; if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0) return (0); unit->qi_enabled = 1; TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled); if (!unit->qi_enabled) return (0); TAILQ_INIT(&unit->tlb_flush_entries); TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit); unit->qi_taskqueue = taskqueue_create_fast("dmar", M_WAITOK, taskqueue_thread_enqueue, &unit->qi_taskqueue); taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV, "dmar%d qi taskq", unit->unit); unit->inv_waitd_gen = 0; unit->inv_waitd_seq = 1; qi_sz = DMAR_IQA_QS_DEF; TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz); if (qi_sz > DMAR_IQA_QS_MAX) qi_sz = DMAR_IQA_QS_MAX; unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE; /* Reserve one descriptor to prevent wraparound. */ unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ; /* The invalidation queue reads by DMARs are always coherent. 
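 * Plain write-back memory (VM_MEMATTR_DEFAULT) is therefore suitable
 * for the queue allocated below; no explicit cache flushing is needed.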
*/ unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size, M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); unit->inv_waitd_seq_hw_phys = pmap_kextract( (vm_offset_t)&unit->inv_waitd_seq_hw); DMAR_LOCK(unit); dmar_write8(unit, DMAR_IQT_REG, 0); iqa = pmap_kextract(unit->inv_queue); iqa |= qi_sz; dmar_write8(unit, DMAR_IQA_REG, iqa); dmar_enable_qi(unit); ics = dmar_read4(unit, DMAR_ICS_REG); if ((ics & DMAR_ICS_IWC) != 0) { ics = DMAR_ICS_IWC; dmar_write4(unit, DMAR_ICS_REG, ics); } dmar_enable_qi_intr(unit); DMAR_UNLOCK(unit); return (0); } void dmar_fini_qi(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; if (!unit->qi_enabled) return; taskqueue_drain(unit->qi_taskqueue, &unit->qi_task); taskqueue_free(unit->qi_taskqueue); unit->qi_taskqueue = NULL; DMAR_LOCK(unit); /* quiesce */ dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, &gseq); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); /* only after the quiesce, disable the queue */ dmar_disable_qi_intr(unit); dmar_disable_qi(unit); KASSERT(unit->inv_seq_waiters == 0, ("dmar%d: waiters on disabled queue", unit->unit)); DMAR_UNLOCK(unit); kmem_free(kernel_arena, unit->inv_queue, unit->inv_queue_size); unit->inv_queue = 0; unit->inv_queue_size = 0; unit->qi_enabled = 0; } void dmar_enable_qi_intr(struct dmar_unit *unit) { uint32_t iectl; DMAR_ASSERT_LOCKED(unit); KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit)); iectl = dmar_read4(unit, DMAR_IECTL_REG); iectl &= ~DMAR_IECTL_IM; dmar_write4(unit, DMAR_IECTL_REG, iectl); } void dmar_disable_qi_intr(struct dmar_unit *unit) { uint32_t iectl; DMAR_ASSERT_LOCKED(unit); KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit)); iectl = dmar_read4(unit, DMAR_IECTL_REG); dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM); } Index: head/sys/x86/x86/mca.c =================================================================== --- head/sys/x86/x86/mca.c (revision 296271) +++ head/sys/x86/x86/mca.c (revision 296272) @@ -1,1040 +1,1040 @@ /*- * Copyright (c) 2009 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Support for x86 machine check architecture.
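 * In outline: mca_scan() polls the per-bank status registers on the
 * current CPU, valid records are queued on mca_records by
 * mca_record_entry(), mca_log() formats them for the console, and
 * cmci_update() adapts the corrected-error interrupt threshold.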
*/ #include __FBSDID("$FreeBSD$"); #ifdef __amd64__ #define DEV_APIC #else #include "opt_apic.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Modes for mca_scan() */ enum scan_mode { POLLED, MCE, CMCI, }; #ifdef DEV_APIC /* * State maintained for each monitored MCx bank to control the * corrected machine check interrupt threshold. */ struct cmc_state { int max_threshold; int last_intr; }; #endif struct mca_internal { struct mca_record rec; int logged; STAILQ_ENTRY(mca_internal) link; }; static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture"); static volatile int mca_count; /* Number of records stored. */ static int mca_banks; /* Number of per-CPU register banks. */ static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL, "Machine Check Architecture"); static int mca_enabled = 1; SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0, "Administrative toggle for machine check support"); static int amd10h_L1TP = 1; SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0, "Administrative toggle for logging of level one TLB parity (L1TP) errors"); static int intel6h_HSD131; SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0, "Administrative toggle for logging of spurious corrected errors"); int workaround_erratum383; SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0, "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?"); static STAILQ_HEAD(, mca_internal) mca_freelist; static int mca_freecount; static STAILQ_HEAD(, mca_internal) mca_records; static struct callout mca_timer; static int mca_ticks = 3600; /* Check hourly by default. */ static struct taskqueue *mca_tq; static struct task mca_refill_task, mca_scan_task; static struct mtx mca_lock; #ifdef DEV_APIC static struct cmc_state **cmc_state; /* Indexed by cpuid, bank */ static int cmc_throttle = 60; /* Time in seconds to throttle CMCI. 
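 * The adaptive policy built around this interval is implemented in
 * cmci_update() below.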
*/ #endif static int sysctl_positive_int(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if (value <= 0) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_mca_records(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; struct mca_record record; struct mca_internal *rec; int i; if (namelen != 1) return (EINVAL); if (name[0] < 0 || name[0] >= mca_count) return (EINVAL); mtx_lock_spin(&mca_lock); if (name[0] >= mca_count) { mtx_unlock_spin(&mca_lock); return (EINVAL); } i = 0; STAILQ_FOREACH(rec, &mca_records, link) { if (i == name[0]) { record = rec->rec; break; } i++; } mtx_unlock_spin(&mca_lock); return (SYSCTL_OUT(req, &record, sizeof(record))); } static const char * mca_error_ttype(uint16_t mca_error) { switch ((mca_error & 0x000c) >> 2) { case 0: return ("I"); case 1: return ("D"); case 2: return ("G"); } return ("?"); } static const char * mca_error_level(uint16_t mca_error) { switch (mca_error & 0x0003) { case 0: return ("L0"); case 1: return ("L1"); case 2: return ("L2"); case 3: return ("LG"); } return ("L?"); } static const char * mca_error_request(uint16_t mca_error) { switch ((mca_error & 0x00f0) >> 4) { case 0x0: return ("ERR"); case 0x1: return ("RD"); case 0x2: return ("WR"); case 0x3: return ("DRD"); case 0x4: return ("DWR"); case 0x5: return ("IRD"); case 0x6: return ("PREFETCH"); case 0x7: return ("EVICT"); case 0x8: return ("SNOOP"); } return ("???"); } static const char * mca_error_mmtype(uint16_t mca_error) { switch ((mca_error & 0x70) >> 4) { case 0x0: return ("GEN"); case 0x1: return ("RD"); case 0x2: return ("WR"); case 0x3: return ("AC"); case 0x4: return ("MS"); } return ("???"); } static int __nonnull(1) mca_mute(const struct mca_record *rec) { /* * Skip spurious corrected parity errors generated by Intel Haswell- * and Broadwell-based CPUs (see HSD131, HSM142, HSW131 and BDM48 * erratum respectively), unless reporting is enabled. * Note that these errors also have been observed with the D0-stepping * of Haswell, while at least initially the CPU specification updates * suggested only the C0-stepping to be affected. Similarly, Celeron * 2955U with a CPU ID of 0x45 apparently are also concerned with the * same problem, with HSM142 only referring to 0x3c and 0x46. */ if (cpu_vendor_id == CPU_VENDOR_INTEL && CPUID_TO_FAMILY(cpu_id) == 0x6 && (CPUID_TO_MODEL(cpu_id) == 0x3c || /* HSD131, HSM142, HSW131 */ CPUID_TO_MODEL(cpu_id) == 0x3d || /* BDM48 */ CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46) && /* HSM142 */ rec->mr_bank == 0 && (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 && !intel6h_HSD131) return (1); return (0); } /* Dump details about a single machine check. 
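 * For instance, on CMCI-capable hardware a hypothetical corrected
 * memory read error in bank 8 would include lines roughly like
 * (other MCA: detail lines elided):
 *   MCA: Bank 8, Status 0x8000004000000090
 *   MCA: CPU 0 COR (1) RD channel 0 memory error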
*/ static void __nonnull(1) mca_log(const struct mca_record *rec) { uint16_t mca_error; if (mca_mute(rec)) return; printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank, (long long)rec->mr_status); printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n", (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status); printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor, rec->mr_cpu_id, rec->mr_apic_id); printf("MCA: CPU %d ", rec->mr_cpu); if (rec->mr_status & MC_STATUS_UC) printf("UNCOR "); else { printf("COR "); if (rec->mr_mcg_cap & MCG_CAP_CMCI_P) printf("(%lld) ", ((long long)rec->mr_status & MC_STATUS_COR_COUNT) >> 38); } if (rec->mr_status & MC_STATUS_PCC) printf("PCC "); if (rec->mr_status & MC_STATUS_OVER) printf("OVER "); mca_error = rec->mr_status & MC_STATUS_MCA_ERROR; switch (mca_error) { /* Simple error codes. */ case 0x0000: printf("no error"); break; case 0x0001: printf("unclassified error"); break; case 0x0002: printf("ucode ROM parity error"); break; case 0x0003: printf("external error"); break; case 0x0004: printf("FRC error"); break; case 0x0005: printf("internal parity error"); break; case 0x0400: printf("internal timer error"); break; default: if ((mca_error & 0xfc00) == 0x0400) { printf("internal error %x", mca_error & 0x03ff); break; } /* Compound error codes. */ /* Memory hierarchy error. */ if ((mca_error & 0xeffc) == 0x000c) { printf("%s memory error", mca_error_level(mca_error)); break; } /* TLB error. */ if ((mca_error & 0xeff0) == 0x0010) { printf("%sTLB %s error", mca_error_ttype(mca_error), mca_error_level(mca_error)); break; } /* Memory controller error. */ if ((mca_error & 0xef80) == 0x0080) { printf("%s channel ", mca_error_mmtype(mca_error)); if ((mca_error & 0x000f) != 0x000f) printf("%d", mca_error & 0x000f); else printf("??"); printf(" memory error"); break; } /* Cache error. */ if ((mca_error & 0xef00) == 0x0100) { printf("%sCACHE %s %s error", mca_error_ttype(mca_error), mca_error_level(mca_error), mca_error_request(mca_error)); break; } /* Bus and/or Interconnect error. */ if ((mca_error & 0xe800) == 0x0800) { printf("BUS%s ", mca_error_level(mca_error)); switch ((mca_error & 0x0600) >> 9) { case 0: printf("Source"); break; case 1: printf("Responder"); break; case 2: printf("Observer"); break; default: printf("???"); break; } printf(" %s ", mca_error_request(mca_error)); switch ((mca_error & 0x000c) >> 2) { case 0: printf("Memory"); break; case 2: printf("I/O"); break; case 3: printf("Other"); break; default: printf("???"); break; } if (mca_error & 0x0100) printf(" timed out"); break; } printf("unknown error %x", mca_error); break; } printf("\n"); if (rec->mr_status & MC_STATUS_ADDRV) printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr); if (rec->mr_status & MC_STATUS_MISCV) printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc); } static int __nonnull(2) mca_check_status(int bank, struct mca_record *rec) { uint64_t status; u_int p[4]; status = rdmsr(MSR_MC_STATUS(bank)); if (!(status & MC_STATUS_VAL)) return (0); /* Save exception information. */ rec->mr_status = status; rec->mr_bank = bank; rec->mr_addr = 0; if (status & MC_STATUS_ADDRV) rec->mr_addr = rdmsr(MSR_MC_ADDR(bank)); rec->mr_misc = 0; if (status & MC_STATUS_MISCV) rec->mr_misc = rdmsr(MSR_MC_MISC(bank)); rec->mr_tsc = rdtsc(); rec->mr_apic_id = PCPU_GET(apic_id); rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP); rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS); rec->mr_cpu_id = cpu_id; rec->mr_cpu_vendor_id = cpu_vendor_id; rec->mr_cpu = PCPU_GET(cpuid); /* * Clear machine check. 
Don't do this for uncorrectable * errors so that the BIOS can see them. */ if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) { wrmsr(MSR_MC_STATUS(bank), 0); do_cpuid(0, p); } return (1); } static void mca_fill_freelist(void) { struct mca_internal *rec; int desired; /* * Ensure we have at least one record for each bank and one * record per CPU. */ desired = imax(mp_ncpus, mca_banks); mtx_lock_spin(&mca_lock); while (mca_freecount < desired) { mtx_unlock_spin(&mca_lock); rec = malloc(sizeof(*rec), M_MCA, M_WAITOK); mtx_lock_spin(&mca_lock); STAILQ_INSERT_TAIL(&mca_freelist, rec, link); mca_freecount++; } mtx_unlock_spin(&mca_lock); } static void mca_refill(void *context, int pending) { mca_fill_freelist(); } static void __nonnull(2) mca_record_entry(enum scan_mode mode, const struct mca_record *record) { struct mca_internal *rec; if (mode == POLLED) { rec = malloc(sizeof(*rec), M_MCA, M_WAITOK); mtx_lock_spin(&mca_lock); } else { mtx_lock_spin(&mca_lock); rec = STAILQ_FIRST(&mca_freelist); if (rec == NULL) { printf("MCA: Unable to allocate space for an event.\n"); mca_log(record); mtx_unlock_spin(&mca_lock); return; } STAILQ_REMOVE_HEAD(&mca_freelist, link); mca_freecount--; } rec->rec = *record; rec->logged = 0; STAILQ_INSERT_TAIL(&mca_records, rec, link); mca_count++; mtx_unlock_spin(&mca_lock); if (mode == CMCI) - taskqueue_enqueue_fast(mca_tq, &mca_refill_task); + taskqueue_enqueue(mca_tq, &mca_refill_task); } #ifdef DEV_APIC /* * Update the interrupt threshold for a CMCI. The strategy is to use * a low trigger that interrupts as soon as the first event occurs. * However, if a steady stream of events arrives, the threshold is * increased until the interrupts are throttled to once every * cmc_throttle seconds or the periodic scan. If a periodic scan * finds that the threshold is too high, it is lowered. */ static void cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec) { struct cmc_state *cc; uint64_t ctl; u_int delta; int count, limit; /* Fetch the current limit for this bank. */ cc = &cmc_state[PCPU_GET(cpuid)][bank]; ctl = rdmsr(MSR_MC_CTL2(bank)); count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38; delta = (u_int)(ticks - cc->last_intr); /* * If an interrupt was received less than cmc_throttle seconds * since the previous interrupt and the count from the current * event is greater than or equal to the current threshold, * double the threshold up to the max. */ if (mode == CMCI && valid) { limit = ctl & MC_CTL2_THRESHOLD; if (delta < cmc_throttle && count >= limit && limit < cc->max_threshold) { limit = min(limit << 1, cc->max_threshold); ctl &= ~MC_CTL2_THRESHOLD; ctl |= limit; wrmsr(MSR_MC_CTL2(bank), ctl); } cc->last_intr = ticks; return; } /* * When the banks are polled, check to see if the threshold * should be lowered. */ if (mode != POLLED) return; /* If a CMCI occurred recently, do nothing for now. */ if (delta < cmc_throttle) return; /* * Compute a new limit based on the average rate of events per * cmc_throttle seconds since the last interrupt. */ if (valid) { count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38; limit = count * cmc_throttle / delta; if (limit <= 0) limit = 1; else if (limit > cc->max_threshold) limit = cc->max_threshold; } else limit = 1; if ((ctl & MC_CTL2_THRESHOLD) != limit) { ctl &= ~MC_CTL2_THRESHOLD; ctl |= limit; wrmsr(MSR_MC_CTL2(bank), ctl); } } #endif /* * This scans all the machine check banks of the current CPU to see if * there are any machine checks.
Any non-recoverable errors are * reported immediately via mca_log(). The current thread must be * pinned when this is called. The 'mode' parameter indicates if we * are being called from the MC exception handler, the CMCI handler, * or the periodic poller. In the MC exception case this function * returns true if the system is restartable. Otherwise, it returns a * count of the number of valid MC records found. */ static int mca_scan(enum scan_mode mode) { struct mca_record rec; uint64_t mcg_cap, ucmask; int count, i, recoverable, valid; count = 0; recoverable = 1; ucmask = MC_STATUS_UC | MC_STATUS_PCC; /* When handling a MCE#, treat the OVER flag as non-restartable. */ if (mode == MCE) ucmask |= MC_STATUS_OVER; mcg_cap = rdmsr(MSR_MCG_CAP); for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) { #ifdef DEV_APIC /* * For a CMCI, only check banks this CPU is * responsible for. */ if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i)) continue; #endif valid = mca_check_status(i, &rec); if (valid) { count++; if (rec.mr_status & ucmask) { recoverable = 0; mtx_lock_spin(&mca_lock); mca_log(&rec); mtx_unlock_spin(&mca_lock); } mca_record_entry(mode, &rec); } #ifdef DEV_APIC /* * If this is a bank this CPU monitors via CMCI, * update the threshold. */ if (PCPU_GET(cmci_mask) & 1 << i) cmci_update(mode, i, valid, &rec); #endif } if (mode == POLLED) mca_fill_freelist(); return (mode == MCE ? recoverable : count); } /* * Scan the machine check banks on all CPUs by binding to each CPU in * turn. If any of the CPUs contained new machine check records, log * them to the console. */ static void mca_scan_cpus(void *context, int pending) { struct mca_internal *mca; struct thread *td; int count, cpu; mca_fill_freelist(); td = curthread; count = 0; thread_lock(td); CPU_FOREACH(cpu) { sched_bind(td, cpu); thread_unlock(td); count += mca_scan(POLLED); thread_lock(td); sched_unbind(td); } thread_unlock(td); if (count != 0) { mtx_lock_spin(&mca_lock); STAILQ_FOREACH(mca, &mca_records, link) { if (!mca->logged) { mca->logged = 1; mca_log(&mca->rec); } } mtx_unlock_spin(&mca_lock); } } static void mca_periodic_scan(void *arg) { - taskqueue_enqueue_fast(mca_tq, &mca_scan_task); + taskqueue_enqueue(mca_tq, &mca_scan_task); callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL); } static int sysctl_mca_scan(SYSCTL_HANDLER_ARGS) { int error, i; i = 0; error = sysctl_handle_int(oidp, &i, 0, req); if (error) return (error); if (i) - taskqueue_enqueue_fast(mca_tq, &mca_scan_task); + taskqueue_enqueue(mca_tq, &mca_scan_task); return (0); } static void mca_createtq(void *dummy) { if (mca_banks <= 0) return; mca_tq = taskqueue_create_fast("mca", M_WAITOK, taskqueue_thread_enqueue, &mca_tq); taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq"); } SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL); static void mca_startup(void *dummy) { if (mca_banks <= 0) return; callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL); } SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL); #ifdef DEV_APIC static void cmci_setup(void) { int i; cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA, M_WAITOK); for (i = 0; i <= mp_maxid; i++) cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks, M_MCA, M_WAITOK | M_ZERO); SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &cmc_throttle, 0, sysctl_positive_int, "I", "Interval in seconds to throttle corrected MC interrupts"); } #endif static 
void mca_setup(uint64_t mcg_cap) { /* * On AMD Family 10h processors, unless logging of level one TLB * parity (L1TP) errors is disabled, enable the recommended workaround * for Erratum 383. */ if (cpu_vendor_id == CPU_VENDOR_AMD && CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP) workaround_erratum383 = 1; mca_banks = mcg_cap & MCG_CAP_COUNT; mtx_init(&mca_lock, "mca", NULL, MTX_SPIN); STAILQ_INIT(&mca_records); TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL); callout_init(&mca_timer, 1); STAILQ_INIT(&mca_freelist); TASK_INIT(&mca_refill_task, 0, mca_refill, NULL); mca_fill_freelist(); SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0, "Record count"); SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks, 0, sysctl_positive_int, "I", "Periodic interval in seconds to scan for machine checks"); SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records"); SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO, "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, sysctl_mca_scan, "I", "Force an immediate scan for machine checks"); #ifdef DEV_APIC if (mcg_cap & MCG_CAP_CMCI_P) cmci_setup(); #endif } #ifdef DEV_APIC /* * See if we should monitor CMCI for this bank. If CMCI_EN is already * set in MC_CTL2, then another CPU is responsible for this bank, so * ignore it. If CMCI_EN returns zero after being set, then this bank * does not support CMCI_EN. If this CPU sets CMCI_EN, then it should * now monitor this bank. */ static void cmci_monitor(int i) { struct cmc_state *cc; uint64_t ctl; KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid))); ctl = rdmsr(MSR_MC_CTL2(i)); if (ctl & MC_CTL2_CMCI_EN) /* Already monitored by another CPU. */ return; /* Set the threshold to one event for now. */ ctl &= ~MC_CTL2_THRESHOLD; ctl |= MC_CTL2_CMCI_EN | 1; wrmsr(MSR_MC_CTL2(i), ctl); ctl = rdmsr(MSR_MC_CTL2(i)); if (!(ctl & MC_CTL2_CMCI_EN)) /* This bank does not support CMCI. */ return; cc = &cmc_state[PCPU_GET(cpuid)][i]; /* Determine maximum threshold. */ ctl &= ~MC_CTL2_THRESHOLD; ctl |= 0x7fff; wrmsr(MSR_MC_CTL2(i), ctl); ctl = rdmsr(MSR_MC_CTL2(i)); cc->max_threshold = ctl & MC_CTL2_THRESHOLD; /* Start off with a threshold of 1. */ ctl &= ~MC_CTL2_THRESHOLD; ctl |= 1; wrmsr(MSR_MC_CTL2(i), ctl); /* Mark this bank as monitored. */ PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i); } /* * For resume, reset the threshold for any banks we monitor back to * one and throw away the timestamp of the last interrupt. */ static void cmci_resume(int i) { struct cmc_state *cc; uint64_t ctl; KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid))); /* Ignore banks not monitored by this CPU. */ if (!(PCPU_GET(cmci_mask) & 1 << i)) return; cc = &cmc_state[PCPU_GET(cpuid)][i]; cc->last_intr = -ticks; ctl = rdmsr(MSR_MC_CTL2(i)); ctl &= ~MC_CTL2_THRESHOLD; ctl |= MC_CTL2_CMCI_EN | 1; wrmsr(MSR_MC_CTL2(i), ctl); } #endif /* * Initializes per-CPU machine check registers and enables corrected * machine check interrupts. */ static void _mca_init(int boot) { uint64_t mcg_cap; uint64_t ctl, mask; int i, skip; /* MCE is required. */ if (!mca_enabled || !(cpu_feature & CPUID_MCE)) return; if (cpu_feature & CPUID_MCA) { if (boot) PCPU_SET(cmci_mask, 0); mcg_cap = rdmsr(MSR_MCG_CAP); if (mcg_cap & MCG_CAP_CTL_P) /* Enable MCA features. 
*/ wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE); if (PCPU_GET(cpuid) == 0 && boot) mca_setup(mcg_cap); /* * Disable logging of level one TLB parity (L1TP) errors by * the data cache as an alternative workaround for AMD Family * 10h Erratum 383. Unlike the recommended workaround, there * is no performance penalty to this workaround. However, * L1TP errors will go unreported. */ if (cpu_vendor_id == CPU_VENDOR_AMD && CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) { mask = rdmsr(MSR_MC0_CTL_MASK); if ((mask & (1UL << 5)) == 0) wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5)); } for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) { /* By default enable logging of all errors. */ ctl = 0xffffffffffffffffUL; skip = 0; if (cpu_vendor_id == CPU_VENDOR_INTEL) { /* * For P6 models before Nehalem MC0_CTL is * always enabled and reserved. */ if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) < 0x1a) skip = 1; } else if (cpu_vendor_id == CPU_VENDOR_AMD) { /* BKDG for Family 10h: unset GartTblWkEn. */ if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf) ctl &= ~(1UL << 10); } if (!skip) wrmsr(MSR_MC_CTL(i), ctl); #ifdef DEV_APIC if (mcg_cap & MCG_CAP_CMCI_P) { if (boot) cmci_monitor(i); else cmci_resume(i); } #endif /* Clear all errors. */ wrmsr(MSR_MC_STATUS(i), 0); } #ifdef DEV_APIC if (PCPU_GET(cmci_mask) != 0 && boot) lapic_enable_cmc(); #endif } load_cr4(rcr4() | CR4_MCE); } /* Must be executed on each CPU during boot. */ void mca_init(void) { _mca_init(1); } /* Must be executed on each CPU during resume. */ void mca_resume(void) { _mca_init(0); } /* * The machine check registers for the BSP cannot be initialized until * the local APIC is initialized. This happens at SI_SUB_CPU, * SI_ORDER_SECOND. */ static void mca_init_bsp(void *arg __unused) { mca_init(); } SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL); /* Called when a machine check exception fires. */ void mca_intr(void) { uint64_t mcg_status; int old_count, recoverable; if (!(cpu_feature & CPUID_MCA)) { /* * Just print the values of the old Pentium registers * and panic. */ printf("MC Type: 0x%jx Address: 0x%jx\n", (uintmax_t)rdmsr(MSR_P5_MC_TYPE), (uintmax_t)rdmsr(MSR_P5_MC_ADDR)); panic("Machine check"); } /* Scan the banks and check for any non-recoverable errors. */ old_count = mca_count; recoverable = mca_scan(MCE); mcg_status = rdmsr(MSR_MCG_STATUS); if (!(mcg_status & MCG_STATUS_RIPV)) recoverable = 0; if (!recoverable) { /* * Wait for at least one error to be logged before * panic'ing. Some errors will assert a machine check * on all CPUs, but only certain CPUs will find a valid * bank to log. */ while (mca_count == old_count) cpu_spinwait(); panic("Unrecoverable machine check exception"); } /* Clear MCIP. */ wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP); } #ifdef DEV_APIC /* Called for a CMCI (correctable machine check interrupt). */ void cmc_intr(void) { struct mca_internal *mca; int count; /* * Serialize MCA bank scanning to prevent collisions from * sibling threads. */ count = mca_scan(CMCI); /* If we found anything, log them to the console. */ if (count != 0) { mtx_lock_spin(&mca_lock); STAILQ_FOREACH(mca, &mca_records, link) { if (!mca->logged) { mca->logged = 1; mca_log(&mca->rec); } } mtx_unlock_spin(&mca_lock); } } #endif