Index: user/jeff/numa/lib/libc/sys/Symbol.map =================================================================== --- user/jeff/numa/lib/libc/sys/Symbol.map (revision 326888) +++ user/jeff/numa/lib/libc/sys/Symbol.map (revision 326889) @@ -1,1025 +1,1031 @@ /* * $FreeBSD$ */ /* * It'd be nice to automatically generate the syscall symbols, but we * don't know to what version they will eventually belong to, so for now * it has to be manual. */ FBSD_1.0 { __acl_aclcheck_fd; __acl_aclcheck_file; __acl_aclcheck_link; __acl_delete_fd; __acl_delete_file; __acl_delete_link; __acl_get_fd; __acl_get_file; __acl_get_link; __acl_set_fd; __acl_set_file; __acl_set_link; __getcwd; __mac_execve; __mac_get_fd; __mac_get_file; __mac_get_link; __mac_get_pid; __mac_get_proc; __mac_set_fd; __mac_set_file; __mac_set_link; __mac_set_proc; __setugid; __syscall; __sysctl; _umtx_op; abort2; accept; access; acct; adjtime; aio_cancel; aio_error; aio_fsync; aio_read; aio_return; aio_suspend; aio_waitcomplete; aio_write; audit; auditctl; auditon; bind; chdir; chflags; chmod; chown; chroot; clock_getres; clock_gettime; clock_settime; close; connect; dup; dup2; eaccess; execve; extattr_delete_fd; extattr_delete_file; extattr_delete_link; extattr_get_fd; extattr_get_file; extattr_get_link; extattr_list_fd; extattr_list_file; extattr_list_link; extattr_set_fd; extattr_set_file; extattr_set_link; extattrctl; fchdir; fchflags; fchmod; fchown; fcntl; fhopen; flock; fork; fpathconf; fsync; futimes; getaudit; getaudit_addr; getauid; getcontext; getdtablesize; getegid; geteuid; getfh; getgid; getgroups; getitimer; getpeername; getpgid; getpgrp; getpid; getppid; getpriority; getresgid; getresuid; getrlimit; getrusage; getsid; getsockname; getsockopt; gettimeofday; getuid; ioctl; issetugid; jail; jail_attach; kenv; kill; kldfind; kldfirstmod; kldload; kldnext; kldstat; kldsym; kldunload; kldunloadf; kqueue; kmq_notify; /* Do we want these to be public interfaces? */ kmq_open; /* librt uses them to provide mq_xxx. 
*/ kmq_setattr; kmq_timedreceive; kmq_timedsend; kmq_unlink; ksem_close; ksem_destroy; ksem_getvalue; ksem_init; ksem_open; ksem_post; ksem_timedwait; ksem_trywait; ksem_unlink; ksem_wait; ktrace; lchflags; lchmod; lchown; lgetfh; link; lio_listio; listen; lutimes; mac_syscall; madvise; mincore; minherit; mkdir; mkfifo; mlock; mlockall; modfind; modfnext; modnext; modstat; mount; mprotect; msgget; msgrcv; msgsnd; msgsys; msync; munlock; munlockall; munmap; nanosleep; netbsd_lchown; netbsd_msync; nfssvc; nmount; ntp_adjtime; ntp_gettime; open; pathconf; pipe; poll; posix_openpt; preadv; profil; pselect; ptrace; pwritev; quotactl; read; readlink; readv; reboot; recvfrom; recvmsg; rename; revoke; rfork; rmdir; rtprio; rtprio_thread; sched_get_priority_max; sched_get_priority_min; sched_getparam; sched_getscheduler; sched_rr_get_interval; sched_setparam; sched_setscheduler; sched_yield; select; semget; semop; semsys; sendfile; sendmsg; sendto; setaudit; setaudit_addr; setauid; setegid; seteuid; setgid; setgroups; setitimer; setlogin; setpgid; setpriority; setregid; setresgid; setresuid; setreuid; setrlimit; setsid; setsockopt; settimeofday; setuid; shm_open; shm_unlink; shmat; shmdt; shmget; shmsys; shutdown; sigaction; sigaltstack; sigpending; sigprocmask; sigqueue; sigreturn; sigsuspend; sigtimedwait; sigwait; sigwaitinfo; socket; socketpair; swapoff; swapon; symlink; sync; sysarch; syscall; thr_create; thr_exit; thr_kill; thr_kill2; thr_new; thr_self; thr_set_name; thr_suspend; thr_wake; ktimer_create; /* Do we want these to be public interfaces? */ ktimer_delete; /* librt uses them to provide timer_xxx. */ ktimer_getoverrun; ktimer_gettime; ktimer_settime; umask; undelete; unlink; unmount; utimes; utrace; uuidgen; vadvise; wait4; write; writev; __error; ftruncate; lseek; mmap; pread; pwrite; truncate; }; FBSD_1.1 { __semctl; closefrom; cpuset; cpuset_getid; cpuset_setid; cpuset_getaffinity; cpuset_setaffinity; faccessat; fchmodat; fchownat; fexecve; futimesat; jail_get; jail_set; jail_remove; linkat; lpathconf; mkdirat; mkfifoat; msgctl; readlinkat; renameat; setfib; shmctl; symlinkat; unlinkat; }; FBSD_1.2 { cap_enter; cap_getmode; getloginclass; pdfork; pdgetpid; pdkill; posix_fallocate; rctl_get_racct; rctl_get_rules; rctl_get_limits; rctl_add_rule; rctl_remove_rule; setloginclass; }; FBSD_1.3 { accept4; aio_mlock; bindat; cap_fcntls_get; cap_fcntls_limit; cap_ioctls_get; cap_ioctls_limit; __cap_rights_get; cap_rights_limit; cap_sandboxed; chflagsat; clock_getcpuclockid2; connectat; ffclock_getcounter; ffclock_getestimate; ffclock_setestimate; pipe2; posix_fadvise; procctl; wait6; }; FBSD_1.4 { futimens; ppoll; utimensat; numa_setaffinity; numa_getaffinity; sendmmsg; recvmmsg; }; FBSD_1.5 { clock_nanosleep; fdatasync; fhstat; fhstatfs; fstat; fstatat; fstatfs; getdents; getdirentries; getfsstat; kevent; lstat; mknod; mknodat; stat; statfs; + cpuset_getdomain; + cpuset_setdomain; }; FBSDprivate_1.0 { ___acl_aclcheck_fd; __sys___acl_aclcheck_fd; ___acl_aclcheck_file; __sys___acl_aclcheck_file; ___acl_aclcheck_link; __sys___acl_aclcheck_link; ___acl_delete_fd; __sys___acl_delete_fd; ___acl_delete_file; __sys___acl_delete_file; ___acl_delete_link; __sys___acl_delete_link; ___acl_get_fd; __sys___acl_get_fd; ___acl_get_file; __sys___acl_get_file; ___acl_get_link; __sys___acl_get_link; ___acl_set_fd; __sys___acl_set_fd; ___acl_set_file; __sys___acl_set_file; ___acl_set_link; __sys___acl_set_link; ___getcwd; __sys___getcwd; ___mac_execve; __sys___mac_execve; ___mac_get_fd; __sys___mac_get_fd; 
___mac_get_file; __sys___mac_get_file; ___mac_get_link; __sys___mac_get_link; ___mac_get_pid; __sys___mac_get_pid; ___mac_get_proc; __sys___mac_get_proc; ___mac_set_fd; __sys___mac_set_fd; ___mac_set_file; __sys___mac_set_file; ___mac_set_link; __sys___mac_set_link; ___mac_set_proc; __sys___mac_set_proc; ___semctl; __sys___semctl; ___setugid; __sys___setugid; ___syscall; __sys___syscall; ___sysctl; __sys___sysctl; __umtx_op; __sys__umtx_op; _abort2; __sys_abort2; _accept; __sys_accept; _accept4; __sys_accept4; _access; __sys_access; _acct; __sys_acct; _adjtime; __sys_adjtime; __sys_aio_cancel; __sys_aio_error; __sys_aio_fsync; __sys_aio_read; __sys_aio_return; __sys_aio_suspend; __sys_aio_waitcomplete; __sys_aio_write; _audit; __sys_audit; _auditctl; __sys_auditctl; _auditon; __sys_auditon; _bind; __sys_bind; _chdir; __sys_chdir; _chflags; __sys_chflags; _chmod; __sys_chmod; _chown; __sys_chown; _chroot; __sys_chroot; _clock_getcpuclockid2; __sys_clock_getcpuclockid2; _clock_getres; __sys_clock_getres; _clock_gettime; __sys_clock_gettime; __sys_clock_nanosleep; _clock_settime; __sys_clock_settime; _close; __sys_close; _closefrom; __sys_closefrom; _connect; __sys_connect; _cpuset; __sys_cpuset; _cpuset_getid; __sys_cpuset_getid; _cpuset_setid; __sys_cpuset_setid; _cpuset_getaffinity; __sys_cpuset_getaffinity; _cpuset_setaffinity; __sys_cpuset_setaffinity; _dup; __sys_dup; _dup2; __sys_dup2; _eaccess; __sys_eaccess; _execve; __sys_execve; _extattr_delete_fd; __sys_extattr_delete_fd; _extattr_delete_file; __sys_extattr_delete_file; _extattr_delete_link; __sys_extattr_delete_link; _extattr_get_fd; __sys_extattr_get_fd; _extattr_get_file; __sys_extattr_get_file; _extattr_get_link; __sys_extattr_get_link; _extattr_list_fd; __sys_extattr_list_fd; _extattr_list_file; __sys_extattr_list_file; _extattr_list_link; __sys_extattr_list_link; _extattr_set_fd; __sys_extattr_set_fd; _extattr_set_file; __sys_extattr_set_file; _extattr_set_link; __sys_extattr_set_link; _extattrctl; __sys_extattrctl; _fchdir; __sys_fchdir; _fchflags; __sys_fchflags; _fchmod; __sys_fchmod; _fchown; __sys_fchown; _fcntl; __sys_fcntl; __fcntl_compat; _fhopen; __sys_fhopen; _fhstat; __sys_fhstat; _fhstatfs; __sys_fhstatfs; _flock; __sys_flock; _fork; __sys_fork; _fpathconf; __sys_fpathconf; _fstat; __sys_fstat; _fstatfs; __sys_fstatfs; _fsync; __sys_fsync; _fdatasync; __sys_fdatasync; _futimes; __sys_futimes; _getaudit; __sys_getaudit; _getaudit_addr; __sys_getaudit_addr; _getauid; __sys_getauid; _getcontext; __sys_getcontext; _getdirentries; __sys_getdirentries; _getdtablesize; __sys_getdtablesize; _getegid; __sys_getegid; _geteuid; __sys_geteuid; _getfh; __sys_getfh; _getfsstat; __sys_getfsstat; _getgid; __sys_getgid; _getgroups; __sys_getgroups; _getitimer; __sys_getitimer; _getpeername; __sys_getpeername; _getpgid; __sys_getpgid; _getpgrp; __sys_getpgrp; _getpid; __sys_getpid; _getppid; __sys_getppid; _getpriority; __sys_getpriority; _getresgid; __sys_getresgid; _getresuid; __sys_getresuid; _getrlimit; __sys_getrlimit; _getrusage; __sys_getrusage; _getsid; __sys_getsid; _getsockname; __sys_getsockname; _getsockopt; __sys_getsockopt; _gettimeofday; __sys_gettimeofday; _getuid; __sys_getuid; _ioctl; __sys_ioctl; _issetugid; __sys_issetugid; _jail; __sys_jail; _jail_attach; __sys_jail_attach; _kenv; __sys_kenv; _kevent; __sys_kevent; _kill; __sys_kill; _kldfind; __sys_kldfind; _kldfirstmod; __sys_kldfirstmod; _kldload; __sys_kldload; _kldnext; __sys_kldnext; _kldstat; __sys_kldstat; _kldsym; __sys_kldsym; _kldunload; 
__sys_kldunload; _kldunloadf; __sys_kldunloadf; _kmq_notify; __sys_kmq_notify; _kmq_open; __sys_kmq_open; _kmq_setattr; __sys_kmq_setattr; _kmq_timedreceive; __sys_kmq_timedreceive; _kmq_timedsend; __sys_kmq_timedsend; _kmq_unlink; __sys_kmq_unlink; _kqueue; __sys_kqueue; _ksem_close; __sys_ksem_close; _ksem_destroy; __sys_ksem_destroy; _ksem_getvalue; __sys_ksem_getvalue; _ksem_init; __sys_ksem_init; _ksem_open; __sys_ksem_open; _ksem_post; __sys_ksem_post; _ksem_timedwait; __sys_ksem_timedwait; _ksem_trywait; __sys_ksem_trywait; _ksem_unlink; __sys_ksem_unlink; _ksem_wait; __sys_ksem_wait; _ktrace; __sys_ktrace; _lchflags; __sys_lchflags; _lchmod; __sys_lchmod; _lchown; __sys_lchown; _lgetfh; __sys_lgetfh; _link; __sys_link; __sys_lio_listio; _listen; __sys_listen; _lutimes; __sys_lutimes; _mac_syscall; __sys_mac_syscall; _madvise; __sys_madvise; _mincore; __sys_mincore; _minherit; __sys_minherit; _mkdir; __sys_mkdir; _mkfifo; __sys_mkfifo; _mknod; __sys_mknod; _mlock; __sys_mlock; _mlockall; __sys_mlockall; _modfind; __sys_modfind; _modfnext; __sys_modfnext; _modnext; __sys_modnext; _modstat; __sys_modstat; _mount; __sys_mount; _mprotect; __sys_mprotect; _msgctl; __sys_msgctl; _msgget; __sys_msgget; _msgrcv; __sys_msgrcv; _msgsnd; __sys_msgsnd; _msgsys; __sys_msgsys; _msync; __sys_msync; _munlock; __sys_munlock; _munlockall; __sys_munlockall; _munmap; __sys_munmap; _nanosleep; __sys_nanosleep; _netbsd_lchown; __sys_netbsd_lchown; _netbsd_msync; __sys_netbsd_msync; _nfssvc; __sys_nfssvc; _nmount; __sys_nmount; _ntp_adjtime; __sys_ntp_adjtime; _ntp_gettime; __sys_ntp_gettime; _open; __sys_open; _openat; __sys_openat; _pathconf; __sys_pathconf; _pipe; __sys_pipe; _poll; __sys_poll; _ppoll; __sys_ppoll; _preadv; __sys_preadv; _procctl; __sys_procctl; _profil; __sys_profil; _pselect; __sys_pselect; _ptrace; __sys_ptrace; _pwritev; __sys_pwritev; _quotactl; __sys_quotactl; _read; __sys_read; _readlink; __sys_readlink; _readv; __sys_readv; _reboot; __sys_reboot; _recvfrom; __sys_recvfrom; _recvmsg; __sys_recvmsg; _rename; __sys_rename; _revoke; __sys_revoke; _rfork; __sys_rfork; _rmdir; __sys_rmdir; _rtprio; __sys_rtprio; _rtprio_thread; __sys_rtprio_thread; _sched_get_priority_max; __sys_sched_get_priority_max; _sched_get_priority_min; __sys_sched_get_priority_min; _sched_getparam; __sys_sched_getparam; _sched_getscheduler; __sys_sched_getscheduler; _sched_rr_get_interval; __sys_sched_rr_get_interval; _sched_setparam; __sys_sched_setparam; _sched_setscheduler; __sys_sched_setscheduler; _sched_yield; __sys_sched_yield; _select; __sys_select; _semget; __sys_semget; _semop; __sys_semop; _semsys; __sys_semsys; _sendfile; __sys_sendfile; _sendmsg; __sys_sendmsg; _sendto; __sys_sendto; _setaudit; __sys_setaudit; _setaudit_addr; __sys_setaudit_addr; _setauid; __sys_setauid; _setcontext; __sys_setcontext; _setegid; __sys_setegid; _seteuid; __sys_seteuid; _setgid; __sys_setgid; _setgroups; __sys_setgroups; _setitimer; __sys_setitimer; _setlogin; __sys_setlogin; _setpgid; __sys_setpgid; _setpriority; __sys_setpriority; _setregid; __sys_setregid; _setresgid; __sys_setresgid; _setresuid; __sys_setresuid; _setreuid; __sys_setreuid; _setrlimit; __sys_setrlimit; _setsid; __sys_setsid; _setsockopt; __sys_setsockopt; _settimeofday; __sys_settimeofday; _setuid; __sys_setuid; _shm_open; __sys_shm_open; _shm_unlink; __sys_shm_unlink; _shmat; __sys_shmat; _shmctl; __sys_shmctl; _shmdt; __sys_shmdt; _shmget; __sys_shmget; _shmsys; __sys_shmsys; _shutdown; __sys_shutdown; _sigaction; __sys_sigaction; _sigaltstack; 
__sys_sigaltstack; _sigpending; __sys_sigpending; _sigprocmask; __sys_sigprocmask; _sigqueue; __sys_sigqueue; _sigreturn; __sys_sigreturn; _sigsuspend; __sys_sigsuspend; _sigtimedwait; __sys_sigtimedwait; _sigwait; __sigwait; __sys_sigwait; _sigwaitinfo; __sys_sigwaitinfo; _socket; __sys_socket; _socketpair; __sys_socketpair; _statfs; __sys_statfs; _swapcontext; __sys_swapcontext; _swapoff; __sys_swapoff; _swapon; __sys_swapon; _symlink; __sys_symlink; _sync; __sys_sync; _sysarch; __sys_sysarch; _syscall; __sys_syscall; _thr_create; __sys_thr_create; _thr_exit; __sys_thr_exit; _thr_kill; __sys_thr_kill; _thr_kill2; __sys_thr_kill2; _thr_new; __sys_thr_new; _thr_self; __sys_thr_self; _thr_set_name; __sys_thr_set_name; _thr_suspend; __sys_thr_suspend; _thr_wake; __sys_thr_wake; _ktimer_create; __sys_ktimer_create; _ktimer_delete; __sys_ktimer_delete; _ktimer_getoverrun; __sys_ktimer_getoverrun; _ktimer_gettime; __sys_ktimer_gettime; _ktimer_settime; __sys_ktimer_settime; _umask; __sys_umask; _undelete; __sys_undelete; _unlink; __sys_unlink; _unmount; __sys_unmount; _utimes; __sys_utimes; _utrace; __sys_utrace; _uuidgen; __sys_uuidgen; _vadvise; __sys_vadvise; _wait4; __sys_wait4; _wait6; __sys_wait6; _write; __sys_write; _writev; __sys_writev; __set_error_selector; nlm_syscall; gssd_syscall; __libc_interposing_slot; __libc_sigwait; + _cpuset_getdomain; + __sys_cpuset_getdomain; + _cpuset_setdomain; + __sys_cpuset_setdomain; }; Index: user/jeff/numa/sys/compat/freebsd32/syscalls.master =================================================================== --- user/jeff/numa/sys/compat/freebsd32/syscalls.master (revision 326888) +++ user/jeff/numa/sys/compat/freebsd32/syscalls.master (revision 326889) @@ -1,1122 +1,1131 @@ $FreeBSD$ ; from: @(#)syscalls.master 8.2 (Berkeley) 1/13/94 ; from: src/sys/kern/syscalls.master 1.107 ; ; System call name/number master file. ; Processed to created init_sysent.c, syscalls.c and syscall.h. ; Columns: number audit type name alt{name,tag,rtyp}/comments ; number system call number, must be in order ; audit the audit event associated with the system call ; A value of AUE_NULL means no auditing, but it also means that ; there is no audit event for the call at this time. For the ; case where the event exists, but we don't want auditing, the ; event should be #defined to AUE_NULL in audit_kevents.h. ; type one of STD, OBSOL, UNIMPL, COMPAT, COMPAT4, COMPAT6, ; COMPAT7, COMPAT11, NODEF, NOARGS, NOPROTO, NOSTD ; The COMPAT* options may be combined with one or more NO* ; options separated by '|' with no spaces (e.g. 
COMPAT|NOARGS) ; name psuedo-prototype of syscall routine ; If one of the following alts is different, then all appear: ; altname name of system call if different ; alttag name of args struct tag if different from [o]`name'"_args" ; altrtyp return type if not int (bogus - syscalls always return int) ; for UNIMPL/OBSOL, name continues with comments ; types: ; STD always included ; COMPAT included on COMPAT #ifdef ; COMPAT4 included on COMPAT4 #ifdef (FreeBSD 4 compat) ; COMPAT6 included on COMPAT6 #ifdef (FreeBSD 6 compat) ; COMPAT7 included on COMPAT7 #ifdef (FreeBSD 7 compat) ; COMPAT10 included on COMPAT10 #ifdef (FreeBSD 10 compat) ; COMPAT11 included on COMPAT11 #ifdef (FreeBSD 11 compat) ; OBSOL obsolete, not included in system, only specifies name ; UNIMPL not implemented, placeholder only ; NOSTD implemented but as a lkm that can be statically ; compiled in; sysent entry will be filled with lkmressys ; so the SYSCALL_MODULE macro works ; NOARGS same as STD except do not create structure in sys/sysproto.h ; NODEF same as STD except only have the entry in the syscall table ; added. Meaning - do not create structure or function ; prototype in sys/sysproto.h ; NOPROTO same as STD except do not create structure or ; function prototype in sys/sysproto.h. Does add a ; definition to syscall.h besides adding a sysent. ; #ifdef's, etc. may be included, and are copied to the output files. #include #include #include #include #include #include #include #if !defined(PAD64_REQUIRED) && (defined(__powerpc__) || defined(__mips__)) #define PAD64_REQUIRED #endif ; Reserved/unimplemented system calls in the range 0-150 inclusive ; are reserved for use in future Berkeley releases. ; Additional system calls implemented in vendor and other ; redistributions should be placed in the reserved range at the end ; of the current calls. 
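As a worked reading of the column format described above, here is how the first of the two entries this revision adds (561, near the end of this table) parses; the notes on generated output follow only from the type descriptions above, not from inspecting the generator itself:

	561		number:	the next free syscall slot after 560
	AUE_NULL	audit:	no audit event is associated with the call yet
	STD		type:	always included; unlike NOARGS/NODEF/NOPROTO it does
			not suppress the generated args structure or prototype
	{ int cpuset_getdomain(...); }
			name:	the pseudo-prototype; no altname/alttag/altrtyp
			follow, so the defaults derived from the name apply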
0 AUE_NULL NOPROTO { int nosys(void); } syscall nosys_args int 1 AUE_EXIT NOPROTO { void sys_exit(int rval); } exit \ sys_exit_args void 2 AUE_FORK NOPROTO { int fork(void); } 3 AUE_READ NOPROTO { ssize_t read(int fd, void *buf, \ size_t nbyte); } 4 AUE_WRITE NOPROTO { ssize_t write(int fd, const void *buf, \ size_t nbyte); } 5 AUE_OPEN_RWTC NOPROTO { int open(char *path, int flags, \ int mode); } 6 AUE_CLOSE NOPROTO { int close(int fd); } 7 AUE_WAIT4 STD { int freebsd32_wait4(int pid, int *status, \ int options, struct rusage32 *rusage); } 8 AUE_CREAT OBSOL old creat 9 AUE_LINK NOPROTO { int link(char *path, char *link); } 10 AUE_UNLINK NOPROTO { int unlink(char *path); } 11 AUE_NULL OBSOL execv 12 AUE_CHDIR NOPROTO { int chdir(char *path); } 13 AUE_FCHDIR NOPROTO { int fchdir(int fd); } 14 AUE_MKNOD COMPAT11 { int freebsd32_mknod(char *path, \ int mode, int dev); } 15 AUE_CHMOD NOPROTO { int chmod(char *path, int mode); } 16 AUE_CHOWN NOPROTO { int chown(char *path, int uid, int gid); } 17 AUE_NULL NOPROTO { int obreak(char *nsize); } break \ obreak_args int 18 AUE_GETFSSTAT COMPAT4 { int freebsd32_getfsstat( \ struct statfs32 *buf, long bufsize, \ int mode); } 19 AUE_LSEEK COMPAT { int freebsd32_lseek(int fd, int offset, \ int whence); } 20 AUE_GETPID NOPROTO { pid_t getpid(void); } 21 AUE_MOUNT NOPROTO { int mount(char *type, char *path, \ int flags, caddr_t data); } 22 AUE_UMOUNT NOPROTO { int unmount(char *path, int flags); } 23 AUE_SETUID NOPROTO { int setuid(uid_t uid); } 24 AUE_GETUID NOPROTO { uid_t getuid(void); } 25 AUE_GETEUID NOPROTO { uid_t geteuid(void); } 26 AUE_PTRACE NOPROTO { int ptrace(int req, pid_t pid, \ caddr_t addr, int data); } 27 AUE_RECVMSG STD { int freebsd32_recvmsg(int s, struct msghdr32 *msg, \ int flags); } 28 AUE_SENDMSG STD { int freebsd32_sendmsg(int s, struct msghdr32 *msg, \ int flags); } 29 AUE_RECVFROM STD { int freebsd32_recvfrom(int s, uint32_t buf, \ uint32_t len, int flags, uint32_t from, \ uint32_t fromlenaddr); } 30 AUE_ACCEPT NOPROTO { int accept(int s, caddr_t name, \ int *anamelen); } 31 AUE_GETPEERNAME NOPROTO { int getpeername(int fdes, caddr_t asa, \ int *alen); } 32 AUE_GETSOCKNAME NOPROTO { int getsockname(int fdes, caddr_t asa, \ int *alen); } 33 AUE_ACCESS NOPROTO { int access(char *path, int amode); } 34 AUE_CHFLAGS NOPROTO { int chflags(const char *path, u_long flags); } 35 AUE_FCHFLAGS NOPROTO { int fchflags(int fd, u_long flags); } 36 AUE_SYNC NOPROTO { int sync(void); } 37 AUE_KILL NOPROTO { int kill(int pid, int signum); } 38 AUE_STAT COMPAT { int freebsd32_stat(char *path, \ struct ostat32 *ub); } 39 AUE_GETPPID NOPROTO { pid_t getppid(void); } 40 AUE_LSTAT COMPAT { int freebsd32_lstat(char *path, \ struct ostat *ub); } 41 AUE_DUP NOPROTO { int dup(u_int fd); } 42 AUE_PIPE COMPAT10 { int freebsd32_pipe(void); } 43 AUE_GETEGID NOPROTO { gid_t getegid(void); } 44 AUE_PROFILE NOPROTO { int profil(caddr_t samples, size_t size, \ size_t offset, u_int scale); } 45 AUE_KTRACE NOPROTO { int ktrace(const char *fname, int ops, \ int facs, int pid); } 46 AUE_SIGACTION COMPAT { int freebsd32_sigaction( int signum, \ struct osigaction32 *nsa, \ struct osigaction32 *osa); } 47 AUE_GETGID NOPROTO { gid_t getgid(void); } 48 AUE_SIGPROCMASK COMPAT { int freebsd32_sigprocmask(int how, \ osigset_t mask); } 49 AUE_GETLOGIN NOPROTO { int getlogin(char *namebuf, \ u_int namelen); } 50 AUE_SETLOGIN NOPROTO { int setlogin(char *namebuf); } 51 AUE_ACCT NOPROTO { int acct(char *path); } 52 AUE_SIGPENDING COMPAT { int freebsd32_sigpending(void); } 53 
AUE_SIGALTSTACK STD { int freebsd32_sigaltstack( \ struct sigaltstack32 *ss, \ struct sigaltstack32 *oss); } 54 AUE_IOCTL STD { int freebsd32_ioctl(int fd, uint32_t com, \ struct md_ioctl32 *data); } 55 AUE_REBOOT NOPROTO { int reboot(int opt); } 56 AUE_REVOKE NOPROTO { int revoke(char *path); } 57 AUE_SYMLINK NOPROTO { int symlink(char *path, char *link); } 58 AUE_READLINK NOPROTO { ssize_t readlink(char *path, char *buf, \ size_t count); } 59 AUE_EXECVE STD { int freebsd32_execve(char *fname, \ uint32_t *argv, uint32_t *envv); } 60 AUE_UMASK NOPROTO { int umask(int newmask); } umask \ umask_args int 61 AUE_CHROOT NOPROTO { int chroot(char *path); } 62 AUE_FSTAT COMPAT { int freebsd32_fstat(int fd, \ struct ostat32 *ub); } 63 AUE_NULL OBSOL ogetkerninfo 64 AUE_NULL COMPAT { int freebsd32_getpagesize( \ int32_t dummy); } 65 AUE_MSYNC NOPROTO { int msync(void *addr, size_t len, \ int flags); } 66 AUE_VFORK NOPROTO { int vfork(void); } 67 AUE_NULL OBSOL vread 68 AUE_NULL OBSOL vwrite 69 AUE_SBRK NOPROTO { int sbrk(int incr); } 70 AUE_SSTK NOPROTO { int sstk(int incr); } 71 AUE_MMAP COMPAT|NOPROTO { int mmap(void *addr, int len, \ int prot, int flags, int fd, int pos); } 72 AUE_O_VADVISE NOPROTO { int ovadvise(int anom); } vadvise \ ovadvise_args int 73 AUE_MUNMAP NOPROTO { int munmap(void *addr, size_t len); } 74 AUE_MPROTECT STD { int freebsd32_mprotect(void *addr, \ size_t len, int prot); } 75 AUE_MADVISE NOPROTO { int madvise(void *addr, size_t len, \ int behav); } 76 AUE_NULL OBSOL vhangup 77 AUE_NULL OBSOL vlimit 78 AUE_MINCORE NOPROTO { int mincore(const void *addr, size_t len, \ char *vec); } 79 AUE_GETGROUPS NOPROTO { int getgroups(u_int gidsetsize, \ gid_t *gidset); } 80 AUE_SETGROUPS NOPROTO { int setgroups(u_int gidsetsize, \ gid_t *gidset); } 81 AUE_GETPGRP NOPROTO { int getpgrp(void); } 82 AUE_SETPGRP NOPROTO { int setpgid(int pid, int pgid); } 83 AUE_SETITIMER STD { int freebsd32_setitimer(u_int which, \ struct itimerval32 *itv, \ struct itimerval32 *oitv); } 84 AUE_NULL OBSOL owait ; XXX implement 85 AUE_SWAPON NOPROTO { int swapon(char *name); } 86 AUE_GETITIMER STD { int freebsd32_getitimer(u_int which, \ struct itimerval32 *itv); } 87 AUE_O_GETHOSTNAME OBSOL ogethostname 88 AUE_O_SETHOSTNAME OBSOL osethostname 89 AUE_GETDTABLESIZE NOPROTO { int getdtablesize(void); } 90 AUE_DUP2 NOPROTO { int dup2(u_int from, u_int to); } 91 AUE_NULL UNIMPL getdopt 92 AUE_FCNTL STD { int freebsd32_fcntl(int fd, int cmd, \ int arg); } 93 AUE_SELECT STD { int freebsd32_select(int nd, fd_set *in, \ fd_set *ou, fd_set *ex, \ struct timeval32 *tv); } 94 AUE_NULL UNIMPL setdopt 95 AUE_FSYNC NOPROTO { int fsync(int fd); } 96 AUE_SETPRIORITY NOPROTO { int setpriority(int which, int who, \ int prio); } 97 AUE_SOCKET NOPROTO { int socket(int domain, int type, \ int protocol); } 98 AUE_CONNECT NOPROTO { int connect(int s, caddr_t name, \ int namelen); } 99 AUE_NULL OBSOL oaccept 100 AUE_GETPRIORITY NOPROTO { int getpriority(int which, int who); } 101 AUE_NULL OBSOL osend 102 AUE_NULL OBSOL orecv 103 AUE_SIGRETURN COMPAT { int freebsd32_sigreturn( \ struct ia32_sigcontext3 *sigcntxp); } 104 AUE_BIND NOPROTO { int bind(int s, caddr_t name, \ int namelen); } 105 AUE_SETSOCKOPT NOPROTO { int setsockopt(int s, int level, \ int name, caddr_t val, int valsize); } 106 AUE_LISTEN NOPROTO { int listen(int s, int backlog); } 107 AUE_NULL OBSOL vtimes 108 AUE_O_SIGVEC COMPAT { int freebsd32_sigvec(int signum, \ struct sigvec32 *nsv, \ struct sigvec32 *osv); } 109 AUE_O_SIGBLOCK COMPAT { int freebsd32_sigblock(int 
mask); } 110 AUE_O_SIGSETMASK COMPAT { int freebsd32_sigsetmask( int mask); } 111 AUE_SIGSUSPEND COMPAT { int freebsd32_sigsuspend( int mask); } 112 AUE_O_SIGSTACK COMPAT { int freebsd32_sigstack( \ struct sigstack32 *nss, \ struct sigstack32 *oss); } 113 AUE_NULL OBSOL orecvmsg 114 AUE_NULL OBSOL osendmsg 115 AUE_NULL OBSOL vtrace 116 AUE_GETTIMEOFDAY STD { int freebsd32_gettimeofday( \ struct timeval32 *tp, \ struct timezone *tzp); } 117 AUE_GETRUSAGE STD { int freebsd32_getrusage(int who, \ struct rusage32 *rusage); } 118 AUE_GETSOCKOPT NOPROTO { int getsockopt(int s, int level, \ int name, caddr_t val, int *avalsize); } 119 AUE_NULL UNIMPL resuba (BSD/OS 2.x) 120 AUE_READV STD { int freebsd32_readv(int fd, \ struct iovec32 *iovp, u_int iovcnt); } 121 AUE_WRITEV STD { int freebsd32_writev(int fd, \ struct iovec32 *iovp, u_int iovcnt); } 122 AUE_SETTIMEOFDAY STD { int freebsd32_settimeofday( \ struct timeval32 *tv, \ struct timezone *tzp); } 123 AUE_FCHOWN NOPROTO { int fchown(int fd, int uid, int gid); } 124 AUE_FCHMOD NOPROTO { int fchmod(int fd, int mode); } 125 AUE_RECVFROM OBSOL orecvfrom 126 AUE_SETREUID NOPROTO { int setreuid(int ruid, int euid); } 127 AUE_SETREGID NOPROTO { int setregid(int rgid, int egid); } 128 AUE_RENAME NOPROTO { int rename(char *from, char *to); } 129 AUE_TRUNCATE COMPAT|NOPROTO { int truncate(char *path, \ int length); } 130 AUE_FTRUNCATE COMPAT|NOPROTO { int ftruncate(int fd, int length); } 131 AUE_FLOCK NOPROTO { int flock(int fd, int how); } 132 AUE_MKFIFO NOPROTO { int mkfifo(char *path, int mode); } 133 AUE_SENDTO NOPROTO { int sendto(int s, caddr_t buf, \ size_t len, int flags, caddr_t to, \ int tolen); } 134 AUE_SHUTDOWN NOPROTO { int shutdown(int s, int how); } 135 AUE_SOCKETPAIR NOPROTO { int socketpair(int domain, int type, \ int protocol, int *rsv); } 136 AUE_MKDIR NOPROTO { int mkdir(char *path, int mode); } 137 AUE_RMDIR NOPROTO { int rmdir(char *path); } 138 AUE_UTIMES STD { int freebsd32_utimes(char *path, \ struct timeval32 *tptr); } 139 AUE_NULL OBSOL 4.2 sigreturn 140 AUE_ADJTIME STD { int freebsd32_adjtime( \ struct timeval32 *delta, \ struct timeval32 *olddelta); } 141 AUE_GETPEERNAME OBSOL ogetpeername 142 AUE_SYSCTL OBSOL ogethostid 143 AUE_SYSCTL OBSOL sethostid 144 AUE_GETRLIMIT OBSOL getrlimit 145 AUE_SETRLIMIT OBSOL setrlimit 146 AUE_KILLPG OBSOL killpg 147 AUE_SETSID NOPROTO { int setsid(void); } 148 AUE_QUOTACTL NOPROTO { int quotactl(char *path, int cmd, int uid, \ caddr_t arg); } 149 AUE_O_QUOTA OBSOL oquota 150 AUE_GETSOCKNAME OBSOL ogetsockname ; Syscalls 151-180 inclusive are reserved for vendor-specific ; system calls. (This includes various calls added for compatibity ; with other Unix variants.) ; Some of these calls are now supported by BSD... 151 AUE_NULL UNIMPL sem_lock (BSD/OS 2.x) 152 AUE_NULL UNIMPL sem_wakeup (BSD/OS 2.x) 153 AUE_NULL UNIMPL asyncdaemon (BSD/OS 2.x) ; 154 is initialised by the NLM code, if present. 154 AUE_NULL UNIMPL nlm_syscall ; 155 is initialized by the NFS code, if present. ; XXX this is a problem!!! 
155 AUE_NFS_SVC UNIMPL nfssvc 156 AUE_GETDIRENTRIES COMPAT { int freebsd32_getdirentries(int fd, \ char *buf, u_int count, uint32_t *basep); } 157 AUE_STATFS COMPAT4 { int freebsd32_statfs(char *path, \ struct statfs32 *buf); } 158 AUE_FSTATFS COMPAT4 { int freebsd32_fstatfs(int fd, \ struct statfs32 *buf); } 159 AUE_NULL UNIMPL nosys 160 AUE_LGETFH UNIMPL lgetfh 161 AUE_NFS_GETFH NOPROTO { int getfh(char *fname, \ struct fhandle *fhp); } 162 AUE_SYSCTL OBSOL getdomainname 163 AUE_SYSCTL OBSOL setdomainname 164 AUE_NULL OBSOL uname 165 AUE_SYSARCH STD { int freebsd32_sysarch(int op, char *parms); } 166 AUE_RTPRIO NOPROTO { int rtprio(int function, pid_t pid, \ struct rtprio *rtp); } 167 AUE_NULL UNIMPL nosys 168 AUE_NULL UNIMPL nosys 169 AUE_SEMSYS NOSTD { int freebsd32_semsys(int which, int a2, \ int a3, int a4, int a5); } 170 AUE_MSGSYS NOSTD { int freebsd32_msgsys(int which, int a2, \ int a3, int a4, int a5, int a6); } 171 AUE_SHMSYS NOSTD { int freebsd32_shmsys(uint32_t which, uint32_t a2, \ uint32_t a3, uint32_t a4); } 172 AUE_NULL UNIMPL nosys 173 AUE_PREAD COMPAT6 { ssize_t freebsd32_pread(int fd, void *buf, \ size_t nbyte, int pad, \ uint32_t offset1, uint32_t offset2); } 174 AUE_PWRITE COMPAT6 { ssize_t freebsd32_pwrite(int fd, \ const void *buf, size_t nbyte, int pad, \ uint32_t offset1, uint32_t offset2); } 175 AUE_NULL UNIMPL nosys 176 AUE_NTP_ADJTIME NOPROTO { int ntp_adjtime(struct timex *tp); } 177 AUE_NULL UNIMPL sfork (BSD/OS 2.x) 178 AUE_NULL UNIMPL getdescriptor (BSD/OS 2.x) 179 AUE_NULL UNIMPL setdescriptor (BSD/OS 2.x) 180 AUE_NULL UNIMPL nosys ; Syscalls 181-199 are used by/reserved for BSD 181 AUE_SETGID NOPROTO { int setgid(gid_t gid); } 182 AUE_SETEGID NOPROTO { int setegid(gid_t egid); } 183 AUE_SETEUID NOPROTO { int seteuid(uid_t euid); } 184 AUE_NULL UNIMPL lfs_bmapv 185 AUE_NULL UNIMPL lfs_markv 186 AUE_NULL UNIMPL lfs_segclean 187 AUE_NULL UNIMPL lfs_segwait 188 AUE_STAT COMPAT11 { int freebsd32_stat(char *path, \ struct freebsd11_stat32 *ub); } 189 AUE_FSTAT COMPAT11 { int freebsd32_fstat(int fd, \ struct freebsd11_stat32 *ub); } 190 AUE_LSTAT COMPAT11 { int freebsd32_lstat(char *path, \ struct freebsd11_stat32 *ub); } 191 AUE_PATHCONF NOPROTO { int pathconf(char *path, int name); } 192 AUE_FPATHCONF NOPROTO { int fpathconf(int fd, int name); } 193 AUE_NULL UNIMPL nosys 194 AUE_GETRLIMIT NOPROTO { int getrlimit(u_int which, \ struct rlimit *rlp); } getrlimit \ __getrlimit_args int 195 AUE_SETRLIMIT NOPROTO { int setrlimit(u_int which, \ struct rlimit *rlp); } setrlimit \ __setrlimit_args int 196 AUE_GETDIRENTRIES COMPAT11 { int freebsd32_getdirentries(int fd, \ char *buf, u_int count, int32_t *basep); } 197 AUE_MMAP COMPAT6 { caddr_t freebsd32_mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ int pad, uint32_t pos1, uint32_t pos2); } 198 AUE_NULL NOPROTO { int nosys(void); } __syscall \ __syscall_args int 199 AUE_LSEEK COMPAT6 { off_t freebsd32_lseek(int fd, int pad, \ uint32_t offset1, uint32_t offset2, \ int whence); } 200 AUE_TRUNCATE COMPAT6 { int freebsd32_truncate(char *path, \ int pad, uint32_t length1, \ uint32_t length2); } 201 AUE_FTRUNCATE COMPAT6 { int freebsd32_ftruncate(int fd, int pad, \ uint32_t length1, uint32_t length2); } 202 AUE_SYSCTL STD { int freebsd32_sysctl(int *name, \ u_int namelen, void *old, \ uint32_t *oldlenp, void *new, \ uint32_t newlen); } 203 AUE_MLOCK NOPROTO { int mlock(const void *addr, \ size_t len); } 204 AUE_MUNLOCK NOPROTO { int munlock(const void *addr, \ size_t len); } 205 AUE_UNDELETE NOPROTO { int 
undelete(char *path); } 206 AUE_FUTIMES STD { int freebsd32_futimes(int fd, \ struct timeval32 *tptr); } 207 AUE_GETPGID NOPROTO { int getpgid(pid_t pid); } 208 AUE_NULL UNIMPL newreboot (NetBSD) 209 AUE_POLL NOPROTO { int poll(struct pollfd *fds, u_int nfds, \ int timeout); } ; ; The following are reserved for loadable syscalls ; 210 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 211 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 212 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 213 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 214 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 215 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 216 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 217 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 218 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 219 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int ; ; The following were introduced with NetBSD/4.4Lite-2 ; They are initialized by their respective modules/sysinits ; XXX PROBLEM!! 220 AUE_SEMCTL COMPAT7|NOSTD { int freebsd32_semctl( \ int semid, int semnum, \ int cmd, union semun32 *arg); } 221 AUE_SEMGET NOSTD|NOPROTO { int semget(key_t key, int nsems, \ int semflg); } 222 AUE_SEMOP NOSTD|NOPROTO { int semop(int semid, \ struct sembuf *sops, u_int nsops); } 223 AUE_NULL UNIMPL semconfig 224 AUE_MSGCTL COMPAT7|NOSTD { int freebsd32_msgctl( \ int msqid, int cmd, \ struct msqid_ds32_old *buf); } 225 AUE_MSGGET NOSTD|NOPROTO { int msgget(key_t key, int msgflg); } 226 AUE_MSGSND NOSTD { int freebsd32_msgsnd(int msqid, void *msgp, \ size_t msgsz, int msgflg); } 227 AUE_MSGRCV NOSTD { int freebsd32_msgrcv(int msqid, void *msgp, \ size_t msgsz, long msgtyp, int msgflg); } 228 AUE_SHMAT NOSTD|NOPROTO { int shmat(int shmid, void *shmaddr, \ int shmflg); } 229 AUE_SHMCTL COMPAT7|NOSTD { int freebsd32_shmctl( \ int shmid, int cmd, \ struct shmid_ds32_old *buf); } 230 AUE_SHMDT NOSTD|NOPROTO { int shmdt(void *shmaddr); } 231 AUE_SHMGET NOSTD|NOPROTO { int shmget(key_t key, int size, \ int shmflg); } ; 232 AUE_NULL STD { int freebsd32_clock_gettime(clockid_t clock_id, \ struct timespec32 *tp); } 233 AUE_CLOCK_SETTIME STD { int freebsd32_clock_settime(clockid_t clock_id, \ const struct timespec32 *tp); } 234 AUE_NULL STD { int freebsd32_clock_getres(clockid_t clock_id, \ struct timespec32 *tp); } 235 AUE_NULL STD { int freebsd32_ktimer_create(\ clockid_t clock_id, \ struct sigevent32 *evp, int *timerid); } 236 AUE_NULL NOPROTO { int ktimer_delete(int timerid); } 237 AUE_NULL STD { int freebsd32_ktimer_settime(int timerid,\ int flags, \ const struct itimerspec32 *value, \ struct itimerspec32 *ovalue); } 238 AUE_NULL STD { int freebsd32_ktimer_gettime(int timerid,\ struct itimerspec32 *value); } 239 AUE_NULL NOPROTO { int ktimer_getoverrun(int timerid); } 240 AUE_NULL STD { int freebsd32_nanosleep( \ const struct timespec32 *rqtp, \ struct timespec32 *rmtp); } 241 AUE_NULL NOPROTO { int ffclock_getcounter(ffcounter *ffcount); } 242 AUE_NULL NOPROTO { int ffclock_setestimate( \ struct ffclock_estimate *cest); } 243 AUE_NULL NOPROTO { int ffclock_getestimate( \ struct ffclock_estimate *cest); } 244 AUE_NULL STD { int freebsd32_clock_nanosleep( \ clockid_t clock_id, int flags, \ const struct timespec32 *rqtp, \ struct timespec32 *rmtp); } 245 AUE_NULL UNIMPL nosys 246 AUE_NULL UNIMPL nosys 247 AUE_NULL STD { int freebsd32_clock_getcpuclockid2(\ uint32_t id1, uint32_t id2,\ int which, clockid_t *clock_id); } 248 AUE_NULL 
UNIMPL ntp_gettime 249 AUE_NULL UNIMPL nosys ; syscall numbers initially used in OpenBSD 250 AUE_MINHERIT NOPROTO { int minherit(void *addr, size_t len, \ int inherit); } 251 AUE_RFORK NOPROTO { int rfork(int flags); } 252 AUE_POLL OBSOL openbsd_poll 253 AUE_ISSETUGID NOPROTO { int issetugid(void); } 254 AUE_LCHOWN NOPROTO { int lchown(char *path, int uid, int gid); } 255 AUE_AIO_READ STD { int freebsd32_aio_read( \ struct aiocb32 *aiocbp); } 256 AUE_AIO_WRITE STD { int freebsd32_aio_write( \ struct aiocb32 *aiocbp); } 257 AUE_LIO_LISTIO STD { int freebsd32_lio_listio(int mode, \ struct aiocb32 * const *acb_list, \ int nent, struct sigevent32 *sig); } 258 AUE_NULL UNIMPL nosys 259 AUE_NULL UNIMPL nosys 260 AUE_NULL UNIMPL nosys 261 AUE_NULL UNIMPL nosys 262 AUE_NULL UNIMPL nosys 263 AUE_NULL UNIMPL nosys 264 AUE_NULL UNIMPL nosys 265 AUE_NULL UNIMPL nosys 266 AUE_NULL UNIMPL nosys 267 AUE_NULL UNIMPL nosys 268 AUE_NULL UNIMPL nosys 269 AUE_NULL UNIMPL nosys 270 AUE_NULL UNIMPL nosys 271 AUE_NULL UNIMPL nosys 272 AUE_O_GETDENTS COMPAT11 { int freebsd32_getdents(int fd, char *buf, \ int count); } 273 AUE_NULL UNIMPL nosys 274 AUE_LCHMOD NOPROTO { int lchmod(char *path, mode_t mode); } 275 AUE_LCHOWN NOPROTO { int lchown(char *path, uid_t uid, \ gid_t gid); } netbsd_lchown \ lchown_args int 276 AUE_LUTIMES STD { int freebsd32_lutimes(char *path, \ struct timeval32 *tptr); } 277 AUE_MSYNC NOPROTO { int msync(void *addr, size_t len, \ int flags); } netbsd_msync msync_args int 278 AUE_STAT COMPAT11|NOPROTO { int nstat(char *path, struct nstat *ub); } 279 AUE_FSTAT COMPAT11|NOPROTO { int nfstat(int fd, struct nstat *sb); } 280 AUE_LSTAT COMPAT11|NOPROTO { int nlstat(char *path, struct nstat *ub); } 281 AUE_NULL UNIMPL nosys 282 AUE_NULL UNIMPL nosys 283 AUE_NULL UNIMPL nosys 284 AUE_NULL UNIMPL nosys 285 AUE_NULL UNIMPL nosys 286 AUE_NULL UNIMPL nosys 287 AUE_NULL UNIMPL nosys 288 AUE_NULL UNIMPL nosys ; 289 and 290 from NetBSD (OpenBSD: 267 and 268) 289 AUE_PREADV STD { ssize_t freebsd32_preadv(int fd, \ struct iovec32 *iovp, \ u_int iovcnt, \ uint32_t offset1, uint32_t offset2); } 290 AUE_PWRITEV STD { ssize_t freebsd32_pwritev(int fd, \ struct iovec32 *iovp, \ u_int iovcnt, \ uint32_t offset1, uint32_t offset2); } 291 AUE_NULL UNIMPL nosys 292 AUE_NULL UNIMPL nosys 293 AUE_NULL UNIMPL nosys 294 AUE_NULL UNIMPL nosys 295 AUE_NULL UNIMPL nosys 296 AUE_NULL UNIMPL nosys ; XXX 297 is 300 in NetBSD 297 AUE_FHSTATFS COMPAT4 { int freebsd32_fhstatfs( \ const struct fhandle *u_fhp, \ struct statfs32 *buf); } 298 AUE_FHOPEN NOPROTO { int fhopen(const struct fhandle *u_fhp, \ int flags); } 299 AUE_FHSTAT COMPAT11 { int freebsd32_fhstat( \ const struct fhandle *u_fhp, \ struct freebsd11_stat32 *sb); } ; syscall numbers for FreeBSD 300 AUE_NULL NOPROTO { int modnext(int modid); } 301 AUE_NULL STD { int freebsd32_modstat(int modid, \ struct module_stat32* stat); } 302 AUE_NULL NOPROTO { int modfnext(int modid); } 303 AUE_NULL NOPROTO { int modfind(const char *name); } 304 AUE_MODLOAD NOPROTO { int kldload(const char *file); } 305 AUE_MODUNLOAD NOPROTO { int kldunload(int fileid); } 306 AUE_NULL NOPROTO { int kldfind(const char *file); } 307 AUE_NULL NOPROTO { int kldnext(int fileid); } 308 AUE_NULL STD { int freebsd32_kldstat(int fileid, \ struct kld32_file_stat* stat); } 309 AUE_NULL NOPROTO { int kldfirstmod(int fileid); } 310 AUE_GETSID NOPROTO { int getsid(pid_t pid); } 311 AUE_SETRESUID NOPROTO { int setresuid(uid_t ruid, uid_t euid, \ uid_t suid); } 312 AUE_SETRESGID NOPROTO { int setresgid(gid_t 
rgid, gid_t egid, \ gid_t sgid); } 313 AUE_NULL OBSOL signanosleep 314 AUE_AIO_RETURN STD { int freebsd32_aio_return( \ struct aiocb32 *aiocbp); } 315 AUE_AIO_SUSPEND STD { int freebsd32_aio_suspend( \ struct aiocb32 * const * aiocbp, int nent, \ const struct timespec32 *timeout); } 316 AUE_AIO_CANCEL NOPROTO { int aio_cancel(int fd, \ struct aiocb *aiocbp); } 317 AUE_AIO_ERROR STD { int freebsd32_aio_error( \ struct aiocb32 *aiocbp); } 318 AUE_AIO_READ COMPAT6 { int freebsd32_aio_read( \ struct oaiocb32 *aiocbp); } 319 AUE_AIO_WRITE COMPAT6 { int freebsd32_aio_write( \ struct oaiocb32 *aiocbp); } 320 AUE_LIO_LISTIO COMPAT6 { int freebsd32_lio_listio(int mode, \ struct oaiocb32 * const *acb_list, \ int nent, struct osigevent32 *sig); } 321 AUE_NULL NOPROTO { int yield(void); } 322 AUE_NULL OBSOL thr_sleep 323 AUE_NULL OBSOL thr_wakeup 324 AUE_MLOCKALL NOPROTO { int mlockall(int how); } 325 AUE_MUNLOCKALL NOPROTO { int munlockall(void); } 326 AUE_GETCWD NOPROTO { int __getcwd(char *buf, size_t buflen); } 327 AUE_NULL NOPROTO { int sched_setparam (pid_t pid, \ const struct sched_param *param); } 328 AUE_NULL NOPROTO { int sched_getparam (pid_t pid, \ struct sched_param *param); } 329 AUE_NULL NOPROTO { int sched_setscheduler (pid_t pid, \ int policy, \ const struct sched_param *param); } 330 AUE_NULL NOPROTO { int sched_getscheduler (pid_t pid); } 331 AUE_NULL NOPROTO { int sched_yield (void); } 332 AUE_NULL NOPROTO { int sched_get_priority_max (int policy); } 333 AUE_NULL NOPROTO { int sched_get_priority_min (int policy); } 334 AUE_NULL NOPROTO { int sched_rr_get_interval (pid_t pid, \ struct timespec *interval); } 335 AUE_NULL NOPROTO { int utrace(const void *addr, size_t len); } 336 AUE_SENDFILE COMPAT4 { int freebsd32_sendfile(int fd, int s, \ uint32_t offset1, uint32_t offset2, \ size_t nbytes, struct sf_hdtr32 *hdtr, \ off_t *sbytes, int flags); } 337 AUE_NULL NOPROTO { int kldsym(int fileid, int cmd, \ void *data); } 338 AUE_JAIL STD { int freebsd32_jail(struct jail32 *jail); } 339 AUE_NULL UNIMPL pioctl 340 AUE_SIGPROCMASK NOPROTO { int sigprocmask(int how, \ const sigset_t *set, sigset_t *oset); } 341 AUE_SIGSUSPEND NOPROTO { int sigsuspend(const sigset_t *sigmask); } 342 AUE_SIGACTION COMPAT4 { int freebsd32_sigaction(int sig, \ struct sigaction32 *act, \ struct sigaction32 *oact); } 343 AUE_SIGPENDING NOPROTO { int sigpending(sigset_t *set); } 344 AUE_SIGRETURN COMPAT4 { int freebsd32_sigreturn( \ const struct freebsd4_freebsd32_ucontext *sigcntxp); } 345 AUE_SIGWAIT STD { int freebsd32_sigtimedwait(const sigset_t *set, \ siginfo_t *info, \ const struct timespec *timeout); } 346 AUE_NULL STD { int freebsd32_sigwaitinfo(const sigset_t *set, \ siginfo_t *info); } 347 AUE_ACL_GET_FILE NOPROTO { int __acl_get_file(const char *path, \ acl_type_t type, struct acl *aclp); } 348 AUE_ACL_SET_FILE NOPROTO { int __acl_set_file(const char *path, \ acl_type_t type, struct acl *aclp); } 349 AUE_ACL_GET_FD NOPROTO { int __acl_get_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 350 AUE_ACL_SET_FD NOPROTO { int __acl_set_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 351 AUE_ACL_DELETE_FILE NOPROTO { int __acl_delete_file(const char *path, \ acl_type_t type); } 352 AUE_ACL_DELETE_FD NOPROTO { int __acl_delete_fd(int filedes, \ acl_type_t type); } 353 AUE_ACL_CHECK_FILE NOPROTO { int __acl_aclcheck_file(const char *path, \ acl_type_t type, struct acl *aclp); } 354 AUE_ACL_CHECK_FD NOPROTO { int __acl_aclcheck_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 355 
AUE_EXTATTRCTL NOPROTO { int extattrctl(const char *path, int cmd, \ const char *filename, int attrnamespace, \ const char *attrname); } 356 AUE_EXTATTR_SET_FILE NOPROTO { ssize_t extattr_set_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 357 AUE_EXTATTR_GET_FILE NOPROTO { ssize_t extattr_get_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 358 AUE_EXTATTR_DELETE_FILE NOPROTO { int extattr_delete_file( \ const char *path, int attrnamespace, \ const char *attrname); } 359 AUE_AIO_WAITCOMPLETE STD { int freebsd32_aio_waitcomplete( \ struct aiocb32 **aiocbp, \ struct timespec32 *timeout); } 360 AUE_GETRESUID NOPROTO { int getresuid(uid_t *ruid, uid_t *euid, \ uid_t *suid); } 361 AUE_GETRESGID NOPROTO { int getresgid(gid_t *rgid, gid_t *egid, \ gid_t *sgid); } 362 AUE_KQUEUE NOPROTO { int kqueue(void); } 363 AUE_KEVENT COMPAT11 { int freebsd32_kevent(int fd, \ const struct kevent32_freebsd11 * \ changelist, \ int nchanges, \ struct kevent32_freebsd11 *eventlist, \ int nevents, \ const struct timespec32 *timeout); } 364 AUE_NULL UNIMPL __cap_get_proc 365 AUE_NULL UNIMPL __cap_set_proc 366 AUE_NULL UNIMPL __cap_get_fd 367 AUE_NULL UNIMPL __cap_get_file 368 AUE_NULL UNIMPL __cap_set_fd 369 AUE_NULL UNIMPL __cap_set_file 370 AUE_NULL UNIMPL nosys 371 AUE_EXTATTR_SET_FD NOPROTO { ssize_t extattr_set_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 372 AUE_EXTATTR_GET_FD NOPROTO { ssize_t extattr_get_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 373 AUE_EXTATTR_DELETE_FD NOPROTO { int extattr_delete_fd(int fd, \ int attrnamespace, \ const char *attrname); } 374 AUE_SETUGID NOPROTO { int __setugid(int flag); } 375 AUE_NULL UNIMPL nfsclnt 376 AUE_EACCESS NOPROTO { int eaccess(char *path, int amode); } 377 AUE_NULL UNIMPL afs_syscall 378 AUE_NMOUNT STD { int freebsd32_nmount(struct iovec32 *iovp, \ unsigned int iovcnt, int flags); } 379 AUE_NULL UNIMPL kse_exit 380 AUE_NULL UNIMPL kse_wakeup 381 AUE_NULL UNIMPL kse_create 382 AUE_NULL UNIMPL kse_thr_interrupt 383 AUE_NULL UNIMPL kse_release 384 AUE_NULL UNIMPL __mac_get_proc 385 AUE_NULL UNIMPL __mac_set_proc 386 AUE_NULL UNIMPL __mac_get_fd 387 AUE_NULL UNIMPL __mac_get_file 388 AUE_NULL UNIMPL __mac_set_fd 389 AUE_NULL UNIMPL __mac_set_file 390 AUE_NULL NOPROTO { int kenv(int what, const char *name, \ char *value, int len); } 391 AUE_LCHFLAGS NOPROTO { int lchflags(const char *path, \ u_long flags); } 392 AUE_NULL NOPROTO { int uuidgen(struct uuid *store, \ int count); } 393 AUE_SENDFILE STD { int freebsd32_sendfile(int fd, int s, \ uint32_t offset1, uint32_t offset2, \ size_t nbytes, struct sf_hdtr32 *hdtr, \ off_t *sbytes, int flags); } 394 AUE_NULL UNIMPL mac_syscall 395 AUE_GETFSSTAT COMPAT11|NOPROTO { int getfsstat( \ struct freebsd11_statfs *buf, \ long bufsize, int mode); } 396 AUE_STATFS COMPAT11|NOPROTO { int statfs(char *path, \ struct statfs *buf); } 397 AUE_FSTATFS COMPAT11|NOPROTO { int fstatfs(int fd, \ struct freebsd11_statfs *buf); } 398 AUE_FHSTATFS COMPAT11|NOPROTO { int fhstatfs( \ const struct fhandle *u_fhp, \ struct freebsd11_statfs *buf); } 399 AUE_NULL UNIMPL nosys 400 AUE_SEMCLOSE NOSTD|NOPROTO { int ksem_close(semid_t id); } 401 AUE_SEMPOST NOSTD|NOPROTO { int ksem_post(semid_t id); } 402 AUE_SEMWAIT NOSTD|NOPROTO { int ksem_wait(semid_t id); } 403 AUE_SEMTRYWAIT NOSTD|NOPROTO { int ksem_trywait(semid_t id); } 404 AUE_SEMINIT NOSTD { int 
freebsd32_ksem_init(semid_t *idp, \ unsigned int value); } 405 AUE_SEMOPEN NOSTD { int freebsd32_ksem_open(semid_t *idp, \ const char *name, int oflag, \ mode_t mode, unsigned int value); } 406 AUE_SEMUNLINK NOSTD|NOPROTO { int ksem_unlink(const char *name); } 407 AUE_SEMGETVALUE NOSTD|NOPROTO { int ksem_getvalue(semid_t id, \ int *val); } 408 AUE_SEMDESTROY NOSTD|NOPROTO { int ksem_destroy(semid_t id); } 409 AUE_NULL UNIMPL __mac_get_pid 410 AUE_NULL UNIMPL __mac_get_link 411 AUE_NULL UNIMPL __mac_set_link 412 AUE_EXTATTR_SET_LINK NOPROTO { ssize_t extattr_set_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 413 AUE_EXTATTR_GET_LINK NOPROTO { ssize_t extattr_get_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 414 AUE_EXTATTR_DELETE_LINK NOPROTO { int extattr_delete_link( \ const char *path, int attrnamespace, \ const char *attrname); } 415 AUE_NULL UNIMPL __mac_execve 416 AUE_SIGACTION STD { int freebsd32_sigaction(int sig, \ struct sigaction32 *act, \ struct sigaction32 *oact); } 417 AUE_SIGRETURN STD { int freebsd32_sigreturn( \ const struct freebsd32_ucontext *sigcntxp); } 418 AUE_NULL UNIMPL __xstat 419 AUE_NULL UNIMPL __xfstat 420 AUE_NULL UNIMPL __xlstat 421 AUE_NULL STD { int freebsd32_getcontext( \ struct freebsd32_ucontext *ucp); } 422 AUE_NULL STD { int freebsd32_setcontext( \ const struct freebsd32_ucontext *ucp); } 423 AUE_NULL STD { int freebsd32_swapcontext( \ struct freebsd32_ucontext *oucp, \ const struct freebsd32_ucontext *ucp); } 424 AUE_SWAPOFF UNIMPL swapoff 425 AUE_ACL_GET_LINK NOPROTO { int __acl_get_link(const char *path, \ acl_type_t type, struct acl *aclp); } 426 AUE_ACL_SET_LINK NOPROTO { int __acl_set_link(const char *path, \ acl_type_t type, struct acl *aclp); } 427 AUE_ACL_DELETE_LINK NOPROTO { int __acl_delete_link(const char *path, \ acl_type_t type); } 428 AUE_ACL_CHECK_LINK NOPROTO { int __acl_aclcheck_link(const char *path, \ acl_type_t type, struct acl *aclp); } 429 AUE_SIGWAIT NOPROTO { int sigwait(const sigset_t *set, \ int *sig); } 430 AUE_THR_CREATE UNIMPL thr_create; 431 AUE_THR_EXIT NOPROTO { void thr_exit(long *state); } 432 AUE_NULL NOPROTO { int thr_self(long *id); } 433 AUE_THR_KILL NOPROTO { int thr_kill(long id, int sig); } 434 AUE_NULL UNIMPL nosys 435 AUE_NULL UNIMPL nosys 436 AUE_JAIL_ATTACH NOPROTO { int jail_attach(int jid); } 437 AUE_EXTATTR_LIST_FD NOPROTO { ssize_t extattr_list_fd(int fd, \ int attrnamespace, void *data, \ size_t nbytes); } 438 AUE_EXTATTR_LIST_FILE NOPROTO { ssize_t extattr_list_file( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 439 AUE_EXTATTR_LIST_LINK NOPROTO { ssize_t extattr_list_link( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 440 AUE_NULL UNIMPL kse_switchin 441 AUE_SEMWAIT NOSTD { int freebsd32_ksem_timedwait(semid_t id, \ const struct timespec32 *abstime); } 442 AUE_NULL STD { int freebsd32_thr_suspend( \ const struct timespec32 *timeout); } 443 AUE_NULL NOPROTO { int thr_wake(long id); } 444 AUE_MODUNLOAD NOPROTO { int kldunloadf(int fileid, int flags); } 445 AUE_AUDIT NOPROTO { int audit(const void *record, \ u_int length); } 446 AUE_AUDITON NOPROTO { int auditon(int cmd, void *data, \ u_int length); } 447 AUE_GETAUID NOPROTO { int getauid(uid_t *auid); } 448 AUE_SETAUID NOPROTO { int setauid(uid_t *auid); } 449 AUE_GETAUDIT NOPROTO { int getaudit(struct auditinfo *auditinfo); } 450 AUE_SETAUDIT NOPROTO { int setaudit(struct auditinfo *auditinfo); } 
451 AUE_GETAUDIT_ADDR NOPROTO { int getaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 452 AUE_SETAUDIT_ADDR NOPROTO { int setaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 453 AUE_AUDITCTL NOPROTO { int auditctl(char *path); } 454 AUE_NULL STD { int freebsd32_umtx_op(void *obj, int op,\ u_long val, void *uaddr, \ void *uaddr2); } 455 AUE_THR_NEW STD { int freebsd32_thr_new( \ struct thr_param32 *param, \ int param_size); } 456 AUE_NULL STD { int freebsd32_sigqueue(pid_t pid, \ int signum, int value); } 457 AUE_MQ_OPEN NOSTD { int freebsd32_kmq_open( \ const char *path, int flags, mode_t mode, \ const struct mq_attr32 *attr); } 458 AUE_MQ_SETATTR NOSTD { int freebsd32_kmq_setattr(int mqd, \ const struct mq_attr32 *attr, \ struct mq_attr32 *oattr); } 459 AUE_MQ_TIMEDRECEIVE NOSTD { int freebsd32_kmq_timedreceive(int mqd, \ char *msg_ptr, size_t msg_len, \ unsigned *msg_prio, \ const struct timespec32 *abs_timeout); } 460 AUE_MQ_TIMEDSEND NOSTD { int freebsd32_kmq_timedsend(int mqd, \ const char *msg_ptr, size_t msg_len,\ unsigned msg_prio, \ const struct timespec32 *abs_timeout);} 461 AUE_MQ_NOTIFY NOSTD { int freebsd32_kmq_notify(int mqd, \ const struct sigevent32 *sigev); } 462 AUE_MQ_UNLINK NOPROTO|NOSTD { int kmq_unlink(const char *path); } 463 AUE_NULL NOPROTO { int abort2(const char *why, int nargs, void **args); } 464 AUE_NULL NOPROTO { int thr_set_name(long id, const char *name); } 465 AUE_AIO_FSYNC STD { int freebsd32_aio_fsync(int op, \ struct aiocb32 *aiocbp); } 466 AUE_RTPRIO NOPROTO { int rtprio_thread(int function, \ lwpid_t lwpid, struct rtprio *rtp); } 467 AUE_NULL UNIMPL nosys 468 AUE_NULL UNIMPL nosys 469 AUE_NULL UNIMPL __getpath_fromfd 470 AUE_NULL UNIMPL __getpath_fromaddr 471 AUE_SCTP_PEELOFF NOPROTO|NOSTD { int sctp_peeloff(int sd, uint32_t name); } 472 AUE_SCTP_GENERIC_SENDMSG NOPROTO|NOSTD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 473 AUE_SCTP_GENERIC_SENDMSG_IOV NOPROTO|NOSTD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 474 AUE_SCTP_GENERIC_RECVMSG NOPROTO|NOSTD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ struct sockaddr * from, __socklen_t *fromlenaddr, \ struct sctp_sndrcvinfo *sinfo, int *msg_flags); } #ifdef PAD64_REQUIRED 475 AUE_PREAD STD { ssize_t freebsd32_pread(int fd, \ void *buf,size_t nbyte, \ int pad, \ uint32_t offset1, uint32_t offset2); } 476 AUE_PWRITE STD { ssize_t freebsd32_pwrite(int fd, \ const void *buf, size_t nbyte, \ int pad, \ uint32_t offset1, uint32_t offset2); } 477 AUE_MMAP STD { caddr_t freebsd32_mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ int pad, \ uint32_t pos1, uint32_t pos2); } 478 AUE_LSEEK STD { off_t freebsd32_lseek(int fd, \ int pad, \ uint32_t offset1, uint32_t offset2, \ int whence); } 479 AUE_TRUNCATE STD { int freebsd32_truncate(char *path, \ int pad, \ uint32_t length1, uint32_t length2); } 480 AUE_FTRUNCATE STD { int freebsd32_ftruncate(int fd, \ int pad, \ uint32_t length1, uint32_t length2); } #else 475 AUE_PREAD STD { ssize_t freebsd32_pread(int fd, \ void *buf,size_t nbyte, \ uint32_t offset1, uint32_t offset2); } 476 AUE_PWRITE STD { ssize_t freebsd32_pwrite(int fd, \ const void *buf, size_t nbyte, \ uint32_t offset1, uint32_t offset2); } 477 AUE_MMAP STD { caddr_t freebsd32_mmap(caddr_t addr, \ size_t len, int prot, int 
flags, int fd, \ uint32_t pos1, uint32_t pos2); } 478 AUE_LSEEK STD { off_t freebsd32_lseek(int fd, \ uint32_t offset1, uint32_t offset2, \ int whence); } 479 AUE_TRUNCATE STD { int freebsd32_truncate(char *path, \ uint32_t length1, uint32_t length2); } 480 AUE_FTRUNCATE STD { int freebsd32_ftruncate(int fd, \ uint32_t length1, uint32_t length2); } #endif 481 AUE_THR_KILL2 NOPROTO { int thr_kill2(pid_t pid, long id, int sig); } 482 AUE_SHMOPEN NOPROTO { int shm_open(const char *path, int flags, \ mode_t mode); } 483 AUE_SHMUNLINK NOPROTO { int shm_unlink(const char *path); } 484 AUE_NULL NOPROTO { int cpuset(cpusetid_t *setid); } #ifdef PAD64_REQUIRED 485 AUE_NULL STD { int freebsd32_cpuset_setid(cpuwhich_t which, \ int pad, \ uint32_t id1, uint32_t id2, \ cpusetid_t setid); } #else 485 AUE_NULL STD { int freebsd32_cpuset_setid(cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ cpusetid_t setid); } #endif 486 AUE_NULL STD { int freebsd32_cpuset_getid(cpulevel_t level, \ cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ cpusetid_t *setid); } 487 AUE_NULL STD { int freebsd32_cpuset_getaffinity( \ cpulevel_t level, cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ size_t cpusetsize, \ cpuset_t *mask); } 488 AUE_NULL STD { int freebsd32_cpuset_setaffinity( \ cpulevel_t level, cpuwhich_t which, \ uint32_t id1, uint32_t id2, \ size_t cpusetsize, \ const cpuset_t *mask); } 489 AUE_FACCESSAT NOPROTO { int faccessat(int fd, char *path, int amode, \ int flag); } 490 AUE_FCHMODAT NOPROTO { int fchmodat(int fd, const char *path, \ mode_t mode, int flag); } 491 AUE_FCHOWNAT NOPROTO { int fchownat(int fd, char *path, uid_t uid, \ gid_t gid, int flag); } 492 AUE_FEXECVE STD { int freebsd32_fexecve(int fd, \ uint32_t *argv, uint32_t *envv); } 493 AUE_FSTATAT COMPAT11 { int freebsd32_fstatat(int fd, \ char *path, struct freebsd11_stat32 *buf, \ int flag); } 494 AUE_FUTIMESAT STD { int freebsd32_futimesat(int fd, char *path, \ struct timeval *times); } 495 AUE_LINKAT NOPROTO { int linkat(int fd1, char *path1, int fd2, \ char *path2, int flag); } 496 AUE_MKDIRAT NOPROTO { int mkdirat(int fd, char *path, \ mode_t mode); } 497 AUE_MKFIFOAT NOPROTO { int mkfifoat(int fd, char *path, \ mode_t mode); } 498 AUE_MKNODAT COMPAT11 { int freebsd32_mknodat(int fd, char *path, \ mode_t mode, uint32_t dev); } 499 AUE_OPENAT_RWTC NOPROTO { int openat(int fd, char *path, int flag, \ mode_t mode); } 500 AUE_READLINKAT NOPROTO { int readlinkat(int fd, char *path, char *buf, \ size_t bufsize); } 501 AUE_RENAMEAT NOPROTO { int renameat(int oldfd, char *old, int newfd, \ const char *new); } 502 AUE_SYMLINKAT NOPROTO { int symlinkat(char *path1, int fd, \ char *path2); } 503 AUE_UNLINKAT NOPROTO { int unlinkat(int fd, char *path, \ int flag); } 504 AUE_POSIX_OPENPT NOPROTO { int posix_openpt(int flags); } ; 505 is initialised by the kgssapi code, if present. 
505 AUE_NULL UNIMPL gssd_syscall 506 AUE_JAIL_GET STD { int freebsd32_jail_get(struct iovec32 *iovp, \ unsigned int iovcnt, int flags); } 507 AUE_JAIL_SET STD { int freebsd32_jail_set(struct iovec32 *iovp, \ unsigned int iovcnt, int flags); } 508 AUE_JAIL_REMOVE NOPROTO { int jail_remove(int jid); } 509 AUE_CLOSEFROM NOPROTO { int closefrom(int lowfd); } 510 AUE_SEMCTL NOSTD { int freebsd32_semctl(int semid, int semnum, \ int cmd, union semun32 *arg); } 511 AUE_MSGCTL NOSTD { int freebsd32_msgctl(int msqid, int cmd, \ struct msqid_ds32 *buf); } 512 AUE_SHMCTL NOSTD { int freebsd32_shmctl(int shmid, int cmd, \ struct shmid_ds32 *buf); } 513 AUE_LPATHCONF NOPROTO { int lpathconf(char *path, int name); } 514 AUE_NULL OBSOL cap_new 515 AUE_CAP_RIGHTS_GET NOPROTO { int __cap_rights_get(int version, \ int fd, cap_rights_t *rightsp); } 516 AUE_CAP_ENTER NOPROTO { int cap_enter(void); } 517 AUE_CAP_GETMODE NOPROTO { int cap_getmode(u_int *modep); } 518 AUE_PDFORK NOPROTO { int pdfork(int *fdp, int flags); } 519 AUE_PDKILL NOPROTO { int pdkill(int fd, int signum); } 520 AUE_PDGETPID NOPROTO { int pdgetpid(int fd, pid_t *pidp); } 521 AUE_PDWAIT UNIMPL pdwait4 522 AUE_SELECT STD { int freebsd32_pselect(int nd, fd_set *in, \ fd_set *ou, fd_set *ex, \ const struct timespec32 *ts, \ const sigset_t *sm); } 523 AUE_GETLOGINCLASS NOPROTO { int getloginclass(char *namebuf, \ size_t namelen); } 524 AUE_SETLOGINCLASS NOPROTO { int setloginclass(const char *namebuf); } 525 AUE_NULL NOPROTO { int rctl_get_racct(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 526 AUE_NULL NOPROTO { int rctl_get_rules(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 527 AUE_NULL NOPROTO { int rctl_get_limits(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 528 AUE_NULL NOPROTO { int rctl_add_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 529 AUE_NULL NOPROTO { int rctl_remove_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } #ifdef PAD64_REQUIRED 530 AUE_POSIX_FALLOCATE STD { int freebsd32_posix_fallocate(int fd, \ int pad, \ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2); } 531 AUE_POSIX_FADVISE STD { int freebsd32_posix_fadvise(int fd, \ int pad, \ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2, \ int advice); } 532 AUE_WAIT6 STD { int freebsd32_wait6(int idtype, int pad, \ uint32_t id1, uint32_t id2, \ int *status, int options, \ struct wrusage32 *wrusage, \ siginfo_t *info); } #else 530 AUE_POSIX_FALLOCATE STD { int freebsd32_posix_fallocate(int fd,\ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2); } 531 AUE_POSIX_FADVISE STD { int freebsd32_posix_fadvise(int fd, \ uint32_t offset1, uint32_t offset2,\ uint32_t len1, uint32_t len2, \ int advice); } 532 AUE_WAIT6 STD { int freebsd32_wait6(int idtype, \ uint32_t id1, uint32_t id2, \ int *status, int options, \ struct wrusage32 *wrusage, \ siginfo_t *info); } #endif 533 AUE_CAP_RIGHTS_LIMIT NOPROTO { \ int cap_rights_limit(int fd, \ cap_rights_t *rightsp); } 534 AUE_CAP_IOCTLS_LIMIT STD { \ int freebsd32_cap_ioctls_limit(int fd, \ const uint32_t *cmds, size_t ncmds); } 535 AUE_CAP_IOCTLS_GET STD { \ ssize_t freebsd32_cap_ioctls_get(int fd, \ uint32_t *cmds, size_t maxcmds); } 536 AUE_CAP_FCNTLS_LIMIT NOPROTO { int cap_fcntls_limit(int fd, \ uint32_t fcntlrights); } 537 AUE_CAP_FCNTLS_GET NOPROTO { int cap_fcntls_get(int fd, \ uint32_t *fcntlrightsp); } 538 AUE_BINDAT 
NOPROTO { int bindat(int fd, int s, caddr_t name, \ int namelen); } 539 AUE_CONNECTAT NOPROTO { int connectat(int fd, int s, caddr_t name, \ int namelen); } 540 AUE_CHFLAGSAT NOPROTO { int chflagsat(int fd, const char *path, \ u_long flags, int atflag); } 541 AUE_ACCEPT NOPROTO { int accept4(int s, \ struct sockaddr * __restrict name, \ __socklen_t * __restrict anamelen, \ int flags); } 542 AUE_PIPE NOPROTO { int pipe2(int *fildes, int flags); } 543 AUE_AIO_MLOCK STD { int freebsd32_aio_mlock( \ struct aiocb32 *aiocbp); } #ifdef PAD64_REQUIRED 544 AUE_PROCCTL STD { int freebsd32_procctl(int idtype, int pad, \ uint32_t id1, uint32_t id2, int com, \ void *data); } #else 544 AUE_PROCCTL STD { int freebsd32_procctl(int idtype, \ uint32_t id1, uint32_t id2, int com, \ void *data); } #endif 545 AUE_POLL STD { int freebsd32_ppoll(struct pollfd *fds, \ u_int nfds, const struct timespec32 *ts, \ const sigset_t *set); } 546 AUE_FUTIMES STD { int freebsd32_futimens(int fd, \ struct timespec *times); } 547 AUE_FUTIMESAT STD { int freebsd32_utimensat(int fd, \ char *path, \ struct timespec *times, int flag); } 548 AUE_NULL NOPROTO { int numa_getaffinity(cpuwhich_t which, \ id_t id, \ struct vm_domain_policy *policy); } 549 AUE_NULL NOPROTO { int numa_setaffinity(cpuwhich_t which, \ id_t id, \ const struct vm_domain_policy *policy); } 550 AUE_FSYNC NOPROTO { int fdatasync(int fd); } 551 AUE_FSTAT STD { int freebsd32_fstat(int fd, \ struct stat32 *ub); } 552 AUE_FSTATAT STD { int freebsd32_fstatat(int fd, \ char *path, struct stat32 *buf, \ int flag); } 553 AUE_FHSTAT STD { int freebsd32_fhstat( \ const struct fhandle *u_fhp, \ struct stat32 *sb); } 554 AUE_GETDIRENTRIES STD { ssize_t freebsd32_getdirentries( \ int fd, char *buf, size_t count, \ int32_t *basep); } 555 AUE_STATFS NOPROTO { int statfs(char *path, \ struct statfs32 *buf); } 556 AUE_FSTATFS NOPROTO { int fstatfs(int fd, struct statfs32 *buf); } 557 AUE_GETFSSTAT NOPROTO { int getfsstat(struct statfs32 *buf, \ long bufsize, int mode); } 558 AUE_FHSTATFS NOPROTO { int fhstatfs(const struct fhandle *u_fhp, \ struct statfs32 *buf); } 559 AUE_MKNODAT NOPROTO { int mknodat(int fd, char *path, mode_t mode, \ dev_t dev); } 560 AUE_KEVENT STD { int freebsd32_kevent(int fd, \ const struct kevent32 *changelist, \ int nchanges, \ struct kevent32 *eventlist, \ int nevents, \ const struct timespec32 *timeout); } +561 AUE_NULL STD { int cpuset_getdomain(cpulevel_t level, \ + cpuwhich_t which, id_t id, \ + size_t domainsetsize, domainset_t *mask, \ + int *policy); } +562 AUE_NULL STD { int cpuset_setdomain(cpulevel_t level, \ + cpuwhich_t which, id_t id, \ + size_t domainsetsize, domainset_t *mask, \ + int policy); } + ; vim: syntax=off Index: user/jeff/numa/sys/conf/files =================================================================== --- user/jeff/numa/sys/conf/files (revision 326888) +++ user/jeff/numa/sys/conf/files (revision 326889) @@ -1,4851 +1,4851 @@ # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
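The 32-bit syscall table above splits every 64-bit argument into two uint32_t halves (offset1/offset2, length1/length2, id1/id2), adding an "int pad" on PAD64_REQUIRED platforms so the pair stays 8-byte aligned. A minimal sketch of how such a pair is put back together, using an illustrative stand-in for the generated freebsd32 args struct and a local copy of the kernel's PAIR32TO64() helper:

/*
 * Sketch only: reassembling a split 64-bit argument.  The struct is an
 * illustrative stand-in for the generated freebsd32 args struct; the
 * macro mirrors the kernel's PAIR32TO64() helper.
 */
#include <sys/types.h>

#define PAIR32TO64(type, name)	((name##1) | ((type)(name##2) << 32))

struct freebsd32_lseek_args_sketch {
	int		fd;
	uint32_t	offset1;	/* first half of the 64-bit offset */
	uint32_t	offset2;	/* second half of the 64-bit offset */
	int		whence;
};

static off_t
sketch_offset(const struct freebsd32_lseek_args_sketch *uap)
{
	/* Rebuild the 64-bit offset before handing it to native code. */
	return (PAIR32TO64(off_t, uap->offset));
}

Entries 561 and 562 added above expose cpuset_getdomain(2) and cpuset_setdomain(2) to 32-bit binaries. A hedged userland sketch of the interface they declare, assuming the prototypes and the DOMAINSET_* constants are provided by <sys/cpuset.h> and <sys/domainset.h> as in the NUMA work (names may still differ in this branch):

/*
 * Sketch only: query and then restrict the memory-domain policy of the
 * current thread via the new syscalls.  CPU_WHICH_TID with id -1 names
 * the calling thread; DOMAINSET_POLICY_PREFER is assumed to come from
 * <sys/domainset.h>.
 */
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	domainset_t mask;
	int policy;

	/* Fetch the current thread's domain mask and allocation policy. */
	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, &policy) != 0)
		err(1, "cpuset_getdomain");
	printf("current policy: %d\n", policy);

	/* Prefer memory from domain 0 for future allocations. */
	DOMAINSET_ZERO(&mask);
	DOMAINSET_SET(0, &mask);
	if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, DOMAINSET_POLICY_PREFER) != 0)
		err(1, "cpuset_setdomain");
	return (0);
}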
# acpi_quirks.h optional acpi \ dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ no-obj no-implicit-rule before-depend \ clean "acpi_quirks.h" bhnd_nvram_map.h optional bhnd \ dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \ no-obj no-implicit-rule before-depend \ clean "bhnd_nvram_map.h" bhnd_nvram_map_data.h optional bhnd \ dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \ no-obj no-implicit-rule before-depend \ clean "bhnd_nvram_map_data.h" # # The 'fdt_dtb_file' target covers an actual DTB file name, which is derived # from the specified source (DTS) file: .dts -> .dtb # fdt_dtb_file optional fdt fdt_dtb_static \ compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtb.sh $S ${FDT_DTS_FILE} ${.CURDIR}'" \ no-obj no-implicit-rule before-depend \ clean "${FDT_DTS_FILE:R}.dtb" fdt_static_dtb.h optional fdt fdt_dtb_static \ compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ${.CURDIR}'" \ dependency "fdt_dtb_file" \ no-obj no-implicit-rule before-depend \ clean "fdt_static_dtb.h" feeder_eq_gen.h optional sound \ dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_eq_gen.h" feeder_rate_gen.h optional sound \ dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_rate_gen.h" snd_fxdiv_gen.h optional sound \ dependency "$S/tools/sound/snd_fxdiv_gen.awk" \ compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \ no-obj no-implicit-rule before-depend \ clean "snd_fxdiv_gen.h" miidevs.h optional miibus | mii \ dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ no-obj no-implicit-rule before-depend \ clean "miidevs.h" pccarddevs.h standard \ dependency "$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ compile-with "${AWK} -f $S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ no-obj no-implicit-rule before-depend \ clean "pccarddevs.h" kbdmuxmap.h optional kbdmux_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${KBDMUX_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > kbdmuxmap.h" \ no-obj no-implicit-rule before-depend \ clean "kbdmuxmap.h" teken_state.h optional sc | vt \ dependency "$S/teken/gensequences $S/teken/sequences" \ compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \ no-obj no-implicit-rule before-depend \ clean "teken_state.h" usbdevs.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \ no-obj no-implicit-rule before-depend \ clean "usbdevs.h" usbdevs_data.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} 
-f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \ no-obj no-implicit-rule before-depend \ clean "usbdevs_data.h" cam/cam.c optional scbus cam/cam_compat.c optional scbus cam/cam_iosched.c optional scbus cam/cam_periph.c optional scbus cam/cam_queue.c optional scbus cam/cam_sim.c optional scbus cam/cam_xpt.c optional scbus cam/ata/ata_all.c optional scbus cam/ata/ata_xpt.c optional scbus cam/ata/ata_pmp.c optional scbus cam/nvme/nvme_all.c optional scbus nvme cam/nvme/nvme_da.c optional scbus nvme da cam/nvme/nvme_xpt.c optional scbus nvme cam/scsi/scsi_xpt.c optional scbus cam/scsi/scsi_all.c optional scbus cam/scsi/scsi_cd.c optional cd cam/scsi/scsi_ch.c optional ch cam/ata/ata_da.c optional ada | da cam/ctl/ctl.c optional ctl cam/ctl/ctl_backend.c optional ctl cam/ctl/ctl_backend_block.c optional ctl cam/ctl/ctl_backend_ramdisk.c optional ctl cam/ctl/ctl_cmd_table.c optional ctl cam/ctl/ctl_frontend.c optional ctl cam/ctl/ctl_frontend_cam_sim.c optional ctl cam/ctl/ctl_frontend_ioctl.c optional ctl cam/ctl/ctl_frontend_iscsi.c optional ctl cfiscsi cam/ctl/ctl_ha.c optional ctl cam/ctl/ctl_scsi_all.c optional ctl cam/ctl/ctl_tpc.c optional ctl cam/ctl/ctl_tpc_local.c optional ctl cam/ctl/ctl_error.c optional ctl cam/ctl/ctl_util.c optional ctl cam/ctl/scsi_ctl.c optional ctl cam/mmc/mmc_xpt.c optional scbus mmccam cam/mmc/mmc_da.c optional scbus mmccam da cam/scsi/scsi_da.c optional da cam/scsi/scsi_low.c optional ncv | nsp | stg cam/scsi/scsi_pass.c optional pass cam/scsi/scsi_pt.c optional pt cam/scsi/scsi_sa.c optional sa cam/scsi/scsi_enc.c optional ses cam/scsi/scsi_enc_ses.c optional ses cam/scsi/scsi_enc_safte.c optional ses cam/scsi/scsi_sg.c optional sg cam/scsi/scsi_targ_bh.c optional targbh cam/scsi/scsi_target.c optional targ cam/scsi/smp_all.c optional scbus # shared between zfs and dtrace cddl/compat/opensolaris/kern/opensolaris.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_cmn_err.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_kmem.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_misc.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_proc.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_sunddi.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_taskq.c optional zfs | dtrace compile-with "${CDDL_C}" # zfs specific cddl/compat/opensolaris/kern/opensolaris_acl.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_dtrace.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kobj.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kstat.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_lookup.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_policy.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_string.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_sysevent.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_uio.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vfs.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vm.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_zone.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/common/acl/acl_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/avl/avl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_fnvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_nvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfeature_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_comutil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_fletcher.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_namecheck.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zpool_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zprop_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/vnode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/abd.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/blkptr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bplist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bqueue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c optional zfs compile-with "${ZFS_C}" \ warning "kernel contains CDDL licensed ZFS filesystem" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_bookmark.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c optional zfs compile-with 
"${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_userhold.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/gzip.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lz4.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/multilist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/range_tree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sha256.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/skein_zfs.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_reftree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/trim_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/uberblock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/unique.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c optional zfs 
compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_get.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_global.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_iter.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_synctask.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfeature.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zle.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zrlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/callb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/list.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/nvpair_alloc_system.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/adler32.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/zmod/deflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inffast.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inftrees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/opensolaris_crc32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/trees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zmod.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zmod_subr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zutil.c optional zfs compile-with "${ZFS_C}" # zfs lua support cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lapi.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lauxlib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lbaselib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lbitlib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lcode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lcompat.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lcorolib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lctype.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ldebug.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ldo.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ldump.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lfunc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lgc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/llex.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lmem.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lobject.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lopcodes.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lparser.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lstate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lstring.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lstrlib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ltable.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ltablib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ltm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lundump.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lvm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lzio.c optional zfs compile-with "${ZFS_C}" # dtrace specific cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \ warning "kernel contains CDDL licensed DTRACE" cddl/contrib/opensolaris/uts/common/dtrace/dtrace_xoroshiro128_plus.c 
optional dtrace compile-with "${DTRACE_C}" cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}" cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}" cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}" cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/systrace/systrace.c optional dtrace_systrace | dtraceall compile-with "${CDDL_C}" cddl/dev/prototype.c optional dtrace_prototype | dtraceall compile-with "${CDDL_C}" fs/nfsclient/nfs_clkdtrace.c optional dtnfscl nfscl | dtraceall nfscl compile-with "${CDDL_C}" compat/cloudabi/cloudabi_clock.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_errno.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_fd.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_file.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_futex.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_mem.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_proc.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_random.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_sock.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_thread.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_vdso.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi32/cloudabi32_fd.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_module.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_poll.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_sock.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_syscalls.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_sysent.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_thread.c optional compat_cloudabi32 compat/cloudabi64/cloudabi64_fd.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_module.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_poll.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_sock.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_syscalls.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_sysent.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_thread.c optional compat_cloudabi64 compat/freebsd32/freebsd32_capability.c optional compat_freebsd32 compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32 compat/freebsd32/freebsd32_misc.c optional compat_freebsd32 compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32 compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32 contrib/ck/src/ck_array.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_centralized.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_combining.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_dissemination.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_mcs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_tournament.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_epoch.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hp.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" 
contrib/ck/src/ck_ht.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_rhs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/dev/acpica/common/ahids.c optional acpi acpi_debug contrib/dev/acpica/common/ahuuids.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbconvert.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbobject.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbtest.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmcstyle.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmdeferred.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi contrib/dev/acpica/components/dispatcher/dsdebug.c optional acpi contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi contrib/dev/acpica/components/dispatcher/dspkginit.c optional acpi contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi contrib/dev/acpica/components/dispatcher/dswload.c optional acpi contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi contrib/dev/acpica/components/events/evevent.c optional acpi contrib/dev/acpica/components/events/evglock.c optional acpi contrib/dev/acpica/components/events/evgpe.c optional acpi contrib/dev/acpica/components/events/evgpeblk.c optional acpi contrib/dev/acpica/components/events/evgpeinit.c optional acpi contrib/dev/acpica/components/events/evgpeutil.c optional acpi 
contrib/dev/acpica/components/events/evhandler.c optional acpi contrib/dev/acpica/components/events/evmisc.c optional acpi contrib/dev/acpica/components/events/evregion.c optional acpi contrib/dev/acpica/components/events/evrgnini.c optional acpi contrib/dev/acpica/components/events/evsci.c optional acpi contrib/dev/acpica/components/events/evxface.c optional acpi contrib/dev/acpica/components/events/evxfevnt.c optional acpi contrib/dev/acpica/components/events/evxfgpe.c optional acpi contrib/dev/acpica/components/events/evxfregn.c optional acpi contrib/dev/acpica/components/executer/exconcat.c optional acpi contrib/dev/acpica/components/executer/exconfig.c optional acpi contrib/dev/acpica/components/executer/exconvrt.c optional acpi contrib/dev/acpica/components/executer/excreate.c optional acpi contrib/dev/acpica/components/executer/exdebug.c optional acpi contrib/dev/acpica/components/executer/exdump.c optional acpi contrib/dev/acpica/components/executer/exfield.c optional acpi contrib/dev/acpica/components/executer/exfldio.c optional acpi contrib/dev/acpica/components/executer/exmisc.c optional acpi contrib/dev/acpica/components/executer/exmutex.c optional acpi contrib/dev/acpica/components/executer/exnames.c optional acpi contrib/dev/acpica/components/executer/exoparg1.c optional acpi contrib/dev/acpica/components/executer/exoparg2.c optional acpi contrib/dev/acpica/components/executer/exoparg3.c optional acpi contrib/dev/acpica/components/executer/exoparg6.c optional acpi contrib/dev/acpica/components/executer/exprep.c optional acpi contrib/dev/acpica/components/executer/exregion.c optional acpi contrib/dev/acpica/components/executer/exresnte.c optional acpi contrib/dev/acpica/components/executer/exresolv.c optional acpi contrib/dev/acpica/components/executer/exresop.c optional acpi contrib/dev/acpica/components/executer/exstore.c optional acpi contrib/dev/acpica/components/executer/exstoren.c optional acpi contrib/dev/acpica/components/executer/exstorob.c optional acpi contrib/dev/acpica/components/executer/exsystem.c optional acpi contrib/dev/acpica/components/executer/extrace.c optional acpi contrib/dev/acpica/components/executer/exutils.c optional acpi contrib/dev/acpica/components/hardware/hwacpi.c optional acpi contrib/dev/acpica/components/hardware/hwesleep.c optional acpi contrib/dev/acpica/components/hardware/hwgpe.c optional acpi contrib/dev/acpica/components/hardware/hwpci.c optional acpi contrib/dev/acpica/components/hardware/hwregs.c optional acpi contrib/dev/acpica/components/hardware/hwsleep.c optional acpi contrib/dev/acpica/components/hardware/hwtimer.c optional acpi contrib/dev/acpica/components/hardware/hwvalid.c optional acpi contrib/dev/acpica/components/hardware/hwxface.c optional acpi contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi contrib/dev/acpica/components/namespace/nsaccess.c optional acpi contrib/dev/acpica/components/namespace/nsalloc.c optional acpi contrib/dev/acpica/components/namespace/nsarguments.c optional acpi contrib/dev/acpica/components/namespace/nsconvert.c optional acpi contrib/dev/acpica/components/namespace/nsdump.c optional acpi contrib/dev/acpica/components/namespace/nseval.c optional acpi contrib/dev/acpica/components/namespace/nsinit.c optional acpi contrib/dev/acpica/components/namespace/nsload.c optional acpi contrib/dev/acpica/components/namespace/nsnames.c optional acpi contrib/dev/acpica/components/namespace/nsobject.c optional acpi contrib/dev/acpica/components/namespace/nsparse.c optional acpi 
contrib/dev/acpica/components/namespace/nspredef.c optional acpi contrib/dev/acpica/components/namespace/nsprepkg.c optional acpi contrib/dev/acpica/components/namespace/nsrepair.c optional acpi contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi contrib/dev/acpica/components/namespace/nssearch.c optional acpi contrib/dev/acpica/components/namespace/nsutils.c optional acpi contrib/dev/acpica/components/namespace/nswalk.c optional acpi contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi contrib/dev/acpica/components/namespace/nsxfname.c optional acpi contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi contrib/dev/acpica/components/parser/psargs.c optional acpi contrib/dev/acpica/components/parser/psloop.c optional acpi contrib/dev/acpica/components/parser/psobject.c optional acpi contrib/dev/acpica/components/parser/psopcode.c optional acpi contrib/dev/acpica/components/parser/psopinfo.c optional acpi contrib/dev/acpica/components/parser/psparse.c optional acpi contrib/dev/acpica/components/parser/psscope.c optional acpi contrib/dev/acpica/components/parser/pstree.c optional acpi contrib/dev/acpica/components/parser/psutils.c optional acpi contrib/dev/acpica/components/parser/pswalk.c optional acpi contrib/dev/acpica/components/parser/psxface.c optional acpi contrib/dev/acpica/components/resources/rsaddr.c optional acpi contrib/dev/acpica/components/resources/rscalc.c optional acpi contrib/dev/acpica/components/resources/rscreate.c optional acpi contrib/dev/acpica/components/resources/rsdump.c optional acpi acpi_debug contrib/dev/acpica/components/resources/rsdumpinfo.c optional acpi contrib/dev/acpica/components/resources/rsinfo.c optional acpi contrib/dev/acpica/components/resources/rsio.c optional acpi contrib/dev/acpica/components/resources/rsirq.c optional acpi contrib/dev/acpica/components/resources/rslist.c optional acpi contrib/dev/acpica/components/resources/rsmemory.c optional acpi contrib/dev/acpica/components/resources/rsmisc.c optional acpi contrib/dev/acpica/components/resources/rsserial.c optional acpi contrib/dev/acpica/components/resources/rsutils.c optional acpi contrib/dev/acpica/components/resources/rsxface.c optional acpi contrib/dev/acpica/components/tables/tbdata.c optional acpi contrib/dev/acpica/components/tables/tbfadt.c optional acpi contrib/dev/acpica/components/tables/tbfind.c optional acpi contrib/dev/acpica/components/tables/tbinstal.c optional acpi contrib/dev/acpica/components/tables/tbprint.c optional acpi contrib/dev/acpica/components/tables/tbutils.c optional acpi contrib/dev/acpica/components/tables/tbxface.c optional acpi contrib/dev/acpica/components/tables/tbxfload.c optional acpi contrib/dev/acpica/components/tables/tbxfroot.c optional acpi contrib/dev/acpica/components/utilities/utaddress.c optional acpi contrib/dev/acpica/components/utilities/utalloc.c optional acpi contrib/dev/acpica/components/utilities/utascii.c optional acpi contrib/dev/acpica/components/utilities/utbuffer.c optional acpi contrib/dev/acpica/components/utilities/utcache.c optional acpi contrib/dev/acpica/components/utilities/utcopy.c optional acpi contrib/dev/acpica/components/utilities/utdebug.c optional acpi contrib/dev/acpica/components/utilities/utdecode.c optional acpi contrib/dev/acpica/components/utilities/utdelete.c optional acpi contrib/dev/acpica/components/utilities/uterror.c optional acpi contrib/dev/acpica/components/utilities/uteval.c optional acpi contrib/dev/acpica/components/utilities/utexcep.c optional acpi 
contrib/dev/acpica/components/utilities/utglobal.c optional acpi contrib/dev/acpica/components/utilities/uthex.c optional acpi contrib/dev/acpica/components/utilities/utids.c optional acpi contrib/dev/acpica/components/utilities/utinit.c optional acpi contrib/dev/acpica/components/utilities/utlock.c optional acpi contrib/dev/acpica/components/utilities/utmath.c optional acpi contrib/dev/acpica/components/utilities/utmisc.c optional acpi contrib/dev/acpica/components/utilities/utmutex.c optional acpi contrib/dev/acpica/components/utilities/utnonansi.c optional acpi contrib/dev/acpica/components/utilities/utobject.c optional acpi contrib/dev/acpica/components/utilities/utosi.c optional acpi contrib/dev/acpica/components/utilities/utownerid.c optional acpi contrib/dev/acpica/components/utilities/utpredef.c optional acpi contrib/dev/acpica/components/utilities/utresdecode.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utresrc.c optional acpi contrib/dev/acpica/components/utilities/utstate.c optional acpi contrib/dev/acpica/components/utilities/utstring.c optional acpi contrib/dev/acpica/components/utilities/utstrsuppt.c optional acpi contrib/dev/acpica/components/utilities/utstrtoul64.c optional acpi contrib/dev/acpica/components/utilities/utuuid.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utxface.c optional acpi contrib/dev/acpica/components/utilities/utxferror.c optional acpi contrib/dev/acpica/components/utilities/utxfinit.c optional acpi contrib/dev/acpica/os_specific/service_layers/osgendbg.c optional acpi acpi_debug contrib/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -Wno-error -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat6.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_rules.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_scan.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused 
-I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_dstlist.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/radix_ipf.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/libfdt/fdt.c optional fdt contrib/libfdt/fdt_ro.c optional fdt contrib/libfdt/fdt_rw.c optional fdt contrib/libfdt/fdt_strerror.c optional fdt contrib/libfdt/fdt_sw.c optional fdt contrib/libfdt/fdt_wip.c optional fdt contrib/libnv/cnvlist.c standard contrib/libnv/dnvlist.c standard contrib/libnv/nvlist.c standard contrib/libnv/nvpair.c standard contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \ compile-with "${NORMAL_C_NOWERROR} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/straddr.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" crypto/blowfish/bf_ecb.c optional ipsec | ipsec_support crypto/blowfish/bf_skey.c optional crypto | ipsec | ipsec_support crypto/camellia/camellia.c optional crypto | ipsec | ipsec_support crypto/camellia/camellia-api.c optional crypto | ipsec | ipsec_support crypto/chacha20/chacha20.c optional chacha20 crypto/des/des_ecb.c optional crypto | ipsec | ipsec_support | netsmb crypto/des/des_setkey.c optional crypto | ipsec | ipsec_support | netsmb crypto/rc4/rc4.c optional netgraph_mppc_encryption | kgssapi crypto/rijndael/rijndael-alg-fst.c optional crypto | ekcd | geom_bde | \ ipsec | 
ipsec_support | random !random_loadable | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional ekcd | geom_bde | random !random_loadable crypto/rijndael/rijndael-api.c optional crypto | ipsec | ipsec_support | \ wlan_ccmp crypto/sha1.c optional carp | crypto | ipsec | \ ipsec_support | netgraph_mppc_encryption | sctp crypto/sha2/sha256c.c optional crypto | ekcd | geom_bde | ipsec | \ ipsec_support | random !random_loadable | sctp | zfs crypto/sha2/sha512c.c optional crypto | geom_bde | ipsec | \ ipsec_support | zfs crypto/skein/skein.c optional crypto | zfs crypto/skein/skein_block.c optional crypto | zfs crypto/siphash/siphash.c optional inet | inet6 crypto/siphash/siphash_test.c optional inet | inet6 ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_capture.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_script.c optional ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_textdump.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_linux.c optional aac compat_linux dev/aac/aac_pci.c optional aac pci dev/aacraid/aacraid.c optional aacraid dev/aacraid/aacraid_cam.c optional aacraid scbus dev/aacraid/aacraid_debug.c optional aacraid dev/aacraid/aacraid_linux.c optional aacraid compat_linux dev/aacraid/aacraid_pci.c optional aacraid pci dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_asus_wmi.c optional acpi_asus_wmi acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_hp.c optional acpi_hp acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpi_support/atk0110.c optional aibs acpi dev/acpica/Osd/OsdDebug.c optional acpi dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_container.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink dev/advansys/adv_pci.c optional adv pci dev/advansys/advansys.c optional adv dev/advansys/advlib.c optional adv 
dev/advansys/advmcode.c optional adv dev/advansys/adw_pci.c optional adw pci dev/advansys/adwcam.c optional adw dev/advansys/adwlib.c optional adw dev/advansys/adwmcode.c optional adw dev/ae/if_ae.c optional ae pci dev/age/if_age.c optional age pci dev/agp/agp.c optional agp pci dev/agp/agp_if.m optional agp pci dev/aha/aha.c optional aha dev/aha/aha_isa.c optional aha isa dev/ahci/ahci.c optional ahci dev/ahci/ahciem.c optional ahci dev/ahci/ahci_pci.c optional ahci pci dev/aic/aic.c optional aic dev/aic/aic_pccard.c optional aic pccard dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/ahd_pci.c optional ahd pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic79xx_reg_print.c optional ahd pci ahd_reg_pretty_print dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/aic7xxx/aic7xxx_reg_print.c optional ahc ahc_reg_pretty_print dev/al_eth/al_eth.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/al_eth/al_init_eth_lm.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/al_eth/al_init_eth_kr.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_iofic.c optional al_iofic \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_config.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_main.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_serdes.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \ no-depend \ compile-with "${CC} -c -o 
${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/alc/if_alc.c optional alc pci dev/ale/if_ale.c optional ale pci dev/alpm/alpm.c optional alpm pci dev/altera/avgen/altera_avgen.c optional altera_avgen dev/altera/avgen/altera_avgen_fdt.c optional altera_avgen fdt dev/altera/avgen/altera_avgen_nexus.c optional altera_avgen dev/altera/sdcard/altera_sdcard.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_disk.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_io.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_fdt.c optional altera_sdcard fdt dev/altera/sdcard/altera_sdcard_nexus.c optional altera_sdcard dev/altera/pio/pio.c optional altera_pio dev/altera/pio/pio_if.m optional altera_pio dev/amdpm/amdpm.c optional amdpm pci | nfpm pci dev/amdsmb/amdsmb.c optional amdsmb pci dev/amr/amr.c optional amr dev/amr/amr_cam.c optional amrp amr dev/amr/amr_disk.c optional amr dev/amr/amr_linux.c optional amr compat_linux dev/amr/amr_pci.c optional amr pci dev/an/if_an.c optional an dev/an/if_an_isa.c optional an isa dev/an/if_an_pccard.c optional an pccard dev/an/if_an_pci.c optional an pci # dev/ata/ata_if.m optional ata | atacore dev/ata/ata-all.c optional ata | atacore dev/ata/ata-dma.c optional ata | atacore dev/ata/ata-lowlevel.c optional ata | atacore dev/ata/ata-sata.c optional ata | atacore dev/ata/ata-card.c optional ata pccard | atapccard dev/ata/ata-isa.c optional ata isa | ataisa dev/ata/ata-pci.c optional ata pci | atapci dev/ata/chipsets/ata-acard.c optional ata pci | ataacard dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs dev/ata/chipsets/ata-amd.c optional ata pci | ataamd dev/ata/chipsets/ata-ati.c optional ata pci | ataati dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix dev/ata/chipsets/ata-highpoint.c optional ata pci | atahighpoint dev/ata/chipsets/ata-intel.c optional ata pci | ataintel dev/ata/chipsets/ata-ite.c optional ata pci | ataite dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell dev/ata/chipsets/ata-micron.c optional ata pci | atamicron dev/ata/chipsets/ata-national.c optional ata pci | atanational dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia dev/ata/chipsets/ata-promise.c optional ata pci | atapromise dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati dev/ata/chipsets/ata-sis.c optional ata pci | atasis dev/ata/chipsets/ata-via.c optional ata pci | atavia # dev/ath/if_ath_pci.c optional ath_pci pci \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath_ahb.c optional ath_ahb \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_alq.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_beacon.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_btcoex.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_btcoex_mci.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_debug.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_descdma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_keycache.c optional ath \ compile-with 
"${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_ioctl.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_led.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_lna_div.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_edma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_ht.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tdma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_sysctl.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_rx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_rx_edma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_spectral.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ah_osdep.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/ath_hal/ah.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v14.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v4k.c \ optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_9287.c \ optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_regdomain.c optional ath \ compile-with "${NORMAL_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW} -I$S/dev/ath" # ar5210 dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5211 dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \ 
compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_reset.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5212 dev/ath/ath_hal/ar5212/ar5212_ani.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_attach.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_beacon.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_eeprom.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_gpio.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_interrupts.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_keycache.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_misc.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_phy.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_power.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_recv.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_reset.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_rfgain.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_xmit.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 
ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5416 (depends on ar5212) dev/ath/ath_hal/ar5416/ar5416_ani.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_attach.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_beacon.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_btcoex.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_eeprom.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_gpio.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_interrupts.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_keycache.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_misc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_phy.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_power.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_radar.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_recv.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_reset.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_spectral.c \ optional ath_hal | 
ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_xmit.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9130 (depends upon ar5416) - also requires AH_SUPPORT_AR9130 # # Since this is an embedded MAC SoC, there's no need to compile it into the # default HAL. dev/ath/ath_hal/ar9001/ar9130_attach.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_phy.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_eeprom.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9160 (depends on ar5416) dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9280 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9285 (depends on ar5416 and ar9280) dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_btcoex.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9287 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9300 contrib/dev/ath/ath_hal/ar9300/ar9300_ani.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_attach.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_beacon.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_eeprom.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WCONSTANT_CONVERSION}" contrib/dev/ath/ath_hal/ar9300/ar9300_freebsd.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal 
-I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_gpio.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_interrupts.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_keycache.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_mci.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_misc.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_paprd.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_phy.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_power.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radar.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radio.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv_ds.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_reset.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WSOMETIMES_UNINITIALIZED} -Wno-unused-function" contrib/dev/ath/ath_hal/ar9300/ar9300_stub.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_stub_funcs.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_spectral.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_timer.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit_ds.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" # rf backends dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" 
dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \ ath_ar9130 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ath rate control algorithms dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${NORMAL_C} -I$S/dev/ath" # ath DFS modules dev/ath/ath_dfs/null/dfs_null.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bhnd/bhnd.c optional bhnd dev/bhnd/bhnd_erom.c optional bhnd dev/bhnd/bhnd_erom_if.m optional bhnd dev/bhnd/bhnd_subr.c optional bhnd dev/bhnd/bhnd_bus_if.m optional bhnd dev/bhnd/bhndb/bhnd_bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_bus_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_hwdata.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_pci.c optional bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_hwdata.c optional bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_sprom.c optional bhndb bhnd pci dev/bhnd/bhndb/bhndb_subr.c optional bhndb bhnd dev/bhnd/bcma/bcma.c optional bcma bhnd dev/bhnd/bcma/bcma_bhndb.c optional bcma bhnd bhndb dev/bhnd/bcma/bcma_erom.c optional bcma bhnd dev/bhnd/bcma/bcma_subr.c optional bcma bhnd dev/bhnd/cores/chipc/bhnd_chipc_if.m optional bhnd dev/bhnd/cores/chipc/bhnd_sprom_chipc.c optional bhnd dev/bhnd/cores/chipc/bhnd_pmu_chipc.c optional bhnd dev/bhnd/cores/chipc/chipc.c optional bhnd dev/bhnd/cores/chipc/chipc_cfi.c optional bhnd cfi dev/bhnd/cores/chipc/chipc_gpio.c optional bhnd gpio dev/bhnd/cores/chipc/chipc_slicer.c optional bhnd cfi | bhnd spibus dev/bhnd/cores/chipc/chipc_spi.c optional bhnd spibus dev/bhnd/cores/chipc/chipc_subr.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_if.m optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_hostb_if.m optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_subr.c optional bhnd dev/bhnd/cores/pci/bhnd_pci.c optional bhnd pci dev/bhnd/cores/pci/bhnd_pci_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pci/bhnd_pcib.c optional bhnd_pcib bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2.c optional bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2b.c 
optional bhnd_pcie2b bhnd pci dev/bhnd/cores/pmu/bhnd_pmu.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_core.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_if.m optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcm.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcmraw.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_btxt.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_tlv.c optional bhnd dev/bhnd/nvram/bhnd_nvram_if.m optional bhnd dev/bhnd/nvram/bhnd_nvram_io.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iobuf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_ioptr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iores.c optional bhnd dev/bhnd/nvram/bhnd_nvram_plist.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_fmts.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_prf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_subr.c optional bhnd dev/bhnd/nvram/bhnd_sprom.c optional bhnd dev/bhnd/siba/siba.c optional siba bhnd dev/bhnd/siba/siba_bhndb.c optional siba bhnd bhndb dev/bhnd/siba/siba_erom.c optional siba bhnd dev/bhnd/siba/siba_subr.c optional siba bhnd # dev/bktr/bktr_audio.c optional bktr pci dev/bktr/bktr_card.c optional bktr pci dev/bktr/bktr_core.c optional bktr pci dev/bktr/bktr_i2c.c optional bktr pci smbus dev/bktr/bktr_os.c optional bktr pci dev/bktr/bktr_tuner.c optional bktr pci dev/bktr/msp34xx.c optional bktr pci dev/bnxt/bnxt_hwrm.c optional bnxt iflib pci dev/bnxt/bnxt_sysctl.c optional bnxt iflib pci dev/bnxt/bnxt_txrx.c optional bnxt iflib pci dev/bnxt/if_bnxt.c optional bnxt iflib pci dev/buslogic/bt.c optional bt dev/buslogic/bt_isa.c optional bt isa dev/buslogic/bt_pci.c optional bt pci dev/bwi/bwimac.c optional bwi dev/bwi/bwiphy.c optional bwi dev/bwi/bwirf.c optional bwi dev/bwi/if_bwi.c optional bwi dev/bwi/if_bwi_pci.c optional bwi pci # XXX Work around clang warnings, until maintainer approves fix. 
dev/bwn/if_bwn.c		optional bwn siba_bwn \
	compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/bwn/if_bwn_pci.c		optional bwn pci bhnd
dev/bwn/if_bwn_phy_common.c	optional bwn siba_bwn
dev/bwn/if_bwn_phy_g.c		optional bwn siba_bwn \
	compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED} ${NO_WCONSTANT_CONVERSION}"
dev/bwn/if_bwn_phy_lp.c		optional bwn siba_bwn \
	compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/bwn/if_bwn_phy_n.c		optional bwn siba_bwn
dev/bwn/if_bwn_util.c		optional bwn siba_bwn
dev/bwn/bwn_mac.c		optional bwn bhnd
dev/cardbus/cardbus.c		optional cardbus
dev/cardbus/cardbus_cis.c	optional cardbus
dev/cardbus/cardbus_device.c	optional cardbus
dev/cas/if_cas.c		optional cas
dev/cfi/cfi_bus_fdt.c		optional cfi fdt
dev/cfi/cfi_bus_nexus.c		optional cfi
dev/cfi/cfi_core.c		optional cfi
dev/cfi/cfi_dev.c		optional cfi
dev/cfi/cfi_disk.c		optional cfid
dev/chromebook_platform/chromebook_platform.c	optional chromebook_platform
dev/ciss/ciss.c			optional ciss
dev/cm/smc90cx6.c		optional cm
dev/cmx/cmx.c			optional cmx
dev/cmx/cmx_pccard.c		optional cmx pccard
dev/cpufreq/ichss.c		optional cpufreq pci
dev/cs/if_cs.c			optional cs
dev/cs/if_cs_isa.c		optional cs isa
dev/cs/if_cs_pccard.c		optional cs pccard
dev/cxgb/cxgb_main.c		optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_sge.c		optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_mc5.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_vsc7323.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_vsc8211.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_ael1002.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_aq100x.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_mv88e1xxx.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_xgmac.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_t3_hw.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_tn1010.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/sys/uipc_mvec.c	optional cxgb pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_t3fw.c		optional cxgb cxgb_t3fw \
	compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgbe/t4_if.m		optional cxgbe pci
dev/cxgbe/t4_iov.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_mp_ring.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_main.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_netmap.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sched.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sge.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_l2t.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_tracer.c		optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_vf.c		optional cxgbev pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4_hw.c	optional cxgbe pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4vf_hw.c	optional cxgbev pci \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_common.c	optional cxgbe \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_flash_utils.c	optional cxgbe \
	compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_lib.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_wtp.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/fastlz.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/fastlz_api.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" t4fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t4fw_cfg.c" t4fw_cfg.fwo optional cxgbe \ dependency "t4fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg.fwo" t4fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg.fw" t4fw_cfg_uwire.fwo optional cxgbe \ dependency "t4fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg_uwire.fwo" t4fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg_uwire.fw" t4fw.fwo optional cxgbe \ dependency "t4fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw.fwo" t4fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw-1.16.63.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t4fw.fw" t5fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t5fw_cfg.fw:t5fw_cfg t5fw_cfg_uwire.fw:t5fw_cfg_uwire t5fw.fw:t5fw -mt5fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t5fw_cfg.c" t5fw_cfg.fwo optional cxgbe \ dependency "t5fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw_cfg.fwo" t5fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg.fw" t5fw_cfg_uwire.fwo optional cxgbe \ dependency "t5fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw_cfg_uwire.fwo" t5fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg_uwire.fw" t5fw.fwo optional cxgbe \ dependency "t5fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw.fwo" t5fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw-1.16.63.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t5fw.fw" t6fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t6fw_cfg.fw:t6fw_cfg t6fw_cfg_uwire.fw:t6fw_cfg_uwire t6fw.fw:t6fw -mt6fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t6fw_cfg.c" t6fw_cfg.fwo optional cxgbe \ dependency "t6fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg.fwo" t6fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg.fw" t6fw_cfg_uwire.fwo optional cxgbe \ dependency "t6fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg_uwire.fwo" t6fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg_uwire.fw" t6fw.fwo optional cxgbe \ dependency "t6fw.fw" \ compile-with "${NORMAL_FWO}" \ 
no-implicit-rule \ clean "t6fw.fwo" t6fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw-1.16.63.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t6fw.fw" dev/cxgbe/crypto/t4_crypto.c optional ccr \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cy/cy.c optional cy dev/cy/cy_isa.c optional cy isa dev/cy/cy_pci.c optional cy pci dev/cyapa/cyapa.c optional cyapa iicbus dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/de/if_de.c optional de pci dev/dme/if_dme.c optional dme dev/dpt/dpt_pci.c optional dpt pci dev/dpt/dpt_scsi.c optional dpt dev/drm/ati_pcigart.c optional drm dev/drm/drm_agpsupport.c optional drm dev/drm/drm_auth.c optional drm dev/drm/drm_bufs.c optional drm dev/drm/drm_context.c optional drm dev/drm/drm_dma.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm dev/drm/drm_fops.c optional drm dev/drm/drm_hashtab.c optional drm dev/drm/drm_ioctl.c optional drm dev/drm/drm_irq.c optional drm dev/drm/drm_lock.c optional drm dev/drm/drm_memory.c optional drm dev/drm/drm_mm.c optional drm dev/drm/drm_pci.c optional drm dev/drm/drm_scatter.c optional drm dev/drm/drm_sman.c optional drm dev/drm/drm_sysctl.c optional drm dev/drm/drm_vm.c optional drm dev/drm/mach64_dma.c optional mach64drm dev/drm/mach64_drv.c optional mach64drm dev/drm/mach64_irq.c optional mach64drm dev/drm/mach64_state.c optional mach64drm dev/drm/mga_dma.c optional mgadrm dev/drm/mga_drv.c optional mgadrm dev/drm/mga_irq.c optional mgadrm dev/drm/mga_state.c optional mgadrm dev/drm/mga_warp.c optional mgadrm dev/drm/r128_cce.c optional r128drm \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/drm/r128_drv.c optional r128drm dev/drm/r128_irq.c optional r128drm dev/drm/r128_state.c optional r128drm dev/drm/savage_bci.c optional savagedrm dev/drm/savage_drv.c optional savagedrm dev/drm/savage_state.c optional savagedrm dev/drm/sis_drv.c optional sisdrm dev/drm/sis_ds.c optional sisdrm dev/drm/sis_mm.c optional sisdrm dev/drm/tdfx_drv.c optional tdfxdrm dev/drm/via_dma.c optional viadrm dev/drm/via_dmablit.c optional viadrm dev/drm/via_drv.c optional viadrm dev/drm/via_irq.c optional viadrm dev/drm/via_map.c optional viadrm dev/drm/via_mm.c optional viadrm dev/drm/via_verifier.c optional viadrm dev/drm/via_video.c optional viadrm dev/drm2/drm_agpsupport.c optional drm2 dev/drm2/drm_auth.c optional drm2 dev/drm2/drm_bufs.c optional drm2 dev/drm2/drm_buffer.c optional drm2 dev/drm2/drm_context.c optional drm2 dev/drm2/drm_crtc.c optional drm2 dev/drm2/drm_crtc_helper.c optional drm2 dev/drm2/drm_dma.c optional drm2 dev/drm2/drm_dp_helper.c optional drm2 dev/drm2/drm_dp_iic_helper.c optional drm2 dev/drm2/drm_drv.c optional drm2 dev/drm2/drm_edid.c optional drm2 dev/drm2/drm_fb_helper.c optional drm2 dev/drm2/drm_fops.c optional drm2 dev/drm2/drm_gem.c optional drm2 dev/drm2/drm_gem_names.c optional drm2 dev/drm2/drm_global.c optional drm2 dev/drm2/drm_hashtab.c optional drm2 dev/drm2/drm_ioctl.c optional drm2 dev/drm2/drm_irq.c optional drm2 dev/drm2/drm_linux_list_sort.c optional drm2 dev/drm2/drm_lock.c optional drm2 dev/drm2/drm_memory.c optional drm2 dev/drm2/drm_mm.c optional drm2 dev/drm2/drm_modes.c optional drm2 dev/drm2/drm_pci.c optional drm2 dev/drm2/drm_platform.c optional drm2 dev/drm2/drm_scatter.c optional drm2 dev/drm2/drm_stub.c optional drm2 dev/drm2/drm_sysctl.c optional 
drm2 dev/drm2/drm_vm.c optional drm2 dev/drm2/drm_os_freebsd.c optional drm2 dev/drm2/ttm/ttm_agp_backend.c optional drm2 dev/drm2/ttm/ttm_lock.c optional drm2 dev/drm2/ttm/ttm_object.c optional drm2 dev/drm2/ttm/ttm_tt.c optional drm2 dev/drm2/ttm/ttm_bo_util.c optional drm2 dev/drm2/ttm/ttm_bo.c optional drm2 dev/drm2/ttm/ttm_bo_manager.c optional drm2 dev/drm2/ttm/ttm_execbuf_util.c optional drm2 dev/drm2/ttm/ttm_memory.c optional drm2 dev/drm2/ttm/ttm_page_alloc.c optional drm2 dev/drm2/ttm/ttm_bo_vm.c optional drm2 dev/drm2/ati_pcigart.c optional drm2 agp pci dev/ed/if_ed.c optional ed dev/ed/if_ed_novell.c optional ed dev/ed/if_ed_rtl80x9.c optional ed dev/ed/if_ed_pccard.c optional ed pccard dev/ed/if_ed_pci.c optional ed pci dev/efidev/efidev.c optional efirt dev/efidev/efirt.c optional efirt dev/efidev/efirtc.c optional efirt dev/e1000/if_em.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/em_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/igb_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_80003es2lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82540.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82541.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82542.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82543.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82571.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82575.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_ich8lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_i210.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_api.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mac.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_manage.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_nvm.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_phy.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_vf.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mbx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_osdep.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/et/if_et.c optional et dev/ena/ena.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" dev/ena/ena_sysctl.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" contrib/ena-com/ena_com.c optional ena contrib/ena-com/ena_eth_com.c optional ena dev/ep/if_ep.c optional ep dev/ep/if_ep_isa.c optional ep isa dev/ep/if_ep_pccard.c optional ep pccard dev/esp/esp_pci.c optional esp pci dev/esp/ncr53c9x.c optional esp dev/etherswitch/arswitch/arswitch.c optional arswitch dev/etherswitch/arswitch/arswitch_reg.c optional arswitch dev/etherswitch/arswitch/arswitch_phy.c optional arswitch dev/etherswitch/arswitch/arswitch_8216.c optional arswitch dev/etherswitch/arswitch/arswitch_8226.c optional arswitch dev/etherswitch/arswitch/arswitch_8316.c optional arswitch dev/etherswitch/arswitch/arswitch_8327.c optional arswitch dev/etherswitch/arswitch/arswitch_7240.c optional arswitch dev/etherswitch/arswitch/arswitch_9340.c optional arswitch dev/etherswitch/arswitch/arswitch_vlans.c optional arswitch dev/etherswitch/etherswitch.c optional etherswitch dev/etherswitch/etherswitch_if.m optional etherswitch 
dev/etherswitch/ip17x/ip17x.c optional ip17x dev/etherswitch/ip17x/ip175c.c optional ip17x dev/etherswitch/ip17x/ip175d.c optional ip17x dev/etherswitch/ip17x/ip17x_phy.c optional ip17x dev/etherswitch/ip17x/ip17x_vlans.c optional ip17x dev/etherswitch/miiproxy.c optional miiproxy dev/etherswitch/rtl8366/rtl8366rb.c optional rtl8366rb dev/etherswitch/e6000sw/e6000sw.c optional e6000sw dev/etherswitch/e6000sw/e6060sw.c optional e6060sw dev/etherswitch/infineon/adm6996fc.c optional adm6996fc dev/etherswitch/micrel/ksz8995ma.c optional ksz8995ma dev/etherswitch/ukswitch/ukswitch.c optional ukswitch dev/evdev/cdev.c optional evdev dev/evdev/evdev.c optional evdev dev/evdev/evdev_mt.c optional evdev dev/evdev/evdev_utils.c optional evdev dev/evdev/uinput.c optional evdev uinput dev/ex/if_ex.c optional ex dev/ex/if_ex_isa.c optional ex isa dev/ex/if_ex_pccard.c optional ex pccard dev/exca/exca.c optional cbb dev/extres/clk/clk.c optional ext_resources clk fdt dev/extres/clk/clkdev_if.m optional ext_resources clk fdt dev/extres/clk/clknode_if.m optional ext_resources clk fdt dev/extres/clk/clk_bus.c optional ext_resources clk fdt dev/extres/clk/clk_div.c optional ext_resources clk fdt dev/extres/clk/clk_fixed.c optional ext_resources clk fdt dev/extres/clk/clk_gate.c optional ext_resources clk fdt dev/extres/clk/clk_mux.c optional ext_resources clk fdt dev/extres/phy/phy.c optional ext_resources phy fdt dev/extres/phy/phy_if.m optional ext_resources phy fdt dev/extres/hwreset/hwreset.c optional ext_resources hwreset fdt dev/extres/hwreset/hwreset_if.m optional ext_resources hwreset fdt dev/extres/regulator/regdev_if.m optional ext_resources regulator fdt dev/extres/regulator/regnode_if.m optional ext_resources regulator fdt dev/extres/regulator/regulator.c optional ext_resources regulator fdt dev/extres/regulator/regulator_bus.c optional ext_resources regulator fdt dev/extres/regulator/regulator_fixed.c optional ext_resources regulator fdt dev/fb/fbd.c optional fbd | vt dev/fb/fb_if.m standard dev/fb/splash.c optional sc splash dev/fdt/fdt_clock.c optional fdt fdt_clock dev/fdt/fdt_clock_if.m optional fdt fdt_clock dev/fdt/fdt_common.c optional fdt dev/fdt/fdt_pinctrl.c optional fdt fdt_pinctrl dev/fdt/fdt_pinctrl_if.m optional fdt fdt_pinctrl dev/fdt/fdt_slicer.c optional fdt cfi | fdt nand | fdt mx25l dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static \ dependency "fdt_dtb_file" dev/fdt/simplebus.c optional fdt dev/fe/if_fe.c optional fe dev/fe/if_fe_pccard.c optional fe pccard dev/filemon/filemon.c optional filemon dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp dev/firewire/sbp_targ.c optional sbp_targ dev/flash/at45d.c optional at45d dev/flash/mx25l.c optional mx25l dev/fxp/if_fxp.c optional fxp dev/fxp/inphy.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/gem/if_gem_sbus.c optional gem sbus dev/gpio/gpiobacklight.c optional gpiobacklight fdt dev/gpio/gpiokeys.c optional gpiokeys fdt dev/gpio/gpiokeys_codes.c optional gpiokeys fdt dev/gpio/gpiobus.c optional gpio \ dependency "gpiobus_if.h" dev/gpio/gpioc.c optional gpio \ dependency "gpio_if.h" dev/gpio/gpioiic.c optional gpioiic dev/gpio/gpioled.c optional 
gpioled !fdt dev/gpio/gpioled_fdt.c optional gpioled fdt dev/gpio/gpiopower.c optional gpiopower fdt dev/gpio/gpioregulator.c optional gpioregulator fdt ext_resources dev/gpio/gpiospi.c optional gpiospi dev/gpio/gpioths.c optional gpioths dev/gpio/gpio_if.m optional gpio dev/gpio/gpiobus_if.m optional gpio dev/gpio/gpiopps.c optional gpiopps dev/gpio/ofw_gpiobus.c optional fdt gpio dev/hifn/hifn7751.c optional hifn dev/hme/if_hme.c optional hme dev/hme/if_hme_pci.c optional hme pci dev/hme/if_hme_sbus.c optional hme sbus dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/hwpmc/hwpmc_soft.c optional hwpmc dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus dev/ichiic/ig4_iic.c optional ig4 iicbus dev/ichiic/ig4_pci.c optional ig4 pci iicbus dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_pci.c optional ida pci dev/iicbus/ad7418.c optional ad7418 dev/iicbus/ds1307.c optional ds1307 dev/iicbus/ds13rtc.c optional ds13rtc | ds133x | ds1374 dev/iicbus/ds1672.c optional ds1672 dev/iicbus/ds3231.c optional ds3231 dev/iicbus/rtc8583.c optional rtc8583 dev/iicbus/icee.c optional icee dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iic_recover_bus.c optional iicbus dev/iicbus/iicbb.c optional iicbb dev/iicbus/iicbb_if.m optional iicbb dev/iicbus/iicbus.c optional iicbus dev/iicbus/iicbus_if.m optional iicbus dev/iicbus/iiconf.c optional iicbus dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iicbus/iicoc.c optional iicoc dev/iicbus/isl12xx.c optional isl12xx dev/iicbus/lm75.c optional lm75 dev/iicbus/nxprtc.c optional nxprtc | pcf8563 dev/iicbus/ofw_iicbus.c optional fdt iicbus dev/iicbus/s35390a.c optional s35390a dev/iir/iir.c optional iir dev/iir/iir_ctrl.c optional iir dev/iir/iir_pci.c optional iir pci dev/intpm/intpm.c optional intpm pci # XXX Work around clang warning, until maintainer approves fix. 
dev/ips/ips.c optional ips \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw ipwbssfw.c optional ipwbssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwbssfw.c" ipw_bss.fwo optional ipwbssfw | ipwfw \ dependency "ipw_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_bss.fwo" ipw_bss.fw optional ipwbssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_bss.fw" ipwibssfw.c optional ipwibssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwibssfw.c" ipw_ibss.fwo optional ipwibssfw | ipwfw \ dependency "ipw_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_ibss.fwo" ipw_ibss.fw optional ipwibssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_ibss.fw" ipwmonitorfw.c optional ipwmonitorfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwmonitorfw.c" ipw_monitor.fwo optional ipwmonitorfw | ipwfw \ dependency "ipw_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_monitor.fwo" ipw_monitor.fw optional ipwmonitorfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_monitor.fw" dev/iscsi/icl.c optional iscsi dev/iscsi/icl_conn_if.m optional cfiscsi | iscsi dev/iscsi/icl_soft.c optional iscsi dev/iscsi/icl_soft_proxy.c optional iscsi dev/iscsi/iscsi.c optional iscsi scbus dev/iscsi_initiator/iscsi.c optional iscsi_initiator scbus dev/iscsi_initiator/iscsi_subr.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_cam.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_soc.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_sm.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_subr.c optional iscsi_initiator scbus dev/ismt/ismt.c optional ismt dev/isl/isl.c optional isl iicbus dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_sbus.c optional isp sbus dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi iwibssfw.c optional iwibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwibssfw.c" iwi_bss.fwo optional iwibssfw | iwifw \ dependency "iwi_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_bss.fwo" iwi_bss.fw optional iwibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_bss.fw" iwiibssfw.c optional iwiibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi -miwi_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwiibssfw.c" iwi_ibss.fwo optional iwiibssfw | iwifw \ dependency "iwi_ibss.fw" \ compile-with "${NORMAL_FWO}" \ 
no-implicit-rule \ clean "iwi_ibss.fwo" iwi_ibss.fw optional iwiibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_ibss.fw" iwimonitorfw.c optional iwimonitorfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwimonitorfw.c" iwi_monitor.fwo optional iwimonitorfw | iwifw \ dependency "iwi_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_monitor.fwo" iwi_monitor.fw optional iwimonitorfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_monitor.fw" dev/iwm/if_iwm.c optional iwm dev/iwm/if_iwm_7000.c optional iwm dev/iwm/if_iwm_8000.c optional iwm dev/iwm/if_iwm_binding.c optional iwm dev/iwm/if_iwm_fw.c optional iwm dev/iwm/if_iwm_led.c optional iwm dev/iwm/if_iwm_mac_ctxt.c optional iwm dev/iwm/if_iwm_notif_wait.c optional iwm dev/iwm/if_iwm_pcie_trans.c optional iwm dev/iwm/if_iwm_phy_ctxt.c optional iwm dev/iwm/if_iwm_phy_db.c optional iwm dev/iwm/if_iwm_power.c optional iwm dev/iwm/if_iwm_scan.c optional iwm dev/iwm/if_iwm_sf.c optional iwm dev/iwm/if_iwm_sta.c optional iwm dev/iwm/if_iwm_time_event.c optional iwm dev/iwm/if_iwm_util.c optional iwm iwm3160fw.c optional iwm3160fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3160.fw:iwm3160fw -miwm3160fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm3160fw.c" iwm3160fw.fwo optional iwm3160fw | iwmfw \ dependency "iwm3160.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm3160fw.fwo" iwm3160.fw optional iwm3160fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-3160-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm3160.fw" iwm7260fw.c optional iwm7260fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7260.fw:iwm7260fw -miwm7260fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7260fw.c" iwm7260fw.fwo optional iwm7260fw | iwmfw \ dependency "iwm7260.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7260fw.fwo" iwm7260.fw optional iwm7260fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7260-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7260.fw" iwm7265fw.c optional iwm7265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265.fw:iwm7265fw -miwm7265fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7265fw.c" iwm7265fw.fwo optional iwm7265fw | iwmfw \ dependency "iwm7265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265fw.fwo" iwm7265.fw optional iwm7265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7265.fw" iwm7265Dfw.c optional iwm7265Dfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265D.fw:iwm7265Dfw -miwm7265Dfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7265Dfw.c" iwm7265Dfw.fwo optional iwm7265Dfw | iwmfw \ dependency "iwm7265D.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265Dfw.fwo" iwm7265D.fw optional iwm7265Dfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265D-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7265D.fw" iwm8000Cfw.c optional iwm8000Cfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8000C.fw:iwm8000Cfw -miwm8000Cfw -c${.TARGET}" \ 
no-implicit-rule before-depend local \ clean "iwm8000Cfw.c" iwm8000Cfw.fwo optional iwm8000Cfw | iwmfw \ dependency "iwm8000C.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8000Cfw.fwo" iwm8000C.fw optional iwm8000Cfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8000C-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8000C.fw" iwm8265.fw optional iwm8265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8265-22.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8265.fw" iwm8265fw.c optional iwm8265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8265.fw:iwm8265fw -miwm8265fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm8265fw.c" iwm8265fw.fwo optional iwm8265fw | iwmfw \ dependency "iwm8265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8265fw.fwo" dev/iwn/if_iwn.c optional iwn iwn1000fw.c optional iwn1000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn1000fw.c" iwn1000fw.fwo optional iwn1000fw | iwnfw \ dependency "iwn1000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn1000fw.fwo" iwn1000.fw optional iwn1000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn1000.fw" iwn100fw.c optional iwn100fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn100.fw:iwn100fw -miwn100fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn100fw.c" iwn100fw.fwo optional iwn100fw | iwnfw \ dependency "iwn100.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn100fw.fwo" iwn100.fw optional iwn100fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-100-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn100.fw" iwn105fw.c optional iwn105fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn105.fw:iwn105fw -miwn105fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn105fw.c" iwn105fw.fwo optional iwn105fw | iwnfw \ dependency "iwn105.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn105fw.fwo" iwn105.fw optional iwn105fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-105-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn105.fw" iwn135fw.c optional iwn135fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn135.fw:iwn135fw -miwn135fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn135fw.c" iwn135fw.fwo optional iwn135fw | iwnfw \ dependency "iwn135.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn135fw.fwo" iwn135.fw optional iwn135fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-135-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn135.fw" iwn2000fw.c optional iwn2000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2000.fw:iwn2000fw -miwn2000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn2000fw.c" iwn2000fw.fwo optional iwn2000fw | iwnfw \ dependency "iwn2000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2000fw.fwo" iwn2000.fw optional iwn2000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-2000-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2000.fw" iwn2030fw.c optional iwn2030fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2030.fw:iwn2030fw 
-miwn2030fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn2030fw.c" iwn2030fw.fwo optional iwn2030fw | iwnfw \ dependency "iwn2030.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2030fw.fwo" iwn2030.fw optional iwn2030fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwnwifi-2030-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2030.fw" iwn4965fw.c optional iwn4965fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn4965fw.c" iwn4965fw.fwo optional iwn4965fw | iwnfw \ dependency "iwn4965.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn4965fw.fwo" iwn4965.fw optional iwn4965fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn4965.fw" iwn5000fw.c optional iwn5000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5000fw.c" iwn5000fw.fwo optional iwn5000fw | iwnfw \ dependency "iwn5000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5000fw.fwo" iwn5000.fw optional iwn5000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5000.fw" iwn5150fw.c optional iwn5150fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5150fw.c" iwn5150fw.fwo optional iwn5150fw | iwnfw \ dependency "iwn5150.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5150fw.fwo" iwn5150.fw optional iwn5150fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu"\ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5150.fw" iwn6000fw.c optional iwn6000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000fw.c" iwn6000fw.fwo optional iwn6000fw | iwnfw \ dependency "iwn6000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000fw.fwo" iwn6000.fw optional iwn6000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000.fw" iwn6000g2afw.c optional iwn6000g2afw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw -miwn6000g2afw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2afw.c" iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \ dependency "iwn6000g2a.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2afw.fwo" iwn6000g2a.fw optional iwn6000g2afw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2a.fw" iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2bfw.c" iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \ dependency "iwn6000g2b.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2bfw.fwo" iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean 
"iwn6000g2b.fw" iwn6050fw.c optional iwn6050fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6050fw.c" iwn6050fw.fwo optional iwn6050fw | iwnfw \ dependency "iwn6050.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6050fw.fwo" iwn6050.fw optional iwn6050fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6050.fw" dev/ixgb/if_ixgb.c optional ixgb dev/ixgb/ixgb_ee.c optional ixgb dev/ixgb/ixgb_hw.c optional ixgb dev/ixgbe/if_ix.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_ixv.c optional ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_bypass.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_netmap.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/if_fdir.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/if_sriov.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_osdep.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_phy.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_api.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_common.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_mbx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_vf.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/jedec_ts/jedec_ts.c optional jedec_ts smbus dev/jme/if_jme.c optional jme pci dev/joy/joy.c optional joy dev/joy/joy_isa.c optional joy isa dev/kbd/kbd.c optional atkbd | pckbd | sc | ukbd | vt dev/kbdmux/kbdmux.c optional kbdmux dev/ksyms/ksyms.c optional ksyms dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard dev/lge/if_lge.c optional lge dev/liquidio/base/cn23xx_pf_device.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_console.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_ctrl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_device.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_droq.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" 
dev/liquidio/base/lio_mem_ops.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_request_manager.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_response_manager.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_core.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_ioctl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_main.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_rss.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_rxtx.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_sysctl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" lio.c optional lio \ compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "lio.c" lio_23xx_nic.bin.fw.fwo optional lio \ dependency "lio_23xx_nic.bin.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "lio_23xx_nic.bin.fw.fwo" lio_23xx_nic.bin.fw optional lio \ dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "lio_23xx_nic.bin.fw" dev/lmc/if_lmc.c optional lmc dev/malo/if_malo.c optional malo dev/malo/if_malohal.c optional malo dev/malo/if_malo_pci.c optional malo pci dev/mc146818/mc146818.c optional mc146818 dev/md/md.c optional md dev/mdio/mdio_if.m optional miiproxy | mdio dev/mdio/mdio.c optional miiproxy | mdio dev/mem/memdev.c optional mem dev/mem/memutil.c optional mem dev/mfi/mfi.c optional mfi dev/mfi/mfi_debug.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_syspd.c optional mfi dev/mfi/mfi_tbolt.c optional mfi dev/mfi/mfi_linux.c optional mfi compat_linux dev/mfi/mfi_cam.c optional mfip scbus dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/atphy.c optional miibus | atphy dev/mii/axphy.c optional miibus | axphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/e1000phy.c optional miibus | e1000phy dev/mii/gentbi.c optional miibus | gentbi dev/mii/icsphy.c optional miibus | icsphy dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/jmphy.c optional miibus | jmphy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/micphy.c optional miibus fdt | micphy fdt dev/mii/mii.c optional miibus | mii dev/mii/mii_bitbang.c optional miibus | mii_bitbang dev/mii/mii_physubr.c optional miibus | mii dev/mii/mii_fdt.c optional miibus fdt | mii fdt dev/mii/miibus_if.m optional miibus | mii dev/mii/mlphy.c optional miibus | mlphy dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/nsphyter.c optional miibus | nsphyter dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rdcphy.c optional miibus | rdcphy dev/mii/rgephy.c optional miibus | rgephy dev/mii/rlphy.c optional miibus | rlphy dev/mii/rlswitch.c optional rlswitch dev/mii/smcphy.c optional miibus | smcphy dev/mii/smscphy.c optional miibus | smscphy 
dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/tlphy.c optional miibus | tlphy dev/mii/truephy.c optional miibus | truephy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/vscphy.c optional miibus | vscphy dev/mii/xmphy.c optional miibus | xmphy dev/mk48txx/mk48txx.c optional mk48txx dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mly/mly.c optional mly dev/mmc/mmc_subr.c optional mmc | mmcsd !mmccam dev/mmc/mmc.c optional mmc !mmccam dev/mmc/mmcbr_if.m standard dev/mmc/mmcbus_if.m standard dev/mmc/mmcsd.c optional mmcsd !mmccam dev/mmcnull/mmcnull.c optional mmcnull dev/mn/if_mn.c optional mn pci dev/mpr/mpr.c optional mpr dev/mpr/mpr_config.c optional mpr # XXX Work around clang warning, until maintainer approves fix. dev/mpr/mpr_mapping.c optional mpr \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mpr/mpr_pci.c optional mpr pci dev/mpr/mpr_sas.c optional mpr \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mpr/mpr_sas_lsi.c optional mpr dev/mpr/mpr_table.c optional mpr dev/mpr/mpr_user.c optional mpr dev/mps/mps.c optional mps dev/mps/mps_config.c optional mps # XXX Work around clang warning, until maintainer approves fix. dev/mps/mps_mapping.c optional mps \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mps/mps_pci.c optional mps pci dev/mps/mps_sas.c optional mps \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mps/mps_sas_lsi.c optional mps dev/mps/mps_table.c optional mps dev/mps/mps_user.c optional mps dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/mpt/mpt_user.c optional mpt dev/mrsas/mrsas.c optional mrsas dev/mrsas/mrsas_cam.c optional mrsas dev/mrsas/mrsas_ioctl.c optional mrsas dev/mrsas/mrsas_fp.c optional mrsas dev/msk/if_msk.c optional msk dev/mvs/mvs.c optional mvs dev/mvs/mvs_if.m optional mvs dev/mvs/mvs_pci.c optional mvs pci dev/mwl/if_mwl.c optional mwl dev/mwl/if_mwl_pci.c optional mwl pci dev/mwl/mwlhal.c optional mwl mwlfw.c optional mwlfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "mwlfw.c" mw88W8363.fwo optional mwlfw \ dependency "mw88W8363.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mw88W8363.fwo" mw88W8363.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mw88W8363.fw" mwlboot.fwo optional mwlfw \ dependency "mwlboot.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mwlboot.fwo" mwlboot.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mwlboot.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mwlboot.fw" dev/mxge/if_mxge.c optional mxge pci dev/mxge/mxge_eth_z8e.c optional mxge pci dev/mxge/mxge_ethp_z8e.c optional mxge pci dev/mxge/mxge_rss_eth_z8e.c optional mxge pci dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci dev/my/if_my.c optional my dev/nand/nand.c optional nand dev/nand/nand_bbt.c optional nand dev/nand/nand_cdev.c optional nand dev/nand/nand_generic.c optional nand dev/nand/nand_geom.c optional nand dev/nand/nand_id.c optional nand dev/nand/nandbus.c optional nand dev/nand/nandbus_if.m optional nand dev/nand/nand_if.m optional nand dev/nand/nandsim.c optional nandsim nand dev/nand/nandsim_chip.c optional nandsim nand 
dev/nand/nandsim_ctrl.c optional nandsim nand dev/nand/nandsim_log.c optional nandsim nand dev/nand/nandsim_swap.c optional nandsim nand dev/nand/nfc_if.m optional nand dev/ncr/ncr.c optional ncr pci dev/ncv/ncr53c500.c optional ncv dev/ncv/ncr53c500_pccard.c optional ncv pccard dev/netmap/if_ptnet.c optional netmap inet dev/netmap/netmap.c optional netmap dev/netmap/netmap_freebsd.c optional netmap dev/netmap/netmap_generic.c optional netmap dev/netmap/netmap_mbq.c optional netmap dev/netmap/netmap_mem2.c optional netmap dev/netmap/netmap_monitor.c optional netmap dev/netmap/netmap_offloadings.c optional netmap dev/netmap/netmap_pipe.c optional netmap dev/netmap/netmap_pt.c optional netmap dev/netmap/netmap_vale.c optional netmap # compile-with "${NORMAL_C} -Wconversion -Wextra" dev/nfsmb/nfsmb.c optional nfsmb pci dev/nge/if_nge.c optional nge dev/nxge/if_nxge.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-device.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-mm.c optional nxge dev/nxge/xgehal/xge-queue.c optional nxge dev/nxge/xgehal/xgehal-driver.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-ring.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-channel.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-fifo.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-stats.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-config.c optional nxge dev/nxge/xgehal/xgehal-mgmt.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nmdm/nmdm.c optional nmdm dev/nsp/nsp.c optional nsp dev/nsp/nsp_pccard.c optional nsp pccard dev/null/null.c standard dev/oce/oce_hw.c optional oce pci dev/oce/oce_if.c optional oce pci dev/oce/oce_mbox.c optional oce pci dev/oce/oce_queue.c optional oce pci dev/oce/oce_sysctl.c optional oce pci dev/oce/oce_util.c optional oce pci dev/ofw/ofw_bus_if.m optional fdt dev/ofw/ofw_bus_subr.c optional fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_fdt.c optional fdt dev/ofw/ofw_if.m optional fdt dev/ofw/ofw_subr.c optional fdt dev/ofw/ofwbus.c optional fdt dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt dev/ow/ow.c optional ow \ dependency "owll_if.h" \ dependency "own_if.h" dev/ow/owll_if.m optional ow dev/ow/own_if.m optional ow dev/ow/ow_temp.c optional ow_temp dev/ow/owc_gpiobus.c optional owc gpio dev/pbio/pbio.c optional pbio isa dev/pccard/card_if.m standard dev/pccard/pccard.c optional pccard dev/pccard/pccard_cis.c optional pccard dev/pccard/pccard_cis_quirks.c optional pccard dev/pccard/pccard_device.c optional pccard dev/pccard/power_if.m standard dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_isa.c optional cbb isa dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_iov.c optional pci pci_iov dev/pci/pci_iov_if.m standard dev/pci/pci_iov_schema.c optional pci pci_iov dev/pci/pci_pci.c optional pci dev/pci/pci_subr.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/pcib_support.c standard dev/pci/vga_pci.c optional pci dev/pcn/if_pcn.c optional pcn pci dev/pdq/if_fpa.c optional fpa pci dev/pdq/pdq.c optional nowerror fpa pci 
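The dev/pci entries just above supply the generic PCI bus and newbus glue ("standard" or "optional pci"); an individual device line such as "dev/pcn/if_pcn.c optional pcn pci" is only compiled when both its driver and pci appear in the kernel configuration. For orientation, a minimal PCI probe routine in that style might look like the sketch below; the vendor/device IDs and the example_* names are invented for illustration.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

#define	EXAMPLE_VENDOR	0x1234		/* hypothetical IDs */
#define	EXAMPLE_DEVICE	0x5678

static int
example_pci_probe(device_t dev)
{
	if (pci_get_vendor(dev) == EXAMPLE_VENDOR &&
	    pci_get_device(dev) == EXAMPLE_DEVICE) {
		device_set_desc(dev, "Example PCI device");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

Returning BUS_PROBE_DEFAULT rather than 0 leaves room for a more specific driver to claim the same device.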
dev/pdq/pdq_ifsubr.c optional nowerror fpa pci dev/pms/freebsd/driver/ini/src/agtiapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sadisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saframe.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sahw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sainit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampicmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampirsp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saphy.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasata.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sassp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/satimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sautil.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saioctlcmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpidebug.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmsmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmdisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/sminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsatcb.c optional pmspcv \ 
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdesgl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdioctl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdhw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tddmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdsmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdtimers.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdio.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/ossasat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/ppbus/if_plip.c optional plip dev/ppbus/immio.c optional vpo dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppbus/vpo.c optional vpo dev/ppbus/vpoio.c optional vpo dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/proto/proto_bus_isa.c optional proto acpi | proto isa dev/proto/proto_bus_pci.c optional proto pci dev/proto/proto_busdma.c 
optional proto dev/proto/proto_core.c optional proto dev/pst/pst-iop.c optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst dev/pty/pty.c optional pty dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pccard.c optional puc pccard dev/puc/puc_pci.c optional puc pci dev/puc/pucdata.c optional puc pci dev/quicc/quicc_core.c optional quicc dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral dev/ral/rt2860.c optional ral dev/ral/if_ral_pci.c optional ral pci rt2561fw.c optional rt2561fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561fw.c" rt2561fw.fwo optional rt2561fw | ralfw \ dependency "rt2561.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561fw.fwo" rt2561.fw optional rt2561fw | ralfw \ dependency "$S/contrib/dev/ral/rt2561.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561.fw" rt2561sfw.c optional rt2561sfw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561sfw.c" rt2561sfw.fwo optional rt2561sfw | ralfw \ dependency "rt2561s.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561sfw.fwo" rt2561s.fw optional rt2561sfw | ralfw \ dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561s.fw" rt2661fw.c optional rt2661fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2661fw.c" rt2661fw.fwo optional rt2661fw | ralfw \ dependency "rt2661.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2661fw.fwo" rt2661.fw optional rt2661fw | ralfw \ dependency "$S/contrib/dev/ral/rt2661.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2661.fw" rt2860fw.c optional rt2860fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2860fw.c" rt2860fw.fwo optional rt2860fw | ralfw \ dependency "rt2860.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2860fw.fwo" rt2860.fw optional rt2860fw | ralfw \ dependency "$S/contrib/dev/ral/rt2860.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2860.fw" dev/random/random_infra.c optional random dev/random/random_harvestq.c optional random dev/random/randomdev.c optional random random_yarrow | \ random !random_yarrow !random_loadable dev/random/yarrow.c optional random random_yarrow dev/random/fortuna.c optional random !random_yarrow !random_loadable dev/random/hash.c optional random random_yarrow | \ random !random_yarrow !random_loadable dev/rc/rc.c optional rc dev/rccgpio/rccgpio.c optional rccgpio gpio dev/re/if_re.c optional re dev/rl/if_rl.c optional rl pci dev/rndtest/rndtest.c optional rndtest dev/rp/rp.c optional rp dev/rp/rp_isa.c optional rp isa dev/rp/rp_pci.c optional rp pci # dev/rtwn/if_rtwn.c optional rtwn dev/rtwn/if_rtwn_beacon.c optional rtwn dev/rtwn/if_rtwn_calib.c optional rtwn dev/rtwn/if_rtwn_cam.c optional rtwn dev/rtwn/if_rtwn_efuse.c optional rtwn dev/rtwn/if_rtwn_fw.c optional rtwn dev/rtwn/if_rtwn_rx.c optional rtwn dev/rtwn/if_rtwn_task.c optional rtwn dev/rtwn/if_rtwn_tx.c optional rtwn # dev/rtwn/pci/rtwn_pci_attach.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_reg.c optional rtwn_pci pci 
dev/rtwn/pci/rtwn_pci_rx.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_tx.c optional rtwn_pci pci # dev/rtwn/usb/rtwn_usb_attach.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_ep.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_reg.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_rx.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_tx.c optional rtwn_usb # RTL8188E dev/rtwn/rtl8188e/r88e_beacon.c optional rtwn dev/rtwn/rtl8188e/r88e_calib.c optional rtwn dev/rtwn/rtl8188e/r88e_chan.c optional rtwn dev/rtwn/rtl8188e/r88e_fw.c optional rtwn dev/rtwn/rtl8188e/r88e_init.c optional rtwn dev/rtwn/rtl8188e/r88e_led.c optional rtwn dev/rtwn/rtl8188e/r88e_tx.c optional rtwn dev/rtwn/rtl8188e/r88e_rf.c optional rtwn dev/rtwn/rtl8188e/r88e_rom.c optional rtwn dev/rtwn/rtl8188e/r88e_rx.c optional rtwn dev/rtwn/rtl8188e/usb/r88eu_attach.c optional rtwn_usb dev/rtwn/rtl8188e/usb/r88eu_init.c optional rtwn_usb dev/rtwn/rtl8188e/usb/r88eu_rx.c optional rtwn_usb # RTL8192C dev/rtwn/rtl8192c/r92c_attach.c optional rtwn dev/rtwn/rtl8192c/r92c_beacon.c optional rtwn dev/rtwn/rtl8192c/r92c_calib.c optional rtwn dev/rtwn/rtl8192c/r92c_chan.c optional rtwn dev/rtwn/rtl8192c/r92c_fw.c optional rtwn dev/rtwn/rtl8192c/r92c_init.c optional rtwn dev/rtwn/rtl8192c/r92c_llt.c optional rtwn dev/rtwn/rtl8192c/r92c_rf.c optional rtwn dev/rtwn/rtl8192c/r92c_rom.c optional rtwn dev/rtwn/rtl8192c/r92c_rx.c optional rtwn dev/rtwn/rtl8192c/r92c_tx.c optional rtwn dev/rtwn/rtl8192c/pci/r92ce_attach.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_calib.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_fw.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_init.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_led.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_rx.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_tx.c optional rtwn_pci pci dev/rtwn/rtl8192c/usb/r92cu_attach.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_init.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_led.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_rx.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_tx.c optional rtwn_usb # RTL8192E dev/rtwn/rtl8192e/r92e_chan.c optional rtwn dev/rtwn/rtl8192e/r92e_fw.c optional rtwn dev/rtwn/rtl8192e/r92e_init.c optional rtwn dev/rtwn/rtl8192e/r92e_led.c optional rtwn dev/rtwn/rtl8192e/r92e_rf.c optional rtwn dev/rtwn/rtl8192e/r92e_rom.c optional rtwn dev/rtwn/rtl8192e/r92e_rx.c optional rtwn dev/rtwn/rtl8192e/usb/r92eu_attach.c optional rtwn_usb dev/rtwn/rtl8192e/usb/r92eu_init.c optional rtwn_usb # RTL8812A dev/rtwn/rtl8812a/r12a_beacon.c optional rtwn dev/rtwn/rtl8812a/r12a_calib.c optional rtwn dev/rtwn/rtl8812a/r12a_caps.c optional rtwn dev/rtwn/rtl8812a/r12a_chan.c optional rtwn dev/rtwn/rtl8812a/r12a_fw.c optional rtwn dev/rtwn/rtl8812a/r12a_init.c optional rtwn dev/rtwn/rtl8812a/r12a_led.c optional rtwn dev/rtwn/rtl8812a/r12a_rf.c optional rtwn dev/rtwn/rtl8812a/r12a_rom.c optional rtwn dev/rtwn/rtl8812a/r12a_rx.c optional rtwn dev/rtwn/rtl8812a/r12a_tx.c optional rtwn dev/rtwn/rtl8812a/usb/r12au_attach.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_init.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_rx.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_tx.c optional rtwn_usb # RTL8821A dev/rtwn/rtl8821a/r21a_beacon.c optional rtwn dev/rtwn/rtl8821a/r21a_calib.c optional rtwn dev/rtwn/rtl8821a/r21a_chan.c optional rtwn dev/rtwn/rtl8821a/r21a_fw.c optional rtwn dev/rtwn/rtl8821a/r21a_init.c optional rtwn dev/rtwn/rtl8821a/r21a_led.c optional rtwn dev/rtwn/rtl8821a/r21a_rom.c optional rtwn dev/rtwn/rtl8821a/r21a_rx.c optional 
rtwn dev/rtwn/rtl8821a/usb/r21au_attach.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_dfs.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_init.c optional rtwn_usb rtwn-rtl8188eufw.c optional rtwn-rtl8188eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eufw.fw:rtwn-rtl8188eufw:111 -mrtwn-rtl8188eufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8188eufw.c" rtwn-rtl8188eufw.fwo optional rtwn-rtl8188eufw | rtwnfw \ dependency "rtwn-rtl8188eufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8188eufw.fwo" rtwn-rtl8188eufw.fw optional rtwn-rtl8188eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8188eufw.fw" rtwn-rtl8192cfwE.c optional rtwn-rtl8192cfwE | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE.fw:rtwn-rtl8192cfwE:111 -mrtwn-rtl8192cfwE -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE.c" rtwn-rtl8192cfwE.fwo optional rtwn-rtl8192cfwE | rtwnfw \ dependency "rtwn-rtl8192cfwE.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE.fwo" rtwn-rtl8192cfwE.fw optional rtwn-rtl8192cfwE | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE.fw" rtwn-rtl8192cfwE_B.c optional rtwn-rtl8192cfwE_B | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE_B.fw:rtwn-rtl8192cfwE_B:111 -mrtwn-rtl8192cfwE_B -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE_B.c" rtwn-rtl8192cfwE_B.fwo optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "rtwn-rtl8192cfwE_B.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fwo" rtwn-rtl8192cfwE_B.fw optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE_B.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fw" rtwn-rtl8192cfwT.c optional rtwn-rtl8192cfwT | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwT.fw:rtwn-rtl8192cfwT:111 -mrtwn-rtl8192cfwT -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwT.c" rtwn-rtl8192cfwT.fwo optional rtwn-rtl8192cfwT | rtwnfw \ dependency "rtwn-rtl8192cfwT.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwT.fwo" rtwn-rtl8192cfwT.fw optional rtwn-rtl8192cfwT | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwT.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwT.fw" rtwn-rtl8192cfwU.c optional rtwn-rtl8192cfwU | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwU.fw:rtwn-rtl8192cfwU:111 -mrtwn-rtl8192cfwU -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwU.c" rtwn-rtl8192cfwU.fwo optional rtwn-rtl8192cfwU | rtwnfw \ dependency "rtwn-rtl8192cfwU.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwU.fwo" rtwn-rtl8192cfwU.fw optional rtwn-rtl8192cfwU | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwU.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwU.fw" rtwn-rtl8192eufw.c optional rtwn-rtl8192eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192eufw.fw:rtwn-rtl8192eufw:111 -mrtwn-rtl8192eufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192eufw.c" rtwn-rtl8192eufw.fwo optional rtwn-rtl8192eufw | 
rtwnfw \ dependency "rtwn-rtl8192eufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192eufw.fwo" rtwn-rtl8192eufw.fw optional rtwn-rtl8192eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192eufw.fw" rtwn-rtl8812aufw.c optional rtwn-rtl8812aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8812aufw.fw:rtwn-rtl8812aufw:111 -mrtwn-rtl8812aufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8812aufw.c" rtwn-rtl8812aufw.fwo optional rtwn-rtl8812aufw | rtwnfw \ dependency "rtwn-rtl8812aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8812aufw.fwo" rtwn-rtl8812aufw.fw optional rtwn-rtl8812aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8812aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8812aufw.fw" rtwn-rtl8821aufw.c optional rtwn-rtl8821aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8821aufw.fw:rtwn-rtl8821aufw:111 -mrtwn-rtl8821aufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8821aufw.c" rtwn-rtl8821aufw.fwo optional rtwn-rtl8821aufw | rtwnfw \ dependency "rtwn-rtl8821aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8821aufw.fwo" rtwn-rtl8821aufw.fw optional rtwn-rtl8821aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8821aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8821aufw.fw" dev/safe/safe.c optional safe dev/scc/scc_if.m optional scc dev/scc/scc_bfe_ebus.c optional scc ebus dev/scc/scc_bfe_quicc.c optional scc quicc dev/scc/scc_bfe_sbus.c optional scc fhc | scc sbus dev/scc/scc_core.c optional scc dev/scc/scc_dev_quicc.c optional scc quicc dev/scc/scc_dev_sab82532.c optional scc dev/scc/scc_dev_z8530.c optional scc dev/sdhci/sdhci.c optional sdhci dev/sdhci/sdhci_fdt_gpio.c optional sdhci fdt gpio dev/sdhci/sdhci_if.m optional sdhci dev/sdhci/sdhci_acpi.c optional sdhci acpi dev/sdhci/sdhci_pci.c optional sdhci pci dev/sf/if_sf.c optional sf pci dev/sge/if_sge.c optional sge pci dev/siba/siba_bwn.c optional siba_bwn pci dev/siba/siba_core.c optional siba_bwn pci dev/siis/siis.c optional siis pci dev/sis/if_sis.c optional sis pci dev/sk/if_sk.c optional sk pci dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/smc/if_smc.c optional smc dev/smc/if_smc_fdt.c optional smc fdt dev/sn/if_sn.c optional sn dev/sn/if_sn_isa.c optional sn isa dev/sn/if_sn_pccard.c optional sn pccard dev/snp/snp.c optional snp dev/sound/clone.c optional sound dev/sound/unit.c optional sound dev/sound/isa/ad1816.c optional snd_ad1816 isa dev/sound/isa/ess.c optional snd_ess isa dev/sound/isa/gusc.c optional snd_gusc isa dev/sound/isa/mss.c optional snd_mss isa dev/sound/isa/sb16.c optional snd_sb16 isa dev/sound/isa/sb8.c optional snd_sb8 isa dev/sound/isa/sbc.c optional snd_sbc isa dev/sound/isa/sndbuf_dma.c optional sound isa dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/ds1.c optional snd_ds1 pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci dev/sound/pci/emu10kx.c optional snd_emu10kx pci dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci 
dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci dev/sound/pci/envy24.c optional snd_envy24 pci dev/sound/pci/envy24ht.c optional snd_envy24ht pci dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro.c optional snd_maestro pci dev/sound/pci/maestro3.c optional snd_maestro3 pci dev/sound/pci/neomagic.c optional snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/spicds.c optional snd_spicds pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pci/hda/hdaa.c optional snd_hda pci dev/sound/pci/hda/hdaa_patches.c optional snd_hda pci dev/sound/pci/hda/hdac.c optional snd_hda pci dev/sound/pci/hda/hdac_if.m optional snd_hda pci dev/sound/pci/hda/hdacc.c optional snd_hda pci dev/sound/pci/hdspe.c optional snd_hdspe pci dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_chain.c optional sound dev/sound/pcm/feeder_eq.c optional sound \ dependency "feeder_eq_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_format.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_matrix.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_mixer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_rate.c optional sound \ dependency "feeder_rate_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_volume.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/spibus/ofw_spibus.c optional fdt spibus dev/spibus/spibus.c optional spibus \ dependency "spibus_if.h" dev/spibus/spigen.c optional spigen dev/spibus/spibus_if.m optional spibus dev/ste/if_ste.c optional ste pci dev/stg/tmc18c30.c optional stg dev/stg/tmc18c30_isa.c optional stg isa dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg dev/stge/if_stge.c optional stge dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/schistory.c optional sc 
dev/syscons/scmouse.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scvidctl.c optional sc dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc dev/syscons/warp/warp_saver.c optional warp_saver dev/tdfx/tdfx_linux.c optional tdfx_linux tdfx compat_linux dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/tl/if_tl.c optional tl pci dev/trm/trm.c optional trm dev/twa/tw_cl_init.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_intr.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_io.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_misc.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_cam.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_freebsd.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twe/twe.c optional twe dev/twe/twe_freebsd.c optional twe dev/tws/tws.c optional tws dev/tws/tws_cam.c optional tws dev/tws/tws_hdm.c optional tws dev/tws/tws_services.c optional tws dev/tws/tws_user.c optional tws dev/tx/if_tx.c optional tx dev/txp/if_txp.c optional txp dev/uart/uart_bus_acpi.c optional uart acpi dev/uart/uart_bus_ebus.c optional uart ebus dev/uart/uart_bus_fdt.c optional uart fdt dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pccard.c optional uart pccard dev/uart/uart_bus_pci.c optional uart pci dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_mvebu.c optional uart uart_mvebu dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 | uart uart_snps dev/uart/uart_dev_pl011.c optional uart pl011 dev/uart/uart_dev_quicc.c optional uart quicc dev/uart/uart_dev_sab82532.c optional uart uart_sab82532 dev/uart/uart_dev_sab82532.c optional uart scc dev/uart/uart_dev_snps.c optional uart uart_snps fdt dev/uart/uart_dev_z8530.c optional uart uart_z8530 dev/uart/uart_dev_z8530.c optional uart scc dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart dev/ubsec/ubsec.c optional ubsec # # USB controller drivers # dev/usb/controller/at91dci.c optional at91dci dev/usb/controller/at91dci_atmelarm.c optional at91dci at91rm9200 dev/usb/controller/musb_otg.c optional musb dev/usb/controller/musb_otg_atmelarm.c optional musb at91rm9200 dev/usb/controller/dwc_otg.c optional dwcotg dev/usb/controller/dwc_otg_fdt.c optional dwcotg fdt dev/usb/controller/ehci.c optional ehci dev/usb/controller/ehci_pci.c optional ehci pci dev/usb/controller/ohci.c optional ohci dev/usb/controller/ohci_pci.c optional ohci pci dev/usb/controller/uhci.c optional uhci dev/usb/controller/uhci_pci.c optional uhci pci dev/usb/controller/xhci.c optional xhci dev/usb/controller/xhci_pci.c optional xhci pci dev/usb/controller/saf1761_otg.c optional saf1761otg dev/usb/controller/saf1761_otg_fdt.c optional saf1761otg fdt dev/usb/controller/uss820dci.c optional uss820dci dev/usb/controller/uss820dci_atmelarm.c optional uss820dci at91rm9200 dev/usb/controller/usb_controller.c optional usb # # USB storage drivers # dev/usb/storage/cfumass.c optional cfumass ctl dev/usb/storage/umass.c optional umass dev/usb/storage/urio.c optional urio dev/usb/storage/ustorage_fs.c optional usfs # # USB core # dev/usb/usb_busdma.c optional usb dev/usb/usb_core.c optional usb dev/usb/usb_debug.c optional usb 
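The USB block is split into controller drivers, the core ("optional usb"), and the class drivers that follow; when one of those class drivers is built as a loadable module instead of being listed here, the same layering is expressed with module metadata. A skeleton, with purely hypothetical names, might look like:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>

static int
example_usb_modevent(module_t mod, int type, void *arg)
{
	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t example_usb_mod = {
	"example_usb",			/* hypothetical module name */
	example_usb_modevent,
	NULL
};

DECLARE_MODULE(example_usb, example_usb_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(example_usb, usb, 1, 1, 1);	/* require the USB core */
MODULE_VERSION(example_usb, 1);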
dev/usb/usb_dev.c optional usb dev/usb/usb_device.c optional usb dev/usb/usb_dynamic.c optional usb dev/usb/usb_error.c optional usb dev/usb/usb_generic.c optional usb dev/usb/usb_handle_request.c optional usb dev/usb/usb_hid.c optional usb dev/usb/usb_hub.c optional usb dev/usb/usb_if.m optional usb dev/usb/usb_lookup.c optional usb dev/usb/usb_mbuf.c optional usb dev/usb/usb_msctest.c optional usb dev/usb/usb_parse.c optional usb dev/usb/usb_pf.c optional usb dev/usb/usb_process.c optional usb dev/usb/usb_request.c optional usb dev/usb/usb_transfer.c optional usb dev/usb/usb_util.c optional usb # # USB network drivers # dev/usb/net/if_aue.c optional aue dev/usb/net/if_axe.c optional axe dev/usb/net/if_axge.c optional axge dev/usb/net/if_cdce.c optional cdce dev/usb/net/if_cue.c optional cue dev/usb/net/if_ipheth.c optional ipheth dev/usb/net/if_kue.c optional kue dev/usb/net/if_mos.c optional mos dev/usb/net/if_rue.c optional rue dev/usb/net/if_smsc.c optional smsc dev/usb/net/if_udav.c optional udav dev/usb/net/if_ure.c optional ure dev/usb/net/if_usie.c optional usie dev/usb/net/if_urndis.c optional urndis dev/usb/net/ruephy.c optional rue dev/usb/net/usb_ethernet.c optional uether | aue | axe | axge | cdce | \ cue | ipheth | kue | mos | rue | \ smsc | udav | ure | urndis dev/usb/net/uhso.c optional uhso # # USB WLAN drivers # dev/usb/wlan/if_rsu.c optional rsu rsu-rtl8712fw.c optional rsu-rtl8712fw | rsufw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rsu-rtl8712fw.fw:rsu-rtl8712fw:120 -mrsu-rtl8712fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rsu-rtl8712fw.c" rsu-rtl8712fw.fwo optional rsu-rtl8712fw | rsufw \ dependency "rsu-rtl8712fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rsu-rtl8712fw.fwo" rsu-rtl8712fw.fw optional rsu-rtl8712.fw | rsufw \ dependency "$S/contrib/dev/rsu/rsu-rtl8712fw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rsu-rtl8712fw.fw" dev/usb/wlan/if_rum.c optional rum dev/usb/wlan/if_run.c optional run runfw.c optional runfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk run.fw:runfw -mrunfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "runfw.c" runfw.fwo optional runfw \ dependency "run.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "runfw.fwo" run.fw optional runfw \ dependency "$S/contrib/dev/run/rt2870.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "run.fw" dev/usb/wlan/if_uath.c optional uath dev/usb/wlan/if_upgt.c optional upgt dev/usb/wlan/if_ural.c optional ural dev/usb/wlan/if_urtw.c optional urtw dev/usb/wlan/if_zyd.c optional zyd # # USB serial and parallel port drivers # dev/usb/serial/u3g.c optional u3g dev/usb/serial/uark.c optional uark dev/usb/serial/ubsa.c optional ubsa dev/usb/serial/ubser.c optional ubser dev/usb/serial/uchcom.c optional uchcom dev/usb/serial/ucycom.c optional ucycom dev/usb/serial/ufoma.c optional ufoma dev/usb/serial/uftdi.c optional uftdi dev/usb/serial/ugensa.c optional ugensa dev/usb/serial/uipaq.c optional uipaq dev/usb/serial/ulpt.c optional ulpt dev/usb/serial/umcs.c optional umcs dev/usb/serial/umct.c optional umct dev/usb/serial/umodem.c optional umodem dev/usb/serial/umoscom.c optional umoscom dev/usb/serial/uplcom.c optional uplcom dev/usb/serial/uslcom.c optional uslcom dev/usb/serial/uvisor.c optional uvisor dev/usb/serial/uvscom.c optional uvscom dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \ uchcom | ucycom | ufoma | uftdi | \ ugensa | uipaq | umcs | umct | \ umodem | 
umoscom | uplcom | usie | \ uslcom | uvisor | uvscom # # USB misc drivers # dev/usb/misc/ufm.c optional ufm dev/usb/misc/udbp.c optional udbp dev/usb/misc/ugold.c optional ugold dev/usb/misc/uled.c optional uled # # USB input drivers # dev/usb/input/atp.c optional atp dev/usb/input/uep.c optional uep dev/usb/input/uhid.c optional uhid dev/usb/input/ukbd.c optional ukbd dev/usb/input/ums.c optional ums dev/usb/input/wmt.c optional wmt dev/usb/input/wsp.c optional wsp # # USB quirks # dev/usb/quirk/usb_quirk.c optional usb # # USB templates # dev/usb/template/usb_template.c optional usb_template dev/usb/template/usb_template_audio.c optional usb_template dev/usb/template/usb_template_cdce.c optional usb_template dev/usb/template/usb_template_kbd.c optional usb_template dev/usb/template/usb_template_modem.c optional usb_template dev/usb/template/usb_template_mouse.c optional usb_template dev/usb/template/usb_template_msc.c optional usb_template dev/usb/template/usb_template_mtp.c optional usb_template dev/usb/template/usb_template_phone.c optional usb_template dev/usb/template/usb_template_serialnet.c optional usb_template dev/usb/template/usb_template_midi.c optional usb_template # # USB video drivers # dev/usb/video/udl.c optional udl # # USB END # dev/videomode/videomode.c optional videomode dev/videomode/edid.c optional videomode dev/videomode/pickmode.c optional videomode dev/videomode/vesagtf.c optional videomode dev/vge/if_vge.c optional vge dev/viapm/viapm.c optional viapm pci dev/virtio/virtio.c optional virtio dev/virtio/virtqueue.c optional virtio dev/virtio/virtio_bus_if.m optional virtio dev/virtio/virtio_if.m optional virtio dev/virtio/pci/virtio_pci.c optional virtio_pci dev/virtio/mmio/virtio_mmio.c optional virtio_mmio fdt dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio fdt dev/virtio/network/if_vtnet.c optional vtnet dev/virtio/block/virtio_blk.c optional virtio_blk dev/virtio/balloon/virtio_balloon.c optional virtio_balloon dev/virtio/scsi/virtio_scsi.c optional virtio_scsi dev/virtio/random/virtio_random.c optional virtio_random dev/virtio/console/virtio_console.c optional virtio_console dev/vkbd/vkbd.c optional vkbd dev/vr/if_vr.c optional vr pci dev/vt/colors/vt_termcolors.c optional vt dev/vt/font/vt_font_default.c optional vt dev/vt/font/vt_mouse_cursor.c optional vt dev/vt/hw/efifb/efifb.c optional vt_efifb dev/vt/hw/fb/vt_fb.c optional vt dev/vt/hw/vga/vt_vga.c optional vt vt_vga dev/vt/logo/logo_freebsd.c optional vt splash dev/vt/logo/logo_beastie.c optional vt splash dev/vt/vt_buf.c optional vt dev/vt/vt_consolectl.c optional vt dev/vt/vt_core.c optional vt dev/vt/vt_cpulogos.c optional vt splash dev/vt/vt_font.c optional vt dev/vt/vt_sysmouse.c optional vt dev/vte/if_vte.c optional vte pci dev/vx/if_vx.c optional vx dev/vx/if_vx_pci.c optional vx pci dev/vxge/vxge.c optional vxge dev/vxge/vxgehal/vxgehal-ifmsg.c optional vxge dev/vxge/vxgehal/vxgehal-mrpcim.c optional vxge dev/vxge/vxgehal/vxge-queue.c optional vxge dev/vxge/vxgehal/vxgehal-ring.c optional vxge dev/vxge/vxgehal/vxgehal-swapper.c optional vxge dev/vxge/vxgehal/vxgehal-mgmt.c optional vxge dev/vxge/vxgehal/vxgehal-srpcim.c optional vxge dev/vxge/vxgehal/vxgehal-config.c optional vxge dev/vxge/vxgehal/vxgehal-blockpool.c optional vxge dev/vxge/vxgehal/vxgehal-doorbells.c optional vxge dev/vxge/vxgehal/vxgehal-mgmtaux.c optional vxge dev/vxge/vxgehal/vxgehal-device.c optional vxge dev/vxge/vxgehal/vxgehal-mm.c optional vxge dev/vxge/vxgehal/vxgehal-driver.c optional vxge 
dev/vxge/vxgehal/vxgehal-virtualpath.c optional vxge dev/vxge/vxgehal/vxgehal-channel.c optional vxge dev/vxge/vxgehal/vxgehal-fifo.c optional vxge dev/watchdog/watchdog.c standard dev/wb/if_wb.c optional wb pci dev/wi/if_wi.c optional wi dev/wi/if_wi_pccard.c optional wi pccard dev/wi/if_wi_pci.c optional wi pci dev/wpi/if_wpi.c optional wpi pci wpifw.c optional wpifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "wpifw.c" wpifw.fwo optional wpifw \ dependency "wpi.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "wpifw.fwo" wpi.fw optional wpifw \ dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "wpi.fw" dev/xdma/xdma.c optional xdma dev/xdma/xdma_if.m optional xdma dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt dev/xe/if_xe.c optional xe dev/xe/if_xe_pccard.c optional xe pccard dev/xen/balloon/balloon.c optional xenhvm dev/xen/blkfront/blkfront.c optional xenhvm dev/xen/blkback/blkback.c optional xenhvm dev/xen/console/xen_console.c optional xenhvm dev/xen/control/control.c optional xenhvm dev/xen/grant_table/grant_table.c optional xenhvm dev/xen/netback/netback.c optional xenhvm dev/xen/netfront/netfront.c optional xenhvm dev/xen/xenpci/xenpci.c optional xenpci dev/xen/timer/timer.c optional xenhvm dev/xen/pvcpu/pvcpu.c optional xenhvm dev/xen/xenstore/xenstore.c optional xenhvm dev/xen/xenstore/xenstore_dev.c optional xenhvm dev/xen/xenstore/xenstored_dev.c optional xenhvm dev/xen/evtchn/evtchn_dev.c optional xenhvm dev/xen/privcmd/privcmd.c optional xenhvm dev/xen/gntdev/gntdev.c optional xenhvm dev/xen/debug/debug.c optional xenhvm dev/xl/if_xl.c optional xl pci dev/xl/xlphy.c optional xl pci fs/autofs/autofs.c optional autofs fs/autofs/autofs_vfsops.c optional autofs fs/autofs/autofs_vnops.c optional autofs fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_dir.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/cuse/cuse.c optional cuse fs/fuse/fuse_device.c optional fuse fs/fuse/fuse_file.c optional fuse fs/fuse/fuse_internal.c optional fuse fs/fuse/fuse_io.c optional fuse fs/fuse/fuse_ipc.c optional fuse fs/fuse/fuse_main.c optional fuse fs/fuse/fuse_node.c optional fuse fs/fuse/fuse_vfsops.c optional fuse fs/fuse/fuse_vnops.c optional fuse fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/nandfs/bmap.c optional nandfs fs/nandfs/nandfs_alloc.c optional nandfs fs/nandfs/nandfs_bmap.c optional nandfs fs/nandfs/nandfs_buffer.c optional nandfs fs/nandfs/nandfs_cleaner.c optional nandfs fs/nandfs/nandfs_cpfile.c optional nandfs fs/nandfs/nandfs_dat.c optional nandfs fs/nandfs/nandfs_dir.c optional nandfs fs/nandfs/nandfs_ifile.c optional nandfs fs/nandfs/nandfs_segment.c optional nandfs fs/nandfs/nandfs_subr.c optional nandfs fs/nandfs/nandfs_sufile.c optional nandfs fs/nandfs/nandfs_vfsops.c optional nandfs fs/nandfs/nandfs_vnops.c optional nandfs fs/nfs/nfs_commonkrpc.c optional nfscl | nfsd fs/nfs/nfs_commonsubs.c optional nfscl | nfsd 
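The filesystem sources in this stretch (autofs, devfs, fdescfs, fuse, msdosfs, nandfs, nfs, and so on) are ordinary "optional <fs>" entries; each filesystem makes itself known to the VFS layer from its *_vfsops.c file. A stripped-down registration is sketched below; "examplefs" and its stub handlers are placeholders, not code from any filesystem listed here.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mount.h>

/* Placeholder handlers; a real filesystem fills in the whole table. */
static int
examplefs_mount(struct mount *mp)
{
	return (EOPNOTSUPP);
}

static int
examplefs_unmount(struct mount *mp, int mntflags)
{
	return (EOPNOTSUPP);
}

static struct vfsops examplefs_vfsops = {
	.vfs_mount =	examplefs_mount,
	.vfs_unmount =	examplefs_unmount,
};

/*
 * config(8) only decides whether the sources are compiled in;
 * VFS_SET() is what makes the filesystem type mountable.
 */
VFS_SET(examplefs_vfsops, examplefs, 0);
MODULE_VERSION(examplefs, 1);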
fs/nfs/nfs_commonport.c optional nfscl | nfsd fs/nfs/nfs_commonacl.c optional nfscl | nfsd fs/nfsclient/nfs_clcomsubs.c optional nfscl fs/nfsclient/nfs_clsubs.c optional nfscl fs/nfsclient/nfs_clstate.c optional nfscl fs/nfsclient/nfs_clkrpc.c optional nfscl fs/nfsclient/nfs_clrpcops.c optional nfscl fs/nfsclient/nfs_clvnops.c optional nfscl fs/nfsclient/nfs_clnode.c optional nfscl fs/nfsclient/nfs_clvfsops.c optional nfscl fs/nfsclient/nfs_clport.c optional nfscl fs/nfsclient/nfs_clbio.c optional nfscl fs/nfsclient/nfs_clnfsiod.c optional nfscl fs/nfsserver/nfs_fha_new.c optional nfsd inet fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet fs/nfsserver/nfs_nfsdsubs.c optional nfsd inet fs/nfsserver/nfs_nfsdstate.c optional nfsd inet fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet fs/nfsserver/nfs_nfsdserv.c optional nfsd inet fs/nfsserver/nfs_nfsdport.c optional nfsd inet fs/nfsserver/nfs_nfsdcache.c optional nfsd inet fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_ioctl.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_osrel.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/unionfs/union_subr.c optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs fs/tmpfs/tmpfs_vnops.c optional tmpfs fs/tmpfs/tmpfs_fifoops.c optional tmpfs fs/tmpfs/tmpfs_vfsops.c optional tmpfs fs/tmpfs/tmpfs_subr.c optional tmpfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb geom/bde/g_bde.c optional geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/cache/g_cache.c optional geom_cache geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_hmac.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_key_cache.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_aes.c optional geom_aes geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd | geom_part_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_fox.c optional geom_fox geom/geom_flashmap.c optional fdt cfi | fdt nand | fdt mx25l | mmcsd geom/geom_io.c standard geom/geom_kern.c standard geom/geom_map.c optional 
geom_map geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr geom/geom_redboot.c optional geom_redboot geom/geom_slice.c standard geom/geom_subr.c standard geom/geom_sunlabel.c optional geom_sunlabel geom/geom_sunlabel_enc.c optional geom_sunlabel geom/geom_vfs.c standard geom/geom_vol_ffs.c optional geom_vol geom/journal/g_journal.c optional geom_journal geom/journal/g_journal_ufs.c optional geom_journal geom/label/g_label.c optional geom_label | geom_label_gpt geom/label/g_label_ext2fs.c optional geom_label geom/label/g_label_iso9660.c optional geom_label geom/label/g_label_msdosfs.c optional geom_label geom/label/g_label_ntfs.c optional geom_label geom/label/g_label_reiserfs.c optional geom_label geom/label/g_label_ufs.c optional geom_label geom/label/g_label_gpt.c optional geom_label | geom_label_gpt geom/label/g_label_disk_ident.c optional geom_label geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm geom/mirror/g_mirror.c optional geom_mirror geom/mirror/g_mirror_ctl.c optional geom_mirror geom/mountver/g_mountver.c optional geom_mountver geom/multipath/g_multipath.c optional geom_multipath geom/nop/g_nop.c optional geom_nop geom/part/g_part.c standard geom/part/g_part_if.m standard geom/part/g_part_apm.c optional geom_part_apm geom/part/g_part_bsd.c optional geom_part_bsd geom/part/g_part_bsd64.c optional geom_part_bsd64 geom/part/g_part_ebr.c optional geom_part_ebr geom/part/g_part_gpt.c optional geom_part_gpt geom/part/g_part_ldm.c optional geom_part_ldm geom/part/g_part_mbr.c optional geom_part_mbr geom/part/g_part_vtoc8.c optional geom_part_vtoc8 geom/raid/g_raid.c optional geom_raid geom/raid/g_raid_ctl.c optional geom_raid geom/raid/g_raid_md_if.m optional geom_raid geom/raid/g_raid_tr_if.m optional geom_raid geom/raid/md_ddf.c optional geom_raid geom/raid/md_intel.c optional geom_raid geom/raid/md_jmicron.c optional geom_raid geom/raid/md_nvidia.c optional geom_raid geom/raid/md_promise.c optional geom_raid geom/raid/md_sii.c optional geom_raid geom/raid/tr_concat.c optional geom_raid geom/raid/tr_raid0.c optional geom_raid geom/raid/tr_raid1.c optional geom_raid geom/raid/tr_raid1e.c optional geom_raid geom/raid/tr_raid5.c optional geom_raid geom/raid3/g_raid3.c optional geom_raid3 geom/raid3/g_raid3_ctl.c optional geom_raid3 geom/shsec/g_shsec.c optional geom_shsec geom/stripe/g_stripe.c optional geom_stripe contrib/xz-embedded/freebsd/xz_malloc.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_crc32.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" geom/uzip/g_uzip.c 
optional geom_uzip geom/uzip/g_uzip_lzma.c optional geom_uzip geom/uzip/g_uzip_wrkthr.c optional geom_uzip geom/uzip/g_uzip_zlib.c optional geom_uzip geom/vinum/geom_vinum.c optional geom_vinum geom/vinum/geom_vinum_create.c optional geom_vinum geom/vinum/geom_vinum_drive.c optional geom_vinum geom/vinum/geom_vinum_plex.c optional geom_vinum geom/vinum/geom_vinum_volume.c optional geom_vinum geom/vinum/geom_vinum_subr.c optional geom_vinum geom/vinum/geom_vinum_raid5.c optional geom_vinum geom/vinum/geom_vinum_share.c optional geom_vinum geom/vinum/geom_vinum_list.c optional geom_vinum geom/vinum/geom_vinum_rm.c optional geom_vinum geom/vinum/geom_vinum_init.c optional geom_vinum geom/vinum/geom_vinum_state.c optional geom_vinum geom/vinum/geom_vinum_rename.c optional geom_vinum geom/vinum/geom_vinum_move.c optional geom_vinum geom/vinum/geom_vinum_events.c optional geom_vinum geom/virstor/binstream.c optional geom_virstor geom/virstor/g_virstor.c optional geom_virstor geom/virstor/g_virstor_md.c optional geom_virstor geom/zero/g_zero.c optional geom_zero fs/ext2fs/ext2_acl.c optional ext2fs fs/ext2fs/ext2_alloc.c optional ext2fs fs/ext2fs/ext2_balloc.c optional ext2fs fs/ext2fs/ext2_bmap.c optional ext2fs fs/ext2fs/ext2_csum.c optional ext2fs fs/ext2fs/ext2_extattr.c optional ext2fs fs/ext2fs/ext2_extents.c optional ext2fs fs/ext2fs/ext2_inode.c optional ext2fs fs/ext2fs/ext2_inode_cnv.c optional ext2fs fs/ext2fs/ext2_hash.c optional ext2fs fs/ext2fs/ext2_htree.c optional ext2fs fs/ext2fs/ext2_lookup.c optional ext2fs fs/ext2fs/ext2_subr.c optional ext2fs fs/ext2fs/ext2_vfsops.c optional ext2fs fs/ext2fs/ext2_vnops.c optional ext2fs # isa/isa_if.m standard isa/isa_common.c optional isa isa/isahint.c optional isa isa/pnp.c optional isa isapnp isa/pnpparse.c optional isa isapnp fs/cd9660/cd9660_bmap.c optional cd9660 fs/cd9660/cd9660_lookup.c optional cd9660 fs/cd9660/cd9660_node.c optional cd9660 fs/cd9660/cd9660_rrip.c optional cd9660 fs/cd9660/cd9660_util.c optional cd9660 fs/cd9660/cd9660_vfsops.c optional cd9660 fs/cd9660/cd9660_vnops.c optional cd9660 fs/cd9660/cd9660_iconv.c optional cd9660_iconv kern/bus_if.m standard kern/clock_if.m standard kern/cpufreq_if.m standard kern/device_if.m standard kern/imgact_binmisc.c optional imagact_binmisc kern/imgact_elf.c standard kern/imgact_elf32.c optional compat_freebsd32 kern/imgact_shell.c standard kern/inflate.c optional gzip kern/init_main.c standard kern/init_sysent.c standard kern/ksched.c optional _kposix_priority_scheduling kern/kern_acct.c standard kern/kern_alq.c optional alq kern/kern_clock.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cons.c standard kern/kern_cpu.c standard kern/kern_cpuset.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_dtrace.c optional kdtrace_hooks kern/kern_dump.c standard kern/kern_environment.c standard kern/kern_et.c standard kern/kern_event.c standard kern/kern_exec.c standard kern/kern_exit.c standard kern/kern_fail.c standard kern/kern_ffclock.c standard kern/kern_fork.c standard kern/kern_gzio.c optional gzio kern/kern_hhook.c standard kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard kern/kern_khelp.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard kern/kern_linker.c standard kern/kern_lock.c standard kern/kern_lockf.c standard kern/kern_lockstat.c optional kdtrace_hooks kern/kern_loginclass.c standard kern/kern_malloc.c standard kern/kern_mbuf.c standard 
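The kern/*_if.m entries above (bus_if.m, device_if.m, clock_if.m, ...) are interface descriptions rather than C sources; the build runs them through makeobjops.awk to produce a generated header and glue file, and drivers implement the interface through a kobj method table. A minimal consumer of the device_if methods, using made-up example_* names, could look like:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

static int
example_probe(device_t dev)
{
	device_set_desc(dev, "Example device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
example_attach(device_t dev)
{
	return (0);
}

static int
example_detach(device_t dev)
{
	return (0);
}

/* Each entry names a slot generated from device_if.m. */
static device_method_t example_methods[] = {
	DEVMETHOD(device_probe,		example_probe),
	DEVMETHOD(device_attach,	example_attach),
	DEVMETHOD(device_detach,	example_detach),
	DEVMETHOD_END
};

static driver_t example_driver = {
	"example",
	example_methods,
	0				/* size of per-instance softc */
};

static devclass_t example_devclass;

DRIVER_MODULE(example, nexus, example_driver, example_devclass, NULL, NULL);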
kern/kern_mib.c standard kern/kern_module.c standard kern/kern_mtxpool.c standard kern/kern_mutex.c standard kern/kern_ntptime.c standard kern/kern_numa.c standard kern/kern_osd.c standard kern/kern_physio.c standard kern/kern_pmc.c standard kern/kern_poll.c optional device_polling kern/kern_priv.c standard kern/kern_proc.c standard kern/kern_procctl.c standard kern/kern_prot.c standard kern/kern_racct.c standard kern/kern_rangelock.c standard kern/kern_rctl.c standard kern/kern_resource.c standard kern/kern_rmlock.c standard kern/kern_rwlock.c standard kern/kern_sdt.c optional kdtrace_hooks kern/kern_sema.c standard kern/kern_sendfile.c standard kern/kern_sharedpage.c standard kern/kern_shutdown.c standard kern/kern_sig.c standard kern/kern_switch.c standard kern/kern_sx.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard kern/kern_tc.c standard kern/kern_thr.c standard kern/kern_thread.c standard kern/kern_time.c standard kern/kern_timeout.c standard kern/kern_umtx.c standard kern/kern_uuid.c standard kern/kern_xxx.c standard kern/link_elf.c standard kern/linker_if.m standard kern/md4c.c optional netsmb kern/md5c.c standard kern/p1003_1b.c standard kern/posix4_mib.c standard kern/sched_4bsd.c optional sched_4bsd kern/sched_ule.c optional sched_ule kern/serdev_if.m standard kern/stack_protector.c standard \ compile-with "${NORMAL_C:N-fstack-protector*}" kern/subr_acl_nfs4.c optional ufs_acl | zfs kern/subr_acl_posix1e.c optional ufs_acl kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard kern/subr_bus_dma.c standard kern/subr_bufring.c standard kern/subr_capability.c standard kern/subr_clock.c standard kern/subr_counter.c standard kern/subr_devstat.c standard kern/subr_disk.c standard kern/subr_eventhandler.c standard kern/subr_fattime.c standard kern/subr_firmware.c optional firmware kern/subr_gtaskqueue.c standard kern/subr_hash.c standard kern/subr_hints.c standard kern/subr_kdb.c standard kern/subr_kobj.c standard kern/subr_lock.c standard kern/subr_log.c standard kern/subr_mchain.c optional libmchain kern/subr_module.c standard kern/subr_msgbuf.c standard kern/subr_param.c standard kern/subr_pcpu.c standard kern/subr_pctrie.c standard kern/subr_power.c standard kern/subr_prf.c standard kern/subr_prof.c standard kern/subr_rman.c standard kern/subr_rtc.c standard kern/subr_sbuf.c standard kern/subr_scanf.c standard kern/subr_sglist.c standard kern/subr_sleepqueue.c standard kern/subr_smp.c standard kern/subr_stack.c optional ddb | stack | ktr kern/subr_taskqueue.c standard kern/subr_terminal.c optional vt kern/subr_trap.c standard kern/subr_turnstile.c standard kern/subr_uio.c standard kern/subr_unit.c standard kern/subr_vmem.c standard kern/subr_witness.c optional witness kern/sys_capability.c standard kern/sys_generic.c standard kern/sys_pipe.c standard kern/sys_procdesc.c standard kern/sys_process.c standard kern/sys_socket.c standard kern/syscalls.c standard kern/sysv_ipc.c standard kern/sysv_msg.c optional sysvmsg kern/sysv_sem.c optional sysvsem kern/sysv_shm.c optional sysvshm kern/tty.c standard kern/tty_compat.c optional compat_43tty kern/tty_info.c standard kern/tty_inq.c standard kern/tty_outq.c standard kern/tty_pts.c standard kern/tty_tty.c standard kern/tty_ttydisc.c standard kern/uipc_accf.c standard kern/uipc_debug.c optional ddb kern/uipc_domain.c standard kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard kern/uipc_mbufhash.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c 
optional p1003_1b_semaphores kern/uipc_shm.c standard kern/uipc_sockbuf.c standard kern/uipc_socket.c standard kern/uipc_syscalls.c standard kern/uipc_usrreq.c standard kern/vfs_acl.c standard kern/vfs_aio.c standard kern/vfs_bio.c standard kern/vfs_cache.c standard kern/vfs_cluster.c standard kern/vfs_default.c standard kern/vfs_export.c standard kern/vfs_extattr.c standard kern/vfs_hash.c standard kern/vfs_init.c standard kern/vfs_lookup.c standard kern/vfs_mount.c standard kern/vfs_mountroot.c standard kern/vfs_subr.c standard kern/vfs_syscalls.c standard kern/vfs_vnops.c standard # # Kernel GSS-API # gssd.h optional kgssapi \ dependency "$S/kgssapi/gssd.x" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \ no-obj no-implicit-rule before-depend local \ clean "gssd.h" gssd_xdr.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \ no-implicit-rule before-depend local \ clean "gssd_xdr.c" gssd_clnt.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \ no-implicit-rule before-depend local \ clean "gssd_clnt.c" kgssapi/gss_accept_sec_context.c optional kgssapi kgssapi/gss_add_oid_set_member.c optional kgssapi kgssapi/gss_acquire_cred.c optional kgssapi kgssapi/gss_canonicalize_name.c optional kgssapi kgssapi/gss_create_empty_oid_set.c optional kgssapi kgssapi/gss_delete_sec_context.c optional kgssapi kgssapi/gss_display_status.c optional kgssapi kgssapi/gss_export_name.c optional kgssapi kgssapi/gss_get_mic.c optional kgssapi kgssapi/gss_init_sec_context.c optional kgssapi kgssapi/gss_impl.c optional kgssapi kgssapi/gss_import_name.c optional kgssapi kgssapi/gss_names.c optional kgssapi kgssapi/gss_pname_to_uid.c optional kgssapi kgssapi/gss_release_buffer.c optional kgssapi kgssapi/gss_release_cred.c optional kgssapi kgssapi/gss_release_name.c optional kgssapi kgssapi/gss_release_oid_set.c optional kgssapi kgssapi/gss_set_cred_option.c optional kgssapi kgssapi/gss_test_oid_set_member.c optional kgssapi kgssapi/gss_unwrap.c optional kgssapi kgssapi/gss_verify_mic.c optional kgssapi kgssapi/gss_wrap.c optional kgssapi kgssapi/gss_wrap_size_limit.c optional kgssapi kgssapi/gssd_prot.c optional kgssapi kgssapi/krb5/krb5_mech.c optional kgssapi kgssapi/krb5/kcrypto.c optional kgssapi kgssapi/krb5/kcrypto_aes.c optional kgssapi kgssapi/krb5/kcrypto_arcfour.c optional kgssapi kgssapi/krb5/kcrypto_des.c optional kgssapi kgssapi/krb5/kcrypto_des3.c optional kgssapi kgssapi/kgss_if.m optional kgssapi kgssapi/gsstest.c optional kgssapi_debug # These files in libkern/ are those needed by all architectures. Some # of the files in libkern/ are only needed on some architectures, e.g., # libkern/divdi3.c is needed by i386 but not alpha. Also, some of these # routines may be optimized for a particular platform. In either case, # the file should be moved to conf/files. from here. 
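As the libkern comment above says, routines needed only on some architectures belong in the per-architecture file lists rather than in this machine-independent one. A hedged sketch of what such an entry could look like in a machine-dependent list such as sys/conf/files.<arch>, reusing the comment's own divdi3 example; the placement shown is illustrative, not a claim about the actual contents of any particular files.<arch>:

# in sys/conf/files.<arch>, same entry syntax as this file
libkern/divdi3.c	standard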
# libkern/arc4random.c standard crypto/chacha20/chacha.c standard libkern/asprintf.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc32.c standard libkern/explicit_bzero.c standard libkern/fnmatch.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_ucs.c optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/inet_aton.c standard libkern/inet_ntoa.c standard libkern/inet_ntop.c standard libkern/inet_pton.c standard libkern/jenkins_hash.c standard libkern/murmur3_32.c standard libkern/mcount.c optional profiling-routine libkern/memcchr.c standard libkern/memchr.c standard libkern/memcmp.c standard libkern/memmem.c optional gdb libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/scanc.c standard libkern/strcasecmp.c standard libkern/strcat.c standard libkern/strchr.c standard libkern/strcmp.c standard libkern/strcpy.c standard libkern/strcspn.c standard libkern/strdup.c standard libkern/strndup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strlen.c standard libkern/strncat.c standard libkern/strncmp.c standard libkern/strncpy.c standard libkern/strnlen.c standard libkern/strrchr.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strstr.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard libkern/timingsafe_bcmp.c standard libkern/zlib.c optional crypto | geom_uzip | ipsec | \ ipsec_support | mxge | netgraph_deflate | ddb_ctf | gzio net/altq/altq_cbq.c optional altq net/altq/altq_cdnr.c optional altq net/altq/altq_codel.c optional altq net/altq/altq_hfsc.c optional altq net/altq/altq_fairq.c optional altq net/altq/altq_priq.c optional altq net/altq/altq_red.c optional altq net/altq/altq_rio.c optional altq net/altq/altq_rmclass.c optional altq net/altq/altq_subr.c optional altq net/bpf.c standard net/bpf_buffer.c optional bpf net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bpf_zerocopy.c optional bpf net/bridgestp.c optional bridge | if_bridge net/flowtable.c optional flowtable inet | flowtable inet6 net/ieee8023ad_lacp.c optional lagg net/if.c standard net/if_arcsubr.c optional arcnet net/if_bridge.c optional bridge inet | if_bridge inet net/if_clone.c standard net/if_dead.c standard net/if_debug.c optional ddb net/if_disc.c optional disc net/if_edsc.c optional edsc net/if_enc.c optional enc inet | enc inet6 net/if_epair.c optional epair net/if_ethersubr.c optional ether net/if_fddisubr.c optional fddi net/if_fwsubr.c optional fwip net/if_gif.c optional gif inet | gif inet6 | \ netgraph_gif inet | netgraph_gif inet6 net/if_gre.c optional gre inet | gre inet6 net/if_ipsec.c optional inet ipsec | inet6 ipsec net/if_iso88025subr.c optional token net/if_lagg.c optional lagg net/if_loop.c optional loop net/if_llatbl.c standard net/if_me.c optional me inet net/if_media.c standard net/if_mib.c standard net/if_spppfr.c optional sppp | netgraph_sppp net/if_spppsubr.c optional sppp | netgraph_sppp net/if_stf.c optional stf inet inet6 net/if_tun.c optional tun net/if_tap.c optional tap net/if_vlan.c optional vlan net/if_vxlan.c optional vxlan inet | vxlan inet6 net/ifdi_if.m optional ether pci net/iflib.c optional ether pci net/mp_ring.c optional ether net/mppcc.c optional netgraph_mppc_compression net/mppcd.c optional netgraph_mppc_compression net/netisr.c standard 
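A note on the option clauses used throughout, for example in the libkern/zlib.c and net/if_bridge.c entries above: option names separated by whitespace must all be configured for the file to be built, while "|" separates alternative combinations, any one of which is sufficient. A small sketch restating the pattern with hypothetical options foo, bar and baz:

# both foo and bar must be configured
foo/one.c	optional foo bar
# either foo or bar is enough
foo/two.c	optional foo | bar
# (foo and bar) together, or baz alone
foo/three.c	optional foo bar | baz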
net/pfil.c optional ether | inet net/radix.c standard net/radix_mpath.c standard net/raw_cb.c standard net/raw_usrreq.c standard net/route.c standard net/rss_config.c optional inet rss | inet6 rss net/rtsock.c standard net/slcompress.c optional netgraph_vjc | sppp | \ netgraph_sppp net/toeplitz.c optional inet rss | inet6 rss net/vnet.c optional vimage net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan wlan_acl net80211/ieee80211_action.c optional wlan net80211/ieee80211_ageq.c optional wlan net80211/ieee80211_adhoc.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_ageq.c optional wlan net80211/ieee80211_amrr.c optional wlan | wlan_amrr net80211/ieee80211_crypto.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_crypto_ccmp.c optional wlan wlan_ccmp net80211/ieee80211_crypto_none.c optional wlan net80211/ieee80211_crypto_tkip.c optional wlan wlan_tkip net80211/ieee80211_crypto_wep.c optional wlan wlan_wep net80211/ieee80211_ddb.c optional wlan ddb net80211/ieee80211_dfs.c optional wlan net80211/ieee80211_freebsd.c optional wlan net80211/ieee80211_hostap.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_ht.c optional wlan net80211/ieee80211_hwmp.c optional wlan ieee80211_support_mesh net80211/ieee80211_input.c optional wlan net80211/ieee80211_ioctl.c optional wlan net80211/ieee80211_mesh.c optional wlan ieee80211_support_mesh \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_monitor.c optional wlan net80211/ieee80211_node.c optional wlan net80211/ieee80211_output.c optional wlan net80211/ieee80211_phy.c optional wlan net80211/ieee80211_power.c optional wlan net80211/ieee80211_proto.c optional wlan net80211/ieee80211_radiotap.c optional wlan net80211/ieee80211_ratectl.c optional wlan net80211/ieee80211_ratectl_none.c optional wlan net80211/ieee80211_regdomain.c optional wlan net80211/ieee80211_rssadapt.c optional wlan wlan_rssadapt net80211/ieee80211_scan.c optional wlan net80211/ieee80211_scan_sta.c optional wlan net80211/ieee80211_sta.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_superg.c optional wlan ieee80211_support_superg net80211/ieee80211_scan_sw.c optional wlan net80211/ieee80211_tdma.c optional wlan ieee80211_support_tdma net80211/ieee80211_vht.c optional wlan net80211/ieee80211_wds.c optional wlan net80211/ieee80211_xauth.c optional wlan wlan_xauth net80211/ieee80211_alq.c optional wlan ieee80211_alq netgraph/atm/ccatm/ng_ccatm.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/ngatmbase.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscfu/ng_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscop/ng_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/uni/ng_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c optional netgraph_bluetooth_bt3c netgraph/bluetooth/drivers/h4/ng_h4.c optional netgraph_bluetooth_h4 netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt usb netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw usb netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci 
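The compile-with clauses in the net80211 and netgraph/atm entries above override the default compile command for a single file, typically to append warning flags or extra include paths to ${NORMAL_C}. An illustrative sketch of the pattern; the file, option and contrib directory are hypothetical:

foo/foo_proto.c	optional foo \
	compile-with "${NORMAL_C} -Wno-unused-function -I$S/contrib/foo"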
netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/netflow_v9.c optional netgraph_netflow netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_atmllc.c optional netgraph_atmllc netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_car.c optional netgraph_car netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_deflate.c optional netgraph_deflate netgraph/ng_device.c optional netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_ether_echo.c optional netgraph_ether_echo netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif inet6 | netgraph_gif inet netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat inet libalias netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_patch.c optional netgraph_patch netgraph/ng_pipe.c optional netgraph_pipe netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_pred1.c optional netgraph_pred1 netgraph/ng_rfc1490.c optional netgraph_rfc1490 netgraph/ng_socket.c optional netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_sppp.c optional netgraph_sppp netgraph/ng_tag.c optional netgraph_tag netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netgraph/ng_vlan.c optional netgraph_vlan netinet/accf_data.c optional accept_filter_data inet netinet/accf_dns.c optional accept_filter_dns inet netinet/accf_http.c optional accept_filter_http inet netinet/if_ether.c optional inet ether netinet/igmp.c optional 
inet netinet/in.c optional inet netinet/in_debug.c optional inet ddb netinet/in_kdtrace.c optional inet | inet6 netinet/ip_carp.c optional inet carp | inet6 carp netinet/in_fib.c optional inet netinet/in_gif.c optional gif inet | netgraph_gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_jail.c optional inet netinet/in_mcast.c optional inet netinet/in_pcb.c optional inet | inet6 netinet/in_pcbgroup.c optional inet pcbgroup | inet6 pcbgroup netinet/in_prot.c optional inet | inet6 netinet/in_proto.c optional inet | inet6 netinet/in_rmx.c optional inet netinet/in_rss.c optional inet rss netinet/ip_divert.c optional inet ipdivert ipfirewall netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ip_icmp.c optional inet | inet6 netinet/ip_input.c optional inet netinet/ip_mroute.c optional mrouting inet netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/ip_reass.c optional inet netinet/raw_ip.c optional inet | inet6 netinet/cc/cc.c optional inet | inet6 netinet/cc/cc_newreno.c optional inet | inet6 netinet/sctp_asconf.c optional inet sctp | inet6 sctp netinet/sctp_auth.c optional inet sctp | inet6 sctp netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp netinet/sctp_crc32.c optional inet sctp | inet6 sctp netinet/sctp_indata.c optional inet sctp | inet6 sctp netinet/sctp_input.c optional inet sctp | inet6 sctp netinet/sctp_output.c optional inet sctp | inet6 sctp netinet/sctp_pcb.c optional inet sctp | inet6 sctp netinet/sctp_peeloff.c optional inet sctp | inet6 sctp netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp netinet/sctp_syscalls.c optional inet sctp | inet6 sctp netinet/sctp_sysctl.c optional inet sctp | inet6 sctp netinet/sctp_timer.c optional inet sctp | inet6 sctp netinet/sctp_usrreq.c optional inet sctp | inet6 sctp netinet/sctputil.c optional inet sctp | inet6 sctp netinet/siftr.c optional inet siftr alq | inet6 siftr alq netinet/tcp_debug.c optional tcpdebug netinet/tcp_fastopen.c optional inet tcp_rfc7413 | inet6 tcp_rfc7413 netinet/tcp_hostcache.c optional inet | inet6 netinet/tcp_input.c optional inet | inet6 netinet/tcp_lro.c optional inet | inet6 netinet/tcp_output.c optional inet | inet6 netinet/tcp_offload.c optional tcp_offload inet | tcp_offload inet6 netinet/tcp_pcap.c optional inet tcppcap | inet6 tcppcap netinet/tcp_reass.c optional inet | inet6 netinet/tcp_sack.c optional inet | inet6 netinet/tcp_subr.c optional inet | inet6 netinet/tcp_syncache.c optional inet | inet6 netinet/tcp_timer.c optional inet | inet6 netinet/tcp_timewait.c optional inet | inet6 netinet/tcp_usrreq.c optional inet | inet6 netinet/udp_usrreq.c optional inet | inet6 netinet/libalias/alias.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_mod.c optional libalias | netgraph_nat netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet netinet6/dest6.c optional inet6 netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_fib.c optional inet6 netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6 netinet6/in6_ifattach.c optional inet6 netinet6/in6_jail.c optional 
inet6 netinet6/in6_mcast.c optional inet6 netinet6/in6_pcb.c optional inet6 netinet6/in6_pcbgroup.c optional inet6 pcbgroup netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_rss.c optional inet6 rss netinet6/in6_src.c optional inet6 netinet6/ip6_fastfwd.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_gre.c optional gre inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional mrouting inet6 netinet6/ip6_output.c optional inet6 netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 netinet6/sctp6_usrreq.c optional inet6 sctp netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional ipsec inet | ipsec inet6 netipsec/ipsec_input.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mod.c optional ipsec inet | ipsec inet6 netipsec/ipsec_output.c optional ipsec inet | ipsec inet6 netipsec/ipsec_pcb.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/key.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/key_debug.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/keysock.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/subr_ipsec.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/udpencap.c optional ipsec inet netipsec/xform_ah.c optional ipsec inet | ipsec inet6 netipsec/xform_esp.c optional ipsec inet | ipsec inet6 netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6 netipsec/xform_tcp.c optional ipsec inet tcp_signature | \ ipsec inet6 tcp_signature | ipsec_support inet tcp_signature | \ ipsec_support inet6 tcp_signature netpfil/ipfw/dn_aqm_codel.c optional inet dummynet netpfil/ipfw/dn_aqm_pie.c optional inet dummynet netpfil/ipfw/dn_heap.c optional inet dummynet netpfil/ipfw/dn_sched_fifo.c optional inet dummynet netpfil/ipfw/dn_sched_fq_codel.c optional inet dummynet netpfil/ipfw/dn_sched_fq_pie.c optional inet dummynet netpfil/ipfw/dn_sched_prio.c optional inet dummynet netpfil/ipfw/dn_sched_qfq.c optional inet dummynet netpfil/ipfw/dn_sched_rr.c optional inet dummynet netpfil/ipfw/dn_sched_wf2q.c optional inet dummynet netpfil/ipfw/ip_dummynet.c optional inet dummynet netpfil/ipfw/ip_dn_io.c optional inet dummynet netpfil/ipfw/ip_dn_glue.c optional inet dummynet netpfil/ipfw/ip_fw2.c optional inet ipfirewall netpfil/ipfw/ip_fw_bpf.c optional inet ipfirewall netpfil/ipfw/ip_fw_dynamic.c optional inet ipfirewall netpfil/ipfw/ip_fw_eaction.c optional inet ipfirewall netpfil/ipfw/ip_fw_log.c optional inet ipfirewall netpfil/ipfw/ip_fw_pfil.c optional inet ipfirewall netpfil/ipfw/ip_fw_sockopt.c optional inet ipfirewall netpfil/ipfw/ip_fw_table.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_algo.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_value.c optional inet ipfirewall netpfil/ipfw/ip_fw_iface.c optional inet ipfirewall netpfil/ipfw/ip_fw_nat.c optional inet ipfirewall_nat netpfil/ipfw/nat64/ip_fw_nat64.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64stl.c 
optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64_translate.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nptv6/ip_fw_nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/ipfw/nptv6/nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/ipfw/pmod/ip_fw_pmod.c optional inet ipfirewall_pmod netpfil/ipfw/pmod/tcpmod.c optional inet ipfirewall_pmod netpfil/pf/if_pflog.c optional pflog pf inet netpfil/pf/if_pfsync.c optional pfsync pf inet netpfil/pf/pf.c optional pf inet netpfil/pf/pf_if.c optional pf inet netpfil/pf/pf_ioctl.c optional pf inet netpfil/pf/pf_lb.c optional pf inet netpfil/pf/pf_norm.c optional pf inet netpfil/pf/pf_osfp.c optional pf inet netpfil/pf/pf_ruleset.c optional pf inet netpfil/pf/pf_table.c optional pf inet netpfil/pf/in4_cksum.c optional pf inet netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/bootp_subr.c optional bootp nfscl nfs/krpc_subr.c optional bootp nfscl nfs/nfs_diskless.c optional nfscl nfs_root nfs/nfs_fha.c optional nfsd nfs/nfs_lock.c optional nfscl | nfslockd | nfsd nfs/nfs_nfssvc.c optional nfscl | nfsd nlm/nlm_advlock.c optional nfslockd | nfsd nlm/nlm_prot_clnt.c optional nfslockd | nfsd nlm/nlm_prot_impl.c optional nfslockd | nfsd nlm/nlm_prot_server.c optional nfslockd | nfsd nlm/nlm_prot_svc.c optional nfslockd | nfsd nlm/nlm_prot_xdr.c optional nfslockd | nfsd nlm/sm_inter_xdr.c optional nfslockd | nfsd # Linux Kernel Programming Interface compat/linuxkpi/common/src/linux_kmod.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_compat.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_current.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_hrtimer.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_kthread.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_lock.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_page.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_pci.c optional compat_linuxkpi pci \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_tasklet.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_idr.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_radix.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C} -I$S/contrib/ck/include" compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_slab.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_work.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" # OpenFabrics Enterprise Distribution (Infiniband) ofed/drivers/infiniband/core/ib_addr.c optional ofed \ compile-with "${OFED_C}" 
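The ${LINUXKPI_C} and ${OFED_C} values used above are make variables that expand to the normal C compile command plus the include paths the respective compatibility layer needs; they are presumably defined in the kernel build makefiles (e.g. sys/conf/kern.pre.mk). A hedged sketch of how a per-subsystem compile command of this kind could be wired up; the FOO_C name and paths are hypothetical:

# makefile fragment (illustrative)
FOO_INCLUDES=	-I$S/compat/foo/include
FOO_C=		${NORMAL_C} ${FOO_INCLUDES}

# corresponding files entry (illustrative)
compat/foo/src/foo_main.c	optional compat_foo \
	compile-with "${FOO_C}"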
ofed/drivers/infiniband/core/ib_agent.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cache.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cma.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cq.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_device.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_fmr_pool.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwcm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwpm_msg.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwpm_util.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_mad.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_mad_rmpp.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_multicast.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_packer.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_sa_query.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_smi.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_sysfs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ucm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ucma.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ud_header.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_umem.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_user_mad.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_cmd.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_main.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_marshall.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_verbs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" 
ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mcg.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cm.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_ah.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_doorbell.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mad.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_main.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mr.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_qp.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_srq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_wc.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_alloc.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_catas.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cmd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw_qos.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_icm.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_intf.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_main.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mcg.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mr.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_pd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_port.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_profile.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_qp.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_reset.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_sense.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_resource_tracker.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_cq.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_main.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_netdev.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_port.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_resources.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_rx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_tx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_ah.c optional mlx5ib pci ofed \ 
compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_cq.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_gsi.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mad.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_main.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mem.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mr.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_qp.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_srq.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_virt.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_health.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mad.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_main.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mcg.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mr.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pagealloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_port.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_qp.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_srq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_transobj.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_ethtool.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_main.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_tx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_flow_table.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_rx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_txrx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" # crypto support opencrypto/cast.c optional crypto | ipsec | ipsec_support opencrypto/criov.c optional crypto | ipsec | ipsec_support opencrypto/crypto.c optional crypto | ipsec | ipsec_support opencrypto/cryptodev.c optional cryptodev opencrypto/cryptodev_if.m optional crypto | ipsec | ipsec_support opencrypto/cryptosoft.c optional crypto | ipsec | ipsec_support opencrypto/cryptodeflate.c optional crypto | ipsec | ipsec_support opencrypto/gmac.c optional crypto | 
ipsec | ipsec_support opencrypto/gfmult.c optional crypto | ipsec | ipsec_support opencrypto/rmd160.c optional crypto | ipsec | ipsec_support opencrypto/skipjack.c optional crypto | ipsec | ipsec_support opencrypto/xform.c optional crypto | ipsec | ipsec_support rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_bck.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_rc.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/getnetconfig.c optional krpc | nfslockd | nfscl | nfsd rpc/replay.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_callmsg.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_clnt.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/svc.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_db.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_dtrace.c optional dtaudit audit | dtraceall audit compile-with "${CDDL_C}" security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/audit/bsm_domain.c optional audit security/audit/bsm_errno.c optional audit security/audit/bsm_fcntl.c optional audit security/audit/bsm_socket_type.c optional audit security/audit/bsm_token.c optional audit security/mac/mac_audit.c optional mac audit security/mac/mac_cred.c optional mac security/mac/mac_framework.c optional mac security/mac/mac_inet.c optional mac inet | mac inet6 security/mac/mac_inet6.c optional mac inet6 security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c optional mac security/mac/mac_posix_shm.c optional mac security/mac/mac_priv.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_syscalls.c standard security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended 
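Note that security/audit/audit_syscalls.c and security/mac/mac_syscalls.c above are marked standard even though the rest of their subsystems are optional, presumably so the corresponding system-call entry points always link and simply fail when the subsystem is not compiled in; this is an inference from the listing, not something stated by the change itself. A sketch of the same arrangement with a hypothetical subsystem foo:

# the subsystem proper is optional ...
security/foo/foo.c		optional foo
# ... but its syscall entry points always build
security/foo/foo_syscalls.c	standard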
security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional mac_mls security/mac_none/mac_none.c optional mac_none security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test teken/teken.c optional sc | vt ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs ufs/ffs/ffs_tables.c optional ffs ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional ffs directio ufs/ffs/ffs_suspend.c optional ffs ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional ffs ufs/ufs/ufs_vnops.c optional ffs vm/default_pager.c standard vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/sg_pager.c standard vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/memguard.c optional DEBUG_MEMGUARD -vm/vm_domain.c standard +vm/vm_domainset.c standard vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c standard vm/vm_pageout.c standard vm/vm_pager.c standard vm/vm_phys.c standard vm/vm_radix.c standard vm/vm_reserv.c standard vm/vm_swapout.c optional !NO_SWAPPING vm/vm_swapout_dummy.c optional NO_SWAPPING vm/vm_unix.c standard vm/vnode_pager.c standard xen/features.c optional xenhvm xen/xenbus/xenbus_if.m optional xenhvm xen/xenbus/xenbus.c optional xenhvm xen/xenbus/xenbusb_if.m optional xenhvm xen/xenbus/xenbusb.c optional xenhvm xen/xenbus/xenbusb_front.c optional xenhvm xen/xenbus/xenbusb_back.c optional xenhvm xen/xenmem/xenmem_if.m optional xenhvm xdr/xdr.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_array.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_mbuf.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_mem.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_reference.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_sizeof.c optional krpc | nfslockd | nfscl | nfsd Index: user/jeff/numa/sys/kern/init_main.c =================================================================== --- user/jeff/numa/sys/kern/init_main.c (revision 326888) +++ user/jeff/numa/sys/kern/init_main.c (revision 326889) @@ -1,875 +1,872 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1995 Terrence R. Lambert * All rights reserved. * * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)init_main.c 8.9 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_init_path.h" #include "opt_verbose_sysinit.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void mi_startup(void); /* Should be elsewhere */ /* Components of the first process -- never freed. */ static struct session session0; static struct pgrp pgrp0; struct proc proc0; struct thread0_storage thread0_st __aligned(32); struct vmspace vmspace0; struct proc *initproc; #ifndef BOOTHOWTO #define BOOTHOWTO 0 #endif int boothowto = BOOTHOWTO; /* initialized so that it can be patched */ SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0, "Boot control flags, passed from loader"); #ifndef BOOTVERBOSE #define BOOTVERBOSE 0 #endif int bootverbose = BOOTVERBOSE; SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW, &bootverbose, 0, "Control the output of verbose kernel messages"); #ifdef INVARIANTS FEATURE(invariants, "Kernel compiled with INVARIANTS, may affect performance"); #endif /* * This ensures that there is at least one entry so that the sysinit_set * symbol is not undefined. A sybsystem ID of SI_SUB_DUMMY is never * executed. */ SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL); /* * The sysinit table itself. Items are checked off as the are run. * If we want to register new sysinit types, add them to newsysinit. 
*/ SET_DECLARE(sysinit_set, struct sysinit); struct sysinit **sysinit, **sysinit_end; struct sysinit **newsysinit, **newsysinit_end; EVENTHANDLER_LIST_DECLARE(process_init); EVENTHANDLER_LIST_DECLARE(thread_init); EVENTHANDLER_LIST_DECLARE(process_ctor); EVENTHANDLER_LIST_DECLARE(thread_ctor); /* * Merge a new sysinit set into the current set, reallocating it if * necessary. This can only be called after malloc is running. */ void sysinit_add(struct sysinit **set, struct sysinit **set_end) { struct sysinit **newset; struct sysinit **sipp; struct sysinit **xipp; int count; count = set_end - set; if (newsysinit) count += newsysinit_end - newsysinit; else count += sysinit_end - sysinit; newset = malloc(count * sizeof(*sipp), M_TEMP, M_NOWAIT); if (newset == NULL) panic("cannot malloc for sysinit"); xipp = newset; if (newsysinit) for (sipp = newsysinit; sipp < newsysinit_end; sipp++) *xipp++ = *sipp; else for (sipp = sysinit; sipp < sysinit_end; sipp++) *xipp++ = *sipp; for (sipp = set; sipp < set_end; sipp++) *xipp++ = *sipp; if (newsysinit) free(newsysinit, M_TEMP); newsysinit = newset; newsysinit_end = newset + count; } #if defined (DDB) && defined(VERBOSE_SYSINIT) static const char * symbol_name(vm_offset_t va, db_strategy_t strategy) { const char *name; c_db_sym_t sym; db_expr_t offset; if (va == 0) return (NULL); sym = db_search_symbol(va, strategy, &offset); if (offset != 0) return (NULL); db_symbol_values(sym, &name, NULL); return (name); } #endif /* * System startup; initialize the world, create process 0, mount root * filesystem, and fork to create init and pagedaemon. Most of the * hard work is done in the lower-level initialization routines including * startup(), which does memory initialization and autoconfiguration. * * This allows simple addition of new kernel subsystems that require * boot time initialization. It also allows substitution of subsystem * (for instance, a scheduler, kernel profiler, or VM system) by object * module. Finally, it allows for optional "kernel threads". */ void mi_startup(void) { struct sysinit **sipp; /* system initialization*/ struct sysinit **xipp; /* interior loop of sort*/ struct sysinit *save; /* bubble*/ #if defined(VERBOSE_SYSINIT) int last; int verbose; #endif if (boothowto & RB_VERBOSE) bootverbose++; if (sysinit == NULL) { sysinit = SET_BEGIN(sysinit_set); sysinit_end = SET_LIMIT(sysinit_set); } restart: /* * Perform a bubble sort of the system initialization objects by * their subsystem (primary key) and order (secondary key). */ for (sipp = sysinit; sipp < sysinit_end; sipp++) { for (xipp = sipp + 1; xipp < sysinit_end; xipp++) { if ((*sipp)->subsystem < (*xipp)->subsystem || ((*sipp)->subsystem == (*xipp)->subsystem && (*sipp)->order <= (*xipp)->order)) continue; /* skip*/ save = *sipp; *sipp = *xipp; *xipp = save; } } #if defined(VERBOSE_SYSINIT) last = SI_SUB_COPYRIGHT; verbose = 0; #if !defined(DDB) printf("VERBOSE_SYSINIT: DDB not enabled, symbol lookups disabled.\n"); #endif #endif /* * Traverse the (now) ordered list of system initialization tasks. * Perform each task, and continue on to the next task. 
*/ for (sipp = sysinit; sipp < sysinit_end; sipp++) { if ((*sipp)->subsystem == SI_SUB_DUMMY) continue; /* skip dummy task(s)*/ if ((*sipp)->subsystem == SI_SUB_DONE) continue; #if defined(VERBOSE_SYSINIT) if ((*sipp)->subsystem > last) { verbose = 1; last = (*sipp)->subsystem; printf("subsystem %x\n", last); } if (verbose) { #if defined(DDB) const char *func, *data; func = symbol_name((vm_offset_t)(*sipp)->func, DB_STGY_PROC); data = symbol_name((vm_offset_t)(*sipp)->udata, DB_STGY_ANY); if (func != NULL && data != NULL) printf(" %s(&%s)... ", func, data); else if (func != NULL) printf(" %s(%p)... ", func, (*sipp)->udata); else #endif printf(" %p(%p)... ", (*sipp)->func, (*sipp)->udata); } #endif /* Call function */ (*((*sipp)->func))((*sipp)->udata); #if defined(VERBOSE_SYSINIT) if (verbose) printf("done.\n"); #endif /* Check off the one we're just done */ (*sipp)->subsystem = SI_SUB_DONE; /* Check if we've installed more sysinit items via KLD */ if (newsysinit != NULL) { if (sysinit != SET_BEGIN(sysinit_set)) free(sysinit, M_TEMP); sysinit = newsysinit; sysinit_end = newsysinit_end; newsysinit = NULL; newsysinit_end = NULL; goto restart; } } mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED); mtx_unlock(&Giant); /* * Now hand over this thread to swapper. */ swapper(); /* NOTREACHED*/ } static void print_caddr_t(void *data) { printf("%s", (char *)data); } static void print_version(void *data __unused) { int len; /* Strip a trailing newline from version. */ len = strlen(version); while (len > 0 && version[len - 1] == '\n') len--; printf("%.*s %s\n", len, version, machine); printf("%s\n", compiler_version); } SYSINIT(announce, SI_SUB_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t, copyright); SYSINIT(trademark, SI_SUB_COPYRIGHT, SI_ORDER_SECOND, print_caddr_t, trademark); SYSINIT(version, SI_SUB_COPYRIGHT, SI_ORDER_THIRD, print_version, NULL); #ifdef WITNESS static char wit_warn[] = "WARNING: WITNESS option enabled, expect reduced performance.\n"; SYSINIT(witwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1, print_caddr_t, wit_warn); SYSINIT(witwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_caddr_t, wit_warn); #endif #ifdef DIAGNOSTIC static char diag_warn[] = "WARNING: DIAGNOSTIC option enabled, expect reduced performance.\n"; SYSINIT(diagwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 2, print_caddr_t, diag_warn); SYSINIT(diagwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 2, print_caddr_t, diag_warn); #endif static int null_fetch_syscall_args(struct thread *td __unused) { panic("null_fetch_syscall_args"); } static void null_set_syscall_retval(struct thread *td __unused, int error __unused) { panic("null_set_syscall_retval"); } struct sysentvec null_sysvec = { .sv_size = 0, .sv_table = NULL, .sv_mask = 0, .sv_errsize = 0, .sv_errtbl = NULL, .sv_transtrap = NULL, .sv_fixup = NULL, .sv_sendsig = NULL, .sv_sigcode = NULL, .sv_szsigcode = NULL, .sv_name = "null", .sv_coredump = NULL, .sv_imgact_try = NULL, .sv_minsigstksz = 0, .sv_pagesize = PAGE_SIZE, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_stackprot = VM_PROT_ALL, .sv_copyout_strings = NULL, .sv_setregs = NULL, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = 0, .sv_set_syscall_retval = null_set_syscall_retval, .sv_fetch_syscall_args = null_fetch_syscall_args, .sv_syscallnames = NULL, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, }; /* * The two following SYSINIT's are proc0 specific glue code. 
I am not * convinced that they can not be safely combined, but their order of * operation has been maintained as the same as the original init_main.c * for right now. */ /* ARGSUSED*/ static void proc0_init(void *dummy __unused) { struct proc *p; struct thread *td; struct ucred *newcred; struct uidinfo tmpuinfo; struct loginclass tmplc = { .lc_name = "", }; vm_paddr_t pageablemem; int i; GIANT_REQUIRED; p = &proc0; td = &thread0; /* * Initialize magic number and osrel. */ p->p_magic = P_MAGIC; p->p_osrel = osreldate; /* * Initialize thread and process structures. */ procinit(); /* set up proc zone */ threadinit(); /* set up UMA zones */ /* * Initialise scheduler resources. * Add scheduler specific parts to proc, thread as needed. */ schedinit(); /* scheduler gets its house in order */ /* * Create process 0 (the swapper). */ LIST_INSERT_HEAD(&allproc, p, p_list); LIST_INSERT_HEAD(PIDHASH(0), p, p_hash); mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK); p->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist); pgrp0.pg_session = &session0; mtx_init(&session0.s_mtx, "session", NULL, MTX_DEF); refcount_init(&session0.s_count, 1); session0.s_leader = p; p->p_sysent = &null_sysvec; p->p_flag = P_SYSTEM | P_INMEM | P_KPROC; p->p_flag2 = 0; p->p_state = PRS_NORMAL; p->p_klist = knlist_alloc(&p->p_mtx); STAILQ_INIT(&p->p_ktr); p->p_nice = NZERO; /* pid_max cannot be greater than PID_MAX */ td->td_tid = PID_MAX + 1; LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); td->td_state = TDS_RUNNING; td->td_pri_class = PRI_TIMESHARE; td->td_user_pri = PUSER; td->td_base_user_pri = PUSER; td->td_lend_user_pri = PRI_MAX; td->td_priority = PVM; td->td_base_pri = PVM; td->td_oncpu = curcpu; td->td_flags = TDF_INMEM; td->td_pflags = TDP_KTHREAD; td->td_cpuset = cpuset_thread0(); - vm_domain_policy_init(&td->td_vm_dom_policy); - vm_domain_policy_set(&td->td_vm_dom_policy, VM_POLICY_NONE, -1); - vm_domain_policy_init(&p->p_vm_dom_policy); - vm_domain_policy_set(&p->p_vm_dom_policy, VM_POLICY_NONE, -1); + td->td_domain.dr_policy = td->td_cpuset->cs_domain; prison0_init(); p->p_peers = 0; p->p_leader = p; p->p_reaper = p; LIST_INIT(&p->p_reaplist); strncpy(p->p_comm, "kernel", sizeof (p->p_comm)); strncpy(td->td_name, "swapper", sizeof (td->td_name)); callout_init_mtx(&p->p_itcallout, &p->p_mtx, 0); callout_init_mtx(&p->p_limco, &p->p_mtx, 0); callout_init(&td->td_slpcallout, 1); /* Create credentials. */ newcred = crget(); newcred->cr_ngroups = 1; /* group 0 */ /* A hack to prevent uifind from tripping over NULL pointers. */ curthread->td_ucred = newcred; tmpuinfo.ui_uid = 1; newcred->cr_uidinfo = newcred->cr_ruidinfo = &tmpuinfo; newcred->cr_uidinfo = uifind(0); newcred->cr_ruidinfo = uifind(0); newcred->cr_loginclass = &tmplc; newcred->cr_loginclass = loginclass_find("default"); /* End hack. creds get properly set later with thread_cow_get_proc */ curthread->td_ucred = NULL; newcred->cr_prison = &prison0; proc_set_cred_init(p, newcred); #ifdef AUDIT audit_cred_kproc0(newcred); #endif #ifdef MAC mac_cred_create_swapper(newcred); #endif /* Create sigacts. */ p->p_sigacts = sigacts_alloc(); /* Initialize signal state for process 0. */ siginit(&proc0); /* Create the file descriptor table. */ p->p_fd = fdinit(NULL, false); p->p_fdtol = NULL; /* Create the limits structures. 
*/ p->p_limit = lim_alloc(); for (i = 0; i < RLIM_NLIMITS; i++) p->p_limit->pl_rlimit[i].rlim_cur = p->p_limit->pl_rlimit[i].rlim_max = RLIM_INFINITY; p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles; p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; p->p_limit->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz; p->p_limit->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz; p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz; p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz; /* Cast to avoid overflow on i386/PAE. */ pageablemem = ptoa((vm_paddr_t)vm_cnt.v_free_count); p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem; p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3; p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = pageablemem; p->p_cpulimit = RLIM_INFINITY; PROC_LOCK(p); thread_cow_get_proc(td, p); PROC_UNLOCK(p); /* Initialize resource accounting structures. */ racct_create(&p->p_racct); p->p_stats = pstats_alloc(); /* Allocate a prototype map so we have something to fork. */ p->p_vmspace = &vmspace0; vmspace0.vm_refcnt = 1; pmap_pinit0(vmspace_pmap(&vmspace0)); /* * proc0 is not expected to enter usermode, so there is no special * handling for sv_minuser here, like is done for exec_new_vmspace(). */ vm_map_init(&vmspace0.vm_map, vmspace_pmap(&vmspace0), p->p_sysent->sv_minuser, p->p_sysent->sv_maxuser); /* * Call the init and ctor for the new thread and proc. We wait * to do this until all other structures are fairly sane. */ EVENTHANDLER_DIRECT_INVOKE(process_init, p); EVENTHANDLER_DIRECT_INVOKE(thread_init, td); EVENTHANDLER_DIRECT_INVOKE(process_ctor, p); EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td); /* * Charge root for one process. */ (void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0); PROC_LOCK(p); racct_add_force(p, RACCT_NPROC, 1); PROC_UNLOCK(p); } SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL); /* ARGSUSED*/ static void proc0_post(void *dummy __unused) { struct timespec ts; struct proc *p; struct rusage ru; struct thread *td; /* * Now we can look at the time, having had a chance to verify the * time from the filesystem. Pretend that proc0 started now. */ sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { microuptime(&p->p_stats->p_start); PROC_STATLOCK(p); rufetch(p, &ru); /* Clears thread stats */ PROC_STATUNLOCK(p); p->p_rux.rux_runtime = 0; p->p_rux.rux_uticks = 0; p->p_rux.rux_sticks = 0; p->p_rux.rux_iticks = 0; FOREACH_THREAD_IN_PROC(p, td) { td->td_runtime = 0; } } sx_sunlock(&allproc_lock); PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); /* * Give the ``random'' number generator a thump. */ nanotime(&ts); srandom(ts.tv_sec ^ ts.tv_nsec); } SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL); static void random_init(void *dummy __unused) { /* * After CPU has been started we have some randomness on most * platforms via get_cyclecount(). For platforms that don't * we will reseed random(9) in proc0_post() as well. */ srandom(get_cyclecount()); } SYSINIT(random, SI_SUB_RANDOM, SI_ORDER_FIRST, random_init, NULL); /* *************************************************************************** **** **** The following SYSINIT's and glue code should be moved to the **** respective files on a per subsystem basis. **** *************************************************************************** */ /* * List of paths to try when searching for "init". 
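 * The compiled-in default below can be changed without rebuilding the
 * kernel: start_init() consults the kernel environment first (see the
 * kern_getenv("init_path") call further down), so setting the variable
 * from the loader, for example with a /boot/loader.conf line such as
 *
 *	init_path="/rescue/init"
 *
 * (an illustration, not a recommendation), takes precedence over both the
 * INIT_PATH kernel option and the built-in string.  The read-only
 * kern.init_path sysctl then reports the value that ends up being used.
 *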
*/ static char init_path[MAXPATHLEN] = #ifdef INIT_PATH __XSTRING(INIT_PATH); #else "/sbin/init:/sbin/oinit:/sbin/init.bak:/rescue/init"; #endif SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0, "Path used to search the init process"); /* * Shutdown timeout of init(8). * Unused within kernel, but used to control init(8), hence do not remove. */ #ifndef INIT_SHUTDOWN_TIMEOUT #define INIT_SHUTDOWN_TIMEOUT 120 #endif static int init_shutdown_timeout = INIT_SHUTDOWN_TIMEOUT; SYSCTL_INT(_kern, OID_AUTO, init_shutdown_timeout, CTLFLAG_RW, &init_shutdown_timeout, 0, "Shutdown timeout of init(8). " "Unused within kernel, but used to control init(8)"); /* * Start the initial user process; try exec'ing each pathname in init_path. * The program is invoked with one argument containing the boot flags. */ static void start_init(void *dummy) { vm_offset_t addr; struct execve_args args; int options, error; char *var, *path, *next, *s; char *ucp, **uap, *arg0, *arg1; struct thread *td; struct proc *p; mtx_lock(&Giant); GIANT_REQUIRED; td = curthread; p = td->td_proc; vfs_mountroot(); /* Wipe GELI passphrase from the environment. */ kern_unsetenv("kern.geom.eli.passphrase"); /* * Need just enough stack to hold the faked-up "execve()" arguments. */ addr = p->p_sysent->sv_usrstack - PAGE_SIZE; if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE, 0, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0) panic("init: couldn't allocate argument space"); p->p_vmspace->vm_maxsaddr = (caddr_t)addr; p->p_vmspace->vm_ssize = 1; if ((var = kern_getenv("init_path")) != NULL) { strlcpy(init_path, var, sizeof(init_path)); freeenv(var); } for (path = init_path; *path != '\0'; path = next) { while (*path == ':') path++; if (*path == '\0') break; for (next = path; *next != '\0' && *next != ':'; next++) /* nothing */ ; if (bootverbose) printf("start_init: trying %.*s\n", (int)(next - path), path); /* * Move out the boot flag argument. */ options = 0; ucp = (char *)p->p_sysent->sv_usrstack; (void)subyte(--ucp, 0); /* trailing zero */ if (boothowto & RB_SINGLE) { (void)subyte(--ucp, 's'); options = 1; } #ifdef notyet if (boothowto & RB_FASTBOOT) { (void)subyte(--ucp, 'f'); options = 1; } #endif #ifdef BOOTCDROM (void)subyte(--ucp, 'C'); options = 1; #endif if (options == 0) (void)subyte(--ucp, '-'); (void)subyte(--ucp, '-'); /* leading hyphen */ arg1 = ucp; /* * Move out the file name (also arg 0). */ (void)subyte(--ucp, 0); for (s = next - 1; s >= path; s--) (void)subyte(--ucp, *s); arg0 = ucp; /* * Move out the arg pointers. */ uap = (char **)rounddown2((intptr_t)ucp, sizeof(intptr_t)); (void)suword((caddr_t)--uap, (long)0); /* terminator */ (void)suword((caddr_t)--uap, (long)(intptr_t)arg1); (void)suword((caddr_t)--uap, (long)(intptr_t)arg0); /* * Point at the arguments. */ args.fname = arg0; args.argv = uap; args.envv = NULL; /* * Now try to exec the program. If can't for any reason * other than it doesn't exist, complain. * * Otherwise, return via fork_trampoline() all the way * to user mode as init! */ if ((error = sys_execve(td, &args)) == EJUSTRETURN) { mtx_unlock(&Giant); return; } if (error != ENOENT) printf("exec %.*s: error %d\n", (int)(next - path), path, error); } printf("init: not found in path %s\n", init_path); panic("no init"); } /* * Like kproc_create(), but runs in its own address space. * We do this early to reserve pid 1. * * Note special case - do not make it runnable yet. Other work * in progress will change this more. 
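 * The bring-up is deliberately split in two: create_init() forks the
 * process with RFSTOPPED so that pid 1 exists (and its credentials can be
 * set up) well before the scheduler is asked to run it, and kick_init()
 * only marks the thread runnable once SI_SUB_KTHREAD_INIT is reached.
 * The same defer-the-run pattern looks roughly like this (a sketch only,
 * with error handling omitted and "newproc" a placeholder variable):
 *
 *	struct proc *newproc;
 *	struct thread *ntd;
 *	struct fork_req fr;
 *
 *	bzero(&fr, sizeof(fr));
 *	fr.fr_flags = RFPROC | RFSTOPPED;
 *	fr.fr_procp = &newproc;
 *	fork1(curthread, &fr);
 *	...
 *	ntd = FIRST_THREAD_IN_PROC(newproc);
 *	thread_lock(ntd);
 *	TD_SET_CAN_RUN(ntd);
 *	sched_add(ntd, SRQ_BORING);
 *	thread_unlock(ntd);
 *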
 */
static void
create_init(const void *udata __unused)
{
	struct fork_req fr;
	struct ucred *newcred, *oldcred;
	struct thread *td;
	int error;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFSTOPPED;
	fr.fr_procp = &initproc;
	error = fork1(&thread0, &fr);
	if (error)
		panic("cannot fork init: %d\n", error);
	KASSERT(initproc->p_pid == 1, ("create_init: initproc->p_pid != 1"));
	/* divorce init's credentials from the kernel's */
	newcred = crget();
	sx_xlock(&proctree_lock);
	PROC_LOCK(initproc);
	initproc->p_flag |= P_SYSTEM | P_INMEM;
	initproc->p_treeflag |= P_TREE_REAPER;
	LIST_INSERT_HEAD(&initproc->p_reaplist, &proc0, p_reapsibling);
	oldcred = initproc->p_ucred;
	crcopy(newcred, oldcred);
#ifdef MAC
	mac_cred_create_init(newcred);
#endif
#ifdef AUDIT
	audit_cred_proc1(newcred);
#endif
	proc_set_cred(initproc, newcred);
	td = FIRST_THREAD_IN_PROC(initproc);
	crfree(td->td_ucred);
	td->td_ucred = crhold(initproc->p_ucred);
	PROC_UNLOCK(initproc);
	sx_xunlock(&proctree_lock);
	crfree(oldcred);
	cpu_fork_kthread_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL);

/*
 * Make it runnable now.
 */
static void
kick_init(const void *udata __unused)
{
	struct thread *td;

	td = FIRST_THREAD_IN_PROC(initproc);
	thread_lock(td);
	TD_SET_CAN_RUN(td);
	sched_add(td, SRQ_BORING);
	thread_unlock(td);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_MIDDLE, kick_init, NULL);
Index: user/jeff/numa/sys/kern/init_sysent.c
===================================================================
--- user/jeff/numa/sys/kern/init_sysent.c (revision 326888)
+++ user/jeff/numa/sys/kern/init_sysent.c (revision 326889)
@@ -1,615 +1,617 @@
/*
 * System call switch table.
 *
 * DO NOT EDIT-- this file is automatically generated.
 * $FreeBSD$
 */

#include "opt_compat.h"
#include <sys/param.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#define AS(name) (sizeof(struct name) / sizeof(register_t))

#ifdef COMPAT_43
#define compat(n, name) n, (sy_call_t *)__CONCAT(o,name)
#else
#define compat(n, name) 0, (sy_call_t *)nosys
#endif

#ifdef COMPAT_FREEBSD4
#define compat4(n, name) n, (sy_call_t *)__CONCAT(freebsd4_,name)
#else
#define compat4(n, name) 0, (sy_call_t *)nosys
#endif

#ifdef COMPAT_FREEBSD6
#define compat6(n, name) n, (sy_call_t *)__CONCAT(freebsd6_,name)
#else
#define compat6(n, name) 0, (sy_call_t *)nosys
#endif

#ifdef COMPAT_FREEBSD7
#define compat7(n, name) n, (sy_call_t *)__CONCAT(freebsd7_,name)
#else
#define compat7(n, name) 0, (sy_call_t *)nosys
#endif

#ifdef COMPAT_FREEBSD10
#define compat10(n, name) n, (sy_call_t *)__CONCAT(freebsd10_,name)
#else
#define compat10(n, name) 0, (sy_call_t *)nosys
#endif

#ifdef COMPAT_FREEBSD11
#define compat11(n, name) n, (sy_call_t *)__CONCAT(freebsd11_,name)
#else
#define compat11(n, name) 0, (sy_call_t *)nosys
#endif

/*
 * The casts are bogus but will do for now.
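 * Each entry pairs an argument-count word with a handler, and the
 * compat*() macros above pick between the historical implementation and
 * nosys depending on the corresponding COMPAT_* kernel option.  Entry 14
 * below, for instance,
 *
 *	{ compat11(AS(freebsd11_mknod_args),mknod), AUE_MKNOD, ... },
 *
 * expands to
 *
 *	{ AS(freebsd11_mknod_args), (sy_call_t *)freebsd11_mknod, AUE_MKNOD, ... },
 *
 * under COMPAT_FREEBSD11, and to
 *
 *	{ 0, (sy_call_t *)nosys, AUE_MKNOD, ... },
 *
 * otherwise.
 *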
*/ struct sysent sysent[] = { { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 0 = syscall */ { AS(sys_exit_args), (sy_call_t *)sys_sys_exit, AUE_EXIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 1 = exit */ { 0, (sy_call_t *)sys_fork, AUE_FORK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 2 = fork */ { AS(read_args), (sy_call_t *)sys_read, AUE_READ, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 3 = read */ { AS(write_args), (sy_call_t *)sys_write, AUE_WRITE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 4 = write */ { AS(open_args), (sy_call_t *)sys_open, AUE_OPEN_RWTC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 5 = open */ { AS(close_args), (sy_call_t *)sys_close, AUE_CLOSE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 6 = close */ { AS(wait4_args), (sy_call_t *)sys_wait4, AUE_WAIT4, NULL, 0, 0, 0, SY_THR_STATIC }, /* 7 = wait4 */ { compat(AS(ocreat_args),creat), AUE_CREAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 8 = old creat */ { AS(link_args), (sy_call_t *)sys_link, AUE_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 9 = link */ { AS(unlink_args), (sy_call_t *)sys_unlink, AUE_UNLINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 10 = unlink */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 11 = obsolete execv */ { AS(chdir_args), (sy_call_t *)sys_chdir, AUE_CHDIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 12 = chdir */ { AS(fchdir_args), (sy_call_t *)sys_fchdir, AUE_FCHDIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 13 = fchdir */ { compat11(AS(freebsd11_mknod_args),mknod), AUE_MKNOD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 14 = freebsd11 mknod */ { AS(chmod_args), (sy_call_t *)sys_chmod, AUE_CHMOD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 15 = chmod */ { AS(chown_args), (sy_call_t *)sys_chown, AUE_CHOWN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 16 = chown */ { AS(obreak_args), (sy_call_t *)sys_obreak, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 17 = break */ { compat4(AS(freebsd4_getfsstat_args),getfsstat), AUE_GETFSSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 18 = freebsd4 getfsstat */ { compat(AS(olseek_args),lseek), AUE_LSEEK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 19 = old lseek */ { 0, (sy_call_t *)sys_getpid, AUE_GETPID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 20 = getpid */ { AS(mount_args), (sy_call_t *)sys_mount, AUE_MOUNT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 21 = mount */ { AS(unmount_args), (sy_call_t *)sys_unmount, AUE_UMOUNT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 22 = unmount */ { AS(setuid_args), (sy_call_t *)sys_setuid, AUE_SETUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 23 = setuid */ { 0, (sy_call_t *)sys_getuid, AUE_GETUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 24 = getuid */ { 0, (sy_call_t *)sys_geteuid, AUE_GETEUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 25 = geteuid */ { AS(ptrace_args), (sy_call_t *)sys_ptrace, AUE_PTRACE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 26 = ptrace */ { AS(recvmsg_args), (sy_call_t *)sys_recvmsg, AUE_RECVMSG, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 27 = recvmsg */ { AS(sendmsg_args), (sy_call_t *)sys_sendmsg, AUE_SENDMSG, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 28 = sendmsg */ { AS(recvfrom_args), (sy_call_t *)sys_recvfrom, AUE_RECVFROM, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 29 = recvfrom */ { AS(accept_args), (sy_call_t *)sys_accept, AUE_ACCEPT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 30 = accept */ { AS(getpeername_args), (sy_call_t *)sys_getpeername, AUE_GETPEERNAME, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 31 = getpeername */ { AS(getsockname_args), 
(sy_call_t *)sys_getsockname, AUE_GETSOCKNAME, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 32 = getsockname */ { AS(access_args), (sy_call_t *)sys_access, AUE_ACCESS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 33 = access */ { AS(chflags_args), (sy_call_t *)sys_chflags, AUE_CHFLAGS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 34 = chflags */ { AS(fchflags_args), (sy_call_t *)sys_fchflags, AUE_FCHFLAGS, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 35 = fchflags */ { 0, (sy_call_t *)sys_sync, AUE_SYNC, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 36 = sync */ { AS(kill_args), (sy_call_t *)sys_kill, AUE_KILL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 37 = kill */ { compat(AS(ostat_args),stat), AUE_STAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 38 = old stat */ { 0, (sy_call_t *)sys_getppid, AUE_GETPPID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 39 = getppid */ { compat(AS(olstat_args),lstat), AUE_LSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 40 = old lstat */ { AS(dup_args), (sy_call_t *)sys_dup, AUE_DUP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 41 = dup */ { compat10(0,pipe), AUE_PIPE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 42 = freebsd10 pipe */ { 0, (sy_call_t *)sys_getegid, AUE_GETEGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 43 = getegid */ { AS(profil_args), (sy_call_t *)sys_profil, AUE_PROFILE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 44 = profil */ { AS(ktrace_args), (sy_call_t *)sys_ktrace, AUE_KTRACE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 45 = ktrace */ { compat(AS(osigaction_args),sigaction), AUE_SIGACTION, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 46 = old sigaction */ { 0, (sy_call_t *)sys_getgid, AUE_GETGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 47 = getgid */ { compat(AS(osigprocmask_args),sigprocmask), AUE_SIGPROCMASK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 48 = old sigprocmask */ { AS(getlogin_args), (sy_call_t *)sys_getlogin, AUE_GETLOGIN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 49 = getlogin */ { AS(setlogin_args), (sy_call_t *)sys_setlogin, AUE_SETLOGIN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 50 = setlogin */ { AS(acct_args), (sy_call_t *)sys_acct, AUE_ACCT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 51 = acct */ { compat(0,sigpending), AUE_SIGPENDING, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 52 = old sigpending */ { AS(sigaltstack_args), (sy_call_t *)sys_sigaltstack, AUE_SIGALTSTACK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 53 = sigaltstack */ { AS(ioctl_args), (sy_call_t *)sys_ioctl, AUE_IOCTL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 54 = ioctl */ { AS(reboot_args), (sy_call_t *)sys_reboot, AUE_REBOOT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 55 = reboot */ { AS(revoke_args), (sy_call_t *)sys_revoke, AUE_REVOKE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 56 = revoke */ { AS(symlink_args), (sy_call_t *)sys_symlink, AUE_SYMLINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 57 = symlink */ { AS(readlink_args), (sy_call_t *)sys_readlink, AUE_READLINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 58 = readlink */ { AS(execve_args), (sy_call_t *)sys_execve, AUE_EXECVE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 59 = execve */ { AS(umask_args), (sy_call_t *)sys_umask, AUE_UMASK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 60 = umask */ { AS(chroot_args), (sy_call_t *)sys_chroot, AUE_CHROOT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 61 = chroot */ { compat(AS(ofstat_args),fstat), AUE_FSTAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 62 = old fstat */ { compat(AS(getkerninfo_args),getkerninfo), AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 63 = old 
getkerninfo */ { compat(0,getpagesize), AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 64 = old getpagesize */ { AS(msync_args), (sy_call_t *)sys_msync, AUE_MSYNC, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 65 = msync */ { 0, (sy_call_t *)sys_vfork, AUE_VFORK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 66 = vfork */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 67 = obsolete vread */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 68 = obsolete vwrite */ { AS(sbrk_args), (sy_call_t *)sys_sbrk, AUE_SBRK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 69 = sbrk */ { AS(sstk_args), (sy_call_t *)sys_sstk, AUE_SSTK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 70 = sstk */ { compat(AS(ommap_args),mmap), AUE_MMAP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 71 = old mmap */ { AS(ovadvise_args), (sy_call_t *)sys_ovadvise, AUE_O_VADVISE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 72 = vadvise */ { AS(munmap_args), (sy_call_t *)sys_munmap, AUE_MUNMAP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 73 = munmap */ { AS(mprotect_args), (sy_call_t *)sys_mprotect, AUE_MPROTECT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 74 = mprotect */ { AS(madvise_args), (sy_call_t *)sys_madvise, AUE_MADVISE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 75 = madvise */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 76 = obsolete vhangup */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 77 = obsolete vlimit */ { AS(mincore_args), (sy_call_t *)sys_mincore, AUE_MINCORE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 78 = mincore */ { AS(getgroups_args), (sy_call_t *)sys_getgroups, AUE_GETGROUPS, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 79 = getgroups */ { AS(setgroups_args), (sy_call_t *)sys_setgroups, AUE_SETGROUPS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 80 = setgroups */ { 0, (sy_call_t *)sys_getpgrp, AUE_GETPGRP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 81 = getpgrp */ { AS(setpgid_args), (sy_call_t *)sys_setpgid, AUE_SETPGRP, NULL, 0, 0, 0, SY_THR_STATIC }, /* 82 = setpgid */ { AS(setitimer_args), (sy_call_t *)sys_setitimer, AUE_SETITIMER, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 83 = setitimer */ { compat(0,wait), AUE_WAIT4, NULL, 0, 0, 0, SY_THR_STATIC }, /* 84 = old wait */ { AS(swapon_args), (sy_call_t *)sys_swapon, AUE_SWAPON, NULL, 0, 0, 0, SY_THR_STATIC }, /* 85 = swapon */ { AS(getitimer_args), (sy_call_t *)sys_getitimer, AUE_GETITIMER, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 86 = getitimer */ { compat(AS(gethostname_args),gethostname), AUE_SYSCTL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 87 = old gethostname */ { compat(AS(sethostname_args),sethostname), AUE_SYSCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 88 = old sethostname */ { 0, (sy_call_t *)sys_getdtablesize, AUE_GETDTABLESIZE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 89 = getdtablesize */ { AS(dup2_args), (sy_call_t *)sys_dup2, AUE_DUP2, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 90 = dup2 */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 91 = getdopt */ { AS(fcntl_args), (sy_call_t *)sys_fcntl, AUE_FCNTL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 92 = fcntl */ { AS(select_args), (sy_call_t *)sys_select, AUE_SELECT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 93 = select */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 94 = setdopt */ { AS(fsync_args), (sy_call_t *)sys_fsync, AUE_FSYNC, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 95 = fsync */ { 
AS(setpriority_args), (sy_call_t *)sys_setpriority, AUE_SETPRIORITY, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 96 = setpriority */ { AS(socket_args), (sy_call_t *)sys_socket, AUE_SOCKET, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 97 = socket */ { AS(connect_args), (sy_call_t *)sys_connect, AUE_CONNECT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 98 = connect */ { compat(AS(accept_args),accept), AUE_ACCEPT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 99 = old accept */ { AS(getpriority_args), (sy_call_t *)sys_getpriority, AUE_GETPRIORITY, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 100 = getpriority */ { compat(AS(osend_args),send), AUE_SEND, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 101 = old send */ { compat(AS(orecv_args),recv), AUE_RECV, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 102 = old recv */ { compat(AS(osigreturn_args),sigreturn), AUE_SIGRETURN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 103 = old sigreturn */ { AS(bind_args), (sy_call_t *)sys_bind, AUE_BIND, NULL, 0, 0, 0, SY_THR_STATIC }, /* 104 = bind */ { AS(setsockopt_args), (sy_call_t *)sys_setsockopt, AUE_SETSOCKOPT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 105 = setsockopt */ { AS(listen_args), (sy_call_t *)sys_listen, AUE_LISTEN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 106 = listen */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 107 = obsolete vtimes */ { compat(AS(osigvec_args),sigvec), AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 108 = old sigvec */ { compat(AS(osigblock_args),sigblock), AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 109 = old sigblock */ { compat(AS(osigsetmask_args),sigsetmask), AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 110 = old sigsetmask */ { compat(AS(osigsuspend_args),sigsuspend), AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 111 = old sigsuspend */ { compat(AS(osigstack_args),sigstack), AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 112 = old sigstack */ { compat(AS(orecvmsg_args),recvmsg), AUE_RECVMSG, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 113 = old recvmsg */ { compat(AS(osendmsg_args),sendmsg), AUE_SENDMSG, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 114 = old sendmsg */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 115 = obsolete vtrace */ { AS(gettimeofday_args), (sy_call_t *)sys_gettimeofday, AUE_GETTIMEOFDAY, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 116 = gettimeofday */ { AS(getrusage_args), (sy_call_t *)sys_getrusage, AUE_GETRUSAGE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 117 = getrusage */ { AS(getsockopt_args), (sy_call_t *)sys_getsockopt, AUE_GETSOCKOPT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 118 = getsockopt */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 119 = resuba */ { AS(readv_args), (sy_call_t *)sys_readv, AUE_READV, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 120 = readv */ { AS(writev_args), (sy_call_t *)sys_writev, AUE_WRITEV, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 121 = writev */ { AS(settimeofday_args), (sy_call_t *)sys_settimeofday, AUE_SETTIMEOFDAY, NULL, 0, 0, 0, SY_THR_STATIC }, /* 122 = settimeofday */ { AS(fchown_args), (sy_call_t *)sys_fchown, AUE_FCHOWN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 123 = fchown */ { AS(fchmod_args), (sy_call_t *)sys_fchmod, AUE_FCHMOD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 124 = fchmod */ { compat(AS(recvfrom_args),recvfrom), AUE_RECVFROM, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 125 = 
old recvfrom */ { AS(setreuid_args), (sy_call_t *)sys_setreuid, AUE_SETREUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 126 = setreuid */ { AS(setregid_args), (sy_call_t *)sys_setregid, AUE_SETREGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 127 = setregid */ { AS(rename_args), (sy_call_t *)sys_rename, AUE_RENAME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 128 = rename */ { compat(AS(otruncate_args),truncate), AUE_TRUNCATE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 129 = old truncate */ { compat(AS(oftruncate_args),ftruncate), AUE_FTRUNCATE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 130 = old ftruncate */ { AS(flock_args), (sy_call_t *)sys_flock, AUE_FLOCK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 131 = flock */ { AS(mkfifo_args), (sy_call_t *)sys_mkfifo, AUE_MKFIFO, NULL, 0, 0, 0, SY_THR_STATIC }, /* 132 = mkfifo */ { AS(sendto_args), (sy_call_t *)sys_sendto, AUE_SENDTO, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 133 = sendto */ { AS(shutdown_args), (sy_call_t *)sys_shutdown, AUE_SHUTDOWN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 134 = shutdown */ { AS(socketpair_args), (sy_call_t *)sys_socketpair, AUE_SOCKETPAIR, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 135 = socketpair */ { AS(mkdir_args), (sy_call_t *)sys_mkdir, AUE_MKDIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 136 = mkdir */ { AS(rmdir_args), (sy_call_t *)sys_rmdir, AUE_RMDIR, NULL, 0, 0, 0, SY_THR_STATIC }, /* 137 = rmdir */ { AS(utimes_args), (sy_call_t *)sys_utimes, AUE_UTIMES, NULL, 0, 0, 0, SY_THR_STATIC }, /* 138 = utimes */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 139 = obsolete 4.2 sigreturn */ { AS(adjtime_args), (sy_call_t *)sys_adjtime, AUE_ADJTIME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 140 = adjtime */ { compat(AS(ogetpeername_args),getpeername), AUE_GETPEERNAME, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 141 = old getpeername */ { compat(0,gethostid), AUE_SYSCTL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 142 = old gethostid */ { compat(AS(osethostid_args),sethostid), AUE_SYSCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 143 = old sethostid */ { compat(AS(ogetrlimit_args),getrlimit), AUE_GETRLIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 144 = old getrlimit */ { compat(AS(osetrlimit_args),setrlimit), AUE_SETRLIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 145 = old setrlimit */ { compat(AS(okillpg_args),killpg), AUE_KILLPG, NULL, 0, 0, 0, SY_THR_STATIC }, /* 146 = old killpg */ { 0, (sy_call_t *)sys_setsid, AUE_SETSID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 147 = setsid */ { AS(quotactl_args), (sy_call_t *)sys_quotactl, AUE_QUOTACTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 148 = quotactl */ { compat(0,quota), AUE_O_QUOTA, NULL, 0, 0, 0, SY_THR_STATIC }, /* 149 = old quota */ { compat(AS(getsockname_args),getsockname), AUE_GETSOCKNAME, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 150 = old getsockname */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 151 = sem_lock */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 152 = sem_wakeup */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 153 = asyncdaemon */ { AS(nlm_syscall_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 154 = nlm_syscall */ { AS(nfssvc_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 155 = nfssvc */ { compat(AS(ogetdirentries_args),getdirentries), AUE_GETDIRENTRIES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 156 = old getdirentries */ { 
compat4(AS(freebsd4_statfs_args),statfs), AUE_STATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 157 = freebsd4 statfs */ { compat4(AS(freebsd4_fstatfs_args),fstatfs), AUE_FSTATFS, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 158 = freebsd4 fstatfs */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 159 = nosys */ { AS(lgetfh_args), (sy_call_t *)sys_lgetfh, AUE_LGETFH, NULL, 0, 0, 0, SY_THR_STATIC }, /* 160 = lgetfh */ { AS(getfh_args), (sy_call_t *)sys_getfh, AUE_NFS_GETFH, NULL, 0, 0, 0, SY_THR_STATIC }, /* 161 = getfh */ { compat4(AS(freebsd4_getdomainname_args),getdomainname), AUE_SYSCTL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 162 = freebsd4 getdomainname */ { compat4(AS(freebsd4_setdomainname_args),setdomainname), AUE_SYSCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 163 = freebsd4 setdomainname */ { compat4(AS(freebsd4_uname_args),uname), AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 164 = freebsd4 uname */ { AS(sysarch_args), (sy_call_t *)sysarch, AUE_SYSARCH, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 165 = sysarch */ { AS(rtprio_args), (sy_call_t *)sys_rtprio, AUE_RTPRIO, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 166 = rtprio */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 167 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 168 = nosys */ { AS(semsys_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 169 = semsys */ { AS(msgsys_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 170 = msgsys */ { AS(shmsys_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 171 = shmsys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 172 = nosys */ { compat6(AS(freebsd6_pread_args),pread), AUE_PREAD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 173 = freebsd6 pread */ { compat6(AS(freebsd6_pwrite_args),pwrite), AUE_PWRITE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 174 = freebsd6 pwrite */ { AS(setfib_args), (sy_call_t *)sys_setfib, AUE_SETFIB, NULL, 0, 0, 0, SY_THR_STATIC }, /* 175 = setfib */ { AS(ntp_adjtime_args), (sy_call_t *)sys_ntp_adjtime, AUE_NTP_ADJTIME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 176 = ntp_adjtime */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 177 = sfork */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 178 = getdescriptor */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 179 = setdescriptor */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 180 = nosys */ { AS(setgid_args), (sy_call_t *)sys_setgid, AUE_SETGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 181 = setgid */ { AS(setegid_args), (sy_call_t *)sys_setegid, AUE_SETEGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 182 = setegid */ { AS(seteuid_args), (sy_call_t *)sys_seteuid, AUE_SETEUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 183 = seteuid */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 184 = lfs_bmapv */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 185 = lfs_markv */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 186 = lfs_segclean */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 187 = lfs_segwait */ { compat11(AS(freebsd11_stat_args),stat), AUE_STAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 188 = freebsd11 stat */ { compat11(AS(freebsd11_fstat_args),fstat), AUE_FSTAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 189 = freebsd11 
fstat */ { compat11(AS(freebsd11_lstat_args),lstat), AUE_LSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 190 = freebsd11 lstat */ { AS(pathconf_args), (sy_call_t *)sys_pathconf, AUE_PATHCONF, NULL, 0, 0, 0, SY_THR_STATIC }, /* 191 = pathconf */ { AS(fpathconf_args), (sy_call_t *)sys_fpathconf, AUE_FPATHCONF, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 192 = fpathconf */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 193 = nosys */ { AS(__getrlimit_args), (sy_call_t *)sys_getrlimit, AUE_GETRLIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 194 = getrlimit */ { AS(__setrlimit_args), (sy_call_t *)sys_setrlimit, AUE_SETRLIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 195 = setrlimit */ { compat11(AS(freebsd11_getdirentries_args),getdirentries), AUE_GETDIRENTRIES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 196 = freebsd11 getdirentries */ { compat6(AS(freebsd6_mmap_args),mmap), AUE_MMAP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 197 = freebsd6 mmap */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 198 = __syscall */ { compat6(AS(freebsd6_lseek_args),lseek), AUE_LSEEK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 199 = freebsd6 lseek */ { compat6(AS(freebsd6_truncate_args),truncate), AUE_TRUNCATE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 200 = freebsd6 truncate */ { compat6(AS(freebsd6_ftruncate_args),ftruncate), AUE_FTRUNCATE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 201 = freebsd6 ftruncate */ { AS(sysctl_args), (sy_call_t *)sys___sysctl, AUE_SYSCTL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 202 = __sysctl */ { AS(mlock_args), (sy_call_t *)sys_mlock, AUE_MLOCK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 203 = mlock */ { AS(munlock_args), (sy_call_t *)sys_munlock, AUE_MUNLOCK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 204 = munlock */ { AS(undelete_args), (sy_call_t *)sys_undelete, AUE_UNDELETE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 205 = undelete */ { AS(futimes_args), (sy_call_t *)sys_futimes, AUE_FUTIMES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 206 = futimes */ { AS(getpgid_args), (sy_call_t *)sys_getpgid, AUE_GETPGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 207 = getpgid */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 208 = newreboot */ { AS(poll_args), (sy_call_t *)sys_poll, AUE_POLL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 209 = poll */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 210 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 211 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 212 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 213 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 214 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 215 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 216 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 217 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 218 = lkmnosys */ { AS(nosys_args), (sy_call_t *)lkmnosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 219 = lkmnosys */ { 0, (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 220 = freebsd7 __semctl */ { 
AS(semget_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 221 = semget */ { AS(semop_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 222 = semop */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 223 = semconfig */ { 0, (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 224 = freebsd7 msgctl */ { AS(msgget_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 225 = msgget */ { AS(msgsnd_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 226 = msgsnd */ { AS(msgrcv_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 227 = msgrcv */ { AS(shmat_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 228 = shmat */ { 0, (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 229 = freebsd7 shmctl */ { AS(shmdt_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 230 = shmdt */ { AS(shmget_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 231 = shmget */ { AS(clock_gettime_args), (sy_call_t *)sys_clock_gettime, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 232 = clock_gettime */ { AS(clock_settime_args), (sy_call_t *)sys_clock_settime, AUE_CLOCK_SETTIME, NULL, 0, 0, 0, SY_THR_STATIC }, /* 233 = clock_settime */ { AS(clock_getres_args), (sy_call_t *)sys_clock_getres, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 234 = clock_getres */ { AS(ktimer_create_args), (sy_call_t *)sys_ktimer_create, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 235 = ktimer_create */ { AS(ktimer_delete_args), (sy_call_t *)sys_ktimer_delete, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 236 = ktimer_delete */ { AS(ktimer_settime_args), (sy_call_t *)sys_ktimer_settime, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 237 = ktimer_settime */ { AS(ktimer_gettime_args), (sy_call_t *)sys_ktimer_gettime, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 238 = ktimer_gettime */ { AS(ktimer_getoverrun_args), (sy_call_t *)sys_ktimer_getoverrun, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 239 = ktimer_getoverrun */ { AS(nanosleep_args), (sy_call_t *)sys_nanosleep, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 240 = nanosleep */ { AS(ffclock_getcounter_args), (sy_call_t *)sys_ffclock_getcounter, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 241 = ffclock_getcounter */ { AS(ffclock_setestimate_args), (sy_call_t *)sys_ffclock_setestimate, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 242 = ffclock_setestimate */ { AS(ffclock_getestimate_args), (sy_call_t *)sys_ffclock_getestimate, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 243 = ffclock_getestimate */ { AS(clock_nanosleep_args), (sy_call_t *)sys_clock_nanosleep, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 244 = clock_nanosleep */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 245 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 246 = nosys */ { AS(clock_getcpuclockid2_args), (sy_call_t *)sys_clock_getcpuclockid2, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 247 = clock_getcpuclockid2 */ { AS(ntp_gettime_args), (sy_call_t *)sys_ntp_gettime, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 248 = ntp_gettime */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 249 = nosys */ { AS(minherit_args), (sy_call_t *)sys_minherit, AUE_MINHERIT, NULL, 0, 0, SYF_CAPENABLED, 
SY_THR_STATIC }, /* 250 = minherit */ { AS(rfork_args), (sy_call_t *)sys_rfork, AUE_RFORK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 251 = rfork */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 252 = obsolete openbsd_poll */ { 0, (sy_call_t *)sys_issetugid, AUE_ISSETUGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 253 = issetugid */ { AS(lchown_args), (sy_call_t *)sys_lchown, AUE_LCHOWN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 254 = lchown */ { AS(aio_read_args), (sy_call_t *)sys_aio_read, AUE_AIO_READ, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 255 = aio_read */ { AS(aio_write_args), (sy_call_t *)sys_aio_write, AUE_AIO_WRITE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 256 = aio_write */ { AS(lio_listio_args), (sy_call_t *)sys_lio_listio, AUE_LIO_LISTIO, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 257 = lio_listio */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 258 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 259 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 260 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 261 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 262 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 263 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 264 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 265 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 266 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 267 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 268 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 269 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 270 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 271 = nosys */ { compat11(AS(freebsd11_getdents_args),getdents), AUE_O_GETDENTS, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 272 = freebsd11 getdents */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 273 = nosys */ { AS(lchmod_args), (sy_call_t *)sys_lchmod, AUE_LCHMOD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 274 = lchmod */ { AS(lchown_args), (sy_call_t *)sys_lchown, AUE_LCHOWN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 275 = netbsd_lchown */ { AS(lutimes_args), (sy_call_t *)sys_lutimes, AUE_LUTIMES, NULL, 0, 0, 0, SY_THR_STATIC }, /* 276 = lutimes */ { AS(msync_args), (sy_call_t *)sys_msync, AUE_MSYNC, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 277 = netbsd_msync */ { compat11(AS(freebsd11_nstat_args),nstat), AUE_STAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 278 = freebsd11 nstat */ { compat11(AS(freebsd11_nfstat_args),nfstat), AUE_FSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 279 = freebsd11 nfstat */ { compat11(AS(freebsd11_nlstat_args),nlstat), AUE_LSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 280 = freebsd11 nlstat */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 281 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 282 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 283 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 284 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 285 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 
0, SY_THR_ABSENT }, /* 286 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 287 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 288 = nosys */ { AS(preadv_args), (sy_call_t *)sys_preadv, AUE_PREADV, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 289 = preadv */ { AS(pwritev_args), (sy_call_t *)sys_pwritev, AUE_PWRITEV, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 290 = pwritev */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 291 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 292 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 293 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 294 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 295 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 296 = nosys */ { compat4(AS(freebsd4_fhstatfs_args),fhstatfs), AUE_FHSTATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 297 = freebsd4 fhstatfs */ { AS(fhopen_args), (sy_call_t *)sys_fhopen, AUE_FHOPEN, NULL, 0, 0, 0, SY_THR_STATIC }, /* 298 = fhopen */ { compat11(AS(freebsd11_fhstat_args),fhstat), AUE_FHSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 299 = freebsd11 fhstat */ { AS(modnext_args), (sy_call_t *)sys_modnext, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 300 = modnext */ { AS(modstat_args), (sy_call_t *)sys_modstat, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 301 = modstat */ { AS(modfnext_args), (sy_call_t *)sys_modfnext, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 302 = modfnext */ { AS(modfind_args), (sy_call_t *)sys_modfind, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 303 = modfind */ { AS(kldload_args), (sy_call_t *)sys_kldload, AUE_MODLOAD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 304 = kldload */ { AS(kldunload_args), (sy_call_t *)sys_kldunload, AUE_MODUNLOAD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 305 = kldunload */ { AS(kldfind_args), (sy_call_t *)sys_kldfind, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 306 = kldfind */ { AS(kldnext_args), (sy_call_t *)sys_kldnext, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 307 = kldnext */ { AS(kldstat_args), (sy_call_t *)sys_kldstat, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 308 = kldstat */ { AS(kldfirstmod_args), (sy_call_t *)sys_kldfirstmod, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 309 = kldfirstmod */ { AS(getsid_args), (sy_call_t *)sys_getsid, AUE_GETSID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 310 = getsid */ { AS(setresuid_args), (sy_call_t *)sys_setresuid, AUE_SETRESUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 311 = setresuid */ { AS(setresgid_args), (sy_call_t *)sys_setresgid, AUE_SETRESGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 312 = setresgid */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 313 = obsolete signanosleep */ { AS(aio_return_args), (sy_call_t *)sys_aio_return, AUE_AIO_RETURN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 314 = aio_return */ { AS(aio_suspend_args), (sy_call_t *)sys_aio_suspend, AUE_AIO_SUSPEND, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 315 = aio_suspend */ { AS(aio_cancel_args), (sy_call_t *)sys_aio_cancel, AUE_AIO_CANCEL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 316 = aio_cancel */ { AS(aio_error_args), (sy_call_t *)sys_aio_error, AUE_AIO_ERROR, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 317 = aio_error */ { compat6(AS(freebsd6_aio_read_args),aio_read), AUE_AIO_READ, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC 
}, /* 318 = freebsd6 aio_read */ { compat6(AS(freebsd6_aio_write_args),aio_write), AUE_AIO_WRITE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 319 = freebsd6 aio_write */ { compat6(AS(freebsd6_lio_listio_args),lio_listio), AUE_LIO_LISTIO, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 320 = freebsd6 lio_listio */ { 0, (sy_call_t *)sys_yield, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 321 = yield */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 322 = obsolete thr_sleep */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 323 = obsolete thr_wakeup */ { AS(mlockall_args), (sy_call_t *)sys_mlockall, AUE_MLOCKALL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 324 = mlockall */ { 0, (sy_call_t *)sys_munlockall, AUE_MUNLOCKALL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 325 = munlockall */ { AS(__getcwd_args), (sy_call_t *)sys___getcwd, AUE_GETCWD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 326 = __getcwd */ { AS(sched_setparam_args), (sy_call_t *)sys_sched_setparam, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 327 = sched_setparam */ { AS(sched_getparam_args), (sy_call_t *)sys_sched_getparam, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 328 = sched_getparam */ { AS(sched_setscheduler_args), (sy_call_t *)sys_sched_setscheduler, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 329 = sched_setscheduler */ { AS(sched_getscheduler_args), (sy_call_t *)sys_sched_getscheduler, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 330 = sched_getscheduler */ { 0, (sy_call_t *)sys_sched_yield, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 331 = sched_yield */ { AS(sched_get_priority_max_args), (sy_call_t *)sys_sched_get_priority_max, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 332 = sched_get_priority_max */ { AS(sched_get_priority_min_args), (sy_call_t *)sys_sched_get_priority_min, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 333 = sched_get_priority_min */ { AS(sched_rr_get_interval_args), (sy_call_t *)sys_sched_rr_get_interval, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 334 = sched_rr_get_interval */ { AS(utrace_args), (sy_call_t *)sys_utrace, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 335 = utrace */ { compat4(AS(freebsd4_sendfile_args),sendfile), AUE_SENDFILE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 336 = freebsd4 sendfile */ { AS(kldsym_args), (sy_call_t *)sys_kldsym, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 337 = kldsym */ { AS(jail_args), (sy_call_t *)sys_jail, AUE_JAIL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 338 = jail */ { AS(nnpfs_syscall_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 339 = nnpfs_syscall */ { AS(sigprocmask_args), (sy_call_t *)sys_sigprocmask, AUE_SIGPROCMASK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 340 = sigprocmask */ { AS(sigsuspend_args), (sy_call_t *)sys_sigsuspend, AUE_SIGSUSPEND, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 341 = sigsuspend */ { compat4(AS(freebsd4_sigaction_args),sigaction), AUE_SIGACTION, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 342 = freebsd4 sigaction */ { AS(sigpending_args), (sy_call_t *)sys_sigpending, AUE_SIGPENDING, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 343 = sigpending */ { compat4(AS(freebsd4_sigreturn_args),sigreturn), AUE_SIGRETURN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 344 = freebsd4 sigreturn */ { AS(sigtimedwait_args), (sy_call_t *)sys_sigtimedwait, AUE_SIGWAIT, NULL, 0, 0, SYF_CAPENABLED, 
SY_THR_STATIC }, /* 345 = sigtimedwait */ { AS(sigwaitinfo_args), (sy_call_t *)sys_sigwaitinfo, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 346 = sigwaitinfo */ { AS(__acl_get_file_args), (sy_call_t *)sys___acl_get_file, AUE_ACL_GET_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 347 = __acl_get_file */ { AS(__acl_set_file_args), (sy_call_t *)sys___acl_set_file, AUE_ACL_SET_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 348 = __acl_set_file */ { AS(__acl_get_fd_args), (sy_call_t *)sys___acl_get_fd, AUE_ACL_GET_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 349 = __acl_get_fd */ { AS(__acl_set_fd_args), (sy_call_t *)sys___acl_set_fd, AUE_ACL_SET_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 350 = __acl_set_fd */ { AS(__acl_delete_file_args), (sy_call_t *)sys___acl_delete_file, AUE_ACL_DELETE_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 351 = __acl_delete_file */ { AS(__acl_delete_fd_args), (sy_call_t *)sys___acl_delete_fd, AUE_ACL_DELETE_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 352 = __acl_delete_fd */ { AS(__acl_aclcheck_file_args), (sy_call_t *)sys___acl_aclcheck_file, AUE_ACL_CHECK_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 353 = __acl_aclcheck_file */ { AS(__acl_aclcheck_fd_args), (sy_call_t *)sys___acl_aclcheck_fd, AUE_ACL_CHECK_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 354 = __acl_aclcheck_fd */ { AS(extattrctl_args), (sy_call_t *)sys_extattrctl, AUE_EXTATTRCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 355 = extattrctl */ { AS(extattr_set_file_args), (sy_call_t *)sys_extattr_set_file, AUE_EXTATTR_SET_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 356 = extattr_set_file */ { AS(extattr_get_file_args), (sy_call_t *)sys_extattr_get_file, AUE_EXTATTR_GET_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 357 = extattr_get_file */ { AS(extattr_delete_file_args), (sy_call_t *)sys_extattr_delete_file, AUE_EXTATTR_DELETE_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 358 = extattr_delete_file */ { AS(aio_waitcomplete_args), (sy_call_t *)sys_aio_waitcomplete, AUE_AIO_WAITCOMPLETE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 359 = aio_waitcomplete */ { AS(getresuid_args), (sy_call_t *)sys_getresuid, AUE_GETRESUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 360 = getresuid */ { AS(getresgid_args), (sy_call_t *)sys_getresgid, AUE_GETRESGID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 361 = getresgid */ { 0, (sy_call_t *)sys_kqueue, AUE_KQUEUE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 362 = kqueue */ { compat11(AS(freebsd11_kevent_args),kevent), AUE_KEVENT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 363 = freebsd11 kevent */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 364 = __cap_get_proc */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 365 = __cap_set_proc */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 366 = __cap_get_fd */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 367 = __cap_get_file */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 368 = __cap_set_fd */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 369 = __cap_set_file */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 370 = nosys */ { AS(extattr_set_fd_args), (sy_call_t *)sys_extattr_set_fd, AUE_EXTATTR_SET_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 371 = extattr_set_fd */ { AS(extattr_get_fd_args), (sy_call_t *)sys_extattr_get_fd, AUE_EXTATTR_GET_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 372 = extattr_get_fd */ { 
AS(extattr_delete_fd_args), (sy_call_t *)sys_extattr_delete_fd, AUE_EXTATTR_DELETE_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 373 = extattr_delete_fd */ { AS(__setugid_args), (sy_call_t *)sys___setugid, AUE_SETUGID, NULL, 0, 0, 0, SY_THR_STATIC }, /* 374 = __setugid */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 375 = nfsclnt */ { AS(eaccess_args), (sy_call_t *)sys_eaccess, AUE_EACCESS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 376 = eaccess */ { AS(afs3_syscall_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 377 = afs3_syscall */ { AS(nmount_args), (sy_call_t *)sys_nmount, AUE_NMOUNT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 378 = nmount */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 379 = kse_exit */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 380 = kse_wakeup */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 381 = kse_create */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 382 = kse_thr_interrupt */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 383 = kse_release */ { AS(__mac_get_proc_args), (sy_call_t *)sys___mac_get_proc, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 384 = __mac_get_proc */ { AS(__mac_set_proc_args), (sy_call_t *)sys___mac_set_proc, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 385 = __mac_set_proc */ { AS(__mac_get_fd_args), (sy_call_t *)sys___mac_get_fd, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 386 = __mac_get_fd */ { AS(__mac_get_file_args), (sy_call_t *)sys___mac_get_file, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 387 = __mac_get_file */ { AS(__mac_set_fd_args), (sy_call_t *)sys___mac_set_fd, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 388 = __mac_set_fd */ { AS(__mac_set_file_args), (sy_call_t *)sys___mac_set_file, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 389 = __mac_set_file */ { AS(kenv_args), (sy_call_t *)sys_kenv, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 390 = kenv */ { AS(lchflags_args), (sy_call_t *)sys_lchflags, AUE_LCHFLAGS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 391 = lchflags */ { AS(uuidgen_args), (sy_call_t *)sys_uuidgen, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 392 = uuidgen */ { AS(sendfile_args), (sy_call_t *)sys_sendfile, AUE_SENDFILE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 393 = sendfile */ { AS(mac_syscall_args), (sy_call_t *)sys_mac_syscall, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 394 = mac_syscall */ { compat11(AS(freebsd11_getfsstat_args),getfsstat), AUE_GETFSSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 395 = freebsd11 getfsstat */ { compat11(AS(freebsd11_statfs_args),statfs), AUE_STATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 396 = freebsd11 statfs */ { compat11(AS(freebsd11_fstatfs_args),fstatfs), AUE_FSTATFS, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 397 = freebsd11 fstatfs */ { compat11(AS(freebsd11_fhstatfs_args),fhstatfs), AUE_FHSTATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 398 = freebsd11 fhstatfs */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 399 = nosys */ { AS(ksem_close_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 400 = ksem_close */ { AS(ksem_post_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 401 = ksem_post */ { AS(ksem_wait_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 402 = ksem_wait */ { AS(ksem_trywait_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 
0, SY_THR_ABSENT }, /* 403 = ksem_trywait */ { AS(ksem_init_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 404 = ksem_init */ { AS(ksem_open_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 405 = ksem_open */ { AS(ksem_unlink_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 406 = ksem_unlink */ { AS(ksem_getvalue_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 407 = ksem_getvalue */ { AS(ksem_destroy_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 408 = ksem_destroy */ { AS(__mac_get_pid_args), (sy_call_t *)sys___mac_get_pid, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 409 = __mac_get_pid */ { AS(__mac_get_link_args), (sy_call_t *)sys___mac_get_link, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 410 = __mac_get_link */ { AS(__mac_set_link_args), (sy_call_t *)sys___mac_set_link, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 411 = __mac_set_link */ { AS(extattr_set_link_args), (sy_call_t *)sys_extattr_set_link, AUE_EXTATTR_SET_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 412 = extattr_set_link */ { AS(extattr_get_link_args), (sy_call_t *)sys_extattr_get_link, AUE_EXTATTR_GET_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 413 = extattr_get_link */ { AS(extattr_delete_link_args), (sy_call_t *)sys_extattr_delete_link, AUE_EXTATTR_DELETE_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 414 = extattr_delete_link */ { AS(__mac_execve_args), (sy_call_t *)sys___mac_execve, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 415 = __mac_execve */ { AS(sigaction_args), (sy_call_t *)sys_sigaction, AUE_SIGACTION, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 416 = sigaction */ { AS(sigreturn_args), (sy_call_t *)sys_sigreturn, AUE_SIGRETURN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 417 = sigreturn */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 418 = __xstat */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 419 = __xfstat */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 420 = __xlstat */ { AS(getcontext_args), (sy_call_t *)sys_getcontext, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 421 = getcontext */ { AS(setcontext_args), (sy_call_t *)sys_setcontext, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 422 = setcontext */ { AS(swapcontext_args), (sy_call_t *)sys_swapcontext, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 423 = swapcontext */ { AS(swapoff_args), (sy_call_t *)sys_swapoff, AUE_SWAPOFF, NULL, 0, 0, 0, SY_THR_STATIC }, /* 424 = swapoff */ { AS(__acl_get_link_args), (sy_call_t *)sys___acl_get_link, AUE_ACL_GET_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 425 = __acl_get_link */ { AS(__acl_set_link_args), (sy_call_t *)sys___acl_set_link, AUE_ACL_SET_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 426 = __acl_set_link */ { AS(__acl_delete_link_args), (sy_call_t *)sys___acl_delete_link, AUE_ACL_DELETE_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 427 = __acl_delete_link */ { AS(__acl_aclcheck_link_args), (sy_call_t *)sys___acl_aclcheck_link, AUE_ACL_CHECK_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 428 = __acl_aclcheck_link */ { AS(sigwait_args), (sy_call_t *)sys_sigwait, AUE_SIGWAIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 429 = sigwait */ { AS(thr_create_args), (sy_call_t *)sys_thr_create, AUE_THR_CREATE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 430 = thr_create */ { AS(thr_exit_args), (sy_call_t *)sys_thr_exit, AUE_THR_EXIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 431 = thr_exit 
*/ { AS(thr_self_args), (sy_call_t *)sys_thr_self, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 432 = thr_self */ { AS(thr_kill_args), (sy_call_t *)sys_thr_kill, AUE_THR_KILL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 433 = thr_kill */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 434 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 435 = nosys */ { AS(jail_attach_args), (sy_call_t *)sys_jail_attach, AUE_JAIL_ATTACH, NULL, 0, 0, 0, SY_THR_STATIC }, /* 436 = jail_attach */ { AS(extattr_list_fd_args), (sy_call_t *)sys_extattr_list_fd, AUE_EXTATTR_LIST_FD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 437 = extattr_list_fd */ { AS(extattr_list_file_args), (sy_call_t *)sys_extattr_list_file, AUE_EXTATTR_LIST_FILE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 438 = extattr_list_file */ { AS(extattr_list_link_args), (sy_call_t *)sys_extattr_list_link, AUE_EXTATTR_LIST_LINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 439 = extattr_list_link */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 440 = kse_switchin */ { AS(ksem_timedwait_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 441 = ksem_timedwait */ { AS(thr_suspend_args), (sy_call_t *)sys_thr_suspend, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 442 = thr_suspend */ { AS(thr_wake_args), (sy_call_t *)sys_thr_wake, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 443 = thr_wake */ { AS(kldunloadf_args), (sy_call_t *)sys_kldunloadf, AUE_MODUNLOAD, NULL, 0, 0, 0, SY_THR_STATIC }, /* 444 = kldunloadf */ { AS(audit_args), (sy_call_t *)sys_audit, AUE_AUDIT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 445 = audit */ { AS(auditon_args), (sy_call_t *)sys_auditon, AUE_AUDITON, NULL, 0, 0, 0, SY_THR_STATIC }, /* 446 = auditon */ { AS(getauid_args), (sy_call_t *)sys_getauid, AUE_GETAUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 447 = getauid */ { AS(setauid_args), (sy_call_t *)sys_setauid, AUE_SETAUID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 448 = setauid */ { AS(getaudit_args), (sy_call_t *)sys_getaudit, AUE_GETAUDIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 449 = getaudit */ { AS(setaudit_args), (sy_call_t *)sys_setaudit, AUE_SETAUDIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 450 = setaudit */ { AS(getaudit_addr_args), (sy_call_t *)sys_getaudit_addr, AUE_GETAUDIT_ADDR, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 451 = getaudit_addr */ { AS(setaudit_addr_args), (sy_call_t *)sys_setaudit_addr, AUE_SETAUDIT_ADDR, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 452 = setaudit_addr */ { AS(auditctl_args), (sy_call_t *)sys_auditctl, AUE_AUDITCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 453 = auditctl */ { AS(_umtx_op_args), (sy_call_t *)sys__umtx_op, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 454 = _umtx_op */ { AS(thr_new_args), (sy_call_t *)sys_thr_new, AUE_THR_NEW, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 455 = thr_new */ { AS(sigqueue_args), (sy_call_t *)sys_sigqueue, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 456 = sigqueue */ { AS(kmq_open_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 457 = kmq_open */ { AS(kmq_setattr_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 458 = kmq_setattr */ { AS(kmq_timedreceive_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 459 = kmq_timedreceive */ { AS(kmq_timedsend_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 
SYF_CAPENABLED, SY_THR_ABSENT }, /* 460 = kmq_timedsend */ { AS(kmq_notify_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 461 = kmq_notify */ { AS(kmq_unlink_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 462 = kmq_unlink */ { AS(abort2_args), (sy_call_t *)sys_abort2, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 463 = abort2 */ { AS(thr_set_name_args), (sy_call_t *)sys_thr_set_name, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 464 = thr_set_name */ { AS(aio_fsync_args), (sy_call_t *)sys_aio_fsync, AUE_AIO_FSYNC, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 465 = aio_fsync */ { AS(rtprio_thread_args), (sy_call_t *)sys_rtprio_thread, AUE_RTPRIO, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 466 = rtprio_thread */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 467 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 468 = nosys */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 469 = __getpath_fromfd */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 470 = __getpath_fromaddr */ { AS(sctp_peeloff_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 471 = sctp_peeloff */ { AS(sctp_generic_sendmsg_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 472 = sctp_generic_sendmsg */ { AS(sctp_generic_sendmsg_iov_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 473 = sctp_generic_sendmsg_iov */ { AS(sctp_generic_recvmsg_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_ABSENT }, /* 474 = sctp_generic_recvmsg */ { AS(pread_args), (sy_call_t *)sys_pread, AUE_PREAD, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 475 = pread */ { AS(pwrite_args), (sy_call_t *)sys_pwrite, AUE_PWRITE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 476 = pwrite */ { AS(mmap_args), (sy_call_t *)sys_mmap, AUE_MMAP, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 477 = mmap */ { AS(lseek_args), (sy_call_t *)sys_lseek, AUE_LSEEK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 478 = lseek */ { AS(truncate_args), (sy_call_t *)sys_truncate, AUE_TRUNCATE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 479 = truncate */ { AS(ftruncate_args), (sy_call_t *)sys_ftruncate, AUE_FTRUNCATE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 480 = ftruncate */ { AS(thr_kill2_args), (sy_call_t *)sys_thr_kill2, AUE_THR_KILL2, NULL, 0, 0, 0, SY_THR_STATIC }, /* 481 = thr_kill2 */ { AS(shm_open_args), (sy_call_t *)sys_shm_open, AUE_SHMOPEN, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 482 = shm_open */ { AS(shm_unlink_args), (sy_call_t *)sys_shm_unlink, AUE_SHMUNLINK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 483 = shm_unlink */ { AS(cpuset_args), (sy_call_t *)sys_cpuset, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 484 = cpuset */ { AS(cpuset_setid_args), (sy_call_t *)sys_cpuset_setid, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 485 = cpuset_setid */ { AS(cpuset_getid_args), (sy_call_t *)sys_cpuset_getid, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 486 = cpuset_getid */ { AS(cpuset_getaffinity_args), (sy_call_t *)sys_cpuset_getaffinity, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 487 = cpuset_getaffinity */ { AS(cpuset_setaffinity_args), (sy_call_t *)sys_cpuset_setaffinity, AUE_NULL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 488 = cpuset_setaffinity */ { AS(faccessat_args), (sy_call_t *)sys_faccessat, 
AUE_FACCESSAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 489 = faccessat */ { AS(fchmodat_args), (sy_call_t *)sys_fchmodat, AUE_FCHMODAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 490 = fchmodat */ { AS(fchownat_args), (sy_call_t *)sys_fchownat, AUE_FCHOWNAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 491 = fchownat */ { AS(fexecve_args), (sy_call_t *)sys_fexecve, AUE_FEXECVE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 492 = fexecve */ { compat11(AS(freebsd11_fstatat_args),fstatat), AUE_FSTATAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 493 = freebsd11 fstatat */ { AS(futimesat_args), (sy_call_t *)sys_futimesat, AUE_FUTIMESAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 494 = futimesat */ { AS(linkat_args), (sy_call_t *)sys_linkat, AUE_LINKAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 495 = linkat */ { AS(mkdirat_args), (sy_call_t *)sys_mkdirat, AUE_MKDIRAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 496 = mkdirat */ { AS(mkfifoat_args), (sy_call_t *)sys_mkfifoat, AUE_MKFIFOAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 497 = mkfifoat */ { compat11(AS(freebsd11_mknodat_args),mknodat), AUE_MKNODAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 498 = freebsd11 mknodat */ { AS(openat_args), (sy_call_t *)sys_openat, AUE_OPENAT_RWTC, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 499 = openat */ { AS(readlinkat_args), (sy_call_t *)sys_readlinkat, AUE_READLINKAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 500 = readlinkat */ { AS(renameat_args), (sy_call_t *)sys_renameat, AUE_RENAMEAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 501 = renameat */ { AS(symlinkat_args), (sy_call_t *)sys_symlinkat, AUE_SYMLINKAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 502 = symlinkat */ { AS(unlinkat_args), (sy_call_t *)sys_unlinkat, AUE_UNLINKAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 503 = unlinkat */ { AS(posix_openpt_args), (sy_call_t *)sys_posix_openpt, AUE_POSIX_OPENPT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 504 = posix_openpt */ { AS(gssd_syscall_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 505 = gssd_syscall */ { AS(jail_get_args), (sy_call_t *)sys_jail_get, AUE_JAIL_GET, NULL, 0, 0, 0, SY_THR_STATIC }, /* 506 = jail_get */ { AS(jail_set_args), (sy_call_t *)sys_jail_set, AUE_JAIL_SET, NULL, 0, 0, 0, SY_THR_STATIC }, /* 507 = jail_set */ { AS(jail_remove_args), (sy_call_t *)sys_jail_remove, AUE_JAIL_REMOVE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 508 = jail_remove */ { AS(closefrom_args), (sy_call_t *)sys_closefrom, AUE_CLOSEFROM, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 509 = closefrom */ { AS(__semctl_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 510 = __semctl */ { AS(msgctl_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 511 = msgctl */ { AS(shmctl_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 512 = shmctl */ { AS(lpathconf_args), (sy_call_t *)sys_lpathconf, AUE_LPATHCONF, NULL, 0, 0, 0, SY_THR_STATIC }, /* 513 = lpathconf */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 514 = obsolete cap_new */ { AS(__cap_rights_get_args), (sy_call_t *)sys___cap_rights_get, AUE_CAP_RIGHTS_GET, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 515 = __cap_rights_get */ { 0, (sy_call_t *)sys_cap_enter, AUE_CAP_ENTER, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 516 = cap_enter */ { AS(cap_getmode_args), (sy_call_t *)sys_cap_getmode, AUE_CAP_GETMODE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 517 = 
cap_getmode */ { AS(pdfork_args), (sy_call_t *)sys_pdfork, AUE_PDFORK, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 518 = pdfork */ { AS(pdkill_args), (sy_call_t *)sys_pdkill, AUE_PDKILL, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 519 = pdkill */ { AS(pdgetpid_args), (sy_call_t *)sys_pdgetpid, AUE_PDGETPID, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 520 = pdgetpid */ { 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 521 = pdwait4 */ { AS(pselect_args), (sy_call_t *)sys_pselect, AUE_SELECT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 522 = pselect */ { AS(getloginclass_args), (sy_call_t *)sys_getloginclass, AUE_GETLOGINCLASS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 523 = getloginclass */ { AS(setloginclass_args), (sy_call_t *)sys_setloginclass, AUE_SETLOGINCLASS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 524 = setloginclass */ { AS(rctl_get_racct_args), (sy_call_t *)sys_rctl_get_racct, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 525 = rctl_get_racct */ { AS(rctl_get_rules_args), (sy_call_t *)sys_rctl_get_rules, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 526 = rctl_get_rules */ { AS(rctl_get_limits_args), (sy_call_t *)sys_rctl_get_limits, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 527 = rctl_get_limits */ { AS(rctl_add_rule_args), (sy_call_t *)sys_rctl_add_rule, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 528 = rctl_add_rule */ { AS(rctl_remove_rule_args), (sy_call_t *)sys_rctl_remove_rule, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 529 = rctl_remove_rule */ { AS(posix_fallocate_args), (sy_call_t *)sys_posix_fallocate, AUE_POSIX_FALLOCATE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 530 = posix_fallocate */ { AS(posix_fadvise_args), (sy_call_t *)sys_posix_fadvise, AUE_POSIX_FADVISE, NULL, 0, 0, 0, SY_THR_STATIC }, /* 531 = posix_fadvise */ { AS(wait6_args), (sy_call_t *)sys_wait6, AUE_WAIT6, NULL, 0, 0, 0, SY_THR_STATIC }, /* 532 = wait6 */ { AS(cap_rights_limit_args), (sy_call_t *)sys_cap_rights_limit, AUE_CAP_RIGHTS_LIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 533 = cap_rights_limit */ { AS(cap_ioctls_limit_args), (sy_call_t *)sys_cap_ioctls_limit, AUE_CAP_IOCTLS_LIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 534 = cap_ioctls_limit */ { AS(cap_ioctls_get_args), (sy_call_t *)sys_cap_ioctls_get, AUE_CAP_IOCTLS_GET, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 535 = cap_ioctls_get */ { AS(cap_fcntls_limit_args), (sy_call_t *)sys_cap_fcntls_limit, AUE_CAP_FCNTLS_LIMIT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 536 = cap_fcntls_limit */ { AS(cap_fcntls_get_args), (sy_call_t *)sys_cap_fcntls_get, AUE_CAP_FCNTLS_GET, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 537 = cap_fcntls_get */ { AS(bindat_args), (sy_call_t *)sys_bindat, AUE_BINDAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 538 = bindat */ { AS(connectat_args), (sy_call_t *)sys_connectat, AUE_CONNECTAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 539 = connectat */ { AS(chflagsat_args), (sy_call_t *)sys_chflagsat, AUE_CHFLAGSAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 540 = chflagsat */ { AS(accept4_args), (sy_call_t *)sys_accept4, AUE_ACCEPT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 541 = accept4 */ { AS(pipe2_args), (sy_call_t *)sys_pipe2, AUE_PIPE, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 542 = pipe2 */ { AS(aio_mlock_args), (sy_call_t *)sys_aio_mlock, AUE_AIO_MLOCK, NULL, 0, 0, 0, SY_THR_STATIC }, /* 543 = aio_mlock */ { AS(procctl_args), (sy_call_t *)sys_procctl, AUE_PROCCTL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 544 = procctl */ { 
AS(ppoll_args), (sy_call_t *)sys_ppoll, AUE_POLL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 545 = ppoll */ { AS(futimens_args), (sy_call_t *)sys_futimens, AUE_FUTIMES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 546 = futimens */ { AS(utimensat_args), (sy_call_t *)sys_utimensat, AUE_FUTIMESAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 547 = utimensat */ { AS(numa_getaffinity_args), (sy_call_t *)sys_numa_getaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 548 = numa_getaffinity */ { AS(numa_setaffinity_args), (sy_call_t *)sys_numa_setaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 549 = numa_setaffinity */ { AS(fdatasync_args), (sy_call_t *)sys_fdatasync, AUE_FSYNC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 550 = fdatasync */ { AS(fstat_args), (sy_call_t *)sys_fstat, AUE_FSTAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 551 = fstat */ { AS(fstatat_args), (sy_call_t *)sys_fstatat, AUE_FSTATAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 552 = fstatat */ { AS(fhstat_args), (sy_call_t *)sys_fhstat, AUE_FHSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 553 = fhstat */ { AS(getdirentries_args), (sy_call_t *)sys_getdirentries, AUE_GETDIRENTRIES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 554 = getdirentries */ { AS(statfs_args), (sy_call_t *)sys_statfs, AUE_STATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 555 = statfs */ { AS(fstatfs_args), (sy_call_t *)sys_fstatfs, AUE_FSTATFS, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 556 = fstatfs */ { AS(getfsstat_args), (sy_call_t *)sys_getfsstat, AUE_GETFSSTAT, NULL, 0, 0, 0, SY_THR_STATIC }, /* 557 = getfsstat */ { AS(fhstatfs_args), (sy_call_t *)sys_fhstatfs, AUE_FHSTATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 558 = fhstatfs */ { AS(mknodat_args), (sy_call_t *)sys_mknodat, AUE_MKNODAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 559 = mknodat */ { AS(kevent_args), (sy_call_t *)sys_kevent, AUE_KEVENT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 560 = kevent */ + { AS(cpuset_getdomain_args), (sy_call_t *)sys_cpuset_getdomain, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 561 = cpuset_getdomain */ + { AS(cpuset_setdomain_args), (sy_call_t *)sys_cpuset_setdomain, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 562 = cpuset_setdomain */ }; Index: user/jeff/numa/sys/kern/kern_cpuset.c =================================================================== --- user/jeff/numa/sys/kern/kern_cpuset.c (revision 326888) +++ user/jeff/numa/sys/kern/kern_cpuset.c (revision 326889) @@ -1,1351 +1,2154 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, Jeffrey Roberson * All rights reserved. * * Copyright (c) 2008 Nokia Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include +#include #include #include +#include #include #include +#include #ifdef DDB #include #endif /* DDB */ /* * cpusets provide a mechanism for creating and manipulating sets of * processors for the purpose of constraining the scheduling of threads to * specific processors. * * Each process belongs to an identified set, by default this is set 1. Each * thread may further restrict the cpus it may run on to a subset of this * named set. This creates an anonymous set which other threads and processes * may not join by number. * * The named set is referred to herein as the 'base' set to avoid ambiguity. * This set is usually a child of a 'root' set while the anonymous set may * simply be referred to as a mask. In the syscall api these are referred to * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here. * * Threads inherit their set from their creator whether it be anonymous or * not. This means that anonymous sets are immutable because they may be * shared. To modify an anonymous set a new set is created with the desired * mask and the same parent as the existing anonymous set. This gives the * illusion of each thread having a private mask. * * Via the syscall apis a user may ask to retrieve or modify the root, base, * or mask that is discovered via a pid, tid, or setid. Modifying a set * modifies all numbered and anonymous child sets to comply with the new mask. * Modifying a pid or tid's mask applies only to that tid but must still * exist within the assigned parent set. * * A thread may not be assigned to a group separate from other threads in * the process. This is to remove ambiguity when the setid is queried with * a pid argument. There is no other technical limitation. * * This somewhat complex arrangement is intended to make it easy for * applications to query available processors and bind their threads to * specific processors while also allowing administrators to dynamically * reprovision by changing sets which apply to groups of processes. * * A simple application should not concern itself with sets at all and * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id * meaning 'curthread'. It may query available cpus for that tid with a * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...). 
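/*
 * Illustrative userland sketch (not part of this change): query the
 * base set of the current process with the (CPU_LEVEL_CPUSET,
 * CPU_WHICH_PID, -1) form mentioned above, then restrict only the
 * calling thread to the first CPU found.  Uses the existing
 * cpuset_getaffinity(2)/cpuset_setaffinity(2) interfaces.
 */
#include <sys/param.h>
#include <sys/cpuset.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	cpuset_t base, mine;
	int cpu;

	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
	    sizeof(base), &base) != 0)
		err(1, "cpuset_getaffinity");

	CPU_ZERO(&mine);
	for (cpu = 0; cpu < CPU_MAXSIZE; cpu++)
		if (CPU_ISSET(cpu, &base))
			break;
	if (cpu == CPU_MAXSIZE)
		errx(1, "base set is empty");
	CPU_SET(cpu, &mine);

	/* Anonymous per-thread mask: -1 means the calling thread. */
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mine), &mine) != 0)
		err(1, "cpuset_setaffinity");
	printf("pinned to cpu %d\n", cpu);
	return (0);
}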
*/ static uma_zone_t cpuset_zone; +static uma_zone_t domainset_zone; static struct mtx cpuset_lock; static struct setlist cpuset_ids; +static struct domainlist cpuset_domains; static struct unrhdr *cpuset_unr; static struct cpuset *cpuset_zero, *cpuset_default; /* Return the size of cpuset_t at the kernel level */ SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD, SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)"); cpuset_t *cpuset_root; cpuset_t cpuset_domain[MAXMEMDOM]; /* + * Find the first non-anonymous set starting from 'set'. + */ +static struct cpuset * +cpuset_getbase(struct cpuset *set) +{ + + if (set->cs_id == CPUSET_INVALID) + set = set->cs_parent; + return (set); +} + +/* + * Walks up the tree from 'set' to find the root. + */ +static struct cpuset * +cpuset_getroot(struct cpuset *set) +{ + + while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL) + set = set->cs_parent; + return (set); +} + +/* * Acquire a reference to a cpuset, all pointers must be tracked with refs. */ struct cpuset * cpuset_ref(struct cpuset *set) { refcount_acquire(&set->cs_ref); return (set); } /* * Walks up the tree from 'set' to find the root. Returns the root * referenced. */ static struct cpuset * cpuset_refroot(struct cpuset *set) { - for (; set->cs_parent != NULL; set = set->cs_parent) - if (set->cs_flags & CPU_SET_ROOT) - break; - cpuset_ref(set); - - return (set); + return cpuset_ref(cpuset_getroot(set)); } /* * Find the first non-anonymous set starting from 'set'. Returns this set * referenced. May return the passed in set with an extra ref if it is * not anonymous. */ static struct cpuset * cpuset_refbase(struct cpuset *set) { - if (set->cs_id == CPUSET_INVALID) - set = set->cs_parent; - cpuset_ref(set); - - return (set); + return cpuset_ref(cpuset_getbase(set)); } /* * Release a reference in a context where it is safe to allocate. */ void cpuset_rel(struct cpuset *set) { cpusetid_t id; if (refcount_release(&set->cs_ref) == 0) return; mtx_lock_spin(&cpuset_lock); LIST_REMOVE(set, cs_siblings); id = set->cs_id; if (id != CPUSET_INVALID) LIST_REMOVE(set, cs_link); mtx_unlock_spin(&cpuset_lock); cpuset_rel(set->cs_parent); uma_zfree(cpuset_zone, set); if (id != CPUSET_INVALID) free_unr(cpuset_unr, id); } /* * Deferred release must be used when in a context that is not safe to * allocate/free. This places any unreferenced sets on the list 'head'. */ static void cpuset_rel_defer(struct setlist *head, struct cpuset *set) { if (refcount_release(&set->cs_ref) == 0) return; mtx_lock_spin(&cpuset_lock); LIST_REMOVE(set, cs_siblings); if (set->cs_id != CPUSET_INVALID) LIST_REMOVE(set, cs_link); LIST_INSERT_HEAD(head, set, cs_link); mtx_unlock_spin(&cpuset_lock); } /* * Complete a deferred release. Removes the set from the list provided to * cpuset_rel_defer. */ static void cpuset_rel_complete(struct cpuset *set) { LIST_REMOVE(set, cs_link); cpuset_rel(set->cs_parent); uma_zfree(cpuset_zone, set); } /* * Find a set based on an id. Returns it with a ref. 
*/ static struct cpuset * cpuset_lookup(cpusetid_t setid, struct thread *td) { struct cpuset *set; if (setid == CPUSET_INVALID) return (NULL); mtx_lock_spin(&cpuset_lock); LIST_FOREACH(set, &cpuset_ids, cs_link) if (set->cs_id == setid) break; if (set) cpuset_ref(set); mtx_unlock_spin(&cpuset_lock); KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__)); if (set != NULL && jailed(td->td_ucred)) { struct cpuset *jset, *tset; jset = td->td_ucred->cr_prison->pr_cpuset; for (tset = set; tset != NULL; tset = tset->cs_parent) if (tset == jset) break; if (tset == NULL) { cpuset_rel(set); set = NULL; } } return (set); } /* * Create a set in the space provided in 'set' with the provided parameters. * The set is returned with a single ref. May return EDEADLK if the set * will have no valid cpu based on restrictions from the parent. */ static int -_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask, - cpusetid_t id) +_cpuset_create(struct cpuset *set, struct cpuset *parent, + const cpuset_t *mask, struct domainset *domain, cpusetid_t id) { + if (domain == NULL) + domain = parent->cs_domain; + if (mask == NULL) + mask = &parent->cs_mask; if (!CPU_OVERLAP(&parent->cs_mask, mask)) return (EDEADLK); + /* The domain must be prepared ahead of time. */ + if (!DOMAINSET_SUBSET(&parent->cs_domain->ds_mask, &domain->ds_mask)) + return (EDEADLK); CPU_COPY(mask, &set->cs_mask); LIST_INIT(&set->cs_children); refcount_init(&set->cs_ref, 1); set->cs_flags = 0; mtx_lock_spin(&cpuset_lock); + set->cs_domain = domain; CPU_AND(&set->cs_mask, &parent->cs_mask); set->cs_id = id; set->cs_parent = cpuset_ref(parent); LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings); if (set->cs_id != CPUSET_INVALID) LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); mtx_unlock_spin(&cpuset_lock); return (0); } /* * Create a new non-anonymous set with the requested parent and mask. May * return failures if the mask is invalid or a new number can not be * allocated. 
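/*
 * Toy model (not part of this change) of the validity checks performed
 * by _cpuset_create() above: a child's CPU mask must overlap its
 * parent's, and its domain mask must be a subset of the parent's,
 * otherwise the kernel returns EDEADLK.  Plain 64-bit masks stand in
 * for cpuset_t and domainset_t here.
 */
#include <stdint.h>

static int
child_masks_valid(uint64_t parent_cpus, uint64_t child_cpus,
    uint64_t parent_domains, uint64_t child_domains)
{

	if ((parent_cpus & child_cpus) == 0)
		return (0);		/* no CPU left to run on */
	if ((child_domains & ~parent_domains) != 0)
		return (0);		/* domain outside the parent */
	return (1);
}

int
main(void)
{
	/* Parent allows CPUs 0-3, domains {0,1}; child wants CPU 2, domain 1. */
	return (child_masks_valid(0xf, 0x4, 0x3, 0x2) ? 0 : 1);
}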
*/ static int cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask) { struct cpuset *set; cpusetid_t id; int error; id = alloc_unr(cpuset_unr); if (id == -1) return (ENFILE); - *setp = set = uma_zalloc(cpuset_zone, M_WAITOK); - error = _cpuset_create(set, parent, mask, id); + *setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); + error = _cpuset_create(set, parent, mask, NULL, id); if (error == 0) return (0); free_unr(cpuset_unr, id); uma_zfree(cpuset_zone, set); return (error); } +static void +cpuset_freelist_add(struct setlist *list, int count) +{ + struct cpuset *set; + int i; + + for (i = 0; i < count; i++) { + set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK); + LIST_INSERT_HEAD(list, set, cs_link); + } +} + +static void +cpuset_freelist_init(struct setlist *list, int count) +{ + + LIST_INIT(list); + cpuset_freelist_add(list, count); +} + +static void +cpuset_freelist_free(struct setlist *list) +{ + struct cpuset *set; + + while ((set = LIST_FIRST(list)) != NULL) { + LIST_REMOVE(set, cs_link); + uma_zfree(cpuset_zone, set); + } +} + +static void +domainset_freelist_add(struct domainlist *list, int count) +{ + struct domainset *set; + int i; + + for (i = 0; i < count; i++) { + set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK); + LIST_INSERT_HEAD(list, set, ds_link); + } +} + +static void +domainset_freelist_init(struct domainlist *list, int count) +{ + + LIST_INIT(list); + domainset_freelist_add(list, count); +} + +static void +domainset_freelist_free(struct domainlist *list) +{ + struct domainset *set; + + while ((set = LIST_FIRST(list)) != NULL) { + LIST_REMOVE(set, ds_link); + uma_zfree(domainset_zone, set); + } +} + +/* Copy a domainset preserving mask and policy. */ +static void +domainset_copy(const struct domainset *from, struct domainset *to) +{ + + DOMAINSET_COPY(&from->ds_mask, &to->ds_mask); + to->ds_policy = from->ds_policy; +} + +/* Return 1 if mask and policy are equal, otherwise 0. */ +static int +domainset_equal(const struct domainset *one, const struct domainset *two) +{ + + return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 && + one->ds_policy == two->ds_policy); +} + /* + * Lookup or create a domainset. The key is provided in ds_mask and + * ds_policy. If the domainset does not yet exist the storage in + * 'domain' is used to insert. Otherwise this storage is freed to the + * domainset_zone and the existing domainset is returned. + */ +static struct domainset * +_domainset_create(struct domainset *domain, struct domainlist *freelist) +{ + struct domainset *ndomain; + + mtx_lock_spin(&cpuset_lock); + LIST_FOREACH(ndomain, &cpuset_domains, ds_link) + if (domainset_equal(ndomain, domain)) + break; + /* + * If the domain does not yet exist we insert it and initialize + * various iteration helpers which are not part of the key. + */ + if (ndomain == NULL) { + LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link); + domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask); + domain->ds_max = DOMAINSET_FLS(&domain->ds_mask) + 1; + } + mtx_unlock_spin(&cpuset_lock); + if (ndomain == NULL) + return (domain); + if (freelist != NULL) + LIST_INSERT_HEAD(freelist, domain, ds_link); + else + uma_zfree(domainset_zone, domain); + return (ndomain); + +} + +/* + * Create or lookup a domainset based on the key held in 'domain'. 
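/*
 * Sketch (not part of this change) of the interning scheme used by
 * _domainset_create()/domainset_create(): the caller builds a key in
 * its own storage, the global list is searched under a lock, and the
 * storage is either inserted or released while the existing object is
 * returned.  The struct, lock and helper names below are placeholders,
 * not the kernel's; a pthread mutex stands in for cpuset_lock.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct dset {
	uint64_t	mask;		/* key */
	int		policy;		/* key */
	struct dset	*next;
};

static struct dset *dset_list;
static pthread_mutex_t dset_lock = PTHREAD_MUTEX_INITIALIZER;

static struct dset *
dset_intern(struct dset *key)
{
	struct dset *d;

	pthread_mutex_lock(&dset_lock);
	for (d = dset_list; d != NULL; d = d->next)
		if (d->mask == key->mask && d->policy == key->policy)
			break;
	if (d == NULL) {
		key->next = dset_list;
		dset_list = key;
	}
	pthread_mutex_unlock(&dset_lock);
	if (d == NULL)
		return (key);		/* inserted the caller's storage */
	free(key);			/* duplicate: reuse the shared copy */
	return (d);
}

int
main(void)
{
	struct dset *a, *b;

	a = calloc(1, sizeof(*a));
	a->mask = 0x3; a->policy = 1;
	a = dset_intern(a);
	b = calloc(1, sizeof(*b));
	b->mask = 0x3; b->policy = 1;
	b = dset_intern(b);
	return (a == b ? 0 : 1);	/* equal keys share one object */
}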
+ */ +static struct domainset * +domainset_create(const struct domainset *domain) +{ + struct domainset *ndomain; + + ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO); + domainset_copy(domain, ndomain); + return _domainset_create(ndomain, NULL); +} + +/* + * Update thread domainset pointers. + */ +static void +domainset_notify(void) +{ + struct thread *td; + struct proc *p; + + sx_slock(&allproc_lock); + FOREACH_PROC_IN_SYSTEM(p) { + PROC_LOCK(p); + if (p->p_state == PRS_NEW) { + PROC_UNLOCK(p); + continue; + } + FOREACH_THREAD_IN_PROC(p, td) { + thread_lock(td); + td->td_domain.dr_policy = td->td_cpuset->cs_domain; + thread_unlock(td); + } + PROC_UNLOCK(p); + } + sx_sunlock(&allproc_lock); + kernel_object->domain.dr_policy = cpuset_default->cs_domain; +} + +/* + * Create a new set that is a subset of a parent. + */ +static struct domainset * +domainset_shadow(const struct domainset *pdomain, + const struct domainset *domain, struct domainlist *freelist) +{ + struct domainset *ndomain; + + ndomain = LIST_FIRST(freelist); + LIST_REMOVE(ndomain, ds_link); + + /* + * Initialize the key from the request. + */ + domainset_copy(domain, ndomain); + + /* + * Restrict the key by the parent. + */ + DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask); + + return _domainset_create(ndomain, freelist); +} + +/* * Recursively check for errors that would occur from applying mask to * the tree of sets starting at 'set'. Checks for sets that would become * empty as well as RDONLY flags. */ static int cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask) { struct cpuset *nset; cpuset_t newmask; int error; mtx_assert(&cpuset_lock, MA_OWNED); if (set->cs_flags & CPU_SET_RDONLY) return (EPERM); if (check_mask) { if (!CPU_OVERLAP(&set->cs_mask, mask)) return (EDEADLK); CPU_COPY(&set->cs_mask, &newmask); CPU_AND(&newmask, mask); } else CPU_COPY(mask, &newmask); error = 0; LIST_FOREACH(nset, &set->cs_children, cs_siblings) if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0) break; return (error); } /* * Applies the mask 'mask' without checking for empty sets or permissions. */ static void cpuset_update(struct cpuset *set, cpuset_t *mask) { struct cpuset *nset; mtx_assert(&cpuset_lock, MA_OWNED); CPU_AND(&set->cs_mask, mask); LIST_FOREACH(nset, &set->cs_children, cs_siblings) cpuset_update(nset, &set->cs_mask); return; } /* * Modify the set 'set' to use a copy of the mask provided. Apply this new * mask to restrict all children in the tree. Checks for validity before * applying the changes. */ static int cpuset_modify(struct cpuset *set, cpuset_t *mask) { struct cpuset *root; int error; error = priv_check(curthread, PRIV_SCHED_CPUSET); if (error) return (error); /* * In case we are called from within the jail * we do not allow modifying the dedicated root * cpuset of the jail but may still allow to * change child sets. */ if (jailed(curthread->td_ucred) && set->cs_flags & CPU_SET_ROOT) return (EPERM); /* * Verify that we have access to this set of * cpus. */ - root = set->cs_parent; - if (root && !CPU_SUBSET(&root->cs_mask, mask)) - return (EINVAL); + root = cpuset_getroot(set); mtx_lock_spin(&cpuset_lock); + if (root && !CPU_SUBSET(&root->cs_mask, mask)) { + error = EINVAL; + goto out; + } error = cpuset_testupdate(set, mask, 0); if (error) goto out; CPU_COPY(mask, &set->cs_mask); cpuset_update(set, mask); out: mtx_unlock_spin(&cpuset_lock); return (error); } /* + * Recursively check for errors that would occur from applying mask to + * the tree of sets starting at 'set'. 
Checks for sets that would become + * empty as well as RDONLY flags. + */ +static int +cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset, + struct domainset *orig, int *count, int check_mask) +{ + struct cpuset *nset; + struct domainset *domain; + struct domainset newset; + int error; + + mtx_assert(&cpuset_lock, MA_OWNED); + if (set->cs_flags & CPU_SET_RDONLY) + return (EPERM); + domain = set->cs_domain; + domainset_copy(domain, &newset); + if (!domainset_equal(domain, orig)) { + if (!DOMAINSET_OVERLAP(&domain->ds_mask, &dset->ds_mask)) + return (EDEADLK); + DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask); + /* Count the number of domains that are changing. */ + (*count)++; + } + error = 0; + LIST_FOREACH(nset, &set->cs_children, cs_siblings) + if ((error = cpuset_testupdate_domain(nset, &newset, domain, + count, 1)) != 0) + break; + return (error); +} + +/* + * Applies the mask 'mask' without checking for empty sets or permissions. + */ +static void +cpuset_update_domain(struct cpuset *set, struct domainset *domain, + struct domainset *orig, struct domainlist *domains) +{ + struct cpuset *nset; + + mtx_assert(&cpuset_lock, MA_OWNED); + /* + * If this domainset has changed from the parent we must calculate + * a new set. Otherwise it simply inherits from the parent. When + * we inherit from the parent we get a new mask and policy. If the + * set is modified from the parent we keep the policy and only + * update the mask. + */ + if (set->cs_domain != orig) { + orig = set->cs_domain; + set->cs_domain = domainset_shadow(domain, orig, domains); + } else + set->cs_domain = domain; + LIST_FOREACH(nset, &set->cs_children, cs_siblings) + cpuset_update_domain(nset, set->cs_domain, orig, domains); + + return; +} + +/* + * Modify the set 'set' to use a copy the domainset provided. Apply this new + * mask to restrict all children in the tree. Checks for validity before + * applying the changes. + */ +static int +cpuset_modify_domain(struct cpuset *set, struct domainset *domain) +{ + struct domainlist domains; + struct domainset temp; + struct domainset *dset; + struct cpuset *root; + int ndomains, needed; + int error; + + error = priv_check(curthread, PRIV_SCHED_CPUSET); + if (error) + return (error); + /* + * In case we are called from within the jail + * we do not allow modifying the dedicated root + * cpuset of the jail but may still allow to + * change child sets. + */ + if (jailed(curthread->td_ucred) && + set->cs_flags & CPU_SET_ROOT) + return (EPERM); + domainset_freelist_init(&domains, 0); + domain = domainset_create(domain); + ndomains = needed = 0; + do { + if (ndomains < needed) { + domainset_freelist_add(&domains, needed - ndomains); + ndomains = needed; + } + root = cpuset_getroot(set); + mtx_lock_spin(&cpuset_lock); + dset = root->cs_domain; + /* + * Verify that we have access to this set of domains. + */ + if (root && + !DOMAINSET_SUBSET(&dset->ds_mask, &domain->ds_mask)) { + error = EINVAL; + goto out; + } + /* + * Determine whether we can apply this set of domains and + * how many new domain structures it will require. 
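/*
 * Sketch (not part of this change) of the allocate-outside-the-lock
 * retry pattern used by cpuset_modify_domain() above: count how much
 * storage the update needs while holding the lock, drop the lock to
 * allocate, and retry until enough is pre-allocated.  A pthread mutex,
 * an integer free list and a fixed count_needed() are stand-ins for
 * the kernel's spin lock, domainset_freelist_add() and
 * cpuset_testupdate_domain().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;
static int freelist;			/* pre-allocated items, as a count */

static int
count_needed(void)
{
	return (3);			/* pretend the update needs 3 items */
}

static void
apply_update(void)
{
	int need = 0;

	for (;;) {
		while (freelist < need)
			freelist++;	/* allocation happens unlocked */
		pthread_mutex_lock(&set_lock);
		need = count_needed();
		if (freelist >= need) {
			freelist -= need;	/* commit under the lock */
			pthread_mutex_unlock(&set_lock);
			return;
		}
		pthread_mutex_unlock(&set_lock);
	}
}

int
main(void)
{
	apply_update();
	printf("update applied\n");
	return (0);
}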
+ */ + domainset_copy(domain, &temp); + needed = 0; + error = cpuset_testupdate_domain(set, &temp, set->cs_domain, + &needed, 0); + if (error) + goto out; + } while (ndomains < needed); + dset = set->cs_domain; + cpuset_update_domain(set, domain, dset, &domains); +out: + mtx_unlock_spin(&cpuset_lock); + domainset_freelist_free(&domains); + if (error == 0) + domainset_notify(); + + return (error); +} + +/* * Resolve the 'which' parameter of several cpuset apis. * * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also * checks for permission via p_cansched(). * * For WHICH_SET returns a valid set with a new reference. * * -1 may be supplied for any argument to mean the current proc/thread or * the base set of the current thread. May fail with ESRCH/EPERM. */ int cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp, struct cpuset **setp) { struct cpuset *set; struct thread *td; struct proc *p; int error; *pp = p = NULL; *tdp = td = NULL; *setp = set = NULL; switch (which) { case CPU_WHICH_PID: if (id == -1) { PROC_LOCK(curproc); p = curproc; break; } if ((p = pfind(id)) == NULL) return (ESRCH); break; case CPU_WHICH_TID: if (id == -1) { PROC_LOCK(curproc); p = curproc; td = curthread; break; } td = tdfind(id, -1); if (td == NULL) return (ESRCH); p = td->td_proc; break; case CPU_WHICH_CPUSET: if (id == -1) { thread_lock(curthread); set = cpuset_refbase(curthread->td_cpuset); thread_unlock(curthread); } else set = cpuset_lookup(id, curthread); if (set) { *setp = set; return (0); } return (ESRCH); case CPU_WHICH_JAIL: { /* Find `set' for prison with given id. */ struct prison *pr; sx_slock(&allprison_lock); pr = prison_find_child(curthread->td_ucred->cr_prison, id); sx_sunlock(&allprison_lock); if (pr == NULL) return (ESRCH); cpuset_ref(pr->pr_cpuset); *setp = pr->pr_cpuset; mtx_unlock(&pr->pr_mtx); return (0); } case CPU_WHICH_IRQ: case CPU_WHICH_DOMAIN: return (0); default: return (EINVAL); } error = p_cansched(curthread, p); if (error) { PROC_UNLOCK(p); return (error); } if (td == NULL) td = FIRST_THREAD_IN_PROC(p); *pp = p; *tdp = td; return (0); } +static int +cpuset_testshadow(struct cpuset *set, const cpuset_t *mask, + const struct domainset *domain) +{ + struct cpuset *parent; + struct domainset *dset; + + parent = cpuset_getbase(set); + /* + * If we are restricting a cpu mask it must be a subset of the + * parent or invalid CPUs have been specified. + */ + if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask)) + return (EINVAL); + + /* + * If we are restricting a domain mask it must be a subset of the + * parent or invalid domains have been specified. + */ + dset = parent->cs_domain; + if (domain != NULL && + !DOMAINSET_SUBSET(&dset->ds_mask, &domain->ds_mask)) + return (EINVAL); + + return (0); +} + /* * Create an anonymous set with the provided mask in the space provided by - * 'fset'. If the passed in set is anonymous we use its parent otherwise + * 'nset'. If the passed in set is anonymous we use its parent otherwise * the new set is a child of 'set'. 
*/ static int -cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask) +cpuset_shadow(struct cpuset *set, struct cpuset **nsetp, + const cpuset_t *mask, const struct domainset *domain, + struct setlist *cpusets, struct domainlist *domains) { struct cpuset *parent; + struct cpuset *nset; + struct domainset *dset; + struct domainset *d; + int error; - if (set->cs_id == CPUSET_INVALID) - parent = set->cs_parent; + error = cpuset_testshadow(set, mask, domain); + if (error) + return (error); + + parent = cpuset_getbase(set); + dset = parent->cs_domain; + if (mask == NULL) + mask = &set->cs_mask; + if (domain != NULL) + d = domainset_shadow(dset, domain, domains); else - parent = set; - if (!CPU_SUBSET(&parent->cs_mask, mask)) + d = set->cs_domain; + nset = LIST_FIRST(cpusets); + error = _cpuset_create(nset, parent, mask, d, CPUSET_INVALID); + if (error == 0) { + LIST_REMOVE(nset, cs_link); + *nsetp = nset; + } + return (error); +} + +static struct cpuset * +cpuset_update_thread(struct thread *td, struct cpuset *nset) +{ + struct cpuset *tdset; + + tdset = td->td_cpuset; + td->td_cpuset = nset; + td->td_domain.dr_policy = nset->cs_domain; + sched_affinity(td); + + return (tdset); +} + +static int +cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask, + struct domainset *domain) +{ + struct cpuset *parent; + + parent = cpuset_getbase(tdset); + if (mask == NULL) + mask = &tdset->cs_mask; + if (domain == NULL) + domain = tdset->cs_domain; + return cpuset_testshadow(parent, mask, domain); +} + +static int +cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask, + struct domainset *domain, struct cpuset **nsetp, + struct setlist *freelist, struct domainlist *domainlist) +{ + struct cpuset *parent; + + parent = cpuset_getbase(tdset); + if (mask == NULL) + mask = &tdset->cs_mask; + if (domain == NULL) + domain = tdset->cs_domain; + return cpuset_shadow(parent, nsetp, mask, domain, freelist, + domainlist); +} + +static int +cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set, + cpuset_t *mask, struct domainset *domain) +{ + struct cpuset *parent; + + parent = cpuset_getbase(tdset); + + /* + * If the thread restricted its mask then apply that same + * restriction to the new set, otherwise take it wholesale. + */ + if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) { + CPU_COPY(&tdset->cs_mask, mask); + CPU_AND(mask, &set->cs_mask); + } else + CPU_COPY(&set->cs_mask, mask); + + /* + * If the thread restricted the domain then we apply the + * restriction to the new set but retain the policy. 
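/*
 * Toy model (not part of this change) of the per-thread reconciliation
 * implemented by cpuset_setproc_setthread_mask(): a thread that had
 * narrowed its own mask keeps that narrowing intersected with the new
 * set, a thread that had not simply adopts the new set, and a thread
 * that restricted its domain keeps its own policy.  Flat 64-bit masks
 * stand in for cpuset_t and domainset_t.
 */
#include <stdint.h>

struct tmask {
	uint64_t	cpus;
	uint64_t	domains;
	int		policy;
};

static int
combine(const struct tmask *oldbase, const struct tmask *thread,
    const struct tmask *newset, struct tmask *out)
{

	out->cpus = (thread->cpus != oldbase->cpus) ?
	    (thread->cpus & newset->cpus) : newset->cpus;
	if (thread->domains != oldbase->domains) {
		out->domains = thread->domains & newset->domains;
		out->policy = thread->policy;	/* restriction and policy kept */
	} else {
		out->domains = newset->domains;
		out->policy = newset->policy;
	}
	/* 0 here corresponds to the kernel's EDEADLK case. */
	return (out->cpus != 0 && out->domains != 0);
}

int
main(void)
{
	struct tmask base = { 0xff, 0x3, 1 }, thr = { 0x3c, 0x3, 1 };
	struct tmask newset = { 0xf0, 0x1, 2 }, out;

	return (combine(&base, &thr, &newset, &out) ? 0 : 1);
}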
+ */ + if (tdset->cs_domain != parent->cs_domain) { + domainset_copy(tdset->cs_domain, domain); + DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask); + } else + domainset_copy(set->cs_domain, domain); + + if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask)) return (EDEADLK); - return (_cpuset_create(fset, parent, mask, CPUSET_INVALID)); + + return (0); } +static int +cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set) +{ + struct domainset domain; + cpuset_t mask; + + if (tdset->cs_id != CPUSET_INVALID) + return (0); + return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); +} + +static int +cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set, + struct cpuset **nsetp, struct setlist *freelist, + struct domainlist *domainlist) +{ + struct domainset domain; + cpuset_t mask; + int error; + + /* + * If we're replacing on a thread that has not constrained the + * original set we can simply accept the new set. + */ + if (tdset->cs_id != CPUSET_INVALID) { + *nsetp = cpuset_ref(set); + return (0); + } + error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain); + if (error) + return (error); + + return cpuset_shadow(tdset, nsetp, &mask, &domain, freelist, + domainlist); +} + /* - * Handle two cases for replacing the base set or mask of an entire process. + * Handle three cases for updating an entire process. * - * 1) Set is non-null and mask is null. This reparents all anonymous sets - * to the provided set and replaces all non-anonymous td_cpusets with the - * provided set. - * 2) Mask is non-null and set is null. This replaces or creates anonymous - * sets for every thread with the existing base as a parent. + * 1) Set is non-null. This reparents all anonymous sets to the provided + * set and replaces all non-anonymous td_cpusets with the provided set. + * 2) Mask is non-null. This replaces or creates anonymous sets for every + * thread with the existing base as a parent. + * 3) domain is non-null. This creates anonymous sets for every thread + * and replaces the domain set. * * This is overly complicated because we can't allocate while holding a * spinlock and spinlocks must be held while changing and examining thread * state. */ static int -cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask) +cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask, + struct domainset *domain) { struct setlist freelist; struct setlist droplist; - struct cpuset *tdset; + struct domainlist domainlist; struct cpuset *nset; struct thread *td; struct proc *p; int threads; int nfree; int error; /* * The algorithm requires two passes due to locking considerations. * * 1) Lookup the process and acquire the locks in the required order. * 2) If enough cpusets have not been allocated release the locks and * allocate them. Loop. 
*/ - LIST_INIT(&freelist); + cpuset_freelist_init(&freelist, 1); + domainset_freelist_init(&domainlist, 1); + nfree = 1; LIST_INIT(&droplist); nfree = 0; for (;;) { error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset); if (error) goto out; if (nfree >= p->p_numthreads) break; threads = p->p_numthreads; PROC_UNLOCK(p); - for (; nfree < threads; nfree++) { - nset = uma_zalloc(cpuset_zone, M_WAITOK); - LIST_INSERT_HEAD(&freelist, nset, cs_link); + if (nfree < threads) { + cpuset_freelist_add(&freelist, threads - nfree); + domainset_freelist_add(&domainlist, threads - nfree); + nfree = threads; } } PROC_LOCK_ASSERT(p, MA_OWNED); /* * Now that the appropriate locks are held and we have enough cpusets, - * make sure the operation will succeed before applying changes. The + * make sure the operation will succeed before applying changes. The * proc lock prevents td_cpuset from changing between calls. */ error = 0; FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); - tdset = td->td_cpuset; - /* - * Verify that a new mask doesn't specify cpus outside of - * the set the thread is a member of. - */ - if (mask) { - if (tdset->cs_id == CPUSET_INVALID) - tdset = tdset->cs_parent; - if (!CPU_SUBSET(&tdset->cs_mask, mask)) - error = EDEADLK; - /* - * Verify that a new set won't leave an existing thread - * mask without a cpu to run on. It can, however, restrict - * the set. - */ - } else if (tdset->cs_id == CPUSET_INVALID) { - if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask)) - error = EDEADLK; - } + if (set != NULL) + error = cpuset_setproc_test_setthread(td->td_cpuset, + set); + else + error = cpuset_setproc_test_maskthread(td->td_cpuset, + mask, domain); thread_unlock(td); if (error) goto unlock_out; } /* * Replace each thread's cpuset while using deferred release. We * must do this because the thread lock must be held while operating * on the thread and this limits the type of operations allowed. */ FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); - /* - * If we presently have an anonymous set or are applying a - * mask we must create an anonymous shadow set. That is - * either parented to our existing base or the supplied set. - * - * If we have a base set with no anonymous shadow we simply - * replace it outright. - */ - tdset = td->td_cpuset; - if (tdset->cs_id == CPUSET_INVALID || mask) { - nset = LIST_FIRST(&freelist); - LIST_REMOVE(nset, cs_link); - if (mask) - error = cpuset_shadow(tdset, nset, mask); - else - error = _cpuset_create(nset, set, - &tdset->cs_mask, CPUSET_INVALID); - if (error) { - LIST_INSERT_HEAD(&freelist, nset, cs_link); - thread_unlock(td); - break; - } - } else - nset = cpuset_ref(set); - cpuset_rel_defer(&droplist, tdset); - td->td_cpuset = nset; - sched_affinity(td); + if (set != NULL) + error = cpuset_setproc_setthread(td->td_cpuset, set, + &nset, &freelist, &domainlist); + else + error = cpuset_setproc_maskthread(td->td_cpuset, mask, + domain, &nset, &freelist, &domainlist); + if (error) { + thread_unlock(td); + break; + } + cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset)); thread_unlock(td); } unlock_out: PROC_UNLOCK(p); out: while ((nset = LIST_FIRST(&droplist)) != NULL) cpuset_rel_complete(nset); - while ((nset = LIST_FIRST(&freelist)) != NULL) { - LIST_REMOVE(nset, cs_link); - uma_zfree(cpuset_zone, nset); - } + cpuset_freelist_free(&freelist); + domainset_freelist_free(&domainlist); return (error); } /* * Return a string representing a valid layout for a cpuset_t object. * It expects an incoming buffer at least sized as CPUSETBUFSIZ. 
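/*
 * Userland sketch (not part of this change) of the textual mask layout
 * produced and parsed by the cpusetobj_strprint()/cpusetobj_strscan()
 * functions that follow: the words of the bit set as comma-separated
 * hexadecimal.  A small fixed-size array replaces cpuset_t and the
 * buffer size is illustrative, not CPUSETBUFSIZ.
 */
#include <stdio.h>
#include <string.h>

#define NWORDS	4

static void
mask_print(char *buf, size_t bufsiz, const unsigned long *bits)
{
	size_t off = 0;
	int i;

	for (i = 0; i < NWORDS; i++)
		off += snprintf(buf + off, bufsiz - off,
		    i ? ",%lx" : "%lx", bits[i]);
}

static int
mask_scan(unsigned long *bits, const char *buf)
{
	int i;

	memset(bits, 0, NWORDS * sizeof(bits[0]));
	for (i = 0; i < NWORDS && *buf != '\0'; i++) {
		if (sscanf(buf, "%lx", &bits[i]) != 1)
			return (-1);
		buf = strchr(buf, ',');
		if (buf == NULL)
			break;
		buf++;
	}
	return (0);
}

int
main(void)
{
	unsigned long in[NWORDS] = { 0xff, 0, 0x3, 0 }, out[NWORDS];
	char buf[128];

	mask_print(buf, sizeof(buf), in);
	printf("%s\n", buf);
	return (mask_scan(out, buf) == 0 &&
	    memcmp(in, out, sizeof(in)) == 0 ? 0 : 1);
}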
*/ char * cpusetobj_strprint(char *buf, const cpuset_t *set) { char *tbuf; size_t i, bytesp, bufsiz; tbuf = buf; bytesp = 0; bufsiz = CPUSETBUFSIZ; for (i = 0; i < (_NCPUWORDS - 1); i++) { bytesp = snprintf(tbuf, bufsiz, "%lx,", set->__bits[i]); bufsiz -= bytesp; tbuf += bytesp; } snprintf(tbuf, bufsiz, "%lx", set->__bits[_NCPUWORDS - 1]); return (buf); } /* * Build a valid cpuset_t object from a string representation. * It expects an incoming buffer at least sized as CPUSETBUFSIZ. */ int cpusetobj_strscan(cpuset_t *set, const char *buf) { u_int nwords; int i, ret; if (strlen(buf) > CPUSETBUFSIZ - 1) return (-1); /* Allow to pass a shorter version of the mask when necessary. */ nwords = 1; for (i = 0; buf[i] != '\0'; i++) if (buf[i] == ',') nwords++; if (nwords > _NCPUWORDS) return (-1); CPU_ZERO(set); for (i = 0; i < (nwords - 1); i++) { ret = sscanf(buf, "%lx,", &set->__bits[i]); if (ret == 0 || ret == -1) return (-1); buf = strstr(buf, ","); if (buf == NULL) return (-1); buf++; } ret = sscanf(buf, "%lx", &set->__bits[nwords - 1]); if (ret == 0 || ret == -1) return (-1); return (0); } /* - * Apply an anonymous mask to a single thread. + * Apply an anonymous mask or a domain to a single thread. */ -int -cpuset_setthread(lwpid_t id, cpuset_t *mask) +static int +_cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain) { + struct setlist cpusets; + struct domainlist domainlist; struct cpuset *nset; struct cpuset *set; struct thread *td; struct proc *p; int error; - nset = uma_zalloc(cpuset_zone, M_WAITOK); + cpuset_freelist_init(&cpusets, 1); + domainset_freelist_init(&domainlist, domain != NULL); error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set); if (error) goto out; set = NULL; thread_lock(td); - error = cpuset_shadow(td->td_cpuset, nset, mask); - if (error == 0) { - set = td->td_cpuset; - td->td_cpuset = nset; - sched_affinity(td); - nset = NULL; - } + error = cpuset_shadow(td->td_cpuset, &nset, mask, domain, + &cpusets, &domainlist); + if (error == 0) + set = cpuset_update_thread(td, nset); thread_unlock(td); PROC_UNLOCK(p); if (set) cpuset_rel(set); out: - if (nset) - uma_zfree(cpuset_zone, nset); + cpuset_freelist_free(&cpusets); + domainset_freelist_free(&domainlist); return (error); } /* + * Apply an anonymous mask to a single thread. + */ +int +cpuset_setthread(lwpid_t id, cpuset_t *mask) +{ + + return _cpuset_setthread(id, mask, NULL); +} + +/* * Apply new cpumask to the ithread. */ int cpuset_setithread(lwpid_t id, int cpu) { + struct setlist cpusets; struct cpuset *nset, *rset; struct cpuset *parent, *old_set; struct thread *td; struct proc *p; cpusetid_t cs_id; cpuset_t mask; int error; - nset = uma_zalloc(cpuset_zone, M_WAITOK); - rset = uma_zalloc(cpuset_zone, M_WAITOK); + cpuset_freelist_init(&cpusets, 1); + rset = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); cs_id = CPUSET_INVALID; CPU_ZERO(&mask); if (cpu == NOCPU) CPU_COPY(cpuset_root, &mask); else CPU_SET(cpu, &mask); error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &old_set); if (error != 0 || ((cs_id = alloc_unr(cpuset_unr)) == CPUSET_INVALID)) goto out; /* cpuset_which() returns with PROC_LOCK held. */ old_set = td->td_cpuset; if (cpu == NOCPU) { + nset = LIST_FIRST(&cpusets); + LIST_REMOVE(nset, cs_link); /* * roll back to default set. We're not using cpuset_shadow() * here because we can fail CPU_SUBSET() check. This can happen * if default set does not contain all CPUs. 
*/ - error = _cpuset_create(nset, cpuset_default, &mask, + error = _cpuset_create(nset, cpuset_default, &mask, NULL, CPUSET_INVALID); goto applyset; } if (old_set->cs_id == 1 || (old_set->cs_id == CPUSET_INVALID && old_set->cs_parent->cs_id == 1)) { /* * Current set is either default (1) or * shadowed version of default set. * * Allocate new root set to be able to shadow it * with any mask. */ error = _cpuset_create(rset, cpuset_zero, - &cpuset_zero->cs_mask, cs_id); + &cpuset_zero->cs_mask, NULL, cs_id); if (error != 0) { PROC_UNLOCK(p); goto out; } rset->cs_flags |= CPU_SET_ROOT; parent = rset; rset = NULL; cs_id = CPUSET_INVALID; } else { /* Assume existing set was already allocated by previous call */ parent = old_set; old_set = NULL; } - error = cpuset_shadow(parent, nset, &mask); + error = cpuset_shadow(parent, &nset, &mask, NULL, &cpusets, NULL); applyset: if (error == 0) { thread_lock(td); - td->td_cpuset = nset; - sched_affinity(td); + old_set = cpuset_update_thread(td, nset); thread_unlock(td); - nset = NULL; } else old_set = NULL; PROC_UNLOCK(p); if (old_set != NULL) cpuset_rel(old_set); out: - if (nset != NULL) - uma_zfree(cpuset_zone, nset); + cpuset_freelist_free(&cpusets); if (rset != NULL) uma_zfree(cpuset_zone, rset); if (cs_id != CPUSET_INVALID) free_unr(cpuset_unr, cs_id); return (error); } +struct domainset domainset0; +void +domainset_zero(void) +{ + struct domainset *dset; + int i; + + mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE); + + dset = &domainset0; + DOMAINSET_ZERO(&dset->ds_mask); + for (i = 0; i < vm_ndomains; i++) + DOMAINSET_SET(i, &dset->ds_mask); + dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN; + curthread->td_domain.dr_policy = _domainset_create(dset, NULL); + kernel_object->domain.dr_policy = curthread->td_domain.dr_policy; +} + /* * Creates system-wide cpusets and the cpuset for thread0 including two * sets: * * 0 - The root set which should represent all valid processors in the * system. It is initially created with a mask of all processors * because we don't know what processors are valid until cpuset_init() * runs. This set is immutable. * 1 - The default set which all processes are a member of until changed. * This allows an administrator to move all threads off of given cpus to * dedicate them to high priority tasks or save power etc. */ struct cpuset * cpuset_thread0(void) { struct cpuset *set; - int error, i; + int error; cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE); + domainset_zone = uma_zcreate("domainset", sizeof(struct domainset), + NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); /* * Create the root system set for the whole machine. Doesn't use * cpuset_create() due to NULL parent. */ set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); CPU_FILL(&set->cs_mask); LIST_INIT(&set->cs_children); LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); set->cs_ref = 1; set->cs_flags = CPU_SET_ROOT; + set->cs_domain = &domainset0; cpuset_zero = set; cpuset_root = &set->cs_mask; /* * Now derive a default, modifiable set from that to give out. */ - set = uma_zalloc(cpuset_zone, M_WAITOK); - error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1); + set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); + error = _cpuset_create(set, cpuset_zero, NULL, NULL, 1); KASSERT(error == 0, ("Error creating default set: %d\n", error)); cpuset_default = set; /* * Initialize the unit allocator. 0 and 1 are allocated above. 
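/*
 * Userland sketch (not part of this change) of the interface the two
 * new syscalls expose: read the calling thread's domain mask and
 * policy, then restrict allocations to domain 0 with the round-robin
 * policy used for domainset0 above.  Assumes the libc wrappers for
 * cpuset_getdomain()/cpuset_setdomain() added to Symbol.map in this
 * revision and the DOMAINSET_* macros from <sys/domainset.h>.
 */
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	domainset_t mask;
	int policy;

	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, &policy) != 0)
		err(1, "cpuset_getdomain");
	printf("current policy %d\n", policy);

	DOMAINSET_ZERO(&mask);
	DOMAINSET_SET(0, &mask);
	if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, DOMAINSET_POLICY_ROUNDROBIN) != 0)
		err(1, "cpuset_setdomain");
	return (0);
}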
*/ cpuset_unr = new_unrhdr(2, INT_MAX, NULL); - /* - * If MD code has not initialized per-domain cpusets, place all - * CPUs in domain 0. - */ - for (i = 0; i < MAXMEMDOM; i++) - if (!CPU_EMPTY(&cpuset_domain[i])) - goto domains_set; - CPU_COPY(&all_cpus, &cpuset_domain[0]); -domains_set: - return (set); } /* * Create a cpuset, which would be cpuset_create() but * mark the new 'set' as root. * * We are not going to reparent the td to it. Use cpuset_setproc_update_set() * for that. * * In case of no error, returns the set in *setp locked with a reference. */ int cpuset_create_root(struct prison *pr, struct cpuset **setp) { struct cpuset *set; int error; KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__)); KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__)); error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask); if (error) return (error); KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data", __func__, __LINE__)); /* Mark the set as root. */ set = *setp; set->cs_flags |= CPU_SET_ROOT; return (0); } int cpuset_setproc_update_set(struct proc *p, struct cpuset *set) { int error; KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__)); KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__)); cpuset_ref(set); - error = cpuset_setproc(p->p_pid, set, NULL); + error = cpuset_setproc(p->p_pid, set, NULL, NULL); if (error) return (error); cpuset_rel(set); return (0); } /* * This is called once the final set of system cpus is known. Modifies * the root set and all children and mark the root read-only. */ static void cpuset_init(void *arg) { cpuset_t mask; + int i; mask = all_cpus; if (cpuset_modify(cpuset_zero, &mask)) panic("Can't set initial cpuset mask.\n"); cpuset_zero->cs_flags |= CPU_SET_RDONLY; + + /* + * If MD code has not initialized per-domain cpusets, place all + * CPUs in domain 0. + */ + for (i = 0; i < MAXMEMDOM; i++) + if (!CPU_EMPTY(&cpuset_domain[i])) + goto domains_set; + CPU_COPY(&all_cpus, &cpuset_domain[0]); +domains_set: + return; } SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL); #ifndef _SYS_SYSPROTO_H_ struct cpuset_args { cpusetid_t *setid; }; #endif int sys_cpuset(struct thread *td, struct cpuset_args *uap) { struct cpuset *root; struct cpuset *set; int error; thread_lock(td); root = cpuset_refroot(td->td_cpuset); thread_unlock(td); error = cpuset_create(&set, root, &root->cs_mask); cpuset_rel(root); if (error) return (error); error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id)); if (error == 0) - error = cpuset_setproc(-1, set, NULL); + error = cpuset_setproc(-1, set, NULL, NULL); cpuset_rel(set); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_setid_args { cpuwhich_t which; id_t id; cpusetid_t setid; }; #endif int sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap) { return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid)); } int kern_cpuset_setid(struct thread *td, cpuwhich_t which, id_t id, cpusetid_t setid) { struct cpuset *set; int error; /* * Presently we only support per-process sets. 
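/*
 * Userland sketch (not part of this change): create a new numbered set
 * with cpuset(2), which also moves the calling process into it, then
 * (re)bind the current process by id with cpuset_setid(2).  As the
 * comment above notes, only CPU_WHICH_PID is accepted here.
 */
#include <sys/param.h>
#include <sys/cpuset.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	cpusetid_t setid;

	if (cpuset(&setid) != 0)
		err(1, "cpuset");
	printf("now in set %d\n", (int)setid);

	/* -1 names the current process. */
	if (cpuset_setid(CPU_WHICH_PID, -1, setid) != 0)
		err(1, "cpuset_setid");
	return (0);
}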
*/ if (which != CPU_WHICH_PID) return (EINVAL); set = cpuset_lookup(setid, td); if (set == NULL) return (ESRCH); - error = cpuset_setproc(id, set, NULL); + error = cpuset_setproc(id, set, NULL, NULL); cpuset_rel(set); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_getid_args { cpulevel_t level; cpuwhich_t which; id_t id; cpusetid_t *setid; }; #endif int sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap) { return (kern_cpuset_getid(td, uap->level, uap->which, uap->id, uap->setid)); } int kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid) { struct cpuset *nset; struct cpuset *set; struct thread *ttd; struct proc *p; cpusetid_t tmpid; int error; if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET) return (EINVAL); error = cpuset_which(which, id, &p, &ttd, &set); if (error) return (error); switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_refbase(ttd->td_cpuset); thread_unlock(ttd); PROC_UNLOCK(p); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_DOMAIN: return (EINVAL); } switch (level) { case CPU_LEVEL_ROOT: nset = cpuset_refroot(set); cpuset_rel(set); set = nset; break; case CPU_LEVEL_CPUSET: break; case CPU_LEVEL_WHICH: break; } tmpid = set->cs_id; cpuset_rel(set); if (error == 0) error = copyout(&tmpid, setid, sizeof(tmpid)); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_getaffinity_args { cpulevel_t level; cpuwhich_t which; id_t id; size_t cpusetsize; cpuset_t *mask; }; #endif int sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap) { return (kern_cpuset_getaffinity(td, uap->level, uap->which, uap->id, uap->cpusetsize, uap->mask)); } int kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *maskp) { struct thread *ttd; struct cpuset *nset; struct cpuset *set; struct proc *p; cpuset_t *mask; int error; size_t size; if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY) return (ERANGE); /* In Capability mode, you can only get your own CPU set. 
*/ if (IN_CAPABILITY_MODE(td)) { if (level != CPU_LEVEL_WHICH) return (ECAPMODE); if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) return (ECAPMODE); if (id != -1) return (ECAPMODE); } size = cpusetsize; mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO); error = cpuset_which(which, id, &p, &ttd, &set); if (error) goto out; switch (level) { case CPU_LEVEL_ROOT: case CPU_LEVEL_CPUSET: switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_ref(ttd->td_cpuset); thread_unlock(ttd); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; goto out; } if (level == CPU_LEVEL_ROOT) nset = cpuset_refroot(set); else nset = cpuset_refbase(set); CPU_COPY(&nset->cs_mask, mask); cpuset_rel(nset); break; case CPU_LEVEL_WHICH: switch (which) { case CPU_WHICH_TID: thread_lock(ttd); CPU_COPY(&ttd->td_cpuset->cs_mask, mask); thread_unlock(ttd); break; case CPU_WHICH_PID: FOREACH_THREAD_IN_PROC(p, ttd) { thread_lock(ttd); CPU_OR(mask, &ttd->td_cpuset->cs_mask); thread_unlock(ttd); } break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: CPU_COPY(&set->cs_mask, mask); break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: error = intr_getaffinity(id, which, mask); break; case CPU_WHICH_DOMAIN: if (id < 0 || id >= MAXMEMDOM) error = ESRCH; else CPU_COPY(&cpuset_domain[id], mask); break; } break; default: error = EINVAL; break; } if (set) cpuset_rel(set); if (p) PROC_UNLOCK(p); if (error == 0) error = copyout(mask, maskp, size); out: free(mask, M_TEMP); return (error); } #ifndef _SYS_SYSPROTO_H_ struct cpuset_setaffinity_args { cpulevel_t level; cpuwhich_t which; id_t id; size_t cpusetsize; const cpuset_t *mask; }; #endif int sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap) { return (kern_cpuset_setaffinity(td, uap->level, uap->which, uap->id, uap->cpusetsize, uap->mask)); } int kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *maskp) { struct cpuset *nset; struct cpuset *set; struct thread *ttd; struct proc *p; cpuset_t *mask; int error; if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY) return (ERANGE); /* In Capability mode, you can only set your own CPU set. */ if (IN_CAPABILITY_MODE(td)) { if (level != CPU_LEVEL_WHICH) return (ECAPMODE); if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) return (ECAPMODE); if (id != -1) return (ECAPMODE); } mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO); error = copyin(maskp, mask, cpusetsize); if (error) goto out; /* * Verify that no high bits are set. 
*/ if (cpusetsize > sizeof(cpuset_t)) { char *end; char *cp; end = cp = (char *)&mask->__bits; end += cpusetsize; cp += sizeof(cpuset_t); while (cp != end) if (*cp++ != 0) { error = EINVAL; goto out; } } switch (level) { case CPU_LEVEL_ROOT: case CPU_LEVEL_CPUSET: error = cpuset_which(which, id, &p, &ttd, &set); if (error) break; switch (which) { case CPU_WHICH_TID: case CPU_WHICH_PID: thread_lock(ttd); set = cpuset_ref(ttd->td_cpuset); thread_unlock(ttd); PROC_UNLOCK(p); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: case CPU_WHICH_DOMAIN: error = EINVAL; goto out; } if (level == CPU_LEVEL_ROOT) nset = cpuset_refroot(set); else nset = cpuset_refbase(set); error = cpuset_modify(nset, mask); cpuset_rel(nset); cpuset_rel(set); break; case CPU_LEVEL_WHICH: switch (which) { case CPU_WHICH_TID: error = cpuset_setthread(id, mask); break; case CPU_WHICH_PID: - error = cpuset_setproc(id, NULL, mask); + error = cpuset_setproc(id, NULL, mask, NULL); break; case CPU_WHICH_CPUSET: case CPU_WHICH_JAIL: error = cpuset_which(which, id, &p, &ttd, &set); if (error == 0) { error = cpuset_modify(set, mask); cpuset_rel(set); } break; case CPU_WHICH_IRQ: case CPU_WHICH_INTRHANDLER: case CPU_WHICH_ITHREAD: error = intr_setaffinity(id, which, mask); break; default: error = EINVAL; break; } break; default: error = EINVAL; break; } out: free(mask, M_TEMP); return (error); } +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_getdomain_args { + cpulevel_t level; + cpuwhich_t which; + id_t id; + size_t domainsetsize; + domainset_t *mask; + int *policy; +}; +#endif +int +sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap) +{ + + return (kern_cpuset_getdomain(td, uap->level, uap->which, + uap->id, uap->domainsetsize, uap->mask, uap->policy)); +} + +int +kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, + id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp) +{ + struct thread *ttd; + struct cpuset *nset; + struct cpuset *set; + struct domainset *dset; + struct proc *p; + domainset_t *mask; + int policy; + int error; + size_t size; + + if (domainsetsize < sizeof(domainset_t) || + domainsetsize > DOMAINSET_MAXSIZE / NBBY) + return (ERANGE); + /* In Capability mode, you can only get your own domain set. */ + if (IN_CAPABILITY_MODE(td)) { + if (level != CPU_LEVEL_WHICH) + return (ECAPMODE); + if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) + return (ECAPMODE); + if (id != -1) + return (ECAPMODE); + } + policy = 0; + size = domainsetsize; + mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO); + error = cpuset_which(which, id, &p, &ttd, &set); + if (error) + goto out; + switch (level) { + case CPU_LEVEL_ROOT: + case CPU_LEVEL_CPUSET: + switch (which) { + case CPU_WHICH_TID: + case CPU_WHICH_PID: + thread_lock(ttd); + set = cpuset_ref(ttd->td_cpuset); + thread_unlock(ttd); + break; + case CPU_WHICH_CPUSET: + case CPU_WHICH_JAIL: + break; + case CPU_WHICH_IRQ: + case CPU_WHICH_INTRHANDLER: + case CPU_WHICH_ITHREAD: + case CPU_WHICH_DOMAIN: + error = EINVAL; + goto out; + } + if (level == CPU_LEVEL_ROOT) + nset = cpuset_refroot(set); + else + nset = cpuset_refbase(set); + /* Fetch once for a coherent result. */ + dset = nset->cs_domain; + DOMAINSET_COPY(&dset->ds_mask, mask); + policy = dset->ds_policy; + cpuset_rel(nset); + break; + case CPU_LEVEL_WHICH: + switch (which) { + case CPU_WHICH_TID: + thread_lock(ttd); + /* Fetch once for a coherent result. 
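At this point the new cpuset_getdomain(2) path walks the selected thread, process or set and copies out the effective memory-domain mask and policy. A minimal userland sketch of the new interface pair follows, assuming the cpuset_getdomain()/cpuset_setdomain() wrappers and the <sys/domainset.h> macros that accompany this branch; DOMAINSET_POLICY_ROUNDROBIN and the DOMAINSET_*() macros appear in the kernel code above, while the choice of domain 0 and the error handling are purely illustrative.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	domainset_t mask;
	int policy;

	/*
	 * Query the calling process's current domain mask and policy.
	 * CPU_LEVEL_WHICH / CPU_WHICH_PID / id -1 is also the only
	 * combination permitted in capability mode, per the check above.
	 */
	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask, &policy) != 0)
		err(1, "cpuset_getdomain");
	printf("policy %d, %zu-byte mask\n", policy, sizeof(mask));

	/* Restrict the process to domain 0 with a round-robin policy. */
	DOMAINSET_ZERO(&mask);
	DOMAINSET_SET(0, &mask);
	if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask, DOMAINSET_POLICY_ROUNDROBIN) != 0)
		err(1, "cpuset_setdomain");
	return (0);
}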
*/ + dset = ttd->td_cpuset->cs_domain; + DOMAINSET_COPY(&dset->ds_mask, mask); + policy = dset->ds_policy; + thread_unlock(ttd); + break; + case CPU_WHICH_PID: + FOREACH_THREAD_IN_PROC(p, ttd) { + thread_lock(ttd); + dset = ttd->td_cpuset->cs_domain; + /* Show all domains in the proc. */ + DOMAINSET_OR(mask, &dset->ds_mask); + /* Last policy wins. */ + policy = dset->ds_policy; + thread_unlock(ttd); + } + break; + case CPU_WHICH_CPUSET: + case CPU_WHICH_JAIL: + dset = set->cs_domain; + policy = dset->ds_policy; + DOMAINSET_OR(mask, &dset->ds_mask); + break; + case CPU_WHICH_IRQ: + case CPU_WHICH_INTRHANDLER: + case CPU_WHICH_ITHREAD: + case CPU_WHICH_DOMAIN: + error = EINVAL; + break; + } + break; + default: + error = EINVAL; + break; + } + if (set) + cpuset_rel(set); + if (p) + PROC_UNLOCK(p); + if (error == 0) + error = copyout(mask, maskp, size); + if (error == 0) + error = copyout(&policy, policyp, sizeof(*policyp)); +out: + free(mask, M_TEMP); + return (error); +} + +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_setdomain_args { + cpulevel_t level; + cpuwhich_t which; + id_t id; + size_t domainsetsize; + domainset_t *mask; + int policy; +}; +#endif +int +sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap) +{ + + return (kern_cpuset_setdomain(td, uap->level, uap->which, + uap->id, uap->domainsetsize, uap->mask, uap->policy)); +} + +int +kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, + id_t id, size_t domainsetsize, const domainset_t *maskp, int policy) +{ + struct cpuset *nset; + struct cpuset *set; + struct thread *ttd; + struct proc *p; + struct domainset domain; + domainset_t *mask; + int error; + + if (domainsetsize < sizeof(domainset_t) || + domainsetsize > DOMAINSET_MAXSIZE / NBBY) + return (ERANGE); + /* In Capability mode, you can only set your own domain set. */ + if (IN_CAPABILITY_MODE(td)) { + if (level != CPU_LEVEL_WHICH) + return (ECAPMODE); + if (which != CPU_WHICH_TID && which != CPU_WHICH_PID) + return (ECAPMODE); + if (id != -1) + return (ECAPMODE); + } + memset(&domain, 0, sizeof(domain)); + mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO); + error = copyin(maskp, mask, domainsetsize); + if (error) + goto out; + /* + * Verify that no high bits are set.
+ */ + if (domainsetsize > sizeof(domainset_t)) { + char *end; + char *cp; + + end = cp = (char *)&mask->__bits; + end += domainsetsize; + cp += sizeof(domainset_t); + while (cp != end) + if (*cp++ != 0) { + error = EINVAL; + goto out; + } + + } + DOMAINSET_COPY(mask, &domain.ds_mask); + domain.ds_policy = policy; + if (policy <= DOMAINSET_POLICY_INVALID || + policy > DOMAINSET_POLICY_MAX) { + error = EINVAL; + goto out; + } + + switch (level) { + case CPU_LEVEL_ROOT: + case CPU_LEVEL_CPUSET: + error = cpuset_which(which, id, &p, &ttd, &set); + if (error) + break; + switch (which) { + case CPU_WHICH_TID: + case CPU_WHICH_PID: + thread_lock(ttd); + set = cpuset_ref(ttd->td_cpuset); + thread_unlock(ttd); + PROC_UNLOCK(p); + break; + case CPU_WHICH_CPUSET: + case CPU_WHICH_JAIL: + break; + case CPU_WHICH_IRQ: + case CPU_WHICH_INTRHANDLER: + case CPU_WHICH_ITHREAD: + case CPU_WHICH_DOMAIN: + error = EINVAL; + goto out; + } + if (level == CPU_LEVEL_ROOT) + nset = cpuset_refroot(set); + else + nset = cpuset_refbase(set); + error = cpuset_modify_domain(nset, &domain); + cpuset_rel(nset); + cpuset_rel(set); + break; + case CPU_LEVEL_WHICH: + switch (which) { + case CPU_WHICH_TID: + error = _cpuset_setthread(id, NULL, &domain); + break; + case CPU_WHICH_PID: + error = cpuset_setproc(id, NULL, NULL, &domain); + break; + case CPU_WHICH_CPUSET: + case CPU_WHICH_JAIL: + error = cpuset_which(which, id, &p, &ttd, &set); + if (error == 0) { + error = cpuset_modify_domain(set, &domain); + cpuset_rel(set); + } + break; + case CPU_WHICH_IRQ: + case CPU_WHICH_INTRHANDLER: + case CPU_WHICH_ITHREAD: + default: + error = EINVAL; + break; + } + break; + default: + error = EINVAL; + break; + } +out: + free(mask, M_TEMP); + return (error); +} + #ifdef DDB -void -ddb_display_cpuset(const cpuset_t *set) +BITSET_DEFINE(bitset, 1); +static void +ddb_display_bitset(const struct bitset *set, int size) { - int cpu, once; + int bit, once; - for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) { - if (CPU_ISSET(cpu, set)) { + for (once = 0, bit = 0; bit < size; bit++) { + if (CPU_ISSET(bit, set)) { if (once == 0) { - db_printf("%d", cpu); + db_printf("%d", bit); once = 1; } else - db_printf(",%d", cpu); + db_printf(",%d", bit); } } if (once == 0) db_printf(""); } +void +ddb_display_cpuset(const cpuset_t *set) +{ + ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE); +} + +static void +ddb_display_domainset(const domainset_t *set) +{ + ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE); +} + DB_SHOW_COMMAND(cpusets, db_show_cpusets) { struct cpuset *set; LIST_FOREACH(set, &cpuset_ids, cs_link) { db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n", set, set->cs_id, set->cs_ref, set->cs_flags, (set->cs_parent != NULL) ?
set->cs_parent->cs_id : 0); - db_printf(" mask="); + db_printf(" cpu mask="); ddb_display_cpuset(&set->cs_mask); db_printf("\n"); + db_printf(" domain policy %d mask=", + set->cs_domain->ds_policy); + ddb_display_domainset(&set->cs_domain->ds_mask); + db_printf("\n"); if (db_pager_quit) break; + } +} + +DB_SHOW_COMMAND(domainsets, db_show_domainsets) +{ + struct domainset *set; + + LIST_FOREACH(set, &cpuset_domains, ds_link) { + db_printf("set=%p policy %d cnt %d max %d\n", + set, set->ds_policy, set->ds_cnt, set->ds_max); + db_printf(" mask ="); + ddb_display_domainset(&set->ds_mask); + db_printf("\n"); } } #endif /* DDB */ Index: user/jeff/numa/sys/kern/kern_exit.c =================================================================== --- user/jeff/numa/sys/kern/kern_exit.c (revision 326888) +++ user/jeff/numa/sys/kern/kern_exit.c (revision 326889) @@ -1,1346 +1,1342 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for acct_process() function prototype */ #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include dtrace_execexit_func_t dtrace_fasttrap_exit; #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE1(proc, , , exit, "int"); /* Hook for NFS teardown procedure. */ void (*nlminfo_release_p)(struct proc *p); EVENTHANDLER_LIST_DECLARE(process_exit); struct proc * proc_realparent(struct proc *child) { struct proc *p, *parent; sx_assert(&proctree_lock, SX_LOCKED); if ((child->p_treeflag & P_TREE_ORPHANED) == 0) { if (child->p_oppid == 0 || child->p_pptr->p_pid == child->p_oppid) parent = child->p_pptr; else parent = initproc; return (parent); } for (p = child; (p->p_treeflag & P_TREE_FIRST_ORPHAN) == 0;) { /* Cannot use LIST_PREV(), since the list head is not known. */ p = __containerof(p->p_orphan.le_prev, struct proc, p_orphan.le_next); KASSERT((p->p_treeflag & P_TREE_ORPHANED) != 0, ("missing P_ORPHAN %p", p)); } parent = __containerof(p->p_orphan.le_prev, struct proc, p_orphans.lh_first); return (parent); } void reaper_abandon_children(struct proc *p, bool exiting) { struct proc *p1, *p2, *ptmp; sx_assert(&proctree_lock, SX_LOCKED); KASSERT(p != initproc, ("reaper_abandon_children for initproc")); if ((p->p_treeflag & P_TREE_REAPER) == 0) return; p1 = p->p_reaper; LIST_FOREACH_SAFE(p2, &p->p_reaplist, p_reapsibling, ptmp) { LIST_REMOVE(p2, p_reapsibling); p2->p_reaper = p1; p2->p_reapsubtree = p->p_reapsubtree; LIST_INSERT_HEAD(&p1->p_reaplist, p2, p_reapsibling); if (exiting && p2->p_pptr == p) { PROC_LOCK(p2); proc_reparent(p2, p1); PROC_UNLOCK(p2); } } KASSERT(LIST_EMPTY(&p->p_reaplist), ("p_reaplist not empty")); p->p_treeflag &= ~P_TREE_REAPER; } static void clear_orphan(struct proc *p) { struct proc *p1; sx_assert(&proctree_lock, SA_XLOCKED); if ((p->p_treeflag & P_TREE_ORPHANED) == 0) return; if ((p->p_treeflag & P_TREE_FIRST_ORPHAN) != 0) { p1 = LIST_NEXT(p, p_orphan); if (p1 != NULL) p1->p_treeflag |= P_TREE_FIRST_ORPHAN; p->p_treeflag &= ~P_TREE_FIRST_ORPHAN; } LIST_REMOVE(p, p_orphan); p->p_treeflag &= ~P_TREE_ORPHANED; } /* * exit -- death of process. */ void sys_sys_exit(struct thread *td, struct sys_exit_args *uap) { exit1(td, uap->rval, 0); /* NOTREACHED */ } /* * Exit: deallocate address space and other resources, change proc state to * zombie, and unlink proc from allproc and parent's lists. Save exit status * and rusage for wait(). Check for child processes and orphan them. */ void exit1(struct thread *td, int rval, int signo) { struct proc *p, *nq, *q, *t; struct thread *tdt; ksiginfo_t *ksi, *ksi1; mtx_assert(&Giant, MA_NOTOWNED); KASSERT(rval == 0 || signo == 0, ("exit1 rv %d sig %d", rval, signo)); p = td->td_proc; /* * XXX in case we're rebooting we just let init die in order to * work around an unsolved stack overflow seen very late during * shutdown on sparc64 when the gmirror worker process exists. 
*/ if (p == initproc && rebooting == 0) { printf("init died (signal %d, exit %d)\n", signo, rval); panic("Going nowhere without my init!"); } /* * Deref SU mp, since the thread does not return to userspace. */ td_softdep_cleanup(td); /* * MUST abort all other threads before proceeding past here. */ PROC_LOCK(p); /* * First check if some other thread or external request got * here before us. If so, act appropriately: exit or suspend. * We must ensure that stop requests are handled before we set * P_WEXIT. */ thread_suspend_check(0); while (p->p_flag & P_HADTHREADS) { /* * Kill off the other threads. This requires * some co-operation from other parts of the kernel * so it may not be instantaneous. With this state set * any thread entering the kernel from userspace will * thread_exit() in trap(). Any thread attempting to * sleep will return immediately with EINTR or EWOULDBLOCK * which will hopefully force them to back out to userland * freeing resources as they go. Any thread attempting * to return to userland will thread_exit() from userret(). * thread_exit() will unsuspend us when the last of the * other threads exits. * If there is already a thread singler after resumption, * calling thread_single will fail; in that case, we just * re-check all suspension request, the thread should * either be suspended there or exit. */ if (!thread_single(p, SINGLE_EXIT)) /* * All other activity in this process is now * stopped. Threading support has been turned * off. */ break; /* * Recheck for new stop or suspend requests which * might appear while process lock was dropped in * thread_single(). */ thread_suspend_check(0); } KASSERT(p->p_numthreads == 1, ("exit1: proc %p exiting with %d threads", p, p->p_numthreads)); racct_sub(p, RACCT_NTHR, 1); /* Let event handler change exit status */ p->p_xexit = rval; p->p_xsig = signo; /* * Wakeup anyone in procfs' PIOCWAIT. They should have a hold * on our vmspace, so we should block below until they have * released their reference to us. Note that if they have * requested S_EXIT stops we will block here until they ack * via PIOCCONT. */ _STOPEVENT(p, S_EXIT, 0); /* * Ignore any pending request to stop due to a stop signal. * Once P_WEXIT is set, future requests will be ignored as * well. */ p->p_flag &= ~P_STOPPED_SIG; KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped")); /* * Note that we are exiting and do another wakeup of anyone in * PIOCWAIT in case they aren't listening for S_EXIT stops or * decided to wait again after we told them we are exiting. */ p->p_flag |= P_WEXIT; wakeup(&p->p_stype); /* * Wait for any processes that have a hold on our vmspace to * release their reference. */ while (p->p_lock > 0) msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0); PROC_UNLOCK(p); /* Drain the limit callout while we don't have the proc locked */ callout_drain(&p->p_limco); #ifdef AUDIT /* * The Sun BSM exit token contains two components: an exit status as * passed to exit(), and a return value to indicate what sort of exit * it was. The exit status is WEXITSTATUS(rv), but it's not clear * what the return value is. */ AUDIT_ARG_EXIT(rval, 0); AUDIT_SYSCALL_EXIT(0, td); #endif /* Are we a task leader with peers? */ if (p->p_peers != NULL && p == p->p_leader) { mtx_lock(&ppeers_lock); q = p->p_peers; while (q != NULL) { PROC_LOCK(q); kern_psignal(q, SIGKILL); PROC_UNLOCK(q); q = q->p_peers; } while (p->p_peers != NULL) msleep(p, &ppeers_lock, PWAIT, "exit1", 0); mtx_unlock(&ppeers_lock); } /* * Check if any loadable modules need anything done at process exit. 
* E.g. SYSV IPC stuff. * Event handler could change exit status. * XXX what if one of these generates an error? */ EVENTHANDLER_DIRECT_INVOKE(process_exit, p); /* * If parent is waiting for us to exit or exec, * P_PPWAIT is set; we will wakeup the parent below. */ PROC_LOCK(p); stopprofclock(p); p->p_flag &= ~(P_TRACED | P_PPWAIT | P_PPTRACE); p->p_ptevents = 0; /* * Stop the real interval timer. If the handler is currently * executing, prevent it from rearming itself and let it finish. */ if (timevalisset(&p->p_realtimer.it_value) && _callout_stop_safe(&p->p_itcallout, CS_EXECUTING, NULL) == 0) { timevalclear(&p->p_realtimer.it_interval); msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0); KASSERT(!timevalisset(&p->p_realtimer.it_value), ("realtime timer is still armed")); } PROC_UNLOCK(p); umtx_thread_exit(td); /* * Reset any sigio structures pointing to us as a result of * F_SETOWN with our pid. */ funsetownlst(&p->p_sigiolst); /* * If this process has an nlminfo data area (for lockd), release it */ if (nlminfo_release_p != NULL && p->p_nlminfo != NULL) (*nlminfo_release_p)(p); /* * Close open files and release open-file table. * This may block! */ fdescfree(td); /* * If this thread tickled GEOM, we need to wait for the giggling to * stop before we return to userland */ if (td->td_pflags & TDP_GEOM) g_waitidle(); /* * Remove ourself from our leader's peer list and wake our leader. */ if (p->p_leader->p_peers != NULL) { mtx_lock(&ppeers_lock); if (p->p_leader->p_peers != NULL) { q = p->p_leader; while (q->p_peers != p) q = q->p_peers; q->p_peers = p->p_peers; wakeup(p->p_leader); } mtx_unlock(&ppeers_lock); } vmspace_exit(td); killjobc(); (void)acct_process(td); #ifdef KTRACE ktrprocexit(td); #endif /* * Release reference to text vnode */ if (p->p_textvp != NULL) { vrele(p->p_textvp); p->p_textvp = NULL; } /* * Release our limits structure. */ lim_free(p->p_limit); p->p_limit = NULL; tidhash_remove(td); /* * Remove proc from allproc queue and pidhash chain. * Place onto zombproc. Unlink from parent's child list. */ sx_xlock(&allproc_lock); LIST_REMOVE(p, p_list); LIST_INSERT_HEAD(&zombproc, p, p_list); LIST_REMOVE(p, p_hash); sx_xunlock(&allproc_lock); /* * Call machine-dependent code to release any * machine-dependent resources other than the address space. * The address space is released by "vmspace_exitfree(p)" in * vm_waitproc(). */ cpu_exit(td); WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid); /* * Reparent all children processes: * - traced ones to the original parent (or init if we are that parent) * - the rest to init */ sx_xlock(&proctree_lock); q = LIST_FIRST(&p->p_children); if (q != NULL) /* only need this if any child is S_ZOMB */ wakeup(q->p_reaper); for (; q != NULL; q = nq) { nq = LIST_NEXT(q, p_sibling); ksi = ksiginfo_alloc(TRUE); PROC_LOCK(q); q->p_sigparent = SIGCHLD; if (!(q->p_flag & P_TRACED)) { proc_reparent(q, q->p_reaper); if (q->p_state == PRS_ZOMBIE) { /* * Inform reaper about the reparented * zombie, since wait(2) has something * new to report. Guarantee queueing * of the SIGCHLD signal, similar to * the _exit() behaviour, by providing * our ksiginfo. Ksi is freed by the * signal delivery. */ if (q->p_ksi == NULL) { ksi1 = NULL; } else { ksiginfo_copy(q->p_ksi, ksi); ksi->ksi_flags |= KSI_INS; ksi1 = ksi; ksi = NULL; } PROC_LOCK(q->p_reaper); pksignal(q->p_reaper, SIGCHLD, ksi1); PROC_UNLOCK(q->p_reaper); } } else { /* * Traced processes are killed since their existence * means someone is screwing up. 
*/ t = proc_realparent(q); if (t == p) { proc_reparent(q, q->p_reaper); } else { PROC_LOCK(t); proc_reparent(q, t); PROC_UNLOCK(t); } /* * Since q was found on our children list, the * proc_reparent() call moved q to the orphan * list due to present P_TRACED flag. Clear * orphan link for q now while q is locked. */ clear_orphan(q); q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE); q->p_flag2 &= ~P2_PTRACE_FSTP; q->p_ptevents = 0; FOREACH_THREAD_IN_PROC(q, tdt) { tdt->td_dbgflags &= ~(TDB_SUSPEND | TDB_XSIG | TDB_FSTP); } kern_psignal(q, SIGKILL); } PROC_UNLOCK(q); if (ksi != NULL) ksiginfo_free(ksi); } /* * Also get rid of our orphans. */ while ((q = LIST_FIRST(&p->p_orphans)) != NULL) { PROC_LOCK(q); CTR2(KTR_PTRACE, "exit: pid %d, clearing orphan %d", p->p_pid, q->p_pid); clear_orphan(q); PROC_UNLOCK(q); } /* Save exit status. */ PROC_LOCK(p); p->p_xthread = td; /* Tell the prison that we are gone. */ prison_proc_free(p->p_ucred->cr_prison); #ifdef KDTRACE_HOOKS /* * Tell the DTrace fasttrap provider about the exit if it * has declared an interest. */ if (dtrace_fasttrap_exit) dtrace_fasttrap_exit(p); #endif /* * Notify interested parties of our demise. */ KNOTE_LOCKED(p->p_klist, NOTE_EXIT); #ifdef KDTRACE_HOOKS int reason = CLD_EXITED; if (WCOREDUMP(signo)) reason = CLD_DUMPED; else if (WIFSIGNALED(signo)) reason = CLD_KILLED; SDT_PROBE1(proc, , , exit, reason); #endif /* * If this is a process with a descriptor, we may not need to deliver * a signal to the parent. proctree_lock is held over * procdesc_exit() to serialize concurrent calls to close() and * exit(). */ if (p->p_procdesc == NULL || procdesc_exit(p)) { /* * Notify parent that we're gone. If parent has the * PS_NOCLDWAIT flag set, or if the handler is set to SIG_IGN, * notify process 1 instead (and hope it will handle this * situation). */ PROC_LOCK(p->p_pptr); mtx_lock(&p->p_pptr->p_sigacts->ps_mtx); if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) { struct proc *pp; mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx); pp = p->p_pptr; PROC_UNLOCK(pp); proc_reparent(p, p->p_reaper); p->p_sigparent = SIGCHLD; PROC_LOCK(p->p_pptr); /* * Notify parent, so in case he was wait(2)ing or * executing waitpid(2) with our pid, he will * continue. */ wakeup(pp); } else mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx); if (p->p_pptr == p->p_reaper || p->p_pptr == initproc) childproc_exited(p); else if (p->p_sigparent != 0) { if (p->p_sigparent == SIGCHLD) childproc_exited(p); else /* LINUX thread */ kern_psignal(p->p_pptr, p->p_sigparent); } } else PROC_LOCK(p->p_pptr); sx_xunlock(&proctree_lock); /* * The state PRS_ZOMBIE prevents other proesses from sending * signal to the process, to avoid memory leak, we free memory * for signal queue at the time when the state is set. */ sigqueue_flush(&p->p_sigqueue); sigqueue_flush(&td->td_sigqueue); /* * We have to wait until after acquiring all locks before * changing p_state. We need to avoid all possible context * switches (including ones from blocking on a mutex) while * marked as a zombie. We also have to set the zombie state * before we release the parent process' proc lock to avoid * a lost wakeup. So, we first call wakeup, then we grab the * sched lock, update the state, and release the parent process' * proc lock. */ wakeup(p->p_pptr); cv_broadcast(&p->p_pwait); sched_exit(p->p_pptr, td); PROC_SLOCK(p); p->p_state = PRS_ZOMBIE; PROC_UNLOCK(p->p_pptr); /* * Save our children's rusage information in our exit rusage. 
*/ PROC_STATLOCK(p); ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux); PROC_STATUNLOCK(p); /* * Make sure the scheduler takes this thread out of its tables etc. * This will also release this thread's reference to the ucred. * Other thread parts to release include pcb bits and such. */ thread_exit(); } #ifndef _SYS_SYSPROTO_H_ struct abort2_args { char *why; int nargs; void **args; }; #endif int sys_abort2(struct thread *td, struct abort2_args *uap) { struct proc *p = td->td_proc; struct sbuf *sb; void *uargs[16]; int error, i, sig; /* * Do it right now so we can log either proper call of abort2(), or * note, that invalid argument was passed. 512 is big enough to * handle 16 arguments' descriptions with additional comments. */ sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN); sbuf_clear(sb); sbuf_printf(sb, "%s(pid %d uid %d) aborted: ", p->p_comm, p->p_pid, td->td_ucred->cr_uid); /* * Since we can't return from abort2(), send SIGKILL in cases, where * abort2() was called improperly */ sig = SIGKILL; /* Prevent from DoSes from user-space. */ if (uap->nargs < 0 || uap->nargs > 16) goto out; if (uap->nargs > 0) { if (uap->args == NULL) goto out; error = copyin(uap->args, uargs, uap->nargs * sizeof(void *)); if (error != 0) goto out; } /* * Limit size of 'reason' string to 128. Will fit even when * maximal number of arguments was chosen to be logged. */ if (uap->why != NULL) { error = sbuf_copyin(sb, uap->why, 128); if (error < 0) goto out; } else { sbuf_printf(sb, "(null)"); } if (uap->nargs > 0) { sbuf_printf(sb, "("); for (i = 0;i < uap->nargs; i++) sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]); sbuf_printf(sb, ")"); } /* * Final stage: arguments were proper, string has been * successfully copied from userspace, and copying pointers * from user-space succeed. */ sig = SIGABRT; out: if (sig == SIGKILL) { sbuf_trim(sb); sbuf_printf(sb, " (Reason text inaccessible)"); } sbuf_cat(sb, "\n"); sbuf_finish(sb); log(LOG_INFO, "%s", sbuf_data(sb)); sbuf_delete(sb); exit1(td, 0, sig); return (0); } #ifdef COMPAT_43 /* * The dirty work is handled by kern_wait(). */ int owait(struct thread *td, struct owait_args *uap __unused) { int error, status; error = kern_wait(td, WAIT_ANY, &status, 0, NULL); if (error == 0) td->td_retval[1] = status; return (error); } #endif /* COMPAT_43 */ /* * The dirty work is handled by kern_wait(). */ int sys_wait4(struct thread *td, struct wait4_args *uap) { struct rusage ru, *rup; int error, status; if (uap->rusage != NULL) rup = &ru; else rup = NULL; error = kern_wait(td, uap->pid, &status, uap->options, rup); if (uap->status != NULL && error == 0 && td->td_retval[0] != 0) error = copyout(&status, uap->status, sizeof(status)); if (uap->rusage != NULL && error == 0 && td->td_retval[0] != 0) error = copyout(&ru, uap->rusage, sizeof(struct rusage)); return (error); } int sys_wait6(struct thread *td, struct wait6_args *uap) { struct __wrusage wru, *wrup; siginfo_t si, *sip; idtype_t idtype; id_t id; int error, status; idtype = uap->idtype; id = uap->id; if (uap->wrusage != NULL) wrup = &wru; else wrup = NULL; if (uap->info != NULL) { sip = &si; bzero(sip, sizeof(*sip)); } else sip = NULL; /* * We expect all callers of wait6() to know about WEXITED and * WTRAPPED. 
*/ error = kern_wait6(td, idtype, id, &status, uap->options, wrup, sip); if (uap->status != NULL && error == 0 && td->td_retval[0] != 0) error = copyout(&status, uap->status, sizeof(status)); if (uap->wrusage != NULL && error == 0 && td->td_retval[0] != 0) error = copyout(&wru, uap->wrusage, sizeof(wru)); if (uap->info != NULL && error == 0) error = copyout(&si, uap->info, sizeof(si)); return (error); } /* * Reap the remains of a zombie process and optionally return status and * rusage. Asserts and will release both the proctree_lock and the process * lock as part of its work. */ void proc_reap(struct thread *td, struct proc *p, int *status, int options) { struct proc *q, *t; sx_assert(&proctree_lock, SA_XLOCKED); PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); KASSERT(p->p_state == PRS_ZOMBIE, ("proc_reap: !PRS_ZOMBIE")); q = td->td_proc; PROC_SUNLOCK(p); if (status) *status = KW_EXITCODE(p->p_xexit, p->p_xsig); if (options & WNOWAIT) { /* * Only poll, returning the status. Caller does not wish to * release the proc struct just yet. */ PROC_UNLOCK(p); sx_xunlock(&proctree_lock); return; } PROC_LOCK(q); sigqueue_take(p->p_ksi); PROC_UNLOCK(q); /* * If we got the child via a ptrace 'attach', we need to give it back * to the old parent. */ if (p->p_oppid != 0 && p->p_oppid != p->p_pptr->p_pid) { PROC_UNLOCK(p); t = proc_realparent(p); PROC_LOCK(t); PROC_LOCK(p); CTR2(KTR_PTRACE, "wait: traced child %d moved back to parent %d", p->p_pid, t->p_pid); proc_reparent(p, t); p->p_oppid = 0; PROC_UNLOCK(p); pksignal(t, SIGCHLD, p->p_ksi); wakeup(t); cv_broadcast(&p->p_pwait); PROC_UNLOCK(t); sx_xunlock(&proctree_lock); return; } p->p_oppid = 0; PROC_UNLOCK(p); /* * Remove other references to this process to ensure we have an * exclusive reference. */ sx_xlock(&allproc_lock); LIST_REMOVE(p, p_list); /* off zombproc */ sx_xunlock(&allproc_lock); LIST_REMOVE(p, p_sibling); reaper_abandon_children(p, true); LIST_REMOVE(p, p_reapsibling); PROC_LOCK(p); clear_orphan(p); PROC_UNLOCK(p); leavepgrp(p); if (p->p_procdesc != NULL) procdesc_reap(p); sx_xunlock(&proctree_lock); PROC_LOCK(p); knlist_detach(p->p_klist); p->p_klist = NULL; PROC_UNLOCK(p); /* * Removal from allproc list and process group list paired with * PROC_LOCK which was executed during that time should guarantee * nothing can reach this process anymore. As such further locking * is unnecessary. */ p->p_xexit = p->p_xsig = 0; /* XXX: why? */ PROC_LOCK(q); ruadd(&q->p_stats->p_cru, &q->p_crux, &p->p_ru, &p->p_rux); PROC_UNLOCK(q); /* * Decrement the count of procs running with this uid. */ (void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0); /* * Destroy resource accounting information associated with the process. */ #ifdef RACCT if (racct_enable) { PROC_LOCK(p); racct_sub(p, RACCT_NPROC, 1); PROC_UNLOCK(p); } #endif racct_proc_exit(p); /* * Free credentials, arguments, and sigacts. */ crfree(p->p_ucred); proc_set_cred(p, NULL); pargs_drop(p->p_args); p->p_args = NULL; sigacts_free(p->p_sigacts); p->p_sigacts = NULL; /* * Do any thread-system specific cleanups. */ thread_wait(p); /* * Give vm and machine-dependent layer a chance to free anything that * cpu_exit couldn't release while still running in process context. */ vm_waitproc(p); #ifdef MAC mac_proc_destroy(p); #endif - /* - * Free any domain policy that's still hiding around. 
- */ - vm_domain_policy_cleanup(&p->p_vm_dom_policy); KASSERT(FIRST_THREAD_IN_PROC(p), ("proc_reap: no residual thread!")); uma_zfree(proc_zone, p); atomic_add_int(&nprocs, -1); } static int proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo, int check_only) { struct rusage *rup; sx_assert(&proctree_lock, SA_XLOCKED); PROC_LOCK(p); switch (idtype) { case P_ALL: if (p->p_procdesc != NULL) { PROC_UNLOCK(p); return (0); } break; case P_PID: if (p->p_pid != (pid_t)id) { PROC_UNLOCK(p); return (0); } break; case P_PGID: if (p->p_pgid != (pid_t)id) { PROC_UNLOCK(p); return (0); } break; case P_SID: if (p->p_session->s_sid != (pid_t)id) { PROC_UNLOCK(p); return (0); } break; case P_UID: if (p->p_ucred->cr_uid != (uid_t)id) { PROC_UNLOCK(p); return (0); } break; case P_GID: if (p->p_ucred->cr_gid != (gid_t)id) { PROC_UNLOCK(p); return (0); } break; case P_JAILID: if (p->p_ucred->cr_prison->pr_id != (int)id) { PROC_UNLOCK(p); return (0); } break; /* * It seems that the thread structures get zeroed out * at process exit. This makes it impossible to * support P_SETID, P_CID or P_CPUID. */ default: PROC_UNLOCK(p); return (0); } if (p_canwait(td, p)) { PROC_UNLOCK(p); return (0); } if (((options & WEXITED) == 0) && (p->p_state == PRS_ZOMBIE)) { PROC_UNLOCK(p); return (0); } /* * This special case handles a kthread spawned by linux_clone * (see linux_misc.c). The linux_wait4 and linux_waitpid * functions need to be able to distinguish between waiting * on a process and waiting on a thread. It is a thread if * p_sigparent is not SIGCHLD, and the WLINUXCLONE option * signifies we want to wait for threads and not processes. */ if ((p->p_sigparent != SIGCHLD) ^ ((options & WLINUXCLONE) != 0)) { PROC_UNLOCK(p); return (0); } if (siginfo != NULL) { bzero(siginfo, sizeof(*siginfo)); siginfo->si_errno = 0; /* * SUSv4 requires that the si_signo value is always * SIGCHLD. Obey it despite the rfork(2) interface * allows to request other signal for child exit * notification. */ siginfo->si_signo = SIGCHLD; /* * This is still a rough estimate. We will fix the * cases TRAPPED, STOPPED, and CONTINUED later. */ if (WCOREDUMP(p->p_xsig)) { siginfo->si_code = CLD_DUMPED; siginfo->si_status = WTERMSIG(p->p_xsig); } else if (WIFSIGNALED(p->p_xsig)) { siginfo->si_code = CLD_KILLED; siginfo->si_status = WTERMSIG(p->p_xsig); } else { siginfo->si_code = CLD_EXITED; siginfo->si_status = p->p_xexit; } siginfo->si_pid = p->p_pid; siginfo->si_uid = p->p_ucred->cr_uid; /* * The si_addr field would be useful additional * detail, but apparently the PC value may be lost * when we reach this point. bzero() above sets * siginfo->si_addr to NULL. */ } /* * There should be no reason to limit resources usage info to * exited processes only. A snapshot about any resources used * by a stopped process may be exactly what is needed. */ if (wrusage != NULL) { rup = &wrusage->wru_self; *rup = p->p_ru; PROC_STATLOCK(p); calcru(p, &rup->ru_utime, &rup->ru_stime); PROC_STATUNLOCK(p); rup = &wrusage->wru_children; *rup = p->p_stats->p_cru; calccru(p, &rup->ru_utime, &rup->ru_stime); } if (p->p_state == PRS_ZOMBIE && !check_only) { PROC_SLOCK(p); proc_reap(td, p, status, options); return (-1); } return (1); } int kern_wait(struct thread *td, pid_t pid, int *status, int options, struct rusage *rusage) { struct __wrusage wru, *wrup; idtype_t idtype; id_t id; int ret; /* * Translate the special pid values into the (idtype, pid) * pair for kern_wait6. 
The WAIT_MYPGRP case is handled by * kern_wait6() on its own. */ if (pid == WAIT_ANY) { idtype = P_ALL; id = 0; } else if (pid < 0) { idtype = P_PGID; id = (id_t)-pid; } else { idtype = P_PID; id = (id_t)pid; } if (rusage != NULL) wrup = &wru; else wrup = NULL; /* * For backward compatibility we implicitly add flags WEXITED * and WTRAPPED here. */ options |= WEXITED | WTRAPPED; ret = kern_wait6(td, idtype, id, status, options, wrup, NULL); if (rusage != NULL) *rusage = wru.wru_self; return (ret); } static void report_alive_proc(struct thread *td, struct proc *p, siginfo_t *siginfo, int *status, int options, int si_code) { bool cont; PROC_LOCK_ASSERT(p, MA_OWNED); sx_assert(&proctree_lock, SA_XLOCKED); MPASS(si_code == CLD_TRAPPED || si_code == CLD_STOPPED || si_code == CLD_CONTINUED); cont = si_code == CLD_CONTINUED; if ((options & WNOWAIT) == 0) { if (cont) p->p_flag &= ~P_CONTINUED; else p->p_flag |= P_WAITED; PROC_LOCK(td->td_proc); sigqueue_take(p->p_ksi); PROC_UNLOCK(td->td_proc); } sx_xunlock(&proctree_lock); if (siginfo != NULL) { siginfo->si_code = si_code; siginfo->si_status = cont ? SIGCONT : p->p_xsig; } if (status != NULL) *status = cont ? SIGCONT : W_STOPCODE(p->p_xsig); PROC_UNLOCK(p); td->td_retval[0] = p->p_pid; } int kern_wait6(struct thread *td, idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo) { struct proc *p, *q; pid_t pid; int error, nfound, ret; AUDIT_ARG_VALUE((int)idtype); /* XXX - This is likely wrong! */ AUDIT_ARG_PID((pid_t)id); /* XXX - This may be wrong! */ AUDIT_ARG_VALUE(options); q = td->td_proc; if ((pid_t)id == WAIT_MYPGRP && (idtype == P_PID || idtype == P_PGID)) { PROC_LOCK(q); id = (id_t)q->p_pgid; PROC_UNLOCK(q); idtype = P_PGID; } /* If we don't know the option, just return. */ if ((options & ~(WUNTRACED | WNOHANG | WCONTINUED | WNOWAIT | WEXITED | WTRAPPED | WLINUXCLONE)) != 0) return (EINVAL); if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) { /* * We will be unable to find any matching processes, * because there are no known events to look for. * Prefer to return error instead of blocking * indefinitely. */ return (EINVAL); } loop: if (q->p_flag & P_STATCHILD) { PROC_LOCK(q); q->p_flag &= ~P_STATCHILD; PROC_UNLOCK(q); } nfound = 0; sx_xlock(&proctree_lock); LIST_FOREACH(p, &q->p_children, p_sibling) { pid = p->p_pid; ret = proc_to_reap(td, p, idtype, id, status, options, wrusage, siginfo, 0); if (ret == 0) continue; else if (ret == 1) nfound++; else { td->td_retval[0] = pid; return (0); } PROC_LOCK_ASSERT(p, MA_OWNED); if ((options & (WTRAPPED | WUNTRACED)) != 0) PROC_SLOCK(p); if ((options & WTRAPPED) != 0 && (p->p_flag & P_TRACED) != 0 && (p->p_flag & (P_STOPPED_TRACE | P_STOPPED_SIG)) != 0 && p->p_suspcount == p->p_numthreads && (p->p_flag & P_WAITED) == 0) { PROC_SUNLOCK(p); CTR4(KTR_PTRACE, "wait: returning trapped pid %d status %#x " "(xstat %d) xthread %d", p->p_pid, W_STOPCODE(p->p_xsig), p->p_xsig, p->p_xthread != NULL ? 
p->p_xthread->td_tid : -1); report_alive_proc(td, p, siginfo, status, options, CLD_TRAPPED); return (0); } if ((options & WUNTRACED) != 0 && (p->p_flag & P_STOPPED_SIG) != 0 && p->p_suspcount == p->p_numthreads && (p->p_flag & P_WAITED) == 0) { PROC_SUNLOCK(p); report_alive_proc(td, p, siginfo, status, options, CLD_STOPPED); return (0); } if ((options & (WTRAPPED | WUNTRACED)) != 0) PROC_SUNLOCK(p); if ((options & WCONTINUED) != 0 && (p->p_flag & P_CONTINUED) != 0) { report_alive_proc(td, p, siginfo, status, options, CLD_CONTINUED); return (0); } PROC_UNLOCK(p); } /* * Look in the orphans list too, to allow the parent to * collect it's child exit status even if child is being * debugged. * * Debugger detaches from the parent upon successful * switch-over from parent to child. At this point due to * re-parenting the parent loses the child to debugger and a * wait4(2) call would report that it has no children to wait * for. By maintaining a list of orphans we allow the parent * to successfully wait until the child becomes a zombie. */ if (nfound == 0) { LIST_FOREACH(p, &q->p_orphans, p_orphan) { ret = proc_to_reap(td, p, idtype, id, NULL, options, NULL, NULL, 1); if (ret != 0) { KASSERT(ret != -1, ("reaped an orphan (pid %d)", (int)td->td_retval[0])); PROC_UNLOCK(p); nfound++; break; } } } if (nfound == 0) { sx_xunlock(&proctree_lock); return (ECHILD); } if (options & WNOHANG) { sx_xunlock(&proctree_lock); td->td_retval[0] = 0; return (0); } PROC_LOCK(q); sx_xunlock(&proctree_lock); if (q->p_flag & P_STATCHILD) { q->p_flag &= ~P_STATCHILD; error = 0; } else error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0); PROC_UNLOCK(q); if (error) return (error); goto loop; } /* * Make process 'parent' the new parent of process 'child'. * Must be called with an exclusive hold of proctree lock. */ void proc_reparent(struct proc *child, struct proc *parent) { sx_assert(&proctree_lock, SX_XLOCKED); PROC_LOCK_ASSERT(child, MA_OWNED); if (child->p_pptr == parent) return; PROC_LOCK(child->p_pptr); sigqueue_take(child->p_ksi); PROC_UNLOCK(child->p_pptr); LIST_REMOVE(child, p_sibling); LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); clear_orphan(child); if (child->p_flag & P_TRACED) { if (LIST_EMPTY(&child->p_pptr->p_orphans)) { child->p_treeflag |= P_TREE_FIRST_ORPHAN; LIST_INSERT_HEAD(&child->p_pptr->p_orphans, child, p_orphan); } else { LIST_INSERT_AFTER(LIST_FIRST(&child->p_pptr->p_orphans), child, p_orphan); } child->p_treeflag |= P_TREE_ORPHANED; } child->p_pptr = parent; } Index: user/jeff/numa/sys/kern/kern_fork.c =================================================================== --- user/jeff/numa/sys/kern/kern_fork.c (revision 326888) +++ user/jeff/numa/sys/kern/kern_fork.c (revision 326889) @@ -1,1126 +1,1118 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include dtrace_fork_func_t dtrace_fasttrap_fork; #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int"); #ifndef _SYS_SYSPROTO_H_ struct fork_args { int dummy; }; #endif EVENTHANDLER_LIST_DECLARE(process_fork); /* ARGSUSED */ int sys_fork(struct thread *td, struct fork_args *uap) { struct fork_req fr; int error, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } /* ARGUSED */ int sys_pdfork(struct thread *td, struct pdfork_args *uap) { struct fork_req fr; int error, fd, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFPROCDESC; fr.fr_pidp = &pid; fr.fr_pd_fd = &fd; fr.fr_pd_flags = uap->flags; /* * It is necessary to return fd by reference because 0 is a valid file * descriptor number, and the child needs to be able to distinguish * itself from the parent using the return value. */ error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; error = copyout(&fd, uap->fdp, sizeof(fd)); } return (error); } /* ARGSUSED */ int sys_vfork(struct thread *td, struct vfork_args *uap) { struct fork_req fr; int error, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } int sys_rfork(struct thread *td, struct rfork_args *uap) { struct fork_req fr; int error, pid; /* Don't allow kernel-only flags. 
*/ if ((uap->flags & RFKERNELONLY) != 0) return (EINVAL); AUDIT_ARG_FFLAGS(uap->flags); bzero(&fr, sizeof(fr)); fr.fr_flags = uap->flags; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } int nprocs = 1; /* process 0 */ int lastpid = 0; SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, "Last used PID"); /* * Random component to lastpid generation. We mix in a random factor to make * it a little harder to predict. We sanity check the modulus value to avoid * doing it in critical paths. Don't let it be too small or we pointlessly * waste randomness entropy, and don't let it be impossibly large. Using a * modulus that is too big causes a LOT more process table scans and slows * down fork processing as the pidchecked caching is defeated. */ static int randompid = 0; static int sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) { int error, pid; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error != 0) return(error); sx_xlock(&allproc_lock); pid = randompid; error = sysctl_handle_int(oidp, &pid, 0, req); if (error == 0 && req->newptr != NULL) { if (pid == 0) randompid = 0; else if (pid == 1) /* generate a random PID modulus between 100 and 1123 */ randompid = 100 + arc4random() % 1024; else if (pid < 0 || pid > pid_max - 100) /* out of range */ randompid = pid_max - 100; else if (pid < 100) /* Make it reasonable */ randompid = 100; else randompid = pid; } sx_xunlock(&allproc_lock); return (error); } SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_kern_randompid, "I", "Random PID modulus. Special values: 0: disable, 1: choose random value"); static int fork_findpid(int flags) { struct proc *p; int trypid; static int pidchecked = 0; /* * Requires allproc_lock in order to iterate over the list * of processes, and proctree_lock to access p_pgrp. */ sx_assert(&allproc_lock, SX_LOCKED); sx_assert(&proctree_lock, SX_LOCKED); /* * Find an unused process ID. We remember a range of unused IDs * ready to use (from lastpid+1 through pidchecked-1). * * If RFHIGHPID is set (used during system boot), do not allocate * low-numbered pids. */ trypid = lastpid + 1; if (flags & RFHIGHPID) { if (trypid < 10) trypid = 10; } else { if (randompid) trypid += arc4random() % randompid; } retry: /* * If the process ID prototype has wrapped around, * restart somewhat above 0, as the low-numbered procs * tend to include daemons that don't exit. */ if (trypid >= pid_max) { trypid = trypid % pid_max; if (trypid < 100) trypid += 100; pidchecked = 0; } if (trypid >= pidchecked) { int doingzomb = 0; pidchecked = PID_MAX; /* * Scan the active and zombie procs to check whether this pid * is in use. Remember the lowest pid that's greater * than trypid, so we can avoid checking for a while. * * Avoid reuse of the process group id, session id or * the reaper subtree id. Note that for process group * and sessions, the amount of reserved pids is * limited by process limit. For the subtree ids, the * id is kept reserved only while there is a * non-reaped process in the subtree, so amount of * reserved pids is limited by process limit times * two. 
*/ p = LIST_FIRST(&allproc); again: for (; p != NULL; p = LIST_NEXT(p, p_list)) { while (p->p_pid == trypid || p->p_reapsubtree == trypid || (p->p_pgrp != NULL && (p->p_pgrp->pg_id == trypid || (p->p_session != NULL && p->p_session->s_sid == trypid)))) { trypid++; if (trypid >= pidchecked) goto retry; } if (p->p_pid > trypid && pidchecked > p->p_pid) pidchecked = p->p_pid; if (p->p_pgrp != NULL) { if (p->p_pgrp->pg_id > trypid && pidchecked > p->p_pgrp->pg_id) pidchecked = p->p_pgrp->pg_id; if (p->p_session != NULL && p->p_session->s_sid > trypid && pidchecked > p->p_session->s_sid) pidchecked = p->p_session->s_sid; } } if (!doingzomb) { doingzomb = 1; p = LIST_FIRST(&zombproc); goto again; } } /* * RFHIGHPID does not mess with the lastpid counter during boot. */ if (flags & RFHIGHPID) pidchecked = 0; else lastpid = trypid; return (trypid); } static int fork_norfproc(struct thread *td, int flags) { int error; struct proc *p1; KASSERT((flags & RFPROC) == 0, ("fork_norfproc called with RFPROC set")); p1 = td->td_proc; if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && (flags & (RFCFDG | RFFDG))) { PROC_LOCK(p1); if (thread_single(p1, SINGLE_BOUNDARY)) { PROC_UNLOCK(p1); return (ERESTART); } PROC_UNLOCK(p1); } error = vm_forkproc(td, NULL, NULL, NULL, flags); if (error) goto fail; /* * Close all file descriptors. */ if (flags & RFCFDG) { struct filedesc *fdtmp; fdtmp = fdinit(td->td_proc->p_fd, false); fdescfree(td); p1->p_fd = fdtmp; } /* * Unshare file descriptors (from parent). */ if (flags & RFFDG) fdunshare(td); fail: if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && (flags & (RFCFDG | RFFDG))) { PROC_LOCK(p1); thread_single_end(p1, SINGLE_BOUNDARY); PROC_UNLOCK(p1); } return (error); } static void do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2, struct vmspace *vm2, struct file *fp_procdesc) { struct proc *p1, *pptr; int trypid; struct filedesc *fd; struct filedesc_to_leader *fdtol; struct sigacts *newsigacts; sx_assert(&proctree_lock, SX_SLOCKED); sx_assert(&allproc_lock, SX_XLOCKED); p1 = td->td_proc; trypid = fork_findpid(fr->fr_flags); sx_sunlock(&proctree_lock); p2->p_state = PRS_NEW; /* protect against others */ p2->p_pid = trypid; AUDIT_ARG_PID(p2->p_pid); LIST_INSERT_HEAD(&allproc, p2, p_list); allproc_gen++; LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); tidhash_add(td2); PROC_LOCK(p2); PROC_LOCK(p1); sx_xunlock(&allproc_lock); bcopy(&p1->p_startcopy, &p2->p_startcopy, __rangeof(struct proc, p_startcopy, p_endcopy)); pargs_hold(p2->p_args); PROC_UNLOCK(p1); bzero(&p2->p_startzero, __rangeof(struct proc, p_startzero, p_endzero)); /* Tell the prison that we exist. */ prison_proc_hold(p2->p_ucred->cr_prison); PROC_UNLOCK(p2); /* * Malloc things while we don't hold any locks. */ if (fr->fr_flags & RFSIGSHARE) newsigacts = NULL; else newsigacts = sigacts_alloc(); /* * Copy filedesc. */ if (fr->fr_flags & RFCFDG) { fd = fdinit(p1->p_fd, false); fdtol = NULL; } else if (fr->fr_flags & RFFDG) { fd = fdcopy(p1->p_fd); fdtol = NULL; } else { fd = fdshare(p1->p_fd); if (p1->p_fdtol == NULL) p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL, p1->p_leader); if ((fr->fr_flags & RFTHREAD) != 0) { /* * Shared file descriptor table, and shared * process leaders. */ fdtol = p1->p_fdtol; FILEDESC_XLOCK(p1->p_fd); fdtol->fdl_refcount++; FILEDESC_XUNLOCK(p1->p_fd); } else { /* * Shared file descriptor table, and different * process leaders. 
*/ fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p1->p_fd, p2); } } /* * Make a proc table entry for the new process. * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ PROC_LOCK(p2); PROC_LOCK(p1); bzero(&td2->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td2->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name)); td2->td_sigstk = td->td_sigstk; td2->td_flags = TDF_INMEM; td2->td_lend_user_pri = PRI_MAX; #ifdef VIMAGE td2->td_vnet = NULL; td2->td_vnet_lpush = NULL; #endif /* * Allow the scheduler to initialize the child. */ thread_lock(td); sched_fork(td, td2); thread_unlock(td); /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. */ p2->p_flag = P_INMEM; p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP); p2->p_swtick = ticks; if (p1->p_flag & P_PROFIL) startprofclock(p2); - /* - * Whilst the proc lock is held, copy the VM domain data out - * using the VM domain method. - */ - vm_domain_policy_init(&p2->p_vm_dom_policy); - vm_domain_policy_localcopy(&p2->p_vm_dom_policy, - &p1->p_vm_dom_policy); - if (fr->fr_flags & RFSIGSHARE) { p2->p_sigacts = sigacts_hold(p1->p_sigacts); } else { sigacts_copy(newsigacts, p1->p_sigacts); p2->p_sigacts = newsigacts; } if (fr->fr_flags & RFTSIGZMB) p2->p_sigparent = RFTSIGNUM(fr->fr_flags); else if (fr->fr_flags & RFLINUXTHPN) p2->p_sigparent = SIGUSR1; else p2->p_sigparent = SIGCHLD; p2->p_textvp = p1->p_textvp; p2->p_fd = fd; p2->p_fdtol = fdtol; if (p1->p_flag2 & P2_INHERIT_PROTECTED) { p2->p_flag |= P_PROTECTED; p2->p_flag2 |= P2_INHERIT_PROTECTED; } /* * p_limit is copy-on-write. Bump its refcount. */ lim_fork(p1, p2); thread_cow_get_proc(td2, p2); pstats_fork(p1->p_stats, p2->p_stats); PROC_UNLOCK(p1); PROC_UNLOCK(p2); /* Bump references to the text vnode (for procfs). */ if (p2->p_textvp) vrefact(p2->p_textvp); /* * Set up linkage for kernel based threading. */ if ((fr->fr_flags & RFTHREAD) != 0) { mtx_lock(&ppeers_lock); p2->p_peers = p1->p_peers; p1->p_peers = p2; p2->p_leader = p1->p_leader; mtx_unlock(&ppeers_lock); PROC_LOCK(p1->p_leader); if ((p1->p_leader->p_flag & P_WEXIT) != 0) { PROC_UNLOCK(p1->p_leader); /* * The task leader is exiting, so process p1 is * going to be killed shortly. Since p1 obviously * isn't dead yet, we know that the leader is either * sending SIGKILL's to all the processes in this * task or is sleeping waiting for all the peers to * exit. We let p1 complete the fork, but we need * to go ahead and kill the new process p2 since * the task leader may not get a chance to send * SIGKILL to it. We leave it on the list so that * the task leader will wait for this new process * to commit suicide. */ PROC_LOCK(p2); kern_psignal(p2, SIGKILL); PROC_UNLOCK(p2); } else PROC_UNLOCK(p1->p_leader); } else { p2->p_peers = NULL; p2->p_leader = p2; } sx_xlock(&proctree_lock); PGRP_LOCK(p1->p_pgrp); PROC_LOCK(p2); PROC_LOCK(p1); /* * Preserve some more flags in subprocess. P_PROFIL has already * been preserved. 
*/ p2->p_flag |= p1->p_flag & P_SUGID; td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING; SESS_LOCK(p1->p_session); if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) p2->p_flag |= P_CONTROLT; SESS_UNLOCK(p1->p_session); if (fr->fr_flags & RFPPWAIT) p2->p_flag |= P_PPWAIT; p2->p_pgrp = p1->p_pgrp; LIST_INSERT_AFTER(p1, p2, p_pglist); PGRP_UNLOCK(p1->p_pgrp); LIST_INIT(&p2->p_children); LIST_INIT(&p2->p_orphans); callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0); /* * If PF_FORK is set, the child process inherits the * procfs ioctl flags from its parent. */ if (p1->p_pfsflags & PF_FORK) { p2->p_stops = p1->p_stops; p2->p_pfsflags = p1->p_pfsflags; } /* * This begins the section where we must prevent the parent * from being swapped. */ _PHOLD(p1); PROC_UNLOCK(p1); /* * Attach the new process to its parent. * * If RFNOWAIT is set, the newly created process becomes a child * of init. This effectively disassociates the child from the * parent. */ if ((fr->fr_flags & RFNOWAIT) != 0) { pptr = p1->p_reaper; p2->p_reaper = pptr; } else { p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ? p1 : p1->p_reaper; pptr = p1; } p2->p_pptr = pptr; LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); LIST_INIT(&p2->p_reaplist); LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling); if (p2->p_reaper == p1) p2->p_reapsubtree = p2->p_pid; sx_xunlock(&proctree_lock); /* Inform accounting that we have forked. */ p2->p_acflag = AFORK; PROC_UNLOCK(p2); #ifdef KTRACE ktrprocfork(p1, p2); #endif /* * Finish creating the child process. It will return via a different * execution path later. (ie: directly into user mode) */ vm_forkproc(td, p2, td2, vm2, fr->fr_flags); if (fr->fr_flags == (RFFDG | RFPROC)) { VM_CNT_INC(v_forks); VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { VM_CNT_INC(v_vforks); VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (p1 == &proc0) { VM_CNT_INC(v_kthreads); VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else { VM_CNT_INC(v_rforks); VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } /* * Associate the process descriptor with the process before anything * can happen that might cause that process to need the descriptor. * However, don't do this until after fork(2) can no longer fail. */ if (fr->fr_flags & RFPROCDESC) procdesc_new(p2, fr->fr_pd_flags); /* * Both processes are set up, now check if any loadable modules want * to adjust anything. */ EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags); /* * Set the child start time and mark the process as being complete. */ PROC_LOCK(p2); PROC_LOCK(p1); microuptime(&p2->p_stats->p_start); PROC_SLOCK(p2); p2->p_state = PRS_NORMAL; PROC_SUNLOCK(p2); #ifdef KDTRACE_HOOKS /* * Tell the DTrace fasttrap provider about the new process so that any * tracepoints inherited from the parent can be removed. We have to do * this only after p_state is PRS_NORMAL since the fasttrap module will * use pfind() later on. */ if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork) dtrace_fasttrap_fork(p1, p2); #endif /* * Hold the process so that it cannot exit after we make it runnable, * but before we wait for the debugger. */ _PHOLD(p2); if (p1->p_ptevents & PTRACE_FORK) { /* * Arrange for debugger to receive the fork event. 
* * We can report PL_FLAG_FORKED regardless of * P_FOLLOWFORK settings, but it does not make sense * for a runaway child. */ td->td_dbgflags |= TDB_FORK; td->td_dbg_forked = p2->p_pid; td2->td_dbgflags |= TDB_STOPATFORK; } if (fr->fr_flags & RFPPWAIT) { td->td_pflags |= TDP_RFPPWAIT; td->td_rfppwait_p = p2; td->td_dbgflags |= TDB_VFORK; } PROC_UNLOCK(p2); /* * Now can be swapped. */ _PRELE(p1); PROC_UNLOCK(p1); /* * Tell any interested parties about the new process. */ knote_fork(p1->p_klist, p2->p_pid); SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags); if (fr->fr_flags & RFPROCDESC) { procdesc_finit(p2->p_procdesc, fp_procdesc); fdrop(fp_procdesc, td); } if ((fr->fr_flags & RFSTOPPED) == 0) { /* * If RFSTOPPED not requested, make child runnable and * add to run queue. */ thread_lock(td2); TD_SET_CAN_RUN(td2); sched_add(td2, SRQ_BORING); thread_unlock(td2); if (fr->fr_pidp != NULL) *fr->fr_pidp = p2->p_pid; } else { *fr->fr_procp = p2; } PROC_LOCK(p2); /* * Wait until debugger is attached to child. */ while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0) cv_wait(&p2->p_dbgwait, &p2->p_mtx); _PRELE(p2); racct_proc_fork_done(p2); PROC_UNLOCK(p2); } int fork1(struct thread *td, struct fork_req *fr) { struct proc *p1, *newproc; struct thread *td2; struct vmspace *vm2; struct file *fp_procdesc; vm_ooffset_t mem_charged; int error, nprocs_new, ok; static int curfail; static struct timeval lastfail; int flags, pages; flags = fr->fr_flags; pages = fr->fr_pages; if ((flags & RFSTOPPED) != 0) MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL); else MPASS(fr->fr_procp == NULL); /* Check for the undefined or unimplemented flags. */ if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0) return (EINVAL); /* Signal value requires RFTSIGZMB. */ if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0) return (EINVAL); /* Can't copy and clear. */ if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) return (EINVAL); /* Check the validity of the signal number. */ if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG) return (EINVAL); if ((flags & RFPROCDESC) != 0) { /* Can't get a process descriptor without also creating a process. */ if ((flags & RFPROC) == 0) return (EINVAL); /* Must provide a place to put a procdesc if creating one. */ if (fr->fr_pd_fd == NULL) return (EINVAL); /* Check if we are using supported flags. */ if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0) return (EINVAL); } p1 = td->td_proc; /* * Here we don't create a new process, but we divorce * certain parts of a process from itself. */ if ((flags & RFPROC) == 0) { if (fr->fr_procp != NULL) *fr->fr_procp = NULL; else if (fr->fr_pidp != NULL) *fr->fr_pidp = 0; return (fork_norfproc(td, flags)); } fp_procdesc = NULL; newproc = NULL; vm2 = NULL; /* * Increment the nprocs resource before allocations occur. * Although process entries are dynamically created, we still * keep a global limit on the maximum number we will * create. There are hard-limits as to the number of processes * that can run, established by the KVA and memory usage for * the process data. * * Don't allow a nonprivileged user to use the last ten * processes; don't let root exceed the limit.
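* The slot is reserved up front with atomic_fetchadd_int(); the fail2 path drops the count again if the fork cannot complete.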
*/ nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1; if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred, PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) { error = EAGAIN; sx_xlock(&allproc_lock); if (ppsratecheck(&lastfail, &curfail, 1)) { printf("maxproc limit exceeded by uid %u (pid %d); " "see tuning(7) and login.conf(5)\n", td->td_ucred->cr_ruid, p1->p_pid); } sx_xunlock(&allproc_lock); goto fail2; } /* * If required, create a process descriptor in the parent first; we * will abandon it if something goes wrong. We don't finit() until * later. */ if (flags & RFPROCDESC) { error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd, fr->fr_pd_flags, fr->fr_pd_fcaps); if (error != 0) goto fail2; } mem_charged = 0; if (pages == 0) pages = kstack_pages; /* Allocate new proc. */ newproc = uma_zalloc(proc_zone, M_WAITOK); td2 = FIRST_THREAD_IN_PROC(newproc); if (td2 == NULL) { td2 = thread_alloc(pages); if (td2 == NULL) { error = ENOMEM; goto fail2; } proc_linkup(newproc, td2); } else { if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) { if (td2->td_kstack != 0) vm_thread_dispose(td2); if (!thread_alloc_stack(td2, pages)) { error = ENOMEM; goto fail2; } } } if ((flags & RFMEM) == 0) { vm2 = vmspace_fork(p1->p_vmspace, &mem_charged); if (vm2 == NULL) { error = ENOMEM; goto fail2; } if (!swap_reserve(mem_charged)) { /* * The swap reservation failed. The accounting * from the entries of the copied vm2 will be * subtracted in vmspace_free(), so force the * reservation there. */ swap_reserve_force(mem_charged); error = ENOMEM; goto fail2; } } else vm2 = NULL; /* * XXX: This is ugly; when we copy resource usage, we need to bump * per-cred resource counters. */ proc_set_cred_init(newproc, crhold(td->td_ucred)); /* * Initialize resource accounting for the child process. */ error = racct_proc_fork(p1, newproc); if (error != 0) { error = EAGAIN; goto fail1; } #ifdef MAC mac_proc_init(newproc); #endif newproc->p_klist = knlist_alloc(&newproc->p_mtx); STAILQ_INIT(&newproc->p_ktr); /* We have to lock the process tree while we look for a pid. */ sx_slock(&proctree_lock); sx_xlock(&allproc_lock); /* * Increment the count of procs running with this uid. Don't allow * a nonprivileged user to exceed their current limit. * * XXXRW: Can we avoid privilege here if it's not needed? */ error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0); if (error == 0) ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0); else { ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPROC)); } if (ok) { do_fork(td, fr, newproc, td2, vm2, fp_procdesc); return (0); } error = EAGAIN; sx_sunlock(&proctree_lock); sx_xunlock(&allproc_lock); #ifdef MAC mac_proc_destroy(newproc); #endif racct_proc_exit(newproc); fail1: crfree(newproc->p_ucred); newproc->p_ucred = NULL; fail2: if (vm2 != NULL) vmspace_free(vm2); uma_zfree(proc_zone, newproc); if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) { fdclose(td, fp_procdesc, *fr->fr_pd_fd); fdrop(fp_procdesc, td); } atomic_add_int(&nprocs, -1); pause("fork", hz / 2); return (error); } /* * Handle the return of a child process from fork1(). This function * is called from the MD fork_trampoline() entry point. 
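* Both first-run user processes and kernel threads arrive here; a kernel thread that returns from its main function is caught below and torn down via kthread_exit().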
*/ void fork_exit(void (*callout)(void *, struct trapframe *), void *arg, struct trapframe *frame) { struct proc *p; struct thread *td; struct thread *dtd; td = curthread; p = td->td_proc; KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", td, td_get_sched(td), p->p_pid, td->td_name); sched_fork_exit(td); /* * Processes normally resume in mi_switch() after being * cpu_switch()'ed to, but when children start up they arrive here * instead, so we must do much the same things as mi_switch() would. */ if ((dtd = PCPU_GET(deadthread))) { PCPU_SET(deadthread, NULL); thread_stash(dtd); } thread_unlock(td); /* * cpu_fork_kthread_handler intercepts this function call to * have this call a non-return function to stay in kernel mode. * initproc has its own fork handler, but it does return. */ KASSERT(callout != NULL, ("NULL callout in fork_exit")); callout(arg, frame); /* * Check if a kernel thread misbehaved and returned from its main * function. */ if (p->p_flag & P_KPROC) { printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", td->td_name, p->p_pid); kthread_exit(); } mtx_assert(&Giant, MA_NOTOWNED); if (p->p_sysent->sv_schedtail != NULL) (p->p_sysent->sv_schedtail)(td); td->td_pflags &= ~TDP_FORKING; } /* * Simplified back end of syscall(), used when returning from fork() * directly into user mode. This function is passed in to fork_exit() * as the first parameter and is called when returning to a new * userland process. */ void fork_return(struct thread *td, struct trapframe *frame) { struct proc *p, *dbg; p = td->td_proc; if (td->td_dbgflags & TDB_STOPATFORK) { sx_xlock(&proctree_lock); PROC_LOCK(p); if (p->p_pptr->p_ptevents & PTRACE_FORK) { /* * If debugger still wants auto-attach for the * parent's children, do it now. */ dbg = p->p_pptr->p_pptr; proc_set_traced(p, true); CTR2(KTR_PTRACE, "fork_return: attaching to new child pid %d: oppid %d", p->p_pid, p->p_oppid); proc_reparent(p, dbg); sx_xunlock(&proctree_lock); td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP; ptracestop(td, SIGSTOP, NULL); td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX); } else { /* * ... otherwise clear the request. */ sx_xunlock(&proctree_lock); td->td_dbgflags &= ~TDB_STOPATFORK; cv_broadcast(&p->p_dbgwait); } PROC_UNLOCK(p); } else if (p->p_flag & P_TRACED || td->td_dbgflags & TDB_BORN) { /* * This is the start of a new thread in a traced * process. Report a system call exit event. */ PROC_LOCK(p); td->td_dbgflags |= TDB_SCX; _STOPEVENT(p, S_SCX, td->td_sa.code); if ((p->p_ptevents & PTRACE_SCX) != 0 || (td->td_dbgflags & TDB_BORN) != 0) ptracestop(td, SIGTRAP, NULL); td->td_dbgflags &= ~(TDB_SCX | TDB_BORN); PROC_UNLOCK(p); } userret(td, frame); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) ktrsysret(SYS_fork, 0, 0); #endif } Index: user/jeff/numa/sys/kern/kern_numa.c =================================================================== --- user/jeff/numa/sys/kern/kern_numa.c (revision 326888) +++ user/jeff/numa/sys/kern/kern_numa.c (revision 326889) @@ -1,169 +1,46 @@ /*- * Copyright (c) 2015, Adrian Chadd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - int sys_numa_setaffinity(struct thread *td, struct numa_setaffinity_args *uap) { - int error; - struct vm_domain_policy vp; - struct thread *ttd; - struct proc *p; - struct cpuset *set; - - set = NULL; - p = NULL; - - /* - * Copy in just the policy information into the policy - * struct. Userland only supplies vm_domain_policy_entry. - */ - error = copyin(uap->policy, &vp.p, sizeof(vp.p)); - if (error) - goto out; - - /* - * Ensure the seq number is zero - otherwise seq.h - * may get very confused. - */ - vp.seq = 0; - - /* - * Validate policy. - */ - if (vm_domain_policy_validate(&vp) != 0) { - error = EINVAL; - goto out; - } - - /* - * Go find the desired proc/tid for this operation. - */ - error = cpuset_which(uap->which, uap->id, &p, - &ttd, &set); - if (error) - goto out; - - /* Only handle CPU_WHICH_TID and CPU_WHICH_PID */ - /* - * XXX if cpuset_which is called with WHICH_CPUSET and NULL cpuset, - * it'll return ESRCH. We should just return EINVAL. - */ - switch (uap->which) { - case CPU_WHICH_TID: - vm_domain_policy_copy(&ttd->td_vm_dom_policy, &vp); - break; - case CPU_WHICH_PID: - vm_domain_policy_copy(&p->p_vm_dom_policy, &vp); - break; - default: - error = EINVAL; - break; - } - - PROC_UNLOCK(p); -out: - if (set) - cpuset_rel(set); - return (error); + return (ENOSYS); } int sys_numa_getaffinity(struct thread *td, struct numa_getaffinity_args *uap) { - int error; - struct vm_domain_policy vp; - struct thread *ttd; - struct proc *p; - struct cpuset *set; - - set = NULL; - p = NULL; - - error = cpuset_which(uap->which, uap->id, &p, - &ttd, &set); - if (error) - goto out; - - /* Only handle CPU_WHICH_TID and CPU_WHICH_PID */ - /* - * XXX if cpuset_which is called with WHICH_CPUSET and NULL cpuset, - * it'll return ESRCH. We should just return EINVAL. - */ - switch (uap->which) { - case CPU_WHICH_TID: - vm_domain_policy_localcopy(&vp, &ttd->td_vm_dom_policy); - break; - case CPU_WHICH_PID: - vm_domain_policy_localcopy(&vp, &p->p_vm_dom_policy); - break; - default: - error = EINVAL; - break; - } - if (p) - PROC_UNLOCK(p); - /* - * Copy out only the vm_domain_policy_entry part. 
- */ - if (error == 0) - error = copyout(&vp.p, uap->policy, sizeof(vp.p)); -out: - if (set) - cpuset_rel(set); - return (error); + return (ENOSYS); } Index: user/jeff/numa/sys/kern/kern_thr.c =================================================================== --- user/jeff/numa/sys/kern/kern_thr.c (revision 326888) +++ user/jeff/numa/sys/kern/kern_thr.c (revision 326889) @@ -1,618 +1,612 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_posix.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); static int max_threads_per_proc = 1500; SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW, &max_threads_per_proc, 0, "Limit on threads per proc"); static int max_threads_hits; SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count"); #ifdef COMPAT_FREEBSD32 static inline int suword_lwpid(void *addr, lwpid_t lwpid) { int error; if (SV_CURPROC_FLAG(SV_LP64)) error = suword(addr, lwpid); else error = suword32(addr, lwpid); return (error); } #else #define suword_lwpid suword #endif /* * System call interface. */ struct thr_create_initthr_args { ucontext_t ctx; long *tid; }; static int thr_create_initthr(struct thread *td, void *thunk) { struct thr_create_initthr_args *args; /* Copy out the child tid. 
*/ args = thunk; if (args->tid != NULL && suword_lwpid(args->tid, td->td_tid)) return (EFAULT); return (set_mcontext(td, &args->ctx.uc_mcontext)); } int sys_thr_create(struct thread *td, struct thr_create_args *uap) /* ucontext_t *ctx, long *id, int flags */ { struct thr_create_initthr_args args; int error; if ((error = copyin(uap->ctx, &args.ctx, sizeof(args.ctx)))) return (error); args.tid = uap->id; return (thread_create(td, NULL, thr_create_initthr, &args)); } int sys_thr_new(struct thread *td, struct thr_new_args *uap) /* struct thr_param * */ { struct thr_param param; int error; if (uap->param_size < 0 || uap->param_size > sizeof(param)) return (EINVAL); bzero(&param, sizeof(param)); if ((error = copyin(uap->param, &param, uap->param_size))) return (error); return (kern_thr_new(td, &param)); } static int thr_new_initthr(struct thread *td, void *thunk) { stack_t stack; struct thr_param *param; /* * Here we copy out the tid to two places, one for the child and one * for the parent, because pthread can create a detached thread; if * the parent wants to safely access the child tid, it has to provide * its own storage, because the child thread may exit quickly and the * memory is freed before the parent thread can access it. */ param = thunk; if ((param->child_tid != NULL && suword_lwpid(param->child_tid, td->td_tid)) || (param->parent_tid != NULL && suword_lwpid(param->parent_tid, td->td_tid))) return (EFAULT); /* Set up our machine context. */ stack.ss_sp = param->stack_base; stack.ss_size = param->stack_size; /* Set upcall address to user thread entry function. */ cpu_set_upcall(td, param->start_func, param->arg, &stack); /* Setup user TLS address and TLS pointer register. */ return (cpu_set_user_tls(td, param->tls_base)); } int kern_thr_new(struct thread *td, struct thr_param *param) { struct rtprio rtp, *rtpp; int error; rtpp = NULL; if (param->rtp != 0) { error = copyin(param->rtp, &rtp, sizeof(struct rtprio)); if (error) return (error); rtpp = &rtp; } return (thread_create(td, rtpp, thr_new_initthr, param)); } int thread_create(struct thread *td, struct rtprio *rtp, int (*initialize_thread)(struct thread *, void *), void *thunk) { struct thread *newtd; struct proc *p; int error; p = td->td_proc; if (rtp != NULL) { switch(rtp->type) { case RTP_PRIO_REALTIME: case RTP_PRIO_FIFO: /* Only root can set scheduler policy */ if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0) return (EPERM); if (rtp->prio > RTP_PRIO_MAX) return (EINVAL); break; case RTP_PRIO_NORMAL: rtp->prio = 0; break; default: return (EINVAL); } } #ifdef RACCT if (racct_enable) { PROC_LOCK(p); error = racct_add(p, RACCT_NTHR, 1); PROC_UNLOCK(p); if (error != 0) return (EPROCLIM); } #endif /* Initialize our td */ error = kern_thr_alloc(p, 0, &newtd); if (error) goto fail; cpu_copy_thread(newtd, td); bzero(&newtd->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &newtd->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); newtd->td_proc = td->td_proc; newtd->td_rb_list = newtd->td_rbp_list = newtd->td_rb_inact = 0; thread_cow_get(newtd, td); error = initialize_thread(newtd, thunk); if (error != 0) { thread_cow_free(newtd); thread_free(newtd); goto fail; } PROC_LOCK(p); p->p_flag |= P_HADTHREADS; thread_link(newtd, p); bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name)); thread_lock(td); /* let the scheduler know about these things.
*/ sched_fork_thread(td, newtd); thread_unlock(td); if (P_SHOULDSTOP(p)) newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK; if (p->p_ptevents & PTRACE_LWP) newtd->td_dbgflags |= TDB_BORN; - /* - * Copy the existing thread VM policy into the new thread. - */ - vm_domain_policy_localcopy(&newtd->td_vm_dom_policy, - &td->td_vm_dom_policy); - PROC_UNLOCK(p); tidhash_add(newtd); thread_lock(newtd); if (rtp != NULL) { if (!(td->td_pri_class == PRI_TIMESHARE && rtp->type == RTP_PRIO_NORMAL)) { rtp_to_pri(rtp, newtd); sched_prio(newtd, newtd->td_user_pri); } /* ignore timesharing class */ } TD_SET_CAN_RUN(newtd); sched_add(newtd, SRQ_BORING); thread_unlock(newtd); return (0); fail: #ifdef RACCT if (racct_enable) { PROC_LOCK(p); racct_sub(p, RACCT_NTHR, 1); PROC_UNLOCK(p); } #endif return (error); } int sys_thr_self(struct thread *td, struct thr_self_args *uap) /* long *id */ { int error; error = suword_lwpid(uap->id, (unsigned)td->td_tid); if (error == -1) return (EFAULT); return (0); } int sys_thr_exit(struct thread *td, struct thr_exit_args *uap) /* long *state */ { umtx_thread_exit(td); /* Signal userland that it can free the stack. */ if ((void *)uap->state != NULL) { suword_lwpid(uap->state, 1); kern_umtx_wake(td, uap->state, INT_MAX, 0); } return (kern_thr_exit(td)); } int kern_thr_exit(struct thread *td) { struct proc *p; p = td->td_proc; /* * If all of the threads in a process call this routine to * exit (e.g. all threads call pthread_exit()), exactly one * thread should return to the caller to terminate the process * instead of the thread. * * Checking p_numthreads alone is not sufficient since threads * might be committed to terminating while the PROC_LOCK is * dropped in either ptracestop() or while removing this thread * from the tidhash. Instead, the p_pendingexits field holds * the count of threads in either of those states and a thread * is considered the "last" thread if all of the other threads * in a process are already terminating. */ PROC_LOCK(p); if (p->p_numthreads == p->p_pendingexits + 1) { /* * Ignore attempts to shut down last thread in the * proc. This will actually call _exit(2) in the * usermode trampoline when it returns. */ PROC_UNLOCK(p); return (0); } p->p_pendingexits++; td->td_dbgflags |= TDB_EXIT; if (p->p_ptevents & PTRACE_LWP) ptracestop(td, SIGTRAP, NULL); PROC_UNLOCK(p); tidhash_remove(td); PROC_LOCK(p); p->p_pendingexits--; /* * The check above should prevent all other threads from this * process from exiting while the PROC_LOCK is dropped, so * there must be at least one other thread other than the * current thread. 
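* That invariant is asserted below before this thread's resources are released and it exits.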
*/ KASSERT(p->p_numthreads > 1, ("too few threads")); racct_sub(p, RACCT_NTHR, 1); tdsigcleanup(td); PROC_SLOCK(p); thread_stopped(p); thread_exit(); /* NOTREACHED */ } int sys_thr_kill(struct thread *td, struct thr_kill_args *uap) /* long id, int sig */ { ksiginfo_t ksi; struct thread *ttd; struct proc *p; int error; p = td->td_proc; ksiginfo_init(&ksi); ksi.ksi_signo = uap->sig; ksi.ksi_code = SI_LWP; ksi.ksi_pid = p->p_pid; ksi.ksi_uid = td->td_ucred->cr_ruid; if (uap->id == -1) { if (uap->sig != 0 && !_SIG_VALID(uap->sig)) { error = EINVAL; } else { error = ESRCH; PROC_LOCK(p); FOREACH_THREAD_IN_PROC(p, ttd) { if (ttd != td) { error = 0; if (uap->sig == 0) break; tdksignal(ttd, uap->sig, &ksi); } } PROC_UNLOCK(p); } } else { error = 0; ttd = tdfind((lwpid_t)uap->id, p->p_pid); if (ttd == NULL) return (ESRCH); if (uap->sig == 0) ; else if (!_SIG_VALID(uap->sig)) error = EINVAL; else tdksignal(ttd, uap->sig, &ksi); PROC_UNLOCK(ttd->td_proc); } return (error); } int sys_thr_kill2(struct thread *td, struct thr_kill2_args *uap) /* pid_t pid, long id, int sig */ { ksiginfo_t ksi; struct thread *ttd; struct proc *p; int error; AUDIT_ARG_SIGNUM(uap->sig); ksiginfo_init(&ksi); ksi.ksi_signo = uap->sig; ksi.ksi_code = SI_LWP; ksi.ksi_pid = td->td_proc->p_pid; ksi.ksi_uid = td->td_ucred->cr_ruid; if (uap->id == -1) { if ((p = pfind(uap->pid)) == NULL) return (ESRCH); AUDIT_ARG_PROCESS(p); error = p_cansignal(td, p, uap->sig); if (error) { PROC_UNLOCK(p); return (error); } if (uap->sig != 0 && !_SIG_VALID(uap->sig)) { error = EINVAL; } else { error = ESRCH; FOREACH_THREAD_IN_PROC(p, ttd) { if (ttd != td) { error = 0; if (uap->sig == 0) break; tdksignal(ttd, uap->sig, &ksi); } } } PROC_UNLOCK(p); } else { ttd = tdfind((lwpid_t)uap->id, uap->pid); if (ttd == NULL) return (ESRCH); p = ttd->td_proc; AUDIT_ARG_PROCESS(p); error = p_cansignal(td, p, uap->sig); if (uap->sig == 0) ; else if (!_SIG_VALID(uap->sig)) error = EINVAL; else tdksignal(ttd, uap->sig, &ksi); PROC_UNLOCK(p); } return (error); } int sys_thr_suspend(struct thread *td, struct thr_suspend_args *uap) /* const struct timespec *timeout */ { struct timespec ts, *tsp; int error; tsp = NULL; if (uap->timeout != NULL) { error = umtx_copyin_timeout(uap->timeout, &ts); if (error != 0) return (error); tsp = &ts; } return (kern_thr_suspend(td, tsp)); } int kern_thr_suspend(struct thread *td, struct timespec *tsp) { struct proc *p = td->td_proc; struct timeval tv; int error = 0; int timo = 0; if (td->td_pflags & TDP_WAKEUP) { td->td_pflags &= ~TDP_WAKEUP; return (0); } if (tsp != NULL) { if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) error = EWOULDBLOCK; else { TIMESPEC_TO_TIMEVAL(&tv, tsp); timo = tvtohz(&tv); } } PROC_LOCK(p); if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0) error = msleep((void *)td, &p->p_mtx, PCATCH, "lthr", timo); if (td->td_flags & TDF_THRWAKEUP) { thread_lock(td); td->td_flags &= ~TDF_THRWAKEUP; thread_unlock(td); PROC_UNLOCK(p); return (0); } PROC_UNLOCK(p); if (error == EWOULDBLOCK) error = ETIMEDOUT; else if (error == ERESTART) { if (timo != 0) error = EINTR; } return (error); } int sys_thr_wake(struct thread *td, struct thr_wake_args *uap) /* long id */ { struct proc *p; struct thread *ttd; if (uap->id == td->td_tid) { td->td_pflags |= TDP_WAKEUP; return (0); } p = td->td_proc; ttd = tdfind((lwpid_t)uap->id, p->p_pid); if (ttd == NULL) return (ESRCH); thread_lock(ttd); ttd->td_flags |= TDF_THRWAKEUP; thread_unlock(ttd); wakeup((void *)ttd); PROC_UNLOCK(p); return (0); } int sys_thr_set_name(struct thread *td, struct 
thr_set_name_args *uap) { struct proc *p; char name[MAXCOMLEN + 1]; struct thread *ttd; int error; error = 0; name[0] = '\0'; if (uap->name != NULL) { error = copyinstr(uap->name, name, sizeof(name), NULL); if (error == ENAMETOOLONG) { error = copyin(uap->name, name, sizeof(name) - 1); name[sizeof(name) - 1] = '\0'; } if (error) return (error); } p = td->td_proc; ttd = tdfind((lwpid_t)uap->id, p->p_pid); if (ttd == NULL) return (ESRCH); strcpy(ttd->td_name, name); #ifdef KTR sched_clear_tdname(ttd); #endif PROC_UNLOCK(p); return (error); } int kern_thr_alloc(struct proc *p, int pages, struct thread **ntd) { /* Have race condition but it is cheap. */ if (p->p_numthreads >= max_threads_per_proc) { ++max_threads_hits; return (EPROCLIM); } *ntd = thread_alloc(pages); if (*ntd == NULL) return (ENOMEM); return (0); } Index: user/jeff/numa/sys/kern/kern_thread.c =================================================================== --- user/jeff/numa/sys/kern/kern_thread.c (revision 326888) +++ user/jeff/numa/sys/kern/kern_thread.c (revision 326889) @@ -1,1267 +1,1265 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "opt_witness.h" #include "opt_hwpmc_hooks.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #include #include /* * Asserts below verify the stability of struct thread and struct proc * layout, as exposed by KBI to modules. On head, the KBI is allowed * to drift, change to the structures must be accompanied by the * assert update. * * On the stable branches after KBI freeze, conditions must not be * violated. Typically new fields are moved to the end of the * structures. 
*/ #ifdef __amd64__ -_Static_assert(offsetof(struct thread, td_flags) == 0xf4, +_Static_assert(offsetof(struct thread, td_flags) == 0xfc, "struct thread KBI td_flags"); -_Static_assert(offsetof(struct thread, td_pflags) == 0xfc, +_Static_assert(offsetof(struct thread, td_pflags) == 0x104, "struct thread KBI td_pflags"); -_Static_assert(offsetof(struct thread, td_frame) == 0x460, +_Static_assert(offsetof(struct thread, td_frame) == 0x468, "struct thread KBI td_frame"); -_Static_assert(offsetof(struct thread, td_emuldata) == 0x508, +_Static_assert(offsetof(struct thread, td_emuldata) == 0x510, "struct thread KBI td_emuldata"); _Static_assert(offsetof(struct proc, p_flag) == 0xb0, "struct proc KBI p_flag"); _Static_assert(offsetof(struct proc, p_pid) == 0xbc, "struct proc KBI p_pid"); _Static_assert(offsetof(struct proc, p_filemon) == 0x3d0, "struct proc KBI p_filemon"); _Static_assert(offsetof(struct proc, p_comm) == 0x3e0, "struct proc KBI p_comm"); _Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8, "struct proc KBI p_emuldata"); #endif #ifdef __i386__ -_Static_assert(offsetof(struct thread, td_flags) == 0x9c, +_Static_assert(offsetof(struct thread, td_flags) == 0x100, "struct thread KBI td_flags"); -_Static_assert(offsetof(struct thread, td_pflags) == 0xa4, +_Static_assert(offsetof(struct thread, td_pflags) == 0xa8, "struct thread KBI td_pflags"); -_Static_assert(offsetof(struct thread, td_frame) == 0x2ec, +_Static_assert(offsetof(struct thread, td_frame) == 0x2f0, "struct thread KBI td_frame"); -_Static_assert(offsetof(struct thread, td_emuldata) == 0x338, +_Static_assert(offsetof(struct thread, td_emuldata) == 0x33c, "struct thread KBI td_emuldata"); _Static_assert(offsetof(struct proc, p_flag) == 0x68, "struct proc KBI p_flag"); _Static_assert(offsetof(struct proc, p_pid) == 0x74, "struct proc KBI p_pid"); _Static_assert(offsetof(struct proc, p_filemon) == 0x27c, "struct proc KBI p_filemon"); _Static_assert(offsetof(struct proc, p_comm) == 0x288, "struct proc KBI p_comm"); _Static_assert(offsetof(struct proc, p_emuldata) == 0x314, "struct proc KBI p_emuldata"); #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE(proc, , , lwp__exit); /* * thread related storage. 
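* Threads are allocated from the thread_zone UMA zone; exited threads are parked on zombie_threads and reclaimed later by thread_reap().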
*/ static uma_zone_t thread_zone; TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); static struct mtx zombie_lock; MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); static void thread_zombie(struct thread *); static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary); #define TID_BUFFER_SIZE 1024 struct mtx tid_lock; static struct unrhdr *tid_unrhdr; static lwpid_t tid_buffer[TID_BUFFER_SIZE]; static int tid_head, tid_tail; static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash"); struct tidhashhead *tidhashtbl; u_long tidhash; struct rwlock tidhash_lock; EVENTHANDLER_LIST_DEFINE(thread_ctor); EVENTHANDLER_LIST_DEFINE(thread_dtor); EVENTHANDLER_LIST_DEFINE(thread_init); EVENTHANDLER_LIST_DEFINE(thread_fini); static lwpid_t tid_alloc(void) { lwpid_t tid; tid = alloc_unr(tid_unrhdr); if (tid != -1) return (tid); mtx_lock(&tid_lock); if (tid_head == tid_tail) { mtx_unlock(&tid_lock); return (-1); } tid = tid_buffer[tid_head]; tid_head = (tid_head + 1) % TID_BUFFER_SIZE; mtx_unlock(&tid_lock); return (tid); } static void tid_free(lwpid_t tid) { lwpid_t tmp_tid = -1; mtx_lock(&tid_lock); if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) { tmp_tid = tid_buffer[tid_head]; tid_head = (tid_head + 1) % TID_BUFFER_SIZE; } tid_buffer[tid_tail] = tid; tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE; mtx_unlock(&tid_lock); if (tmp_tid != -1) free_unr(tid_unrhdr, tmp_tid); } /* * Prepare a thread for use. */ static int thread_ctor(void *mem, int size, void *arg, int flags) { struct thread *td; td = (struct thread *)mem; td->td_state = TDS_INACTIVE; td->td_oncpu = NOCPU; td->td_tid = tid_alloc(); /* * Note that td_critnest begins life as 1 because the thread is not * running and is thereby implicitly waiting to be on the receiving * end of a context switch. */ td->td_critnest = 1; td->td_lend_user_pri = PRI_MAX; EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td); #ifdef AUDIT audit_thread_alloc(td); #endif umtx_thread_alloc(td); return (0); } /* * Reclaim a thread after use. */ static void thread_dtor(void *mem, int size, void *arg) { struct thread *td; td = (struct thread *)mem; #ifdef INVARIANTS /* Verify that this thread is in a safe state to free. */ switch (td->td_state) { case TDS_INHIBITED: case TDS_RUNNING: case TDS_CAN_RUN: case TDS_RUNQ: /* * We must never unlink a thread that is in one of * these states, because it is currently active. */ panic("bad state for thread unlinking"); /* NOTREACHED */ case TDS_INACTIVE: break; default: panic("bad thread state"); /* NOTREACHED */ } #endif #ifdef AUDIT audit_thread_free(td); #endif /* Free all OSD associated to this thread. */ osd_thread_exit(td); td_softdep_cleanup(td); MPASS(td->td_su == NULL); EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td); tid_free(td->td_tid); } /* * Initialize type-stable parts of a thread (when newly created). */ static int thread_init(void *mem, int size, int flags) { struct thread *td; td = (struct thread *)mem; td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); td->td_rlqe = NULL; EVENTHANDLER_DIRECT_INVOKE(thread_init, td); umtx_thread_init(td); td->td_kstack = 0; td->td_sel = NULL; return (0); } /* * Tear down type-stable parts of a thread (just before being discarded). 
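* This is the counterpart of thread_init(): the sleep queue, turnstile and umtx state allocated there are released here, along with any rangelock queue entry and select state.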
*/ static void thread_fini(void *mem, int size) { struct thread *td; td = (struct thread *)mem; EVENTHANDLER_DIRECT_INVOKE(thread_fini, td); rlqentry_free(td->td_rlqe); turnstile_free(td->td_turnstile); sleepq_free(td->td_sleepqueue); umtx_thread_fini(td); seltdfini(td); } /* * For a newly created process, * link up all the structures and its initial threads etc. * called from: * {arch}/{arch}/machdep.c {arch}_init(), init386() etc. * proc_dtor() (should go away) * proc_init() */ void proc_linkup0(struct proc *p, struct thread *td) { TAILQ_INIT(&p->p_threads); /* all threads in proc */ proc_linkup(p, td); } void proc_linkup(struct proc *p, struct thread *td) { sigqueue_init(&p->p_sigqueue, p); p->p_ksi = ksiginfo_alloc(1); if (p->p_ksi != NULL) { /* XXX p_ksi may be null if ksiginfo zone is not ready */ p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; } LIST_INIT(&p->p_mqnotifier); p->p_numthreads = 0; thread_link(td, p); } /* * Initialize global thread allocation resources. */ void threadinit(void) { mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); /* * pid_max cannot be greater than PID_MAX. * leave one number for thread0. */ tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock); thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, thread_dtor, thread_init, thread_fini, 32 - 1, UMA_ZONE_NOFREE); tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash); rw_init(&tidhash_lock, "tidhash"); } /* * Place an unused thread on the zombie list. * Use the slpq as that must be unused by now. */ void thread_zombie(struct thread *td) { mtx_lock_spin(&zombie_lock); TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); mtx_unlock_spin(&zombie_lock); } /* * Release a thread that has exited after cpu_throw(). */ void thread_stash(struct thread *td) { atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); thread_zombie(td); } /* * Reap zombie resources. */ void thread_reap(void) { struct thread *td_first, *td_next; /* * Don't even bother to lock if none at this instant, * we really don't care about the next instant. */ if (!TAILQ_EMPTY(&zombie_threads)) { mtx_lock_spin(&zombie_lock); td_first = TAILQ_FIRST(&zombie_threads); if (td_first) TAILQ_INIT(&zombie_threads); mtx_unlock_spin(&zombie_lock); while (td_first) { td_next = TAILQ_NEXT(td_first, td_slpq); thread_cow_free(td_first); thread_free(td_first); td_first = td_next; } } } /* * Allocate a thread. */ struct thread * thread_alloc(int pages) { struct thread *td; thread_reap(); /* check if any zombies to get */ td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); if (!vm_thread_new(td, pages)) { uma_zfree(thread_zone, td); return (NULL); } cpu_thread_alloc(td); - vm_domain_policy_init(&td->td_vm_dom_policy); return (td); } int thread_alloc_stack(struct thread *td, int pages) { KASSERT(td->td_kstack == 0, ("thread_alloc_stack called on a thread with kstack")); if (!vm_thread_new(td, pages)) return (0); cpu_thread_alloc(td); return (1); } /* * Deallocate a thread. 
*/ void thread_free(struct thread *td) { lock_profile_thread_exit(td); if (td->td_cpuset) cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_free(td); if (td->td_kstack != 0) vm_thread_dispose(td); - vm_domain_policy_cleanup(&td->td_vm_dom_policy); callout_drain(&td->td_slpcallout); uma_zfree(thread_zone, td); } void thread_cow_get_proc(struct thread *newtd, struct proc *p) { PROC_LOCK_ASSERT(p, MA_OWNED); newtd->td_ucred = crhold(p->p_ucred); newtd->td_limit = lim_hold(p->p_limit); newtd->td_cowgen = p->p_cowgen; } void thread_cow_get(struct thread *newtd, struct thread *td) { newtd->td_ucred = crhold(td->td_ucred); newtd->td_limit = lim_hold(td->td_limit); newtd->td_cowgen = td->td_cowgen; } void thread_cow_free(struct thread *td) { if (td->td_ucred != NULL) crfree(td->td_ucred); if (td->td_limit != NULL) lim_free(td->td_limit); } void thread_cow_update(struct thread *td) { struct proc *p; struct ucred *oldcred; struct plimit *oldlimit; p = td->td_proc; oldcred = NULL; oldlimit = NULL; PROC_LOCK(p); if (td->td_ucred != p->p_ucred) { oldcred = td->td_ucred; td->td_ucred = crhold(p->p_ucred); } if (td->td_limit != p->p_limit) { oldlimit = td->td_limit; td->td_limit = lim_hold(p->p_limit); } td->td_cowgen = p->p_cowgen; PROC_UNLOCK(p); if (oldcred != NULL) crfree(oldcred); if (oldlimit != NULL) lim_free(oldlimit); } /* * Discard the current thread and exit from its context. * Always called with scheduler locked. * * Because we can't free a thread while we're operating under its context, * push the current thread into our CPU's deadthread holder. This means * we needn't worry about someone else grabbing our context before we * do a cpu_throw(). */ void thread_exit(void) { uint64_t runtime, new_switchtime; struct thread *td; struct thread *td2; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT(p != NULL, ("thread exiting without a process")); CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, (long)p->p_pid, td->td_name); SDT_PROBE0(proc, , , lwp__exit); KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); #ifdef AUDIT AUDIT_SYSCALL_EXIT(0, td); #endif /* * drop FPU & debug register state storage, or any other * architecture specific resources that * would not be on a new untouched process. */ cpu_thread_exit(td); /* * The last thread is left attached to the process * So that the whole bundle gets recycled. Skip * all this stuff if we never had threads. * EXIT clears all sign of other threads when * it goes to single threading, so the last thread always * takes the short path. */ if (p->p_flag & P_HADTHREADS) { if (p->p_numthreads > 1) { atomic_add_int(&td->td_proc->p_exitthreads, 1); thread_unlink(td); td2 = FIRST_THREAD_IN_PROC(p); sched_exit_thread(td2, td); /* * The test below is NOT true if we are the * sole exiting thread. P_STOPPED_SINGLE is unset * in exit1() after it is the only survivor. */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); if (wakeup_swapper) kick_proc0(); } } PCPU_SET(deadthread, td); } else { /* * The last thread is exiting.. 
but not through exit() */ panic ("thread_exit: Last thread exiting on its own"); } } #ifdef HWPMC_HOOKS /* * If this thread is part of a process that is being tracked by hwpmc(4), * inform the module of the thread's impending exit. */ if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); #endif PROC_UNLOCK(p); PROC_STATLOCK(p); thread_lock(td); PROC_SUNLOCK(p); /* Do the same timestamp bookkeeping that mi_switch() would do. */ new_switchtime = cpu_ticks(); runtime = new_switchtime - PCPU_GET(switchtime); td->td_runtime += runtime; td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); PCPU_SET(switchticks, ticks); VM_CNT_INC(v_swtch); /* Save our resource usage in our process. */ td->td_ru.ru_nvcsw++; ruxagg(p, td); rucollect(&p->p_ru, &td->td_ru); PROC_STATUNLOCK(p); td->td_state = TDS_INACTIVE; #ifdef WITNESS witness_thread_exit(td); #endif CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); sched_throw(td); panic("I'm a teapot!"); /* NOTREACHED */ } /* * Do any thread specific cleanups that may be needed in wait() * called with Giant, proc and schedlock not held. */ void thread_wait(struct proc *p) { struct thread *td; mtx_assert(&Giant, MA_NOTOWNED); KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()")); KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking")); td = FIRST_THREAD_IN_PROC(p); /* Lock the last thread so we spin until it exits cpu_throw(). */ thread_lock(td); thread_unlock(td); lock_profile_thread_exit(td); cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_clean(td); thread_cow_free(td); callout_drain(&td->td_slpcallout); thread_reap(); /* check for zombie threads etc. */ } /* * Link a thread to a process. * set up anything that needs to be initialized for it to * be used by the process. */ void thread_link(struct thread *td, struct proc *p) { /* * XXX This can't be enabled because it's called for proc0 before * its lock has been created. * PROC_LOCK_ASSERT(p, MA_OWNED); */ td->td_state = TDS_INACTIVE; td->td_proc = p; td->td_flags = TDF_INMEM; LIST_INIT(&td->td_contested); LIST_INIT(&td->td_lprof[0]); LIST_INIT(&td->td_lprof[1]); sigqueue_init(&td->td_sigqueue, p); callout_init(&td->td_slpcallout, 1); TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist); p->p_numthreads++; } /* * Called from: * thread_exit() */ void thread_unlink(struct thread *td) { struct proc *p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); TAILQ_REMOVE(&p->p_threads, td, td_plist); p->p_numthreads--; /* could clear a few other things here */ /* Must NOT clear links to proc! */ } static int calc_remaining(struct proc *p, int mode) { int remaining; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); if (mode == SINGLE_EXIT) remaining = p->p_numthreads; else if (mode == SINGLE_BOUNDARY) remaining = p->p_numthreads - p->p_boundary_count; else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC) remaining = p->p_numthreads - p->p_suspcount; else panic("calc_remaining: wrong mode %d", mode); return (remaining); } static int remain_for_mode(int mode) { return (mode == SINGLE_ALLPROC ? 
0 : 1); } static int weed_inhib(int mode, struct thread *td2, struct proc *p) { int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td2, MA_OWNED); wakeup_swapper = 0; switch (mode) { case SINGLE_EXIT: if (TD_IS_SUSPENDED(td2)) wakeup_swapper |= thread_unsuspend_one(td2, p, true); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) wakeup_swapper |= sleepq_abort(td2, EINTR); break; case SINGLE_BOUNDARY: case SINGLE_NO_EXIT: if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) wakeup_swapper |= sleepq_abort(td2, ERESTART); break; case SINGLE_ALLPROC: /* * ALLPROC suspend tries to avoid spurious EINTR for * threads sleeping interruptable, by suspending the * thread directly, similarly to sig_suspend_threads(). * Since such sleep is not performed at the user * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP * is used to avoid immediate un-suspend. */ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY | TDF_ALLPROCSUSP)) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) { if ((td2->td_flags & TDF_SBDRY) == 0) { thread_suspend_one(td2); td2->td_flags |= TDF_ALLPROCSUSP; } else { wakeup_swapper |= sleepq_abort(td2, ERESTART); } } break; } return (wakeup_swapper); } /* * Enforce single-threading. * * Returns 1 if the caller must abort (another thread is waiting to * exit the process or similar). Process is locked! * Returns 0 when you are successfully the only thread running. * A process has successfully single threaded in the suspend mode when * There are no threads in user mode. Threads in the kernel must be * allowed to continue until they get to the user boundary. They may even * copy out their return values and data before suspending. They may however be * accelerated in reaching the user boundary as we will wake up * any sleeping threads that are interruptable. (PCATCH). */ int thread_single(struct proc *p, int mode) { struct thread *td; struct thread *td2; int remaining, wakeup_swapper; td = curthread; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); /* * If allowing non-ALLPROC singlethreading for non-curproc * callers, calc_remaining() and remain_for_mode() should be * adjusted to also account for td->td_proc != p. For now * this is not implemented because it is not used. */ KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) || (mode != SINGLE_ALLPROC && td->td_proc == p), ("mode %d proc %p curproc %p", mode, p, td->td_proc)); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC) return (0); /* Is someone already single threading? 
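* If another thread already owns the request, the caller must abort (return 1).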
*/ if (p->p_singlethread != NULL && p->p_singlethread != td) return (1); if (mode == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; p->p_flag &= ~P_SINGLE_BOUNDARY; } else { p->p_flag &= ~P_SINGLE_EXIT; if (mode == SINGLE_BOUNDARY) p->p_flag |= P_SINGLE_BOUNDARY; else p->p_flag &= ~P_SINGLE_BOUNDARY; } if (mode == SINGLE_ALLPROC) p->p_flag |= P_TOTAL_STOP; p->p_flag |= P_STOPPED_SINGLE; PROC_SLOCK(p); p->p_singlethread = td; remaining = calc_remaining(p, mode); while (remaining != remain_for_mode(mode)) { if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) goto stopme; wakeup_swapper = 0; FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK; if (TD_IS_INHIBITED(td2)) { wakeup_swapper |= weed_inhib(mode, td2, p); #ifdef SMP } else if (TD_IS_RUNNING(td2) && td != td2) { forward_signal(td2); #endif } thread_unlock(td2); } if (wakeup_swapper) kick_proc0(); remaining = calc_remaining(p, mode); /* * Maybe we suspended some threads.. was it enough? */ if (remaining == remain_for_mode(mode)) break; stopme: /* * Wake us up when everyone else has suspended. * In the mean time we suspend as well. */ thread_suspend_switch(td, p); remaining = calc_remaining(p, mode); } if (mode == SINGLE_EXIT) { /* * Convert the process to an unthreaded process. The * SINGLE_EXIT is called by exit1() or execve(), in * both cases other threads must be retired. */ KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads")); p->p_singlethread = NULL; p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS); /* * Wait for any remaining threads to exit cpu_throw(). */ while (p->p_exitthreads != 0) { PROC_SUNLOCK(p); PROC_UNLOCK(p); sched_relinquish(td); PROC_LOCK(p); PROC_SLOCK(p); } } else if (mode == SINGLE_BOUNDARY) { /* * Wait until all suspended threads are removed from * the processors. The thread_suspend_check() * increments p_boundary_count while it is still * running, which makes it possible for the execve() * to destroy vmspace while our other threads are * still using the address space. * * We lock the thread, which is only allowed to * succeed after context switch code finished using * the address space. */ FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); KASSERT((td2->td_flags & TDF_BOUNDARY) != 0, ("td %p not on boundary", td2)); KASSERT(TD_IS_SUSPENDED(td2), ("td %p is not suspended", td2)); thread_unlock(td2); } } PROC_SUNLOCK(p); return (0); } bool thread_suspend_check_needed(void) { struct proc *p; struct thread *td; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 && (td->td_dbgflags & TDB_SUSPEND) != 0)); } /* * Called in from locations that can safely check to see * whether we have to suspend or at least throttle for a * single-thread event (e.g. fork). * * Such locations include userret(). * If the "return_instead" argument is non zero, the thread must be able to * accept 0 (caller may continue), or 1 (caller must abort) as a result. * * The 'return_instead' argument tells the function if it may do a * thread_exit() or suspend, or whether the caller must abort and back * out instead. * * If the thread that set the single_threading request has set the * P_SINGLE_EXIT bit in the process flags then this call will never return * if 'return_instead' is false, but will exit. 
* * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 *---------------+--------------------+--------------------- * 0 | returns 0 | returns 0 or 1 * | when ST ends | immediately *---------------+--------------------+--------------------- * 1 | thread exits | returns 1 * | | immediately * 0 = thread_exit() or suspension ok, * other = return error instead of stopping the thread. * * While a full suspension is under effect, even a single threading * thread would be suspended if it made this call (but it shouldn't). * This call should only be made from places where * thread_exit() would be safe as that may be the outcome unless * return_instead is set. */ int thread_suspend_check(int return_instead) { struct thread *td; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); while (thread_suspend_check_needed()) { if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { KASSERT(p->p_singlethread != NULL, ("singlethread not set")); /* * The only suspension in action is a * single-threading. Single threader need not stop. * It is safe to access p->p_singlethread unlocked * because it can only be set to our address by us. */ if (p->p_singlethread == td) return (0); /* Exempt from stopping. */ } if ((p->p_flag & P_SINGLE_EXIT) && return_instead) return (EINTR); /* Should we goto user boundary if we didn't come from there? */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) return (ERESTART); /* * Ignore suspend requests if they are deferred. */ if ((td->td_flags & TDF_SBDRY) != 0) { KASSERT(return_instead, ("TDF_SBDRY set for unsafe thread_suspend_check")); KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != (TDF_SEINTR | TDF_SERESTART), ("both TDF_SEINTR and TDF_SERESTART")); return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0); } /* * If the process is waiting for us to exit, * this thread should just suicide. * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. */ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { PROC_UNLOCK(p); /* * Allow Linux emulation layer to do some work * before thread suicide. */ if (__predict_false(p->p_sysent->sv_thread_detach != NULL)) (p->p_sysent->sv_thread_detach)(td); umtx_thread_exit(td); kern_thr_exit(td); panic("stopped thread did not exit"); } PROC_SLOCK(p); thread_stopped(p); if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount + 1) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); if (wakeup_swapper) kick_proc0(); } } PROC_UNLOCK(p); thread_lock(td); /* * When a thread suspends, it just * gets taken off all queues. */ thread_suspend_one(td); if (return_instead == 0) { p->p_boundary_count++; td->td_flags |= TDF_BOUNDARY; } PROC_SUNLOCK(p); mi_switch(SW_INVOL | SWT_SUSPEND, NULL); thread_unlock(td); PROC_LOCK(p); } return (0); } void thread_suspend_switch(struct thread *td, struct proc *p) { KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); /* * We implement thread_suspend_one in stages here to avoid * dropping the proc lock while the thread lock is owned. 
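* The proc-locked work (thread_stopped() and the p_suspcount bump) is done first; the thread lock is taken only after the proc lock has been dropped.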
*/ if (p == td->td_proc) { thread_stopped(p); p->p_suspcount++; } PROC_UNLOCK(p); thread_lock(td); td->td_flags &= ~TDF_NEEDSUSPCHK; TD_SET_SUSPENDED(td); sched_sleep(td, 0); PROC_SUNLOCK(p); DROP_GIANT(); mi_switch(SW_VOL | SWT_SUSPEND, NULL); thread_unlock(td); PICKUP_GIANT(); PROC_LOCK(p); PROC_SLOCK(p); } void thread_suspend_one(struct thread *td) { struct proc *p; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); p->p_suspcount++; td->td_flags &= ~TDF_NEEDSUSPCHK; TD_SET_SUSPENDED(td); sched_sleep(td, 0); } static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary) { THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); TD_CLR_SUSPENDED(td); td->td_flags &= ~TDF_ALLPROCSUSP; if (td->td_proc == p) { PROC_SLOCK_ASSERT(p, MA_OWNED); p->p_suspcount--; if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) { td->td_flags &= ~TDF_BOUNDARY; p->p_boundary_count--; } } return (setrunnable(td)); } /* * Allow all threads blocked by single threading to continue running. */ void thread_unsuspend(struct proc *p) { struct thread *td; int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); wakeup_swapper = 0; if (!P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, true); } thread_unlock(td); } } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && p->p_numthreads == p->p_suspcount) { /* * Stopping everything also did the job for the single * threading request. Now we've downgraded to single-threaded, * let it continue. */ if (p->p_singlethread->td_proc == p) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); } } if (wakeup_swapper) kick_proc0(); } /* * End the single threading mode.. */ void thread_single_end(struct proc *p, int mode) { struct thread *td; int wakeup_swapper; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) || (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0), ("mode %d does not match P_TOTAL_STOP", mode)); KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread, ("thread_single_end from other thread %p %p", curthread, p->p_singlethread)); KASSERT(mode != SINGLE_BOUNDARY || (p->p_flag & P_SINGLE_BOUNDARY) != 0, ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag)); p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY | P_TOTAL_STOP); PROC_SLOCK(p); p->p_singlethread = NULL; wakeup_swapper = 0; /* * If there are other threads they may now run, * unless of course there is a blanket 'stop order' * on the process. The single threader must be allowed * to continue however as this is a bad place to stop. 
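 * (Suspended threads are released via thread_unsuspend_one(); for SINGLE_BOUNDARY the per-thread boundary accounting is dropped as part of that wakeup.)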
*/ if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, mode == SINGLE_BOUNDARY); } thread_unlock(td); } } KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0, ("inconsistent boundary count %d", p->p_boundary_count)); PROC_SUNLOCK(p); if (wakeup_swapper) kick_proc0(); } struct thread * thread_find(struct proc *p, lwpid_t tid) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); FOREACH_THREAD_IN_PROC(p, td) { if (td->td_tid == tid) break; } return (td); } /* Locate a thread by number; return with proc lock held. */ struct thread * tdfind(lwpid_t tid, pid_t pid) { #define RUN_THRESH 16 struct thread *td; int run = 0; rw_rlock(&tidhash_lock); LIST_FOREACH(td, TIDHASH(tid), td_hash) { if (td->td_tid == tid) { if (pid != -1 && td->td_proc->p_pid != pid) { td = NULL; break; } PROC_LOCK(td->td_proc); if (td->td_proc->p_state == PRS_NEW) { PROC_UNLOCK(td->td_proc); td = NULL; break; } if (run > RUN_THRESH) { if (rw_try_upgrade(&tidhash_lock)) { LIST_REMOVE(td, td_hash); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(&tidhash_lock); return (td); } } break; } run++; } rw_runlock(&tidhash_lock); return (td); } void tidhash_add(struct thread *td) { rw_wlock(&tidhash_lock); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(&tidhash_lock); } void tidhash_remove(struct thread *td) { rw_wlock(&tidhash_lock); LIST_REMOVE(td, td_hash); rw_wunlock(&tidhash_lock); } Index: user/jeff/numa/sys/kern/makesyscalls.sh =================================================================== --- user/jeff/numa/sys/kern/makesyscalls.sh (revision 326888) +++ user/jeff/numa/sys/kern/makesyscalls.sh (revision 326889) @@ -1,712 +1,713 @@ #! 
/bin/sh - # @(#)makesyscalls.sh 8.1 (Berkeley) 6/10/93 # $FreeBSD$ set -e # name of compat options: compat=COMPAT_43 compat4=COMPAT_FREEBSD4 compat6=COMPAT_FREEBSD6 compat7=COMPAT_FREEBSD7 compat10=COMPAT_FREEBSD10 compat11=COMPAT_FREEBSD11 # output files: sysnames="syscalls.c" sysproto="../sys/sysproto.h" sysproto_h=_SYS_SYSPROTO_H_ syshdr="../sys/syscall.h" sysmk="../sys/syscall.mk" syssw="init_sysent.c" syscallprefix="SYS_" switchname="sysent" namesname="syscallnames" systrace="systrace_args.c" # tmp files: sysaue="sysent.aue.$$" sysdcl="sysent.dcl.$$" syscompat="sysent.compat.$$" syscompatdcl="sysent.compatdcl.$$" syscompat4="sysent.compat4.$$" syscompat4dcl="sysent.compat4dcl.$$" syscompat6="sysent.compat6.$$" syscompat6dcl="sysent.compat6dcl.$$" syscompat7="sysent.compat7.$$" syscompat7dcl="sysent.compat7dcl.$$" syscompat10="sysent.compat10.$$" syscompat10dcl="sysent.compat10dcl.$$" syscompat11="sysent.compat11.$$" syscompat11dcl="sysent.compat11dcl.$$" sysent="sysent.switch.$$" sysinc="sysinc.switch.$$" sysarg="sysarg.switch.$$" sysprotoend="sysprotoend.$$" systracetmp="systrace.$$" systraceret="systraceret.$$" if [ -r capabilities.conf ]; then capenabled=`cat capabilities.conf | grep -v "^#" | grep -v "^$"` capenabled=`echo $capenabled | sed 's/ /,/g'` else capenabled="" fi trap "rm $sysaue $sysdcl $syscompat $syscompatdcl $syscompat4 $syscompat4dcl $syscompat6 $syscompat6dcl $syscompat7 $syscompat7dcl $syscompat10 $syscompat10dcl $syscompat11 $syscompat11dcl $sysent $sysinc $sysarg $sysprotoend $systracetmp $systraceret" 0 touch $sysaue $sysdcl $syscompat $syscompatdcl $syscompat4 $syscompat4dcl $syscompat6 $syscompat6dcl $syscompat7 $syscompat7dcl $syscompat10 $syscompat10dcl $syscompat11 $syscompat11dcl $sysent $sysinc $sysarg $sysprotoend $systracetmp $systraceret case $# in 0) echo "usage: $0 input-file " 1>&2 exit 1 ;; esac if [ -n "$2" ]; then . $2 fi sed -e ' :join /\\$/{a\ N s/\\\n// b join } 2,${ /^#/!s/\([{}()*,]\)/ \1 /g } ' < $1 | awk " BEGIN { sysaue = \"$sysaue\" sysdcl = \"$sysdcl\" sysproto = \"$sysproto\" sysprotoend = \"$sysprotoend\" sysproto_h = \"$sysproto_h\" syscompat = \"$syscompat\" syscompatdcl = \"$syscompatdcl\" syscompat4 = \"$syscompat4\" syscompat4dcl = \"$syscompat4dcl\" syscompat6 = \"$syscompat6\" syscompat6dcl = \"$syscompat6dcl\" syscompat7 = \"$syscompat7\" syscompat7dcl = \"$syscompat7dcl\" syscompat10 = \"$syscompat10\" syscompat10dcl = \"$syscompat10dcl\" syscompat11 = \"$syscompat11\" syscompat11dcl = \"$syscompat11dcl\" sysent = \"$sysent\" syssw = \"$syssw\" sysinc = \"$sysinc\" sysarg = \"$sysarg\" sysnames = \"$sysnames\" syshdr = \"$syshdr\" sysmk = \"$sysmk\" systrace = \"$systrace\" systracetmp = \"$systracetmp\" systraceret = \"$systraceret\" compat = \"$compat\" compat4 = \"$compat4\" compat6 = \"$compat6\" compat7 = \"$compat7\" compat10 = \"$compat10\" compat11 = \"$compat11\" syscallprefix = \"$syscallprefix\" switchname = \"$switchname\" namesname = \"$namesname\" infile = \"$1\" capenabled_string = \"$capenabled\" "' split(capenabled_string, capenabled, ","); printf "\n/* The casts are bogus but will do for now. 
*/\n" > sysent printf "struct sysent %s[] = {\n",switchname > sysent printf "/*\n * System call switch table.\n *\n" > syssw printf " * DO NOT EDIT-- this file is automatically generated.\n" > syssw printf " * $%s$\n", "FreeBSD" > syssw printf " */\n\n" > syssw printf "/*\n * System call prototypes.\n *\n" > sysarg printf " * DO NOT EDIT-- this file is automatically generated.\n" > sysarg printf " * $%s$\n", "FreeBSD" > sysarg printf " */\n\n" > sysarg printf "#ifndef %s\n", sysproto_h > sysarg printf "#define\t%s\n\n", sysproto_h > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg + printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n" > sysarg printf "#include \n\n" > sysarg printf "#include \n\n" > sysarg printf "struct proc;\n\n" > sysarg printf "struct thread;\n\n" > sysarg printf "#define\tPAD_(t)\t(sizeof(register_t) <= sizeof(t) ? \\\n" > sysarg printf "\t\t0 : sizeof(register_t) - sizeof(t))\n\n" > sysarg printf "#if BYTE_ORDER == LITTLE_ENDIAN\n"> sysarg printf "#define\tPADL_(t)\t0\n" > sysarg printf "#define\tPADR_(t)\tPAD_(t)\n" > sysarg printf "#else\n" > sysarg printf "#define\tPADL_(t)\tPAD_(t)\n" > sysarg printf "#define\tPADR_(t)\t0\n" > sysarg printf "#endif\n\n" > sysarg printf "\n#ifdef %s\n\n", compat > syscompat printf "\n#ifdef %s\n\n", compat4 > syscompat4 printf "\n#ifdef %s\n\n", compat6 > syscompat6 printf "\n#ifdef %s\n\n", compat7 > syscompat7 printf "\n#ifdef %s\n\n", compat10 > syscompat10 printf "\n#ifdef %s\n\n", compat11 > syscompat11 printf "/*\n * System call names.\n *\n" > sysnames printf " * DO NOT EDIT-- this file is automatically generated.\n" > sysnames printf " * $%s$\n", "FreeBSD" > sysnames printf " */\n\n" > sysnames printf "const char *%s[] = {\n", namesname > sysnames printf "/*\n * System call numbers.\n *\n" > syshdr printf " * DO NOT EDIT-- this file is automatically generated.\n" > syshdr printf " * $%s$\n", "FreeBSD" > syshdr printf " */\n\n" > syshdr printf "# FreeBSD system call object files.\n" > sysmk printf "# DO NOT EDIT-- this file is automatically generated.\n" > sysmk printf "# $%s$\n", "FreeBSD" > sysmk printf "MIASM = " > sysmk printf "/*\n * System call argument to DTrace register array converstion.\n *\n" > systrace printf " * DO NOT EDIT-- this file is automatically generated.\n" > systrace printf " * $%s$\n", "FreeBSD" > systrace printf " * This file is part of the DTrace syscall provider.\n */\n\n" > systrace printf "static void\nsystrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)\n{\n" > systrace printf "\tint64_t *iarg = (int64_t *) uarg;\n" > systrace printf "\tswitch (sysnum) {\n" > systrace printf "static void\nsystrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)\n{\n\tconst char *p = NULL;\n" > systracetmp printf "\tswitch (sysnum) {\n" > systracetmp printf "static void\nsystrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)\n{\n\tconst char *p = NULL;\n" > systraceret printf "\tswitch (sysnum) {\n" > systraceret } NR == 1 { next } NF == 0 || $1 ~ /^;/ { next } $1 ~ /^#[ ]*include/ { print > sysinc next } $1 ~ /^#[ ]*if/ { print > sysent print > sysdcl print > sysarg print > syscompat print > syscompat4 print > syscompat6 print > syscompat7 print > syscompat10 print > syscompat11 print > sysnames print > systrace print > systracetmp print > systraceret savesyscall = syscall next } $1 ~ /^#[ ]*else/ { print > sysent print > sysdcl print > sysarg print > 
syscompat print > syscompat4 print > syscompat6 print > syscompat7 print > syscompat10 print > syscompat11 print > sysnames print > systrace print > systracetmp print > systraceret syscall = savesyscall next } $1 ~ /^#/ { print > sysent print > sysdcl print > sysarg print > syscompat print > syscompat4 print > syscompat6 print > syscompat7 print > syscompat10 print > syscompat11 print > sysnames print > systrace print > systracetmp print > systraceret next } syscall != $1 { printf "%s: line %d: syscall number out of sync at %d\n", infile, NR, syscall printf "line is:\n" print exit 1 } # Returns true if the type "name" is the first flag in the type field function type(name, flags, n) { n = split($3, flags, /\|/) return (n > 0 && flags[1] == name) } # Returns true if the flag "name" is set in the type field function flag(name, flags, i, n) { n = split($3, flags, /\|/) for (i = 1; i <= n; i++) if (flags[i] == name) return 1 return 0 } function align_sysent_comment(column) { printf("\t") > sysent column = column + 8 - column % 8 while (column < 56) { printf("\t") > sysent column = column + 8 } } function parserr(was, wanted) { printf "%s: line %d: unexpected %s (expected %s)\n", infile, NR, was, wanted exit 1 } function parseline() { f=4 # toss number, type, audit event argc= 0; argssize = "0" thr_flag = "SY_THR_STATIC" if (flag("NOTSTATIC")) { thr_flag = "SY_THR_ABSENT" } if ($NF != "}") { funcalias=$(NF-2) argalias=$(NF-1) rettype=$NF end=NF-3 } else { funcalias="" argalias="" rettype="int" end=NF } if (flag("NODEF")) { auditev="AUE_NULL" funcname=$4 argssize = "AS(" $6 ")" return } if ($f != "{") parserr($f, "{") f++ if ($end != "}") parserr($end, "}") end-- if ($end != ";") parserr($end, ";") end-- if ($end != ")") parserr($end, ")") end-- syscallret=$f f++ funcname=$f # # We now know the func name, so define a flags field for it. # Do this before any other processing as we may return early # from it. # for (cap in capenabled) { if (funcname == capenabled[cap]) { flags = "SYF_CAPENABLED"; break; } } if (funcalias == "") funcalias = funcname if (argalias == "") { argalias = funcname "_args" if (flag("COMPAT")) argalias = "o" argalias if (flag("COMPAT4")) argalias = "freebsd4_" argalias if (flag("COMPAT6")) argalias = "freebsd6_" argalias if (flag("COMPAT7")) argalias = "freebsd7_" argalias if (flag("COMPAT10")) argalias = "freebsd10_" argalias if (flag("COMPAT11")) argalias = "freebsd11_" argalias } f++ if ($f != "(") parserr($f, ")") f++ if (f == end) { if ($f != "void") parserr($f, "argument definition") return } while (f <= end) { argc++ argtype[argc]="" oldf="" while (f < end && $(f+1) != ",") { if (argtype[argc] != "" && oldf != "*") argtype[argc] = argtype[argc]" "; argtype[argc] = argtype[argc]$f; oldf = $f; f++ } if (argtype[argc] == "") parserr($f, "argument definition") argname[argc]=$f; f += 2; # skip name, and any comma } if (argc != 0) argssize = "AS(" argalias ")" } { comment = $4 if (NF < 7) for (i = 5; i <= NF; i++) comment = comment " " $i } # # The AUE_ audit event identifier. # { auditev = $2; } # # The flags, if any. 
# { flags = "0"; } type("STD") || type("NODEF") || type("NOARGS") || type("NOPROTO") \ || type("NOSTD") { parseline() printf("\t/* %s */\n\tcase %d: {\n", funcname, syscall) > systrace printf("\t/* %s */\n\tcase %d:\n", funcname, syscall) > systracetmp printf("\t/* %s */\n\tcase %d:\n", funcname, syscall) > systraceret if (argc > 0) { printf("\t\tswitch(ndx) {\n") > systracetmp printf("\t\tstruct %s *p = params;\n", argalias) > systrace for (i = 1; i <= argc; i++) { arg = argtype[i] sub("__restrict$", "", arg) if (index(arg, "*") > 0) printf("\t\tcase %d:\n\t\t\tp = \"userland %s\";\n\t\t\tbreak;\n", i - 1, arg) > systracetmp else printf("\t\tcase %d:\n\t\t\tp = \"%s\";\n\t\t\tbreak;\n", i - 1, arg) > systracetmp if (index(arg, "*") > 0 || arg == "caddr_t") printf("\t\tuarg[%d] = (intptr_t) p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace else if (arg == "union l_semun") printf("\t\tuarg[%d] = p->%s.buf; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace else if (substr(arg, 1, 1) == "u" || arg == "size_t") printf("\t\tuarg[%d] = p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace else printf("\t\tiarg[%d] = p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systrace } printf("\t\tdefault:\n\t\t\tbreak;\n\t\t};\n") > systracetmp printf("\t\tif (ndx == 0 || ndx == 1)\n") > systraceret printf("\t\t\tp = \"%s\";\n", syscallret) > systraceret printf("\t\tbreak;\n") > systraceret } printf("\t\t*n_args = %d;\n\t\tbreak;\n\t}\n", argc) > systrace printf("\t\tbreak;\n") > systracetmp if (argc != 0 && !flag("NOARGS") && !flag("NOPROTO") && \ !flag("NODEF")) { printf("struct %s {\n", argalias) > sysarg for (i = 1; i <= argc; i++) printf("\tchar %s_l_[PADL_(%s)]; " \ "%s %s; char %s_r_[PADR_(%s)];\n", argname[i], argtype[i], argtype[i], argname[i], argname[i], argtype[i]) > sysarg printf("};\n") > sysarg } else if (!flag("NOARGS") && !flag("NOPROTO") && !flag("NODEF")) printf("struct %s {\n\tregister_t dummy;\n};\n", argalias) > sysarg if (!flag("NOPROTO") && !flag("NODEF")) { if (funcname == "nosys" || funcname == "lkmnosys" || funcname == "sysarch" || funcname ~ /^freebsd/ || funcname ~ /^linux/ || funcname ~ /^ibcs2/ || funcname ~ /^xenix/ || funcname ~ /^cloudabi/) { printf("%s\t%s(struct thread *, struct %s *)", rettype, funcname, argalias) > sysdcl } else { printf("%s\tsys_%s(struct thread *, struct %s *)", rettype, funcname, argalias) > sysdcl } printf(";\n") > sysdcl printf("#define\t%sAUE_%s\t%s\n", syscallprefix, funcalias, auditev) > sysaue } printf("\t{ %s, (sy_call_t *)", argssize) > sysent column = 8 + 2 + length(argssize) + 15 if (flag("NOSTD")) { printf("lkmressys, AUE_NULL, NULL, 0, 0, %s, SY_THR_ABSENT },", flags) > sysent column = column + length("lkmressys") + length("AUE_NULL") + 3 } else { if (funcname == "nosys" || funcname == "sysarch" || funcname == "lkmnosys" || funcname ~ /^freebsd/ || funcname ~ /^linux/ || funcname ~ /^ibcs2/ || funcname ~ /^xenix/ || funcname ~ /^cloudabi/) { printf("%s, %s, NULL, 0, 0, %s, %s },", funcname, auditev, flags, thr_flag) > sysent column = column + length(funcname) + length(auditev) + length(flags) + 3 } else { printf("sys_%s, %s, NULL, 0, 0, %s, %s },", funcname, auditev, flags, thr_flag) > sysent column = column + length(funcname) + length(auditev) + length(flags) + 3 + 4 } } align_sysent_comment(column) printf("/* %d = %s */\n", syscall, funcalias) > sysent printf("\t\"%s\",\t\t\t/* %d = %s */\n", funcalias, syscall, funcalias) > sysnames if (!flag("NODEF")) { printf("#define\t%s%s\t%d\n", syscallprefix, funcalias, 
syscall) > syshdr printf(" \\\n\t%s.o", funcalias) > sysmk } syscall++ next } type("COMPAT") || type("COMPAT4") || type("COMPAT6") || \ type("COMPAT7") || type("COMPAT10") || type("COMPAT11") { if (flag("COMPAT")) { ncompat++ out = syscompat outdcl = syscompatdcl wrap = "compat" prefix = "o" descr = "old" } else if (flag("COMPAT4")) { ncompat4++ out = syscompat4 outdcl = syscompat4dcl wrap = "compat4" prefix = "freebsd4_" descr = "freebsd4" } else if (flag("COMPAT6")) { ncompat6++ out = syscompat6 outdcl = syscompat6dcl wrap = "compat6" prefix = "freebsd6_" descr = "freebsd6" } else if (flag("COMPAT7")) { ncompat7++ out = syscompat7 outdcl = syscompat7dcl wrap = "compat7" prefix = "freebsd7_" descr = "freebsd7" } else if (flag("COMPAT10")) { ncompat10++ out = syscompat10 outdcl = syscompat10dcl wrap = "compat10" prefix = "freebsd10_" descr = "freebsd10" } else if (flag("COMPAT11")) { ncompat11++ out = syscompat11 outdcl = syscompat11dcl wrap = "compat11" prefix = "freebsd11_" descr = "freebsd11" } parseline() if (argc != 0 && !flag("NOARGS") && !flag("NOPROTO") && \ !flag("NODEF")) { printf("struct %s {\n", argalias) > out for (i = 1; i <= argc; i++) printf("\tchar %s_l_[PADL_(%s)]; %s %s; " \ "char %s_r_[PADR_(%s)];\n", argname[i], argtype[i], argtype[i], argname[i], argname[i], argtype[i]) > out printf("};\n") > out } else if (!flag("NOARGS") && !flag("NOPROTO") && !flag("NODEF")) printf("struct %s {\n\tregister_t dummy;\n};\n", argalias) > sysarg if (!flag("NOPROTO") && !flag("NODEF")) { printf("%s\t%s%s(struct thread *, struct %s *);\n", rettype, prefix, funcname, argalias) > outdcl printf("#define\t%sAUE_%s%s\t%s\n", syscallprefix, prefix, funcname, auditev) > sysaue } if (flag("NOSTD")) { printf("\t{ %s, (sy_call_t *)%s, %s, NULL, 0, 0, 0, SY_THR_ABSENT },", "0", "lkmressys", "AUE_NULL") > sysent align_sysent_comment(8 + 2 + length("0") + 15 + \ length("lkmressys") + length("AUE_NULL") + 3) } else { printf("\t{ %s(%s,%s), %s, NULL, 0, 0, %s, %s },", wrap, argssize, funcname, auditev, flags, thr_flag) > sysent align_sysent_comment(8 + 9 + length(argssize) + 1 + \ length(funcname) + length(auditev) + \ length(flags) + 4) } printf("/* %d = %s %s */\n", syscall, descr, funcalias) > sysent printf("\t\"%s.%s\",\t\t/* %d = %s %s */\n", wrap, funcalias, syscall, descr, funcalias) > sysnames # Do not provide freebsdN_* symbols in libc for < FreeBSD 7 if (flag("COMPAT") || flag("COMPAT4") || flag("COMPAT6")) { printf("\t\t\t\t/* %d is %s %s */\n", syscall, descr, funcalias) > syshdr } else if (!flag("NODEF")) { printf("#define\t%s%s%s\t%d\n", syscallprefix, prefix, funcalias, syscall) > syshdr printf(" \\\n\t%s%s.o", prefix, funcalias) > sysmk } syscall++ next } type("OBSOL") { printf("\t{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT },") > sysent align_sysent_comment(34) printf("/* %d = obsolete %s */\n", syscall, comment) > sysent printf("\t\"obs_%s\",\t\t\t/* %d = obsolete %s */\n", $4, syscall, comment) > sysnames printf("\t\t\t\t/* %d is obsolete %s */\n", syscall, comment) > syshdr syscall++ next } type("UNIMPL") { printf("\t{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT },\t\t\t/* %d = %s */\n", syscall, comment) > sysent printf("\t\"#%d\",\t\t\t/* %d = %s */\n", syscall, syscall, comment) > sysnames syscall++ next } { printf "%s: line %d: unrecognized keyword %s\n", infile, NR, $3 exit 1 } END { printf "\n#define AS(name) (sizeof(struct name) / sizeof(register_t))\n" > sysinc if (ncompat != 0 || ncompat4 != 0 || ncompat6 != 0 || ncompat7 != 0 || 
ncompat10 != 0 || ncompat11 != 0) printf "#include \"opt_compat.h\"\n\n" > syssw if (ncompat != 0) { printf "\n#ifdef %s\n", compat > sysinc printf "#define compat(n, name) n, (sy_call_t *)__CONCAT(o,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat4 != 0) { printf "\n#ifdef %s\n", compat4 > sysinc printf "#define compat4(n, name) n, (sy_call_t *)__CONCAT(freebsd4_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat4(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat6 != 0) { printf "\n#ifdef %s\n", compat6 > sysinc printf "#define compat6(n, name) n, (sy_call_t *)__CONCAT(freebsd6_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat6(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat7 != 0) { printf "\n#ifdef %s\n", compat7 > sysinc printf "#define compat7(n, name) n, (sy_call_t *)__CONCAT(freebsd7_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat7(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat10 != 0) { printf "\n#ifdef %s\n", compat10 > sysinc printf "#define compat10(n, name) n, (sy_call_t *)__CONCAT(freebsd10_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat10(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } if (ncompat11 != 0) { printf "\n#ifdef %s\n", compat11 > sysinc printf "#define compat11(n, name) n, (sy_call_t *)__CONCAT(freebsd11_,name)\n" > sysinc printf "#else\n" > sysinc printf "#define compat11(n, name) 0, (sy_call_t *)nosys\n" > sysinc printf "#endif\n" > sysinc } printf("\n#endif /* %s */\n\n", compat) > syscompatdcl printf("\n#endif /* %s */\n\n", compat4) > syscompat4dcl printf("\n#endif /* %s */\n\n", compat6) > syscompat6dcl printf("\n#endif /* %s */\n\n", compat7) > syscompat7dcl printf("\n#endif /* %s */\n\n", compat10) > syscompat10dcl printf("\n#endif /* %s */\n\n", compat11) > syscompat11dcl printf("\n#undef PAD_\n") > sysprotoend printf("#undef PADL_\n") > sysprotoend printf("#undef PADR_\n") > sysprotoend printf("\n#endif /* !%s */\n", sysproto_h) > sysprotoend printf("\n") > sysmk printf("};\n") > sysent printf("};\n") > sysnames printf("#define\t%sMAXSYSCALL\t%d\n", syscallprefix, syscall) \ > syshdr printf "\tdefault:\n\t\t*n_args = 0;\n\t\tbreak;\n\t};\n}\n" > systrace printf "\tdefault:\n\t\tbreak;\n\t};\n\tif (p != NULL)\n\t\tstrlcpy(desc, p, descsz);\n}\n" > systracetmp printf "\tdefault:\n\t\tbreak;\n\t};\n\tif (p != NULL)\n\t\tstrlcpy(desc, p, descsz);\n}\n" > systraceret } ' cat $sysinc $sysent >> $syssw cat $sysarg $sysdcl \ $syscompat $syscompatdcl \ $syscompat4 $syscompat4dcl \ $syscompat6 $syscompat6dcl \ $syscompat7 $syscompat7dcl \ $syscompat10 $syscompat10dcl \ $syscompat11 $syscompat11dcl \ $sysaue $sysprotoend > $sysproto cat $systracetmp >> $systrace cat $systraceret >> $systrace Index: user/jeff/numa/sys/kern/sched_4bsd.c =================================================================== --- user/jeff/numa/sys/kern/sched_4bsd.c (revision 326888) +++ user/jeff/numa/sys/kern/sched_4bsd.c (revision 326889) @@ -1,1779 +1,1780 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. 
* All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #ifdef KDTRACE_HOOKS #include int dtrace_vtime_active; dtrace_vtime_switch_func_t dtrace_vtime_switch_func; #endif /* * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in * the range 100-256 Hz (approximately). */ #define ESTCPULIM(e) \ min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \ RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1) #ifdef SMP #define INVERSE_ESTCPU_WEIGHT (8 * smp_cpus) #else #define INVERSE_ESTCPU_WEIGHT 8 /* 1 / (priorities per estcpu level). */ #endif #define NICE_WEIGHT 1 /* Priorities per nice level. */ #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX))) /* * The schedulable entity that runs a context. * This is an extension to the thread structure and is tailored to * the requirements of this scheduler. * All fields are protected by the scheduler lock. */ struct td_sched { fixpt_t ts_pctcpu; /* %cpu during p_swtime. */ u_int ts_estcpu; /* Estimated cpu utilization. */ int ts_cpticks; /* Ticks of cpu time. */ int ts_slptime; /* Seconds !RUNNING. */ int ts_slice; /* Remaining part of time slice. */ int ts_flags; struct runq *ts_runq; /* runq the thread is currently on */ #ifdef KTR char ts_name[TS_NAME_LEN]; #endif }; /* flags kept in td_flags */ #define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */ #define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */ #define TDF_SLICEEND TDF_SCHED2 /* Thread time slice is over. */ /* flags kept in ts_flags */ #define TSF_AFFINITY 0x0001 /* Has a non-"full" CPU set. 
*/ #define SKE_RUNQ_PCPU(ts) \ ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq) #define THREAD_CAN_SCHED(td, cpu) \ CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <= sizeof(struct thread0_storage), "increase struct thread0_storage.t0st_sched size"); static struct mtx sched_lock; static int realstathz = 127; /* stathz is sometimes 0 and run off of hz. */ static int sched_tdcnt; /* Total runnable threads in the system. */ static int sched_slice = 12; /* Thread run time before rescheduling. */ static void setup_runqs(void); static void schedcpu(void); static void schedcpu_thread(void); static void sched_priority(struct thread *td, u_char prio); static void sched_setup(void *dummy); static void maybe_resched(struct thread *td); static void updatepri(struct thread *td); static void resetpriority(struct thread *td); static void resetpriority_thread(struct thread *td); #ifdef SMP static int sched_pickcpu(struct thread *td); static int forward_wakeup(int cpunum); static void kick_other_cpu(int pri, int cpuid); #endif static struct kproc_desc sched_kp = { "schedcpu", schedcpu_thread, NULL }; SYSINIT(schedcpu, SI_SUB_LAST, SI_ORDER_FIRST, kproc_start, &sched_kp); SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); static void sched_initticks(void *dummy); SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL); /* * Global run queue. */ static struct runq runq; #ifdef SMP /* * Per-CPU run queues */ static struct runq runq_pcpu[MAXCPU]; long runq_length[MAXCPU]; static cpuset_t idle_cpus_mask; #endif struct pcpuidlestat { u_int idlecalls; u_int oldidlecalls; }; static DPCPU_DEFINE(struct pcpuidlestat, idlestat); static void setup_runqs(void) { #ifdef SMP int i; for (i = 0; i < MAXCPU; ++i) runq_init(&runq_pcpu[i]); #endif runq_init(&runq); } static int sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) { int error, new_val, period; period = 1000000 / realstathz; new_val = period * sched_slice; error = sysctl_handle_int(oidp, &new_val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (new_val <= 0) return (EINVAL); sched_slice = imax(1, (new_val + period / 2) / period); hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / realstathz); return (0); } SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler"); SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0, "Scheduler name"); SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_kern_quantum, "I", "Quantum for timeshare threads in microseconds"); SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "Quantum for timeshare threads in stathz ticks"); #ifdef SMP /* Enable forwarding of wakeups to all other cpus */ static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP"); static int runq_fuzz = 1; SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, ""); static int forward_wakeup_enabled = 1; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW, &forward_wakeup_enabled, 0, "Forwarding of wakeup to idle CPUs"); static int forward_wakeups_requested = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD, &forward_wakeups_requested, 0, "Requests for Forwarding of wakeup to idle CPUs"); static int forward_wakeups_delivered = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD, &forward_wakeups_delivered, 0, "Completed Forwarding of wakeup to idle CPUs"); static int 
forward_wakeup_use_mask = 1; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW, &forward_wakeup_use_mask, 0, "Use the mask of idle cpus"); static int forward_wakeup_use_loop = 0; SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW, &forward_wakeup_use_loop, 0, "Use a loop to find idle cpus"); #endif #if 0 static int sched_followon = 0; SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW, &sched_followon, 0, "allow threads to share a quantum"); #endif SDT_PROVIDER_DEFINE(sched); SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *", "struct proc *", "uint8_t"); SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *", "struct proc *", "void *"); SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *", "struct proc *", "void *", "int"); SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *", "struct proc *", "uint8_t", "struct thread *"); SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int"); SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *", "struct proc *"); SDT_PROBE_DEFINE(sched, , , on__cpu); SDT_PROBE_DEFINE(sched, , , remain__cpu); SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *", "struct proc *"); static __inline void sched_load_add(void) { sched_tdcnt++; KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt); SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt); } static __inline void sched_load_rem(void) { sched_tdcnt--; KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt); SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt); } /* * Arrange to reschedule if necessary, taking the priorities and * schedulers into account. */ static void maybe_resched(struct thread *td) { THREAD_LOCK_ASSERT(td, MA_OWNED); if (td->td_priority < curthread->td_priority) curthread->td_flags |= TDF_NEEDRESCHED; } /* * This function is called when a thread is about to be put on run queue * because it has been made runnable or its priority has been adjusted. It * determines if the new thread should preempt the current thread. If so, * it sets td_owepreempt to request a preemption. */ int maybe_preempt(struct thread *td) { #ifdef PREEMPTION struct thread *ctd; int cpri, pri; /* * The new thread should not preempt the current thread if any of the * following conditions are true: * * - The kernel is in the throes of crashing (panicstr). * - The current thread has a higher (numerically lower) or * equivalent priority. Note that this prevents curthread from * trying to preempt to itself. * - The current thread has an inhibitor set or is in the process of * exiting. In this case, the current thread is about to switch * out anyways, so there's no point in preempting. If we did, * the current thread would not be properly resumed as well, so * just avoid that whole landmine. * - If the new thread's priority is not a realtime priority and * the current thread's priority is not an idle priority and * FULL_PREEMPTION is disabled. * * If all of these conditions are false, but the current thread is in * a nested critical section, then we have to defer the preemption * until we exit the critical section. Otherwise, switch immediately * to the new thread. 
*/ ctd = curthread; THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("maybe_preempt: trying to run inhibited thread")); pri = td->td_priority; cpri = ctd->td_priority; if (panicstr != NULL || pri >= cpri /* || dumping */ || TD_IS_INHIBITED(ctd)) return (0); #ifndef FULL_PREEMPTION if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE) return (0); #endif CTR0(KTR_PROC, "maybe_preempt: scheduling preemption"); ctd->td_owepreempt = 1; return (1); #else return (0); #endif } /* * Constants for digital decay and forget: * 90% of (ts_estcpu) usage in 5 * loadav time * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive) * Note that, as ps(1) mentions, this can let percentages * total over 100% (I've seen 137.9% for 3 processes). * * Note that schedclock() updates ts_estcpu and p_cpticks asynchronously. * * We wish to decay away 90% of ts_estcpu in (5 * loadavg) seconds. * That is, the system wants to compute a value of decay such * that the following for loop: * for (i = 0; i < (5 * loadavg); i++) * ts_estcpu *= decay; * will compute * ts_estcpu *= 0.1; * for all values of loadavg: * * Mathematically this loop can be expressed by saying: * decay ** (5 * loadavg) ~= .1 * * The system computes decay as: * decay = (2 * loadavg) / (2 * loadavg + 1) * * We wish to prove that the system's computation of decay * will always fulfill the equation: * decay ** (5 * loadavg) ~= .1 * * If we compute b as: * b = 2 * loadavg * then * decay = b / (b + 1) * * We now need to prove two things: * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) * * Facts: * For x close to zero, exp(x) =~ 1 + x, since * exp(x) = 0! + x**1/1! + x**2/2! + ... . * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. * For x close to zero, ln(1+x) =~ x, since * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). * ln(.1) =~ -2.30 * * Proof of (1): * Solve (factor)**(power) =~ .1 given power (5*loadav): * solving for factor, * ln(factor) =~ (-2.30/5*loadav), or * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED * * Proof of (2): * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): * solving for power, * power*ln(b/(b+1)) =~ -2.30, or * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED * * Actual power values for the implemented algorithm are as follows: * loadav: 1 2 3 4 * power: 5.68 10.32 14.94 19.55 */ /* calculations for digital decay to forget 90% of usage in 5*loadav sec */ #define loadfactor(loadav) (2 * (loadav)) #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); /* * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). * * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). * * If you don't want to bother with the faster/more-accurate formula, you * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate * (more general) method of calculating the %age of CPU used by a process. 
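 * (Numeric check: ccpu^60 = exp(-60/20) = exp(-3) ~= 0.0498, i.e. about 5% of the old ts_pctcpu value survives after 60 seconds, which matches the 95%-in-60-seconds decay stated above.)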
*/ #define CCPU_SHIFT 11 /* * Recompute process priorities, every hz ticks. * MP-safe, called without the Giant mutex. */ /* ARGSUSED */ static void schedcpu(void) { fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); struct thread *td; struct proc *p; struct td_sched *ts; int awake; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); continue; } FOREACH_THREAD_IN_PROC(p, td) { awake = 0; ts = td_get_sched(td); thread_lock(td); /* * Increment sleep time (if sleeping). We * ignore overflow, as above. */ /* * The td_sched slptimes are not touched in wakeup * because the thread may not HAVE everything in * memory? XXX I think this is out of date. */ if (TD_ON_RUNQ(td)) { awake = 1; td->td_flags &= ~TDF_DIDRUN; } else if (TD_IS_RUNNING(td)) { awake = 1; /* Do not clear TDF_DIDRUN */ } else if (td->td_flags & TDF_DIDRUN) { awake = 1; td->td_flags &= ~TDF_DIDRUN; } /* * ts_pctcpu is only for ps and ttyinfo(). */ ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT; /* * If the td_sched has been idle the entire second, * stop recalculating its priority until * it wakes up. */ if (ts->ts_cpticks != 0) { #if (FSHIFT >= CCPU_SHIFT) ts->ts_pctcpu += (realstathz == 100) ? ((fixpt_t) ts->ts_cpticks) << (FSHIFT - CCPU_SHIFT) : 100 * (((fixpt_t) ts->ts_cpticks) << (FSHIFT - CCPU_SHIFT)) / realstathz; #else ts->ts_pctcpu += ((FSCALE - ccpu) * (ts->ts_cpticks * FSCALE / realstathz)) >> FSHIFT; #endif ts->ts_cpticks = 0; } /* * If there are ANY running threads in this process, * then don't count it as sleeping. * XXX: this is broken. */ if (awake) { if (ts->ts_slptime > 1) { /* * In an ideal world, this should not * happen, because whoever woke us * up from the long sleep should have * unwound the slptime and reset our * priority before we run at the stale * priority. Should KASSERT at some * point when all the cases are fixed. */ updatepri(td); } ts->ts_slptime = 0; } else ts->ts_slptime++; if (ts->ts_slptime > 1) { thread_unlock(td); continue; } ts->ts_estcpu = decay_cpu(loadfac, ts->ts_estcpu); resetpriority(td); resetpriority_thread(td); thread_unlock(td); } PROC_UNLOCK(p); } sx_sunlock(&allproc_lock); } /* * Main loop for a kthread that executes schedcpu once a second. */ static void schedcpu_thread(void) { for (;;) { schedcpu(); pause("-", hz); } } /* * Recalculate the priority of a process after it has slept for a while. * For all load averages >= 1 and max ts_estcpu of 255, sleeping for at * least six times the loadfactor will decay ts_estcpu to zero. */ static void updatepri(struct thread *td) { struct td_sched *ts; fixpt_t loadfac; unsigned int newcpu; ts = td_get_sched(td); loadfac = loadfactor(averunnable.ldavg[0]); if (ts->ts_slptime > 5 * loadfac) ts->ts_estcpu = 0; else { newcpu = ts->ts_estcpu; ts->ts_slptime--; /* was incremented in schedcpu() */ while (newcpu && --ts->ts_slptime) newcpu = decay_cpu(loadfac, newcpu); ts->ts_estcpu = newcpu; } } /* * Compute the priority of a process when running in user mode. * Arrange to reschedule if the resulting priority is better * than that of the current process. */ static void resetpriority(struct thread *td) { u_int newpriority; if (td->td_pri_class != PRI_TIMESHARE) return; newpriority = PUSER + td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT + NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN); newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), PRI_MAX_TIMESHARE); sched_user_prio(td, newpriority); } /* * Update the thread's priority when the associated process's user * priority changes. 
*/ static void resetpriority_thread(struct thread *td) { /* Only change threads with a time sharing user priority. */ if (td->td_priority < PRI_MIN_TIMESHARE || td->td_priority > PRI_MAX_TIMESHARE) return; /* XXX the whole needresched thing is broken, but not silly. */ maybe_resched(td); sched_prio(td, td->td_user_pri); } /* ARGSUSED */ static void sched_setup(void *dummy) { setup_runqs(); /* Account for thread0. */ sched_load_add(); } /* * This routine determines time constants after stathz and hz are setup. */ static void sched_initticks(void *dummy) { realstathz = stathz ? stathz : hz; sched_slice = realstathz / 10; /* ~100ms */ hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / realstathz); } /* External interfaces start here */ /* * Very early in the boot some setup of scheduler-specific * parts of proc0 and of some scheduler resources needs to be done. * Called from: * proc0_init() */ void schedinit(void) { /* * Set up the scheduler specific parts of thread0. */ thread0.td_lock = &sched_lock; td_get_sched(&thread0)->ts_slice = sched_slice; mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); } int sched_runnable(void) { #ifdef SMP return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]); #else return runq_check(&runq); #endif } int sched_rr_interval(void) { /* Convert sched_slice from stathz to hz. */ return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz)); } /* * We adjust the priority of the current process. The priority of a * process gets worse as it accumulates CPU time. The cpu usage * estimator (ts_estcpu) is increased here. resetpriority() will * compute a different priority each time ts_estcpu increases by * INVERSE_ESTCPU_WEIGHT (until PRI_MAX_TIMESHARE is reached). The * cpu usage estimator ramps up quite quickly when the process is * running (linearly), and decays away exponentially, at a rate which * is proportionally slower when the system is busy. The basic * principle is that the system will 90% forget that the process used * a lot of CPU time in 5 * loadav seconds. This causes the system to * favor processes which haven't run much recently, and to round-robin * among other processes. */ void sched_clock(struct thread *td) { struct pcpuidlestat *stat; struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); ts = td_get_sched(td); ts->ts_cpticks++; ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1); if ((ts->ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { resetpriority(td); resetpriority_thread(td); } /* * Force a context switch if the current thread has used up a full * time slice (default is 100ms). */ if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) { ts->ts_slice = sched_slice; td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND; } stat = DPCPU_PTR(idlestat); stat->oldidlecalls = stat->idlecalls; stat->idlecalls = 0; } /* * Charge child's scheduling CPU usage to parent. 
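 * (The exiting child's ts_estcpu is folded into the parent's estimate, so CPU time burned by short-lived children still counts against the parent's priority.)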
*/ void sched_exit(struct proc *p, struct thread *td) { KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit", "prio:%d", td->td_priority); PROC_LOCK_ASSERT(p, MA_OWNED); sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); } void sched_exit_thread(struct thread *td, struct thread *child) { KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit", "prio:%d", child->td_priority); thread_lock(td); td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu + td_get_sched(child)->ts_estcpu); thread_unlock(td); thread_lock(child); if ((child->td_flags & TDF_NOLOAD) == 0) sched_load_rem(); thread_unlock(child); } void sched_fork(struct thread *td, struct thread *childtd) { sched_fork_thread(td, childtd); } void sched_fork_thread(struct thread *td, struct thread *childtd) { struct td_sched *ts, *tsc; childtd->td_oncpu = NOCPU; childtd->td_lastcpu = NOCPU; childtd->td_lock = &sched_lock; childtd->td_cpuset = cpuset_ref(td->td_cpuset); + childtd->td_domain.dr_policy = td->td_cpuset->cs_domain; childtd->td_priority = childtd->td_base_pri; ts = td_get_sched(childtd); bzero(ts, sizeof(*ts)); tsc = td_get_sched(td); ts->ts_estcpu = tsc->ts_estcpu; ts->ts_flags |= (tsc->ts_flags & TSF_AFFINITY); ts->ts_slice = 1; } void sched_nice(struct proc *p, int nice) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); p->p_nice = nice; FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); resetpriority(td); resetpriority_thread(td); thread_unlock(td); } } void sched_class(struct thread *td, int class) { THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_pri_class = class; } /* * Adjust the priority of a thread. */ static void sched_priority(struct thread *td, u_char prio) { KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change", "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, sched_tdname(curthread)); SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio); if (td != curthread && prio > td->td_priority) { KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread), "lend prio", "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, sched_tdname(td)); SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, curthread); } THREAD_LOCK_ASSERT(td, MA_OWNED); if (td->td_priority == prio) return; td->td_priority = prio; if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) { sched_rem(td); sched_add(td, SRQ_BORING); } } /* * Update a thread's priority when it is lent another thread's * priority. */ void sched_lend_prio(struct thread *td, u_char prio) { td->td_flags |= TDF_BORROWING; sched_priority(td, prio); } /* * Restore a thread's priority when priority propagation is * over. The prio argument is the minimum priority the thread * needs to have to satisfy other possible priority lending * requests. If the thread's regular priority is less * important than prio the thread will keep a priority boost * of prio. */ void sched_unlend_prio(struct thread *td, u_char prio) { u_char base_pri; if (td->td_base_pri >= PRI_MIN_TIMESHARE && td->td_base_pri <= PRI_MAX_TIMESHARE) base_pri = td->td_user_pri; else base_pri = td->td_base_pri; if (prio >= base_pri) { td->td_flags &= ~TDF_BORROWING; sched_prio(td, base_pri); } else sched_lend_prio(td, prio); } void sched_prio(struct thread *td, u_char prio) { u_char oldprio; /* First, update the base priority. */ td->td_base_pri = prio; /* * If the thread is borrowing another thread's priority, don't ever * lower the priority. */ if (td->td_flags & TDF_BORROWING && td->td_priority < prio) return; /* Change the real priority. 
*/ oldprio = td->td_priority; sched_priority(td, prio); /* * If the thread is on a turnstile, then let the turnstile update * its state. */ if (TD_ON_LOCK(td) && oldprio != prio) turnstile_adjust(td, oldprio); } void sched_user_prio(struct thread *td, u_char prio) { THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_base_user_pri = prio; if (td->td_lend_user_pri <= prio) return; td->td_user_pri = prio; } void sched_lend_user_prio(struct thread *td, u_char prio) { THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_lend_user_pri = prio; td->td_user_pri = min(prio, td->td_base_user_pri); if (td->td_priority > td->td_user_pri) sched_prio(td, td->td_user_pri); else if (td->td_priority != td->td_user_pri) td->td_flags |= TDF_NEEDRESCHED; } void sched_sleep(struct thread *td, int pri) { THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_slptick = ticks; td_get_sched(td)->ts_slptime = 0; if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) sched_prio(td, pri); if (TD_IS_SUSPENDED(td) || pri >= PSOCK) td->td_flags |= TDF_CANSWAP; } void sched_switch(struct thread *td, struct thread *newtd, int flags) { struct mtx *tmtx; struct td_sched *ts; struct proc *p; int preempted; tmtx = NULL; ts = td_get_sched(td); p = td->td_proc; THREAD_LOCK_ASSERT(td, MA_OWNED); /* * Switch to the sched lock to fix things up and pick * a new thread. * Block the td_lock in order to avoid breaking the critical path. */ if (td->td_lock != &sched_lock) { mtx_lock_spin(&sched_lock); tmtx = thread_lock_block(td); } if ((td->td_flags & TDF_NOLOAD) == 0) sched_load_rem(); td->td_lastcpu = td->td_oncpu; preempted = (td->td_flags & TDF_SLICEEND) == 0 && (flags & SW_PREEMPT) != 0; td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND); td->td_owepreempt = 0; td->td_oncpu = NOCPU; /* * At the last moment, if this thread is still marked RUNNING, * then put it back on the run queue as it has not been suspended * or stopped or any thing else similar. We never put the idle * threads on the run queue, however. */ if (td->td_flags & TDF_IDLETD) { TD_SET_CAN_RUN(td); #ifdef SMP CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask); #endif } else { if (TD_IS_RUNNING(td)) { /* Put us back on the run queue. */ sched_add(td, preempted ? SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : SRQ_OURSELF|SRQ_YIELDING); } } if (newtd) { /* * The thread we are about to run needs to be counted * as if it had been added to the run queue and selected. * It came from: * * A preemption * * An upcall * * A followon */ KASSERT((newtd->td_inhibitors == 0), ("trying to run inhibited thread")); newtd->td_flags |= TDF_DIDRUN; TD_SET_RUNNING(newtd); if ((newtd->td_flags & TDF_NOLOAD) == 0) sched_load_add(); } else { newtd = choosethread(); MPASS(newtd->td_lock == &sched_lock); } #if (KTR_COMPILE & KTR_SCHED) != 0 if (TD_IS_IDLETHREAD(td)) KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle", "prio:%d", td->td_priority); else KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td), "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg, "lockname:\"%s\"", td->td_lockname); #endif if (td != newtd) { #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); #endif SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc); /* I feel sleepy */ lock_profile_release_lock(&sched_lock.lock_object); #ifdef KDTRACE_HOOKS /* * If DTrace has set the active vtime enum to anything * other than INACTIVE (0), then it should have set the * function to call. */ if (dtrace_vtime_active) (*dtrace_vtime_switch_func)(newtd); #endif cpu_switch(td, newtd, tmtx != NULL ? 
tmtx : td->td_lock); lock_profile_obtain_lock_success(&sched_lock.lock_object, 0, 0, __FILE__, __LINE__); /* * Where am I? What year is it? * We are in the same thread that went to sleep above, * but any amount of time may have passed. All our context * will still be available as will local variables. * PCPU values however may have changed as we may have * changed CPU so don't trust cached values of them. * New threads will go to fork_exit() instead of here * so if you change things here you may need to change * things there too. * * If the thread above was exiting it will never wake * up again here, so either it has saved everything it * needed to, or the thread_wait() or wait() will * need to reap it. */ SDT_PROBE0(sched, , , on__cpu); #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); #endif } else SDT_PROBE0(sched, , , remain__cpu); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", "prio:%d", td->td_priority); #ifdef SMP if (td->td_flags & TDF_IDLETD) CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask); #endif sched_lock.mtx_lock = (uintptr_t)td; td->td_oncpu = PCPU_GET(cpuid); MPASS(td->td_lock == &sched_lock); } void sched_wakeup(struct thread *td) { struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); ts = td_get_sched(td); td->td_flags &= ~TDF_CANSWAP; if (ts->ts_slptime > 1) { updatepri(td); resetpriority(td); } td->td_slptick = 0; ts->ts_slptime = 0; ts->ts_slice = sched_slice; sched_add(td, SRQ_BORING); } #ifdef SMP static int forward_wakeup(int cpunum) { struct pcpu *pc; cpuset_t dontuse, map, map2; u_int id, me; int iscpuset; mtx_assert(&sched_lock, MA_OWNED); CTR0(KTR_RUNQ, "forward_wakeup()"); if ((!forward_wakeup_enabled) || (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0)) return (0); if (!smp_started || panicstr) return (0); forward_wakeups_requested++; /* * Check the idle mask we received against what we calculated * before in the old version. */ me = PCPU_GET(cpuid); /* Don't bother if we should be doing it ourself. */ if (CPU_ISSET(me, &idle_cpus_mask) && (cpunum == NOCPU || me == cpunum)) return (0); CPU_SETOF(me, &dontuse); CPU_OR(&dontuse, &stopped_cpus); CPU_OR(&dontuse, &hlt_cpus_mask); CPU_ZERO(&map2); if (forward_wakeup_use_loop) { STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { id = pc->pc_cpuid; if (!CPU_ISSET(id, &dontuse) && pc->pc_curthread == pc->pc_idlethread) { CPU_SET(id, &map2); } } } if (forward_wakeup_use_mask) { map = idle_cpus_mask; CPU_NAND(&map, &dontuse); /* If they are both on, compare and use loop if different. */ if (forward_wakeup_use_loop) { if (CPU_CMP(&map, &map2)) { printf("map != map2, loop method preferred\n"); map = map2; } } } else { map = map2; } /* If we only allow a specific CPU, then mask off all the others. 
*/ if (cpunum != NOCPU) { KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum.")); iscpuset = CPU_ISSET(cpunum, &map); if (iscpuset == 0) CPU_ZERO(&map); else CPU_SETOF(cpunum, &map); } if (!CPU_EMPTY(&map)) { forward_wakeups_delivered++; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { id = pc->pc_cpuid; if (!CPU_ISSET(id, &map)) continue; if (cpu_idle_wakeup(pc->pc_cpuid)) CPU_CLR(id, &map); } if (!CPU_EMPTY(&map)) ipi_selected(map, IPI_AST); return (1); } if (cpunum == NOCPU) printf("forward_wakeup: Idle processor not found\n"); return (0); } static void kick_other_cpu(int pri, int cpuid) { struct pcpu *pcpu; int cpri; pcpu = pcpu_find(cpuid); if (CPU_ISSET(cpuid, &idle_cpus_mask)) { forward_wakeups_delivered++; if (!cpu_idle_wakeup(cpuid)) ipi_cpu(cpuid, IPI_AST); return; } cpri = pcpu->pc_curthread->td_priority; if (pri >= cpri) return; #if defined(IPI_PREEMPTION) && defined(PREEMPTION) #if !defined(FULL_PREEMPTION) if (pri <= PRI_MAX_ITHD) #endif /* ! FULL_PREEMPTION */ { ipi_cpu(cpuid, IPI_PREEMPT); return; } #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */ pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED; ipi_cpu(cpuid, IPI_AST); return; } #endif /* SMP */ #ifdef SMP static int sched_pickcpu(struct thread *td) { int best, cpu; mtx_assert(&sched_lock, MA_OWNED); if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu)) best = td->td_lastcpu; else best = NOCPU; CPU_FOREACH(cpu) { if (!THREAD_CAN_SCHED(td, cpu)) continue; if (best == NOCPU) best = cpu; else if (runq_length[cpu] < runq_length[best]) best = cpu; } KASSERT(best != NOCPU, ("no valid CPUs")); return (best); } #endif void sched_add(struct thread *td, int flags) #ifdef SMP { cpuset_t tidlemsk; struct td_sched *ts; u_int cpu, cpuid; int forwarded = 0; int single_cpu = 0; ts = td_get_sched(td); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("sched_add: trying to run inhibited thread")); KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), ("sched_add: bad thread state")); KASSERT(td->td_flags & TDF_INMEM, ("sched_add: thread swapped out")); KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", "prio:%d", td->td_priority, KTR_ATTR_LINKED, sched_tdname(curthread)); KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", KTR_ATTR_LINKED, sched_tdname(td)); SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, flags & SRQ_PREEMPTED); /* * Now that the thread is moving to the run-queue, set the lock * to the scheduler's lock. */ if (td->td_lock != &sched_lock) { mtx_lock_spin(&sched_lock); thread_lock_set(td, &sched_lock); } TD_SET_RUNQ(td); /* * If SMP is started and the thread is pinned or otherwise limited to * a specific set of CPUs, queue the thread to a per-CPU run queue. * Otherwise, queue the thread to the global run queue. * * If SMP has not yet been started we must use the global run queue * as per-CPU state may not be initialized yet and we may crash if we * try to access the per-CPU run queues. */ if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND || ts->ts_flags & TSF_AFFINITY)) { if (td->td_pinned != 0) cpu = td->td_lastcpu; else if (td->td_flags & TDF_BOUND) { /* Find CPU from bound runq. 
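 * (Under SMP the head of this CPU's private run queue is compared against the pick from the global queue, and the numerically lower, i.e. more important, priority wins.)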
*/ KASSERT(SKE_RUNQ_PCPU(ts), ("sched_add: bound td_sched not on cpu runq")); cpu = ts->ts_runq - &runq_pcpu[0]; } else /* Find a valid CPU for our cpuset */ cpu = sched_pickcpu(td); ts->ts_runq = &runq_pcpu[cpu]; single_cpu = 1; CTR3(KTR_RUNQ, "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu); } else { CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td); cpu = NOCPU; ts->ts_runq = &runq; } if ((td->td_flags & TDF_NOLOAD) == 0) sched_load_add(); runq_add(ts->ts_runq, td, flags); if (cpu != NOCPU) runq_length[cpu]++; cpuid = PCPU_GET(cpuid); if (single_cpu && cpu != cpuid) { kick_other_cpu(td->td_priority, cpu); } else { if (!single_cpu) { tidlemsk = idle_cpus_mask; CPU_NAND(&tidlemsk, &hlt_cpus_mask); CPU_CLR(cpuid, &tidlemsk); if (!CPU_ISSET(cpuid, &idle_cpus_mask) && ((flags & SRQ_INTR) == 0) && !CPU_EMPTY(&tidlemsk)) forwarded = forward_wakeup(cpu); } if (!forwarded) { if (!maybe_preempt(td)) maybe_resched(td); } } } #else /* SMP */ { struct td_sched *ts; ts = td_get_sched(td); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("sched_add: trying to run inhibited thread")); KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), ("sched_add: bad thread state")); KASSERT(td->td_flags & TDF_INMEM, ("sched_add: thread swapped out")); KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", "prio:%d", td->td_priority, KTR_ATTR_LINKED, sched_tdname(curthread)); KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", KTR_ATTR_LINKED, sched_tdname(td)); SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, flags & SRQ_PREEMPTED); /* * Now that the thread is moving to the run-queue, set the lock * to the scheduler's lock. */ if (td->td_lock != &sched_lock) { mtx_lock_spin(&sched_lock); thread_lock_set(td, &sched_lock); } TD_SET_RUNQ(td); CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td); ts->ts_runq = &runq; if ((td->td_flags & TDF_NOLOAD) == 0) sched_load_add(); runq_add(ts->ts_runq, td, flags); if (!maybe_preempt(td)) maybe_resched(td); } #endif /* SMP */ void sched_rem(struct thread *td) { struct td_sched *ts; ts = td_get_sched(td); KASSERT(td->td_flags & TDF_INMEM, ("sched_rem: thread swapped out")); KASSERT(TD_ON_RUNQ(td), ("sched_rem: thread not on run queue")); mtx_assert(&sched_lock, MA_OWNED); KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem", "prio:%d", td->td_priority, KTR_ATTR_LINKED, sched_tdname(curthread)); SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL); if ((td->td_flags & TDF_NOLOAD) == 0) sched_load_rem(); #ifdef SMP if (ts->ts_runq != &runq) runq_length[ts->ts_runq - runq_pcpu]--; #endif runq_remove(ts->ts_runq, td); TD_SET_CAN_RUN(td); } /* * Select threads to run. Note that running threads still consume a * slot. 
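
[Illustrative aside, not part of this diff] The 4BSD sched_add()/sched_pickcpu() path above picks a per-CPU run queue for pinned or affinity-limited threads: the thread's last CPU is kept if it is still allowed (ties go to it), otherwise the allowed CPU with the shortest run queue wins. A minimal userspace sketch of that selection, where NCPUS, allowed[] and queue_len[] are made-up stand-ins for the kernel's cpuset and runq_length[] state:

    #include <stdio.h>

    #define NCPUS   4
    #define NOCPU   (-1)

    static int
    pick_cpu(int lastcpu, const int allowed[NCPUS], const int queue_len[NCPUS])
    {
        int best, cpu;

        /* Start from the last CPU if the thread may still run there. */
        best = (lastcpu != NOCPU && allowed[lastcpu]) ? lastcpu : NOCPU;
        for (cpu = 0; cpu < NCPUS; cpu++) {
            if (!allowed[cpu])
                continue;
            if (best == NOCPU || queue_len[cpu] < queue_len[best])
                best = cpu;
        }
        return (best);
    }

    int
    main(void)
    {
        int allowed[NCPUS]   = { 1, 1, 0, 1 };
        int queue_len[NCPUS] = { 3, 1, 0, 1 };

        /* CPUs 1 and 3 tie at length 1; the last CPU (3) keeps the thread. */
        printf("picked cpu %d\n", pick_cpu(3, allowed, queue_len));
        return (0);
    }
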
*/ struct thread * sched_choose(void) { struct thread *td; struct runq *rq; mtx_assert(&sched_lock, MA_OWNED); #ifdef SMP struct thread *tdcpu; rq = &runq; td = runq_choose_fuzz(&runq, runq_fuzz); tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]); if (td == NULL || (tdcpu != NULL && tdcpu->td_priority < td->td_priority)) { CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu, PCPU_GET(cpuid)); td = tdcpu; rq = &runq_pcpu[PCPU_GET(cpuid)]; } else { CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td); } #else rq = &runq; td = runq_choose(&runq); #endif if (td) { #ifdef SMP if (td == tdcpu) runq_length[PCPU_GET(cpuid)]--; #endif runq_remove(rq, td); td->td_flags |= TDF_DIDRUN; KASSERT(td->td_flags & TDF_INMEM, ("sched_choose: thread swapped out")); return (td); } return (PCPU_GET(idlethread)); } void sched_preempt(struct thread *td) { SDT_PROBE2(sched, , , surrender, td, td->td_proc); thread_lock(td); if (td->td_critnest > 1) td->td_owepreempt = 1; else mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL); thread_unlock(td); } void sched_userret(struct thread *td) { /* * XXX we cheat slightly on the locking here to avoid locking in * the usual case. Setting td_priority here is essentially an * incomplete workaround for not setting it properly elsewhere. * Now that some interrupt handlers are threads, not setting it * properly elsewhere can clobber it in the window between setting * it here and returning to user mode, so don't waste time setting * it perfectly here. */ KASSERT((td->td_flags & TDF_BORROWING) == 0, ("thread with borrowed priority returning to userland")); if (td->td_priority != td->td_user_pri) { thread_lock(td); td->td_priority = td->td_user_pri; td->td_base_pri = td->td_user_pri; thread_unlock(td); } } void sched_bind(struct thread *td, int cpu) { struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); KASSERT(td == curthread, ("sched_bind: can only bind curthread")); ts = td_get_sched(td); td->td_flags |= TDF_BOUND; #ifdef SMP ts->ts_runq = &runq_pcpu[cpu]; if (PCPU_GET(cpuid) == cpu) return; mi_switch(SW_VOL, NULL); #endif } void sched_unbind(struct thread* td) { THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(td == curthread, ("sched_unbind: can only bind curthread")); td->td_flags &= ~TDF_BOUND; } int sched_is_bound(struct thread *td) { THREAD_LOCK_ASSERT(td, MA_OWNED); return (td->td_flags & TDF_BOUND); } void sched_relinquish(struct thread *td) { thread_lock(td); mi_switch(SW_VOL | SWT_RELINQUISH, NULL); thread_unlock(td); } int sched_load(void) { return (sched_tdcnt); } int sched_sizeof_proc(void) { return (sizeof(struct proc)); } int sched_sizeof_thread(void) { return (sizeof(struct thread) + sizeof(struct td_sched)); } fixpt_t sched_pctcpu(struct thread *td) { struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); ts = td_get_sched(td); return (ts->ts_pctcpu); } #ifdef RACCT /* * Calculates the contribution to the thread cpu usage for the latest * (unfinished) second. */ fixpt_t sched_pctcpu_delta(struct thread *td) { struct td_sched *ts; fixpt_t delta; int realstathz; THREAD_LOCK_ASSERT(td, MA_OWNED); ts = td_get_sched(td); delta = 0; realstathz = stathz ? stathz : hz; if (ts->ts_cpticks != 0) { #if (FSHIFT >= CCPU_SHIFT) delta = (realstathz == 100) ? 
((fixpt_t) ts->ts_cpticks) << (FSHIFT - CCPU_SHIFT) : 100 * (((fixpt_t) ts->ts_cpticks) << (FSHIFT - CCPU_SHIFT)) / realstathz; #else delta = ((FSCALE - ccpu) * (ts->ts_cpticks * FSCALE / realstathz)) >> FSHIFT; #endif } return (delta); } #endif u_int sched_estcpu(struct thread *td) { return (td_get_sched(td)->ts_estcpu); } /* * The actual idle process. */ void sched_idletd(void *dummy) { struct pcpuidlestat *stat; THREAD_NO_SLEEPING(); stat = DPCPU_PTR(idlestat); for (;;) { mtx_assert(&Giant, MA_NOTOWNED); while (sched_runnable() == 0) { cpu_idle(stat->idlecalls + stat->oldidlecalls > 64); stat->idlecalls++; } mtx_lock_spin(&sched_lock); mi_switch(SW_VOL | SWT_IDLE, NULL); mtx_unlock_spin(&sched_lock); } } /* * A CPU is entering for the first time or a thread is exiting. */ void sched_throw(struct thread *td) { /* * Correct spinlock nesting. The idle thread context that we are * borrowing was created so that it would start out with a single * spin lock (sched_lock) held in fork_trampoline(). Since we've * explicitly acquired locks in this function, the nesting count * is now 2 rather than 1. Since we are nested, calling * spinlock_exit() will simply adjust the counts without allowing * spin lock using code to interrupt us. */ if (td == NULL) { mtx_lock_spin(&sched_lock); spinlock_exit(); PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); } else { lock_profile_release_lock(&sched_lock.lock_object); MPASS(td->td_lock == &sched_lock); td->td_lastcpu = td->td_oncpu; td->td_oncpu = NOCPU; } mtx_assert(&sched_lock, MA_OWNED); KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); cpu_throw(td, choosethread()); /* doesn't return */ } void sched_fork_exit(struct thread *td) { /* * Finish setting up thread glue so that it begins execution in a * non-nested critical section with sched_lock held but not recursed. */ td->td_oncpu = PCPU_GET(cpuid); sched_lock.mtx_lock = (uintptr_t)td; lock_profile_obtain_lock_success(&sched_lock.lock_object, 0, 0, __FILE__, __LINE__); THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", "prio:%d", td->td_priority); SDT_PROBE0(sched, , , on__cpu); } char * sched_tdname(struct thread *td) { #ifdef KTR struct td_sched *ts; ts = td_get_sched(td); if (ts->ts_name[0] == '\0') snprintf(ts->ts_name, sizeof(ts->ts_name), "%s tid %d", td->td_name, td->td_tid); return (ts->ts_name); #else return (td->td_name); #endif } #ifdef KTR void sched_clear_tdname(struct thread *td) { struct td_sched *ts; ts = td_get_sched(td); ts->ts_name[0] = '\0'; } #endif void sched_affinity(struct thread *td) { #ifdef SMP struct td_sched *ts; int cpu; THREAD_LOCK_ASSERT(td, MA_OWNED); /* * Set the TSF_AFFINITY flag if there is at least one CPU this * thread can't run on. */ ts = td_get_sched(td); ts->ts_flags &= ~TSF_AFFINITY; CPU_FOREACH(cpu) { if (!THREAD_CAN_SCHED(td, cpu)) { ts->ts_flags |= TSF_AFFINITY; break; } } /* * If this thread can run on all CPUs, nothing else to do. */ if (!(ts->ts_flags & TSF_AFFINITY)) return; /* Pinned threads and bound threads should be left alone. */ if (td->td_pinned != 0 || td->td_flags & TDF_BOUND) return; switch (td->td_state) { case TDS_RUNQ: /* * If we are on a per-CPU runqueue that is in the set, * then nothing needs to be done. */ if (ts->ts_runq != &runq && THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu)) return; /* Put this thread on a valid per-CPU runqueue. */ sched_rem(td); sched_add(td, SRQ_BORING); break; case TDS_RUNNING: /* * See if our current CPU is in the set. 
If not, force a * context switch. */ if (THREAD_CAN_SCHED(td, td->td_oncpu)) return; td->td_flags |= TDF_NEEDRESCHED; if (td != curthread) ipi_cpu(cpu, IPI_AST); break; default: break; } #endif } Index: user/jeff/numa/sys/kern/sched_ule.c =================================================================== --- user/jeff/numa/sys/kern/sched_ule.c (revision 326888) +++ user/jeff/numa/sys/kern/sched_ule.c (revision 326889) @@ -1,2958 +1,2959 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2007, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This file implements the ULE scheduler. ULE supports independent CPU * run queues and fine grain locking. It has superior interactive * performance under load even on uni-processor systems. * * etymology: * ULE is the last three letters in schedule. It owes its name to a * generic user created for a scheduling system by Paul Mikesell at * Isilon Systems and a general lack of creativity on the part of the author. */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #ifdef KDTRACE_HOOKS #include int dtrace_vtime_active; dtrace_vtime_switch_func_t dtrace_vtime_switch_func; #endif #include #include #define KTR_ULE 0 #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX))) #define TDQ_NAME_LEN (sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU))) #define TDQ_LOADNAME_LEN (sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load")) /* * Thread scheduler specific section. All fields are protected * by the thread lock. */ struct td_sched { struct runq *ts_runq; /* Run-queue we're queued on. */ short ts_flags; /* TSF_* flags. */ int ts_cpu; /* CPU that we have affinity for. */ int ts_rltick; /* Real last tick, for affinity. */ int ts_slice; /* Ticks of slice remaining. */ u_int ts_slptime; /* Number of ticks we vol. 
slept */ u_int ts_runtime; /* Number of ticks we were running */ int ts_ltick; /* Last tick that we were running on */ int ts_ftick; /* First tick that we were running on */ int ts_ticks; /* Tick count */ #ifdef KTR char ts_name[TS_NAME_LEN]; #endif }; /* flags kept in ts_flags */ #define TSF_BOUND 0x0001 /* Thread can not migrate. */ #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */ #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) #define THREAD_CAN_SCHED(td, cpu) \ CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <= sizeof(struct thread0_storage), "increase struct thread0_storage.t0st_sched size"); /* * Priority ranges used for interactive and non-interactive timeshare * threads. The timeshare priorities are split up into four ranges. * The first range handles interactive threads. The last three ranges * (NHALF, x, and NHALF) handle non-interactive threads with the outer * ranges supporting nice values. */ #define PRI_TIMESHARE_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) #define PRI_INTERACT_RANGE ((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2) #define PRI_BATCH_RANGE (PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE) #define PRI_MIN_INTERACT PRI_MIN_TIMESHARE #define PRI_MAX_INTERACT (PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1) #define PRI_MIN_BATCH (PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE) #define PRI_MAX_BATCH PRI_MAX_TIMESHARE /* * Cpu percentage computation macros and defines. * * SCHED_TICK_SECS: Number of seconds to average the cpu usage across. * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across. * SCHED_TICK_MAX: Maximum number of ticks before scaling back. * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results. * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count. * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks. */ #define SCHED_TICK_SECS 10 #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS) #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz) #define SCHED_TICK_SHIFT 10 #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT) #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz)) /* * These macros determine priorities for non-interactive threads. They are * assigned a priority based on their recent cpu utilization as expressed * by the ratio of ticks to the tick total. NHALF priorities at the start * and end of the MIN to MAX timeshare range are only reachable with negative * or positive nice respectively. * * PRI_RANGE: Priority range for utilization dependent priorities. * PRI_NRESV: Number of nice values. * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total. * PRI_NICE: Determines the part of the priority inherited from nice. */ #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN) #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) #define SCHED_PRI_MIN (PRI_MIN_BATCH + SCHED_PRI_NHALF) #define SCHED_PRI_MAX (PRI_MAX_BATCH - SCHED_PRI_NHALF) #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN + 1) #define SCHED_PRI_TICKS(ts) \ (SCHED_TICK_HZ((ts)) / \ (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE)) #define SCHED_PRI_NICE(nice) (nice) /* * These determine the interactivity of a process. Interactivity differs from * cpu utilization in that it expresses the voluntary time slept vs time ran * while cpu utilization includes all time not running. This more accurately * models the intent of the thread. 
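
[Illustrative aside, not part of this diff] A worked example of the SCHED_PRI_TICKS() arithmetic above: a batch thread's priority offset is proportional to the fraction of the recent sampling window it spent running, clamped into SCHED_PRI_RANGE slots that are then added on top of SCHED_PRI_MIN (plus the nice term). The hz value and the 48-slot range below are placeholders chosen only to keep the sketch self-contained; the kernel derives the real range from the PRI_* constants.

    #include <stdio.h>

    #define SCHED_TICK_SHIFT    10
    #define HZ                  1000            /* assumed */
    #define SCHED_TICK_TARG     (HZ * 10)
    #define SCHED_PRI_RANGE     48              /* placeholder slot count */
    #define roundup(x, y)       ((((x) + (y) - 1) / (y)) * (y))

    /* ticks: shifted tick credit; total: window length in hz ticks. */
    static int
    pri_ticks(int ticks, int total)
    {
        int tick_hz = ticks >> SCHED_TICK_SHIFT;

        return (tick_hz / (roundup(total, SCHED_PRI_RANGE) / SCHED_PRI_RANGE));
    }

    int
    main(void)
    {
        int window = SCHED_TICK_TARG;   /* a full ten-second history */

        /* Fully busy thread: lands at the worst end of the batch range. */
        printf("%d\n", pri_ticks(window << SCHED_TICK_SHIFT, window));
        /* Thread that ran half the window: roughly the middle of the range. */
        printf("%d\n", pri_ticks((window / 2) << SCHED_TICK_SHIFT, window));
        return (0);
    }
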
* * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate * before throttling back. * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. * INTERACT_MAX: Maximum interactivity value. Smaller is better. * INTERACT_THRESH: Threshold for placement on the current runq. */ #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT) #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT) #define SCHED_INTERACT_MAX (100) #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) #define SCHED_INTERACT_THRESH (30) /* * These parameters determine the slice behavior for batch work. */ #define SCHED_SLICE_DEFAULT_DIVISOR 10 /* ~94 ms, 12 stathz ticks. */ #define SCHED_SLICE_MIN_DIVISOR 6 /* DEFAULT/MIN = ~16 ms. */ /* Flags kept in td_flags. */ #define TDF_SLICEEND TDF_SCHED2 /* Thread time slice is over. */ /* * tickincr: Converts a stathz tick into a hz domain scaled by * the shift factor. Without the shift the error rate * due to rounding would be unacceptably high. * realstathz: stathz is sometimes 0 and run off of hz. * sched_slice: Runtime of each thread before rescheduling. * preempt_thresh: Priority threshold for preemption and remote IPIs. */ static int sched_interact = SCHED_INTERACT_THRESH; static int tickincr = 8 << SCHED_TICK_SHIFT; static int realstathz = 127; /* reset during boot. */ static int sched_slice = 10; /* reset during boot. */ static int sched_slice_min = 1; /* reset during boot. */ #ifdef PREEMPTION #ifdef FULL_PREEMPTION static int preempt_thresh = PRI_MAX_IDLE; #else static int preempt_thresh = PRI_MIN_KERN; #endif #else static int preempt_thresh = 0; #endif static int static_boost = PRI_MIN_BATCH; static int sched_idlespins = 10000; static int sched_idlespinthresh = -1; /* * tdq - per processor runqs and statistics. All fields are protected by the * tdq_lock. The load and lowpri may be accessed without to avoid excess * locking in sched_pickcpu(); */ struct tdq { /* * Ordered to improve efficiency of cpu_search() and switch(). * tdq_lock is padded to avoid false sharing with tdq_load and * tdq_cpu_idle. */ struct mtx_padalign tdq_lock; /* run queue lock. */ struct cpu_group *tdq_cg; /* Pointer to cpu topology. */ volatile int tdq_load; /* Aggregate load. */ volatile int tdq_cpu_idle; /* cpu_idle() is active. */ int tdq_sysload; /* For loadavg, !ITHD load. */ int tdq_transferable; /* Transferable thread count. */ short tdq_switchcnt; /* Switches this tick. */ short tdq_oldswitchcnt; /* Switches last tick. */ u_char tdq_lowpri; /* Lowest priority thread. */ u_char tdq_ipipending; /* IPI pending. */ u_char tdq_idx; /* Current insert index. */ u_char tdq_ridx; /* Current removal index. */ struct runq tdq_realtime; /* real-time run queue. */ struct runq tdq_timeshare; /* timeshare run queue. */ struct runq tdq_idle; /* Queue of IDLE threads. */ char tdq_name[TDQ_NAME_LEN]; #ifdef KTR char tdq_loadname[TDQ_LOADNAME_LEN]; #endif } __aligned(64); /* Idle thread states and config. */ #define TDQ_RUNNING 1 #define TDQ_IDLE 2 #ifdef SMP struct cpu_group *cpu_top; /* CPU topology */ #define SCHED_AFFINITY_DEFAULT (max(1, hz / 1000)) #define SCHED_AFFINITY(ts, t) ((ts)->ts_rltick > ticks - ((t) * affinity)) /* * Run-time tunables. */ static int rebalance = 1; static int balance_interval = 128; /* Default set in sched_initticks(). */ static int affinity; static int steal_idle = 1; static int steal_thresh = 2; /* * One thread queue per processor. 
*/ static struct tdq tdq_cpu[MAXCPU]; static struct tdq *balance_tdq; static int balance_ticks; static DPCPU_DEFINE(uint32_t, randomval); #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) #define TDQ_CPU(x) (&tdq_cpu[(x)]) #define TDQ_ID(x) ((int)((x) - tdq_cpu)) #else /* !SMP */ static struct tdq tdq_cpu; #define TDQ_ID(x) (0) #define TDQ_SELF() (&tdq_cpu) #define TDQ_CPU(x) (&tdq_cpu) #endif #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type)) #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t))) #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f)) #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t))) #define TDQ_LOCKPTR(t) ((struct mtx *)(&(t)->tdq_lock)) static void sched_priority(struct thread *); static void sched_thread_priority(struct thread *, u_char); static int sched_interact_score(struct thread *); static void sched_interact_update(struct thread *); static void sched_interact_fork(struct thread *); static void sched_pctcpu_update(struct td_sched *, int); /* Operations on per processor queues */ static struct thread *tdq_choose(struct tdq *); static void tdq_setup(struct tdq *); static void tdq_load_add(struct tdq *, struct thread *); static void tdq_load_rem(struct tdq *, struct thread *); static __inline void tdq_runq_add(struct tdq *, struct thread *, int); static __inline void tdq_runq_rem(struct tdq *, struct thread *); static inline int sched_shouldpreempt(int, int, int); void tdq_print(int cpu); static void runq_print(struct runq *rq); static void tdq_add(struct tdq *, struct thread *, int); #ifdef SMP static int tdq_move(struct tdq *, struct tdq *); static int tdq_idled(struct tdq *); static void tdq_notify(struct tdq *, struct thread *); static struct thread *tdq_steal(struct tdq *, int); static struct thread *runq_steal(struct runq *, int); static int sched_pickcpu(struct thread *, int); static void sched_balance(void); static int sched_balance_pair(struct tdq *, struct tdq *); static inline struct tdq *sched_setcpu(struct thread *, int, int); static inline void thread_unblock_switch(struct thread *, struct mtx *); static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int); static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS); static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg, int indent); #endif static void sched_setup(void *dummy); SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); static void sched_initticks(void *dummy); SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL); SDT_PROVIDER_DEFINE(sched); SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *", "struct proc *", "uint8_t"); SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *", "struct proc *", "void *"); SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *", "struct proc *", "void *", "int"); SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *", "struct proc *", "uint8_t", "struct thread *"); SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int"); SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *", "struct proc *"); SDT_PROBE_DEFINE(sched, , , on__cpu); SDT_PROBE_DEFINE(sched, , , remain__cpu); SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *", "struct proc *"); /* * Print the threads waiting on a run-queue. 
*/ static void runq_print(struct runq *rq) { struct rqhead *rqh; struct thread *td; int pri; int j; int i; for (i = 0; i < RQB_LEN; i++) { printf("\t\trunq bits %d 0x%zx\n", i, rq->rq_status.rqb_bits[i]); for (j = 0; j < RQB_BPW; j++) if (rq->rq_status.rqb_bits[i] & (1ul << j)) { pri = j + (i << RQB_L2BPW); rqh = &rq->rq_queues[pri]; TAILQ_FOREACH(td, rqh, td_runq) { printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", td, td->td_name, td->td_priority, td->td_rqindex, pri); } } } } /* * Print the status of a per-cpu thread queue. Should be a ddb show cmd. */ void tdq_print(int cpu) { struct tdq *tdq; tdq = TDQ_CPU(cpu); printf("tdq %d:\n", TDQ_ID(tdq)); printf("\tlock %p\n", TDQ_LOCKPTR(tdq)); printf("\tLock name: %s\n", tdq->tdq_name); printf("\tload: %d\n", tdq->tdq_load); printf("\tswitch cnt: %d\n", tdq->tdq_switchcnt); printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt); printf("\ttimeshare idx: %d\n", tdq->tdq_idx); printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); printf("\tload transferable: %d\n", tdq->tdq_transferable); printf("\tlowest priority: %d\n", tdq->tdq_lowpri); printf("\trealtime runq:\n"); runq_print(&tdq->tdq_realtime); printf("\ttimeshare runq:\n"); runq_print(&tdq->tdq_timeshare); printf("\tidle runq:\n"); runq_print(&tdq->tdq_idle); } static inline int sched_shouldpreempt(int pri, int cpri, int remote) { /* * If the new priority is not better than the current priority there is * nothing to do. */ if (pri >= cpri) return (0); /* * Always preempt idle. */ if (cpri >= PRI_MIN_IDLE) return (1); /* * If preemption is disabled don't preempt others. */ if (preempt_thresh == 0) return (0); /* * Preempt if we exceed the threshold. */ if (pri <= preempt_thresh) return (1); /* * If we're interactive or better and there is non-interactive * or worse running preempt only remote processors. */ if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT) return (1); return (0); } /* * Add a thread to the actual run-queue. Keeps transferable counts up to * date with what is actually on the run-queue. Selects the correct * queue position for timeshare threads. */ static __inline void tdq_runq_add(struct tdq *tdq, struct thread *td, int flags) { struct td_sched *ts; u_char pri; TDQ_LOCK_ASSERT(tdq, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); pri = td->td_priority; ts = td_get_sched(td); TD_SET_RUNQ(td); if (THREAD_CAN_MIGRATE(td)) { tdq->tdq_transferable++; ts->ts_flags |= TSF_XFERABLE; } if (pri < PRI_MIN_BATCH) { ts->ts_runq = &tdq->tdq_realtime; } else if (pri <= PRI_MAX_BATCH) { ts->ts_runq = &tdq->tdq_timeshare; KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH, ("Invalid priority %d on timeshare runq", pri)); /* * This queue contains only priorities between MIN and MAX * realtime. Use the whole queue to represent these values. */ if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) { pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE; pri = (pri + tdq->tdq_idx) % RQ_NQS; /* * This effectively shortens the queue by one so we * can have a one slot difference between idx and * ridx while we wait for threads to drain. */ if (tdq->tdq_ridx != tdq->tdq_idx && pri == tdq->tdq_ridx) pri = (unsigned char)(pri - 1) % RQ_NQS; } else pri = tdq->tdq_ridx; runq_add_pri(ts->ts_runq, td, pri, flags); return; } else ts->ts_runq = &tdq->tdq_idle; runq_add(ts->ts_runq, td, flags); } /* * Remove a thread from a run-queue. This typically happens when a thread * is selected to run. Running threads are not on the queue and the * transferable count does not reflect them. 
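
[Illustrative aside, not part of this diff] The preemption policy in sched_shouldpreempt() above is a small decision table: never preempt for an equal or worse priority, always preempt idle, otherwise preempt only below the preempt_thresh tunable, or when an interactive thread arrives while batch work runs on a remote CPU. A standalone sketch of that table; the numeric constants are placeholders, since the kernel takes them from the priority ranges and the tunable:

    #include <stdio.h>

    #define PRI_MAX_INTERACT    100     /* placeholder */
    #define PRI_MIN_IDLE        200     /* placeholder */

    static int preempt_thresh = 80;     /* placeholder for the tunable */

    static int
    should_preempt(int pri, int cpri, int remote)
    {
        if (pri >= cpri)                /* Not better than what is running. */
            return (0);
        if (cpri >= PRI_MIN_IDLE)       /* Always preempt the idle thread. */
            return (1);
        if (preempt_thresh == 0)        /* Preemption disabled. */
            return (0);
        if (pri <= preempt_thresh)      /* Beats the preemption threshold. */
            return (1);
        /* Interactive arrival while batch work runs on a remote CPU. */
        if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
            return (1);
        return (0);
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
            should_preempt(90, 150, 0),   /* above thresh, local: 0 */
            should_preempt(50, 150, 0),   /* beats preempt_thresh: 1 */
            should_preempt(90, 150, 1));  /* interactive vs. remote batch: 1 */
        return (0);
    }
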
*/ static __inline void tdq_runq_rem(struct tdq *tdq, struct thread *td) { struct td_sched *ts; ts = td_get_sched(td); TDQ_LOCK_ASSERT(tdq, MA_OWNED); KASSERT(ts->ts_runq != NULL, ("tdq_runq_remove: thread %p null ts_runq", td)); if (ts->ts_flags & TSF_XFERABLE) { tdq->tdq_transferable--; ts->ts_flags &= ~TSF_XFERABLE; } if (ts->ts_runq == &tdq->tdq_timeshare) { if (tdq->tdq_idx != tdq->tdq_ridx) runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx); else runq_remove_idx(ts->ts_runq, td, NULL); } else runq_remove(ts->ts_runq, td); } /* * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load * for this thread to the referenced thread queue. */ static void tdq_load_add(struct tdq *tdq, struct thread *td) { TDQ_LOCK_ASSERT(tdq, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); tdq->tdq_load++; if ((td->td_flags & TDF_NOLOAD) == 0) tdq->tdq_sysload++; KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load); SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load); } /* * Remove the load from a thread that is transitioning to a sleep state or * exiting. */ static void tdq_load_rem(struct tdq *tdq, struct thread *td) { THREAD_LOCK_ASSERT(td, MA_OWNED); TDQ_LOCK_ASSERT(tdq, MA_OWNED); KASSERT(tdq->tdq_load != 0, ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq))); tdq->tdq_load--; if ((td->td_flags & TDF_NOLOAD) == 0) tdq->tdq_sysload--; KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load); SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load); } /* * Bound timeshare latency by decreasing slice size as load increases. We * consider the maximum latency as the sum of the threads waiting to run * aside from curthread and target no more than sched_slice latency but * no less than sched_slice_min runtime. */ static inline int tdq_slice(struct tdq *tdq) { int load; /* * It is safe to use sys_load here because this is called from * contexts where timeshare threads are running and so there * cannot be higher priority load in the system. */ load = tdq->tdq_sysload - 1; if (load >= SCHED_SLICE_MIN_DIVISOR) return (sched_slice_min); if (load <= 1) return (sched_slice); return (sched_slice / load); } /* * Set lowpri to its exact value by searching the run-queue and * evaluating curthread. curthread may be passed as an optimization. */ static void tdq_setlowpri(struct tdq *tdq, struct thread *ctd) { struct thread *td; TDQ_LOCK_ASSERT(tdq, MA_OWNED); if (ctd == NULL) ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread; td = tdq_choose(tdq); if (td == NULL || td->td_priority > ctd->td_priority) tdq->tdq_lowpri = ctd->td_priority; else tdq->tdq_lowpri = td->td_priority; } #ifdef SMP /* * We need some randomness. Implement a classic Linear Congruential * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits * of the random state (in the low bits of our answer) to keep * the maximum randomness. */ static uint32_t sched_random(void) { uint32_t *rndptr; rndptr = DPCPU_PTR(randomval); *rndptr = *rndptr * 69069 + 5; return (*rndptr >> 16); } struct cpu_search { cpuset_t cs_mask; u_int cs_prefer; int cs_pri; /* Min priority for low. */ int cs_limit; /* Max load for low, min load for high. 
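
[Illustrative aside, not part of this diff] The per-CPU randomness used for balancing comes from the linear congruential generator described above, X_{n+1} = (69069 * X_n + 5) mod 2^32, with only the upper 16 bits of the state handed back. A minimal userspace sketch, where a plain static variable stands in for the DPCPU randomval slot:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t randomval = 1;      /* stand-in for DPCPU(randomval) */

    static uint32_t
    sched_random_sketch(void)
    {
        /* uint32_t arithmetic wraps, giving the mod 2^32 for free. */
        randomval = randomval * 69069 + 5;
        return (randomval >> 16);
    }

    int
    main(void)
    {
        int i;

        for (i = 0; i < 4; i++)
            printf("%u\n", (unsigned)sched_random_sketch());
        return (0);
    }
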
*/ int cs_cpu; int cs_load; }; #define CPU_SEARCH_LOWEST 0x1 #define CPU_SEARCH_HIGHEST 0x2 #define CPU_SEARCH_BOTH (CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST) #define CPUSET_FOREACH(cpu, mask) \ for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++) \ if (CPU_ISSET(cpu, &mask)) static __always_inline int cpu_search(const struct cpu_group *cg, struct cpu_search *low, struct cpu_search *high, const int match); int __noinline cpu_search_lowest(const struct cpu_group *cg, struct cpu_search *low); int __noinline cpu_search_highest(const struct cpu_group *cg, struct cpu_search *high); int __noinline cpu_search_both(const struct cpu_group *cg, struct cpu_search *low, struct cpu_search *high); /* * Search the tree of cpu_groups for the lowest or highest loaded cpu * according to the match argument. This routine actually compares the * load on all paths through the tree and finds the least loaded cpu on * the least loaded path, which may differ from the least loaded cpu in * the system. This balances work among caches and buses. * * This inline is instantiated in three forms below using constants for the * match argument. It is reduced to the minimum set for each case. It is * also recursive to the depth of the tree. */ static __always_inline int cpu_search(const struct cpu_group *cg, struct cpu_search *low, struct cpu_search *high, const int match) { struct cpu_search lgroup; struct cpu_search hgroup; cpuset_t cpumask; struct cpu_group *child; struct tdq *tdq; int cpu, i, hload, lload, load, total, rnd; total = 0; cpumask = cg->cg_mask; if (match & CPU_SEARCH_LOWEST) { lload = INT_MAX; lgroup = *low; } if (match & CPU_SEARCH_HIGHEST) { hload = INT_MIN; hgroup = *high; } /* Iterate through the child CPU groups and then remaining CPUs. */ for (i = cg->cg_children, cpu = mp_maxid; ; ) { if (i == 0) { #ifdef HAVE_INLINE_FFSL cpu = CPU_FFS(&cpumask) - 1; #else while (cpu >= 0 && !CPU_ISSET(cpu, &cpumask)) cpu--; #endif if (cpu < 0) break; child = NULL; } else child = &cg->cg_child[i - 1]; if (match & CPU_SEARCH_LOWEST) lgroup.cs_cpu = -1; if (match & CPU_SEARCH_HIGHEST) hgroup.cs_cpu = -1; if (child) { /* Handle child CPU group. */ CPU_NAND(&cpumask, &child->cg_mask); switch (match) { case CPU_SEARCH_LOWEST: load = cpu_search_lowest(child, &lgroup); break; case CPU_SEARCH_HIGHEST: load = cpu_search_highest(child, &hgroup); break; case CPU_SEARCH_BOTH: load = cpu_search_both(child, &lgroup, &hgroup); break; } } else { /* Handle child CPU. */ CPU_CLR(cpu, &cpumask); tdq = TDQ_CPU(cpu); load = tdq->tdq_load * 256; rnd = sched_random() % 32; if (match & CPU_SEARCH_LOWEST) { if (cpu == low->cs_prefer) load -= 64; /* If that CPU is allowed and get data. */ if (tdq->tdq_lowpri > lgroup.cs_pri && tdq->tdq_load <= lgroup.cs_limit && CPU_ISSET(cpu, &lgroup.cs_mask)) { lgroup.cs_cpu = cpu; lgroup.cs_load = load - rnd; } } if (match & CPU_SEARCH_HIGHEST) if (tdq->tdq_load >= hgroup.cs_limit && tdq->tdq_transferable && CPU_ISSET(cpu, &hgroup.cs_mask)) { hgroup.cs_cpu = cpu; hgroup.cs_load = load - rnd; } } total += load; /* We have info about child item. Compare it. 
*/ if (match & CPU_SEARCH_LOWEST) { if (lgroup.cs_cpu >= 0 && (load < lload || (load == lload && lgroup.cs_load < low->cs_load))) { lload = load; low->cs_cpu = lgroup.cs_cpu; low->cs_load = lgroup.cs_load; } } if (match & CPU_SEARCH_HIGHEST) if (hgroup.cs_cpu >= 0 && (load > hload || (load == hload && hgroup.cs_load > high->cs_load))) { hload = load; high->cs_cpu = hgroup.cs_cpu; high->cs_load = hgroup.cs_load; } if (child) { i--; if (i == 0 && CPU_EMPTY(&cpumask)) break; } #ifndef HAVE_INLINE_FFSL else cpu--; #endif } return (total); } /* * cpu_search instantiations must pass constants to maintain the inline * optimization. */ int cpu_search_lowest(const struct cpu_group *cg, struct cpu_search *low) { return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST); } int cpu_search_highest(const struct cpu_group *cg, struct cpu_search *high) { return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST); } int cpu_search_both(const struct cpu_group *cg, struct cpu_search *low, struct cpu_search *high) { return cpu_search(cg, low, high, CPU_SEARCH_BOTH); } /* * Find the cpu with the least load via the least loaded path that has a * lowpri greater than pri pri. A pri of -1 indicates any priority is * acceptable. */ static inline int sched_lowest(const struct cpu_group *cg, cpuset_t mask, int pri, int maxload, int prefer) { struct cpu_search low; low.cs_cpu = -1; low.cs_prefer = prefer; low.cs_mask = mask; low.cs_pri = pri; low.cs_limit = maxload; cpu_search_lowest(cg, &low); return low.cs_cpu; } /* * Find the cpu with the highest load via the highest loaded path. */ static inline int sched_highest(const struct cpu_group *cg, cpuset_t mask, int minload) { struct cpu_search high; high.cs_cpu = -1; high.cs_mask = mask; high.cs_limit = minload; cpu_search_highest(cg, &high); return high.cs_cpu; } static void sched_balance_group(struct cpu_group *cg) { cpuset_t hmask, lmask; int high, low, anylow; CPU_FILL(&hmask); for (;;) { high = sched_highest(cg, hmask, 1); /* Stop if there is no more CPU with transferrable threads. */ if (high == -1) break; CPU_CLR(high, &hmask); CPU_COPY(&hmask, &lmask); /* Stop if there is no more CPU left for low. */ if (CPU_EMPTY(&lmask)) break; anylow = 1; nextlow: low = sched_lowest(cg, lmask, -1, TDQ_CPU(high)->tdq_load - 1, high); /* Stop if we looked well and found no less loaded CPU. */ if (anylow && low == -1) break; /* Go to next high if we found no less loaded CPU. */ if (low == -1) continue; /* Transfer thread from high to low. */ if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low))) { /* CPU that got thread can no longer be a donor. */ CPU_CLR(low, &hmask); } else { /* * If failed, then there is no threads on high * that can run on this low. Drop low from low * mask and look for different one. */ CPU_CLR(low, &lmask); anylow = 0; goto nextlow; } } } static void sched_balance(void) { struct tdq *tdq; if (smp_started == 0 || rebalance == 0) return; balance_ticks = max(balance_interval / 2, 1) + (sched_random() % balance_interval); tdq = TDQ_SELF(); TDQ_UNLOCK(tdq); sched_balance_group(cpu_top); TDQ_LOCK(tdq); } /* * Lock two thread queues using their address to maintain lock order. */ static void tdq_lock_pair(struct tdq *one, struct tdq *two) { if (one < two) { TDQ_LOCK(one); TDQ_LOCK_FLAGS(two, MTX_DUPOK); } else { TDQ_LOCK(two); TDQ_LOCK_FLAGS(one, MTX_DUPOK); } } /* * Unlock two thread queues. Order is not important here. 
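
[Illustrative aside, not part of this diff] The cpu_search()/sched_lowest() machinery above picks the least loaded CPU on the least loaded *path* through the topology tree, which can differ from the globally least loaded CPU; that is what spreads load across caches and buses. A deliberately simplified two-level sketch of the idea (not the kernel algorithm), with a made-up topology and load[] values:

    #include <stdio.h>

    struct group {
        int first, count;   /* contiguous CPU ids in this package */
    };

    static int load[8] = { 5, 0, 4, 4, 1, 1, 1, 1 };
    static struct group pkg[2] = { { 0, 4 }, { 4, 4 } };

    static int
    group_load(const struct group *g)
    {
        int i, sum = 0;

        for (i = 0; i < g->count; i++)
            sum += load[g->first + i];
        return (sum);
    }

    int
    main(void)
    {
        const struct group *best = &pkg[0];
        int i, cpu, best_cpu;

        /* Pick the least loaded package first... */
        for (i = 1; i < 2; i++)
            if (group_load(&pkg[i]) < group_load(best))
                best = &pkg[i];
        /* ...then the least loaded CPU inside it. */
        best_cpu = best->first;
        for (cpu = best->first; cpu < best->first + best->count; cpu++)
            if (load[cpu] < load[best_cpu])
                best_cpu = cpu;
        /*
         * CPU 1 is globally idle, but package 1 (total load 4) beats
         * package 0 (total load 13), so CPU 4 is chosen.
         */
        printf("picked cpu %d\n", best_cpu);
        return (0);
    }
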
*/ static void tdq_unlock_pair(struct tdq *one, struct tdq *two) { TDQ_UNLOCK(one); TDQ_UNLOCK(two); } /* * Transfer load between two imbalanced thread queues. */ static int sched_balance_pair(struct tdq *high, struct tdq *low) { int moved; int cpu; tdq_lock_pair(high, low); moved = 0; /* * Determine what the imbalance is and then adjust that to how many * threads we actually have to give up (transferable). */ if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load && (moved = tdq_move(high, low)) > 0) { /* * In case the target isn't the current cpu IPI it to force a * reschedule with the new workload. */ cpu = TDQ_ID(low); if (cpu != PCPU_GET(cpuid)) ipi_cpu(cpu, IPI_PREEMPT); } tdq_unlock_pair(high, low); return (moved); } /* * Move a thread from one thread queue to another. */ static int tdq_move(struct tdq *from, struct tdq *to) { struct td_sched *ts; struct thread *td; struct tdq *tdq; int cpu; TDQ_LOCK_ASSERT(from, MA_OWNED); TDQ_LOCK_ASSERT(to, MA_OWNED); tdq = from; cpu = TDQ_ID(to); td = tdq_steal(tdq, cpu); if (td == NULL) return (0); ts = td_get_sched(td); /* * Although the run queue is locked the thread may be blocked. Lock * it to clear this and acquire the run-queue lock. */ thread_lock(td); /* Drop recursive lock on from acquired via thread_lock(). */ TDQ_UNLOCK(from); sched_rem(td); ts->ts_cpu = cpu; td->td_lock = TDQ_LOCKPTR(to); tdq_add(to, td, SRQ_YIELDING); return (1); } /* * This tdq has idled. Try to steal a thread from another cpu and switch * to it. */ static int tdq_idled(struct tdq *tdq) { struct cpu_group *cg; struct tdq *steal; cpuset_t mask; int thresh; int cpu; if (smp_started == 0 || steal_idle == 0) return (1); CPU_FILL(&mask); CPU_CLR(PCPU_GET(cpuid), &mask); /* We don't want to be preempted while we're iterating. */ spinlock_enter(); for (cg = tdq->tdq_cg; cg != NULL; ) { if ((cg->cg_flags & CG_FLAG_THREAD) == 0) thresh = steal_thresh; else thresh = 1; cpu = sched_highest(cg, mask, thresh); if (cpu == -1) { cg = cg->cg_parent; continue; } steal = TDQ_CPU(cpu); CPU_CLR(cpu, &mask); tdq_lock_pair(tdq, steal); if (steal->tdq_load < thresh || steal->tdq_transferable == 0) { tdq_unlock_pair(tdq, steal); continue; } /* * If a thread was added while interrupts were disabled don't * steal one here. If we fail to acquire one due to affinity * restrictions loop again with this cpu removed from the * set. */ if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) { tdq_unlock_pair(tdq, steal); continue; } spinlock_exit(); TDQ_UNLOCK(steal); mi_switch(SW_VOL | SWT_IDLE, NULL); thread_unlock(curthread); return (0); } spinlock_exit(); return (1); } /* * Notify a remote cpu of new work. Sends an IPI if criteria are met. */ static void tdq_notify(struct tdq *tdq, struct thread *td) { struct thread *ctd; int pri; int cpu; if (tdq->tdq_ipipending) return; cpu = td_get_sched(td)->ts_cpu; pri = td->td_priority; ctd = pcpu_find(cpu)->pc_curthread; if (!sched_shouldpreempt(pri, ctd->td_priority, 1)) return; /* * Make sure that our caller's earlier update to tdq_load is * globally visible before we read tdq_cpu_idle. Idle thread * accesses both of them without locks, and the order is important. */ atomic_thread_fence_seq_cst(); if (TD_IS_IDLETHREAD(ctd)) { /* * If the MD code has an idle wakeup routine try that before * falling back to IPI. */ if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu)) return; } tdq->tdq_ipipending = 1; ipi_cpu(cpu, IPI_PREEMPT); } /* * Steals load from a timeshare queue. Honors the rotating queue head * index. 
*/ static struct thread * runq_steal_from(struct runq *rq, int cpu, u_char start) { struct rqbits *rqb; struct rqhead *rqh; struct thread *td, *first; int bit; int i; rqb = &rq->rq_status; bit = start & (RQB_BPW -1); first = NULL; again: for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { if (rqb->rqb_bits[i] == 0) continue; if (bit == 0) bit = RQB_FFS(rqb->rqb_bits[i]); for (; bit < RQB_BPW; bit++) { if ((rqb->rqb_bits[i] & (1ul << bit)) == 0) continue; rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)]; TAILQ_FOREACH(td, rqh, td_runq) { if (first && THREAD_CAN_MIGRATE(td) && THREAD_CAN_SCHED(td, cpu)) return (td); first = td; } } } if (start != 0) { start = 0; goto again; } if (first && THREAD_CAN_MIGRATE(first) && THREAD_CAN_SCHED(first, cpu)) return (first); return (NULL); } /* * Steals load from a standard linear queue. */ static struct thread * runq_steal(struct runq *rq, int cpu) { struct rqhead *rqh; struct rqbits *rqb; struct thread *td; int word; int bit; rqb = &rq->rq_status; for (word = 0; word < RQB_LEN; word++) { if (rqb->rqb_bits[word] == 0) continue; for (bit = 0; bit < RQB_BPW; bit++) { if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) continue; rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; TAILQ_FOREACH(td, rqh, td_runq) if (THREAD_CAN_MIGRATE(td) && THREAD_CAN_SCHED(td, cpu)) return (td); } } return (NULL); } /* * Attempt to steal a thread in priority order from a thread queue. */ static struct thread * tdq_steal(struct tdq *tdq, int cpu) { struct thread *td; TDQ_LOCK_ASSERT(tdq, MA_OWNED); if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL) return (td); if ((td = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx)) != NULL) return (td); return (runq_steal(&tdq->tdq_idle, cpu)); } /* * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the * current lock and returns with the assigned queue locked. */ static inline struct tdq * sched_setcpu(struct thread *td, int cpu, int flags) { struct tdq *tdq; THREAD_LOCK_ASSERT(td, MA_OWNED); tdq = TDQ_CPU(cpu); td_get_sched(td)->ts_cpu = cpu; /* * If the lock matches just return the queue. */ if (td->td_lock == TDQ_LOCKPTR(tdq)) return (tdq); #ifdef notyet /* * If the thread isn't running its lockptr is a * turnstile or a sleepqueue. We can just lock_set without * blocking. */ if (TD_CAN_RUN(td)) { TDQ_LOCK(tdq); thread_lock_set(td, TDQ_LOCKPTR(tdq)); return (tdq); } #endif /* * The hard case, migration, we need to block the thread first to * prevent order reversals with other cpus locks. */ spinlock_enter(); thread_lock_block(td); TDQ_LOCK(tdq); thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); spinlock_exit(); return (tdq); } SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding"); SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity"); SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity"); SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load"); SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu"); SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration"); static int sched_pickcpu(struct thread *td, int flags) { struct cpu_group *cg, *ccg; struct td_sched *ts; struct tdq *tdq; cpuset_t mask; int cpu, pri, self; self = PCPU_GET(cpuid); ts = td_get_sched(td); KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on " "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name)); if (smp_started == 0) return (self); /* * Don't migrate a running thread from sched_switch(). 
*/ if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td)) return (ts->ts_cpu); /* * Prefer to run interrupt threads on the processors that generate * the interrupt. */ pri = td->td_priority; if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) && curthread->td_intr_nesting_level && ts->ts_cpu != self) { SCHED_STAT_INC(pickcpu_intrbind); ts->ts_cpu = self; if (TDQ_CPU(self)->tdq_lowpri > pri) { SCHED_STAT_INC(pickcpu_affinity); return (ts->ts_cpu); } } /* * If the thread can run on the last cpu and the affinity has not * expired or it is idle run it there. */ tdq = TDQ_CPU(ts->ts_cpu); cg = tdq->tdq_cg; if (THREAD_CAN_SCHED(td, ts->ts_cpu) && tdq->tdq_lowpri >= PRI_MIN_IDLE && SCHED_AFFINITY(ts, CG_SHARE_L2)) { if (cg->cg_flags & CG_FLAG_THREAD) { CPUSET_FOREACH(cpu, cg->cg_mask) { if (TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) break; } } else cpu = INT_MAX; if (cpu > mp_maxid) { SCHED_STAT_INC(pickcpu_idle_affinity); return (ts->ts_cpu); } } /* * Search for the last level cache CPU group in the tree. * Skip caches with expired affinity time and SMT groups. * Affinity to higher level caches will be handled less aggressively. */ for (ccg = NULL; cg != NULL; cg = cg->cg_parent) { if (cg->cg_flags & CG_FLAG_THREAD) continue; if (!SCHED_AFFINITY(ts, cg->cg_level)) continue; ccg = cg; } if (ccg != NULL) cg = ccg; cpu = -1; /* Search the group for the less loaded idle CPU we can run now. */ mask = td->td_cpuset->cs_mask; if (cg != NULL && cg != cpu_top && CPU_CMP(&cg->cg_mask, &cpu_top->cg_mask) != 0) cpu = sched_lowest(cg, mask, max(pri, PRI_MAX_TIMESHARE), INT_MAX, ts->ts_cpu); /* Search globally for the less loaded CPU we can run now. */ if (cpu == -1) cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu); /* Search globally for the less loaded CPU. */ if (cpu == -1) cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu); KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu.")); KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu)); /* * Compare the lowest loaded cpu to current cpu. */ if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri && TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE && TDQ_CPU(self)->tdq_load <= TDQ_CPU(cpu)->tdq_load + 1) { SCHED_STAT_INC(pickcpu_local); cpu = self; } else SCHED_STAT_INC(pickcpu_lowest); if (cpu != ts->ts_cpu) SCHED_STAT_INC(pickcpu_migration); return (cpu); } #endif /* * Pick the highest priority task we have and return it. */ static struct thread * tdq_choose(struct tdq *tdq) { struct thread *td; TDQ_LOCK_ASSERT(tdq, MA_OWNED); td = runq_choose(&tdq->tdq_realtime); if (td != NULL) return (td); td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); if (td != NULL) { KASSERT(td->td_priority >= PRI_MIN_BATCH, ("tdq_choose: Invalid priority on timeshare queue %d", td->td_priority)); return (td); } td = runq_choose(&tdq->tdq_idle); if (td != NULL) { KASSERT(td->td_priority >= PRI_MIN_IDLE, ("tdq_choose: Invalid priority on idle queue %d", td->td_priority)); return (td); } return (NULL); } /* * Initialize a thread queue. 
*/ static void tdq_setup(struct tdq *tdq) { if (bootverbose) printf("ULE: setup cpu %d\n", TDQ_ID(tdq)); runq_init(&tdq->tdq_realtime); runq_init(&tdq->tdq_timeshare); runq_init(&tdq->tdq_idle); snprintf(tdq->tdq_name, sizeof(tdq->tdq_name), "sched lock %d", (int)TDQ_ID(tdq)); mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", MTX_SPIN | MTX_RECURSE); #ifdef KTR snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname), "CPU %d load", (int)TDQ_ID(tdq)); #endif } #ifdef SMP static void sched_setup_smp(void) { struct tdq *tdq; int i; cpu_top = smp_topo(); CPU_FOREACH(i) { tdq = TDQ_CPU(i); tdq_setup(tdq); tdq->tdq_cg = smp_topo_find(cpu_top, i); if (tdq->tdq_cg == NULL) panic("Can't find cpu group for %d\n", i); } balance_tdq = TDQ_SELF(); sched_balance(); } #endif /* * Setup the thread queues and initialize the topology based on MD * information. */ static void sched_setup(void *dummy) { struct tdq *tdq; tdq = TDQ_SELF(); #ifdef SMP sched_setup_smp(); #else tdq_setup(tdq); #endif /* Add thread0's load since it's running. */ TDQ_LOCK(tdq); td_get_sched(&thread0)->ts_cpu = curcpu; /* Something valid to start */ thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF()); tdq_load_add(tdq, &thread0); tdq->tdq_lowpri = thread0.td_priority; TDQ_UNLOCK(tdq); } /* * This routine determines time constants after stathz and hz are setup. */ /* ARGSUSED */ static void sched_initticks(void *dummy) { int incr; realstathz = stathz ? stathz : hz; sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR; sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR; hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / realstathz); /* * tickincr is shifted out by 10 to avoid rounding errors due to * hz not being evenly divisible by stathz on all platforms. */ incr = (hz << SCHED_TICK_SHIFT) / realstathz; /* * This does not work for values of stathz that are more than * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. */ if (incr == 0) incr = 1; tickincr = incr; #ifdef SMP /* * Set the default balance interval now that we know * what realstathz is. */ balance_interval = realstathz; affinity = SCHED_AFFINITY_DEFAULT; #endif if (sched_idlespinthresh < 0) sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz; } /* * This is the core of the interactivity algorithm. Determines a score based * on past behavior. It is the ratio of sleep time to run time scaled to * a [0, 100] integer. This is the voluntary sleep time of a process, which * differs from the cpu usage because it does not account for time spent * waiting on a run-queue. Would be prettier if we had floating point. * * When a thread's sleep time is greater than its run time the * calculation is: * * scaling factor * interactivity score = --------------------- * sleep time / run time * * * When a thread's run time is greater than its sleep time the * calculation is: * * scaling factor * interactivity score = --------------------- + scaling factor * run time / sleep time */ static int sched_interact_score(struct thread *td) { struct td_sched *ts; int div; ts = td_get_sched(td); /* * The score is only needed if this is likely to be an interactive * task. Don't go through the expense of computing it if there's * no chance. 
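
[Illustrative aside, not part of this diff] A worked example of the sched_initticks() arithmetic above, assuming the common stathz = 127 and hz = 1000. It reproduces the "~94 ms, 12 stathz ticks" default slice noted next to SCHED_SLICE_DEFAULT_DIVISOR and the shifted tickincr used to convert stathz ticks into the hz domain:

    #include <stdio.h>

    #define SCHED_TICK_SHIFT            10
    #define SCHED_SLICE_DEFAULT_DIVISOR 10
    #define SCHED_SLICE_MIN_DIVISOR     6

    int
    main(void)
    {
        int hz = 1000, stathz = 127;                    /* assumed values */
        int realstathz = stathz ? stathz : hz;
        int sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
        int sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
        int tickincr = (hz << SCHED_TICK_SHIFT) / realstathz;

        if (tickincr == 0)
            tickincr = 1;
        /* Prints: slice 12 ticks (94 ms), min 2, tickincr 8062 (~8 << 10). */
        printf("slice %d ticks (%d ms), min %d, tickincr %d\n",
            sched_slice, sched_slice * 1000 / realstathz,
            sched_slice_min, tickincr);
        return (0);
    }
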
*/ if (sched_interact <= SCHED_INTERACT_HALF && ts->ts_runtime >= ts->ts_slptime) return (SCHED_INTERACT_HALF); if (ts->ts_runtime > ts->ts_slptime) { div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); return (SCHED_INTERACT_HALF + (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); } if (ts->ts_slptime > ts->ts_runtime) { div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); return (ts->ts_runtime / div); } /* runtime == slptime */ if (ts->ts_runtime) return (SCHED_INTERACT_HALF); /* * This can happen if slptime and runtime are 0. */ return (0); } /* * Scale the scheduling priority according to the "interactivity" of this * process. */ static void sched_priority(struct thread *td) { int score; int pri; if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE) return; /* * If the score is interactive we place the thread in the realtime * queue with a priority that is less than kernel and interrupt * priorities. These threads are not subject to nice restrictions. * * Scores greater than this are placed on the normal timeshare queue * where the priority is partially decided by the most recent cpu * utilization and the rest is decided by nice value. * * The nice value of the process has a linear effect on the calculated * score. Negative nice values make it easier for a thread to be * considered interactive. */ score = imax(0, sched_interact_score(td) + td->td_proc->p_nice); if (score < sched_interact) { pri = PRI_MIN_INTERACT; pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) / sched_interact) * score; KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT, ("sched_priority: invalid interactive priority %d score %d", pri, score)); } else { pri = SCHED_PRI_MIN; if (td_get_sched(td)->ts_ticks) pri += min(SCHED_PRI_TICKS(td_get_sched(td)), SCHED_PRI_RANGE - 1); pri += SCHED_PRI_NICE(td->td_proc->p_nice); KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH, ("sched_priority: invalid priority %d: nice %d, " "ticks %d ftick %d ltick %d tick pri %d", pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks, td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick, SCHED_PRI_TICKS(td_get_sched(td)))); } sched_user_prio(td, pri); return; } /* * This routine enforces a maximum limit on the amount of scheduling history * kept. It is called after either the slptime or runtime is adjusted. This * function is ugly due to integer math. */ static void sched_interact_update(struct thread *td) { struct td_sched *ts; u_int sum; ts = td_get_sched(td); sum = ts->ts_runtime + ts->ts_slptime; if (sum < SCHED_SLP_RUN_MAX) return; /* * This only happens from two places: * 1) We have added an unusual amount of run time from fork_exit. * 2) We have added an unusual amount of sleep time from sched_sleep(). */ if (sum > SCHED_SLP_RUN_MAX * 2) { if (ts->ts_runtime > ts->ts_slptime) { ts->ts_runtime = SCHED_SLP_RUN_MAX; ts->ts_slptime = 1; } else { ts->ts_slptime = SCHED_SLP_RUN_MAX; ts->ts_runtime = 1; } return; } /* * If we have exceeded by more than 1/5th then the algorithm below * will not bring us back into range. Dividing by two here forces * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] */ if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { ts->ts_runtime /= 2; ts->ts_slptime /= 2; return; } ts->ts_runtime = (ts->ts_runtime / 5) * 4; ts->ts_slptime = (ts->ts_slptime / 5) * 4; } /* * Scale back the interactivity history when a child thread is created. The * history is inherited from the parent but the thread may behave totally * differently. For example, a shell spawning a compiler process. 
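
[Illustrative aside, not part of this diff] A standalone replica of the sched_interact_score() logic above, showing how voluntary sleep versus run time maps onto the 0..100 scale where lower means more interactive, including the early cut-off that applies with the default threshold of 30:

    #include <stdio.h>

    #define SCHED_INTERACT_MAX  100
    #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)

    static int sched_interact = 30;     /* default SCHED_INTERACT_THRESH */

    static int
    interact_score(unsigned int runtime, unsigned int slptime)
    {
        unsigned int div;

        /* With the default threshold, runtime >= slptime can never score
         * as interactive, so skip the full computation. */
        if (sched_interact <= SCHED_INTERACT_HALF && runtime >= slptime)
            return (SCHED_INTERACT_HALF);
        if (runtime > slptime) {
            div = runtime / SCHED_INTERACT_HALF;
            if (div < 1)
                div = 1;
            return (SCHED_INTERACT_HALF +
                (SCHED_INTERACT_HALF - slptime / div));
        }
        if (slptime > runtime) {
            div = slptime / SCHED_INTERACT_HALF;
            if (div < 1)
                div = 1;
            return (runtime / div);
        }
        return (runtime ? SCHED_INTERACT_HALF : 0);
    }

    int
    main(void)
    {
        /* Mostly sleeping (shell waiting on input): score 5, interactive. */
        printf("%d\n", interact_score(1000, 10000));
        /* Mostly running (compiler): early cut-off, score 50, batch. */
        printf("%d\n", interact_score(10000, 1000));
        return (0);
    }
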
We want * to learn that the compiler is behaving badly very quickly. */ static void sched_interact_fork(struct thread *td) { struct td_sched *ts; int ratio; int sum; ts = td_get_sched(td); sum = ts->ts_runtime + ts->ts_slptime; if (sum > SCHED_SLP_RUN_FORK) { ratio = sum / SCHED_SLP_RUN_FORK; ts->ts_runtime /= ratio; ts->ts_slptime /= ratio; } } /* * Called from proc0_init() to setup the scheduler fields. */ void schedinit(void) { struct td_sched *ts0; /* * Set up the scheduler specific parts of thread0. */ ts0 = td_get_sched(&thread0); ts0->ts_ltick = ticks; ts0->ts_ftick = ticks; ts0->ts_slice = 0; } /* * This is only somewhat accurate since given many processes of the same * priority they will switch when their slices run out, which will be * at most sched_slice stathz ticks. */ int sched_rr_interval(void) { /* Convert sched_slice from stathz to hz. */ return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz)); } /* * Update the percent cpu tracking information when it is requested or * the total history exceeds the maximum. We keep a sliding history of * tick counts that slowly decays. This is less precise than the 4BSD * mechanism since it happens with less regular and frequent events. */ static void sched_pctcpu_update(struct td_sched *ts, int run) { int t = ticks; /* * The signed difference may be negative if the thread hasn't run for * over half of the ticks rollover period. */ if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) { ts->ts_ticks = 0; ts->ts_ftick = t - SCHED_TICK_TARG; } else if (t - ts->ts_ftick >= SCHED_TICK_MAX) { ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) * (ts->ts_ltick - (t - SCHED_TICK_TARG)); ts->ts_ftick = t - SCHED_TICK_TARG; } if (run) ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT; ts->ts_ltick = t; } /* * Adjust the priority of a thread. Move it to the appropriate run-queue * if necessary. This is the back-end for several priority related * functions. */ static void sched_thread_priority(struct thread *td, u_char prio) { struct td_sched *ts; struct tdq *tdq; int oldpri; KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio", "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, sched_tdname(curthread)); SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio); if (td != curthread && prio < td->td_priority) { KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread), "lend prio", "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, sched_tdname(td)); SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, curthread); } ts = td_get_sched(td); THREAD_LOCK_ASSERT(td, MA_OWNED); if (td->td_priority == prio) return; /* * If the priority has been elevated due to priority * propagation, we may have to move ourselves to a new * queue. This could be optimized to not re-add in some * cases. */ if (TD_ON_RUNQ(td) && prio < td->td_priority) { sched_rem(td); td->td_priority = prio; sched_add(td, SRQ_BORROWING); return; } /* * If the thread is currently running we may have to adjust the lowpri * information so other cpus are aware of our current priority. */ if (TD_IS_RUNNING(td)) { tdq = TDQ_CPU(ts->ts_cpu); oldpri = td->td_priority; td->td_priority = prio; if (prio < tdq->tdq_lowpri) tdq->tdq_lowpri = prio; else if (tdq->tdq_lowpri == oldpri) tdq_setlowpri(tdq, td); return; } td->td_priority = prio; } /* * Update a thread's priority when it is lent another thread's * priority. 
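
[Illustrative aside, not part of this diff] The %CPU history kept by sched_pctcpu_update() above is a sliding window: credit older than SCHED_TICK_TARG (ten seconds worth of hz ticks) is either dropped after a long sleep or scaled away once the window grows past SCHED_TICK_MAX. A userspace sketch with hz assumed to be 1000, showing that a continuously running thread's tick credit converges on the window cap rather than growing without bound:

    #include <stdio.h>

    #define SCHED_TICK_SHIFT    10
    #define HZ                  1000            /* assumed */
    #define SCHED_TICK_TARG     (HZ * 10)
    #define SCHED_TICK_MAX      (SCHED_TICK_TARG + HZ)

    struct pct {
        int ticks, ftick, ltick;
    };

    static void
    pctcpu_update(struct pct *ts, int now, int run)
    {
        if ((unsigned)(now - ts->ltick) >= SCHED_TICK_TARG) {
            /* Idle for a whole window: start over. */
            ts->ticks = 0;
            ts->ftick = now - SCHED_TICK_TARG;
        } else if (now - ts->ftick >= SCHED_TICK_MAX) {
            /* Window too long: rescale the credit to the last TARG ticks. */
            ts->ticks = (ts->ticks / (ts->ltick - ts->ftick)) *
                (ts->ltick - (now - SCHED_TICK_TARG));
            ts->ftick = now - SCHED_TICK_TARG;
        }
        if (run)
            ts->ticks += (now - ts->ltick) << SCHED_TICK_SHIFT;
        ts->ltick = now;
    }

    int
    main(void)
    {
        struct pct ts = { 0, 0, 0 };
        int now;

        /* A thread that runs continuously, updated once per second. */
        for (now = HZ; now <= 30 * HZ; now += HZ)
            pctcpu_update(&ts, now, 1);
        /* ticks settles at SCHED_TICK_TARG << SCHED_TICK_SHIFT. */
        printf("window %d..%d, ticks %d (cap %d)\n",
            ts.ftick, ts.ltick, ts.ticks, SCHED_TICK_TARG << SCHED_TICK_SHIFT);
        return (0);
    }
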
*/ void sched_lend_prio(struct thread *td, u_char prio) { td->td_flags |= TDF_BORROWING; sched_thread_priority(td, prio); } /* * Restore a thread's priority when priority propagation is * over. The prio argument is the minimum priority the thread * needs to have to satisfy other possible priority lending * requests. If the thread's regular priority is less * important than prio, the thread will keep a priority boost * of prio. */ void sched_unlend_prio(struct thread *td, u_char prio) { u_char base_pri; if (td->td_base_pri >= PRI_MIN_TIMESHARE && td->td_base_pri <= PRI_MAX_TIMESHARE) base_pri = td->td_user_pri; else base_pri = td->td_base_pri; if (prio >= base_pri) { td->td_flags &= ~TDF_BORROWING; sched_thread_priority(td, base_pri); } else sched_lend_prio(td, prio); } /* * Standard entry for setting the priority to an absolute value. */ void sched_prio(struct thread *td, u_char prio) { u_char oldprio; /* First, update the base priority. */ td->td_base_pri = prio; /* * If the thread is borrowing another thread's priority, don't * ever lower the priority. */ if (td->td_flags & TDF_BORROWING && td->td_priority < prio) return; /* Change the real priority. */ oldprio = td->td_priority; sched_thread_priority(td, prio); /* * If the thread is on a turnstile, then let the turnstile update * its state. */ if (TD_ON_LOCK(td) && oldprio != prio) turnstile_adjust(td, oldprio); } /* * Set the base user priority, does not effect current running priority. */ void sched_user_prio(struct thread *td, u_char prio) { td->td_base_user_pri = prio; if (td->td_lend_user_pri <= prio) return; td->td_user_pri = prio; } void sched_lend_user_prio(struct thread *td, u_char prio) { THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_lend_user_pri = prio; td->td_user_pri = min(prio, td->td_base_user_pri); if (td->td_priority > td->td_user_pri) sched_prio(td, td->td_user_pri); else if (td->td_priority != td->td_user_pri) td->td_flags |= TDF_NEEDRESCHED; } /* * Handle migration from sched_switch(). This happens only for * cpu binding. */ static struct mtx * sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) { struct tdq *tdn; KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: " "thread %s queued on absent CPU %d.", td->td_name, td_get_sched(td)->ts_cpu)); tdn = TDQ_CPU(td_get_sched(td)->ts_cpu); #ifdef SMP tdq_load_rem(tdq, td); /* * Do the lock dance required to avoid LOR. We grab an extra * spinlock nesting to prevent preemption while we're * not holding either run-queue lock. */ spinlock_enter(); thread_lock_block(td); /* This releases the lock on tdq. */ /* * Acquire both run-queue locks before placing the thread on the new * run-queue to avoid deadlocks created by placing a thread with a * blocked lock on the run-queue of a remote processor. The deadlock * occurs when a third processor attempts to lock the two queues in * question while the target processor is spinning with its own * run-queue lock held while waiting for the blocked lock to clear. */ tdq_lock_pair(tdn, tdq); tdq_add(tdn, td, flags); tdq_notify(tdn, td); TDQ_UNLOCK(tdn); spinlock_exit(); #endif return (TDQ_LOCKPTR(tdn)); } /* * Variadic version of thread_lock_unblock() that does not assume td_lock * is blocked. */ static inline void thread_unblock_switch(struct thread *td, struct mtx *mtx) { atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, (uintptr_t)mtx); } /* * Switch threads. This function has to handle threads coming in while * blocked for some reason, running, or idle. 
It also must deal with * migrating a thread from one queue to another as running threads may * be assigned elsewhere via binding. */ void sched_switch(struct thread *td, struct thread *newtd, int flags) { struct tdq *tdq; struct td_sched *ts; struct mtx *mtx; int srqflag; int cpuid, preempted; THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument")); cpuid = PCPU_GET(cpuid); tdq = TDQ_CPU(cpuid); ts = td_get_sched(td); mtx = td->td_lock; sched_pctcpu_update(ts, 1); ts->ts_rltick = ticks; td->td_lastcpu = td->td_oncpu; td->td_oncpu = NOCPU; preempted = (td->td_flags & TDF_SLICEEND) == 0 && (flags & SW_PREEMPT) != 0; td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND); td->td_owepreempt = 0; if (!TD_IS_IDLETHREAD(td)) tdq->tdq_switchcnt++; /* * The lock pointer in an idle thread should never change. Reset it * to CAN_RUN as well. */ if (TD_IS_IDLETHREAD(td)) { MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); TD_SET_CAN_RUN(td); } else if (TD_IS_RUNNING(td)) { MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); srqflag = preempted ? SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : SRQ_OURSELF|SRQ_YIELDING; #ifdef SMP if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu)) ts->ts_cpu = sched_pickcpu(td, 0); #endif if (ts->ts_cpu == cpuid) tdq_runq_add(tdq, td, srqflag); else { KASSERT(THREAD_CAN_MIGRATE(td) || (ts->ts_flags & TSF_BOUND) != 0, ("Thread %p shouldn't migrate", td)); mtx = sched_switch_migrate(tdq, td, srqflag); } } else { /* This thread must be going to sleep. */ TDQ_LOCK(tdq); mtx = thread_lock_block(td); tdq_load_rem(tdq, td); } #if (KTR_COMPILE & KTR_SCHED) != 0 if (TD_IS_IDLETHREAD(td)) KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle", "prio:%d", td->td_priority); else KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td), "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg, "lockname:\"%s\"", td->td_lockname); #endif /* * We enter here with the thread blocked and assigned to the * appropriate cpu run-queue or sleep-queue and with the current * thread-queue locked. */ TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); newtd = choosethread(); /* * Call the MD code to switch contexts if necessary. */ if (td != newtd) { #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); #endif SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc); lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; sched_pctcpu_update(td_get_sched(newtd), 0); #ifdef KDTRACE_HOOKS /* * If DTrace has set the active vtime enum to anything * other than INACTIVE (0), then it should have set the * function to call. */ if (dtrace_vtime_active) (*dtrace_vtime_switch_func)(newtd); #endif cpu_switch(td, newtd, mtx); /* * We may return from cpu_switch on a different cpu. However, * we always return with td_lock pointing to the current cpu's * run queue lock. */ cpuid = PCPU_GET(cpuid); tdq = TDQ_CPU(cpuid); lock_profile_obtain_lock_success( &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); SDT_PROBE0(sched, , , on__cpu); #ifdef HWPMC_HOOKS if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); #endif } else { thread_unblock_switch(td, mtx); SDT_PROBE0(sched, , , remain__cpu); } KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", "prio:%d", td->td_priority); /* * Assert that all went well and return. 
*/ TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); td->td_oncpu = cpuid; } /* * Adjust thread priorities as a result of a nice request. */ void sched_nice(struct proc *p, int nice) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); p->p_nice = nice; FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); sched_priority(td); sched_prio(td, td->td_base_user_pri); thread_unlock(td); } } /* * Record the sleep time for the interactivity scorer. */ void sched_sleep(struct thread *td, int prio) { THREAD_LOCK_ASSERT(td, MA_OWNED); td->td_slptick = ticks; if (TD_IS_SUSPENDED(td) || prio >= PSOCK) td->td_flags |= TDF_CANSWAP; if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE) return; if (static_boost == 1 && prio) sched_prio(td, prio); else if (static_boost && td->td_priority > static_boost) sched_prio(td, static_boost); } /* * Schedule a thread to resume execution and record how long it voluntarily * slept. We also update the pctcpu, interactivity, and priority. */ void sched_wakeup(struct thread *td) { struct td_sched *ts; int slptick; THREAD_LOCK_ASSERT(td, MA_OWNED); ts = td_get_sched(td); td->td_flags &= ~TDF_CANSWAP; /* * If we slept for more than a tick update our interactivity and * priority. */ slptick = td->td_slptick; td->td_slptick = 0; if (slptick && slptick != ticks) { ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT; sched_interact_update(td); sched_pctcpu_update(ts, 0); } /* * Reset the slice value since we slept and advanced the round-robin. */ ts->ts_slice = 0; sched_add(td, SRQ_BORING); } /* * Penalize the parent for creating a new child and initialize the child's * priority. */ void sched_fork(struct thread *td, struct thread *child) { THREAD_LOCK_ASSERT(td, MA_OWNED); sched_pctcpu_update(td_get_sched(td), 1); sched_fork_thread(td, child); /* * Penalize the parent and child for forking. */ sched_interact_fork(child); sched_priority(child); td_get_sched(td)->ts_runtime += tickincr; sched_interact_update(td); sched_priority(td); } /* * Fork a new thread, may be within the same process. */ void sched_fork_thread(struct thread *td, struct thread *child) { struct td_sched *ts; struct td_sched *ts2; struct tdq *tdq; tdq = TDQ_SELF(); THREAD_LOCK_ASSERT(td, MA_OWNED); /* * Initialize child. */ ts = td_get_sched(td); ts2 = td_get_sched(child); child->td_oncpu = NOCPU; child->td_lastcpu = NOCPU; child->td_lock = TDQ_LOCKPTR(tdq); child->td_cpuset = cpuset_ref(td->td_cpuset); + child->td_domain.dr_policy = td->td_cpuset->cs_domain; ts2->ts_cpu = ts->ts_cpu; ts2->ts_flags = 0; /* * Grab our parents cpu estimation information. */ ts2->ts_ticks = ts->ts_ticks; ts2->ts_ltick = ts->ts_ltick; ts2->ts_ftick = ts->ts_ftick; /* * Do not inherit any borrowed priority from the parent. */ child->td_priority = child->td_base_pri; /* * And update interactivity score. */ ts2->ts_slptime = ts->ts_slptime; ts2->ts_runtime = ts->ts_runtime; /* Attempt to quickly learn interactivity. */ ts2->ts_slice = tdq_slice(tdq) - sched_slice_min; #ifdef KTR bzero(ts2->ts_name, sizeof(ts2->ts_name)); #endif } /* * Adjust the priority class of a thread. */ void sched_class(struct thread *td, int class) { THREAD_LOCK_ASSERT(td, MA_OWNED); if (td->td_pri_class == class) return; td->td_pri_class = class; } /* * Return some of the child's priority and interactivity to the parent. 
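 */

/*
 * Editor's illustration, not part of this commit: the sleep-time credit
 * that sched_wakeup() above adds before it recomputes the interactivity
 * score.  SCHED_TICK_SHIFT is the fixed-point shift defined earlier in
 * this file; the tick values quoted below are assumptions for the
 * example only.
 */
static inline int
example_sleep_credit(int wake_tick, int sleep_tick)
{

	/* Whole ticks slept, scaled into the shifted ts_slptime units. */
	return ((wake_tick - sleep_tick) << SCHED_TICK_SHIFT);
}

/*
 * A thread that slept from tick 1000 to tick 1500 is credited
 * (1500 - 1000) << SCHED_TICK_SHIFT units of ts_slptime, which
 * sched_interact_update() then folds into its sleep/run history.
 * End of illustration; sched_exit() and its thread-level helper follow.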
*/ void sched_exit(struct proc *p, struct thread *child) { struct thread *td; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit", "prio:%d", child->td_priority); PROC_LOCK_ASSERT(p, MA_OWNED); td = FIRST_THREAD_IN_PROC(p); sched_exit_thread(td, child); } /* * Penalize another thread for the time spent on this one. This helps to * worsen the priority and interactivity of processes which schedule batch * jobs such as make. This has little effect on the make process itself but * causes new processes spawned by it to receive worse scores immediately. */ void sched_exit_thread(struct thread *td, struct thread *child) { KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit", "prio:%d", child->td_priority); /* * Give the child's runtime to the parent without returning the * sleep time as a penalty to the parent. This causes shells that * launch expensive things to mark their children as expensive. */ thread_lock(td); td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime; sched_interact_update(td); sched_priority(td); thread_unlock(td); } void sched_preempt(struct thread *td) { struct tdq *tdq; SDT_PROBE2(sched, , , surrender, td, td->td_proc); thread_lock(td); tdq = TDQ_SELF(); TDQ_LOCK_ASSERT(tdq, MA_OWNED); tdq->tdq_ipipending = 0; if (td->td_priority > tdq->tdq_lowpri) { int flags; flags = SW_INVOL | SW_PREEMPT; if (td->td_critnest > 1) td->td_owepreempt = 1; else if (TD_IS_IDLETHREAD(td)) mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL); else mi_switch(flags | SWT_REMOTEPREEMPT, NULL); } thread_unlock(td); } /* * Fix priorities on return to user-space. Priorities may be elevated due * to static priorities in msleep() or similar. */ void sched_userret(struct thread *td) { /* * XXX we cheat slightly on the locking here to avoid locking in * the usual case. Setting td_priority here is essentially an * incomplete workaround for not setting it properly elsewhere. * Now that some interrupt handlers are threads, not setting it * properly elsewhere can clobber it in the window between setting * it here and returning to user mode, so don't waste time setting * it perfectly here. */ KASSERT((td->td_flags & TDF_BORROWING) == 0, ("thread with borrowed priority returning to userland")); if (td->td_priority != td->td_user_pri) { thread_lock(td); td->td_priority = td->td_user_pri; td->td_base_pri = td->td_user_pri; tdq_setlowpri(TDQ_SELF(), td); thread_unlock(td); } } /* * Handle a stathz tick. This is really only relevant for timeshare * threads. */ void sched_clock(struct thread *td) { struct tdq *tdq; struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); tdq = TDQ_SELF(); #ifdef SMP /* * We run the long term load balancer infrequently on the first cpu. */ if (balance_tdq == tdq) { if (balance_ticks && --balance_ticks == 0) sched_balance(); } #endif /* * Save the old switch count so we have a record of the last ticks * activity. Initialize the new switch count based on our load. * If there is some activity seed it to reflect that. */ tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt; tdq->tdq_switchcnt = tdq->tdq_load; /* * Advance the insert index once for each tick to ensure that all * threads get a chance to run. 
*/ if (tdq->tdq_idx == tdq->tdq_ridx) { tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) tdq->tdq_ridx = tdq->tdq_idx; } ts = td_get_sched(td); sched_pctcpu_update(ts, 1); if (td->td_pri_class & PRI_FIFO_BIT) return; if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) { /* * We used a tick; charge it to the thread so * that we can compute our interactivity. */ td_get_sched(td)->ts_runtime += tickincr; sched_interact_update(td); sched_priority(td); } /* * Force a context switch if the current thread has used up a full * time slice (default is 100ms). */ if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) { ts->ts_slice = 0; td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND; } } u_int sched_estcpu(struct thread *td __unused) { return (0); } /* * Return whether the current CPU has runnable tasks. Used for in-kernel * cooperative idle threads. */ int sched_runnable(void) { struct tdq *tdq; int load; load = 1; tdq = TDQ_SELF(); if ((curthread->td_flags & TDF_IDLETD) != 0) { if (tdq->tdq_load > 0) goto out; } else if (tdq->tdq_load - 1 > 0) goto out; load = 0; out: return (load); } /* * Choose the highest priority thread to run. The thread is removed from * the run-queue while running however the load remains. For SMP we set * the tdq in the global idle bitmask if it idles here. */ struct thread * sched_choose(void) { struct thread *td; struct tdq *tdq; tdq = TDQ_SELF(); TDQ_LOCK_ASSERT(tdq, MA_OWNED); td = tdq_choose(tdq); if (td) { tdq_runq_rem(tdq, td); tdq->tdq_lowpri = td->td_priority; return (td); } tdq->tdq_lowpri = PRI_MAX_IDLE; return (PCPU_GET(idlethread)); } /* * Set owepreempt if necessary. Preemption never happens directly in ULE, * we always request it once we exit a critical section. */ static inline void sched_setpreempt(struct thread *td) { struct thread *ctd; int cpri; int pri; THREAD_LOCK_ASSERT(curthread, MA_OWNED); ctd = curthread; pri = td->td_priority; cpri = ctd->td_priority; if (pri < cpri) ctd->td_flags |= TDF_NEEDRESCHED; if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) return; if (!sched_shouldpreempt(pri, cpri, 0)) return; ctd->td_owepreempt = 1; } /* * Add a thread to a thread queue. Select the appropriate runq and add the * thread to it. This is the internal function called when the tdq is * predetermined. */ void tdq_add(struct tdq *tdq, struct thread *td, int flags) { TDQ_LOCK_ASSERT(tdq, MA_OWNED); KASSERT((td->td_inhibitors == 0), ("sched_add: trying to run inhibited thread")); KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), ("sched_add: bad thread state")); KASSERT(td->td_flags & TDF_INMEM, ("sched_add: thread swapped out")); if (td->td_priority < tdq->tdq_lowpri) tdq->tdq_lowpri = td->td_priority; tdq_runq_add(tdq, td, flags); tdq_load_add(tdq, td); } /* * Select the target thread queue and add a thread to it. Request * preemption or IPI a remote processor if required. */ void sched_add(struct thread *td, int flags) { struct tdq *tdq; #ifdef SMP int cpu; #endif KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", "prio:%d", td->td_priority, KTR_ATTR_LINKED, sched_tdname(curthread)); KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", KTR_ATTR_LINKED, sched_tdname(td)); SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, flags & SRQ_PREEMPTED); THREAD_LOCK_ASSERT(td, MA_OWNED); /* * Recalculate the priority before we select the target cpu or * run-queue. 
*/ if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) sched_priority(td); #ifdef SMP /* * Pick the destination cpu and if it isn't ours transfer to the * target cpu. */ td_get_sched(td)->ts_cpu = curcpu; /* Pick something valid to start */ cpu = sched_pickcpu(td, flags); tdq = sched_setcpu(td, cpu, flags); tdq_add(tdq, td, flags); if (cpu != PCPU_GET(cpuid)) { tdq_notify(tdq, td); return; } #else tdq = TDQ_SELF(); TDQ_LOCK(tdq); /* * Now that the thread is moving to the run-queue, set the lock * to the scheduler's lock. */ thread_lock_set(td, TDQ_LOCKPTR(tdq)); tdq_add(tdq, td, flags); #endif if (!(flags & SRQ_YIELDING)) sched_setpreempt(td); } /* * Remove a thread from a run-queue without running it. This is used * when we're stealing a thread from a remote queue. Otherwise all threads * exit by calling sched_exit_thread() and sched_throw() themselves. */ void sched_rem(struct thread *td) { struct tdq *tdq; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem", "prio:%d", td->td_priority); SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL); tdq = TDQ_CPU(td_get_sched(td)->ts_cpu); TDQ_LOCK_ASSERT(tdq, MA_OWNED); MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); KASSERT(TD_ON_RUNQ(td), ("sched_rem: thread not on run queue")); tdq_runq_rem(tdq, td); tdq_load_rem(tdq, td); TD_SET_CAN_RUN(td); if (td->td_priority == tdq->tdq_lowpri) tdq_setlowpri(tdq, NULL); } /* * Fetch cpu utilization information. Updates on demand. */ fixpt_t sched_pctcpu(struct thread *td) { fixpt_t pctcpu; struct td_sched *ts; pctcpu = 0; ts = td_get_sched(td); THREAD_LOCK_ASSERT(td, MA_OWNED); sched_pctcpu_update(ts, TD_IS_RUNNING(td)); if (ts->ts_ticks) { int rtick; /* How many rtick per second ? */ rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; } return (pctcpu); } /* * Enforce affinity settings for a thread. Called after adjustments to * cpumask. */ void sched_affinity(struct thread *td) { #ifdef SMP struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); ts = td_get_sched(td); if (THREAD_CAN_SCHED(td, ts->ts_cpu)) return; if (TD_ON_RUNQ(td)) { sched_rem(td); sched_add(td, SRQ_BORING); return; } if (!TD_IS_RUNNING(td)) return; /* * Force a switch before returning to userspace. If the * target thread is not running locally send an ipi to force * the issue. */ td->td_flags |= TDF_NEEDRESCHED; if (td != curthread) ipi_cpu(ts->ts_cpu, IPI_PREEMPT); #endif } /* * Bind a thread to a target cpu. */ void sched_bind(struct thread *td, int cpu) { struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); KASSERT(td == curthread, ("sched_bind: can only bind curthread")); ts = td_get_sched(td); if (ts->ts_flags & TSF_BOUND) sched_unbind(td); KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td)); ts->ts_flags |= TSF_BOUND; sched_pin(); if (PCPU_GET(cpuid) == cpu) return; ts->ts_cpu = cpu; /* When we return from mi_switch we'll be on the correct cpu. */ mi_switch(SW_VOL, NULL); } /* * Release a bound thread. */ void sched_unbind(struct thread *td) { struct td_sched *ts; THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(td == curthread, ("sched_unbind: can only bind curthread")); ts = td_get_sched(td); if ((ts->ts_flags & TSF_BOUND) == 0) return; ts->ts_flags &= ~TSF_BOUND; sched_unpin(); } int sched_is_bound(struct thread *td) { THREAD_LOCK_ASSERT(td, MA_OWNED); return (td_get_sched(td)->ts_flags & TSF_BOUND); } /* * Basic yield call. 
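 */

/*
 * Editor's illustration, not part of this commit: the fixed-point
 * conversion performed by sched_pctcpu() above.  "rtick" is how many of
 * the last second's hz ticks the thread actually ran; the result is an
 * FSCALE-based fraction of the kind ps(1) reports.  The figures in the
 * closing comment are assumptions for the example only.
 */
static inline fixpt_t
example_pctcpu(int rtick)
{

	/* rtick/hz expressed as an FSCALE-scaled fraction. */
	return ((FSCALE * ((FSCALE * rtick) / hz)) >> FSHIFT);
}

/*
 * With hz = 1000 and a thread that ran for 250 of the last 1000 ticks,
 * the result is FSCALE / 4, which ps(1) displays as roughly 25% CPU.
 * End of illustration; the basic yield entry point follows.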
*/ void sched_relinquish(struct thread *td) { thread_lock(td); mi_switch(SW_VOL | SWT_RELINQUISH, NULL); thread_unlock(td); } /* * Return the total system load. */ int sched_load(void) { #ifdef SMP int total; int i; total = 0; CPU_FOREACH(i) total += TDQ_CPU(i)->tdq_sysload; return (total); #else return (TDQ_SELF()->tdq_sysload); #endif } int sched_sizeof_proc(void) { return (sizeof(struct proc)); } int sched_sizeof_thread(void) { return (sizeof(struct thread) + sizeof(struct td_sched)); } #ifdef SMP #define TDQ_IDLESPIN(tdq) \ ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0) #else #define TDQ_IDLESPIN(tdq) 1 #endif /* * The actual idle process. */ void sched_idletd(void *dummy) { struct thread *td; struct tdq *tdq; int oldswitchcnt, switchcnt; int i; mtx_assert(&Giant, MA_NOTOWNED); td = curthread; tdq = TDQ_SELF(); THREAD_NO_SLEEPING(); oldswitchcnt = -1; for (;;) { if (tdq->tdq_load) { thread_lock(td); mi_switch(SW_VOL | SWT_IDLE, NULL); thread_unlock(td); } switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; #ifdef SMP if (switchcnt != oldswitchcnt) { oldswitchcnt = switchcnt; if (tdq_idled(tdq) == 0) continue; } switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; #else oldswitchcnt = switchcnt; #endif /* * If we're switching very frequently, spin while checking * for load rather than entering a low power state that * may require an IPI. However, don't do any busy * loops while on SMT machines as this simply steals * cycles from cores doing useful work. */ if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) { for (i = 0; i < sched_idlespins; i++) { if (tdq->tdq_load) break; cpu_spinwait(); } } /* If there was context switch during spin, restart it. */ switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt) continue; /* Run main MD idle handler. */ tdq->tdq_cpu_idle = 1; /* * Make sure that tdq_cpu_idle update is globally visible * before cpu_idle() read tdq_load. The order is important * to avoid race with tdq_notify. */ atomic_thread_fence_seq_cst(); cpu_idle(switchcnt * 4 > sched_idlespinthresh); tdq->tdq_cpu_idle = 0; /* * Account thread-less hardware interrupts and * other wakeup reasons equal to context switches. */ switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; if (switchcnt != oldswitchcnt) continue; tdq->tdq_switchcnt++; oldswitchcnt++; } } /* * A CPU is entering for the first time or a thread is exiting. */ void sched_throw(struct thread *td) { struct thread *newtd; struct tdq *tdq; tdq = TDQ_SELF(); if (td == NULL) { /* Correct spinlock nesting and acquire the correct lock. */ TDQ_LOCK(tdq); spinlock_exit(); PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); } else { MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); tdq_load_rem(tdq, td); lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); td->td_lastcpu = td->td_oncpu; td->td_oncpu = NOCPU; } KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); newtd = choosethread(); TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; cpu_throw(td, newtd); /* doesn't return */ } /* * This is called from fork_exit(). Just acquire the correct locks and * let fork do the rest of the work. */ void sched_fork_exit(struct thread *td) { struct tdq *tdq; int cpuid; /* * Finish setting up thread glue so that it begins execution in a * non-nested critical section with the scheduler lock held. 
*/ cpuid = PCPU_GET(cpuid); tdq = TDQ_CPU(cpuid); if (TD_IS_IDLETHREAD(td)) td->td_lock = TDQ_LOCKPTR(tdq); MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); td->td_oncpu = cpuid; TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); lock_profile_obtain_lock_success( &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", "prio:%d", td->td_priority); SDT_PROBE0(sched, , , on__cpu); } /* * Create on first use to catch odd startup conditions. */ char * sched_tdname(struct thread *td) { #ifdef KTR struct td_sched *ts; ts = td_get_sched(td); if (ts->ts_name[0] == '\0') snprintf(ts->ts_name, sizeof(ts->ts_name), "%s tid %d", td->td_name, td->td_tid); return (ts->ts_name); #else return (td->td_name); #endif } #ifdef KTR void sched_clear_tdname(struct thread *td) { struct td_sched *ts; ts = td_get_sched(td); ts->ts_name[0] = '\0'; } #endif #ifdef SMP /* * Build the CPU topology dump string. It is recursively called to collect * the topology tree. */ static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg, int indent) { char cpusetbuf[CPUSETBUFSIZ]; int i, first; sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent, "", 1 + indent / 2, cg->cg_level); sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "", cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask)); first = TRUE; for (i = 0; i < MAXCPU; i++) { if (CPU_ISSET(i, &cg->cg_mask)) { if (!first) sbuf_printf(sb, ", "); else first = FALSE; sbuf_printf(sb, "%d", i); } } sbuf_printf(sb, "</cpu>\n"); if (cg->cg_flags != 0) { sbuf_printf(sb, "%*s <flags>", indent, ""); if ((cg->cg_flags & CG_FLAG_HTT) != 0) sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>"); if ((cg->cg_flags & CG_FLAG_THREAD) != 0) sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>"); if ((cg->cg_flags & CG_FLAG_SMT) != 0) sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>"); sbuf_printf(sb, "</flags>\n"); } if (cg->cg_children > 0) { sbuf_printf(sb, "%*s <children>\n", indent, ""); for (i = 0; i < cg->cg_children; i++) sysctl_kern_sched_topology_spec_internal(sb, &cg->cg_child[i], indent+2); sbuf_printf(sb, "%*s </children>\n", indent, ""); } sbuf_printf(sb, "%*s</group>\n", indent, ""); return (0); } /* * Sysctl handler for retrieving topology dump. It's a wrapper for * the recursive sysctl_kern_sched_topology_spec_internal().
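 */

/*
 * Editor's illustration, not part of this commit: the dump built above is
 * exported as kern.sched.topology_spec and can be inspected with
 * sysctl(8).  For an assumed two-core, SMT-capable package the output has
 * roughly the following shape (all values are made up for the example):
 *
 *	<groups>
 *	 <group level="1" cache-level="0">
 *	  <cpu count="4" mask="f">0, 1, 2, 3</cpu>
 *	  <children>
 *	   <group level="2" cache-level="2">
 *	    <cpu count="2" mask="3">0, 1</cpu>
 *	    <flags><flag name="SMT">SMT group</flag></flags>
 *	   </group>
 *	  </children>
 *	 </group>
 *	</groups>
 *
 * End of illustration; the sysctl wrapper described above follows.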
*/ static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS) { struct sbuf *topo; int err; KASSERT(cpu_top != NULL, ("cpu_top isn't initialized")); topo = sbuf_new_for_sysctl(NULL, NULL, 512, req); if (topo == NULL) return (ENOMEM); sbuf_printf(topo, "<groups>\n"); err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1); sbuf_printf(topo, "</groups>\n"); if (err == 0) { err = sbuf_finish(topo); } sbuf_delete(topo); return (err); } #endif static int sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) { int error, new_val, period; period = 1000000 / realstathz; new_val = period * sched_slice; error = sysctl_handle_int(oidp, &new_val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (new_val <= 0) return (EINVAL); sched_slice = imax(1, (new_val + period / 2) / period); sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR; hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / realstathz); return (0); } SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler"); SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, "Scheduler name"); SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_kern_quantum, "I", "Quantum for timeshare threads in microseconds"); SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "Quantum for timeshare threads in stathz ticks"); SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "Interactivity score threshold"); SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 0, "Maximal (lowest) priority for preemption"); SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0, "Assign static kernel priorities to sleeping threads"); SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0, "Number of times idle thread will spin waiting for new work"); SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW, &sched_idlespinthresh, 0, "Threshold before we will permit idle thread spinning"); #ifdef SMP SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, "Number of hz ticks to keep thread affinity for"); SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, "Enables the long-term load balancer"); SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, &balance_interval, 0, "Average period in stathz ticks to run the long-term balancer"); SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, "Attempts to steal work from other cores before idling"); SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, "Minimum load on remote CPU before we'll steal"); SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A", "XML dump of detected CPU topology"); #endif /* ps compat. All cpu percentages from ULE are weighted. */ static int ccpu = 0; SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); Index: user/jeff/numa/sys/kern/syscalls.c =================================================================== --- user/jeff/numa/sys/kern/syscalls.c (revision 326888) +++ user/jeff/numa/sys/kern/syscalls.c (revision 326889) @@ -1,570 +1,572 @@ /* * System call names. * * DO NOT EDIT-- this file is automatically generated.
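 */

/*
 * Editor's illustration, not part of this generated file: consumers map a
 * system call number to its name by indexing this table.  The helper name
 * below is made up for the example, and it assumes the generated
 * sys/syscall.h is included so that SYS_MAXSYSCALL is visible.
 */
extern const char *syscallnames[];

static inline const char *
example_syscall_name(unsigned int code)
{

	/* Numbers beyond the table have no name to report. */
	return (code < SYS_MAXSYSCALL ? syscallnames[code] : "unknown");
}

/*
 * End of illustration; the generated name table follows.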
* $FreeBSD$ */ const char *syscallnames[] = { "syscall", /* 0 = syscall */ "exit", /* 1 = exit */ "fork", /* 2 = fork */ "read", /* 3 = read */ "write", /* 4 = write */ "open", /* 5 = open */ "close", /* 6 = close */ "wait4", /* 7 = wait4 */ "compat.creat", /* 8 = old creat */ "link", /* 9 = link */ "unlink", /* 10 = unlink */ "obs_execv", /* 11 = obsolete execv */ "chdir", /* 12 = chdir */ "fchdir", /* 13 = fchdir */ "compat11.mknod", /* 14 = freebsd11 mknod */ "chmod", /* 15 = chmod */ "chown", /* 16 = chown */ "break", /* 17 = break */ "compat4.getfsstat", /* 18 = freebsd4 getfsstat */ "compat.lseek", /* 19 = old lseek */ "getpid", /* 20 = getpid */ "mount", /* 21 = mount */ "unmount", /* 22 = unmount */ "setuid", /* 23 = setuid */ "getuid", /* 24 = getuid */ "geteuid", /* 25 = geteuid */ "ptrace", /* 26 = ptrace */ "recvmsg", /* 27 = recvmsg */ "sendmsg", /* 28 = sendmsg */ "recvfrom", /* 29 = recvfrom */ "accept", /* 30 = accept */ "getpeername", /* 31 = getpeername */ "getsockname", /* 32 = getsockname */ "access", /* 33 = access */ "chflags", /* 34 = chflags */ "fchflags", /* 35 = fchflags */ "sync", /* 36 = sync */ "kill", /* 37 = kill */ "compat.stat", /* 38 = old stat */ "getppid", /* 39 = getppid */ "compat.lstat", /* 40 = old lstat */ "dup", /* 41 = dup */ "compat10.pipe", /* 42 = freebsd10 pipe */ "getegid", /* 43 = getegid */ "profil", /* 44 = profil */ "ktrace", /* 45 = ktrace */ "compat.sigaction", /* 46 = old sigaction */ "getgid", /* 47 = getgid */ "compat.sigprocmask", /* 48 = old sigprocmask */ "getlogin", /* 49 = getlogin */ "setlogin", /* 50 = setlogin */ "acct", /* 51 = acct */ "compat.sigpending", /* 52 = old sigpending */ "sigaltstack", /* 53 = sigaltstack */ "ioctl", /* 54 = ioctl */ "reboot", /* 55 = reboot */ "revoke", /* 56 = revoke */ "symlink", /* 57 = symlink */ "readlink", /* 58 = readlink */ "execve", /* 59 = execve */ "umask", /* 60 = umask */ "chroot", /* 61 = chroot */ "compat.fstat", /* 62 = old fstat */ "compat.getkerninfo", /* 63 = old getkerninfo */ "compat.getpagesize", /* 64 = old getpagesize */ "msync", /* 65 = msync */ "vfork", /* 66 = vfork */ "obs_vread", /* 67 = obsolete vread */ "obs_vwrite", /* 68 = obsolete vwrite */ "sbrk", /* 69 = sbrk */ "sstk", /* 70 = sstk */ "compat.mmap", /* 71 = old mmap */ "vadvise", /* 72 = vadvise */ "munmap", /* 73 = munmap */ "mprotect", /* 74 = mprotect */ "madvise", /* 75 = madvise */ "obs_vhangup", /* 76 = obsolete vhangup */ "obs_vlimit", /* 77 = obsolete vlimit */ "mincore", /* 78 = mincore */ "getgroups", /* 79 = getgroups */ "setgroups", /* 80 = setgroups */ "getpgrp", /* 81 = getpgrp */ "setpgid", /* 82 = setpgid */ "setitimer", /* 83 = setitimer */ "compat.wait", /* 84 = old wait */ "swapon", /* 85 = swapon */ "getitimer", /* 86 = getitimer */ "compat.gethostname", /* 87 = old gethostname */ "compat.sethostname", /* 88 = old sethostname */ "getdtablesize", /* 89 = getdtablesize */ "dup2", /* 90 = dup2 */ "#91", /* 91 = getdopt */ "fcntl", /* 92 = fcntl */ "select", /* 93 = select */ "#94", /* 94 = setdopt */ "fsync", /* 95 = fsync */ "setpriority", /* 96 = setpriority */ "socket", /* 97 = socket */ "connect", /* 98 = connect */ "compat.accept", /* 99 = old accept */ "getpriority", /* 100 = getpriority */ "compat.send", /* 101 = old send */ "compat.recv", /* 102 = old recv */ "compat.sigreturn", /* 103 = old sigreturn */ "bind", /* 104 = bind */ "setsockopt", /* 105 = setsockopt */ "listen", /* 106 = listen */ "obs_vtimes", /* 107 = obsolete vtimes */ "compat.sigvec", /* 108 = old sigvec */ 
"compat.sigblock", /* 109 = old sigblock */ "compat.sigsetmask", /* 110 = old sigsetmask */ "compat.sigsuspend", /* 111 = old sigsuspend */ "compat.sigstack", /* 112 = old sigstack */ "compat.recvmsg", /* 113 = old recvmsg */ "compat.sendmsg", /* 114 = old sendmsg */ "obs_vtrace", /* 115 = obsolete vtrace */ "gettimeofday", /* 116 = gettimeofday */ "getrusage", /* 117 = getrusage */ "getsockopt", /* 118 = getsockopt */ "#119", /* 119 = resuba */ "readv", /* 120 = readv */ "writev", /* 121 = writev */ "settimeofday", /* 122 = settimeofday */ "fchown", /* 123 = fchown */ "fchmod", /* 124 = fchmod */ "compat.recvfrom", /* 125 = old recvfrom */ "setreuid", /* 126 = setreuid */ "setregid", /* 127 = setregid */ "rename", /* 128 = rename */ "compat.truncate", /* 129 = old truncate */ "compat.ftruncate", /* 130 = old ftruncate */ "flock", /* 131 = flock */ "mkfifo", /* 132 = mkfifo */ "sendto", /* 133 = sendto */ "shutdown", /* 134 = shutdown */ "socketpair", /* 135 = socketpair */ "mkdir", /* 136 = mkdir */ "rmdir", /* 137 = rmdir */ "utimes", /* 138 = utimes */ "obs_4.2", /* 139 = obsolete 4.2 sigreturn */ "adjtime", /* 140 = adjtime */ "compat.getpeername", /* 141 = old getpeername */ "compat.gethostid", /* 142 = old gethostid */ "compat.sethostid", /* 143 = old sethostid */ "compat.getrlimit", /* 144 = old getrlimit */ "compat.setrlimit", /* 145 = old setrlimit */ "compat.killpg", /* 146 = old killpg */ "setsid", /* 147 = setsid */ "quotactl", /* 148 = quotactl */ "compat.quota", /* 149 = old quota */ "compat.getsockname", /* 150 = old getsockname */ "#151", /* 151 = sem_lock */ "#152", /* 152 = sem_wakeup */ "#153", /* 153 = asyncdaemon */ "nlm_syscall", /* 154 = nlm_syscall */ "nfssvc", /* 155 = nfssvc */ "compat.getdirentries", /* 156 = old getdirentries */ "compat4.statfs", /* 157 = freebsd4 statfs */ "compat4.fstatfs", /* 158 = freebsd4 fstatfs */ "#159", /* 159 = nosys */ "lgetfh", /* 160 = lgetfh */ "getfh", /* 161 = getfh */ "compat4.getdomainname", /* 162 = freebsd4 getdomainname */ "compat4.setdomainname", /* 163 = freebsd4 setdomainname */ "compat4.uname", /* 164 = freebsd4 uname */ "sysarch", /* 165 = sysarch */ "rtprio", /* 166 = rtprio */ "#167", /* 167 = nosys */ "#168", /* 168 = nosys */ "semsys", /* 169 = semsys */ "msgsys", /* 170 = msgsys */ "shmsys", /* 171 = shmsys */ "#172", /* 172 = nosys */ "compat6.pread", /* 173 = freebsd6 pread */ "compat6.pwrite", /* 174 = freebsd6 pwrite */ "setfib", /* 175 = setfib */ "ntp_adjtime", /* 176 = ntp_adjtime */ "#177", /* 177 = sfork */ "#178", /* 178 = getdescriptor */ "#179", /* 179 = setdescriptor */ "#180", /* 180 = nosys */ "setgid", /* 181 = setgid */ "setegid", /* 182 = setegid */ "seteuid", /* 183 = seteuid */ "#184", /* 184 = lfs_bmapv */ "#185", /* 185 = lfs_markv */ "#186", /* 186 = lfs_segclean */ "#187", /* 187 = lfs_segwait */ "compat11.stat", /* 188 = freebsd11 stat */ "compat11.fstat", /* 189 = freebsd11 fstat */ "compat11.lstat", /* 190 = freebsd11 lstat */ "pathconf", /* 191 = pathconf */ "fpathconf", /* 192 = fpathconf */ "#193", /* 193 = nosys */ "getrlimit", /* 194 = getrlimit */ "setrlimit", /* 195 = setrlimit */ "compat11.getdirentries", /* 196 = freebsd11 getdirentries */ "compat6.mmap", /* 197 = freebsd6 mmap */ "__syscall", /* 198 = __syscall */ "compat6.lseek", /* 199 = freebsd6 lseek */ "compat6.truncate", /* 200 = freebsd6 truncate */ "compat6.ftruncate", /* 201 = freebsd6 ftruncate */ "__sysctl", /* 202 = __sysctl */ "mlock", /* 203 = mlock */ "munlock", /* 204 = munlock */ "undelete", /* 205 = undelete */ 
"futimes", /* 206 = futimes */ "getpgid", /* 207 = getpgid */ "#208", /* 208 = newreboot */ "poll", /* 209 = poll */ "lkmnosys", /* 210 = lkmnosys */ "lkmnosys", /* 211 = lkmnosys */ "lkmnosys", /* 212 = lkmnosys */ "lkmnosys", /* 213 = lkmnosys */ "lkmnosys", /* 214 = lkmnosys */ "lkmnosys", /* 215 = lkmnosys */ "lkmnosys", /* 216 = lkmnosys */ "lkmnosys", /* 217 = lkmnosys */ "lkmnosys", /* 218 = lkmnosys */ "lkmnosys", /* 219 = lkmnosys */ "compat7.__semctl", /* 220 = freebsd7 __semctl */ "semget", /* 221 = semget */ "semop", /* 222 = semop */ "#223", /* 223 = semconfig */ "compat7.msgctl", /* 224 = freebsd7 msgctl */ "msgget", /* 225 = msgget */ "msgsnd", /* 226 = msgsnd */ "msgrcv", /* 227 = msgrcv */ "shmat", /* 228 = shmat */ "compat7.shmctl", /* 229 = freebsd7 shmctl */ "shmdt", /* 230 = shmdt */ "shmget", /* 231 = shmget */ "clock_gettime", /* 232 = clock_gettime */ "clock_settime", /* 233 = clock_settime */ "clock_getres", /* 234 = clock_getres */ "ktimer_create", /* 235 = ktimer_create */ "ktimer_delete", /* 236 = ktimer_delete */ "ktimer_settime", /* 237 = ktimer_settime */ "ktimer_gettime", /* 238 = ktimer_gettime */ "ktimer_getoverrun", /* 239 = ktimer_getoverrun */ "nanosleep", /* 240 = nanosleep */ "ffclock_getcounter", /* 241 = ffclock_getcounter */ "ffclock_setestimate", /* 242 = ffclock_setestimate */ "ffclock_getestimate", /* 243 = ffclock_getestimate */ "clock_nanosleep", /* 244 = clock_nanosleep */ "#245", /* 245 = nosys */ "#246", /* 246 = nosys */ "clock_getcpuclockid2", /* 247 = clock_getcpuclockid2 */ "ntp_gettime", /* 248 = ntp_gettime */ "#249", /* 249 = nosys */ "minherit", /* 250 = minherit */ "rfork", /* 251 = rfork */ "obs_openbsd_poll", /* 252 = obsolete openbsd_poll */ "issetugid", /* 253 = issetugid */ "lchown", /* 254 = lchown */ "aio_read", /* 255 = aio_read */ "aio_write", /* 256 = aio_write */ "lio_listio", /* 257 = lio_listio */ "#258", /* 258 = nosys */ "#259", /* 259 = nosys */ "#260", /* 260 = nosys */ "#261", /* 261 = nosys */ "#262", /* 262 = nosys */ "#263", /* 263 = nosys */ "#264", /* 264 = nosys */ "#265", /* 265 = nosys */ "#266", /* 266 = nosys */ "#267", /* 267 = nosys */ "#268", /* 268 = nosys */ "#269", /* 269 = nosys */ "#270", /* 270 = nosys */ "#271", /* 271 = nosys */ "compat11.getdents", /* 272 = freebsd11 getdents */ "#273", /* 273 = nosys */ "lchmod", /* 274 = lchmod */ "netbsd_lchown", /* 275 = netbsd_lchown */ "lutimes", /* 276 = lutimes */ "netbsd_msync", /* 277 = netbsd_msync */ "compat11.nstat", /* 278 = freebsd11 nstat */ "compat11.nfstat", /* 279 = freebsd11 nfstat */ "compat11.nlstat", /* 280 = freebsd11 nlstat */ "#281", /* 281 = nosys */ "#282", /* 282 = nosys */ "#283", /* 283 = nosys */ "#284", /* 284 = nosys */ "#285", /* 285 = nosys */ "#286", /* 286 = nosys */ "#287", /* 287 = nosys */ "#288", /* 288 = nosys */ "preadv", /* 289 = preadv */ "pwritev", /* 290 = pwritev */ "#291", /* 291 = nosys */ "#292", /* 292 = nosys */ "#293", /* 293 = nosys */ "#294", /* 294 = nosys */ "#295", /* 295 = nosys */ "#296", /* 296 = nosys */ "compat4.fhstatfs", /* 297 = freebsd4 fhstatfs */ "fhopen", /* 298 = fhopen */ "compat11.fhstat", /* 299 = freebsd11 fhstat */ "modnext", /* 300 = modnext */ "modstat", /* 301 = modstat */ "modfnext", /* 302 = modfnext */ "modfind", /* 303 = modfind */ "kldload", /* 304 = kldload */ "kldunload", /* 305 = kldunload */ "kldfind", /* 306 = kldfind */ "kldnext", /* 307 = kldnext */ "kldstat", /* 308 = kldstat */ "kldfirstmod", /* 309 = kldfirstmod */ "getsid", /* 310 = getsid */ "setresuid", /* 311 = 
setresuid */ "setresgid", /* 312 = setresgid */ "obs_signanosleep", /* 313 = obsolete signanosleep */ "aio_return", /* 314 = aio_return */ "aio_suspend", /* 315 = aio_suspend */ "aio_cancel", /* 316 = aio_cancel */ "aio_error", /* 317 = aio_error */ "compat6.aio_read", /* 318 = freebsd6 aio_read */ "compat6.aio_write", /* 319 = freebsd6 aio_write */ "compat6.lio_listio", /* 320 = freebsd6 lio_listio */ "yield", /* 321 = yield */ "obs_thr_sleep", /* 322 = obsolete thr_sleep */ "obs_thr_wakeup", /* 323 = obsolete thr_wakeup */ "mlockall", /* 324 = mlockall */ "munlockall", /* 325 = munlockall */ "__getcwd", /* 326 = __getcwd */ "sched_setparam", /* 327 = sched_setparam */ "sched_getparam", /* 328 = sched_getparam */ "sched_setscheduler", /* 329 = sched_setscheduler */ "sched_getscheduler", /* 330 = sched_getscheduler */ "sched_yield", /* 331 = sched_yield */ "sched_get_priority_max", /* 332 = sched_get_priority_max */ "sched_get_priority_min", /* 333 = sched_get_priority_min */ "sched_rr_get_interval", /* 334 = sched_rr_get_interval */ "utrace", /* 335 = utrace */ "compat4.sendfile", /* 336 = freebsd4 sendfile */ "kldsym", /* 337 = kldsym */ "jail", /* 338 = jail */ "nnpfs_syscall", /* 339 = nnpfs_syscall */ "sigprocmask", /* 340 = sigprocmask */ "sigsuspend", /* 341 = sigsuspend */ "compat4.sigaction", /* 342 = freebsd4 sigaction */ "sigpending", /* 343 = sigpending */ "compat4.sigreturn", /* 344 = freebsd4 sigreturn */ "sigtimedwait", /* 345 = sigtimedwait */ "sigwaitinfo", /* 346 = sigwaitinfo */ "__acl_get_file", /* 347 = __acl_get_file */ "__acl_set_file", /* 348 = __acl_set_file */ "__acl_get_fd", /* 349 = __acl_get_fd */ "__acl_set_fd", /* 350 = __acl_set_fd */ "__acl_delete_file", /* 351 = __acl_delete_file */ "__acl_delete_fd", /* 352 = __acl_delete_fd */ "__acl_aclcheck_file", /* 353 = __acl_aclcheck_file */ "__acl_aclcheck_fd", /* 354 = __acl_aclcheck_fd */ "extattrctl", /* 355 = extattrctl */ "extattr_set_file", /* 356 = extattr_set_file */ "extattr_get_file", /* 357 = extattr_get_file */ "extattr_delete_file", /* 358 = extattr_delete_file */ "aio_waitcomplete", /* 359 = aio_waitcomplete */ "getresuid", /* 360 = getresuid */ "getresgid", /* 361 = getresgid */ "kqueue", /* 362 = kqueue */ "compat11.kevent", /* 363 = freebsd11 kevent */ "#364", /* 364 = __cap_get_proc */ "#365", /* 365 = __cap_set_proc */ "#366", /* 366 = __cap_get_fd */ "#367", /* 367 = __cap_get_file */ "#368", /* 368 = __cap_set_fd */ "#369", /* 369 = __cap_set_file */ "#370", /* 370 = nosys */ "extattr_set_fd", /* 371 = extattr_set_fd */ "extattr_get_fd", /* 372 = extattr_get_fd */ "extattr_delete_fd", /* 373 = extattr_delete_fd */ "__setugid", /* 374 = __setugid */ "#375", /* 375 = nfsclnt */ "eaccess", /* 376 = eaccess */ "afs3_syscall", /* 377 = afs3_syscall */ "nmount", /* 378 = nmount */ "#379", /* 379 = kse_exit */ "#380", /* 380 = kse_wakeup */ "#381", /* 381 = kse_create */ "#382", /* 382 = kse_thr_interrupt */ "#383", /* 383 = kse_release */ "__mac_get_proc", /* 384 = __mac_get_proc */ "__mac_set_proc", /* 385 = __mac_set_proc */ "__mac_get_fd", /* 386 = __mac_get_fd */ "__mac_get_file", /* 387 = __mac_get_file */ "__mac_set_fd", /* 388 = __mac_set_fd */ "__mac_set_file", /* 389 = __mac_set_file */ "kenv", /* 390 = kenv */ "lchflags", /* 391 = lchflags */ "uuidgen", /* 392 = uuidgen */ "sendfile", /* 393 = sendfile */ "mac_syscall", /* 394 = mac_syscall */ "compat11.getfsstat", /* 395 = freebsd11 getfsstat */ "compat11.statfs", /* 396 = freebsd11 statfs */ "compat11.fstatfs", /* 397 = freebsd11 fstatfs 
*/ "compat11.fhstatfs", /* 398 = freebsd11 fhstatfs */ "#399", /* 399 = nosys */ "ksem_close", /* 400 = ksem_close */ "ksem_post", /* 401 = ksem_post */ "ksem_wait", /* 402 = ksem_wait */ "ksem_trywait", /* 403 = ksem_trywait */ "ksem_init", /* 404 = ksem_init */ "ksem_open", /* 405 = ksem_open */ "ksem_unlink", /* 406 = ksem_unlink */ "ksem_getvalue", /* 407 = ksem_getvalue */ "ksem_destroy", /* 408 = ksem_destroy */ "__mac_get_pid", /* 409 = __mac_get_pid */ "__mac_get_link", /* 410 = __mac_get_link */ "__mac_set_link", /* 411 = __mac_set_link */ "extattr_set_link", /* 412 = extattr_set_link */ "extattr_get_link", /* 413 = extattr_get_link */ "extattr_delete_link", /* 414 = extattr_delete_link */ "__mac_execve", /* 415 = __mac_execve */ "sigaction", /* 416 = sigaction */ "sigreturn", /* 417 = sigreturn */ "#418", /* 418 = __xstat */ "#419", /* 419 = __xfstat */ "#420", /* 420 = __xlstat */ "getcontext", /* 421 = getcontext */ "setcontext", /* 422 = setcontext */ "swapcontext", /* 423 = swapcontext */ "swapoff", /* 424 = swapoff */ "__acl_get_link", /* 425 = __acl_get_link */ "__acl_set_link", /* 426 = __acl_set_link */ "__acl_delete_link", /* 427 = __acl_delete_link */ "__acl_aclcheck_link", /* 428 = __acl_aclcheck_link */ "sigwait", /* 429 = sigwait */ "thr_create", /* 430 = thr_create */ "thr_exit", /* 431 = thr_exit */ "thr_self", /* 432 = thr_self */ "thr_kill", /* 433 = thr_kill */ "#434", /* 434 = nosys */ "#435", /* 435 = nosys */ "jail_attach", /* 436 = jail_attach */ "extattr_list_fd", /* 437 = extattr_list_fd */ "extattr_list_file", /* 438 = extattr_list_file */ "extattr_list_link", /* 439 = extattr_list_link */ "#440", /* 440 = kse_switchin */ "ksem_timedwait", /* 441 = ksem_timedwait */ "thr_suspend", /* 442 = thr_suspend */ "thr_wake", /* 443 = thr_wake */ "kldunloadf", /* 444 = kldunloadf */ "audit", /* 445 = audit */ "auditon", /* 446 = auditon */ "getauid", /* 447 = getauid */ "setauid", /* 448 = setauid */ "getaudit", /* 449 = getaudit */ "setaudit", /* 450 = setaudit */ "getaudit_addr", /* 451 = getaudit_addr */ "setaudit_addr", /* 452 = setaudit_addr */ "auditctl", /* 453 = auditctl */ "_umtx_op", /* 454 = _umtx_op */ "thr_new", /* 455 = thr_new */ "sigqueue", /* 456 = sigqueue */ "kmq_open", /* 457 = kmq_open */ "kmq_setattr", /* 458 = kmq_setattr */ "kmq_timedreceive", /* 459 = kmq_timedreceive */ "kmq_timedsend", /* 460 = kmq_timedsend */ "kmq_notify", /* 461 = kmq_notify */ "kmq_unlink", /* 462 = kmq_unlink */ "abort2", /* 463 = abort2 */ "thr_set_name", /* 464 = thr_set_name */ "aio_fsync", /* 465 = aio_fsync */ "rtprio_thread", /* 466 = rtprio_thread */ "#467", /* 467 = nosys */ "#468", /* 468 = nosys */ "#469", /* 469 = __getpath_fromfd */ "#470", /* 470 = __getpath_fromaddr */ "sctp_peeloff", /* 471 = sctp_peeloff */ "sctp_generic_sendmsg", /* 472 = sctp_generic_sendmsg */ "sctp_generic_sendmsg_iov", /* 473 = sctp_generic_sendmsg_iov */ "sctp_generic_recvmsg", /* 474 = sctp_generic_recvmsg */ "pread", /* 475 = pread */ "pwrite", /* 476 = pwrite */ "mmap", /* 477 = mmap */ "lseek", /* 478 = lseek */ "truncate", /* 479 = truncate */ "ftruncate", /* 480 = ftruncate */ "thr_kill2", /* 481 = thr_kill2 */ "shm_open", /* 482 = shm_open */ "shm_unlink", /* 483 = shm_unlink */ "cpuset", /* 484 = cpuset */ "cpuset_setid", /* 485 = cpuset_setid */ "cpuset_getid", /* 486 = cpuset_getid */ "cpuset_getaffinity", /* 487 = cpuset_getaffinity */ "cpuset_setaffinity", /* 488 = cpuset_setaffinity */ "faccessat", /* 489 = faccessat */ "fchmodat", /* 490 = fchmodat */ "fchownat", 
/* 491 = fchownat */ "fexecve", /* 492 = fexecve */ "compat11.fstatat", /* 493 = freebsd11 fstatat */ "futimesat", /* 494 = futimesat */ "linkat", /* 495 = linkat */ "mkdirat", /* 496 = mkdirat */ "mkfifoat", /* 497 = mkfifoat */ "compat11.mknodat", /* 498 = freebsd11 mknodat */ "openat", /* 499 = openat */ "readlinkat", /* 500 = readlinkat */ "renameat", /* 501 = renameat */ "symlinkat", /* 502 = symlinkat */ "unlinkat", /* 503 = unlinkat */ "posix_openpt", /* 504 = posix_openpt */ "gssd_syscall", /* 505 = gssd_syscall */ "jail_get", /* 506 = jail_get */ "jail_set", /* 507 = jail_set */ "jail_remove", /* 508 = jail_remove */ "closefrom", /* 509 = closefrom */ "__semctl", /* 510 = __semctl */ "msgctl", /* 511 = msgctl */ "shmctl", /* 512 = shmctl */ "lpathconf", /* 513 = lpathconf */ "obs_cap_new", /* 514 = obsolete cap_new */ "__cap_rights_get", /* 515 = __cap_rights_get */ "cap_enter", /* 516 = cap_enter */ "cap_getmode", /* 517 = cap_getmode */ "pdfork", /* 518 = pdfork */ "pdkill", /* 519 = pdkill */ "pdgetpid", /* 520 = pdgetpid */ "#521", /* 521 = pdwait4 */ "pselect", /* 522 = pselect */ "getloginclass", /* 523 = getloginclass */ "setloginclass", /* 524 = setloginclass */ "rctl_get_racct", /* 525 = rctl_get_racct */ "rctl_get_rules", /* 526 = rctl_get_rules */ "rctl_get_limits", /* 527 = rctl_get_limits */ "rctl_add_rule", /* 528 = rctl_add_rule */ "rctl_remove_rule", /* 529 = rctl_remove_rule */ "posix_fallocate", /* 530 = posix_fallocate */ "posix_fadvise", /* 531 = posix_fadvise */ "wait6", /* 532 = wait6 */ "cap_rights_limit", /* 533 = cap_rights_limit */ "cap_ioctls_limit", /* 534 = cap_ioctls_limit */ "cap_ioctls_get", /* 535 = cap_ioctls_get */ "cap_fcntls_limit", /* 536 = cap_fcntls_limit */ "cap_fcntls_get", /* 537 = cap_fcntls_get */ "bindat", /* 538 = bindat */ "connectat", /* 539 = connectat */ "chflagsat", /* 540 = chflagsat */ "accept4", /* 541 = accept4 */ "pipe2", /* 542 = pipe2 */ "aio_mlock", /* 543 = aio_mlock */ "procctl", /* 544 = procctl */ "ppoll", /* 545 = ppoll */ "futimens", /* 546 = futimens */ "utimensat", /* 547 = utimensat */ "numa_getaffinity", /* 548 = numa_getaffinity */ "numa_setaffinity", /* 549 = numa_setaffinity */ "fdatasync", /* 550 = fdatasync */ "fstat", /* 551 = fstat */ "fstatat", /* 552 = fstatat */ "fhstat", /* 553 = fhstat */ "getdirentries", /* 554 = getdirentries */ "statfs", /* 555 = statfs */ "fstatfs", /* 556 = fstatfs */ "getfsstat", /* 557 = getfsstat */ "fhstatfs", /* 558 = fhstatfs */ "mknodat", /* 559 = mknodat */ "kevent", /* 560 = kevent */ + "cpuset_getdomain", /* 561 = cpuset_getdomain */ + "cpuset_setdomain", /* 562 = cpuset_setdomain */ }; Index: user/jeff/numa/sys/kern/syscalls.master =================================================================== --- user/jeff/numa/sys/kern/syscalls.master (revision 326888) +++ user/jeff/numa/sys/kern/syscalls.master (revision 326889) @@ -1,1029 +1,1037 @@ $FreeBSD$ ; from: @(#)syscalls.master 8.2 (Berkeley) 1/13/94 ; ; System call name/number master file. ; Processed to created init_sysent.c, syscalls.c and syscall.h. ; Columns: number audit type name alt{name,tag,rtyp}/comments ; number system call number, must be in order ; audit the audit event associated with the system call ; A value of AUE_NULL means no auditing, but it also means that ; there is no audit event for the call at this time. For the ; case where the event exists, but we don't want auditing, the ; event should be #defined to AUE_NULL in audit_kevents.h. 
; type one of STD, OBSOL, UNIMPL, COMPAT, COMPAT4, COMPAT6, ; COMPAT7, COMPAT11, NODEF, NOARGS, NOPROTO, NOSTD ; The COMPAT* options may be combined with one or more NO* ; options separated by '|' with no spaces (e.g. COMPAT|NOARGS) ; name pseudo-prototype of syscall routine ; If one of the following alts is different, then all appear: ; altname name of system call if different ; alttag name of args struct tag if different from [o]`name'"_args" ; altrtyp return type if not int (bogus - syscalls always return int) ; for UNIMPL/OBSOL, name continues with comments ; types: ; STD always included ; COMPAT included on COMPAT #ifdef ; COMPAT4 included on COMPAT_FREEBSD4 #ifdef (FreeBSD 4 compat) ; COMPAT6 included on COMPAT_FREEBSD6 #ifdef (FreeBSD 6 compat) ; COMPAT7 included on COMPAT_FREEBSD7 #ifdef (FreeBSD 7 compat) ; COMPAT10 included on COMPAT_FREEBSD10 #ifdef (FreeBSD 10 compat) ; COMPAT11 included on COMPAT_FREEBSD11 #ifdef (FreeBSD 11 compat) ; OBSOL obsolete, not included in system, only specifies name ; UNIMPL not implemented, placeholder only ; NOSTD implemented but as a lkm that can be statically ; compiled in; sysent entry will be filled with lkmressys ; so the SYSCALL_MODULE macro works ; NOARGS same as STD except do not create structure in sys/sysproto.h ; NODEF same as STD except only have the entry in the syscall table ; added. Meaning - do not create structure or function ; prototype in sys/sysproto.h ; NOPROTO same as STD except do not create structure or ; function prototype in sys/sysproto.h. Does add a ; definition to syscall.h besides adding a sysent. ; NOTSTATIC syscall is loadable ; ; Please copy any additions and changes to the following compatibility tables: ; sys/compat/freebsd32/syscalls.master ; #ifdef's, etc. may be included, and are copied to the output files. #include <sys/param.h> #include <sys/sysent.h> #include <sys/sysproto.h> ; Reserved/unimplemented system calls in the range 0-150 inclusive ; are reserved for use in future Berkeley releases. ; Additional system calls implemented in vendor and other ; redistributions should be placed in the reserved range at the end ; of the current calls. 0 AUE_NULL STD { int nosys(void); } syscall nosys_args int 1 AUE_EXIT STD { void sys_exit(int rval); } exit \ sys_exit_args void 2 AUE_FORK STD { int fork(void); } 3 AUE_READ STD { ssize_t read(int fd, void *buf, \ size_t nbyte); } 4 AUE_WRITE STD { ssize_t write(int fd, const void *buf, \ size_t nbyte); } 5 AUE_OPEN_RWTC STD { int open(char *path, int flags, int mode); } ; XXX should be { int open(const char *path, int flags, ...); } ; but we're not ready for `const' or varargs. ; XXX man page says `mode_t mode'.
6 AUE_CLOSE STD { int close(int fd); } 7 AUE_WAIT4 STD { int wait4(int pid, int *status, \ int options, struct rusage *rusage); } 8 AUE_CREAT COMPAT { int creat(char *path, int mode); } 9 AUE_LINK STD { int link(char *path, char *link); } 10 AUE_UNLINK STD { int unlink(char *path); } 11 AUE_NULL OBSOL execv 12 AUE_CHDIR STD { int chdir(char *path); } 13 AUE_FCHDIR STD { int fchdir(int fd); } 14 AUE_MKNOD COMPAT11 { int mknod(char *path, int mode, int dev); } 15 AUE_CHMOD STD { int chmod(char *path, int mode); } 16 AUE_CHOWN STD { int chown(char *path, int uid, int gid); } 17 AUE_NULL STD { int obreak(char *nsize); } break \ obreak_args int 18 AUE_GETFSSTAT COMPAT4 { int getfsstat(struct ostatfs *buf, \ long bufsize, int mode); } 19 AUE_LSEEK COMPAT { long lseek(int fd, long offset, \ int whence); } 20 AUE_GETPID STD { pid_t getpid(void); } 21 AUE_MOUNT STD { int mount(char *type, char *path, \ int flags, caddr_t data); } ; XXX `path' should have type `const char *' but we're not ready for that. 22 AUE_UMOUNT STD { int unmount(char *path, int flags); } 23 AUE_SETUID STD { int setuid(uid_t uid); } 24 AUE_GETUID STD { uid_t getuid(void); } 25 AUE_GETEUID STD { uid_t geteuid(void); } 26 AUE_PTRACE STD { int ptrace(int req, pid_t pid, \ caddr_t addr, int data); } 27 AUE_RECVMSG STD { int recvmsg(int s, struct msghdr *msg, \ int flags); } 28 AUE_SENDMSG STD { int sendmsg(int s, struct msghdr *msg, \ int flags); } 29 AUE_RECVFROM STD { int recvfrom(int s, caddr_t buf, \ size_t len, int flags, \ struct sockaddr * __restrict from, \ __socklen_t * __restrict fromlenaddr); } 30 AUE_ACCEPT STD { int accept(int s, \ struct sockaddr * __restrict name, \ __socklen_t * __restrict anamelen); } 31 AUE_GETPEERNAME STD { int getpeername(int fdes, \ struct sockaddr * __restrict asa, \ __socklen_t * __restrict alen); } 32 AUE_GETSOCKNAME STD { int getsockname(int fdes, \ struct sockaddr * __restrict asa, \ __socklen_t * __restrict alen); } 33 AUE_ACCESS STD { int access(char *path, int amode); } 34 AUE_CHFLAGS STD { int chflags(const char *path, u_long flags); } 35 AUE_FCHFLAGS STD { int fchflags(int fd, u_long flags); } 36 AUE_SYNC STD { int sync(void); } 37 AUE_KILL STD { int kill(int pid, int signum); } 38 AUE_STAT COMPAT { int stat(char *path, struct ostat *ub); } 39 AUE_GETPPID STD { pid_t getppid(void); } 40 AUE_LSTAT COMPAT { int lstat(char *path, struct ostat *ub); } 41 AUE_DUP STD { int dup(u_int fd); } 42 AUE_PIPE COMPAT10 { int pipe(void); } 43 AUE_GETEGID STD { gid_t getegid(void); } 44 AUE_PROFILE STD { int profil(caddr_t samples, size_t size, \ size_t offset, u_int scale); } 45 AUE_KTRACE STD { int ktrace(const char *fname, int ops, \ int facs, int pid); } 46 AUE_SIGACTION COMPAT { int sigaction(int signum, \ struct osigaction *nsa, \ struct osigaction *osa); } 47 AUE_GETGID STD { gid_t getgid(void); } 48 AUE_SIGPROCMASK COMPAT { int sigprocmask(int how, osigset_t mask); } ; XXX note nonstandard (bogus) calling convention - the libc stub passes ; us the mask, not a pointer to it, and we return the old mask as the ; (int) return value. 
49 AUE_GETLOGIN STD { int getlogin(char *namebuf, u_int \ namelen); } 50 AUE_SETLOGIN STD { int setlogin(char *namebuf); } 51 AUE_ACCT STD { int acct(char *path); } 52 AUE_SIGPENDING COMPAT { int sigpending(void); } 53 AUE_SIGALTSTACK STD { int sigaltstack(stack_t *ss, \ stack_t *oss); } 54 AUE_IOCTL STD { int ioctl(int fd, u_long com, \ caddr_t data); } 55 AUE_REBOOT STD { int reboot(int opt); } 56 AUE_REVOKE STD { int revoke(char *path); } 57 AUE_SYMLINK STD { int symlink(char *path, char *link); } 58 AUE_READLINK STD { ssize_t readlink(char *path, char *buf, \ size_t count); } 59 AUE_EXECVE STD { int execve(char *fname, char **argv, \ char **envv); } 60 AUE_UMASK STD { int umask(int newmask); } umask umask_args \ int 61 AUE_CHROOT STD { int chroot(char *path); } 62 AUE_FSTAT COMPAT { int fstat(int fd, struct ostat *sb); } 63 AUE_NULL COMPAT { int getkerninfo(int op, char *where, \ size_t *size, int arg); } getkerninfo \ getkerninfo_args int 64 AUE_NULL COMPAT { int getpagesize(void); } getpagesize \ getpagesize_args int 65 AUE_MSYNC STD { int msync(void *addr, size_t len, \ int flags); } 66 AUE_VFORK STD { int vfork(void); } 67 AUE_NULL OBSOL vread 68 AUE_NULL OBSOL vwrite 69 AUE_SBRK STD { int sbrk(int incr); } 70 AUE_SSTK STD { int sstk(int incr); } 71 AUE_MMAP COMPAT { int mmap(void *addr, int len, int prot, \ int flags, int fd, long pos); } 72 AUE_O_VADVISE STD { int ovadvise(int anom); } vadvise \ ovadvise_args int 73 AUE_MUNMAP STD { int munmap(void *addr, size_t len); } 74 AUE_MPROTECT STD { int mprotect(void *addr, size_t len, \ int prot); } 75 AUE_MADVISE STD { int madvise(void *addr, size_t len, \ int behav); } 76 AUE_NULL OBSOL vhangup 77 AUE_NULL OBSOL vlimit 78 AUE_MINCORE STD { int mincore(const void *addr, size_t len, \ char *vec); } 79 AUE_GETGROUPS STD { int getgroups(u_int gidsetsize, \ gid_t *gidset); } 80 AUE_SETGROUPS STD { int setgroups(u_int gidsetsize, \ gid_t *gidset); } 81 AUE_GETPGRP STD { int getpgrp(void); } 82 AUE_SETPGRP STD { int setpgid(int pid, int pgid); } 83 AUE_SETITIMER STD { int setitimer(u_int which, struct \ itimerval *itv, struct itimerval *oitv); } 84 AUE_WAIT4 COMPAT { int wait(void); } 85 AUE_SWAPON STD { int swapon(char *name); } 86 AUE_GETITIMER STD { int getitimer(u_int which, \ struct itimerval *itv); } 87 AUE_SYSCTL COMPAT { int gethostname(char *hostname, \ u_int len); } gethostname \ gethostname_args int 88 AUE_SYSCTL COMPAT { int sethostname(char *hostname, \ u_int len); } sethostname \ sethostname_args int 89 AUE_GETDTABLESIZE STD { int getdtablesize(void); } 90 AUE_DUP2 STD { int dup2(u_int from, u_int to); } 91 AUE_NULL UNIMPL getdopt 92 AUE_FCNTL STD { int fcntl(int fd, int cmd, long arg); } ; XXX should be { int fcntl(int fd, int cmd, ...); } ; but we're not ready for varargs. 
93 AUE_SELECT STD { int select(int nd, fd_set *in, fd_set *ou, \ fd_set *ex, struct timeval *tv); } 94 AUE_NULL UNIMPL setdopt 95 AUE_FSYNC STD { int fsync(int fd); } 96 AUE_SETPRIORITY STD { int setpriority(int which, int who, \ int prio); } 97 AUE_SOCKET STD { int socket(int domain, int type, \ int protocol); } 98 AUE_CONNECT STD { int connect(int s, caddr_t name, \ int namelen); } 99 AUE_ACCEPT COMPAT|NOARGS { int accept(int s, caddr_t name, \ int *anamelen); } accept accept_args int 100 AUE_GETPRIORITY STD { int getpriority(int which, int who); } 101 AUE_SEND COMPAT { int send(int s, caddr_t buf, int len, \ int flags); } 102 AUE_RECV COMPAT { int recv(int s, caddr_t buf, int len, \ int flags); } 103 AUE_SIGRETURN COMPAT { int sigreturn( \ struct osigcontext *sigcntxp); } 104 AUE_BIND STD { int bind(int s, caddr_t name, \ int namelen); } 105 AUE_SETSOCKOPT STD { int setsockopt(int s, int level, int name, \ caddr_t val, int valsize); } 106 AUE_LISTEN STD { int listen(int s, int backlog); } 107 AUE_NULL OBSOL vtimes 108 AUE_NULL COMPAT { int sigvec(int signum, struct sigvec *nsv, \ struct sigvec *osv); } 109 AUE_NULL COMPAT { int sigblock(int mask); } 110 AUE_NULL COMPAT { int sigsetmask(int mask); } 111 AUE_NULL COMPAT { int sigsuspend(osigset_t mask); } ; XXX note nonstandard (bogus) calling convention - the libc stub passes ; us the mask, not a pointer to it. 112 AUE_NULL COMPAT { int sigstack(struct sigstack *nss, \ struct sigstack *oss); } 113 AUE_RECVMSG COMPAT { int recvmsg(int s, struct omsghdr *msg, \ int flags); } 114 AUE_SENDMSG COMPAT { int sendmsg(int s, caddr_t msg, \ int flags); } 115 AUE_NULL OBSOL vtrace 116 AUE_GETTIMEOFDAY STD { int gettimeofday(struct timeval *tp, \ struct timezone *tzp); } 117 AUE_GETRUSAGE STD { int getrusage(int who, \ struct rusage *rusage); } 118 AUE_GETSOCKOPT STD { int getsockopt(int s, int level, int name, \ caddr_t val, int *avalsize); } 119 AUE_NULL UNIMPL resuba (BSD/OS 2.x) 120 AUE_READV STD { int readv(int fd, struct iovec *iovp, \ u_int iovcnt); } 121 AUE_WRITEV STD { int writev(int fd, struct iovec *iovp, \ u_int iovcnt); } 122 AUE_SETTIMEOFDAY STD { int settimeofday(struct timeval *tv, \ struct timezone *tzp); } 123 AUE_FCHOWN STD { int fchown(int fd, int uid, int gid); } 124 AUE_FCHMOD STD { int fchmod(int fd, int mode); } 125 AUE_RECVFROM COMPAT|NOARGS { int recvfrom(int s, caddr_t buf, \ size_t len, int flags, caddr_t from, int \ *fromlenaddr); } recvfrom recvfrom_args \ int 126 AUE_SETREUID STD { int setreuid(int ruid, int euid); } 127 AUE_SETREGID STD { int setregid(int rgid, int egid); } 128 AUE_RENAME STD { int rename(char *from, char *to); } 129 AUE_TRUNCATE COMPAT { int truncate(char *path, long length); } 130 AUE_FTRUNCATE COMPAT { int ftruncate(int fd, long length); } 131 AUE_FLOCK STD { int flock(int fd, int how); } 132 AUE_MKFIFO STD { int mkfifo(char *path, int mode); } 133 AUE_SENDTO STD { int sendto(int s, caddr_t buf, size_t len, \ int flags, caddr_t to, int tolen); } 134 AUE_SHUTDOWN STD { int shutdown(int s, int how); } 135 AUE_SOCKETPAIR STD { int socketpair(int domain, int type, \ int protocol, int *rsv); } 136 AUE_MKDIR STD { int mkdir(char *path, int mode); } 137 AUE_RMDIR STD { int rmdir(char *path); } 138 AUE_UTIMES STD { int utimes(char *path, \ struct timeval *tptr); } 139 AUE_NULL OBSOL 4.2 sigreturn 140 AUE_ADJTIME STD { int adjtime(struct timeval *delta, \ struct timeval *olddelta); } 141 AUE_GETPEERNAME COMPAT { int getpeername(int fdes, caddr_t asa, \ int *alen); } 142 AUE_SYSCTL COMPAT { long 
gethostid(void); } 143 AUE_SYSCTL COMPAT { int sethostid(long hostid); } 144 AUE_GETRLIMIT COMPAT { int getrlimit(u_int which, struct \ orlimit *rlp); } 145 AUE_SETRLIMIT COMPAT { int setrlimit(u_int which, \ struct orlimit *rlp); } 146 AUE_KILLPG COMPAT { int killpg(int pgid, int signum); } 147 AUE_SETSID STD { int setsid(void); } 148 AUE_QUOTACTL STD { int quotactl(char *path, int cmd, int uid, \ caddr_t arg); } 149 AUE_O_QUOTA COMPAT { int quota(void); } 150 AUE_GETSOCKNAME COMPAT|NOARGS { int getsockname(int fdec, \ caddr_t asa, int *alen); } getsockname \ getsockname_args int ; Syscalls 151-180 inclusive are reserved for vendor-specific ; system calls. (This includes various calls added for compatibity ; with other Unix variants.) ; Some of these calls are now supported by BSD... 151 AUE_NULL UNIMPL sem_lock (BSD/OS 2.x) 152 AUE_NULL UNIMPL sem_wakeup (BSD/OS 2.x) 153 AUE_NULL UNIMPL asyncdaemon (BSD/OS 2.x) ; 154 is initialised by the NLM code, if present. 154 AUE_NULL NOSTD { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } ; 155 is initialized by the NFS code, if present. 155 AUE_NFS_SVC NOSTD { int nfssvc(int flag, caddr_t argp); } 156 AUE_GETDIRENTRIES COMPAT { int getdirentries(int fd, char *buf, \ u_int count, long *basep); } 157 AUE_STATFS COMPAT4 { int statfs(char *path, \ struct ostatfs *buf); } 158 AUE_FSTATFS COMPAT4 { int fstatfs(int fd, \ struct ostatfs *buf); } 159 AUE_NULL UNIMPL nosys 160 AUE_LGETFH STD { int lgetfh(char *fname, \ struct fhandle *fhp); } 161 AUE_NFS_GETFH STD { int getfh(char *fname, \ struct fhandle *fhp); } 162 AUE_SYSCTL COMPAT4 { int getdomainname(char *domainname, \ int len); } 163 AUE_SYSCTL COMPAT4 { int setdomainname(char *domainname, \ int len); } 164 AUE_NULL COMPAT4 { int uname(struct utsname *name); } 165 AUE_SYSARCH STD { int sysarch(int op, char *parms); } 166 AUE_RTPRIO STD { int rtprio(int function, pid_t pid, \ struct rtprio *rtp); } 167 AUE_NULL UNIMPL nosys 168 AUE_NULL UNIMPL nosys 169 AUE_SEMSYS NOSTD { int semsys(int which, int a2, int a3, \ int a4, int a5); } ; XXX should be { int semsys(int which, ...); } 170 AUE_MSGSYS NOSTD { int msgsys(int which, int a2, int a3, \ int a4, int a5, int a6); } ; XXX should be { int msgsys(int which, ...); } 171 AUE_SHMSYS NOSTD { int shmsys(int which, int a2, int a3, \ int a4); } ; XXX should be { int shmsys(int which, ...); } 172 AUE_NULL UNIMPL nosys 173 AUE_PREAD COMPAT6 { ssize_t pread(int fd, void *buf, \ size_t nbyte, int pad, off_t offset); } 174 AUE_PWRITE COMPAT6 { ssize_t pwrite(int fd, \ const void *buf, \ size_t nbyte, int pad, off_t offset); } 175 AUE_SETFIB STD { int setfib(int fibnum); } 176 AUE_NTP_ADJTIME STD { int ntp_adjtime(struct timex *tp); } 177 AUE_NULL UNIMPL sfork (BSD/OS 2.x) 178 AUE_NULL UNIMPL getdescriptor (BSD/OS 2.x) 179 AUE_NULL UNIMPL setdescriptor (BSD/OS 2.x) 180 AUE_NULL UNIMPL nosys ; Syscalls 181-199 are used by/reserved for BSD 181 AUE_SETGID STD { int setgid(gid_t gid); } 182 AUE_SETEGID STD { int setegid(gid_t egid); } 183 AUE_SETEUID STD { int seteuid(uid_t euid); } 184 AUE_NULL UNIMPL lfs_bmapv 185 AUE_NULL UNIMPL lfs_markv 186 AUE_NULL UNIMPL lfs_segclean 187 AUE_NULL UNIMPL lfs_segwait 188 AUE_STAT COMPAT11 { int stat(char *path, \ struct freebsd11_stat *ub); } 189 AUE_FSTAT COMPAT11 { int fstat(int fd, \ struct freebsd11_stat *sb); } 190 AUE_LSTAT COMPAT11 { int lstat(char *path, \ struct freebsd11_stat *ub); } 191 AUE_PATHCONF STD { int pathconf(char *path, int name); } 192 AUE_FPATHCONF STD { int fpathconf(int 
fd, int name); } 193 AUE_NULL UNIMPL nosys 194 AUE_GETRLIMIT STD { int getrlimit(u_int which, \ struct rlimit *rlp); } getrlimit \ __getrlimit_args int 195 AUE_SETRLIMIT STD { int setrlimit(u_int which, \ struct rlimit *rlp); } setrlimit \ __setrlimit_args int 196 AUE_GETDIRENTRIES COMPAT11 { int getdirentries(int fd, char *buf, \ u_int count, long *basep); } 197 AUE_MMAP COMPAT6 { caddr_t mmap(caddr_t addr, \ size_t len, int prot, int flags, int fd, \ int pad, off_t pos); } 198 AUE_NULL NOPROTO { int nosys(void); } __syscall \ __syscall_args int 199 AUE_LSEEK COMPAT6 { off_t lseek(int fd, int pad, \ off_t offset, int whence); } 200 AUE_TRUNCATE COMPAT6 { int truncate(char *path, int pad, \ off_t length); } 201 AUE_FTRUNCATE COMPAT6 { int ftruncate(int fd, int pad, \ off_t length); } 202 AUE_SYSCTL STD { int __sysctl(int *name, u_int namelen, \ void *old, size_t *oldlenp, void *new, \ size_t newlen); } __sysctl sysctl_args int 203 AUE_MLOCK STD { int mlock(const void *addr, size_t len); } 204 AUE_MUNLOCK STD { int munlock(const void *addr, size_t len); } 205 AUE_UNDELETE STD { int undelete(char *path); } 206 AUE_FUTIMES STD { int futimes(int fd, struct timeval *tptr); } 207 AUE_GETPGID STD { int getpgid(pid_t pid); } 208 AUE_NULL UNIMPL newreboot (NetBSD) 209 AUE_POLL STD { int poll(struct pollfd *fds, u_int nfds, \ int timeout); } ; ; The following are reserved for loadable syscalls ; 210 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 211 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 212 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 213 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 214 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 215 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 216 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 217 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 218 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int 219 AUE_NULL NODEF|NOTSTATIC lkmnosys lkmnosys nosys_args int ; ; The following were introduced with NetBSD/4.4Lite-2 220 AUE_SEMCTL COMPAT7|NOSTD { int __semctl(int semid, int semnum, \ int cmd, union semun_old *arg); } 221 AUE_SEMGET NOSTD { int semget(key_t key, int nsems, \ int semflg); } 222 AUE_SEMOP NOSTD { int semop(int semid, struct sembuf *sops, \ size_t nsops); } 223 AUE_NULL UNIMPL semconfig 224 AUE_MSGCTL COMPAT7|NOSTD { int msgctl(int msqid, int cmd, \ struct msqid_ds_old *buf); } 225 AUE_MSGGET NOSTD { int msgget(key_t key, int msgflg); } 226 AUE_MSGSND NOSTD { int msgsnd(int msqid, const void *msgp, \ size_t msgsz, int msgflg); } 227 AUE_MSGRCV NOSTD { ssize_t msgrcv(int msqid, void *msgp, \ size_t msgsz, long msgtyp, int msgflg); } 228 AUE_SHMAT NOSTD { int shmat(int shmid, const void *shmaddr, \ int shmflg); } 229 AUE_SHMCTL COMPAT7|NOSTD { int shmctl(int shmid, int cmd, \ struct shmid_ds_old *buf); } 230 AUE_SHMDT NOSTD { int shmdt(const void *shmaddr); } 231 AUE_SHMGET NOSTD { int shmget(key_t key, size_t size, \ int shmflg); } ; 232 AUE_NULL STD { int clock_gettime(clockid_t clock_id, \ struct timespec *tp); } 233 AUE_CLOCK_SETTIME STD { int clock_settime( \ clockid_t clock_id, \ const struct timespec *tp); } 234 AUE_NULL STD { int clock_getres(clockid_t clock_id, \ struct timespec *tp); } 235 AUE_NULL STD { int ktimer_create(clockid_t clock_id, \ struct sigevent *evp, int *timerid); } 236 AUE_NULL STD { int ktimer_delete(int timerid); } 237 AUE_NULL STD { int ktimer_settime(int timerid, int flags, \ const struct itimerspec *value, \ struct 
itimerspec *ovalue); } 238 AUE_NULL STD { int ktimer_gettime(int timerid, struct \ itimerspec *value); } 239 AUE_NULL STD { int ktimer_getoverrun(int timerid); } 240 AUE_NULL STD { int nanosleep(const struct timespec *rqtp, \ struct timespec *rmtp); } 241 AUE_NULL STD { int ffclock_getcounter(ffcounter *ffcount); } 242 AUE_NULL STD { int ffclock_setestimate( \ struct ffclock_estimate *cest); } 243 AUE_NULL STD { int ffclock_getestimate( \ struct ffclock_estimate *cest); } 244 AUE_NULL STD { int clock_nanosleep(clockid_t clock_id, \ int flags, const struct timespec *rqtp, \ struct timespec *rmtp); } 245 AUE_NULL UNIMPL nosys 246 AUE_NULL UNIMPL nosys 247 AUE_NULL STD { int clock_getcpuclockid2(id_t id,\ int which, clockid_t *clock_id); } 248 AUE_NULL STD { int ntp_gettime(struct ntptimeval *ntvp); } 249 AUE_NULL UNIMPL nosys ; syscall numbers initially used in OpenBSD 250 AUE_MINHERIT STD { int minherit(void *addr, size_t len, \ int inherit); } 251 AUE_RFORK STD { int rfork(int flags); } 252 AUE_POLL OBSOL openbsd_poll 253 AUE_ISSETUGID STD { int issetugid(void); } 254 AUE_LCHOWN STD { int lchown(char *path, int uid, int gid); } 255 AUE_AIO_READ STD { int aio_read(struct aiocb *aiocbp); } 256 AUE_AIO_WRITE STD { int aio_write(struct aiocb *aiocbp); } 257 AUE_LIO_LISTIO STD { int lio_listio(int mode, \ struct aiocb * const *acb_list, \ int nent, struct sigevent *sig); } 258 AUE_NULL UNIMPL nosys 259 AUE_NULL UNIMPL nosys 260 AUE_NULL UNIMPL nosys 261 AUE_NULL UNIMPL nosys 262 AUE_NULL UNIMPL nosys 263 AUE_NULL UNIMPL nosys 264 AUE_NULL UNIMPL nosys 265 AUE_NULL UNIMPL nosys 266 AUE_NULL UNIMPL nosys 267 AUE_NULL UNIMPL nosys 268 AUE_NULL UNIMPL nosys 269 AUE_NULL UNIMPL nosys 270 AUE_NULL UNIMPL nosys 271 AUE_NULL UNIMPL nosys 272 AUE_O_GETDENTS COMPAT11 { int getdents(int fd, char *buf, \ size_t count); } 273 AUE_NULL UNIMPL nosys 274 AUE_LCHMOD STD { int lchmod(char *path, mode_t mode); } 275 AUE_LCHOWN NOPROTO { int lchown(char *path, uid_t uid, \ gid_t gid); } netbsd_lchown lchown_args \ int 276 AUE_LUTIMES STD { int lutimes(char *path, \ struct timeval *tptr); } 277 AUE_MSYNC NOPROTO { int msync(void *addr, size_t len, \ int flags); } netbsd_msync msync_args int 278 AUE_STAT COMPAT11 { int nstat(char *path, struct nstat *ub); } 279 AUE_FSTAT COMPAT11 { int nfstat(int fd, struct nstat *sb); } 280 AUE_LSTAT COMPAT11 { int nlstat(char *path, struct nstat *ub); } 281 AUE_NULL UNIMPL nosys 282 AUE_NULL UNIMPL nosys 283 AUE_NULL UNIMPL nosys 284 AUE_NULL UNIMPL nosys 285 AUE_NULL UNIMPL nosys 286 AUE_NULL UNIMPL nosys 287 AUE_NULL UNIMPL nosys 288 AUE_NULL UNIMPL nosys ; 289 and 290 from NetBSD (OpenBSD: 267 and 268) 289 AUE_PREADV STD { ssize_t preadv(int fd, struct iovec *iovp, \ u_int iovcnt, off_t offset); } 290 AUE_PWRITEV STD { ssize_t pwritev(int fd, struct iovec *iovp, \ u_int iovcnt, off_t offset); } 291 AUE_NULL UNIMPL nosys 292 AUE_NULL UNIMPL nosys 293 AUE_NULL UNIMPL nosys 294 AUE_NULL UNIMPL nosys 295 AUE_NULL UNIMPL nosys 296 AUE_NULL UNIMPL nosys ; XXX 297 is 300 in NetBSD 297 AUE_FHSTATFS COMPAT4 { int fhstatfs( \ const struct fhandle *u_fhp, \ struct ostatfs *buf); } 298 AUE_FHOPEN STD { int fhopen(const struct fhandle *u_fhp, \ int flags); } 299 AUE_FHSTAT COMPAT11 { int fhstat(const struct fhandle *u_fhp, \ struct freebsd11_stat *sb); } ; syscall numbers for FreeBSD 300 AUE_NULL STD { int modnext(int modid); } 301 AUE_NULL STD { int modstat(int modid, \ struct module_stat *stat); } 302 AUE_NULL STD { int modfnext(int modid); } 303 AUE_NULL STD { int modfind(const char 
*name); } 304 AUE_MODLOAD STD { int kldload(const char *file); } 305 AUE_MODUNLOAD STD { int kldunload(int fileid); } 306 AUE_NULL STD { int kldfind(const char *file); } 307 AUE_NULL STD { int kldnext(int fileid); } 308 AUE_NULL STD { int kldstat(int fileid, struct \ kld_file_stat* stat); } 309 AUE_NULL STD { int kldfirstmod(int fileid); } 310 AUE_GETSID STD { int getsid(pid_t pid); } 311 AUE_SETRESUID STD { int setresuid(uid_t ruid, uid_t euid, \ uid_t suid); } 312 AUE_SETRESGID STD { int setresgid(gid_t rgid, gid_t egid, \ gid_t sgid); } 313 AUE_NULL OBSOL signanosleep 314 AUE_AIO_RETURN STD { ssize_t aio_return(struct aiocb *aiocbp); } 315 AUE_AIO_SUSPEND STD { int aio_suspend( \ struct aiocb * const * aiocbp, int nent, \ const struct timespec *timeout); } 316 AUE_AIO_CANCEL STD { int aio_cancel(int fd, \ struct aiocb *aiocbp); } 317 AUE_AIO_ERROR STD { int aio_error(struct aiocb *aiocbp); } 318 AUE_AIO_READ COMPAT6 { int aio_read(struct oaiocb *aiocbp); } 319 AUE_AIO_WRITE COMPAT6 { int aio_write(struct oaiocb *aiocbp); } 320 AUE_LIO_LISTIO COMPAT6 { int lio_listio(int mode, \ struct oaiocb * const *acb_list, \ int nent, struct osigevent *sig); } 321 AUE_NULL STD { int yield(void); } 322 AUE_NULL OBSOL thr_sleep 323 AUE_NULL OBSOL thr_wakeup 324 AUE_MLOCKALL STD { int mlockall(int how); } 325 AUE_MUNLOCKALL STD { int munlockall(void); } 326 AUE_GETCWD STD { int __getcwd(char *buf, size_t buflen); } 327 AUE_NULL STD { int sched_setparam (pid_t pid, \ const struct sched_param *param); } 328 AUE_NULL STD { int sched_getparam (pid_t pid, struct \ sched_param *param); } 329 AUE_NULL STD { int sched_setscheduler (pid_t pid, int \ policy, const struct sched_param \ *param); } 330 AUE_NULL STD { int sched_getscheduler (pid_t pid); } 331 AUE_NULL STD { int sched_yield (void); } 332 AUE_NULL STD { int sched_get_priority_max (int policy); } 333 AUE_NULL STD { int sched_get_priority_min (int policy); } 334 AUE_NULL STD { int sched_rr_get_interval (pid_t pid, \ struct timespec *interval); } 335 AUE_NULL STD { int utrace(const void *addr, size_t len); } 336 AUE_SENDFILE COMPAT4 { int sendfile(int fd, int s, \ off_t offset, size_t nbytes, \ struct sf_hdtr *hdtr, off_t *sbytes, \ int flags); } 337 AUE_NULL STD { int kldsym(int fileid, int cmd, \ void *data); } 338 AUE_JAIL STD { int jail(struct jail *jail); } 339 AUE_NULL NOSTD|NOTSTATIC { int nnpfs_syscall(int operation, \ char *a_pathP, int a_opcode, \ void *a_paramsP, int a_followSymlinks); } 340 AUE_SIGPROCMASK STD { int sigprocmask(int how, \ const sigset_t *set, sigset_t *oset); } 341 AUE_SIGSUSPEND STD { int sigsuspend(const sigset_t *sigmask); } 342 AUE_SIGACTION COMPAT4 { int sigaction(int sig, const \ struct sigaction *act, \ struct sigaction *oact); } 343 AUE_SIGPENDING STD { int sigpending(sigset_t *set); } 344 AUE_SIGRETURN COMPAT4 { int sigreturn( \ const struct ucontext4 *sigcntxp); } 345 AUE_SIGWAIT STD { int sigtimedwait(const sigset_t *set, \ siginfo_t *info, \ const struct timespec *timeout); } 346 AUE_NULL STD { int sigwaitinfo(const sigset_t *set, \ siginfo_t *info); } 347 AUE_ACL_GET_FILE STD { int __acl_get_file(const char *path, \ acl_type_t type, struct acl *aclp); } 348 AUE_ACL_SET_FILE STD { int __acl_set_file(const char *path, \ acl_type_t type, struct acl *aclp); } 349 AUE_ACL_GET_FD STD { int __acl_get_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 350 AUE_ACL_SET_FD STD { int __acl_set_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 351 AUE_ACL_DELETE_FILE STD { int __acl_delete_file(const char *path, \ 
acl_type_t type); } 352 AUE_ACL_DELETE_FD STD { int __acl_delete_fd(int filedes, \ acl_type_t type); } 353 AUE_ACL_CHECK_FILE STD { int __acl_aclcheck_file(const char *path, \ acl_type_t type, struct acl *aclp); } 354 AUE_ACL_CHECK_FD STD { int __acl_aclcheck_fd(int filedes, \ acl_type_t type, struct acl *aclp); } 355 AUE_EXTATTRCTL STD { int extattrctl(const char *path, int cmd, \ const char *filename, int attrnamespace, \ const char *attrname); } 356 AUE_EXTATTR_SET_FILE STD { ssize_t extattr_set_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 357 AUE_EXTATTR_GET_FILE STD { ssize_t extattr_get_file( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 358 AUE_EXTATTR_DELETE_FILE STD { int extattr_delete_file(const char *path, \ int attrnamespace, \ const char *attrname); } 359 AUE_AIO_WAITCOMPLETE STD { ssize_t aio_waitcomplete( \ struct aiocb **aiocbp, \ struct timespec *timeout); } 360 AUE_GETRESUID STD { int getresuid(uid_t *ruid, uid_t *euid, \ uid_t *suid); } 361 AUE_GETRESGID STD { int getresgid(gid_t *rgid, gid_t *egid, \ gid_t *sgid); } 362 AUE_KQUEUE STD { int kqueue(void); } 363 AUE_KEVENT COMPAT11 { int kevent(int fd, \ struct kevent_freebsd11 *changelist, \ int nchanges, \ struct kevent_freebsd11 *eventlist, \ int nevents, \ const struct timespec *timeout); } 364 AUE_NULL UNIMPL __cap_get_proc 365 AUE_NULL UNIMPL __cap_set_proc 366 AUE_NULL UNIMPL __cap_get_fd 367 AUE_NULL UNIMPL __cap_get_file 368 AUE_NULL UNIMPL __cap_set_fd 369 AUE_NULL UNIMPL __cap_set_file 370 AUE_NULL UNIMPL nosys 371 AUE_EXTATTR_SET_FD STD { ssize_t extattr_set_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 372 AUE_EXTATTR_GET_FD STD { ssize_t extattr_get_fd(int fd, \ int attrnamespace, const char *attrname, \ void *data, size_t nbytes); } 373 AUE_EXTATTR_DELETE_FD STD { int extattr_delete_fd(int fd, \ int attrnamespace, \ const char *attrname); } 374 AUE_SETUGID STD { int __setugid(int flag); } 375 AUE_NULL UNIMPL nfsclnt 376 AUE_EACCESS STD { int eaccess(char *path, int amode); } 377 AUE_NULL NOSTD|NOTSTATIC { int afs3_syscall(long syscall, \ long parm1, long parm2, long parm3, \ long parm4, long parm5, long parm6); } 378 AUE_NMOUNT STD { int nmount(struct iovec *iovp, \ unsigned int iovcnt, int flags); } 379 AUE_NULL UNIMPL kse_exit 380 AUE_NULL UNIMPL kse_wakeup 381 AUE_NULL UNIMPL kse_create 382 AUE_NULL UNIMPL kse_thr_interrupt 383 AUE_NULL UNIMPL kse_release 384 AUE_NULL STD { int __mac_get_proc(struct mac *mac_p); } 385 AUE_NULL STD { int __mac_set_proc(struct mac *mac_p); } 386 AUE_NULL STD { int __mac_get_fd(int fd, \ struct mac *mac_p); } 387 AUE_NULL STD { int __mac_get_file(const char *path_p, \ struct mac *mac_p); } 388 AUE_NULL STD { int __mac_set_fd(int fd, \ struct mac *mac_p); } 389 AUE_NULL STD { int __mac_set_file(const char *path_p, \ struct mac *mac_p); } 390 AUE_NULL STD { int kenv(int what, const char *name, \ char *value, int len); } 391 AUE_LCHFLAGS STD { int lchflags(const char *path, \ u_long flags); } 392 AUE_NULL STD { int uuidgen(struct uuid *store, \ int count); } 393 AUE_SENDFILE STD { int sendfile(int fd, int s, off_t offset, \ size_t nbytes, struct sf_hdtr *hdtr, \ off_t *sbytes, int flags); } 394 AUE_NULL STD { int mac_syscall(const char *policy, \ int call, void *arg); } 395 AUE_GETFSSTAT COMPAT11 { int getfsstat(struct freebsd11_statfs *buf, \ long bufsize, int mode); } 396 AUE_STATFS COMPAT11 { int statfs(char *path, \ struct 
freebsd11_statfs *buf); } 397 AUE_FSTATFS COMPAT11 { int fstatfs(int fd, \ struct freebsd11_statfs *buf); } 398 AUE_FHSTATFS COMPAT11 { int fhstatfs(const struct fhandle *u_fhp, \ struct freebsd11_statfs *buf); } 399 AUE_NULL UNIMPL nosys 400 AUE_SEMCLOSE NOSTD { int ksem_close(semid_t id); } 401 AUE_SEMPOST NOSTD { int ksem_post(semid_t id); } 402 AUE_SEMWAIT NOSTD { int ksem_wait(semid_t id); } 403 AUE_SEMTRYWAIT NOSTD { int ksem_trywait(semid_t id); } 404 AUE_SEMINIT NOSTD { int ksem_init(semid_t *idp, \ unsigned int value); } 405 AUE_SEMOPEN NOSTD { int ksem_open(semid_t *idp, \ const char *name, int oflag, \ mode_t mode, unsigned int value); } 406 AUE_SEMUNLINK NOSTD { int ksem_unlink(const char *name); } 407 AUE_SEMGETVALUE NOSTD { int ksem_getvalue(semid_t id, int *val); } 408 AUE_SEMDESTROY NOSTD { int ksem_destroy(semid_t id); } 409 AUE_NULL STD { int __mac_get_pid(pid_t pid, \ struct mac *mac_p); } 410 AUE_NULL STD { int __mac_get_link(const char *path_p, \ struct mac *mac_p); } 411 AUE_NULL STD { int __mac_set_link(const char *path_p, \ struct mac *mac_p); } 412 AUE_EXTATTR_SET_LINK STD { ssize_t extattr_set_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 413 AUE_EXTATTR_GET_LINK STD { ssize_t extattr_get_link( \ const char *path, int attrnamespace, \ const char *attrname, void *data, \ size_t nbytes); } 414 AUE_EXTATTR_DELETE_LINK STD { int extattr_delete_link( \ const char *path, int attrnamespace, \ const char *attrname); } 415 AUE_NULL STD { int __mac_execve(char *fname, char **argv, \ char **envv, struct mac *mac_p); } 416 AUE_SIGACTION STD { int sigaction(int sig, \ const struct sigaction *act, \ struct sigaction *oact); } 417 AUE_SIGRETURN STD { int sigreturn( \ const struct __ucontext *sigcntxp); } 418 AUE_NULL UNIMPL __xstat 419 AUE_NULL UNIMPL __xfstat 420 AUE_NULL UNIMPL __xlstat 421 AUE_NULL STD { int getcontext(struct __ucontext *ucp); } 422 AUE_NULL STD { int setcontext( \ const struct __ucontext *ucp); } 423 AUE_NULL STD { int swapcontext(struct __ucontext *oucp, \ const struct __ucontext *ucp); } 424 AUE_SWAPOFF STD { int swapoff(const char *name); } 425 AUE_ACL_GET_LINK STD { int __acl_get_link(const char *path, \ acl_type_t type, struct acl *aclp); } 426 AUE_ACL_SET_LINK STD { int __acl_set_link(const char *path, \ acl_type_t type, struct acl *aclp); } 427 AUE_ACL_DELETE_LINK STD { int __acl_delete_link(const char *path, \ acl_type_t type); } 428 AUE_ACL_CHECK_LINK STD { int __acl_aclcheck_link(const char *path, \ acl_type_t type, struct acl *aclp); } 429 AUE_SIGWAIT STD { int sigwait(const sigset_t *set, \ int *sig); } 430 AUE_THR_CREATE STD { int thr_create(ucontext_t *ctx, long *id, \ int flags); } 431 AUE_THR_EXIT STD { void thr_exit(long *state); } 432 AUE_NULL STD { int thr_self(long *id); } 433 AUE_THR_KILL STD { int thr_kill(long id, int sig); } 434 AUE_NULL UNIMPL nosys 435 AUE_NULL UNIMPL nosys 436 AUE_JAIL_ATTACH STD { int jail_attach(int jid); } 437 AUE_EXTATTR_LIST_FD STD { ssize_t extattr_list_fd(int fd, \ int attrnamespace, void *data, \ size_t nbytes); } 438 AUE_EXTATTR_LIST_FILE STD { ssize_t extattr_list_file( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 439 AUE_EXTATTR_LIST_LINK STD { ssize_t extattr_list_link( \ const char *path, int attrnamespace, \ void *data, size_t nbytes); } 440 AUE_NULL UNIMPL kse_switchin 441 AUE_SEMWAIT NOSTD { int ksem_timedwait(semid_t id, \ const struct timespec *abstime); } 442 AUE_NULL STD { int thr_suspend( \ const struct timespec 
*timeout); } 443 AUE_NULL STD { int thr_wake(long id); } 444 AUE_MODUNLOAD STD { int kldunloadf(int fileid, int flags); } 445 AUE_AUDIT STD { int audit(const void *record, \ u_int length); } 446 AUE_AUDITON STD { int auditon(int cmd, void *data, \ u_int length); } 447 AUE_GETAUID STD { int getauid(uid_t *auid); } 448 AUE_SETAUID STD { int setauid(uid_t *auid); } 449 AUE_GETAUDIT STD { int getaudit(struct auditinfo *auditinfo); } 450 AUE_SETAUDIT STD { int setaudit(struct auditinfo *auditinfo); } 451 AUE_GETAUDIT_ADDR STD { int getaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 452 AUE_SETAUDIT_ADDR STD { int setaudit_addr( \ struct auditinfo_addr *auditinfo_addr, \ u_int length); } 453 AUE_AUDITCTL STD { int auditctl(char *path); } 454 AUE_NULL STD { int _umtx_op(void *obj, int op, \ u_long val, void *uaddr1, void *uaddr2); } 455 AUE_THR_NEW STD { int thr_new(struct thr_param *param, \ int param_size); } 456 AUE_NULL STD { int sigqueue(pid_t pid, int signum, void *value); } 457 AUE_MQ_OPEN NOSTD { int kmq_open(const char *path, int flags, \ mode_t mode, const struct mq_attr *attr); } 458 AUE_MQ_SETATTR NOSTD { int kmq_setattr(int mqd, \ const struct mq_attr *attr, \ struct mq_attr *oattr); } 459 AUE_MQ_TIMEDRECEIVE NOSTD { int kmq_timedreceive(int mqd, \ char *msg_ptr, size_t msg_len, \ unsigned *msg_prio, \ const struct timespec *abs_timeout); } 460 AUE_MQ_TIMEDSEND NOSTD { int kmq_timedsend(int mqd, \ const char *msg_ptr, size_t msg_len,\ unsigned msg_prio, \ const struct timespec *abs_timeout);} 461 AUE_MQ_NOTIFY NOSTD { int kmq_notify(int mqd, \ const struct sigevent *sigev); } 462 AUE_MQ_UNLINK NOSTD { int kmq_unlink(const char *path); } 463 AUE_NULL STD { int abort2(const char *why, int nargs, void **args); } 464 AUE_NULL STD { int thr_set_name(long id, const char *name); } 465 AUE_AIO_FSYNC STD { int aio_fsync(int op, struct aiocb *aiocbp); } 466 AUE_RTPRIO STD { int rtprio_thread(int function, \ lwpid_t lwpid, struct rtprio *rtp); } 467 AUE_NULL UNIMPL nosys 468 AUE_NULL UNIMPL nosys 469 AUE_NULL UNIMPL __getpath_fromfd 470 AUE_NULL UNIMPL __getpath_fromaddr 471 AUE_SCTP_PEELOFF NOSTD { int sctp_peeloff(int sd, uint32_t name); } 472 AUE_SCTP_GENERIC_SENDMSG NOSTD { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 473 AUE_SCTP_GENERIC_SENDMSG_IOV NOSTD { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ caddr_t to, __socklen_t tolen, \ struct sctp_sndrcvinfo *sinfo, int flags); } 474 AUE_SCTP_GENERIC_RECVMSG NOSTD { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ struct sockaddr * from, __socklen_t *fromlenaddr, \ struct sctp_sndrcvinfo *sinfo, int *msg_flags); } 475 AUE_PREAD STD { ssize_t pread(int fd, void *buf, \ size_t nbyte, off_t offset); } 476 AUE_PWRITE STD { ssize_t pwrite(int fd, const void *buf, \ size_t nbyte, off_t offset); } 477 AUE_MMAP STD { caddr_t mmap(caddr_t addr, size_t len, \ int prot, int flags, int fd, off_t pos); } 478 AUE_LSEEK STD { off_t lseek(int fd, off_t offset, \ int whence); } 479 AUE_TRUNCATE STD { int truncate(char *path, off_t length); } 480 AUE_FTRUNCATE STD { int ftruncate(int fd, off_t length); } 481 AUE_THR_KILL2 STD { int thr_kill2(pid_t pid, long id, int sig); } 482 AUE_SHMOPEN STD { int shm_open(const char *path, int flags, \ mode_t mode); } 483 AUE_SHMUNLINK STD { int shm_unlink(const char *path); } 484 AUE_NULL STD { int cpuset(cpusetid_t *setid); } 485 AUE_NULL STD { int 
cpuset_setid(cpuwhich_t which, id_t id, \ cpusetid_t setid); } 486 AUE_NULL STD { int cpuset_getid(cpulevel_t level, \ cpuwhich_t which, id_t id, \ cpusetid_t *setid); } 487 AUE_NULL STD { int cpuset_getaffinity(cpulevel_t level, \ cpuwhich_t which, id_t id, size_t cpusetsize, \ cpuset_t *mask); } 488 AUE_NULL STD { int cpuset_setaffinity(cpulevel_t level, \ cpuwhich_t which, id_t id, size_t cpusetsize, \ const cpuset_t *mask); } 489 AUE_FACCESSAT STD { int faccessat(int fd, char *path, int amode, \ int flag); } 490 AUE_FCHMODAT STD { int fchmodat(int fd, char *path, mode_t mode, \ int flag); } 491 AUE_FCHOWNAT STD { int fchownat(int fd, char *path, uid_t uid, \ gid_t gid, int flag); } 492 AUE_FEXECVE STD { int fexecve(int fd, char **argv, \ char **envv); } 493 AUE_FSTATAT COMPAT11 { int fstatat(int fd, char *path, \ struct freebsd11_stat *buf, int flag); } 494 AUE_FUTIMESAT STD { int futimesat(int fd, char *path, \ struct timeval *times); } 495 AUE_LINKAT STD { int linkat(int fd1, char *path1, int fd2, \ char *path2, int flag); } 496 AUE_MKDIRAT STD { int mkdirat(int fd, char *path, mode_t mode); } 497 AUE_MKFIFOAT STD { int mkfifoat(int fd, char *path, mode_t mode); } 498 AUE_MKNODAT COMPAT11 { int mknodat(int fd, char *path, mode_t mode, \ uint32_t dev); } ; XXX: see the comment for open 499 AUE_OPENAT_RWTC STD { int openat(int fd, char *path, int flag, \ mode_t mode); } 500 AUE_READLINKAT STD { int readlinkat(int fd, char *path, char *buf, \ size_t bufsize); } 501 AUE_RENAMEAT STD { int renameat(int oldfd, char *old, int newfd, \ char *new); } 502 AUE_SYMLINKAT STD { int symlinkat(char *path1, int fd, \ char *path2); } 503 AUE_UNLINKAT STD { int unlinkat(int fd, char *path, int flag); } 504 AUE_POSIX_OPENPT STD { int posix_openpt(int flags); } ; 505 is initialised by the kgssapi code, if present. 
505 AUE_NULL NOSTD { int gssd_syscall(char *path); } 506 AUE_JAIL_GET STD { int jail_get(struct iovec *iovp, \ unsigned int iovcnt, int flags); } 507 AUE_JAIL_SET STD { int jail_set(struct iovec *iovp, \ unsigned int iovcnt, int flags); } 508 AUE_JAIL_REMOVE STD { int jail_remove(int jid); } 509 AUE_CLOSEFROM STD { int closefrom(int lowfd); } 510 AUE_SEMCTL NOSTD { int __semctl(int semid, int semnum, \ int cmd, union semun *arg); } 511 AUE_MSGCTL NOSTD { int msgctl(int msqid, int cmd, \ struct msqid_ds *buf); } 512 AUE_SHMCTL NOSTD { int shmctl(int shmid, int cmd, \ struct shmid_ds *buf); } 513 AUE_LPATHCONF STD { int lpathconf(char *path, int name); } 514 AUE_NULL OBSOL cap_new 515 AUE_CAP_RIGHTS_GET STD { int __cap_rights_get(int version, \ int fd, cap_rights_t *rightsp); } 516 AUE_CAP_ENTER STD { int cap_enter(void); } 517 AUE_CAP_GETMODE STD { int cap_getmode(u_int *modep); } 518 AUE_PDFORK STD { int pdfork(int *fdp, int flags); } 519 AUE_PDKILL STD { int pdkill(int fd, int signum); } 520 AUE_PDGETPID STD { int pdgetpid(int fd, pid_t *pidp); } 521 AUE_PDWAIT UNIMPL pdwait4 522 AUE_SELECT STD { int pselect(int nd, fd_set *in, \ fd_set *ou, fd_set *ex, \ const struct timespec *ts, \ const sigset_t *sm); } 523 AUE_GETLOGINCLASS STD { int getloginclass(char *namebuf, \ size_t namelen); } 524 AUE_SETLOGINCLASS STD { int setloginclass(const char *namebuf); } 525 AUE_NULL STD { int rctl_get_racct(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 526 AUE_NULL STD { int rctl_get_rules(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 527 AUE_NULL STD { int rctl_get_limits(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 528 AUE_NULL STD { int rctl_add_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 529 AUE_NULL STD { int rctl_remove_rule(const void *inbufp, \ size_t inbuflen, void *outbufp, \ size_t outbuflen); } 530 AUE_POSIX_FALLOCATE STD { int posix_fallocate(int fd, \ off_t offset, off_t len); } 531 AUE_POSIX_FADVISE STD { int posix_fadvise(int fd, off_t offset, \ off_t len, int advice); } 532 AUE_WAIT6 STD { int wait6(idtype_t idtype, id_t id, \ int *status, int options, \ struct __wrusage *wrusage, \ siginfo_t *info); } 533 AUE_CAP_RIGHTS_LIMIT STD { int cap_rights_limit(int fd, \ cap_rights_t *rightsp); } 534 AUE_CAP_IOCTLS_LIMIT STD { int cap_ioctls_limit(int fd, \ const u_long *cmds, size_t ncmds); } 535 AUE_CAP_IOCTLS_GET STD { ssize_t cap_ioctls_get(int fd, \ u_long *cmds, size_t maxcmds); } 536 AUE_CAP_FCNTLS_LIMIT STD { int cap_fcntls_limit(int fd, \ uint32_t fcntlrights); } 537 AUE_CAP_FCNTLS_GET STD { int cap_fcntls_get(int fd, \ uint32_t *fcntlrightsp); } 538 AUE_BINDAT STD { int bindat(int fd, int s, caddr_t name, \ int namelen); } 539 AUE_CONNECTAT STD { int connectat(int fd, int s, caddr_t name, \ int namelen); } 540 AUE_CHFLAGSAT STD { int chflagsat(int fd, const char *path, \ u_long flags, int atflag); } 541 AUE_ACCEPT STD { int accept4(int s, \ struct sockaddr * __restrict name, \ __socklen_t * __restrict anamelen, \ int flags); } 542 AUE_PIPE STD { int pipe2(int *fildes, int flags); } 543 AUE_AIO_MLOCK STD { int aio_mlock(struct aiocb *aiocbp); } 544 AUE_PROCCTL STD { int procctl(idtype_t idtype, id_t id, \ int com, void *data); } 545 AUE_POLL STD { int ppoll(struct pollfd *fds, u_int nfds, \ const struct timespec *ts, \ const sigset_t *set); } 546 AUE_FUTIMES STD { int futimens(int fd, \ struct timespec *times); } 547 AUE_FUTIMESAT STD { int utimensat(int 
		    fd, \
		    char *path, \
		    struct timespec *times, int flag); }
 548	AUE_NULL	STD	{ int numa_getaffinity(cpuwhich_t which, \
		    id_t id, \
		    struct vm_domain_policy_entry *policy); }
 549	AUE_NULL	STD	{ int numa_setaffinity(cpuwhich_t which, \
		    id_t id, const struct \
		    vm_domain_policy_entry *policy); }
 550	AUE_FSYNC	STD	{ int fdatasync(int fd); }
 551	AUE_FSTAT	STD	{ int fstat(int fd, struct stat *sb); }
 552	AUE_FSTATAT	STD	{ int fstatat(int fd, char *path, \
		    struct stat *buf, int flag); }
 553	AUE_FHSTAT	STD	{ int fhstat(const struct fhandle *u_fhp, \
		    struct stat *sb); }
 554	AUE_GETDIRENTRIES	STD	{ ssize_t getdirentries(int fd, char *buf, \
		    size_t count, off_t *basep); }
 555	AUE_STATFS	STD	{ int statfs(char *path, struct statfs *buf); }
 556	AUE_FSTATFS	STD	{ int fstatfs(int fd, struct statfs *buf); }
 557	AUE_GETFSSTAT	STD	{ int getfsstat(struct statfs *buf, \
		    long bufsize, int mode); }
 558	AUE_FHSTATFS	STD	{ int fhstatfs(const struct fhandle *u_fhp, \
		    struct statfs *buf); }
 559	AUE_MKNODAT	STD	{ int mknodat(int fd, char *path, mode_t mode, \
		    dev_t dev); }
 560	AUE_KEVENT	STD	{ int kevent(int fd, \
		    struct kevent *changelist, int nchanges, \
		    struct kevent *eventlist, int nevents, \
		    const struct timespec *timeout); }
+561	AUE_NULL	STD	{ int cpuset_getdomain(cpulevel_t level, \
+		    cpuwhich_t which, id_t id, \
+		    size_t domainsetsize, domainset_t *mask, \
+		    int *policy); }
+562	AUE_NULL	STD	{ int cpuset_setdomain(cpulevel_t level, \
+		    cpuwhich_t which, id_t id, \
+		    size_t domainsetsize, domainset_t *mask, \
+		    int policy); }
 ; Please copy any additions and changes to the following compatability tables:
 ; sys/compat/freebsd32/syscalls.master
 ; vim: syntax=off
Index: user/jeff/numa/sys/kern/systrace_args.c
===================================================================
--- user/jeff/numa/sys/kern/systrace_args.c	(revision 326888)
+++ user/jeff/numa/sys/kern/systrace_args.c	(revision 326889)
@@ -1,10622 +1,10706 @@
 /*
  * System call argument to DTrace register array converstion.
  *
  * DO NOT EDIT-- this file is automatically generated.
  * $FreeBSD$
  * This file is part of the DTrace syscall provider.
*/ static void systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args) { int64_t *iarg = (int64_t *) uarg; switch (sysnum) { /* nosys */ case 0: { *n_args = 0; break; } /* sys_exit */ case 1: { struct sys_exit_args *p = params; iarg[0] = p->rval; /* int */ *n_args = 1; break; } /* fork */ case 2: { *n_args = 0; break; } /* read */ case 3: { struct read_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->buf; /* void * */ uarg[2] = p->nbyte; /* size_t */ *n_args = 3; break; } /* write */ case 4: { struct write_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->buf; /* const void * */ uarg[2] = p->nbyte; /* size_t */ *n_args = 3; break; } /* open */ case 5: { struct open_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->flags; /* int */ iarg[2] = p->mode; /* int */ *n_args = 3; break; } /* close */ case 6: { struct close_args *p = params; iarg[0] = p->fd; /* int */ *n_args = 1; break; } /* wait4 */ case 7: { struct wait4_args *p = params; iarg[0] = p->pid; /* int */ uarg[1] = (intptr_t) p->status; /* int * */ iarg[2] = p->options; /* int */ uarg[3] = (intptr_t) p->rusage; /* struct rusage * */ *n_args = 4; break; } /* link */ case 9: { struct link_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = (intptr_t) p->link; /* char * */ *n_args = 2; break; } /* unlink */ case 10: { struct unlink_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* chdir */ case 12: { struct chdir_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* fchdir */ case 13: { struct fchdir_args *p = params; iarg[0] = p->fd; /* int */ *n_args = 1; break; } /* chmod */ case 15: { struct chmod_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->mode; /* int */ *n_args = 2; break; } /* chown */ case 16: { struct chown_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->uid; /* int */ iarg[2] = p->gid; /* int */ *n_args = 3; break; } /* obreak */ case 17: { struct obreak_args *p = params; uarg[0] = (intptr_t) p->nsize; /* char * */ *n_args = 1; break; } /* getpid */ case 20: { *n_args = 0; break; } /* mount */ case 21: { struct mount_args *p = params; uarg[0] = (intptr_t) p->type; /* char * */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->flags; /* int */ uarg[3] = (intptr_t) p->data; /* caddr_t */ *n_args = 4; break; } /* unmount */ case 22: { struct unmount_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->flags; /* int */ *n_args = 2; break; } /* setuid */ case 23: { struct setuid_args *p = params; uarg[0] = p->uid; /* uid_t */ *n_args = 1; break; } /* getuid */ case 24: { *n_args = 0; break; } /* geteuid */ case 25: { *n_args = 0; break; } /* ptrace */ case 26: { struct ptrace_args *p = params; iarg[0] = p->req; /* int */ iarg[1] = p->pid; /* pid_t */ uarg[2] = (intptr_t) p->addr; /* caddr_t */ iarg[3] = p->data; /* int */ *n_args = 4; break; } /* recvmsg */ case 27: { struct recvmsg_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->msg; /* struct msghdr * */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* sendmsg */ case 28: { struct sendmsg_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->msg; /* struct msghdr * */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* recvfrom */ case 29: { struct recvfrom_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->buf; /* caddr_t */ uarg[2] = p->len; /* size_t */ iarg[3] = p->flags; /* 
int */ uarg[4] = (intptr_t) p->from; /* struct sockaddr * */ uarg[5] = (intptr_t) p->fromlenaddr; /* __socklen_t * */ *n_args = 6; break; } /* accept */ case 30: { struct accept_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->name; /* struct sockaddr * */ uarg[2] = (intptr_t) p->anamelen; /* __socklen_t * */ *n_args = 3; break; } /* getpeername */ case 31: { struct getpeername_args *p = params; iarg[0] = p->fdes; /* int */ uarg[1] = (intptr_t) p->asa; /* struct sockaddr * */ uarg[2] = (intptr_t) p->alen; /* __socklen_t * */ *n_args = 3; break; } /* getsockname */ case 32: { struct getsockname_args *p = params; iarg[0] = p->fdes; /* int */ uarg[1] = (intptr_t) p->asa; /* struct sockaddr * */ uarg[2] = (intptr_t) p->alen; /* __socklen_t * */ *n_args = 3; break; } /* access */ case 33: { struct access_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->amode; /* int */ *n_args = 2; break; } /* chflags */ case 34: { struct chflags_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ uarg[1] = p->flags; /* u_long */ *n_args = 2; break; } /* fchflags */ case 35: { struct fchflags_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = p->flags; /* u_long */ *n_args = 2; break; } /* sync */ case 36: { *n_args = 0; break; } /* kill */ case 37: { struct kill_args *p = params; iarg[0] = p->pid; /* int */ iarg[1] = p->signum; /* int */ *n_args = 2; break; } /* getppid */ case 39: { *n_args = 0; break; } /* dup */ case 41: { struct dup_args *p = params; uarg[0] = p->fd; /* u_int */ *n_args = 1; break; } /* getegid */ case 43: { *n_args = 0; break; } /* profil */ case 44: { struct profil_args *p = params; uarg[0] = (intptr_t) p->samples; /* caddr_t */ uarg[1] = p->size; /* size_t */ uarg[2] = p->offset; /* size_t */ uarg[3] = p->scale; /* u_int */ *n_args = 4; break; } /* ktrace */ case 45: { struct ktrace_args *p = params; uarg[0] = (intptr_t) p->fname; /* const char * */ iarg[1] = p->ops; /* int */ iarg[2] = p->facs; /* int */ iarg[3] = p->pid; /* int */ *n_args = 4; break; } /* getgid */ case 47: { *n_args = 0; break; } /* getlogin */ case 49: { struct getlogin_args *p = params; uarg[0] = (intptr_t) p->namebuf; /* char * */ uarg[1] = p->namelen; /* u_int */ *n_args = 2; break; } /* setlogin */ case 50: { struct setlogin_args *p = params; uarg[0] = (intptr_t) p->namebuf; /* char * */ *n_args = 1; break; } /* acct */ case 51: { struct acct_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* sigaltstack */ case 53: { struct sigaltstack_args *p = params; uarg[0] = (intptr_t) p->ss; /* stack_t * */ uarg[1] = (intptr_t) p->oss; /* stack_t * */ *n_args = 2; break; } /* ioctl */ case 54: { struct ioctl_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = p->com; /* u_long */ uarg[2] = (intptr_t) p->data; /* caddr_t */ *n_args = 3; break; } /* reboot */ case 55: { struct reboot_args *p = params; iarg[0] = p->opt; /* int */ *n_args = 1; break; } /* revoke */ case 56: { struct revoke_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* symlink */ case 57: { struct symlink_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = (intptr_t) p->link; /* char * */ *n_args = 2; break; } /* readlink */ case 58: { struct readlink_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = (intptr_t) p->buf; /* char * */ uarg[2] = p->count; /* size_t */ *n_args = 3; break; } /* execve */ case 59: { struct execve_args *p = params; uarg[0] = (intptr_t) p->fname; /* char * 
*/ uarg[1] = (intptr_t) p->argv; /* char ** */ uarg[2] = (intptr_t) p->envv; /* char ** */ *n_args = 3; break; } /* umask */ case 60: { struct umask_args *p = params; iarg[0] = p->newmask; /* int */ *n_args = 1; break; } /* chroot */ case 61: { struct chroot_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* msync */ case 65: { struct msync_args *p = params; uarg[0] = (intptr_t) p->addr; /* void * */ uarg[1] = p->len; /* size_t */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* vfork */ case 66: { *n_args = 0; break; } /* sbrk */ case 69: { struct sbrk_args *p = params; iarg[0] = p->incr; /* int */ *n_args = 1; break; } /* sstk */ case 70: { struct sstk_args *p = params; iarg[0] = p->incr; /* int */ *n_args = 1; break; } /* ovadvise */ case 72: { struct ovadvise_args *p = params; iarg[0] = p->anom; /* int */ *n_args = 1; break; } /* munmap */ case 73: { struct munmap_args *p = params; uarg[0] = (intptr_t) p->addr; /* void * */ uarg[1] = p->len; /* size_t */ *n_args = 2; break; } /* mprotect */ case 74: { struct mprotect_args *p = params; uarg[0] = (intptr_t) p->addr; /* void * */ uarg[1] = p->len; /* size_t */ iarg[2] = p->prot; /* int */ *n_args = 3; break; } /* madvise */ case 75: { struct madvise_args *p = params; uarg[0] = (intptr_t) p->addr; /* void * */ uarg[1] = p->len; /* size_t */ iarg[2] = p->behav; /* int */ *n_args = 3; break; } /* mincore */ case 78: { struct mincore_args *p = params; uarg[0] = (intptr_t) p->addr; /* const void * */ uarg[1] = p->len; /* size_t */ uarg[2] = (intptr_t) p->vec; /* char * */ *n_args = 3; break; } /* getgroups */ case 79: { struct getgroups_args *p = params; uarg[0] = p->gidsetsize; /* u_int */ uarg[1] = (intptr_t) p->gidset; /* gid_t * */ *n_args = 2; break; } /* setgroups */ case 80: { struct setgroups_args *p = params; uarg[0] = p->gidsetsize; /* u_int */ uarg[1] = (intptr_t) p->gidset; /* gid_t * */ *n_args = 2; break; } /* getpgrp */ case 81: { *n_args = 0; break; } /* setpgid */ case 82: { struct setpgid_args *p = params; iarg[0] = p->pid; /* int */ iarg[1] = p->pgid; /* int */ *n_args = 2; break; } /* setitimer */ case 83: { struct setitimer_args *p = params; uarg[0] = p->which; /* u_int */ uarg[1] = (intptr_t) p->itv; /* struct itimerval * */ uarg[2] = (intptr_t) p->oitv; /* struct itimerval * */ *n_args = 3; break; } /* swapon */ case 85: { struct swapon_args *p = params; uarg[0] = (intptr_t) p->name; /* char * */ *n_args = 1; break; } /* getitimer */ case 86: { struct getitimer_args *p = params; uarg[0] = p->which; /* u_int */ uarg[1] = (intptr_t) p->itv; /* struct itimerval * */ *n_args = 2; break; } /* getdtablesize */ case 89: { *n_args = 0; break; } /* dup2 */ case 90: { struct dup2_args *p = params; uarg[0] = p->from; /* u_int */ uarg[1] = p->to; /* u_int */ *n_args = 2; break; } /* fcntl */ case 92: { struct fcntl_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->cmd; /* int */ iarg[2] = p->arg; /* long */ *n_args = 3; break; } /* select */ case 93: { struct select_args *p = params; iarg[0] = p->nd; /* int */ uarg[1] = (intptr_t) p->in; /* fd_set * */ uarg[2] = (intptr_t) p->ou; /* fd_set * */ uarg[3] = (intptr_t) p->ex; /* fd_set * */ uarg[4] = (intptr_t) p->tv; /* struct timeval * */ *n_args = 5; break; } /* fsync */ case 95: { struct fsync_args *p = params; iarg[0] = p->fd; /* int */ *n_args = 1; break; } /* setpriority */ case 96: { struct setpriority_args *p = params; iarg[0] = p->which; /* int */ iarg[1] = p->who; /* int */ iarg[2] = p->prio; /* int */ *n_args = 3; break; } /* 
socket */ case 97: { struct socket_args *p = params; iarg[0] = p->domain; /* int */ iarg[1] = p->type; /* int */ iarg[2] = p->protocol; /* int */ *n_args = 3; break; } /* connect */ case 98: { struct connect_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->name; /* caddr_t */ iarg[2] = p->namelen; /* int */ *n_args = 3; break; } /* getpriority */ case 100: { struct getpriority_args *p = params; iarg[0] = p->which; /* int */ iarg[1] = p->who; /* int */ *n_args = 2; break; } /* bind */ case 104: { struct bind_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->name; /* caddr_t */ iarg[2] = p->namelen; /* int */ *n_args = 3; break; } /* setsockopt */ case 105: { struct setsockopt_args *p = params; iarg[0] = p->s; /* int */ iarg[1] = p->level; /* int */ iarg[2] = p->name; /* int */ uarg[3] = (intptr_t) p->val; /* caddr_t */ iarg[4] = p->valsize; /* int */ *n_args = 5; break; } /* listen */ case 106: { struct listen_args *p = params; iarg[0] = p->s; /* int */ iarg[1] = p->backlog; /* int */ *n_args = 2; break; } /* gettimeofday */ case 116: { struct gettimeofday_args *p = params; uarg[0] = (intptr_t) p->tp; /* struct timeval * */ uarg[1] = (intptr_t) p->tzp; /* struct timezone * */ *n_args = 2; break; } /* getrusage */ case 117: { struct getrusage_args *p = params; iarg[0] = p->who; /* int */ uarg[1] = (intptr_t) p->rusage; /* struct rusage * */ *n_args = 2; break; } /* getsockopt */ case 118: { struct getsockopt_args *p = params; iarg[0] = p->s; /* int */ iarg[1] = p->level; /* int */ iarg[2] = p->name; /* int */ uarg[3] = (intptr_t) p->val; /* caddr_t */ uarg[4] = (intptr_t) p->avalsize; /* int * */ *n_args = 5; break; } /* readv */ case 120: { struct readv_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->iovp; /* struct iovec * */ uarg[2] = p->iovcnt; /* u_int */ *n_args = 3; break; } /* writev */ case 121: { struct writev_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->iovp; /* struct iovec * */ uarg[2] = p->iovcnt; /* u_int */ *n_args = 3; break; } /* settimeofday */ case 122: { struct settimeofday_args *p = params; uarg[0] = (intptr_t) p->tv; /* struct timeval * */ uarg[1] = (intptr_t) p->tzp; /* struct timezone * */ *n_args = 2; break; } /* fchown */ case 123: { struct fchown_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->uid; /* int */ iarg[2] = p->gid; /* int */ *n_args = 3; break; } /* fchmod */ case 124: { struct fchmod_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->mode; /* int */ *n_args = 2; break; } /* setreuid */ case 126: { struct setreuid_args *p = params; iarg[0] = p->ruid; /* int */ iarg[1] = p->euid; /* int */ *n_args = 2; break; } /* setregid */ case 127: { struct setregid_args *p = params; iarg[0] = p->rgid; /* int */ iarg[1] = p->egid; /* int */ *n_args = 2; break; } /* rename */ case 128: { struct rename_args *p = params; uarg[0] = (intptr_t) p->from; /* char * */ uarg[1] = (intptr_t) p->to; /* char * */ *n_args = 2; break; } /* flock */ case 131: { struct flock_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->how; /* int */ *n_args = 2; break; } /* mkfifo */ case 132: { struct mkfifo_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->mode; /* int */ *n_args = 2; break; } /* sendto */ case 133: { struct sendto_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->buf; /* caddr_t */ uarg[2] = p->len; /* size_t */ iarg[3] = p->flags; /* int */ uarg[4] = (intptr_t) p->to; /* caddr_t */ iarg[5] = p->tolen; /* int */ *n_args = 6; 
break; } /* shutdown */ case 134: { struct shutdown_args *p = params; iarg[0] = p->s; /* int */ iarg[1] = p->how; /* int */ *n_args = 2; break; } /* socketpair */ case 135: { struct socketpair_args *p = params; iarg[0] = p->domain; /* int */ iarg[1] = p->type; /* int */ iarg[2] = p->protocol; /* int */ uarg[3] = (intptr_t) p->rsv; /* int * */ *n_args = 4; break; } /* mkdir */ case 136: { struct mkdir_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->mode; /* int */ *n_args = 2; break; } /* rmdir */ case 137: { struct rmdir_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* utimes */ case 138: { struct utimes_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = (intptr_t) p->tptr; /* struct timeval * */ *n_args = 2; break; } /* adjtime */ case 140: { struct adjtime_args *p = params; uarg[0] = (intptr_t) p->delta; /* struct timeval * */ uarg[1] = (intptr_t) p->olddelta; /* struct timeval * */ *n_args = 2; break; } /* setsid */ case 147: { *n_args = 0; break; } /* quotactl */ case 148: { struct quotactl_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->cmd; /* int */ iarg[2] = p->uid; /* int */ uarg[3] = (intptr_t) p->arg; /* caddr_t */ *n_args = 4; break; } /* nlm_syscall */ case 154: { struct nlm_syscall_args *p = params; iarg[0] = p->debug_level; /* int */ iarg[1] = p->grace_period; /* int */ iarg[2] = p->addr_count; /* int */ uarg[3] = (intptr_t) p->addrs; /* char ** */ *n_args = 4; break; } /* nfssvc */ case 155: { struct nfssvc_args *p = params; iarg[0] = p->flag; /* int */ uarg[1] = (intptr_t) p->argp; /* caddr_t */ *n_args = 2; break; } /* lgetfh */ case 160: { struct lgetfh_args *p = params; uarg[0] = (intptr_t) p->fname; /* char * */ uarg[1] = (intptr_t) p->fhp; /* struct fhandle * */ *n_args = 2; break; } /* getfh */ case 161: { struct getfh_args *p = params; uarg[0] = (intptr_t) p->fname; /* char * */ uarg[1] = (intptr_t) p->fhp; /* struct fhandle * */ *n_args = 2; break; } /* sysarch */ case 165: { struct sysarch_args *p = params; iarg[0] = p->op; /* int */ uarg[1] = (intptr_t) p->parms; /* char * */ *n_args = 2; break; } /* rtprio */ case 166: { struct rtprio_args *p = params; iarg[0] = p->function; /* int */ iarg[1] = p->pid; /* pid_t */ uarg[2] = (intptr_t) p->rtp; /* struct rtprio * */ *n_args = 3; break; } /* semsys */ case 169: { struct semsys_args *p = params; iarg[0] = p->which; /* int */ iarg[1] = p->a2; /* int */ iarg[2] = p->a3; /* int */ iarg[3] = p->a4; /* int */ iarg[4] = p->a5; /* int */ *n_args = 5; break; } /* msgsys */ case 170: { struct msgsys_args *p = params; iarg[0] = p->which; /* int */ iarg[1] = p->a2; /* int */ iarg[2] = p->a3; /* int */ iarg[3] = p->a4; /* int */ iarg[4] = p->a5; /* int */ iarg[5] = p->a6; /* int */ *n_args = 6; break; } /* shmsys */ case 171: { struct shmsys_args *p = params; iarg[0] = p->which; /* int */ iarg[1] = p->a2; /* int */ iarg[2] = p->a3; /* int */ iarg[3] = p->a4; /* int */ *n_args = 4; break; } /* setfib */ case 175: { struct setfib_args *p = params; iarg[0] = p->fibnum; /* int */ *n_args = 1; break; } /* ntp_adjtime */ case 176: { struct ntp_adjtime_args *p = params; uarg[0] = (intptr_t) p->tp; /* struct timex * */ *n_args = 1; break; } /* setgid */ case 181: { struct setgid_args *p = params; iarg[0] = p->gid; /* gid_t */ *n_args = 1; break; } /* setegid */ case 182: { struct setegid_args *p = params; iarg[0] = p->egid; /* gid_t */ *n_args = 1; break; } /* seteuid */ case 183: { struct seteuid_args *p = params; uarg[0] = 
p->euid; /* uid_t */ *n_args = 1; break; } /* pathconf */ case 191: { struct pathconf_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->name; /* int */ *n_args = 2; break; } /* fpathconf */ case 192: { struct fpathconf_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->name; /* int */ *n_args = 2; break; } /* getrlimit */ case 194: { struct __getrlimit_args *p = params; uarg[0] = p->which; /* u_int */ uarg[1] = (intptr_t) p->rlp; /* struct rlimit * */ *n_args = 2; break; } /* setrlimit */ case 195: { struct __setrlimit_args *p = params; uarg[0] = p->which; /* u_int */ uarg[1] = (intptr_t) p->rlp; /* struct rlimit * */ *n_args = 2; break; } /* nosys */ case 198: { *n_args = 0; break; } /* __sysctl */ case 202: { struct sysctl_args *p = params; uarg[0] = (intptr_t) p->name; /* int * */ uarg[1] = p->namelen; /* u_int */ uarg[2] = (intptr_t) p->old; /* void * */ uarg[3] = (intptr_t) p->oldlenp; /* size_t * */ uarg[4] = (intptr_t) p->new; /* void * */ uarg[5] = p->newlen; /* size_t */ *n_args = 6; break; } /* mlock */ case 203: { struct mlock_args *p = params; uarg[0] = (intptr_t) p->addr; /* const void * */ uarg[1] = p->len; /* size_t */ *n_args = 2; break; } /* munlock */ case 204: { struct munlock_args *p = params; uarg[0] = (intptr_t) p->addr; /* const void * */ uarg[1] = p->len; /* size_t */ *n_args = 2; break; } /* undelete */ case 205: { struct undelete_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* futimes */ case 206: { struct futimes_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->tptr; /* struct timeval * */ *n_args = 2; break; } /* getpgid */ case 207: { struct getpgid_args *p = params; iarg[0] = p->pid; /* pid_t */ *n_args = 1; break; } /* poll */ case 209: { struct poll_args *p = params; uarg[0] = (intptr_t) p->fds; /* struct pollfd * */ uarg[1] = p->nfds; /* u_int */ iarg[2] = p->timeout; /* int */ *n_args = 3; break; } /* lkmnosys */ case 210: { *n_args = 0; break; } /* lkmnosys */ case 211: { *n_args = 0; break; } /* lkmnosys */ case 212: { *n_args = 0; break; } /* lkmnosys */ case 213: { *n_args = 0; break; } /* lkmnosys */ case 214: { *n_args = 0; break; } /* lkmnosys */ case 215: { *n_args = 0; break; } /* lkmnosys */ case 216: { *n_args = 0; break; } /* lkmnosys */ case 217: { *n_args = 0; break; } /* lkmnosys */ case 218: { *n_args = 0; break; } /* lkmnosys */ case 219: { *n_args = 0; break; } /* semget */ case 221: { struct semget_args *p = params; iarg[0] = p->key; /* key_t */ iarg[1] = p->nsems; /* int */ iarg[2] = p->semflg; /* int */ *n_args = 3; break; } /* semop */ case 222: { struct semop_args *p = params; iarg[0] = p->semid; /* int */ uarg[1] = (intptr_t) p->sops; /* struct sembuf * */ uarg[2] = p->nsops; /* size_t */ *n_args = 3; break; } /* msgget */ case 225: { struct msgget_args *p = params; iarg[0] = p->key; /* key_t */ iarg[1] = p->msgflg; /* int */ *n_args = 2; break; } /* msgsnd */ case 226: { struct msgsnd_args *p = params; iarg[0] = p->msqid; /* int */ uarg[1] = (intptr_t) p->msgp; /* const void * */ uarg[2] = p->msgsz; /* size_t */ iarg[3] = p->msgflg; /* int */ *n_args = 4; break; } /* msgrcv */ case 227: { struct msgrcv_args *p = params; iarg[0] = p->msqid; /* int */ uarg[1] = (intptr_t) p->msgp; /* void * */ uarg[2] = p->msgsz; /* size_t */ iarg[3] = p->msgtyp; /* long */ iarg[4] = p->msgflg; /* int */ *n_args = 5; break; } /* shmat */ case 228: { struct shmat_args *p = params; iarg[0] = p->shmid; /* int */ uarg[1] = (intptr_t) p->shmaddr; /* const void * */ 
iarg[2] = p->shmflg; /* int */ *n_args = 3; break; } /* shmdt */ case 230: { struct shmdt_args *p = params; uarg[0] = (intptr_t) p->shmaddr; /* const void * */ *n_args = 1; break; } /* shmget */ case 231: { struct shmget_args *p = params; iarg[0] = p->key; /* key_t */ uarg[1] = p->size; /* size_t */ iarg[2] = p->shmflg; /* int */ *n_args = 3; break; } /* clock_gettime */ case 232: { struct clock_gettime_args *p = params; iarg[0] = p->clock_id; /* clockid_t */ uarg[1] = (intptr_t) p->tp; /* struct timespec * */ *n_args = 2; break; } /* clock_settime */ case 233: { struct clock_settime_args *p = params; iarg[0] = p->clock_id; /* clockid_t */ uarg[1] = (intptr_t) p->tp; /* const struct timespec * */ *n_args = 2; break; } /* clock_getres */ case 234: { struct clock_getres_args *p = params; iarg[0] = p->clock_id; /* clockid_t */ uarg[1] = (intptr_t) p->tp; /* struct timespec * */ *n_args = 2; break; } /* ktimer_create */ case 235: { struct ktimer_create_args *p = params; iarg[0] = p->clock_id; /* clockid_t */ uarg[1] = (intptr_t) p->evp; /* struct sigevent * */ uarg[2] = (intptr_t) p->timerid; /* int * */ *n_args = 3; break; } /* ktimer_delete */ case 236: { struct ktimer_delete_args *p = params; iarg[0] = p->timerid; /* int */ *n_args = 1; break; } /* ktimer_settime */ case 237: { struct ktimer_settime_args *p = params; iarg[0] = p->timerid; /* int */ iarg[1] = p->flags; /* int */ uarg[2] = (intptr_t) p->value; /* const struct itimerspec * */ uarg[3] = (intptr_t) p->ovalue; /* struct itimerspec * */ *n_args = 4; break; } /* ktimer_gettime */ case 238: { struct ktimer_gettime_args *p = params; iarg[0] = p->timerid; /* int */ uarg[1] = (intptr_t) p->value; /* struct itimerspec * */ *n_args = 2; break; } /* ktimer_getoverrun */ case 239: { struct ktimer_getoverrun_args *p = params; iarg[0] = p->timerid; /* int */ *n_args = 1; break; } /* nanosleep */ case 240: { struct nanosleep_args *p = params; uarg[0] = (intptr_t) p->rqtp; /* const struct timespec * */ uarg[1] = (intptr_t) p->rmtp; /* struct timespec * */ *n_args = 2; break; } /* ffclock_getcounter */ case 241: { struct ffclock_getcounter_args *p = params; uarg[0] = (intptr_t) p->ffcount; /* ffcounter * */ *n_args = 1; break; } /* ffclock_setestimate */ case 242: { struct ffclock_setestimate_args *p = params; uarg[0] = (intptr_t) p->cest; /* struct ffclock_estimate * */ *n_args = 1; break; } /* ffclock_getestimate */ case 243: { struct ffclock_getestimate_args *p = params; uarg[0] = (intptr_t) p->cest; /* struct ffclock_estimate * */ *n_args = 1; break; } /* clock_nanosleep */ case 244: { struct clock_nanosleep_args *p = params; iarg[0] = p->clock_id; /* clockid_t */ iarg[1] = p->flags; /* int */ uarg[2] = (intptr_t) p->rqtp; /* const struct timespec * */ uarg[3] = (intptr_t) p->rmtp; /* struct timespec * */ *n_args = 4; break; } /* clock_getcpuclockid2 */ case 247: { struct clock_getcpuclockid2_args *p = params; iarg[0] = p->id; /* id_t */ iarg[1] = p->which; /* int */ uarg[2] = (intptr_t) p->clock_id; /* clockid_t * */ *n_args = 3; break; } /* ntp_gettime */ case 248: { struct ntp_gettime_args *p = params; uarg[0] = (intptr_t) p->ntvp; /* struct ntptimeval * */ *n_args = 1; break; } /* minherit */ case 250: { struct minherit_args *p = params; uarg[0] = (intptr_t) p->addr; /* void * */ uarg[1] = p->len; /* size_t */ iarg[2] = p->inherit; /* int */ *n_args = 3; break; } /* rfork */ case 251: { struct rfork_args *p = params; iarg[0] = p->flags; /* int */ *n_args = 1; break; } /* issetugid */ case 253: { *n_args = 0; break; } /* lchown */ case 
254: { struct lchown_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->uid; /* int */ iarg[2] = p->gid; /* int */ *n_args = 3; break; } /* aio_read */ case 255: { struct aio_read_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 1; break; } /* aio_write */ case 256: { struct aio_write_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 1; break; } /* lio_listio */ case 257: { struct lio_listio_args *p = params; iarg[0] = p->mode; /* int */ uarg[1] = (intptr_t) p->acb_list; /* struct aiocb *const * */ iarg[2] = p->nent; /* int */ uarg[3] = (intptr_t) p->sig; /* struct sigevent * */ *n_args = 4; break; } /* lchmod */ case 274: { struct lchmod_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->mode; /* mode_t */ *n_args = 2; break; } /* lchown */ case 275: { struct lchown_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = p->uid; /* uid_t */ iarg[2] = p->gid; /* gid_t */ *n_args = 3; break; } /* lutimes */ case 276: { struct lutimes_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = (intptr_t) p->tptr; /* struct timeval * */ *n_args = 2; break; } /* msync */ case 277: { struct msync_args *p = params; uarg[0] = (intptr_t) p->addr; /* void * */ uarg[1] = p->len; /* size_t */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* preadv */ case 289: { struct preadv_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->iovp; /* struct iovec * */ uarg[2] = p->iovcnt; /* u_int */ iarg[3] = p->offset; /* off_t */ *n_args = 4; break; } /* pwritev */ case 290: { struct pwritev_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->iovp; /* struct iovec * */ uarg[2] = p->iovcnt; /* u_int */ iarg[3] = p->offset; /* off_t */ *n_args = 4; break; } /* fhopen */ case 298: { struct fhopen_args *p = params; uarg[0] = (intptr_t) p->u_fhp; /* const struct fhandle * */ iarg[1] = p->flags; /* int */ *n_args = 2; break; } /* modnext */ case 300: { struct modnext_args *p = params; iarg[0] = p->modid; /* int */ *n_args = 1; break; } /* modstat */ case 301: { struct modstat_args *p = params; iarg[0] = p->modid; /* int */ uarg[1] = (intptr_t) p->stat; /* struct module_stat * */ *n_args = 2; break; } /* modfnext */ case 302: { struct modfnext_args *p = params; iarg[0] = p->modid; /* int */ *n_args = 1; break; } /* modfind */ case 303: { struct modfind_args *p = params; uarg[0] = (intptr_t) p->name; /* const char * */ *n_args = 1; break; } /* kldload */ case 304: { struct kldload_args *p = params; uarg[0] = (intptr_t) p->file; /* const char * */ *n_args = 1; break; } /* kldunload */ case 305: { struct kldunload_args *p = params; iarg[0] = p->fileid; /* int */ *n_args = 1; break; } /* kldfind */ case 306: { struct kldfind_args *p = params; uarg[0] = (intptr_t) p->file; /* const char * */ *n_args = 1; break; } /* kldnext */ case 307: { struct kldnext_args *p = params; iarg[0] = p->fileid; /* int */ *n_args = 1; break; } /* kldstat */ case 308: { struct kldstat_args *p = params; iarg[0] = p->fileid; /* int */ uarg[1] = (intptr_t) p->stat; /* struct kld_file_stat * */ *n_args = 2; break; } /* kldfirstmod */ case 309: { struct kldfirstmod_args *p = params; iarg[0] = p->fileid; /* int */ *n_args = 1; break; } /* getsid */ case 310: { struct getsid_args *p = params; iarg[0] = p->pid; /* pid_t */ *n_args = 1; break; } /* setresuid */ case 311: { struct setresuid_args *p = params; uarg[0] = p->ruid; /* uid_t */ uarg[1] = p->euid; /* uid_t */ uarg[2] = 
p->suid; /* uid_t */ *n_args = 3; break; } /* setresgid */ case 312: { struct setresgid_args *p = params; iarg[0] = p->rgid; /* gid_t */ iarg[1] = p->egid; /* gid_t */ iarg[2] = p->sgid; /* gid_t */ *n_args = 3; break; } /* aio_return */ case 314: { struct aio_return_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 1; break; } /* aio_suspend */ case 315: { struct aio_suspend_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb *const * */ iarg[1] = p->nent; /* int */ uarg[2] = (intptr_t) p->timeout; /* const struct timespec * */ *n_args = 3; break; } /* aio_cancel */ case 316: { struct aio_cancel_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 2; break; } /* aio_error */ case 317: { struct aio_error_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 1; break; } /* yield */ case 321: { *n_args = 0; break; } /* mlockall */ case 324: { struct mlockall_args *p = params; iarg[0] = p->how; /* int */ *n_args = 1; break; } /* munlockall */ case 325: { *n_args = 0; break; } /* __getcwd */ case 326: { struct __getcwd_args *p = params; uarg[0] = (intptr_t) p->buf; /* char * */ uarg[1] = p->buflen; /* size_t */ *n_args = 2; break; } /* sched_setparam */ case 327: { struct sched_setparam_args *p = params; iarg[0] = p->pid; /* pid_t */ uarg[1] = (intptr_t) p->param; /* const struct sched_param * */ *n_args = 2; break; } /* sched_getparam */ case 328: { struct sched_getparam_args *p = params; iarg[0] = p->pid; /* pid_t */ uarg[1] = (intptr_t) p->param; /* struct sched_param * */ *n_args = 2; break; } /* sched_setscheduler */ case 329: { struct sched_setscheduler_args *p = params; iarg[0] = p->pid; /* pid_t */ iarg[1] = p->policy; /* int */ uarg[2] = (intptr_t) p->param; /* const struct sched_param * */ *n_args = 3; break; } /* sched_getscheduler */ case 330: { struct sched_getscheduler_args *p = params; iarg[0] = p->pid; /* pid_t */ *n_args = 1; break; } /* sched_yield */ case 331: { *n_args = 0; break; } /* sched_get_priority_max */ case 332: { struct sched_get_priority_max_args *p = params; iarg[0] = p->policy; /* int */ *n_args = 1; break; } /* sched_get_priority_min */ case 333: { struct sched_get_priority_min_args *p = params; iarg[0] = p->policy; /* int */ *n_args = 1; break; } /* sched_rr_get_interval */ case 334: { struct sched_rr_get_interval_args *p = params; iarg[0] = p->pid; /* pid_t */ uarg[1] = (intptr_t) p->interval; /* struct timespec * */ *n_args = 2; break; } /* utrace */ case 335: { struct utrace_args *p = params; uarg[0] = (intptr_t) p->addr; /* const void * */ uarg[1] = p->len; /* size_t */ *n_args = 2; break; } /* kldsym */ case 337: { struct kldsym_args *p = params; iarg[0] = p->fileid; /* int */ iarg[1] = p->cmd; /* int */ uarg[2] = (intptr_t) p->data; /* void * */ *n_args = 3; break; } /* jail */ case 338: { struct jail_args *p = params; uarg[0] = (intptr_t) p->jail; /* struct jail * */ *n_args = 1; break; } /* nnpfs_syscall */ case 339: { struct nnpfs_syscall_args *p = params; iarg[0] = p->operation; /* int */ uarg[1] = (intptr_t) p->a_pathP; /* char * */ iarg[2] = p->a_opcode; /* int */ uarg[3] = (intptr_t) p->a_paramsP; /* void * */ iarg[4] = p->a_followSymlinks; /* int */ *n_args = 5; break; } /* sigprocmask */ case 340: { struct sigprocmask_args *p = params; iarg[0] = p->how; /* int */ uarg[1] = (intptr_t) p->set; /* const sigset_t * */ uarg[2] = (intptr_t) p->oset; /* sigset_t * */ *n_args = 3; break; } /* sigsuspend */ case 341: { struct 
sigsuspend_args *p = params; uarg[0] = (intptr_t) p->sigmask; /* const sigset_t * */ *n_args = 1; break; } /* sigpending */ case 343: { struct sigpending_args *p = params; uarg[0] = (intptr_t) p->set; /* sigset_t * */ *n_args = 1; break; } /* sigtimedwait */ case 345: { struct sigtimedwait_args *p = params; uarg[0] = (intptr_t) p->set; /* const sigset_t * */ uarg[1] = (intptr_t) p->info; /* siginfo_t * */ uarg[2] = (intptr_t) p->timeout; /* const struct timespec * */ *n_args = 3; break; } /* sigwaitinfo */ case 346: { struct sigwaitinfo_args *p = params; uarg[0] = (intptr_t) p->set; /* const sigset_t * */ uarg[1] = (intptr_t) p->info; /* siginfo_t * */ *n_args = 2; break; } /* __acl_get_file */ case 347: { struct __acl_get_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_set_file */ case 348: { struct __acl_set_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_get_fd */ case 349: { struct __acl_get_fd_args *p = params; iarg[0] = p->filedes; /* int */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_set_fd */ case 350: { struct __acl_set_fd_args *p = params; iarg[0] = p->filedes; /* int */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_delete_file */ case 351: { struct __acl_delete_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ *n_args = 2; break; } /* __acl_delete_fd */ case 352: { struct __acl_delete_fd_args *p = params; iarg[0] = p->filedes; /* int */ iarg[1] = p->type; /* acl_type_t */ *n_args = 2; break; } /* __acl_aclcheck_file */ case 353: { struct __acl_aclcheck_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_aclcheck_fd */ case 354: { struct __acl_aclcheck_fd_args *p = params; iarg[0] = p->filedes; /* int */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* extattrctl */ case 355: { struct extattrctl_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->cmd; /* int */ uarg[2] = (intptr_t) p->filename; /* const char * */ iarg[3] = p->attrnamespace; /* int */ uarg[4] = (intptr_t) p->attrname; /* const char * */ *n_args = 5; break; } /* extattr_set_file */ case 356: { struct extattr_set_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ uarg[3] = (intptr_t) p->data; /* void * */ uarg[4] = p->nbytes; /* size_t */ *n_args = 5; break; } /* extattr_get_file */ case 357: { struct extattr_get_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ uarg[3] = (intptr_t) p->data; /* void * */ uarg[4] = p->nbytes; /* size_t */ *n_args = 5; break; } /* extattr_delete_file */ case 358: { struct extattr_delete_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ *n_args = 3; break; } /* 
aio_waitcomplete */ case 359: { struct aio_waitcomplete_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb ** */ uarg[1] = (intptr_t) p->timeout; /* struct timespec * */ *n_args = 2; break; } /* getresuid */ case 360: { struct getresuid_args *p = params; uarg[0] = (intptr_t) p->ruid; /* uid_t * */ uarg[1] = (intptr_t) p->euid; /* uid_t * */ uarg[2] = (intptr_t) p->suid; /* uid_t * */ *n_args = 3; break; } /* getresgid */ case 361: { struct getresgid_args *p = params; uarg[0] = (intptr_t) p->rgid; /* gid_t * */ uarg[1] = (intptr_t) p->egid; /* gid_t * */ uarg[2] = (intptr_t) p->sgid; /* gid_t * */ *n_args = 3; break; } /* kqueue */ case 362: { *n_args = 0; break; } /* extattr_set_fd */ case 371: { struct extattr_set_fd_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ uarg[3] = (intptr_t) p->data; /* void * */ uarg[4] = p->nbytes; /* size_t */ *n_args = 5; break; } /* extattr_get_fd */ case 372: { struct extattr_get_fd_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ uarg[3] = (intptr_t) p->data; /* void * */ uarg[4] = p->nbytes; /* size_t */ *n_args = 5; break; } /* extattr_delete_fd */ case 373: { struct extattr_delete_fd_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ *n_args = 3; break; } /* __setugid */ case 374: { struct __setugid_args *p = params; iarg[0] = p->flag; /* int */ *n_args = 1; break; } /* eaccess */ case 376: { struct eaccess_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->amode; /* int */ *n_args = 2; break; } /* afs3_syscall */ case 377: { struct afs3_syscall_args *p = params; iarg[0] = p->syscall; /* long */ iarg[1] = p->parm1; /* long */ iarg[2] = p->parm2; /* long */ iarg[3] = p->parm3; /* long */ iarg[4] = p->parm4; /* long */ iarg[5] = p->parm5; /* long */ iarg[6] = p->parm6; /* long */ *n_args = 7; break; } /* nmount */ case 378: { struct nmount_args *p = params; uarg[0] = (intptr_t) p->iovp; /* struct iovec * */ uarg[1] = p->iovcnt; /* unsigned int */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* __mac_get_proc */ case 384: { struct __mac_get_proc_args *p = params; uarg[0] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 1; break; } /* __mac_set_proc */ case 385: { struct __mac_set_proc_args *p = params; uarg[0] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 1; break; } /* __mac_get_fd */ case 386: { struct __mac_get_fd_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* __mac_get_file */ case 387: { struct __mac_get_file_args *p = params; uarg[0] = (intptr_t) p->path_p; /* const char * */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* __mac_set_fd */ case 388: { struct __mac_set_fd_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* __mac_set_file */ case 389: { struct __mac_set_file_args *p = params; uarg[0] = (intptr_t) p->path_p; /* const char * */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* kenv */ case 390: { struct kenv_args *p = params; iarg[0] = p->what; /* int */ uarg[1] = (intptr_t) p->name; /* const char * */ uarg[2] = (intptr_t) p->value; /* char * */ iarg[3] = p->len; /* int */ *n_args = 4; break; } /* lchflags */ case 391: { struct 
lchflags_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ uarg[1] = p->flags; /* u_long */ *n_args = 2; break; } /* uuidgen */ case 392: { struct uuidgen_args *p = params; uarg[0] = (intptr_t) p->store; /* struct uuid * */ iarg[1] = p->count; /* int */ *n_args = 2; break; } /* sendfile */ case 393: { struct sendfile_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->s; /* int */ iarg[2] = p->offset; /* off_t */ uarg[3] = p->nbytes; /* size_t */ uarg[4] = (intptr_t) p->hdtr; /* struct sf_hdtr * */ uarg[5] = (intptr_t) p->sbytes; /* off_t * */ iarg[6] = p->flags; /* int */ *n_args = 7; break; } /* mac_syscall */ case 394: { struct mac_syscall_args *p = params; uarg[0] = (intptr_t) p->policy; /* const char * */ iarg[1] = p->call; /* int */ uarg[2] = (intptr_t) p->arg; /* void * */ *n_args = 3; break; } /* ksem_close */ case 400: { struct ksem_close_args *p = params; iarg[0] = p->id; /* semid_t */ *n_args = 1; break; } /* ksem_post */ case 401: { struct ksem_post_args *p = params; iarg[0] = p->id; /* semid_t */ *n_args = 1; break; } /* ksem_wait */ case 402: { struct ksem_wait_args *p = params; iarg[0] = p->id; /* semid_t */ *n_args = 1; break; } /* ksem_trywait */ case 403: { struct ksem_trywait_args *p = params; iarg[0] = p->id; /* semid_t */ *n_args = 1; break; } /* ksem_init */ case 404: { struct ksem_init_args *p = params; uarg[0] = (intptr_t) p->idp; /* semid_t * */ uarg[1] = p->value; /* unsigned int */ *n_args = 2; break; } /* ksem_open */ case 405: { struct ksem_open_args *p = params; uarg[0] = (intptr_t) p->idp; /* semid_t * */ uarg[1] = (intptr_t) p->name; /* const char * */ iarg[2] = p->oflag; /* int */ iarg[3] = p->mode; /* mode_t */ uarg[4] = p->value; /* unsigned int */ *n_args = 5; break; } /* ksem_unlink */ case 406: { struct ksem_unlink_args *p = params; uarg[0] = (intptr_t) p->name; /* const char * */ *n_args = 1; break; } /* ksem_getvalue */ case 407: { struct ksem_getvalue_args *p = params; iarg[0] = p->id; /* semid_t */ uarg[1] = (intptr_t) p->val; /* int * */ *n_args = 2; break; } /* ksem_destroy */ case 408: { struct ksem_destroy_args *p = params; iarg[0] = p->id; /* semid_t */ *n_args = 1; break; } /* __mac_get_pid */ case 409: { struct __mac_get_pid_args *p = params; iarg[0] = p->pid; /* pid_t */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* __mac_get_link */ case 410: { struct __mac_get_link_args *p = params; uarg[0] = (intptr_t) p->path_p; /* const char * */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* __mac_set_link */ case 411: { struct __mac_set_link_args *p = params; uarg[0] = (intptr_t) p->path_p; /* const char * */ uarg[1] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 2; break; } /* extattr_set_link */ case 412: { struct extattr_set_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ uarg[3] = (intptr_t) p->data; /* void * */ uarg[4] = p->nbytes; /* size_t */ *n_args = 5; break; } /* extattr_get_link */ case 413: { struct extattr_get_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->attrname; /* const char * */ uarg[3] = (intptr_t) p->data; /* void * */ uarg[4] = p->nbytes; /* size_t */ *n_args = 5; break; } /* extattr_delete_link */ case 414: { struct extattr_delete_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ 
uarg[2] = (intptr_t) p->attrname; /* const char * */ *n_args = 3; break; } /* __mac_execve */ case 415: { struct __mac_execve_args *p = params; uarg[0] = (intptr_t) p->fname; /* char * */ uarg[1] = (intptr_t) p->argv; /* char ** */ uarg[2] = (intptr_t) p->envv; /* char ** */ uarg[3] = (intptr_t) p->mac_p; /* struct mac * */ *n_args = 4; break; } /* sigaction */ case 416: { struct sigaction_args *p = params; iarg[0] = p->sig; /* int */ uarg[1] = (intptr_t) p->act; /* const struct sigaction * */ uarg[2] = (intptr_t) p->oact; /* struct sigaction * */ *n_args = 3; break; } /* sigreturn */ case 417: { struct sigreturn_args *p = params; uarg[0] = (intptr_t) p->sigcntxp; /* const struct __ucontext * */ *n_args = 1; break; } /* getcontext */ case 421: { struct getcontext_args *p = params; uarg[0] = (intptr_t) p->ucp; /* struct __ucontext * */ *n_args = 1; break; } /* setcontext */ case 422: { struct setcontext_args *p = params; uarg[0] = (intptr_t) p->ucp; /* const struct __ucontext * */ *n_args = 1; break; } /* swapcontext */ case 423: { struct swapcontext_args *p = params; uarg[0] = (intptr_t) p->oucp; /* struct __ucontext * */ uarg[1] = (intptr_t) p->ucp; /* const struct __ucontext * */ *n_args = 2; break; } /* swapoff */ case 424: { struct swapoff_args *p = params; uarg[0] = (intptr_t) p->name; /* const char * */ *n_args = 1; break; } /* __acl_get_link */ case 425: { struct __acl_get_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_set_link */ case 426: { struct __acl_set_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* __acl_delete_link */ case 427: { struct __acl_delete_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ *n_args = 2; break; } /* __acl_aclcheck_link */ case 428: { struct __acl_aclcheck_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->type; /* acl_type_t */ uarg[2] = (intptr_t) p->aclp; /* struct acl * */ *n_args = 3; break; } /* sigwait */ case 429: { struct sigwait_args *p = params; uarg[0] = (intptr_t) p->set; /* const sigset_t * */ uarg[1] = (intptr_t) p->sig; /* int * */ *n_args = 2; break; } /* thr_create */ case 430: { struct thr_create_args *p = params; uarg[0] = (intptr_t) p->ctx; /* ucontext_t * */ uarg[1] = (intptr_t) p->id; /* long * */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* thr_exit */ case 431: { struct thr_exit_args *p = params; uarg[0] = (intptr_t) p->state; /* long * */ *n_args = 1; break; } /* thr_self */ case 432: { struct thr_self_args *p = params; uarg[0] = (intptr_t) p->id; /* long * */ *n_args = 1; break; } /* thr_kill */ case 433: { struct thr_kill_args *p = params; iarg[0] = p->id; /* long */ iarg[1] = p->sig; /* int */ *n_args = 2; break; } /* jail_attach */ case 436: { struct jail_attach_args *p = params; iarg[0] = p->jid; /* int */ *n_args = 1; break; } /* extattr_list_fd */ case 437: { struct extattr_list_fd_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->data; /* void * */ uarg[3] = p->nbytes; /* size_t */ *n_args = 4; break; } /* extattr_list_file */ case 438: { struct extattr_list_file_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) 
p->data; /* void * */ uarg[3] = p->nbytes; /* size_t */ *n_args = 4; break; } /* extattr_list_link */ case 439: { struct extattr_list_link_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->attrnamespace; /* int */ uarg[2] = (intptr_t) p->data; /* void * */ uarg[3] = p->nbytes; /* size_t */ *n_args = 4; break; } /* ksem_timedwait */ case 441: { struct ksem_timedwait_args *p = params; iarg[0] = p->id; /* semid_t */ uarg[1] = (intptr_t) p->abstime; /* const struct timespec * */ *n_args = 2; break; } /* thr_suspend */ case 442: { struct thr_suspend_args *p = params; uarg[0] = (intptr_t) p->timeout; /* const struct timespec * */ *n_args = 1; break; } /* thr_wake */ case 443: { struct thr_wake_args *p = params; iarg[0] = p->id; /* long */ *n_args = 1; break; } /* kldunloadf */ case 444: { struct kldunloadf_args *p = params; iarg[0] = p->fileid; /* int */ iarg[1] = p->flags; /* int */ *n_args = 2; break; } /* audit */ case 445: { struct audit_args *p = params; uarg[0] = (intptr_t) p->record; /* const void * */ uarg[1] = p->length; /* u_int */ *n_args = 2; break; } /* auditon */ case 446: { struct auditon_args *p = params; iarg[0] = p->cmd; /* int */ uarg[1] = (intptr_t) p->data; /* void * */ uarg[2] = p->length; /* u_int */ *n_args = 3; break; } /* getauid */ case 447: { struct getauid_args *p = params; uarg[0] = (intptr_t) p->auid; /* uid_t * */ *n_args = 1; break; } /* setauid */ case 448: { struct setauid_args *p = params; uarg[0] = (intptr_t) p->auid; /* uid_t * */ *n_args = 1; break; } /* getaudit */ case 449: { struct getaudit_args *p = params; uarg[0] = (intptr_t) p->auditinfo; /* struct auditinfo * */ *n_args = 1; break; } /* setaudit */ case 450: { struct setaudit_args *p = params; uarg[0] = (intptr_t) p->auditinfo; /* struct auditinfo * */ *n_args = 1; break; } /* getaudit_addr */ case 451: { struct getaudit_addr_args *p = params; uarg[0] = (intptr_t) p->auditinfo_addr; /* struct auditinfo_addr * */ uarg[1] = p->length; /* u_int */ *n_args = 2; break; } /* setaudit_addr */ case 452: { struct setaudit_addr_args *p = params; uarg[0] = (intptr_t) p->auditinfo_addr; /* struct auditinfo_addr * */ uarg[1] = p->length; /* u_int */ *n_args = 2; break; } /* auditctl */ case 453: { struct auditctl_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* _umtx_op */ case 454: { struct _umtx_op_args *p = params; uarg[0] = (intptr_t) p->obj; /* void * */ iarg[1] = p->op; /* int */ uarg[2] = p->val; /* u_long */ uarg[3] = (intptr_t) p->uaddr1; /* void * */ uarg[4] = (intptr_t) p->uaddr2; /* void * */ *n_args = 5; break; } /* thr_new */ case 455: { struct thr_new_args *p = params; uarg[0] = (intptr_t) p->param; /* struct thr_param * */ iarg[1] = p->param_size; /* int */ *n_args = 2; break; } /* sigqueue */ case 456: { struct sigqueue_args *p = params; iarg[0] = p->pid; /* pid_t */ iarg[1] = p->signum; /* int */ uarg[2] = (intptr_t) p->value; /* void * */ *n_args = 3; break; } /* kmq_open */ case 457: { struct kmq_open_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->flags; /* int */ iarg[2] = p->mode; /* mode_t */ uarg[3] = (intptr_t) p->attr; /* const struct mq_attr * */ *n_args = 4; break; } /* kmq_setattr */ case 458: { struct kmq_setattr_args *p = params; iarg[0] = p->mqd; /* int */ uarg[1] = (intptr_t) p->attr; /* const struct mq_attr * */ uarg[2] = (intptr_t) p->oattr; /* struct mq_attr * */ *n_args = 3; break; } /* kmq_timedreceive */ case 459: { struct kmq_timedreceive_args *p = params; iarg[0] = p->mqd; 
/* int */ uarg[1] = (intptr_t) p->msg_ptr; /* char * */ uarg[2] = p->msg_len; /* size_t */ uarg[3] = (intptr_t) p->msg_prio; /* unsigned * */ uarg[4] = (intptr_t) p->abs_timeout; /* const struct timespec * */ *n_args = 5; break; } /* kmq_timedsend */ case 460: { struct kmq_timedsend_args *p = params; iarg[0] = p->mqd; /* int */ uarg[1] = (intptr_t) p->msg_ptr; /* const char * */ uarg[2] = p->msg_len; /* size_t */ uarg[3] = p->msg_prio; /* unsigned */ uarg[4] = (intptr_t) p->abs_timeout; /* const struct timespec * */ *n_args = 5; break; } /* kmq_notify */ case 461: { struct kmq_notify_args *p = params; iarg[0] = p->mqd; /* int */ uarg[1] = (intptr_t) p->sigev; /* const struct sigevent * */ *n_args = 2; break; } /* kmq_unlink */ case 462: { struct kmq_unlink_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ *n_args = 1; break; } /* abort2 */ case 463: { struct abort2_args *p = params; uarg[0] = (intptr_t) p->why; /* const char * */ iarg[1] = p->nargs; /* int */ uarg[2] = (intptr_t) p->args; /* void ** */ *n_args = 3; break; } /* thr_set_name */ case 464: { struct thr_set_name_args *p = params; iarg[0] = p->id; /* long */ uarg[1] = (intptr_t) p->name; /* const char * */ *n_args = 2; break; } /* aio_fsync */ case 465: { struct aio_fsync_args *p = params; iarg[0] = p->op; /* int */ uarg[1] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 2; break; } /* rtprio_thread */ case 466: { struct rtprio_thread_args *p = params; iarg[0] = p->function; /* int */ iarg[1] = p->lwpid; /* lwpid_t */ uarg[2] = (intptr_t) p->rtp; /* struct rtprio * */ *n_args = 3; break; } /* sctp_peeloff */ case 471: { struct sctp_peeloff_args *p = params; iarg[0] = p->sd; /* int */ uarg[1] = p->name; /* uint32_t */ *n_args = 2; break; } /* sctp_generic_sendmsg */ case 472: { struct sctp_generic_sendmsg_args *p = params; iarg[0] = p->sd; /* int */ uarg[1] = (intptr_t) p->msg; /* caddr_t */ iarg[2] = p->mlen; /* int */ uarg[3] = (intptr_t) p->to; /* caddr_t */ iarg[4] = p->tolen; /* __socklen_t */ uarg[5] = (intptr_t) p->sinfo; /* struct sctp_sndrcvinfo * */ iarg[6] = p->flags; /* int */ *n_args = 7; break; } /* sctp_generic_sendmsg_iov */ case 473: { struct sctp_generic_sendmsg_iov_args *p = params; iarg[0] = p->sd; /* int */ uarg[1] = (intptr_t) p->iov; /* struct iovec * */ iarg[2] = p->iovlen; /* int */ uarg[3] = (intptr_t) p->to; /* caddr_t */ iarg[4] = p->tolen; /* __socklen_t */ uarg[5] = (intptr_t) p->sinfo; /* struct sctp_sndrcvinfo * */ iarg[6] = p->flags; /* int */ *n_args = 7; break; } /* sctp_generic_recvmsg */ case 474: { struct sctp_generic_recvmsg_args *p = params; iarg[0] = p->sd; /* int */ uarg[1] = (intptr_t) p->iov; /* struct iovec * */ iarg[2] = p->iovlen; /* int */ uarg[3] = (intptr_t) p->from; /* struct sockaddr * */ uarg[4] = (intptr_t) p->fromlenaddr; /* __socklen_t * */ uarg[5] = (intptr_t) p->sinfo; /* struct sctp_sndrcvinfo * */ uarg[6] = (intptr_t) p->msg_flags; /* int * */ *n_args = 7; break; } /* pread */ case 475: { struct pread_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->buf; /* void * */ uarg[2] = p->nbyte; /* size_t */ iarg[3] = p->offset; /* off_t */ *n_args = 4; break; } /* pwrite */ case 476: { struct pwrite_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->buf; /* const void * */ uarg[2] = p->nbyte; /* size_t */ iarg[3] = p->offset; /* off_t */ *n_args = 4; break; } /* mmap */ case 477: { struct mmap_args *p = params; uarg[0] = (intptr_t) p->addr; /* caddr_t */ uarg[1] = p->len; /* size_t */ iarg[2] = p->prot; /* int */ 
iarg[3] = p->flags; /* int */ iarg[4] = p->fd; /* int */ iarg[5] = p->pos; /* off_t */ *n_args = 6; break; } /* lseek */ case 478: { struct lseek_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->offset; /* off_t */ iarg[2] = p->whence; /* int */ *n_args = 3; break; } /* truncate */ case 479: { struct truncate_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->length; /* off_t */ *n_args = 2; break; } /* ftruncate */ case 480: { struct ftruncate_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->length; /* off_t */ *n_args = 2; break; } /* thr_kill2 */ case 481: { struct thr_kill2_args *p = params; iarg[0] = p->pid; /* pid_t */ iarg[1] = p->id; /* long */ iarg[2] = p->sig; /* int */ *n_args = 3; break; } /* shm_open */ case 482: { struct shm_open_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ iarg[1] = p->flags; /* int */ iarg[2] = p->mode; /* mode_t */ *n_args = 3; break; } /* shm_unlink */ case 483: { struct shm_unlink_args *p = params; uarg[0] = (intptr_t) p->path; /* const char * */ *n_args = 1; break; } /* cpuset */ case 484: { struct cpuset_args *p = params; uarg[0] = (intptr_t) p->setid; /* cpusetid_t * */ *n_args = 1; break; } /* cpuset_setid */ case 485: { struct cpuset_setid_args *p = params; iarg[0] = p->which; /* cpuwhich_t */ iarg[1] = p->id; /* id_t */ iarg[2] = p->setid; /* cpusetid_t */ *n_args = 3; break; } /* cpuset_getid */ case 486: { struct cpuset_getid_args *p = params; iarg[0] = p->level; /* cpulevel_t */ iarg[1] = p->which; /* cpuwhich_t */ iarg[2] = p->id; /* id_t */ uarg[3] = (intptr_t) p->setid; /* cpusetid_t * */ *n_args = 4; break; } /* cpuset_getaffinity */ case 487: { struct cpuset_getaffinity_args *p = params; iarg[0] = p->level; /* cpulevel_t */ iarg[1] = p->which; /* cpuwhich_t */ iarg[2] = p->id; /* id_t */ uarg[3] = p->cpusetsize; /* size_t */ uarg[4] = (intptr_t) p->mask; /* cpuset_t * */ *n_args = 5; break; } /* cpuset_setaffinity */ case 488: { struct cpuset_setaffinity_args *p = params; iarg[0] = p->level; /* cpulevel_t */ iarg[1] = p->which; /* cpuwhich_t */ iarg[2] = p->id; /* id_t */ uarg[3] = p->cpusetsize; /* size_t */ uarg[4] = (intptr_t) p->mask; /* const cpuset_t * */ *n_args = 5; break; } /* faccessat */ case 489: { struct faccessat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->amode; /* int */ iarg[3] = p->flag; /* int */ *n_args = 4; break; } /* fchmodat */ case 490: { struct fchmodat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->mode; /* mode_t */ iarg[3] = p->flag; /* int */ *n_args = 4; break; } /* fchownat */ case 491: { struct fchownat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ uarg[2] = p->uid; /* uid_t */ iarg[3] = p->gid; /* gid_t */ iarg[4] = p->flag; /* int */ *n_args = 5; break; } /* fexecve */ case 492: { struct fexecve_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->argv; /* char ** */ uarg[2] = (intptr_t) p->envv; /* char ** */ *n_args = 3; break; } /* futimesat */ case 494: { struct futimesat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ uarg[2] = (intptr_t) p->times; /* struct timeval * */ *n_args = 3; break; } /* linkat */ case 495: { struct linkat_args *p = params; iarg[0] = p->fd1; /* int */ uarg[1] = (intptr_t) p->path1; /* char * */ iarg[2] = p->fd2; /* int */ uarg[3] = (intptr_t) p->path2; /* char * */ iarg[4] = p->flag; /* int */ *n_args = 5; 
break; } /* mkdirat */ case 496: { struct mkdirat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->mode; /* mode_t */ *n_args = 3; break; } /* mkfifoat */ case 497: { struct mkfifoat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->mode; /* mode_t */ *n_args = 3; break; } /* openat */ case 499: { struct openat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->flag; /* int */ iarg[3] = p->mode; /* mode_t */ *n_args = 4; break; } /* readlinkat */ case 500: { struct readlinkat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ uarg[2] = (intptr_t) p->buf; /* char * */ uarg[3] = p->bufsize; /* size_t */ *n_args = 4; break; } /* renameat */ case 501: { struct renameat_args *p = params; iarg[0] = p->oldfd; /* int */ uarg[1] = (intptr_t) p->old; /* char * */ iarg[2] = p->newfd; /* int */ uarg[3] = (intptr_t) p->new; /* char * */ *n_args = 4; break; } /* symlinkat */ case 502: { struct symlinkat_args *p = params; uarg[0] = (intptr_t) p->path1; /* char * */ iarg[1] = p->fd; /* int */ uarg[2] = (intptr_t) p->path2; /* char * */ *n_args = 3; break; } /* unlinkat */ case 503: { struct unlinkat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->flag; /* int */ *n_args = 3; break; } /* posix_openpt */ case 504: { struct posix_openpt_args *p = params; iarg[0] = p->flags; /* int */ *n_args = 1; break; } /* gssd_syscall */ case 505: { struct gssd_syscall_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ *n_args = 1; break; } /* jail_get */ case 506: { struct jail_get_args *p = params; uarg[0] = (intptr_t) p->iovp; /* struct iovec * */ uarg[1] = p->iovcnt; /* unsigned int */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* jail_set */ case 507: { struct jail_set_args *p = params; uarg[0] = (intptr_t) p->iovp; /* struct iovec * */ uarg[1] = p->iovcnt; /* unsigned int */ iarg[2] = p->flags; /* int */ *n_args = 3; break; } /* jail_remove */ case 508: { struct jail_remove_args *p = params; iarg[0] = p->jid; /* int */ *n_args = 1; break; } /* closefrom */ case 509: { struct closefrom_args *p = params; iarg[0] = p->lowfd; /* int */ *n_args = 1; break; } /* __semctl */ case 510: { struct __semctl_args *p = params; iarg[0] = p->semid; /* int */ iarg[1] = p->semnum; /* int */ iarg[2] = p->cmd; /* int */ uarg[3] = (intptr_t) p->arg; /* union semun * */ *n_args = 4; break; } /* msgctl */ case 511: { struct msgctl_args *p = params; iarg[0] = p->msqid; /* int */ iarg[1] = p->cmd; /* int */ uarg[2] = (intptr_t) p->buf; /* struct msqid_ds * */ *n_args = 3; break; } /* shmctl */ case 512: { struct shmctl_args *p = params; iarg[0] = p->shmid; /* int */ iarg[1] = p->cmd; /* int */ uarg[2] = (intptr_t) p->buf; /* struct shmid_ds * */ *n_args = 3; break; } /* lpathconf */ case 513: { struct lpathconf_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ iarg[1] = p->name; /* int */ *n_args = 2; break; } /* __cap_rights_get */ case 515: { struct __cap_rights_get_args *p = params; iarg[0] = p->version; /* int */ iarg[1] = p->fd; /* int */ uarg[2] = (intptr_t) p->rightsp; /* cap_rights_t * */ *n_args = 3; break; } /* cap_enter */ case 516: { *n_args = 0; break; } /* cap_getmode */ case 517: { struct cap_getmode_args *p = params; uarg[0] = (intptr_t) p->modep; /* u_int * */ *n_args = 1; break; } /* pdfork */ case 518: { struct pdfork_args *p = params; uarg[0] = 
(intptr_t) p->fdp; /* int * */ iarg[1] = p->flags; /* int */ *n_args = 2; break; } /* pdkill */ case 519: { struct pdkill_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->signum; /* int */ *n_args = 2; break; } /* pdgetpid */ case 520: { struct pdgetpid_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->pidp; /* pid_t * */ *n_args = 2; break; } /* pselect */ case 522: { struct pselect_args *p = params; iarg[0] = p->nd; /* int */ uarg[1] = (intptr_t) p->in; /* fd_set * */ uarg[2] = (intptr_t) p->ou; /* fd_set * */ uarg[3] = (intptr_t) p->ex; /* fd_set * */ uarg[4] = (intptr_t) p->ts; /* const struct timespec * */ uarg[5] = (intptr_t) p->sm; /* const sigset_t * */ *n_args = 6; break; } /* getloginclass */ case 523: { struct getloginclass_args *p = params; uarg[0] = (intptr_t) p->namebuf; /* char * */ uarg[1] = p->namelen; /* size_t */ *n_args = 2; break; } /* setloginclass */ case 524: { struct setloginclass_args *p = params; uarg[0] = (intptr_t) p->namebuf; /* const char * */ *n_args = 1; break; } /* rctl_get_racct */ case 525: { struct rctl_get_racct_args *p = params; uarg[0] = (intptr_t) p->inbufp; /* const void * */ uarg[1] = p->inbuflen; /* size_t */ uarg[2] = (intptr_t) p->outbufp; /* void * */ uarg[3] = p->outbuflen; /* size_t */ *n_args = 4; break; } /* rctl_get_rules */ case 526: { struct rctl_get_rules_args *p = params; uarg[0] = (intptr_t) p->inbufp; /* const void * */ uarg[1] = p->inbuflen; /* size_t */ uarg[2] = (intptr_t) p->outbufp; /* void * */ uarg[3] = p->outbuflen; /* size_t */ *n_args = 4; break; } /* rctl_get_limits */ case 527: { struct rctl_get_limits_args *p = params; uarg[0] = (intptr_t) p->inbufp; /* const void * */ uarg[1] = p->inbuflen; /* size_t */ uarg[2] = (intptr_t) p->outbufp; /* void * */ uarg[3] = p->outbuflen; /* size_t */ *n_args = 4; break; } /* rctl_add_rule */ case 528: { struct rctl_add_rule_args *p = params; uarg[0] = (intptr_t) p->inbufp; /* const void * */ uarg[1] = p->inbuflen; /* size_t */ uarg[2] = (intptr_t) p->outbufp; /* void * */ uarg[3] = p->outbuflen; /* size_t */ *n_args = 4; break; } /* rctl_remove_rule */ case 529: { struct rctl_remove_rule_args *p = params; uarg[0] = (intptr_t) p->inbufp; /* const void * */ uarg[1] = p->inbuflen; /* size_t */ uarg[2] = (intptr_t) p->outbufp; /* void * */ uarg[3] = p->outbuflen; /* size_t */ *n_args = 4; break; } /* posix_fallocate */ case 530: { struct posix_fallocate_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->offset; /* off_t */ iarg[2] = p->len; /* off_t */ *n_args = 3; break; } /* posix_fadvise */ case 531: { struct posix_fadvise_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->offset; /* off_t */ iarg[2] = p->len; /* off_t */ iarg[3] = p->advice; /* int */ *n_args = 4; break; } /* wait6 */ case 532: { struct wait6_args *p = params; iarg[0] = p->idtype; /* idtype_t */ iarg[1] = p->id; /* id_t */ uarg[2] = (intptr_t) p->status; /* int * */ iarg[3] = p->options; /* int */ uarg[4] = (intptr_t) p->wrusage; /* struct __wrusage * */ uarg[5] = (intptr_t) p->info; /* siginfo_t * */ *n_args = 6; break; } /* cap_rights_limit */ case 533: { struct cap_rights_limit_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->rightsp; /* cap_rights_t * */ *n_args = 2; break; } /* cap_ioctls_limit */ case 534: { struct cap_ioctls_limit_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->cmds; /* const u_long * */ uarg[2] = p->ncmds; /* size_t */ *n_args = 3; break; } /* cap_ioctls_get */ case 535: { struct cap_ioctls_get_args *p = params; 
iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->cmds; /* u_long * */ uarg[2] = p->maxcmds; /* size_t */ *n_args = 3; break; } /* cap_fcntls_limit */ case 536: { struct cap_fcntls_limit_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = p->fcntlrights; /* uint32_t */ *n_args = 2; break; } /* cap_fcntls_get */ case 537: { struct cap_fcntls_get_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->fcntlrightsp; /* uint32_t * */ *n_args = 2; break; } /* bindat */ case 538: { struct bindat_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->s; /* int */ uarg[2] = (intptr_t) p->name; /* caddr_t */ iarg[3] = p->namelen; /* int */ *n_args = 4; break; } /* connectat */ case 539: { struct connectat_args *p = params; iarg[0] = p->fd; /* int */ iarg[1] = p->s; /* int */ uarg[2] = (intptr_t) p->name; /* caddr_t */ iarg[3] = p->namelen; /* int */ *n_args = 4; break; } /* chflagsat */ case 540: { struct chflagsat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* const char * */ uarg[2] = p->flags; /* u_long */ iarg[3] = p->atflag; /* int */ *n_args = 4; break; } /* accept4 */ case 541: { struct accept4_args *p = params; iarg[0] = p->s; /* int */ uarg[1] = (intptr_t) p->name; /* struct sockaddr * */ uarg[2] = (intptr_t) p->anamelen; /* __socklen_t * */ iarg[3] = p->flags; /* int */ *n_args = 4; break; } /* pipe2 */ case 542: { struct pipe2_args *p = params; uarg[0] = (intptr_t) p->fildes; /* int * */ iarg[1] = p->flags; /* int */ *n_args = 2; break; } /* aio_mlock */ case 543: { struct aio_mlock_args *p = params; uarg[0] = (intptr_t) p->aiocbp; /* struct aiocb * */ *n_args = 1; break; } /* procctl */ case 544: { struct procctl_args *p = params; iarg[0] = p->idtype; /* idtype_t */ iarg[1] = p->id; /* id_t */ iarg[2] = p->com; /* int */ uarg[3] = (intptr_t) p->data; /* void * */ *n_args = 4; break; } /* ppoll */ case 545: { struct ppoll_args *p = params; uarg[0] = (intptr_t) p->fds; /* struct pollfd * */ uarg[1] = p->nfds; /* u_int */ uarg[2] = (intptr_t) p->ts; /* const struct timespec * */ uarg[3] = (intptr_t) p->set; /* const sigset_t * */ *n_args = 4; break; } /* futimens */ case 546: { struct futimens_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->times; /* struct timespec * */ *n_args = 2; break; } /* utimensat */ case 547: { struct utimensat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ uarg[2] = (intptr_t) p->times; /* struct timespec * */ iarg[3] = p->flag; /* int */ *n_args = 4; break; } /* numa_getaffinity */ case 548: { struct numa_getaffinity_args *p = params; iarg[0] = p->which; /* cpuwhich_t */ iarg[1] = p->id; /* id_t */ uarg[2] = (intptr_t) p->policy; /* struct vm_domain_policy_entry * */ *n_args = 3; break; } /* numa_setaffinity */ case 549: { struct numa_setaffinity_args *p = params; iarg[0] = p->which; /* cpuwhich_t */ iarg[1] = p->id; /* id_t */ uarg[2] = (intptr_t) p->policy; /* const struct vm_domain_policy_entry * */ *n_args = 3; break; } /* fdatasync */ case 550: { struct fdatasync_args *p = params; iarg[0] = p->fd; /* int */ *n_args = 1; break; } /* fstat */ case 551: { struct fstat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->sb; /* struct stat * */ *n_args = 2; break; } /* fstatat */ case 552: { struct fstatat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ uarg[2] = (intptr_t) p->buf; /* struct stat * */ iarg[3] = p->flag; /* int */ *n_args = 4; break; } /* fhstat */ case 553: { struct fhstat_args 
*p = params; uarg[0] = (intptr_t) p->u_fhp; /* const struct fhandle * */ uarg[1] = (intptr_t) p->sb; /* struct stat * */ *n_args = 2; break; } /* getdirentries */ case 554: { struct getdirentries_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->buf; /* char * */ uarg[2] = p->count; /* size_t */ uarg[3] = (intptr_t) p->basep; /* off_t * */ *n_args = 4; break; } /* statfs */ case 555: { struct statfs_args *p = params; uarg[0] = (intptr_t) p->path; /* char * */ uarg[1] = (intptr_t) p->buf; /* struct statfs * */ *n_args = 2; break; } /* fstatfs */ case 556: { struct fstatfs_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->buf; /* struct statfs * */ *n_args = 2; break; } /* getfsstat */ case 557: { struct getfsstat_args *p = params; uarg[0] = (intptr_t) p->buf; /* struct statfs * */ iarg[1] = p->bufsize; /* long */ iarg[2] = p->mode; /* int */ *n_args = 3; break; } /* fhstatfs */ case 558: { struct fhstatfs_args *p = params; uarg[0] = (intptr_t) p->u_fhp; /* const struct fhandle * */ uarg[1] = (intptr_t) p->buf; /* struct statfs * */ *n_args = 2; break; } /* mknodat */ case 559: { struct mknodat_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->path; /* char * */ iarg[2] = p->mode; /* mode_t */ iarg[3] = p->dev; /* dev_t */ *n_args = 4; break; } /* kevent */ case 560: { struct kevent_args *p = params; iarg[0] = p->fd; /* int */ uarg[1] = (intptr_t) p->changelist; /* struct kevent * */ iarg[2] = p->nchanges; /* int */ uarg[3] = (intptr_t) p->eventlist; /* struct kevent * */ iarg[4] = p->nevents; /* int */ uarg[5] = (intptr_t) p->timeout; /* const struct timespec * */ *n_args = 6; break; }
+	/* cpuset_getdomain */
+	case 561: {
+		struct cpuset_getdomain_args *p = params;
+		iarg[0] = p->level; /* cpulevel_t */
+		iarg[1] = p->which; /* cpuwhich_t */
+		iarg[2] = p->id; /* id_t */
+		uarg[3] = p->domainsetsize; /* size_t */
+		uarg[4] = (intptr_t) p->mask; /* domainset_t * */
+		uarg[5] = (intptr_t) p->policy; /* int * */
+		*n_args = 6;
+		break;
+	}
+	/* cpuset_setdomain */
+	case 562: {
+		struct cpuset_setdomain_args *p = params;
+		iarg[0] = p->level; /* cpulevel_t */
+		iarg[1] = p->which; /* cpuwhich_t */
+		iarg[2] = p->id; /* id_t */
+		uarg[3] = p->domainsetsize; /* size_t */
+		uarg[4] = (intptr_t) p->mask; /* domainset_t * */
+		iarg[5] = p->policy; /* int */
+		*n_args = 6;
+		break;
+	}
default: *n_args = 0; break; }; } static void systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz) { const char *p = NULL; switch (sysnum) { /* nosys */ case 0: break; /* sys_exit */ case 1: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* fork */ case 2: break; /* read */ case 3: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland void *"; break; case 2: p = "size_t"; break; default: break; }; break; /* write */ case 4: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const void *"; break; case 2: p = "size_t"; break; default: break; }; break; /* open */ case 5: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* close */ case 6: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* wait4 */ case 7: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland int *"; break; case 2: p = "int"; break; case 3: p = "userland struct rusage *"; break; default: break; }; break; /* link */ case 9: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char *"; break; default: break; };
break; /* unlink */ case 10: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* chdir */ case 12: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* fchdir */ case 13: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* chmod */ case 15: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* chown */ case 16: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* obreak */ case 17: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* getpid */ case 20: break; /* mount */ case 21: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; case 3: p = "caddr_t"; break; default: break; }; break; /* unmount */ case 22: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* setuid */ case 23: switch(ndx) { case 0: p = "uid_t"; break; default: break; }; break; /* getuid */ case 24: break; /* geteuid */ case 25: break; /* ptrace */ case 26: switch(ndx) { case 0: p = "int"; break; case 1: p = "pid_t"; break; case 2: p = "caddr_t"; break; case 3: p = "int"; break; default: break; }; break; /* recvmsg */ case 27: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct msghdr *"; break; case 2: p = "int"; break; default: break; }; break; /* sendmsg */ case 28: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct msghdr *"; break; case 2: p = "int"; break; default: break; }; break; /* recvfrom */ case 29: switch(ndx) { case 0: p = "int"; break; case 1: p = "caddr_t"; break; case 2: p = "size_t"; break; case 3: p = "int"; break; case 4: p = "userland struct sockaddr *"; break; case 5: p = "userland __socklen_t *"; break; default: break; }; break; /* accept */ case 30: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct sockaddr *"; break; case 2: p = "userland __socklen_t *"; break; default: break; }; break; /* getpeername */ case 31: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct sockaddr *"; break; case 2: p = "userland __socklen_t *"; break; default: break; }; break; /* getsockname */ case 32: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct sockaddr *"; break; case 2: p = "userland __socklen_t *"; break; default: break; }; break; /* access */ case 33: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* chflags */ case 34: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "u_long"; break; default: break; }; break; /* fchflags */ case 35: switch(ndx) { case 0: p = "int"; break; case 1: p = "u_long"; break; default: break; }; break; /* sync */ case 36: break; /* kill */ case 37: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* getppid */ case 39: break; /* dup */ case 41: switch(ndx) { case 0: p = "u_int"; break; default: break; }; break; /* getegid */ case 43: break; /* profil */ case 44: switch(ndx) { case 0: p = "caddr_t"; break; case 1: p = "size_t"; break; case 2: p = "size_t"; break; case 3: p = "u_int"; break; default: break; }; break; /* ktrace */ case 45: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "int"; break; default: break; }; break; /* getgid */ case 47: break; /* 
getlogin */ case 49: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "u_int"; break; default: break; }; break; /* setlogin */ case 50: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* acct */ case 51: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* sigaltstack */ case 53: switch(ndx) { case 0: p = "userland stack_t *"; break; case 1: p = "userland stack_t *"; break; default: break; }; break; /* ioctl */ case 54: switch(ndx) { case 0: p = "int"; break; case 1: p = "u_long"; break; case 2: p = "caddr_t"; break; default: break; }; break; /* reboot */ case 55: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* revoke */ case 56: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* symlink */ case 57: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char *"; break; default: break; }; break; /* readlink */ case 58: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char *"; break; case 2: p = "size_t"; break; default: break; }; break; /* execve */ case 59: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char **"; break; case 2: p = "userland char **"; break; default: break; }; break; /* umask */ case 60: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* chroot */ case 61: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* msync */ case 65: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; default: break; }; break; /* vfork */ case 66: break; /* sbrk */ case 69: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* sstk */ case 70: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* ovadvise */ case 72: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* munmap */ case 73: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "size_t"; break; default: break; }; break; /* mprotect */ case 74: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; default: break; }; break; /* madvise */ case 75: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; default: break; }; break; /* mincore */ case 78: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; case 2: p = "userland char *"; break; default: break; }; break; /* getgroups */ case 79: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "userland gid_t *"; break; default: break; }; break; /* setgroups */ case 80: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "userland gid_t *"; break; default: break; }; break; /* getpgrp */ case 81: break; /* setpgid */ case 82: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* setitimer */ case 83: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "userland struct itimerval *"; break; case 2: p = "userland struct itimerval *"; break; default: break; }; break; /* swapon */ case 85: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* getitimer */ case 86: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "userland struct itimerval *"; break; default: break; }; break; /* getdtablesize */ case 89: break; /* dup2 */ case 90: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "u_int"; break; default: break; }; break; /* fcntl */ 
case 92: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "long"; break; default: break; }; break; /* select */ case 93: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland fd_set *"; break; case 2: p = "userland fd_set *"; break; case 3: p = "userland fd_set *"; break; case 4: p = "userland struct timeval *"; break; default: break; }; break; /* fsync */ case 95: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* setpriority */ case 96: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* socket */ case 97: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* connect */ case 98: switch(ndx) { case 0: p = "int"; break; case 1: p = "caddr_t"; break; case 2: p = "int"; break; default: break; }; break; /* getpriority */ case 100: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* bind */ case 104: switch(ndx) { case 0: p = "int"; break; case 1: p = "caddr_t"; break; case 2: p = "int"; break; default: break; }; break; /* setsockopt */ case 105: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "caddr_t"; break; case 4: p = "int"; break; default: break; }; break; /* listen */ case 106: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* gettimeofday */ case 116: switch(ndx) { case 0: p = "userland struct timeval *"; break; case 1: p = "userland struct timezone *"; break; default: break; }; break; /* getrusage */ case 117: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct rusage *"; break; default: break; }; break; /* getsockopt */ case 118: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "caddr_t"; break; case 4: p = "userland int *"; break; default: break; }; break; /* readv */ case 120: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct iovec *"; break; case 2: p = "u_int"; break; default: break; }; break; /* writev */ case 121: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct iovec *"; break; case 2: p = "u_int"; break; default: break; }; break; /* settimeofday */ case 122: switch(ndx) { case 0: p = "userland struct timeval *"; break; case 1: p = "userland struct timezone *"; break; default: break; }; break; /* fchown */ case 123: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* fchmod */ case 124: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* setreuid */ case 126: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* setregid */ case 127: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* rename */ case 128: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char *"; break; default: break; }; break; /* flock */ case 131: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* mkfifo */ case 132: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* sendto */ case 133: switch(ndx) { case 0: p = "int"; break; case 1: p = "caddr_t"; break; case 2: p = "size_t"; break; case 3: p = "int"; break; case 4: p = "caddr_t"; break; case 5: p = "int"; 
break; default: break; }; break; /* shutdown */ case 134: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* socketpair */ case 135: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "userland int *"; break; default: break; }; break; /* mkdir */ case 136: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* rmdir */ case 137: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* utimes */ case 138: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland struct timeval *"; break; default: break; }; break; /* adjtime */ case 140: switch(ndx) { case 0: p = "userland struct timeval *"; break; case 1: p = "userland struct timeval *"; break; default: break; }; break; /* setsid */ case 147: break; /* quotactl */ case 148: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "caddr_t"; break; default: break; }; break; /* nlm_syscall */ case 154: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "userland char **"; break; default: break; }; break; /* nfssvc */ case 155: switch(ndx) { case 0: p = "int"; break; case 1: p = "caddr_t"; break; default: break; }; break; /* lgetfh */ case 160: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland struct fhandle *"; break; default: break; }; break; /* getfh */ case 161: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland struct fhandle *"; break; default: break; }; break; /* sysarch */ case 165: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; default: break; }; break; /* rtprio */ case 166: switch(ndx) { case 0: p = "int"; break; case 1: p = "pid_t"; break; case 2: p = "userland struct rtprio *"; break; default: break; }; break; /* semsys */ case 169: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "int"; break; case 4: p = "int"; break; default: break; }; break; /* msgsys */ case 170: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "int"; break; case 4: p = "int"; break; case 5: p = "int"; break; default: break; }; break; /* shmsys */ case 171: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "int"; break; default: break; }; break; /* setfib */ case 175: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* ntp_adjtime */ case 176: switch(ndx) { case 0: p = "userland struct timex *"; break; default: break; }; break; /* setgid */ case 181: switch(ndx) { case 0: p = "gid_t"; break; default: break; }; break; /* setegid */ case 182: switch(ndx) { case 0: p = "gid_t"; break; default: break; }; break; /* seteuid */ case 183: switch(ndx) { case 0: p = "uid_t"; break; default: break; }; break; /* pathconf */ case 191: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* fpathconf */ case 192: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* getrlimit */ case 194: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "userland struct rlimit *"; break; default: break; }; break; /* setrlimit */ case 195: switch(ndx) { case 0: p = "u_int"; break; case 1: p = "userland struct rlimit *"; break; default: break; }; 
break; /* nosys */ case 198: break; /* __sysctl */ case 202: switch(ndx) { case 0: p = "userland int *"; break; case 1: p = "u_int"; break; case 2: p = "userland void *"; break; case 3: p = "userland size_t *"; break; case 4: p = "userland void *"; break; case 5: p = "size_t"; break; default: break; }; break; /* mlock */ case 203: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; default: break; }; break; /* munlock */ case 204: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; default: break; }; break; /* undelete */ case 205: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* futimes */ case 206: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct timeval *"; break; default: break; }; break; /* getpgid */ case 207: switch(ndx) { case 0: p = "pid_t"; break; default: break; }; break; /* poll */ case 209: switch(ndx) { case 0: p = "userland struct pollfd *"; break; case 1: p = "u_int"; break; case 2: p = "int"; break; default: break; }; break; /* lkmnosys */ case 210: break; /* lkmnosys */ case 211: break; /* lkmnosys */ case 212: break; /* lkmnosys */ case 213: break; /* lkmnosys */ case 214: break; /* lkmnosys */ case 215: break; /* lkmnosys */ case 216: break; /* lkmnosys */ case 217: break; /* lkmnosys */ case 218: break; /* lkmnosys */ case 219: break; /* semget */ case 221: switch(ndx) { case 0: p = "key_t"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* semop */ case 222: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct sembuf *"; break; case 2: p = "size_t"; break; default: break; }; break; /* msgget */ case 225: switch(ndx) { case 0: p = "key_t"; break; case 1: p = "int"; break; default: break; }; break; /* msgsnd */ case 226: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const void *"; break; case 2: p = "size_t"; break; case 3: p = "int"; break; default: break; }; break; /* msgrcv */ case 227: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland void *"; break; case 2: p = "size_t"; break; case 3: p = "long"; break; case 4: p = "int"; break; default: break; }; break; /* shmat */ case 228: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const void *"; break; case 2: p = "int"; break; default: break; }; break; /* shmdt */ case 230: switch(ndx) { case 0: p = "userland const void *"; break; default: break; }; break; /* shmget */ case 231: switch(ndx) { case 0: p = "key_t"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; default: break; }; break; /* clock_gettime */ case 232: switch(ndx) { case 0: p = "clockid_t"; break; case 1: p = "userland struct timespec *"; break; default: break; }; break; /* clock_settime */ case 233: switch(ndx) { case 0: p = "clockid_t"; break; case 1: p = "userland const struct timespec *"; break; default: break; }; break; /* clock_getres */ case 234: switch(ndx) { case 0: p = "clockid_t"; break; case 1: p = "userland struct timespec *"; break; default: break; }; break; /* ktimer_create */ case 235: switch(ndx) { case 0: p = "clockid_t"; break; case 1: p = "userland struct sigevent *"; break; case 2: p = "userland int *"; break; default: break; }; break; /* ktimer_delete */ case 236: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* ktimer_settime */ case 237: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland const struct itimerspec *"; break; case 3: p = "userland struct 
itimerspec *"; break; default: break; }; break; /* ktimer_gettime */ case 238: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct itimerspec *"; break; default: break; }; break; /* ktimer_getoverrun */ case 239: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* nanosleep */ case 240: switch(ndx) { case 0: p = "userland const struct timespec *"; break; case 1: p = "userland struct timespec *"; break; default: break; }; break; /* ffclock_getcounter */ case 241: switch(ndx) { case 0: p = "userland ffcounter *"; break; default: break; }; break; /* ffclock_setestimate */ case 242: switch(ndx) { case 0: p = "userland struct ffclock_estimate *"; break; default: break; }; break; /* ffclock_getestimate */ case 243: switch(ndx) { case 0: p = "userland struct ffclock_estimate *"; break; default: break; }; break; /* clock_nanosleep */ case 244: switch(ndx) { case 0: p = "clockid_t"; break; case 1: p = "int"; break; case 2: p = "userland const struct timespec *"; break; case 3: p = "userland struct timespec *"; break; default: break; }; break; /* clock_getcpuclockid2 */ case 247: switch(ndx) { case 0: p = "id_t"; break; case 1: p = "int"; break; case 2: p = "userland clockid_t *"; break; default: break; }; break; /* ntp_gettime */ case 248: switch(ndx) { case 0: p = "userland struct ntptimeval *"; break; default: break; }; break; /* minherit */ case 250: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; default: break; }; break; /* rfork */ case 251: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* issetugid */ case 253: break; /* lchown */ case 254: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; case 2: p = "int"; break; default: break; }; break; /* aio_read */ case 255: switch(ndx) { case 0: p = "userland struct aiocb *"; break; default: break; }; break; /* aio_write */ case 256: switch(ndx) { case 0: p = "userland struct aiocb *"; break; default: break; }; break; /* lio_listio */ case 257: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct aiocb *const *"; break; case 2: p = "int"; break; case 3: p = "userland struct sigevent *"; break; default: break; }; break; /* lchmod */ case 274: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "mode_t"; break; default: break; }; break; /* lchown */ case 275: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "uid_t"; break; case 2: p = "gid_t"; break; default: break; }; break; /* lutimes */ case 276: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland struct timeval *"; break; default: break; }; break; /* msync */ case 277: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; default: break; }; break; /* preadv */ case 289: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct iovec *"; break; case 2: p = "u_int"; break; case 3: p = "off_t"; break; default: break; }; break; /* pwritev */ case 290: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct iovec *"; break; case 2: p = "u_int"; break; case 3: p = "off_t"; break; default: break; }; break; /* fhopen */ case 298: switch(ndx) { case 0: p = "userland const struct fhandle *"; break; case 1: p = "int"; break; default: break; }; break; /* modnext */ case 300: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* modstat */ case 301: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct 
module_stat *"; break; default: break; }; break; /* modfnext */ case 302: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* modfind */ case 303: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* kldload */ case 304: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* kldunload */ case 305: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* kldfind */ case 306: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* kldnext */ case 307: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* kldstat */ case 308: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct kld_file_stat *"; break; default: break; }; break; /* kldfirstmod */ case 309: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* getsid */ case 310: switch(ndx) { case 0: p = "pid_t"; break; default: break; }; break; /* setresuid */ case 311: switch(ndx) { case 0: p = "uid_t"; break; case 1: p = "uid_t"; break; case 2: p = "uid_t"; break; default: break; }; break; /* setresgid */ case 312: switch(ndx) { case 0: p = "gid_t"; break; case 1: p = "gid_t"; break; case 2: p = "gid_t"; break; default: break; }; break; /* aio_return */ case 314: switch(ndx) { case 0: p = "userland struct aiocb *"; break; default: break; }; break; /* aio_suspend */ case 315: switch(ndx) { case 0: p = "userland struct aiocb *const *"; break; case 1: p = "int"; break; case 2: p = "userland const struct timespec *"; break; default: break; }; break; /* aio_cancel */ case 316: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct aiocb *"; break; default: break; }; break; /* aio_error */ case 317: switch(ndx) { case 0: p = "userland struct aiocb *"; break; default: break; }; break; /* yield */ case 321: break; /* mlockall */ case 324: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* munlockall */ case 325: break; /* __getcwd */ case 326: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "size_t"; break; default: break; }; break; /* sched_setparam */ case 327: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "userland const struct sched_param *"; break; default: break; }; break; /* sched_getparam */ case 328: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "userland struct sched_param *"; break; default: break; }; break; /* sched_setscheduler */ case 329: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "int"; break; case 2: p = "userland const struct sched_param *"; break; default: break; }; break; /* sched_getscheduler */ case 330: switch(ndx) { case 0: p = "pid_t"; break; default: break; }; break; /* sched_yield */ case 331: break; /* sched_get_priority_max */ case 332: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* sched_get_priority_min */ case 333: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* sched_rr_get_interval */ case 334: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "userland struct timespec *"; break; default: break; }; break; /* utrace */ case 335: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; default: break; }; break; /* kldsym */ case 337: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland void *"; break; default: break; }; break; /* jail */ case 338: switch(ndx) { case 0: p = "userland struct jail *"; break; default: break; }; break; /* nnpfs_syscall */ case 339: 
switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; case 3: p = "userland void *"; break; case 4: p = "int"; break; default: break; }; break; /* sigprocmask */ case 340: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const sigset_t *"; break; case 2: p = "userland sigset_t *"; break; default: break; }; break; /* sigsuspend */ case 341: switch(ndx) { case 0: p = "userland const sigset_t *"; break; default: break; }; break; /* sigpending */ case 343: switch(ndx) { case 0: p = "userland sigset_t *"; break; default: break; }; break; /* sigtimedwait */ case 345: switch(ndx) { case 0: p = "userland const sigset_t *"; break; case 1: p = "userland siginfo_t *"; break; case 2: p = "userland const struct timespec *"; break; default: break; }; break; /* sigwaitinfo */ case 346: switch(ndx) { case 0: p = "userland const sigset_t *"; break; case 1: p = "userland siginfo_t *"; break; default: break; }; break; /* __acl_get_file */ case 347: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* __acl_set_file */ case 348: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* __acl_get_fd */ case 349: switch(ndx) { case 0: p = "int"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* __acl_set_fd */ case 350: switch(ndx) { case 0: p = "int"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* __acl_delete_file */ case 351: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; default: break; }; break; /* __acl_delete_fd */ case 352: switch(ndx) { case 0: p = "int"; break; case 1: p = "acl_type_t"; break; default: break; }; break; /* __acl_aclcheck_file */ case 353: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* __acl_aclcheck_fd */ case 354: switch(ndx) { case 0: p = "int"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* extattrctl */ case 355: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "int"; break; case 4: p = "userland const char *"; break; default: break; }; break; /* extattr_set_file */ case 356: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "userland void *"; break; case 4: p = "size_t"; break; default: break; }; break; /* extattr_get_file */ case 357: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "userland void *"; break; case 4: p = "size_t"; break; default: break; }; break; /* extattr_delete_file */ case 358: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; default: break; }; break; /* aio_waitcomplete */ case 359: switch(ndx) { case 0: p = "userland struct aiocb **"; break; case 1: p = "userland struct timespec *"; break; default: break; }; break; /* getresuid */ case 360: switch(ndx) { case 0: p = "userland uid_t *"; 
break; case 1: p = "userland uid_t *"; break; case 2: p = "userland uid_t *"; break; default: break; }; break; /* getresgid */ case 361: switch(ndx) { case 0: p = "userland gid_t *"; break; case 1: p = "userland gid_t *"; break; case 2: p = "userland gid_t *"; break; default: break; }; break; /* kqueue */ case 362: break; /* extattr_set_fd */ case 371: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "userland void *"; break; case 4: p = "size_t"; break; default: break; }; break; /* extattr_get_fd */ case 372: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "userland void *"; break; case 4: p = "size_t"; break; default: break; }; break; /* extattr_delete_fd */ case 373: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; default: break; }; break; /* __setugid */ case 374: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* eaccess */ case 376: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* afs3_syscall */ case 377: switch(ndx) { case 0: p = "long"; break; case 1: p = "long"; break; case 2: p = "long"; break; case 3: p = "long"; break; case 4: p = "long"; break; case 5: p = "long"; break; case 6: p = "long"; break; default: break; }; break; /* nmount */ case 378: switch(ndx) { case 0: p = "userland struct iovec *"; break; case 1: p = "unsigned int"; break; case 2: p = "int"; break; default: break; }; break; /* __mac_get_proc */ case 384: switch(ndx) { case 0: p = "userland struct mac *"; break; default: break; }; break; /* __mac_set_proc */ case 385: switch(ndx) { case 0: p = "userland struct mac *"; break; default: break; }; break; /* __mac_get_fd */ case 386: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* __mac_get_file */ case 387: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* __mac_set_fd */ case 388: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* __mac_set_file */ case 389: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* kenv */ case 390: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const char *"; break; case 2: p = "userland char *"; break; case 3: p = "int"; break; default: break; }; break; /* lchflags */ case 391: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "u_long"; break; default: break; }; break; /* uuidgen */ case 392: switch(ndx) { case 0: p = "userland struct uuid *"; break; case 1: p = "int"; break; default: break; }; break; /* sendfile */ case 393: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "off_t"; break; case 3: p = "size_t"; break; case 4: p = "userland struct sf_hdtr *"; break; case 5: p = "userland off_t *"; break; case 6: p = "int"; break; default: break; }; break; /* mac_syscall */ case 394: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland void *"; break; default: break; }; break; /* ksem_close */ case 400: switch(ndx) { case 0: p = "semid_t"; break; default: break; }; break; /* ksem_post */ case 401: switch(ndx) { case 0: p = "semid_t"; break; default: break; }; 
break; /* ksem_wait */ case 402: switch(ndx) { case 0: p = "semid_t"; break; default: break; }; break; /* ksem_trywait */ case 403: switch(ndx) { case 0: p = "semid_t"; break; default: break; }; break; /* ksem_init */ case 404: switch(ndx) { case 0: p = "userland semid_t *"; break; case 1: p = "unsigned int"; break; default: break; }; break; /* ksem_open */ case 405: switch(ndx) { case 0: p = "userland semid_t *"; break; case 1: p = "userland const char *"; break; case 2: p = "int"; break; case 3: p = "mode_t"; break; case 4: p = "unsigned int"; break; default: break; }; break; /* ksem_unlink */ case 406: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* ksem_getvalue */ case 407: switch(ndx) { case 0: p = "semid_t"; break; case 1: p = "userland int *"; break; default: break; }; break; /* ksem_destroy */ case 408: switch(ndx) { case 0: p = "semid_t"; break; default: break; }; break; /* __mac_get_pid */ case 409: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* __mac_get_link */ case 410: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* __mac_set_link */ case 411: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "userland struct mac *"; break; default: break; }; break; /* extattr_set_link */ case 412: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "userland void *"; break; case 4: p = "size_t"; break; default: break; }; break; /* extattr_get_link */ case 413: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; case 3: p = "userland void *"; break; case 4: p = "size_t"; break; default: break; }; break; /* extattr_delete_link */ case 414: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland const char *"; break; default: break; }; break; /* __mac_execve */ case 415: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland char **"; break; case 2: p = "userland char **"; break; case 3: p = "userland struct mac *"; break; default: break; }; break; /* sigaction */ case 416: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const struct sigaction *"; break; case 2: p = "userland struct sigaction *"; break; default: break; }; break; /* sigreturn */ case 417: switch(ndx) { case 0: p = "userland const struct __ucontext *"; break; default: break; }; break; /* getcontext */ case 421: switch(ndx) { case 0: p = "userland struct __ucontext *"; break; default: break; }; break; /* setcontext */ case 422: switch(ndx) { case 0: p = "userland const struct __ucontext *"; break; default: break; }; break; /* swapcontext */ case 423: switch(ndx) { case 0: p = "userland struct __ucontext *"; break; case 1: p = "userland const struct __ucontext *"; break; default: break; }; break; /* swapoff */ case 424: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* __acl_get_link */ case 425: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* __acl_set_link */ case 426: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* 
__acl_delete_link */ case 427: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; default: break; }; break; /* __acl_aclcheck_link */ case 428: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "acl_type_t"; break; case 2: p = "userland struct acl *"; break; default: break; }; break; /* sigwait */ case 429: switch(ndx) { case 0: p = "userland const sigset_t *"; break; case 1: p = "userland int *"; break; default: break; }; break; /* thr_create */ case 430: switch(ndx) { case 0: p = "userland ucontext_t *"; break; case 1: p = "userland long *"; break; case 2: p = "int"; break; default: break; }; break; /* thr_exit */ case 431: switch(ndx) { case 0: p = "userland long *"; break; default: break; }; break; /* thr_self */ case 432: switch(ndx) { case 0: p = "userland long *"; break; default: break; }; break; /* thr_kill */ case 433: switch(ndx) { case 0: p = "long"; break; case 1: p = "int"; break; default: break; }; break; /* jail_attach */ case 436: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* extattr_list_fd */ case 437: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* extattr_list_file */ case 438: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* extattr_list_link */ case 439: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* ksem_timedwait */ case 441: switch(ndx) { case 0: p = "semid_t"; break; case 1: p = "userland const struct timespec *"; break; default: break; }; break; /* thr_suspend */ case 442: switch(ndx) { case 0: p = "userland const struct timespec *"; break; default: break; }; break; /* thr_wake */ case 443: switch(ndx) { case 0: p = "long"; break; default: break; }; break; /* kldunloadf */ case 444: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* audit */ case 445: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "u_int"; break; default: break; }; break; /* auditon */ case 446: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland void *"; break; case 2: p = "u_int"; break; default: break; }; break; /* getauid */ case 447: switch(ndx) { case 0: p = "userland uid_t *"; break; default: break; }; break; /* setauid */ case 448: switch(ndx) { case 0: p = "userland uid_t *"; break; default: break; }; break; /* getaudit */ case 449: switch(ndx) { case 0: p = "userland struct auditinfo *"; break; default: break; }; break; /* setaudit */ case 450: switch(ndx) { case 0: p = "userland struct auditinfo *"; break; default: break; }; break; /* getaudit_addr */ case 451: switch(ndx) { case 0: p = "userland struct auditinfo_addr *"; break; case 1: p = "u_int"; break; default: break; }; break; /* setaudit_addr */ case 452: switch(ndx) { case 0: p = "userland struct auditinfo_addr *"; break; case 1: p = "u_int"; break; default: break; }; break; /* auditctl */ case 453: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* _umtx_op */ case 454: switch(ndx) { case 0: p = "userland void *"; break; case 1: p = "int"; break; case 2: p = "u_long"; break; case 3: p = "userland void *"; break; case 4: p = "userland void *"; break; default: break; }; 
break; /* thr_new */ case 455: switch(ndx) { case 0: p = "userland struct thr_param *"; break; case 1: p = "int"; break; default: break; }; break; /* sigqueue */ case 456: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "int"; break; case 2: p = "userland void *"; break; default: break; }; break; /* kmq_open */ case 457: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "mode_t"; break; case 3: p = "userland const struct mq_attr *"; break; default: break; }; break; /* kmq_setattr */ case 458: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const struct mq_attr *"; break; case 2: p = "userland struct mq_attr *"; break; default: break; }; break; /* kmq_timedreceive */ case 459: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "size_t"; break; case 3: p = "userland unsigned *"; break; case 4: p = "userland const struct timespec *"; break; default: break; }; break; /* kmq_timedsend */ case 460: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const char *"; break; case 2: p = "size_t"; break; case 3: p = "unsigned"; break; case 4: p = "userland const struct timespec *"; break; default: break; }; break; /* kmq_notify */ case 461: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const struct sigevent *"; break; default: break; }; break; /* kmq_unlink */ case 462: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* abort2 */ case 463: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "userland void **"; break; default: break; }; break; /* thr_set_name */ case 464: switch(ndx) { case 0: p = "long"; break; case 1: p = "userland const char *"; break; default: break; }; break; /* aio_fsync */ case 465: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct aiocb *"; break; default: break; }; break; /* rtprio_thread */ case 466: switch(ndx) { case 0: p = "int"; break; case 1: p = "lwpid_t"; break; case 2: p = "userland struct rtprio *"; break; default: break; }; break; /* sctp_peeloff */ case 471: switch(ndx) { case 0: p = "int"; break; case 1: p = "uint32_t"; break; default: break; }; break; /* sctp_generic_sendmsg */ case 472: switch(ndx) { case 0: p = "int"; break; case 1: p = "caddr_t"; break; case 2: p = "int"; break; case 3: p = "caddr_t"; break; case 4: p = "__socklen_t"; break; case 5: p = "userland struct sctp_sndrcvinfo *"; break; case 6: p = "int"; break; default: break; }; break; /* sctp_generic_sendmsg_iov */ case 473: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct iovec *"; break; case 2: p = "int"; break; case 3: p = "caddr_t"; break; case 4: p = "__socklen_t"; break; case 5: p = "userland struct sctp_sndrcvinfo *"; break; case 6: p = "int"; break; default: break; }; break; /* sctp_generic_recvmsg */ case 474: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct iovec *"; break; case 2: p = "int"; break; case 3: p = "userland struct sockaddr *"; break; case 4: p = "userland __socklen_t *"; break; case 5: p = "userland struct sctp_sndrcvinfo *"; break; case 6: p = "userland int *"; break; default: break; }; break; /* pread */ case 475: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland void *"; break; case 2: p = "size_t"; break; case 3: p = "off_t"; break; default: break; }; break; /* pwrite */ case 476: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const void *"; break; case 2: p = "size_t"; break; 
case 3: p = "off_t"; break; default: break; }; break; /* mmap */ case 477: switch(ndx) { case 0: p = "caddr_t"; break; case 1: p = "size_t"; break; case 2: p = "int"; break; case 3: p = "int"; break; case 4: p = "int"; break; case 5: p = "off_t"; break; default: break; }; break; /* lseek */ case 478: switch(ndx) { case 0: p = "int"; break; case 1: p = "off_t"; break; case 2: p = "int"; break; default: break; }; break; /* truncate */ case 479: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "off_t"; break; default: break; }; break; /* ftruncate */ case 480: switch(ndx) { case 0: p = "int"; break; case 1: p = "off_t"; break; default: break; }; break; /* thr_kill2 */ case 481: switch(ndx) { case 0: p = "pid_t"; break; case 1: p = "long"; break; case 2: p = "int"; break; default: break; }; break; /* shm_open */ case 482: switch(ndx) { case 0: p = "userland const char *"; break; case 1: p = "int"; break; case 2: p = "mode_t"; break; default: break; }; break; /* shm_unlink */ case 483: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* cpuset */ case 484: switch(ndx) { case 0: p = "userland cpusetid_t *"; break; default: break; }; break; /* cpuset_setid */ case 485: switch(ndx) { case 0: p = "cpuwhich_t"; break; case 1: p = "id_t"; break; case 2: p = "cpusetid_t"; break; default: break; }; break; /* cpuset_getid */ case 486: switch(ndx) { case 0: p = "cpulevel_t"; break; case 1: p = "cpuwhich_t"; break; case 2: p = "id_t"; break; case 3: p = "userland cpusetid_t *"; break; default: break; }; break; /* cpuset_getaffinity */ case 487: switch(ndx) { case 0: p = "cpulevel_t"; break; case 1: p = "cpuwhich_t"; break; case 2: p = "id_t"; break; case 3: p = "size_t"; break; case 4: p = "userland cpuset_t *"; break; default: break; }; break; /* cpuset_setaffinity */ case 488: switch(ndx) { case 0: p = "cpulevel_t"; break; case 1: p = "cpuwhich_t"; break; case 2: p = "id_t"; break; case 3: p = "size_t"; break; case 4: p = "userland const cpuset_t *"; break; default: break; }; break; /* faccessat */ case 489: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; case 3: p = "int"; break; default: break; }; break; /* fchmodat */ case 490: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "mode_t"; break; case 3: p = "int"; break; default: break; }; break; /* fchownat */ case 491: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "uid_t"; break; case 3: p = "gid_t"; break; case 4: p = "int"; break; default: break; }; break; /* fexecve */ case 492: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char **"; break; case 2: p = "userland char **"; break; default: break; }; break; /* futimesat */ case 494: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "userland struct timeval *"; break; default: break; }; break; /* linkat */ case 495: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; case 3: p = "userland char *"; break; case 4: p = "int"; break; default: break; }; break; /* mkdirat */ case 496: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "mode_t"; break; default: break; }; break; /* mkfifoat */ case 497: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "mode_t"; break; default: break; }; break; /* openat */ case 499: switch(ndx) { case 0: 
p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; case 3: p = "mode_t"; break; default: break; }; break; /* readlinkat */ case 500: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "userland char *"; break; case 3: p = "size_t"; break; default: break; }; break; /* renameat */ case 501: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; case 3: p = "userland char *"; break; default: break; }; break; /* symlinkat */ case 502: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; case 2: p = "userland char *"; break; default: break; }; break; /* unlinkat */ case 503: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "int"; break; default: break; }; break; /* posix_openpt */ case 504: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* gssd_syscall */ case 505: switch(ndx) { case 0: p = "userland char *"; break; default: break; }; break; /* jail_get */ case 506: switch(ndx) { case 0: p = "userland struct iovec *"; break; case 1: p = "unsigned int"; break; case 2: p = "int"; break; default: break; }; break; /* jail_set */ case 507: switch(ndx) { case 0: p = "userland struct iovec *"; break; case 1: p = "unsigned int"; break; case 2: p = "int"; break; default: break; }; break; /* jail_remove */ case 508: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* closefrom */ case 509: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* __semctl */ case 510: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "int"; break; case 3: p = "userland union semun *"; break; default: break; }; break; /* msgctl */ case 511: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland struct msqid_ds *"; break; default: break; }; break; /* shmctl */ case 512: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland struct shmid_ds *"; break; default: break; }; break; /* lpathconf */ case 513: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "int"; break; default: break; }; break; /* __cap_rights_get */ case 515: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "userland cap_rights_t *"; break; default: break; }; break; /* cap_enter */ case 516: break; /* cap_getmode */ case 517: switch(ndx) { case 0: p = "userland u_int *"; break; default: break; }; break; /* pdfork */ case 518: switch(ndx) { case 0: p = "userland int *"; break; case 1: p = "int"; break; default: break; }; break; /* pdkill */ case 519: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; default: break; }; break; /* pdgetpid */ case 520: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland pid_t *"; break; default: break; }; break; /* pselect */ case 522: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland fd_set *"; break; case 2: p = "userland fd_set *"; break; case 3: p = "userland fd_set *"; break; case 4: p = "userland const struct timespec *"; break; case 5: p = "userland const sigset_t *"; break; default: break; }; break; /* getloginclass */ case 523: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "size_t"; break; default: break; }; break; /* setloginclass */ case 524: switch(ndx) { case 0: p = "userland const char *"; break; default: break; }; break; /* rctl_get_racct */ case 525: switch(ndx) { case 0: p = "userland const void 
*"; break; case 1: p = "size_t"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* rctl_get_rules */ case 526: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* rctl_get_limits */ case 527: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* rctl_add_rule */ case 528: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* rctl_remove_rule */ case 529: switch(ndx) { case 0: p = "userland const void *"; break; case 1: p = "size_t"; break; case 2: p = "userland void *"; break; case 3: p = "size_t"; break; default: break; }; break; /* posix_fallocate */ case 530: switch(ndx) { case 0: p = "int"; break; case 1: p = "off_t"; break; case 2: p = "off_t"; break; default: break; }; break; /* posix_fadvise */ case 531: switch(ndx) { case 0: p = "int"; break; case 1: p = "off_t"; break; case 2: p = "off_t"; break; case 3: p = "int"; break; default: break; }; break; /* wait6 */ case 532: switch(ndx) { case 0: p = "idtype_t"; break; case 1: p = "id_t"; break; case 2: p = "userland int *"; break; case 3: p = "int"; break; case 4: p = "userland struct __wrusage *"; break; case 5: p = "userland siginfo_t *"; break; default: break; }; break; /* cap_rights_limit */ case 533: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland cap_rights_t *"; break; default: break; }; break; /* cap_ioctls_limit */ case 534: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const u_long *"; break; case 2: p = "size_t"; break; default: break; }; break; /* cap_ioctls_get */ case 535: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland u_long *"; break; case 2: p = "size_t"; break; default: break; }; break; /* cap_fcntls_limit */ case 536: switch(ndx) { case 0: p = "int"; break; case 1: p = "uint32_t"; break; default: break; }; break; /* cap_fcntls_get */ case 537: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland uint32_t *"; break; default: break; }; break; /* bindat */ case 538: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "caddr_t"; break; case 3: p = "int"; break; default: break; }; break; /* connectat */ case 539: switch(ndx) { case 0: p = "int"; break; case 1: p = "int"; break; case 2: p = "caddr_t"; break; case 3: p = "int"; break; default: break; }; break; /* chflagsat */ case 540: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland const char *"; break; case 2: p = "u_long"; break; case 3: p = "int"; break; default: break; }; break; /* accept4 */ case 541: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct sockaddr *"; break; case 2: p = "userland __socklen_t *"; break; case 3: p = "int"; break; default: break; }; break; /* pipe2 */ case 542: switch(ndx) { case 0: p = "userland int *"; break; case 1: p = "int"; break; default: break; }; break; /* aio_mlock */ case 543: switch(ndx) { case 0: p = "userland struct aiocb *"; break; default: break; }; break; /* procctl */ case 544: switch(ndx) { case 0: p = "idtype_t"; break; case 1: p = "id_t"; break; case 2: p = "int"; break; case 3: p = "userland void *"; break; default: break; }; break; /* ppoll */ case 545: switch(ndx) { case 0: 
p = "userland struct pollfd *"; break; case 1: p = "u_int"; break; case 2: p = "userland const struct timespec *"; break; case 3: p = "userland const sigset_t *"; break; default: break; }; break; /* futimens */ case 546: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct timespec *"; break; default: break; }; break; /* utimensat */ case 547: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "userland struct timespec *"; break; case 3: p = "int"; break; default: break; }; break; /* numa_getaffinity */ case 548: switch(ndx) { case 0: p = "cpuwhich_t"; break; case 1: p = "id_t"; break; case 2: p = "userland struct vm_domain_policy_entry *"; break; default: break; }; break; /* numa_setaffinity */ case 549: switch(ndx) { case 0: p = "cpuwhich_t"; break; case 1: p = "id_t"; break; case 2: p = "userland const struct vm_domain_policy_entry *"; break; default: break; }; break; /* fdatasync */ case 550: switch(ndx) { case 0: p = "int"; break; default: break; }; break; /* fstat */ case 551: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct stat *"; break; default: break; }; break; /* fstatat */ case 552: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "userland struct stat *"; break; case 3: p = "int"; break; default: break; }; break; /* fhstat */ case 553: switch(ndx) { case 0: p = "userland const struct fhandle *"; break; case 1: p = "userland struct stat *"; break; default: break; }; break; /* getdirentries */ case 554: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "size_t"; break; case 3: p = "userland off_t *"; break; default: break; }; break; /* statfs */ case 555: switch(ndx) { case 0: p = "userland char *"; break; case 1: p = "userland struct statfs *"; break; default: break; }; break; /* fstatfs */ case 556: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct statfs *"; break; default: break; }; break; /* getfsstat */ case 557: switch(ndx) { case 0: p = "userland struct statfs *"; break; case 1: p = "long"; break; case 2: p = "int"; break; default: break; }; break; /* fhstatfs */ case 558: switch(ndx) { case 0: p = "userland const struct fhandle *"; break; case 1: p = "userland struct statfs *"; break; default: break; }; break; /* mknodat */ case 559: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland char *"; break; case 2: p = "mode_t"; break; case 3: p = "dev_t"; break; default: break; }; break; /* kevent */ case 560: switch(ndx) { case 0: p = "int"; break; case 1: p = "userland struct kevent *"; break; case 2: p = "int"; break; case 3: p = "userland struct kevent *"; break; case 4: p = "int"; break; case 5: p = "userland const struct timespec *"; break; default: break; }; break; + /* cpuset_getdomain */ + case 561: + switch(ndx) { + case 0: + p = "cpulevel_t"; + break; + case 1: + p = "cpuwhich_t"; + break; + case 2: + p = "id_t"; + break; + case 3: + p = "size_t"; + break; + case 4: + p = "userland domainset_t *"; + break; + case 5: + p = "userland int *"; + break; + default: + break; + }; + break; + /* cpuset_setdomain */ + case 562: + switch(ndx) { + case 0: + p = "cpulevel_t"; + break; + case 1: + p = "cpuwhich_t"; + break; + case 2: + p = "id_t"; + break; + case 3: + p = "size_t"; + break; + case 4: + p = "userland domainset_t *"; + break; + case 5: + p = "int"; + break; + default: + break; + }; + break; default: break; }; if (p != NULL) strlcpy(desc, p, descsz); } static void 
systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz) { const char *p = NULL; switch (sysnum) { /* nosys */ case 0: /* sys_exit */ case 1: if (ndx == 0 || ndx == 1) p = "void"; break; /* fork */ case 2: /* read */ case 3: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* write */ case 4: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* open */ case 5: if (ndx == 0 || ndx == 1) p = "int"; break; /* close */ case 6: if (ndx == 0 || ndx == 1) p = "int"; break; /* wait4 */ case 7: if (ndx == 0 || ndx == 1) p = "int"; break; /* link */ case 9: if (ndx == 0 || ndx == 1) p = "int"; break; /* unlink */ case 10: if (ndx == 0 || ndx == 1) p = "int"; break; /* chdir */ case 12: if (ndx == 0 || ndx == 1) p = "int"; break; /* fchdir */ case 13: if (ndx == 0 || ndx == 1) p = "int"; break; /* chmod */ case 15: if (ndx == 0 || ndx == 1) p = "int"; break; /* chown */ case 16: if (ndx == 0 || ndx == 1) p = "int"; break; /* obreak */ case 17: if (ndx == 0 || ndx == 1) p = "int"; break; /* getpid */ case 20: /* mount */ case 21: if (ndx == 0 || ndx == 1) p = "int"; break; /* unmount */ case 22: if (ndx == 0 || ndx == 1) p = "int"; break; /* setuid */ case 23: if (ndx == 0 || ndx == 1) p = "int"; break; /* getuid */ case 24: /* geteuid */ case 25: /* ptrace */ case 26: if (ndx == 0 || ndx == 1) p = "int"; break; /* recvmsg */ case 27: if (ndx == 0 || ndx == 1) p = "int"; break; /* sendmsg */ case 28: if (ndx == 0 || ndx == 1) p = "int"; break; /* recvfrom */ case 29: if (ndx == 0 || ndx == 1) p = "int"; break; /* accept */ case 30: if (ndx == 0 || ndx == 1) p = "int"; break; /* getpeername */ case 31: if (ndx == 0 || ndx == 1) p = "int"; break; /* getsockname */ case 32: if (ndx == 0 || ndx == 1) p = "int"; break; /* access */ case 33: if (ndx == 0 || ndx == 1) p = "int"; break; /* chflags */ case 34: if (ndx == 0 || ndx == 1) p = "int"; break; /* fchflags */ case 35: if (ndx == 0 || ndx == 1) p = "int"; break; /* sync */ case 36: /* kill */ case 37: if (ndx == 0 || ndx == 1) p = "int"; break; /* getppid */ case 39: /* dup */ case 41: if (ndx == 0 || ndx == 1) p = "int"; break; /* getegid */ case 43: /* profil */ case 44: if (ndx == 0 || ndx == 1) p = "int"; break; /* ktrace */ case 45: if (ndx == 0 || ndx == 1) p = "int"; break; /* getgid */ case 47: /* getlogin */ case 49: if (ndx == 0 || ndx == 1) p = "int"; break; /* setlogin */ case 50: if (ndx == 0 || ndx == 1) p = "int"; break; /* acct */ case 51: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigaltstack */ case 53: if (ndx == 0 || ndx == 1) p = "int"; break; /* ioctl */ case 54: if (ndx == 0 || ndx == 1) p = "int"; break; /* reboot */ case 55: if (ndx == 0 || ndx == 1) p = "int"; break; /* revoke */ case 56: if (ndx == 0 || ndx == 1) p = "int"; break; /* symlink */ case 57: if (ndx == 0 || ndx == 1) p = "int"; break; /* readlink */ case 58: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* execve */ case 59: if (ndx == 0 || ndx == 1) p = "int"; break; /* umask */ case 60: if (ndx == 0 || ndx == 1) p = "int"; break; /* chroot */ case 61: if (ndx == 0 || ndx == 1) p = "int"; break; /* msync */ case 65: if (ndx == 0 || ndx == 1) p = "int"; break; /* vfork */ case 66: /* sbrk */ case 69: if (ndx == 0 || ndx == 1) p = "int"; break; /* sstk */ case 70: if (ndx == 0 || ndx == 1) p = "int"; break; /* ovadvise */ case 72: if (ndx == 0 || ndx == 1) p = "int"; break; /* munmap */ case 73: if (ndx == 0 || ndx == 1) p = "int"; break; /* mprotect */ case 74: if (ndx == 0 || ndx == 1) p = "int"; break; /* madvise */ case 75: if (ndx == 0 || 
ndx == 1) p = "int"; break; /* mincore */ case 78: if (ndx == 0 || ndx == 1) p = "int"; break; /* getgroups */ case 79: if (ndx == 0 || ndx == 1) p = "int"; break; /* setgroups */ case 80: if (ndx == 0 || ndx == 1) p = "int"; break; /* getpgrp */ case 81: /* setpgid */ case 82: if (ndx == 0 || ndx == 1) p = "int"; break; /* setitimer */ case 83: if (ndx == 0 || ndx == 1) p = "int"; break; /* swapon */ case 85: if (ndx == 0 || ndx == 1) p = "int"; break; /* getitimer */ case 86: if (ndx == 0 || ndx == 1) p = "int"; break; /* getdtablesize */ case 89: /* dup2 */ case 90: if (ndx == 0 || ndx == 1) p = "int"; break; /* fcntl */ case 92: if (ndx == 0 || ndx == 1) p = "int"; break; /* select */ case 93: if (ndx == 0 || ndx == 1) p = "int"; break; /* fsync */ case 95: if (ndx == 0 || ndx == 1) p = "int"; break; /* setpriority */ case 96: if (ndx == 0 || ndx == 1) p = "int"; break; /* socket */ case 97: if (ndx == 0 || ndx == 1) p = "int"; break; /* connect */ case 98: if (ndx == 0 || ndx == 1) p = "int"; break; /* getpriority */ case 100: if (ndx == 0 || ndx == 1) p = "int"; break; /* bind */ case 104: if (ndx == 0 || ndx == 1) p = "int"; break; /* setsockopt */ case 105: if (ndx == 0 || ndx == 1) p = "int"; break; /* listen */ case 106: if (ndx == 0 || ndx == 1) p = "int"; break; /* gettimeofday */ case 116: if (ndx == 0 || ndx == 1) p = "int"; break; /* getrusage */ case 117: if (ndx == 0 || ndx == 1) p = "int"; break; /* getsockopt */ case 118: if (ndx == 0 || ndx == 1) p = "int"; break; /* readv */ case 120: if (ndx == 0 || ndx == 1) p = "int"; break; /* writev */ case 121: if (ndx == 0 || ndx == 1) p = "int"; break; /* settimeofday */ case 122: if (ndx == 0 || ndx == 1) p = "int"; break; /* fchown */ case 123: if (ndx == 0 || ndx == 1) p = "int"; break; /* fchmod */ case 124: if (ndx == 0 || ndx == 1) p = "int"; break; /* setreuid */ case 126: if (ndx == 0 || ndx == 1) p = "int"; break; /* setregid */ case 127: if (ndx == 0 || ndx == 1) p = "int"; break; /* rename */ case 128: if (ndx == 0 || ndx == 1) p = "int"; break; /* flock */ case 131: if (ndx == 0 || ndx == 1) p = "int"; break; /* mkfifo */ case 132: if (ndx == 0 || ndx == 1) p = "int"; break; /* sendto */ case 133: if (ndx == 0 || ndx == 1) p = "int"; break; /* shutdown */ case 134: if (ndx == 0 || ndx == 1) p = "int"; break; /* socketpair */ case 135: if (ndx == 0 || ndx == 1) p = "int"; break; /* mkdir */ case 136: if (ndx == 0 || ndx == 1) p = "int"; break; /* rmdir */ case 137: if (ndx == 0 || ndx == 1) p = "int"; break; /* utimes */ case 138: if (ndx == 0 || ndx == 1) p = "int"; break; /* adjtime */ case 140: if (ndx == 0 || ndx == 1) p = "int"; break; /* setsid */ case 147: /* quotactl */ case 148: if (ndx == 0 || ndx == 1) p = "int"; break; /* nlm_syscall */ case 154: if (ndx == 0 || ndx == 1) p = "int"; break; /* nfssvc */ case 155: if (ndx == 0 || ndx == 1) p = "int"; break; /* lgetfh */ case 160: if (ndx == 0 || ndx == 1) p = "int"; break; /* getfh */ case 161: if (ndx == 0 || ndx == 1) p = "int"; break; /* sysarch */ case 165: if (ndx == 0 || ndx == 1) p = "int"; break; /* rtprio */ case 166: if (ndx == 0 || ndx == 1) p = "int"; break; /* semsys */ case 169: if (ndx == 0 || ndx == 1) p = "int"; break; /* msgsys */ case 170: if (ndx == 0 || ndx == 1) p = "int"; break; /* shmsys */ case 171: if (ndx == 0 || ndx == 1) p = "int"; break; /* setfib */ case 175: if (ndx == 0 || ndx == 1) p = "int"; break; /* ntp_adjtime */ case 176: if (ndx == 0 || ndx == 1) p = "int"; break; /* setgid */ case 181: if (ndx == 0 || ndx == 1) p = 
"int"; break; /* setegid */ case 182: if (ndx == 0 || ndx == 1) p = "int"; break; /* seteuid */ case 183: if (ndx == 0 || ndx == 1) p = "int"; break; /* pathconf */ case 191: if (ndx == 0 || ndx == 1) p = "int"; break; /* fpathconf */ case 192: if (ndx == 0 || ndx == 1) p = "int"; break; /* getrlimit */ case 194: if (ndx == 0 || ndx == 1) p = "int"; break; /* setrlimit */ case 195: if (ndx == 0 || ndx == 1) p = "int"; break; /* nosys */ case 198: /* __sysctl */ case 202: if (ndx == 0 || ndx == 1) p = "int"; break; /* mlock */ case 203: if (ndx == 0 || ndx == 1) p = "int"; break; /* munlock */ case 204: if (ndx == 0 || ndx == 1) p = "int"; break; /* undelete */ case 205: if (ndx == 0 || ndx == 1) p = "int"; break; /* futimes */ case 206: if (ndx == 0 || ndx == 1) p = "int"; break; /* getpgid */ case 207: if (ndx == 0 || ndx == 1) p = "int"; break; /* poll */ case 209: if (ndx == 0 || ndx == 1) p = "int"; break; /* lkmnosys */ case 210: /* lkmnosys */ case 211: /* lkmnosys */ case 212: /* lkmnosys */ case 213: /* lkmnosys */ case 214: /* lkmnosys */ case 215: /* lkmnosys */ case 216: /* lkmnosys */ case 217: /* lkmnosys */ case 218: /* lkmnosys */ case 219: /* semget */ case 221: if (ndx == 0 || ndx == 1) p = "int"; break; /* semop */ case 222: if (ndx == 0 || ndx == 1) p = "int"; break; /* msgget */ case 225: if (ndx == 0 || ndx == 1) p = "int"; break; /* msgsnd */ case 226: if (ndx == 0 || ndx == 1) p = "int"; break; /* msgrcv */ case 227: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* shmat */ case 228: if (ndx == 0 || ndx == 1) p = "int"; break; /* shmdt */ case 230: if (ndx == 0 || ndx == 1) p = "int"; break; /* shmget */ case 231: if (ndx == 0 || ndx == 1) p = "int"; break; /* clock_gettime */ case 232: if (ndx == 0 || ndx == 1) p = "int"; break; /* clock_settime */ case 233: if (ndx == 0 || ndx == 1) p = "int"; break; /* clock_getres */ case 234: if (ndx == 0 || ndx == 1) p = "int"; break; /* ktimer_create */ case 235: if (ndx == 0 || ndx == 1) p = "int"; break; /* ktimer_delete */ case 236: if (ndx == 0 || ndx == 1) p = "int"; break; /* ktimer_settime */ case 237: if (ndx == 0 || ndx == 1) p = "int"; break; /* ktimer_gettime */ case 238: if (ndx == 0 || ndx == 1) p = "int"; break; /* ktimer_getoverrun */ case 239: if (ndx == 0 || ndx == 1) p = "int"; break; /* nanosleep */ case 240: if (ndx == 0 || ndx == 1) p = "int"; break; /* ffclock_getcounter */ case 241: if (ndx == 0 || ndx == 1) p = "int"; break; /* ffclock_setestimate */ case 242: if (ndx == 0 || ndx == 1) p = "int"; break; /* ffclock_getestimate */ case 243: if (ndx == 0 || ndx == 1) p = "int"; break; /* clock_nanosleep */ case 244: if (ndx == 0 || ndx == 1) p = "int"; break; /* clock_getcpuclockid2 */ case 247: if (ndx == 0 || ndx == 1) p = "int"; break; /* ntp_gettime */ case 248: if (ndx == 0 || ndx == 1) p = "int"; break; /* minherit */ case 250: if (ndx == 0 || ndx == 1) p = "int"; break; /* rfork */ case 251: if (ndx == 0 || ndx == 1) p = "int"; break; /* issetugid */ case 253: /* lchown */ case 254: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_read */ case 255: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_write */ case 256: if (ndx == 0 || ndx == 1) p = "int"; break; /* lio_listio */ case 257: if (ndx == 0 || ndx == 1) p = "int"; break; /* lchmod */ case 274: if (ndx == 0 || ndx == 1) p = "int"; break; /* lchown */ case 275: if (ndx == 0 || ndx == 1) p = "int"; break; /* lutimes */ case 276: if (ndx == 0 || ndx == 1) p = "int"; break; /* msync */ case 277: if (ndx == 0 || ndx == 1) p = "int"; break; /* 
preadv */ case 289: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* pwritev */ case 290: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* fhopen */ case 298: if (ndx == 0 || ndx == 1) p = "int"; break; /* modnext */ case 300: if (ndx == 0 || ndx == 1) p = "int"; break; /* modstat */ case 301: if (ndx == 0 || ndx == 1) p = "int"; break; /* modfnext */ case 302: if (ndx == 0 || ndx == 1) p = "int"; break; /* modfind */ case 303: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldload */ case 304: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldunload */ case 305: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldfind */ case 306: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldnext */ case 307: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldstat */ case 308: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldfirstmod */ case 309: if (ndx == 0 || ndx == 1) p = "int"; break; /* getsid */ case 310: if (ndx == 0 || ndx == 1) p = "int"; break; /* setresuid */ case 311: if (ndx == 0 || ndx == 1) p = "int"; break; /* setresgid */ case 312: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_return */ case 314: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* aio_suspend */ case 315: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_cancel */ case 316: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_error */ case 317: if (ndx == 0 || ndx == 1) p = "int"; break; /* yield */ case 321: /* mlockall */ case 324: if (ndx == 0 || ndx == 1) p = "int"; break; /* munlockall */ case 325: /* __getcwd */ case 326: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_setparam */ case 327: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_getparam */ case 328: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_setscheduler */ case 329: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_getscheduler */ case 330: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_yield */ case 331: /* sched_get_priority_max */ case 332: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_get_priority_min */ case 333: if (ndx == 0 || ndx == 1) p = "int"; break; /* sched_rr_get_interval */ case 334: if (ndx == 0 || ndx == 1) p = "int"; break; /* utrace */ case 335: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldsym */ case 337: if (ndx == 0 || ndx == 1) p = "int"; break; /* jail */ case 338: if (ndx == 0 || ndx == 1) p = "int"; break; /* nnpfs_syscall */ case 339: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigprocmask */ case 340: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigsuspend */ case 341: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigpending */ case 343: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigtimedwait */ case 345: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigwaitinfo */ case 346: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_get_file */ case 347: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_set_file */ case 348: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_get_fd */ case 349: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_set_fd */ case 350: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_delete_file */ case 351: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_delete_fd */ case 352: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_aclcheck_file */ case 353: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_aclcheck_fd */ case 354: if (ndx == 0 || ndx == 1) p = "int"; break; /* extattrctl */ case 355: if (ndx == 0 || ndx == 1) p = "int"; break; /* extattr_set_file */ case 356: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* 
extattr_get_file */ case 357: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_delete_file */ case 358: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_waitcomplete */ case 359: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* getresuid */ case 360: if (ndx == 0 || ndx == 1) p = "int"; break; /* getresgid */ case 361: if (ndx == 0 || ndx == 1) p = "int"; break; /* kqueue */ case 362: /* extattr_set_fd */ case 371: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_get_fd */ case 372: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_delete_fd */ case 373: if (ndx == 0 || ndx == 1) p = "int"; break; /* __setugid */ case 374: if (ndx == 0 || ndx == 1) p = "int"; break; /* eaccess */ case 376: if (ndx == 0 || ndx == 1) p = "int"; break; /* afs3_syscall */ case 377: if (ndx == 0 || ndx == 1) p = "int"; break; /* nmount */ case 378: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_get_proc */ case 384: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_set_proc */ case 385: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_get_fd */ case 386: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_get_file */ case 387: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_set_fd */ case 388: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_set_file */ case 389: if (ndx == 0 || ndx == 1) p = "int"; break; /* kenv */ case 390: if (ndx == 0 || ndx == 1) p = "int"; break; /* lchflags */ case 391: if (ndx == 0 || ndx == 1) p = "int"; break; /* uuidgen */ case 392: if (ndx == 0 || ndx == 1) p = "int"; break; /* sendfile */ case 393: if (ndx == 0 || ndx == 1) p = "int"; break; /* mac_syscall */ case 394: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_close */ case 400: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_post */ case 401: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_wait */ case 402: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_trywait */ case 403: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_init */ case 404: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_open */ case 405: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_unlink */ case 406: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_getvalue */ case 407: if (ndx == 0 || ndx == 1) p = "int"; break; /* ksem_destroy */ case 408: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_get_pid */ case 409: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_get_link */ case 410: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_set_link */ case 411: if (ndx == 0 || ndx == 1) p = "int"; break; /* extattr_set_link */ case 412: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_get_link */ case 413: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_delete_link */ case 414: if (ndx == 0 || ndx == 1) p = "int"; break; /* __mac_execve */ case 415: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigaction */ case 416: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigreturn */ case 417: if (ndx == 0 || ndx == 1) p = "int"; break; /* getcontext */ case 421: if (ndx == 0 || ndx == 1) p = "int"; break; /* setcontext */ case 422: if (ndx == 0 || ndx == 1) p = "int"; break; /* swapcontext */ case 423: if (ndx == 0 || ndx == 1) p = "int"; break; /* swapoff */ case 424: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_get_link */ case 425: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_set_link */ case 426: if (ndx == 0 || ndx == 1) p = "int"; break; /* __acl_delete_link */ case 427: if (ndx == 0 || ndx == 1) p = "int"; break; /* 
__acl_aclcheck_link */ case 428: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigwait */ case 429: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_create */ case 430: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_exit */ case 431: if (ndx == 0 || ndx == 1) p = "void"; break; /* thr_self */ case 432: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_kill */ case 433: if (ndx == 0 || ndx == 1) p = "int"; break; /* jail_attach */ case 436: if (ndx == 0 || ndx == 1) p = "int"; break; /* extattr_list_fd */ case 437: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_list_file */ case 438: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* extattr_list_link */ case 439: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* ksem_timedwait */ case 441: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_suspend */ case 442: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_wake */ case 443: if (ndx == 0 || ndx == 1) p = "int"; break; /* kldunloadf */ case 444: if (ndx == 0 || ndx == 1) p = "int"; break; /* audit */ case 445: if (ndx == 0 || ndx == 1) p = "int"; break; /* auditon */ case 446: if (ndx == 0 || ndx == 1) p = "int"; break; /* getauid */ case 447: if (ndx == 0 || ndx == 1) p = "int"; break; /* setauid */ case 448: if (ndx == 0 || ndx == 1) p = "int"; break; /* getaudit */ case 449: if (ndx == 0 || ndx == 1) p = "int"; break; /* setaudit */ case 450: if (ndx == 0 || ndx == 1) p = "int"; break; /* getaudit_addr */ case 451: if (ndx == 0 || ndx == 1) p = "int"; break; /* setaudit_addr */ case 452: if (ndx == 0 || ndx == 1) p = "int"; break; /* auditctl */ case 453: if (ndx == 0 || ndx == 1) p = "int"; break; /* _umtx_op */ case 454: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_new */ case 455: if (ndx == 0 || ndx == 1) p = "int"; break; /* sigqueue */ case 456: if (ndx == 0 || ndx == 1) p = "int"; break; /* kmq_open */ case 457: if (ndx == 0 || ndx == 1) p = "int"; break; /* kmq_setattr */ case 458: if (ndx == 0 || ndx == 1) p = "int"; break; /* kmq_timedreceive */ case 459: if (ndx == 0 || ndx == 1) p = "int"; break; /* kmq_timedsend */ case 460: if (ndx == 0 || ndx == 1) p = "int"; break; /* kmq_notify */ case 461: if (ndx == 0 || ndx == 1) p = "int"; break; /* kmq_unlink */ case 462: if (ndx == 0 || ndx == 1) p = "int"; break; /* abort2 */ case 463: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_set_name */ case 464: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_fsync */ case 465: if (ndx == 0 || ndx == 1) p = "int"; break; /* rtprio_thread */ case 466: if (ndx == 0 || ndx == 1) p = "int"; break; /* sctp_peeloff */ case 471: if (ndx == 0 || ndx == 1) p = "int"; break; /* sctp_generic_sendmsg */ case 472: if (ndx == 0 || ndx == 1) p = "int"; break; /* sctp_generic_sendmsg_iov */ case 473: if (ndx == 0 || ndx == 1) p = "int"; break; /* sctp_generic_recvmsg */ case 474: if (ndx == 0 || ndx == 1) p = "int"; break; /* pread */ case 475: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* pwrite */ case 476: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* mmap */ case 477: if (ndx == 0 || ndx == 1) p = "caddr_t"; break; /* lseek */ case 478: if (ndx == 0 || ndx == 1) p = "off_t"; break; /* truncate */ case 479: if (ndx == 0 || ndx == 1) p = "int"; break; /* ftruncate */ case 480: if (ndx == 0 || ndx == 1) p = "int"; break; /* thr_kill2 */ case 481: if (ndx == 0 || ndx == 1) p = "int"; break; /* shm_open */ case 482: if (ndx == 0 || ndx == 1) p = "int"; break; /* shm_unlink */ case 483: if (ndx == 0 || ndx == 1) p = "int"; break; /* cpuset */ case 484: if (ndx 
== 0 || ndx == 1) p = "int"; break; /* cpuset_setid */ case 485: if (ndx == 0 || ndx == 1) p = "int"; break; /* cpuset_getid */ case 486: if (ndx == 0 || ndx == 1) p = "int"; break; /* cpuset_getaffinity */ case 487: if (ndx == 0 || ndx == 1) p = "int"; break; /* cpuset_setaffinity */ case 488: if (ndx == 0 || ndx == 1) p = "int"; break; /* faccessat */ case 489: if (ndx == 0 || ndx == 1) p = "int"; break; /* fchmodat */ case 490: if (ndx == 0 || ndx == 1) p = "int"; break; /* fchownat */ case 491: if (ndx == 0 || ndx == 1) p = "int"; break; /* fexecve */ case 492: if (ndx == 0 || ndx == 1) p = "int"; break; /* futimesat */ case 494: if (ndx == 0 || ndx == 1) p = "int"; break; /* linkat */ case 495: if (ndx == 0 || ndx == 1) p = "int"; break; /* mkdirat */ case 496: if (ndx == 0 || ndx == 1) p = "int"; break; /* mkfifoat */ case 497: if (ndx == 0 || ndx == 1) p = "int"; break; /* openat */ case 499: if (ndx == 0 || ndx == 1) p = "int"; break; /* readlinkat */ case 500: if (ndx == 0 || ndx == 1) p = "int"; break; /* renameat */ case 501: if (ndx == 0 || ndx == 1) p = "int"; break; /* symlinkat */ case 502: if (ndx == 0 || ndx == 1) p = "int"; break; /* unlinkat */ case 503: if (ndx == 0 || ndx == 1) p = "int"; break; /* posix_openpt */ case 504: if (ndx == 0 || ndx == 1) p = "int"; break; /* gssd_syscall */ case 505: if (ndx == 0 || ndx == 1) p = "int"; break; /* jail_get */ case 506: if (ndx == 0 || ndx == 1) p = "int"; break; /* jail_set */ case 507: if (ndx == 0 || ndx == 1) p = "int"; break; /* jail_remove */ case 508: if (ndx == 0 || ndx == 1) p = "int"; break; /* closefrom */ case 509: if (ndx == 0 || ndx == 1) p = "int"; break; /* __semctl */ case 510: if (ndx == 0 || ndx == 1) p = "int"; break; /* msgctl */ case 511: if (ndx == 0 || ndx == 1) p = "int"; break; /* shmctl */ case 512: if (ndx == 0 || ndx == 1) p = "int"; break; /* lpathconf */ case 513: if (ndx == 0 || ndx == 1) p = "int"; break; /* __cap_rights_get */ case 515: if (ndx == 0 || ndx == 1) p = "int"; break; /* cap_enter */ case 516: /* cap_getmode */ case 517: if (ndx == 0 || ndx == 1) p = "int"; break; /* pdfork */ case 518: if (ndx == 0 || ndx == 1) p = "int"; break; /* pdkill */ case 519: if (ndx == 0 || ndx == 1) p = "int"; break; /* pdgetpid */ case 520: if (ndx == 0 || ndx == 1) p = "int"; break; /* pselect */ case 522: if (ndx == 0 || ndx == 1) p = "int"; break; /* getloginclass */ case 523: if (ndx == 0 || ndx == 1) p = "int"; break; /* setloginclass */ case 524: if (ndx == 0 || ndx == 1) p = "int"; break; /* rctl_get_racct */ case 525: if (ndx == 0 || ndx == 1) p = "int"; break; /* rctl_get_rules */ case 526: if (ndx == 0 || ndx == 1) p = "int"; break; /* rctl_get_limits */ case 527: if (ndx == 0 || ndx == 1) p = "int"; break; /* rctl_add_rule */ case 528: if (ndx == 0 || ndx == 1) p = "int"; break; /* rctl_remove_rule */ case 529: if (ndx == 0 || ndx == 1) p = "int"; break; /* posix_fallocate */ case 530: if (ndx == 0 || ndx == 1) p = "int"; break; /* posix_fadvise */ case 531: if (ndx == 0 || ndx == 1) p = "int"; break; /* wait6 */ case 532: if (ndx == 0 || ndx == 1) p = "int"; break; /* cap_rights_limit */ case 533: if (ndx == 0 || ndx == 1) p = "int"; break; /* cap_ioctls_limit */ case 534: if (ndx == 0 || ndx == 1) p = "int"; break; /* cap_ioctls_get */ case 535: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* cap_fcntls_limit */ case 536: if (ndx == 0 || ndx == 1) p = "int"; break; /* cap_fcntls_get */ case 537: if (ndx == 0 || ndx == 1) p = "int"; break; /* bindat */ case 538: if (ndx == 0 || ndx == 
1) p = "int"; break; /* connectat */ case 539: if (ndx == 0 || ndx == 1) p = "int"; break; /* chflagsat */ case 540: if (ndx == 0 || ndx == 1) p = "int"; break; /* accept4 */ case 541: if (ndx == 0 || ndx == 1) p = "int"; break; /* pipe2 */ case 542: if (ndx == 0 || ndx == 1) p = "int"; break; /* aio_mlock */ case 543: if (ndx == 0 || ndx == 1) p = "int"; break; /* procctl */ case 544: if (ndx == 0 || ndx == 1) p = "int"; break; /* ppoll */ case 545: if (ndx == 0 || ndx == 1) p = "int"; break; /* futimens */ case 546: if (ndx == 0 || ndx == 1) p = "int"; break; /* utimensat */ case 547: if (ndx == 0 || ndx == 1) p = "int"; break; /* numa_getaffinity */ case 548: if (ndx == 0 || ndx == 1) p = "int"; break; /* numa_setaffinity */ case 549: if (ndx == 0 || ndx == 1) p = "int"; break; /* fdatasync */ case 550: if (ndx == 0 || ndx == 1) p = "int"; break; /* fstat */ case 551: if (ndx == 0 || ndx == 1) p = "int"; break; /* fstatat */ case 552: if (ndx == 0 || ndx == 1) p = "int"; break; /* fhstat */ case 553: if (ndx == 0 || ndx == 1) p = "int"; break; /* getdirentries */ case 554: if (ndx == 0 || ndx == 1) p = "ssize_t"; break; /* statfs */ case 555: if (ndx == 0 || ndx == 1) p = "int"; break; /* fstatfs */ case 556: if (ndx == 0 || ndx == 1) p = "int"; break; /* getfsstat */ case 557: if (ndx == 0 || ndx == 1) p = "int"; break; /* fhstatfs */ case 558: if (ndx == 0 || ndx == 1) p = "int"; break; /* mknodat */ case 559: if (ndx == 0 || ndx == 1) p = "int"; break; /* kevent */ case 560: + if (ndx == 0 || ndx == 1) + p = "int"; + break; + /* cpuset_getdomain */ + case 561: + if (ndx == 0 || ndx == 1) + p = "int"; + break; + /* cpuset_setdomain */ + case 562: if (ndx == 0 || ndx == 1) p = "int"; break; default: break; }; if (p != NULL) strlcpy(desc, p, descsz); } Index: user/jeff/numa/sys/sys/_domainset.h =================================================================== --- user/jeff/numa/sys/sys/_domainset.h (nonexistent) +++ user/jeff/numa/sys/sys/_domainset.h (revision 326889) @@ -0,0 +1,60 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2017, Jeffrey Roberson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
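Both new entries in the generated description table above, cpuset_getdomain (syscall 561) and cpuset_setdomain (syscall 562), resolve to "int" for index 0 and index 1, like the surrounding cases. A minimal sketch of how that table is consumed; the helper name follows the usual naming in the generated systrace_args.c and is an assumption here, since the enclosing function signature lies outside this hunk:

    /*
     * Hypothetical caller: look up the return-type description for the new
     * cpuset_setdomain syscall.  Per the case added above, index 0 (and 1)
     * yields "int", which the trailing strlcpy() copies into desc.
     */
    char desc[16];

    systrace_return_setargdesc(562 /* cpuset_setdomain */, 0, desc, sizeof(desc));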
+ * + * $FreeBSD$ + */ + +#ifndef _SYS__DOMAINSET_H_ +#define _SYS__DOMAINSET_H_ + +#include + +#ifdef _KERNEL +#define DOMAINSET_SETSIZE MAXMEMDOM +#endif + +#define DOMAINSET_MAXSIZE 256 + +#ifndef DOMAINSET_SETSIZE +#define DOMAINSET_SETSIZE DOMAINSET_MAXSIZE +#endif + +BITSET_DEFINE(_domainset, DOMAINSET_SETSIZE); +typedef struct _domainset domainset_t; + +/* + * This structure is intended to be embedded in objects which have policy + * attributes. Each object keeps its own iterator so round-robin is + * synchronized and accurate. + */ +struct domainset; +struct domainset_ref { + struct domainset * volatile dr_policy; + int dr_iterator; +}; + +#endif /* !_SYS__DOMAINSET_H_ */ Property changes on: user/jeff/numa/sys/sys/_domainset.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: user/jeff/numa/sys/sys/cpuset.h =================================================================== --- user/jeff/numa/sys/sys/cpuset.h (revision 326888) +++ user/jeff/numa/sys/sys/cpuset.h (revision 326889) @@ -1,157 +1,158 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, Jeffrey Roberson * All rights reserved. * * Copyright (c) 2008 Nokia Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
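The new sys/sys/_domainset.h above keeps the underlying types minimal: a BITSET-backed domainset_t sized by DOMAINSET_SETSIZE (MAXMEMDOM in the kernel, DOMAINSET_MAXSIZE = 256 otherwise), plus a small struct domainset_ref meant to be embedded in any object that carries a memory-allocation policy. A hedged sketch of that embedding pattern; the object and field names below are hypothetical:

    #include <sys/_domainset.h>

    /*
     * Hypothetical NUMA-aware object: dr_policy points at the policy in
     * effect, while dr_iterator is private to this object, so round-robin
     * selection advances independently per consumer, as the header's
     * comment intends.
     */
    struct numa_aware_object {
            struct domainset_ref    nao_domain;     /* hypothetical field */
            /* ... object-specific state ... */
    };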
* * $FreeBSD$ */ #ifndef _SYS_CPUSET_H_ #define _SYS_CPUSET_H_ #include #include #define _NCPUBITS _BITSET_BITS #define _NCPUWORDS __bitset_words(CPU_SETSIZE) #define CPUSETBUFSIZ ((2 + sizeof(long) * 2) * _NCPUWORDS) #define CPU_CLR(n, p) BIT_CLR(CPU_SETSIZE, n, p) #define CPU_COPY(f, t) BIT_COPY(CPU_SETSIZE, f, t) #define CPU_ISSET(n, p) BIT_ISSET(CPU_SETSIZE, n, p) #define CPU_SET(n, p) BIT_SET(CPU_SETSIZE, n, p) #define CPU_ZERO(p) BIT_ZERO(CPU_SETSIZE, p) #define CPU_FILL(p) BIT_FILL(CPU_SETSIZE, p) #define CPU_SETOF(n, p) BIT_SETOF(CPU_SETSIZE, n, p) #define CPU_EMPTY(p) BIT_EMPTY(CPU_SETSIZE, p) #define CPU_ISFULLSET(p) BIT_ISFULLSET(CPU_SETSIZE, p) #define CPU_SUBSET(p, c) BIT_SUBSET(CPU_SETSIZE, p, c) #define CPU_OVERLAP(p, c) BIT_OVERLAP(CPU_SETSIZE, p, c) #define CPU_CMP(p, c) BIT_CMP(CPU_SETSIZE, p, c) #define CPU_OR(d, s) BIT_OR(CPU_SETSIZE, d, s) #define CPU_AND(d, s) BIT_AND(CPU_SETSIZE, d, s) #define CPU_NAND(d, s) BIT_NAND(CPU_SETSIZE, d, s) #define CPU_CLR_ATOMIC(n, p) BIT_CLR_ATOMIC(CPU_SETSIZE, n, p) #define CPU_SET_ATOMIC(n, p) BIT_SET_ATOMIC(CPU_SETSIZE, n, p) #define CPU_SET_ATOMIC_ACQ(n, p) BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p) #define CPU_AND_ATOMIC(n, p) BIT_AND_ATOMIC(CPU_SETSIZE, n, p) #define CPU_OR_ATOMIC(d, s) BIT_OR_ATOMIC(CPU_SETSIZE, d, s) #define CPU_COPY_STORE_REL(f, t) BIT_COPY_STORE_REL(CPU_SETSIZE, f, t) #define CPU_FFS(p) BIT_FFS(CPU_SETSIZE, p) #define CPU_COUNT(p) BIT_COUNT(CPU_SETSIZE, p) #define CPUSET_FSET BITSET_FSET(_NCPUWORDS) #define CPUSET_T_INITIALIZER BITSET_T_INITIALIZER /* * Valid cpulevel_t values. */ #define CPU_LEVEL_ROOT 1 /* All system cpus. */ #define CPU_LEVEL_CPUSET 2 /* Available cpus for which. */ #define CPU_LEVEL_WHICH 3 /* Actual mask/id for which. */ /* * Valid cpuwhich_t values. */ #define CPU_WHICH_TID 1 /* Specifies a thread id. */ #define CPU_WHICH_PID 2 /* Specifies a process id. */ #define CPU_WHICH_CPUSET 3 /* Specifies a set id. */ #define CPU_WHICH_IRQ 4 /* Specifies an irq #. */ #define CPU_WHICH_JAIL 5 /* Specifies a jail id. */ #define CPU_WHICH_DOMAIN 6 /* Specifies a NUMA domain id. */ #define CPU_WHICH_INTRHANDLER 7 /* Specifies an irq # (not ithread). */ #define CPU_WHICH_ITHREAD 8 /* Specifies an irq's ithread. */ /* * Reserved cpuset identifiers. */ #define CPUSET_INVALID -1 #define CPUSET_DEFAULT 0 #ifdef _KERNEL #include LIST_HEAD(setlist, cpuset); /* * cpusets encapsulate cpu binding information for one or more threads. * * a - Accessed with atomics. * s - Set at creation, never modified. Only a ref required to read. * c - Locked internally by a cpuset lock. * * The bitmask is only modified while holding the cpuset lock. It may be * read while only a reference is held but the consumer must be prepared * to deal with inconsistent results. */ struct cpuset { cpuset_t cs_mask; /* bitmask of valid cpus. */ + struct domainset *cs_domain; /* (c) NUMA policy. */ volatile u_int cs_ref; /* (a) Reference count. */ int cs_flags; /* (s) Flags from below. */ cpusetid_t cs_id; /* (s) Id or INVALID. */ struct cpuset *cs_parent; /* (s) Pointer to our parent. */ LIST_ENTRY(cpuset) cs_link; /* (c) All identified sets. */ LIST_ENTRY(cpuset) cs_siblings; /* (c) Sibling set link. */ struct setlist cs_children; /* (c) List of children. */ }; #define CPU_SET_ROOT 0x0001 /* Set is a root set. */ #define CPU_SET_RDONLY 0x0002 /* No modification allowed. 
*/ extern cpuset_t *cpuset_root; struct prison; struct proc; struct thread; struct cpuset *cpuset_thread0(void); struct cpuset *cpuset_ref(struct cpuset *); void cpuset_rel(struct cpuset *); int cpuset_setthread(lwpid_t id, cpuset_t *); int cpuset_setithread(lwpid_t id, int cpu); int cpuset_create_root(struct prison *, struct cpuset **); int cpuset_setproc_update_set(struct proc *, struct cpuset *); int cpuset_which(cpuwhich_t, id_t, struct proc **, struct thread **, struct cpuset **); char *cpusetobj_strprint(char *, const cpuset_t *); int cpusetobj_strscan(cpuset_t *, const char *); #ifdef DDB void ddb_display_cpuset(const cpuset_t *); #endif #else __BEGIN_DECLS int cpuset(cpusetid_t *); int cpuset_setid(cpuwhich_t, id_t, cpusetid_t); int cpuset_getid(cpulevel_t, cpuwhich_t, id_t, cpusetid_t *); int cpuset_getaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, cpuset_t *); int cpuset_setaffinity(cpulevel_t, cpuwhich_t, id_t, size_t, const cpuset_t *); __END_DECLS #endif #endif /* !_SYS_CPUSET_H_ */ Index: user/jeff/numa/sys/sys/domainset.h =================================================================== --- user/jeff/numa/sys/sys/domainset.h (nonexistent) +++ user/jeff/numa/sys/sys/domainset.h (revision 326889) @@ -0,0 +1,100 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2017, Jeffrey Roberson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
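The userland declarations in cpuset.h above already allow a program to translate a NUMA domain id into its CPUs by pairing cpuset_getaffinity(2) with the CPU_WHICH_DOMAIN selector; the header that follows adds the memory-side counterpart. A minimal usage sketch, assuming domain 0 exists on the running system:

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
            cpuset_t cpus;

            /* Ask which CPUs belong to NUMA domain 0. */
            CPU_ZERO(&cpus);
            if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_DOMAIN, 0,
                sizeof(cpus), &cpus) == -1)
                    err(1, "cpuset_getaffinity");
            printf("domain 0 spans %d CPUs\n", CPU_COUNT(&cpus));
            return (0);
    }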
+ * + * $FreeBSD$ + */ + +#ifndef _SYS_DOMAINSETSET_H_ +#define _SYS_DOMAINSETSET_H_ + +#include + +#include + +#define _NDOMAINSETBITS _BITSET_BITS +#define _NDOMAINSETWORDS __bitset_words(DOMAINSET_SETSIZE) + +#define DOMAINSETSETBUFSIZ ((2 + sizeof(long) * 2) * _NDOMAINSETWORDS) + +#define DOMAINSET_CLR(n, p) BIT_CLR(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_COPY(f, t) BIT_COPY(DOMAINSET_SETSIZE, f, t) +#define DOMAINSET_ISSET(n, p) BIT_ISSET(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_SET(n, p) BIT_SET(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_ZERO(p) BIT_ZERO(DOMAINSET_SETSIZE, p) +#define DOMAINSET_FILL(p) BIT_FILL(DOMAINSET_SETSIZE, p) +#define DOMAINSET_SETOF(n, p) BIT_SETOF(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_EMPTY(p) BIT_EMPTY(DOMAINSET_SETSIZE, p) +#define DOMAINSET_ISFULLSET(p) BIT_ISFULLSET(DOMAINSET_SETSIZE, p) +#define DOMAINSET_SUBSET(p, c) BIT_SUBSET(DOMAINSET_SETSIZE, p, c) +#define DOMAINSET_OVERLAP(p, c) BIT_OVERLAP(DOMAINSET_SETSIZE, p, c) +#define DOMAINSET_CMP(p, c) BIT_CMP(DOMAINSET_SETSIZE, p, c) +#define DOMAINSET_OR(d, s) BIT_OR(DOMAINSET_SETSIZE, d, s) +#define DOMAINSET_AND(d, s) BIT_AND(DOMAINSET_SETSIZE, d, s) +#define DOMAINSET_NAND(d, s) BIT_NAND(DOMAINSET_SETSIZE, d, s) +#define DOMAINSET_CLR_ATOMIC(n, p) BIT_CLR_ATOMIC(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_SET_ATOMIC(n, p) BIT_SET_ATOMIC(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_SET_ATOMIC_ACQ(n, p) \ + BIT_SET_ATOMIC_ACQ(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_AND_ATOMIC(n, p) BIT_AND_ATOMIC(DOMAINSET_SETSIZE, n, p) +#define DOMAINSET_OR_ATOMIC(d, s) BIT_OR_ATOMIC(DOMAINSET_SETSIZE, d, s) +#define DOMAINSET_COPY_STORE_REL(f, t) \ + BIT_COPY_STORE_REL(DOMAINSET_SETSIZE, f, t) +#define DOMAINSET_FFS(p) BIT_FFS(DOMAINSET_SETSIZE, p) +#define DOMAINSET_FLS(p) BIT_FLS(DOMAINSET_SETSIZE, p) +#define DOMAINSET_COUNT(p) BIT_COUNT(DOMAINSET_SETSIZE, p) +#define DOMAINSET_FSET BITSET_FSET(_NDOMAINSETWORDS) +#define DOMAINSET_T_INITIALIZER BITSET_T_INITIALIZER + +#define DOMAINSET_POLICY_INVALID 0 +#define DOMAINSET_POLICY_ROUNDROBIN 1 +#define DOMAINSET_POLICY_FIRSTTOUCH 2 +#define DOMAINSET_POLICY_MAX DOMAINSET_POLICY_FIRSTTOUCH + +#ifdef _KERNEL +#include +LIST_HEAD(domainlist, domainset); + +struct domainset { + LIST_ENTRY(domainset) ds_link; + domainset_t ds_mask; /* allowed domains. */ + uint16_t ds_cnt; /* popcnt from above. */ + uint16_t ds_max; /* Maximum domain in set. */ + uint16_t ds_policy; /* Policy type. */ +}; + +void domainset_zero(void); + +#else +__BEGIN_DECLS +int cpuset_getdomain(cpulevel_t, cpuwhich_t, id_t, size_t, domainset_t *, + int *); +int cpuset_setdomain(cpulevel_t, cpuwhich_t, id_t, size_t, + const domainset_t *, int); + +__END_DECLS +#endif +#endif /* !_SYS_DOMAINSETSET_H_ */ Property changes on: user/jeff/numa/sys/sys/domainset.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: user/jeff/numa/sys/sys/param.h =================================================================== --- user/jeff/numa/sys/sys/param.h (revision 326888) +++ user/jeff/numa/sys/sys/param.h (revision 326889) @@ -1,365 +1,365 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. 
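The userland face of the new domainset.h is the cpuset_getdomain()/cpuset_setdomain() pair declared just above, the same interfaces wired up as syscalls 561 and 562 earlier in this revision. A minimal sketch of restricting the calling process's memory to domain 0 with a round-robin policy; it assumes the header installs as <sys/domainset.h> and that an id of -1 names the caller, mirroring the existing affinity calls:

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <sys/domainset.h>
    #include <err.h>

    int
    main(void)
    {
            domainset_t mask;
            int policy;

            /* Allow only domain 0, allocating round-robin within the set. */
            DOMAINSET_ZERO(&mask);
            DOMAINSET_SET(0, &mask);
            if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
                sizeof(mask), &mask, DOMAINSET_POLICY_ROUNDROBIN) == -1)
                    err(1, "cpuset_setdomain");

            /* Read the effective mask and policy back. */
            DOMAINSET_ZERO(&mask);
            if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
                sizeof(mask), &mask, &policy) == -1)
                    err(1, "cpuset_getdomain");
            return (0);
    }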
* All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here in the doc/ repository: * * head/en_US.ISO8859-1/books/porters-handbook/versions/chapter.xml * * scheme is: Rxx * 'R' is in the range 0 to 4 if this is a release branch or * X.0-CURRENT before releng/X.0 is created, otherwise 'R' is * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1200054 /* Master, propagated to newvers */ +#define __FreeBSD_version 1200055 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * which by definition is always true on FreeBSD. This macro is also defined * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD. * * It is tempting to use this macro in userland code when we want to enable * kernel-specific routines, and in fact it's fine to do this in code that * is part of FreeBSD itself. However, be aware that as presence of this * macro is still not widespread (e.g. older FreeBSD versions, 3rd party * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in * external applications without also checking for __FreeBSD__ as an * alternative. 
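The __FreeBSD_version comment above points at the Porter's Handbook precisely because downstream code gates features on this number, so the bump from 1200054 to 1200055 in this revision is what third-party sources would test before relying on the new domain interfaces. A short illustrative guard; the feature macro name is hypothetical:

    #include <sys/param.h>

    #if defined(__FreeBSD__) && __FreeBSD_version >= 1200055
    /* cpuset_getdomain()/cpuset_setdomain() and <sys/domainset.h> are present. */
    #define HAVE_CPUSET_DOMAIN      1       /* hypothetical feature macro */
    #endif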
*/ #undef __FreeBSD_kernel__ #define __FreeBSD_kernel__ #if defined(_KERNEL) || defined(IN_RTLD) #define P_OSREL_SIGWAIT 700000 #define P_OSREL_SIGSEGV 700004 #define P_OSREL_MAP_ANON 800104 #define P_OSREL_MAP_FSTRICT 1100036 #define P_OSREL_SHUTDOWN_ENOTCONN 1100077 #define P_OSREL_MAP_GUARD 1200035 #define P_OSREL_WRFSBASE 1200041 #define P_OSREL_CK_CYLGRP 1200046 #define P_OSREL_VMTOTAL64 1200054 #define P_OSREL_MAJOR(x) ((x) / 100000) #endif #ifndef LOCORE #include #endif /* * Machine-independent constants (some used in following include files). * Redefined constants are from POSIX 1003.1 limits file. * * MAXCOMLEN should be >= sizeof(ac_comm) (see ) */ #include #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP PATH_MAX /* max interpreter file name length */ #define MAXLOGNAME 33 /* max login name length (incl. NUL) */ #define MAXUPRC CHILD_MAX /* max simultaneous processes */ #define NCARGS ARG_MAX /* max bytes for an exec function */ #define NGROUPS (NGROUPS_MAX+1) /* max number groups */ #define NOFILE OPEN_MAX /* max open files per process */ #define NOGROUP 65535 /* marker for empty group set member */ #define MAXHOSTNAMELEN 256 /* max hostname size */ #define SPECNAMELEN 63 /* max length of devicename */ /* More types and definitions used throughout the kernel. */ #ifdef _KERNEL #include #include #ifndef LOCORE #include #include #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #endif #ifndef _KERNEL /* Signals. */ #include #endif /* Machine type dependent parameters. */ #include #ifndef _KERNEL #include #endif #ifndef DEV_BSHIFT #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #endif #define DEV_BSIZE (1<>PAGE_SHIFT) #endif /* * btodb() is messy and perhaps slow because `bytes' may be an off_t. We * want to shift an unsigned type to avoid sign extension and we don't * want to widen `bytes' unnecessarily. Assume that the result fits in * a daddr_t. */ #ifndef btodb #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ (sizeof (bytes) > sizeof(long) \ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) #endif #ifndef dbtob #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((off_t)(db) << DEV_BSHIFT) #endif #define PRIMASK 0x0ff #define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ #define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */ #define NZERO 0 /* default "nice" */ #define NBBY 8 /* number of bits in a byte */ #define NBPW sizeof(int) /* number of bytes per word (integer) */ #define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ #define NODEV (dev_t)(-1) /* non-existent device */ /* * File system parameters and macros. * * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes * per block. MAXBSIZE may be made larger without effecting * any existing filesystems as long as it does not exceed MAXPHYS, * and may be made smaller at the risk of not being able to use * filesystems which require a block size exceeding MAXBSIZE. * * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache. This must * be >= MAXBSIZE and can be set differently for different * architectures by defining it in . * Making this larger allows NFS to do larger reads/writes. * * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). 
* The default value here can be overridden on a per-architecture * basis by defining it in . * * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. */ #define MAXBSIZE 65536 /* must be power of 2 */ #ifndef MAXBCACHEBUF #define MAXBCACHEBUF MAXBSIZE /* must be a power of 2 >= MAXBSIZE */ #endif #ifndef BKVASIZE #define BKVASIZE 16384 /* must be power of 2 */ #endif #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) \ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) \ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define nitems(x) (sizeof((x)) / sizeof((x)[0])) #define rounddown(x, y) (((x)/(y))*(y)) #define rounddown2(x, y) ((x)&(~((y)-1))) /* if y is power of two */ #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef LOCORE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #endif #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* _KERNEL */ /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that * can be calculated (assuming 32 bits) can be closely approximated using * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). * * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. */ #define FSHIFT 11 /* bits to right of fixed binary point */ #define FSCALE (1<> (PAGE_SHIFT - DEV_BSHIFT)) #define ctodb(db) /* calculates pages to devblks */ \ ((db) << (PAGE_SHIFT - DEV_BSHIFT)) /* * Old spelling of __containerof(). 
*/ #define member2struct(s, m, x) \ ((struct s *)(void *)((char *)(x) - offsetof(struct s, m))) /* * Access a variable length array that has been declared as a fixed * length array. */ #define __PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset]) #endif /* _SYS_PARAM_H_ */ Index: user/jeff/numa/sys/sys/proc.h =================================================================== --- user/jeff/numa/sys/sys/proc.h (revision 326888) +++ user/jeff/numa/sys/sys/proc.h (revision 326889) @@ -1,1141 +1,1141 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)proc.h 8.15 (Berkeley) 5/19/95 * $FreeBSD$ */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #endif #include #include #include +#include #include /* Machine-dependent proc substruct. */ + /* * One structure allocated per session. * * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. 
*/ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ int pg_jobc; /* (m) Job control process count. */ struct mtx pg_mtx; /* Mutex to protect members */ }; /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. */ }; /*- * Description of a process. * * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. * * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * kx- only accessed by curthread and by debugger * l - the attaching proc or attaching proc parent * m - Giant * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9) * t - thread lock * u - process stat lock * w - process timer lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * z - zombie threads lock * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. */ struct cpuset; struct filecaps; struct filemon; struct kaioinfo; struct kaudit_record; struct kdtrace_proc; struct kdtrace_thread; struct mqueue_notifier; struct nlminfo; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct syscall_args; struct td_sched; struct thread; struct trapframe; struct turnstile; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. 
*/ struct rusage_ext { uint64_t rux_runtime; /* (cu) Real time. */ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. */ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ + struct domainset_ref td_domain; /* (a) NUMA policy */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ - struct vm_domain_policy td_vm_dom_policy; /* (c) current numa domain policy */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. */ /* Cleared during fork1() */ #define td_startzero td_flags int td_flags; /* (t) TDF_* flags. */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ short td_locks; /* (k) Debug: count of non-spin locks */ short td_rw_rlocks; /* (k) Count of rwlock read locks. */ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */ short td_stopsched; /* (k) Scheduler stopped. */ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */ const char *td_lockname; /* (t) Name of lock blocked on. */ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */ int td_intr_nesting_level; /* (k) Interrupt recursion. */ int td_pinned; /* (k) Temporary cpu pin count. */ struct ucred *td_ucred; /* (k) Reference to credentials. */ struct plimit *td_limit; /* (k) Resource limits. */ int td_slptick; /* (t) Time at sleep. */ int td_blktick; /* (t) Time spent blocked. */ int td_swvoltick; /* (t) Time at last SW_VOL switch. */ int td_swinvoltick; /* (t) Time at last SW_INVOL switch. */ u_int td_cow; /* (*) Number of copy-on-write faults */ struct rusage td_ru; /* (t) rusage information. */ struct rusage_ext td_rux; /* (t) Internal rusage information. */ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */ u_int td_pticks; /* (t) Statclock hits for profiling */ u_int td_sticks; /* (t) Statclock hits in system mode. */ u_int td_iticks; /* (t) Statclock hits in intr mode. 
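struct thread above now carries its NUMA policy as the embedded struct domainset_ref td_domain, replacing the vm_domain_policy-based fields removed in this diff, so each thread owns the iterator that round-robin allocation advances. A purely conceptual sketch of that selection, using the structure and macro names added in this revision; the selection logic itself is an assumption, not the committed implementation:

    /*
     * Conceptual sketch only: advance a thread's private iterator over the
     * policy's allowed mask to pick the next memory domain.  The field and
     * macro names come from the headers added above; the loop is merely an
     * illustration of why dr_iterator is kept per object.
     */
    static int
    domainset_ref_next(struct domainset_ref *dr)
    {
            struct domainset *ds = dr->dr_policy;
            int d;

            do {
                    d = dr->dr_iterator++ % (ds->ds_max + 1);
            } while (!DOMAINSET_ISSET(d, &ds->ds_mask));
            return (d);
    }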
*/ u_int td_uticks; /* (t) Statclock hits in user mode. */ int td_intrval; /* (t) Return value for sleepq. */ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */ volatile u_int td_generation; /* (k) For detection of preemption */ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */ int td_xsig; /* (c) Signal for ptrace */ u_long td_profil_addr; /* (k) Temporary addr until AST. */ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */ struct file *td_fpop; /* (k) file referencing cdev under op */ int td_dbgflags; /* (c) Userland debugger flags */ siginfo_t td_si; /* (c) For debugger or core file */ int td_ng_outbound; /* (k) Thread entered ng from above. */ struct osd td_osd; /* (k) Object specific data. */ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ pid_t td_dbg_forked; /* (c) Child pid for debugger. */ u_int td_vp_reserv; /* (k) Count of reserved vnodes. */ int td_no_sleeping; /* (k) Sleeping disabled count. */ - int td_dom_rr_idx; /* (k) RR Numa domain selection. */ void *td_su; /* (k) FFS SU private */ sbintime_t td_sleeptimo; /* (t) Sleep timeout. */ int td_rtcgen; /* (s) rtc_generation of abs. sleep */ #define td_endzero td_sigmask /* Copied during fork1() or create_thread(). */ #define td_startcopy td_endzero sigset_t td_sigmask; /* (c) Current signal mask. */ u_char td_rqindex; /* (t) Run queue index. */ u_char td_base_pri; /* (t) Thread base kernel priority. */ u_char td_priority; /* (t) Thread active priority. */ u_char td_pri_class; /* (t) Scheduling class. */ u_char td_user_pri; /* (t) User pri from estcpu and nice. */ u_char td_base_user_pri; /* (t) Base user pri */ uintptr_t td_rb_list; /* (k) Robust list head. */ uintptr_t td_rbp_list; /* (k) Robust priv list head. */ uintptr_t td_rb_inact; /* (k) Current in-action mutex loc. */ struct syscall_args td_sa; /* (kx) Syscall parameters. Copied on fork for child tracing. */ #define td_endcopy td_pcb /* * Fields that must be manually set in fork1() or create_thread() * or already have been set in the allocator, constructor, etc. */ struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */ enum { TDS_INACTIVE = 0x0, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING } td_state; /* (t) thread state */ union { register_t tdu_retval[2]; off_t tdu_off; } td_uretoff; /* (k) Syscall aux returns. */ #define td_retval td_uretoff.tdu_retval u_int td_cowgen; /* (k) Generation of COW pointers. */ struct callout td_slpcallout; /* (h) Callout for sleep. */ struct trapframe *td_frame; /* (k) */ struct vm_object *td_kstack_obj;/* (a) Kstack object. */ vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */ int td_kstack_pages; /* (a) Size of the kstack. */ volatile u_int td_critnest; /* (k*) Critical section nest level. */ struct mdthread td_md; /* (k) Any machine-dependent fields. */ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */ struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */ int td_errno; /* Error returned by last syscall. */ struct vnet *td_vnet; /* (k) Effective vnet. */ const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */ struct trapframe *td_intr_frame;/* (k) Frame of the current irq */ struct proc *td_rfppwait_p; /* (k) The vforked child */ struct vm_page **td_ma; /* (k) uio pages held */ int td_ma_cnt; /* (k) size of *td_ma */ void *td_emuldata; /* Emulator state data */ int td_lastcpu; /* (t) Last cpu we were on. 
*/ int td_oncpu; /* (t) Which cpu we are on. */ void *td_lkpi_task; /* LinuxKPI task struct pointer */ }; struct thread0_storage { struct thread t0st_thread; uint64_t t0st_sched[10]; }; struct mtx *thread_lock_block(struct thread *); void thread_lock_unblock(struct thread *, struct mtx *); void thread_lock_set(struct thread *, struct mtx *); #define THREAD_LOCK_ASSERT(td, type) \ do { \ struct mtx *__m = (td)->td_lock; \ if (__m != &blocked_lock) \ mtx_assert(__m, (type)); \ } while (0) #ifdef INVARIANTS #define THREAD_LOCKPTR_ASSERT(td, lock) \ do { \ struct mtx *__m = (td)->td_lock; \ KASSERT((__m == &blocked_lock || __m == (lock)), \ ("Thread %p lock %p does not match %p", td, __m, (lock))); \ } while (0) #define TD_LOCKS_INC(td) ((td)->td_locks++) #define TD_LOCKS_DEC(td) ((td)->td_locks--) #else #define THREAD_LOCKPTR_ASSERT(td, lock) #define TD_LOCKS_INC(td) #define TD_LOCKS_DEC(td) #endif /* * Flags kept in td_flags: * To change these you MUST have the scheduler lock. */ #define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */ #define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */ #define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */ #define TDF_SINTR 0x00000008 /* Sleep is interruptible. */ #define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */ #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */ #define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */ #define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */ #define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */ #define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */ #define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */ #define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */ #define TDF_UNUSED12 0x00001000 /* --available-- */ #define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */ #define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */ #define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */ #define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */ #define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */ #define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */ #define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */ #define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */ #define TDF_SEINTR 0x00200000 /* EINTR on stop attempts. */ #define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */ #define TDF_UNUSED23 0x00800000 /* --available-- */ #define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */ #define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */ #define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */ #define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */ #define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */ #define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */ #define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. 
*/ /* Userland debug flags */ #define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */ #define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */ #define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */ #define TDB_SCE 0x00000008 /* Thread performs syscall enter */ #define TDB_SCX 0x00000010 /* Thread performs syscall exit */ #define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */ #define TDB_FORK 0x00000040 /* TDB_SCX from fork(2) that created new process */ #define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child only) */ #define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */ #define TDB_BORN 0x00000200 /* New LWP indicator for ptrace() */ #define TDB_EXIT 0x00000400 /* Exiting LWP indicator for ptrace() */ #define TDB_VFORK 0x00000800 /* vfork indicator for ptrace() */ #define TDB_FSTP 0x00001000 /* The thread is PT_ATTACH leader */ /* * "Private" flags kept in td_pflags: * These are only written by curthread and thus need no locking. */ #define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */ #define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */ #define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */ #define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */ #define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */ #define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */ #define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */ #define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */ #define TDP_UNUSED9 0x00000100 /* --available-- */ #define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */ #define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */ #define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. */ #define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */ #define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */ #define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */ #define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */ #define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */ #define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */ #define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */ #define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */ #define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */ #define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */ #define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */ #define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */ #define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */ #define TDP_RFPPWAIT 0x02000000 /* Handle RFPPWAIT on syscall exit */ #define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */ #define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */ #define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */ #define TDP_FORKING 0x20000000 /* Thread is being created through fork() */ #define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */ /* * Reasons that the current thread can not be run yet. * More than one may apply. */ #define TDI_SUSPENDED 0x0001 /* On suspension queue. */ #define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */ #define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. */ #define TDI_LOCK 0x0008 /* Stopped on a lock. 
*/ #define TDI_IWAIT 0x0010 /* Awaiting interrupt. */ #define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING) #define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL) #define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED) #define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED) #define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK) #define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT) #define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING) #define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ) #define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN) #define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED) #define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED) #define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD) #define KTDSTATE(td) \ (((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \ ((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \ ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \ ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \ ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding") #define TD_SET_INHIB(td, inhib) do { \ (td)->td_state = TDS_INHIBITED; \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ (td)->td_state = TDS_CAN_RUN; \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING #define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ #define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN #define TD_SBDRY_INTR(td) \ (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0) #define TD_SBDRY_ERRNO(td) \ (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART) /* * Process structure. */ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Resource limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ struct proc *p_reaper; /* (e) My reaper. 
*/ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct mtx p_statmtx; /* Lock for the stats */ struct mtx p_itimmtx; /* Lock for the virt/prof timers */ struct mtx p_profmtx; /* Lock for the profiling */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ #define p_siglist p_sigqueue.sq_signals /* The following fields are all zeroed upon creation in fork. */ #define p_startzero p_oppid pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */ struct vmspace *p_vmspace; /* (b) Address space. */ u_int p_swtick; /* (c) Tick when swapped in or out. */ u_int p_cowgen; /* (c) Generation of COW pointers. */ struct itimerval p_realtimer; /* (c) Alarm timer. */ struct rusage p_ru; /* (a) Exit information. */ struct rusage_ext p_rux; /* (cu) Internal resource usage. */ struct rusage_ext p_crux; /* (c) Internal child resource usage. */ int p_profthreads; /* (c) Num threads in addupc_task. */ volatile int p_exitthreads; /* (j) Number of threads exiting */ int p_traceflag; /* (o) Kernel trace points. */ struct vnode *p_tracevp; /* (c + o) Trace to vnode. */ struct ucred *p_tracecred; /* (o) Credentials to trace with. */ struct vnode *p_textvp; /* (b) Vnode of executable. */ u_int p_lock; /* (c) Proclock (prevent swap) count. */ struct sigiolst p_sigiolst; /* (c) List of sigio sources. */ int p_sigparent; /* (c) Signal to parent on exit. */ int p_sig; /* (n) For core dump/debugger XXX. */ u_long p_code; /* (n) For core dump/debugger XXX. */ u_int p_stops; /* (c) Stop event bitmask. */ u_int p_stype; /* (c) Stop event type. */ char p_step; /* (c) Process is stopped. */ u_char p_pfsflags; /* (c) Procfs flags. */ u_int p_ptevents; /* (c) ptrace() event mask. */ struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */ struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */ struct thread *p_singlethread;/* (c + j) If single threading this is it */ int p_suspcount; /* (j) Num threads in suspended mode. */ struct thread *p_xthread; /* (c) Trap thread */ int p_boundary_count;/* (j) Num threads at user boundary */ int p_pendingcnt; /* how many signals are pending */ struct itimers *p_itimers; /* (c) POSIX interval timers. */ struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */ u_int p_treeflag; /* (e) P_TREE flags */ int p_pendingexits; /* (c) Count of pending thread exits. */ struct filemon *p_filemon; /* (c) filemon-specific data. */ /* End area that is zeroed on creation. */ #define p_endzero p_magic /* The following fields are all copied upon creation in fork. */ #define p_startcopy p_endzero u_int p_magic; /* (b) Magic number. */ int p_osrel; /* (x) osreldate for the binary (from ELF note, if any) */ char p_comm[MAXCOMLEN + 1]; /* (x) Process name. */ struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */ struct pargs *p_args; /* (c) Process arguments. */ rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */ signed char p_nice; /* (c) Process "nice" value. */ int p_fibnum; /* in this routing domain XXX MRT */ pid_t p_reapsubtree; /* (e) Pid of the direct child of the reaper which spawned our subtree. */ uint16_t p_elf_machine; /* (x) ELF machine type */ uint64_t p_elf_flags; /* (x) ELF flags */ /* End area that is copied on creation. */ #define p_endcopy p_xexit u_int p_xexit; /* (c) Exit code. */ u_int p_xsig; /* (c) Stop/kill sig. 
*/ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */ struct knlist *p_klist; /* (c) Knotes attached to this proc. */ int p_numthreads; /* (c) Number of threads. */ struct mdproc p_md; /* Any machine-dependent fields. */ struct callout p_itcallout; /* (h + c) Interval timer callout. */ u_short p_acflag; /* (c) Accounting flags. */ struct proc *p_peers; /* (r) */ struct proc *p_leader; /* (b) */ void *p_emuldata; /* (c) Emulator state data. */ struct label *p_label; /* (*) Proc (not subject) MAC label. */ STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */ LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/ struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */ struct cv p_pwait; /* (*) wait cv for exit/exec. */ struct cv p_dbgwait; /* (*) wait cv for debugger attach after fork. */ uint64_t p_prev_runtime; /* (c) Resource usage accounting. */ struct racct *p_racct; /* (b) Resource accounting. */ int p_throttled; /* (c) Flag for racct pcpu throttling */ - struct vm_domain_policy p_vm_dom_policy; /* (c) process default VM domain, or -1 */ /* * An orphan is the child that has been re-parented to the * debugger as a result of attaching to it. Need to keep * track of them for the parent to be able to collect the exit * status of what used to be children. */ LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */ LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */ }; #define p_session p_pgrp->pg_session #define p_pgid p_pgrp->pg_id #define NOCPU (-1) /* For when we aren't on a CPU. */ #define NOCPU_OLD (255) #define MAXCPU_OLD (254) #define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx) #define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx) #define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type)) #define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx) #define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx) #define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type)) #define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx) #define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx) #define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type)) /* These flags are kept in p_flag. */ #define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */ #define P_CONTROLT 0x00002 /* Has a controlling terminal. */ #define P_KPROC 0x00004 /* Kernel process. */ #define P_UNUSED3 0x00008 /* --available-- */ #define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */ #define P_PROFIL 0x00020 /* Has started profiling. */ #define P_STOPPROF 0x00040 /* Has thread requesting to stop profiling. */ #define P_HADTHREADS 0x00080 /* Has had threads (no cleanup shortcuts) */ #define P_SUGID 0x00100 /* Had set id privileges since last exec. */ #define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */ #define P_SINGLE_EXIT 0x00400 /* Threads suspending should exit, not wait. */ #define P_TRACED 0x00800 /* Debugged process being traced. */ #define P_WAITED 0x01000 /* Someone is waiting for us. */ #define P_WEXIT 0x02000 /* Working on exiting. */ #define P_EXEC 0x04000 /* Process called exec. */ #define P_WKILLED 0x08000 /* Killed, go to kernel/user boundary ASAP. */ #define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */ #define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP. 
*/ #define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing. */ #define P_STOPPED_SINGLE 0x80000 /* Only 1 thread can continue (not to user). */ #define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */ #define P_SIGEVENT 0x200000 /* Process pending signals changed. */ #define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */ #define P_HWPMC 0x800000 /* Process is using HWPMCs */ #define P_JAILED 0x1000000 /* Process is in jail. */ #define P_TOTAL_STOP 0x2000000 /* Stopped in stop_all_proc. */ #define P_INEXEC 0x4000000 /* Process is in execve(). */ #define P_STATCHILD 0x8000000 /* Child process stopped or exited. */ #define P_INMEM 0x10000000 /* Loaded into memory. */ #define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */ #define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */ #define P_PPTRACE 0x80000000 /* PT_TRACEME by vforked child. */ #define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE) #define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED) #define P_KILLED(p) ((p)->p_flag & P_WKILLED) /* These flags are kept in p_flag2. */ #define P2_INHERIT_PROTECTED 0x00000001 /* New children get P_PROTECTED. */ #define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or coredumps. */ #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ #define P2_PTRACE_FSTP 0x00000010 /* SIGSTOP from PT_ATTACH not yet handled. */ #define P2_TRAPCAP 0x00000020 /* SIGTRAP on ENOTCAPABLE */ /* Flags protected by proctree_lock, kept in p_treeflags. */ #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ #define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan list */ #define P_TREE_REAPER 0x00000004 /* Reaper of subtree */ /* * These were process status values (p_stat), now they are only used in * legacy conversion code. */ #define SIDL 1 /* Process being created by fork. */ #define SRUN 2 /* Currently runnable. */ #define SSLEEP 3 /* Sleeping on an address. */ #define SSTOP 4 /* Process debugging or suspension. */ #define SZOMB 5 /* Awaiting collection by parent. */ #define SWAIT 6 /* Waiting for interrupt. */ #define SLOCK 7 /* Blocked on a lock. */ #define P_MAGIC 0xbeefface #ifdef _KERNEL /* Types and flags for mi_switch(). */ #define SW_TYPE_MASK 0xff /* First 8 bits are switch type */ #define SWT_NONE 0 /* Unspecified switch. */ #define SWT_PREEMPT 1 /* Switching due to preemption. */ #define SWT_OWEPREEMPT 2 /* Switching due to owepreempt. */ #define SWT_TURNSTILE 3 /* Turnstile contention. */ #define SWT_SLEEPQ 4 /* Sleepq wait. */ #define SWT_SLEEPQTIMO 5 /* Sleepq timeout wait. */ #define SWT_RELINQUISH 6 /* yield call. */ #define SWT_NEEDRESCHED 7 /* NEEDRESCHED was set. */ #define SWT_IDLE 8 /* Switching from the idle thread. */ #define SWT_IWAIT 9 /* Waiting for interrupts. */ #define SWT_SUSPEND 10 /* Thread suspended. */ #define SWT_REMOTEPREEMPT 11 /* Remote processor preempted. */ #define SWT_REMOTEWAKEIDLE 12 /* Remote processor preempted idle. */ #define SWT_COUNT 13 /* Number of switch types. */ /* Flags */ #define SW_VOL 0x0100 /* Voluntary switch. */ #define SW_INVOL 0x0200 /* Involuntary switch. */ #define SW_PREEMPT 0x0400 /* The invol switch is a preemption */ /* How values for thread_single(). 
*/ #define SINGLE_NO_EXIT 0 #define SINGLE_EXIT 1 #define SINGLE_BOUNDARY 2 #define SINGLE_ALLPROC 3 #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_PARGS); MALLOC_DECLARE(M_PGRP); MALLOC_DECLARE(M_SESSION); MALLOC_DECLARE(M_SUBPROC); #endif #define FOREACH_PROC_IN_SYSTEM(p) \ LIST_FOREACH((p), &allproc, p_list) #define FOREACH_THREAD_IN_PROC(p, td) \ TAILQ_FOREACH((td), &(p)->p_threads, td_plist) #define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads) /* * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit * in a pid_t, as it is used to represent "no process group". */ #define PID_MAX 99999 #define NO_PID 100000 extern pid_t pid_max; #define SESS_LEADER(p) ((p)->p_session->s_leader == (p)) #define STOPEVENT(p, e, v) do { \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) { \ PROC_LOCK(p); \ stopevent((p), (e), (v)); \ PROC_UNLOCK(p); \ } \ } while (0) #define _STOPEVENT(p, e, v) do { \ PROC_LOCK_ASSERT(p, MA_OWNED); \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \ "checking stopevent %d", (e)); \ if ((p)->p_stops & (e)) \ stopevent((p), (e), (v)); \ } while (0) /* Lock and unlock a process. */ #define PROC_LOCK(p) mtx_lock(&(p)->p_mtx) #define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx) #define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx) #define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx) #define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type)) /* Lock and unlock a process group. */ #define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx) #define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx) #define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx) #define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type)) #define PGRP_LOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_LOCK(pg); \ } while (0) #define PGRP_UNLOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_UNLOCK(pg); \ } while (0) /* Lock and unlock a session. */ #define SESS_LOCK(s) mtx_lock(&(s)->s_mtx) #define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx) #define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx) #define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type)) /* * Non-zero p_lock ensures that: * - exit1() is not performed until p_lock reaches zero; * - the process' threads stack are not swapped out if they are currently * not (P_INMEM). * * PHOLD() asserts that the process (except the current process) is * not exiting, increments p_lock and swaps threads stacks into memory, * if needed. * _PHOLD() is same as PHOLD(), it takes the process locked. * _PHOLD_LITE() also takes the process locked, but comparing with * _PHOLD(), it only guarantees that exit1() is not executed, * faultin() is not called. 
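 *
 * For example, a caller that must keep another process from exiting
 * while it is examined, without holding the process lock across the
 * whole operation, can follow this sketch (pfind() returns the
 * process locked):
 *
 *	p = pfind(pid);
 *	if (p != NULL) {
 *		if ((p->p_flag & P_WEXIT) == 0) {
 *			_PHOLD(p);
 *			PROC_UNLOCK(p);
 *			...inspect or operate on the held process...
 *			PRELE(p);
 *		} else
 *			PROC_UNLOCK(p);
 *	}
 *
 * The pget() helper declared below combines the lookup with such
 * checks; with PGET_HOLD it returns the process held and unlocked.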
*/ #define PHOLD(p) do { \ PROC_LOCK(p); \ _PHOLD(p); \ PROC_UNLOCK(p); \ } while (0) #define _PHOLD(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process %p", p)); \ (p)->p_lock++; \ if (((p)->p_flag & P_INMEM) == 0) \ faultin((p)); \ } while (0) #define _PHOLD_LITE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process %p", p)); \ (p)->p_lock++; \ } while (0) #define PROC_ASSERT_HELD(p) do { \ KASSERT((p)->p_lock > 0, ("process %p not held", p)); \ } while (0) #define PRELE(p) do { \ PROC_LOCK((p)); \ _PRELE((p)); \ PROC_UNLOCK((p)); \ } while (0) #define _PRELE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ PROC_ASSERT_HELD(p); \ (--(p)->p_lock); \ if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \ wakeup(&(p)->p_lock); \ } while (0) #define PROC_ASSERT_NOT_HELD(p) do { \ KASSERT((p)->p_lock == 0, ("process %p held", p)); \ } while (0) #define PROC_UPDATE_COW(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ (p)->p_cowgen++; \ } while (0) /* Check whether a thread is safe to be swapped out. */ #define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP) /* Control whether or not it is safe for curthread to sleep. */ #define THREAD_NO_SLEEPING() ((curthread)->td_no_sleeping++) #define THREAD_SLEEPING_OK() ((curthread)->td_no_sleeping--) #define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0) #define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; extern u_long pidhash; #define TIDHASH(tid) (&tidhashtbl[(tid) & tidhash]) extern LIST_HEAD(tidhashhead, thread) *tidhashtbl; extern u_long tidhash; extern struct rwlock tidhash_lock; #define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; extern u_long pgrphash; extern struct sx allproc_lock; extern int allproc_gen; extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct proc proc0; /* Process slot for swapper. */ extern struct thread0_storage thread0_st; /* Primary thread in proc0. */ #define thread0 (thread0_st.t0st_thread) extern struct vmspace vmspace0; /* VM space for proc0. */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int lastpid; extern int nprocs, maxproc; /* Current and max number of procs. */ extern int maxprocperuid; /* Max procs per uid. */ extern u_long ps_arg_cache_limit; LIST_HEAD(proclist, proc); TAILQ_HEAD(procqueue, proc); TAILQ_HEAD(threadqueue, thread); extern struct proclist allproc; /* List of all processes. */ extern struct proclist zombproc; /* List of zombie processes. */ extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */ extern struct uma_zone *proc_zone; struct proc *pfind(pid_t); /* Find process by id. */ struct proc *pfind_any(pid_t); /* Find (zombie) process by id. */ struct proc *pfind_locked(pid_t pid); struct pgrp *pgfind(pid_t); /* Find process group by id. */ struct proc *zpfind(pid_t); /* Find zombie process by id. */ struct fork_req { int fr_flags; int fr_pages; int *fr_pidp; struct proc **fr_procp; int *fr_pd_fd; int fr_pd_flags; struct filecaps *fr_pd_fcaps; }; /* * pget() flags. */ #define PGET_HOLD 0x00001 /* Hold the process. */ #define PGET_CANSEE 0x00002 /* Check against p_cansee(). */ #define PGET_CANDEBUG 0x00004 /* Check against p_candebug(). */ #define PGET_ISCURRENT 0x00008 /* Check that the found process is current. */ #define PGET_NOTWEXIT 0x00010 /* Check that the process is not in P_WEXIT. 
*/ #define PGET_NOTINEXEC 0x00020 /* Check that the process is not in P_INEXEC. */ #define PGET_NOTID 0x00040 /* Do not assume tid if pid > PID_MAX. */ #define PGET_WANTREAD (PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT) int pget(pid_t pid, int flags, struct proc **pp); void ast(struct trapframe *framep); struct thread *choosethread(void); int cr_cansee(struct ucred *u1, struct ucred *u2); int cr_canseesocket(struct ucred *cred, struct socket *so); int cr_canseeothergids(struct ucred *u1, struct ucred *u2); int cr_canseeotheruids(struct ucred *u1, struct ucred *u2); int cr_canseejailproc(struct ucred *u1, struct ucred *u2); int cr_cansignal(struct ucred *cred, struct proc *proc, int signum); int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess); int enterthispgrp(struct proc *p, struct pgrp *pgrp); void faultin(struct proc *p); void fixjobc(struct proc *p, struct pgrp *pgrp, int entering); int fork1(struct thread *, struct fork_req *); void fork_exit(void (*)(void *, struct trapframe *), void *, struct trapframe *); void fork_return(struct thread *, struct trapframe *); int inferior(struct proc *p); void kern_yield(int); void kick_proc0(void); void killjobc(void); int leavepgrp(struct proc *p); int maybe_preempt(struct thread *td); void maybe_yield(void); void mi_switch(int flags, struct thread *newtd); int p_candebug(struct thread *td, struct proc *p); int p_cansee(struct thread *td, struct proc *p); int p_cansched(struct thread *td, struct proc *p); int p_cansignal(struct thread *td, struct proc *p, int signum); int p_canwait(struct thread *td, struct proc *p); struct pargs *pargs_alloc(int len); void pargs_drop(struct pargs *pa); void pargs_hold(struct pargs *pa); int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb); void procinit(void); void proc_linkup0(struct proc *p, struct thread *td); void proc_linkup(struct proc *p, struct thread *td); struct proc *proc_realparent(struct proc *child); void proc_reap(struct thread *td, struct proc *p, int *status, int options); void proc_reparent(struct proc *child, struct proc *newparent); void proc_set_traced(struct proc *p, bool stop); struct pstats *pstats_alloc(void); void pstats_fork(struct pstats *src, struct pstats *dst); void pstats_free(struct pstats *ps); void reaper_abandon_children(struct proc *p, bool exiting); int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void sess_hold(struct session *); void sess_release(struct session *); int setrunnable(struct thread *); void setsugid(struct proc *p); int should_yield(void); int sigonstack(size_t sp); void stopevent(struct proc *, u_int, u_int); struct thread *tdfind(lwpid_t, pid_t); void threadinit(void); void tidhash_add(struct thread *); void tidhash_remove(struct thread *); void cpu_idle(int); int cpu_idle_wakeup(int); extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. 
*/ void cpu_switch(struct thread *, struct thread *, struct mtx *); void cpu_throw(struct thread *, struct thread *) __dead2; void unsleep(struct thread *); void userret(struct thread *, struct trapframe *); void cpu_exit(struct thread *); void exit1(struct thread *, int, int) __dead2; void cpu_copy_thread(struct thread *td, struct thread *td0); int cpu_fetch_syscall_args(struct thread *td); void cpu_fork(struct thread *, struct proc *, struct thread *, int); void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *); void cpu_set_syscall_retval(struct thread *, int); void cpu_set_upcall(struct thread *, void (*)(void *), void *, stack_t *); int cpu_set_user_tls(struct thread *, void *tls_base); void cpu_thread_alloc(struct thread *); void cpu_thread_clean(struct thread *); void cpu_thread_exit(struct thread *); void cpu_thread_free(struct thread *); void cpu_thread_swapin(struct thread *); void cpu_thread_swapout(struct thread *); struct thread *thread_alloc(int pages); int thread_alloc_stack(struct thread *, int pages); void thread_cow_get_proc(struct thread *newtd, struct proc *p); void thread_cow_get(struct thread *newtd, struct thread *td); void thread_cow_free(struct thread *td); void thread_cow_update(struct thread *td); int thread_create(struct thread *td, struct rtprio *rtp, int (*initialize_thread)(struct thread *, void *), void *thunk); void thread_exit(void) __dead2; void thread_free(struct thread *td); void thread_link(struct thread *td, struct proc *p); void thread_reap(void); int thread_single(struct proc *p, int how); void thread_single_end(struct proc *p, int how); void thread_stash(struct thread *td); void thread_stopped(struct proc *p); void childproc_stopped(struct proc *child, int reason); void childproc_continued(struct proc *child); void childproc_exited(struct proc *child); int thread_suspend_check(int how); bool thread_suspend_check_needed(void); void thread_suspend_switch(struct thread *, struct proc *p); void thread_suspend_one(struct thread *td); void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); void thread_wait(struct proc *p); struct thread *thread_find(struct proc *p, lwpid_t tid); void stop_all_proc(void); void resume_all_proc(void); static __inline int curthread_pflags_set(int flags) { struct thread *td; int save; td = curthread; save = ~flags | (td->td_pflags & flags); td->td_pflags |= flags; return (save); } static __inline void curthread_pflags_restore(int save) { curthread->td_pflags &= save; } static __inline __pure2 struct td_sched * td_get_sched(struct thread *td) { return ((struct td_sched *)&td[1]); } extern void (*softdep_ast_cleanup)(struct thread *); static __inline void td_softdep_cleanup(struct thread *td) { if (td->td_su != NULL && softdep_ast_cleanup != NULL) softdep_ast_cleanup(td); } #endif /* _KERNEL */ #endif /* !_SYS_PROC_H_ */ Index: user/jeff/numa/sys/sys/syscall.h =================================================================== --- user/jeff/numa/sys/sys/syscall.h (revision 326888) +++ user/jeff/numa/sys/sys/syscall.h (revision 326889) @@ -1,481 +1,483 @@ /* * System call numbers. * * DO NOT EDIT-- this file is automatically generated. 
* $FreeBSD$ */ #define SYS_syscall 0 #define SYS_exit 1 #define SYS_fork 2 #define SYS_read 3 #define SYS_write 4 #define SYS_open 5 #define SYS_close 6 #define SYS_wait4 7 /* 8 is old creat */ #define SYS_link 9 #define SYS_unlink 10 /* 11 is obsolete execv */ #define SYS_chdir 12 #define SYS_fchdir 13 #define SYS_freebsd11_mknod 14 #define SYS_chmod 15 #define SYS_chown 16 #define SYS_break 17 /* 18 is freebsd4 getfsstat */ /* 19 is old lseek */ #define SYS_getpid 20 #define SYS_mount 21 #define SYS_unmount 22 #define SYS_setuid 23 #define SYS_getuid 24 #define SYS_geteuid 25 #define SYS_ptrace 26 #define SYS_recvmsg 27 #define SYS_sendmsg 28 #define SYS_recvfrom 29 #define SYS_accept 30 #define SYS_getpeername 31 #define SYS_getsockname 32 #define SYS_access 33 #define SYS_chflags 34 #define SYS_fchflags 35 #define SYS_sync 36 #define SYS_kill 37 /* 38 is old stat */ #define SYS_getppid 39 /* 40 is old lstat */ #define SYS_dup 41 #define SYS_freebsd10_pipe 42 #define SYS_getegid 43 #define SYS_profil 44 #define SYS_ktrace 45 /* 46 is old sigaction */ #define SYS_getgid 47 /* 48 is old sigprocmask */ #define SYS_getlogin 49 #define SYS_setlogin 50 #define SYS_acct 51 /* 52 is old sigpending */ #define SYS_sigaltstack 53 #define SYS_ioctl 54 #define SYS_reboot 55 #define SYS_revoke 56 #define SYS_symlink 57 #define SYS_readlink 58 #define SYS_execve 59 #define SYS_umask 60 #define SYS_chroot 61 /* 62 is old fstat */ /* 63 is old getkerninfo */ /* 64 is old getpagesize */ #define SYS_msync 65 #define SYS_vfork 66 /* 67 is obsolete vread */ /* 68 is obsolete vwrite */ #define SYS_sbrk 69 #define SYS_sstk 70 /* 71 is old mmap */ #define SYS_vadvise 72 #define SYS_munmap 73 #define SYS_mprotect 74 #define SYS_madvise 75 /* 76 is obsolete vhangup */ /* 77 is obsolete vlimit */ #define SYS_mincore 78 #define SYS_getgroups 79 #define SYS_setgroups 80 #define SYS_getpgrp 81 #define SYS_setpgid 82 #define SYS_setitimer 83 /* 84 is old wait */ #define SYS_swapon 85 #define SYS_getitimer 86 /* 87 is old gethostname */ /* 88 is old sethostname */ #define SYS_getdtablesize 89 #define SYS_dup2 90 #define SYS_fcntl 92 #define SYS_select 93 #define SYS_fsync 95 #define SYS_setpriority 96 #define SYS_socket 97 #define SYS_connect 98 /* 99 is old accept */ #define SYS_getpriority 100 /* 101 is old send */ /* 102 is old recv */ /* 103 is old sigreturn */ #define SYS_bind 104 #define SYS_setsockopt 105 #define SYS_listen 106 /* 107 is obsolete vtimes */ /* 108 is old sigvec */ /* 109 is old sigblock */ /* 110 is old sigsetmask */ /* 111 is old sigsuspend */ /* 112 is old sigstack */ /* 113 is old recvmsg */ /* 114 is old sendmsg */ /* 115 is obsolete vtrace */ #define SYS_gettimeofday 116 #define SYS_getrusage 117 #define SYS_getsockopt 118 #define SYS_readv 120 #define SYS_writev 121 #define SYS_settimeofday 122 #define SYS_fchown 123 #define SYS_fchmod 124 /* 125 is old recvfrom */ #define SYS_setreuid 126 #define SYS_setregid 127 #define SYS_rename 128 /* 129 is old truncate */ /* 130 is old ftruncate */ #define SYS_flock 131 #define SYS_mkfifo 132 #define SYS_sendto 133 #define SYS_shutdown 134 #define SYS_socketpair 135 #define SYS_mkdir 136 #define SYS_rmdir 137 #define SYS_utimes 138 /* 139 is obsolete 4.2 sigreturn */ #define SYS_adjtime 140 /* 141 is old getpeername */ /* 142 is old gethostid */ /* 143 is old sethostid */ /* 144 is old getrlimit */ /* 145 is old setrlimit */ /* 146 is old killpg */ #define SYS_setsid 147 #define SYS_quotactl 148 /* 149 is old quota */ /* 150 is old getsockname */ 
#define SYS_nlm_syscall 154 #define SYS_nfssvc 155 /* 156 is old getdirentries */ /* 157 is freebsd4 statfs */ /* 158 is freebsd4 fstatfs */ #define SYS_lgetfh 160 #define SYS_getfh 161 /* 162 is freebsd4 getdomainname */ /* 163 is freebsd4 setdomainname */ /* 164 is freebsd4 uname */ #define SYS_sysarch 165 #define SYS_rtprio 166 #define SYS_semsys 169 #define SYS_msgsys 170 #define SYS_shmsys 171 /* 173 is freebsd6 pread */ /* 174 is freebsd6 pwrite */ #define SYS_setfib 175 #define SYS_ntp_adjtime 176 #define SYS_setgid 181 #define SYS_setegid 182 #define SYS_seteuid 183 #define SYS_freebsd11_stat 188 #define SYS_freebsd11_fstat 189 #define SYS_freebsd11_lstat 190 #define SYS_pathconf 191 #define SYS_fpathconf 192 #define SYS_getrlimit 194 #define SYS_setrlimit 195 #define SYS_freebsd11_getdirentries 196 /* 197 is freebsd6 mmap */ #define SYS___syscall 198 /* 199 is freebsd6 lseek */ /* 200 is freebsd6 truncate */ /* 201 is freebsd6 ftruncate */ #define SYS___sysctl 202 #define SYS_mlock 203 #define SYS_munlock 204 #define SYS_undelete 205 #define SYS_futimes 206 #define SYS_getpgid 207 #define SYS_poll 209 #define SYS_freebsd7___semctl 220 #define SYS_semget 221 #define SYS_semop 222 #define SYS_freebsd7_msgctl 224 #define SYS_msgget 225 #define SYS_msgsnd 226 #define SYS_msgrcv 227 #define SYS_shmat 228 #define SYS_freebsd7_shmctl 229 #define SYS_shmdt 230 #define SYS_shmget 231 #define SYS_clock_gettime 232 #define SYS_clock_settime 233 #define SYS_clock_getres 234 #define SYS_ktimer_create 235 #define SYS_ktimer_delete 236 #define SYS_ktimer_settime 237 #define SYS_ktimer_gettime 238 #define SYS_ktimer_getoverrun 239 #define SYS_nanosleep 240 #define SYS_ffclock_getcounter 241 #define SYS_ffclock_setestimate 242 #define SYS_ffclock_getestimate 243 #define SYS_clock_nanosleep 244 #define SYS_clock_getcpuclockid2 247 #define SYS_ntp_gettime 248 #define SYS_minherit 250 #define SYS_rfork 251 /* 252 is obsolete openbsd_poll */ #define SYS_issetugid 253 #define SYS_lchown 254 #define SYS_aio_read 255 #define SYS_aio_write 256 #define SYS_lio_listio 257 #define SYS_freebsd11_getdents 272 #define SYS_lchmod 274 #define SYS_netbsd_lchown 275 #define SYS_lutimes 276 #define SYS_netbsd_msync 277 #define SYS_freebsd11_nstat 278 #define SYS_freebsd11_nfstat 279 #define SYS_freebsd11_nlstat 280 #define SYS_preadv 289 #define SYS_pwritev 290 /* 297 is freebsd4 fhstatfs */ #define SYS_fhopen 298 #define SYS_freebsd11_fhstat 299 #define SYS_modnext 300 #define SYS_modstat 301 #define SYS_modfnext 302 #define SYS_modfind 303 #define SYS_kldload 304 #define SYS_kldunload 305 #define SYS_kldfind 306 #define SYS_kldnext 307 #define SYS_kldstat 308 #define SYS_kldfirstmod 309 #define SYS_getsid 310 #define SYS_setresuid 311 #define SYS_setresgid 312 /* 313 is obsolete signanosleep */ #define SYS_aio_return 314 #define SYS_aio_suspend 315 #define SYS_aio_cancel 316 #define SYS_aio_error 317 /* 318 is freebsd6 aio_read */ /* 319 is freebsd6 aio_write */ /* 320 is freebsd6 lio_listio */ #define SYS_yield 321 /* 322 is obsolete thr_sleep */ /* 323 is obsolete thr_wakeup */ #define SYS_mlockall 324 #define SYS_munlockall 325 #define SYS___getcwd 326 #define SYS_sched_setparam 327 #define SYS_sched_getparam 328 #define SYS_sched_setscheduler 329 #define SYS_sched_getscheduler 330 #define SYS_sched_yield 331 #define SYS_sched_get_priority_max 332 #define SYS_sched_get_priority_min 333 #define SYS_sched_rr_get_interval 334 #define SYS_utrace 335 /* 336 is freebsd4 sendfile */ #define SYS_kldsym 337 #define 
SYS_jail 338 #define SYS_nnpfs_syscall 339 #define SYS_sigprocmask 340 #define SYS_sigsuspend 341 /* 342 is freebsd4 sigaction */ #define SYS_sigpending 343 /* 344 is freebsd4 sigreturn */ #define SYS_sigtimedwait 345 #define SYS_sigwaitinfo 346 #define SYS___acl_get_file 347 #define SYS___acl_set_file 348 #define SYS___acl_get_fd 349 #define SYS___acl_set_fd 350 #define SYS___acl_delete_file 351 #define SYS___acl_delete_fd 352 #define SYS___acl_aclcheck_file 353 #define SYS___acl_aclcheck_fd 354 #define SYS_extattrctl 355 #define SYS_extattr_set_file 356 #define SYS_extattr_get_file 357 #define SYS_extattr_delete_file 358 #define SYS_aio_waitcomplete 359 #define SYS_getresuid 360 #define SYS_getresgid 361 #define SYS_kqueue 362 #define SYS_freebsd11_kevent 363 #define SYS_extattr_set_fd 371 #define SYS_extattr_get_fd 372 #define SYS_extattr_delete_fd 373 #define SYS___setugid 374 #define SYS_eaccess 376 #define SYS_afs3_syscall 377 #define SYS_nmount 378 #define SYS___mac_get_proc 384 #define SYS___mac_set_proc 385 #define SYS___mac_get_fd 386 #define SYS___mac_get_file 387 #define SYS___mac_set_fd 388 #define SYS___mac_set_file 389 #define SYS_kenv 390 #define SYS_lchflags 391 #define SYS_uuidgen 392 #define SYS_sendfile 393 #define SYS_mac_syscall 394 #define SYS_freebsd11_getfsstat 395 #define SYS_freebsd11_statfs 396 #define SYS_freebsd11_fstatfs 397 #define SYS_freebsd11_fhstatfs 398 #define SYS_ksem_close 400 #define SYS_ksem_post 401 #define SYS_ksem_wait 402 #define SYS_ksem_trywait 403 #define SYS_ksem_init 404 #define SYS_ksem_open 405 #define SYS_ksem_unlink 406 #define SYS_ksem_getvalue 407 #define SYS_ksem_destroy 408 #define SYS___mac_get_pid 409 #define SYS___mac_get_link 410 #define SYS___mac_set_link 411 #define SYS_extattr_set_link 412 #define SYS_extattr_get_link 413 #define SYS_extattr_delete_link 414 #define SYS___mac_execve 415 #define SYS_sigaction 416 #define SYS_sigreturn 417 #define SYS_getcontext 421 #define SYS_setcontext 422 #define SYS_swapcontext 423 #define SYS_swapoff 424 #define SYS___acl_get_link 425 #define SYS___acl_set_link 426 #define SYS___acl_delete_link 427 #define SYS___acl_aclcheck_link 428 #define SYS_sigwait 429 #define SYS_thr_create 430 #define SYS_thr_exit 431 #define SYS_thr_self 432 #define SYS_thr_kill 433 #define SYS_jail_attach 436 #define SYS_extattr_list_fd 437 #define SYS_extattr_list_file 438 #define SYS_extattr_list_link 439 #define SYS_ksem_timedwait 441 #define SYS_thr_suspend 442 #define SYS_thr_wake 443 #define SYS_kldunloadf 444 #define SYS_audit 445 #define SYS_auditon 446 #define SYS_getauid 447 #define SYS_setauid 448 #define SYS_getaudit 449 #define SYS_setaudit 450 #define SYS_getaudit_addr 451 #define SYS_setaudit_addr 452 #define SYS_auditctl 453 #define SYS__umtx_op 454 #define SYS_thr_new 455 #define SYS_sigqueue 456 #define SYS_kmq_open 457 #define SYS_kmq_setattr 458 #define SYS_kmq_timedreceive 459 #define SYS_kmq_timedsend 460 #define SYS_kmq_notify 461 #define SYS_kmq_unlink 462 #define SYS_abort2 463 #define SYS_thr_set_name 464 #define SYS_aio_fsync 465 #define SYS_rtprio_thread 466 #define SYS_sctp_peeloff 471 #define SYS_sctp_generic_sendmsg 472 #define SYS_sctp_generic_sendmsg_iov 473 #define SYS_sctp_generic_recvmsg 474 #define SYS_pread 475 #define SYS_pwrite 476 #define SYS_mmap 477 #define SYS_lseek 478 #define SYS_truncate 479 #define SYS_ftruncate 480 #define SYS_thr_kill2 481 #define SYS_shm_open 482 #define SYS_shm_unlink 483 #define SYS_cpuset 484 #define SYS_cpuset_setid 485 #define 
SYS_cpuset_getid 486 #define SYS_cpuset_getaffinity 487 #define SYS_cpuset_setaffinity 488 #define SYS_faccessat 489 #define SYS_fchmodat 490 #define SYS_fchownat 491 #define SYS_fexecve 492 #define SYS_freebsd11_fstatat 493 #define SYS_futimesat 494 #define SYS_linkat 495 #define SYS_mkdirat 496 #define SYS_mkfifoat 497 #define SYS_freebsd11_mknodat 498 #define SYS_openat 499 #define SYS_readlinkat 500 #define SYS_renameat 501 #define SYS_symlinkat 502 #define SYS_unlinkat 503 #define SYS_posix_openpt 504 #define SYS_gssd_syscall 505 #define SYS_jail_get 506 #define SYS_jail_set 507 #define SYS_jail_remove 508 #define SYS_closefrom 509 #define SYS___semctl 510 #define SYS_msgctl 511 #define SYS_shmctl 512 #define SYS_lpathconf 513 /* 514 is obsolete cap_new */ #define SYS___cap_rights_get 515 #define SYS_cap_enter 516 #define SYS_cap_getmode 517 #define SYS_pdfork 518 #define SYS_pdkill 519 #define SYS_pdgetpid 520 #define SYS_pselect 522 #define SYS_getloginclass 523 #define SYS_setloginclass 524 #define SYS_rctl_get_racct 525 #define SYS_rctl_get_rules 526 #define SYS_rctl_get_limits 527 #define SYS_rctl_add_rule 528 #define SYS_rctl_remove_rule 529 #define SYS_posix_fallocate 530 #define SYS_posix_fadvise 531 #define SYS_wait6 532 #define SYS_cap_rights_limit 533 #define SYS_cap_ioctls_limit 534 #define SYS_cap_ioctls_get 535 #define SYS_cap_fcntls_limit 536 #define SYS_cap_fcntls_get 537 #define SYS_bindat 538 #define SYS_connectat 539 #define SYS_chflagsat 540 #define SYS_accept4 541 #define SYS_pipe2 542 #define SYS_aio_mlock 543 #define SYS_procctl 544 #define SYS_ppoll 545 #define SYS_futimens 546 #define SYS_utimensat 547 #define SYS_numa_getaffinity 548 #define SYS_numa_setaffinity 549 #define SYS_fdatasync 550 #define SYS_fstat 551 #define SYS_fstatat 552 #define SYS_fhstat 553 #define SYS_getdirentries 554 #define SYS_statfs 555 #define SYS_fstatfs 556 #define SYS_getfsstat 557 #define SYS_fhstatfs 558 #define SYS_mknodat 559 #define SYS_kevent 560 -#define SYS_MAXSYSCALL 561 +#define SYS_cpuset_getdomain 561 +#define SYS_cpuset_setdomain 562 +#define SYS_MAXSYSCALL 563 Index: user/jeff/numa/sys/sys/syscall.mk =================================================================== --- user/jeff/numa/sys/sys/syscall.mk (revision 326888) +++ user/jeff/numa/sys/sys/syscall.mk (revision 326889) @@ -1,408 +1,410 @@ # FreeBSD system call object files. # DO NOT EDIT-- this file is automatically generated. 
# $FreeBSD$ MIASM = \ syscall.o \ exit.o \ fork.o \ read.o \ write.o \ open.o \ close.o \ wait4.o \ link.o \ unlink.o \ chdir.o \ fchdir.o \ freebsd11_mknod.o \ chmod.o \ chown.o \ break.o \ getpid.o \ mount.o \ unmount.o \ setuid.o \ getuid.o \ geteuid.o \ ptrace.o \ recvmsg.o \ sendmsg.o \ recvfrom.o \ accept.o \ getpeername.o \ getsockname.o \ access.o \ chflags.o \ fchflags.o \ sync.o \ kill.o \ getppid.o \ dup.o \ freebsd10_pipe.o \ getegid.o \ profil.o \ ktrace.o \ getgid.o \ getlogin.o \ setlogin.o \ acct.o \ sigaltstack.o \ ioctl.o \ reboot.o \ revoke.o \ symlink.o \ readlink.o \ execve.o \ umask.o \ chroot.o \ msync.o \ vfork.o \ sbrk.o \ sstk.o \ vadvise.o \ munmap.o \ mprotect.o \ madvise.o \ mincore.o \ getgroups.o \ setgroups.o \ getpgrp.o \ setpgid.o \ setitimer.o \ swapon.o \ getitimer.o \ getdtablesize.o \ dup2.o \ fcntl.o \ select.o \ fsync.o \ setpriority.o \ socket.o \ connect.o \ getpriority.o \ bind.o \ setsockopt.o \ listen.o \ gettimeofday.o \ getrusage.o \ getsockopt.o \ readv.o \ writev.o \ settimeofday.o \ fchown.o \ fchmod.o \ setreuid.o \ setregid.o \ rename.o \ flock.o \ mkfifo.o \ sendto.o \ shutdown.o \ socketpair.o \ mkdir.o \ rmdir.o \ utimes.o \ adjtime.o \ setsid.o \ quotactl.o \ nlm_syscall.o \ nfssvc.o \ lgetfh.o \ getfh.o \ sysarch.o \ rtprio.o \ semsys.o \ msgsys.o \ shmsys.o \ setfib.o \ ntp_adjtime.o \ setgid.o \ setegid.o \ seteuid.o \ freebsd11_stat.o \ freebsd11_fstat.o \ freebsd11_lstat.o \ pathconf.o \ fpathconf.o \ getrlimit.o \ setrlimit.o \ freebsd11_getdirentries.o \ __syscall.o \ __sysctl.o \ mlock.o \ munlock.o \ undelete.o \ futimes.o \ getpgid.o \ poll.o \ freebsd7___semctl.o \ semget.o \ semop.o \ freebsd7_msgctl.o \ msgget.o \ msgsnd.o \ msgrcv.o \ shmat.o \ freebsd7_shmctl.o \ shmdt.o \ shmget.o \ clock_gettime.o \ clock_settime.o \ clock_getres.o \ ktimer_create.o \ ktimer_delete.o \ ktimer_settime.o \ ktimer_gettime.o \ ktimer_getoverrun.o \ nanosleep.o \ ffclock_getcounter.o \ ffclock_setestimate.o \ ffclock_getestimate.o \ clock_nanosleep.o \ clock_getcpuclockid2.o \ ntp_gettime.o \ minherit.o \ rfork.o \ issetugid.o \ lchown.o \ aio_read.o \ aio_write.o \ lio_listio.o \ freebsd11_getdents.o \ lchmod.o \ netbsd_lchown.o \ lutimes.o \ netbsd_msync.o \ freebsd11_nstat.o \ freebsd11_nfstat.o \ freebsd11_nlstat.o \ preadv.o \ pwritev.o \ fhopen.o \ freebsd11_fhstat.o \ modnext.o \ modstat.o \ modfnext.o \ modfind.o \ kldload.o \ kldunload.o \ kldfind.o \ kldnext.o \ kldstat.o \ kldfirstmod.o \ getsid.o \ setresuid.o \ setresgid.o \ aio_return.o \ aio_suspend.o \ aio_cancel.o \ aio_error.o \ yield.o \ mlockall.o \ munlockall.o \ __getcwd.o \ sched_setparam.o \ sched_getparam.o \ sched_setscheduler.o \ sched_getscheduler.o \ sched_yield.o \ sched_get_priority_max.o \ sched_get_priority_min.o \ sched_rr_get_interval.o \ utrace.o \ kldsym.o \ jail.o \ nnpfs_syscall.o \ sigprocmask.o \ sigsuspend.o \ sigpending.o \ sigtimedwait.o \ sigwaitinfo.o \ __acl_get_file.o \ __acl_set_file.o \ __acl_get_fd.o \ __acl_set_fd.o \ __acl_delete_file.o \ __acl_delete_fd.o \ __acl_aclcheck_file.o \ __acl_aclcheck_fd.o \ extattrctl.o \ extattr_set_file.o \ extattr_get_file.o \ extattr_delete_file.o \ aio_waitcomplete.o \ getresuid.o \ getresgid.o \ kqueue.o \ freebsd11_kevent.o \ extattr_set_fd.o \ extattr_get_fd.o \ extattr_delete_fd.o \ __setugid.o \ eaccess.o \ afs3_syscall.o \ nmount.o \ __mac_get_proc.o \ __mac_set_proc.o \ __mac_get_fd.o \ __mac_get_file.o \ __mac_set_fd.o \ __mac_set_file.o \ kenv.o \ lchflags.o \ uuidgen.o \ sendfile.o \ 
mac_syscall.o \ freebsd11_getfsstat.o \ freebsd11_statfs.o \ freebsd11_fstatfs.o \ freebsd11_fhstatfs.o \ ksem_close.o \ ksem_post.o \ ksem_wait.o \ ksem_trywait.o \ ksem_init.o \ ksem_open.o \ ksem_unlink.o \ ksem_getvalue.o \ ksem_destroy.o \ __mac_get_pid.o \ __mac_get_link.o \ __mac_set_link.o \ extattr_set_link.o \ extattr_get_link.o \ extattr_delete_link.o \ __mac_execve.o \ sigaction.o \ sigreturn.o \ getcontext.o \ setcontext.o \ swapcontext.o \ swapoff.o \ __acl_get_link.o \ __acl_set_link.o \ __acl_delete_link.o \ __acl_aclcheck_link.o \ sigwait.o \ thr_create.o \ thr_exit.o \ thr_self.o \ thr_kill.o \ jail_attach.o \ extattr_list_fd.o \ extattr_list_file.o \ extattr_list_link.o \ ksem_timedwait.o \ thr_suspend.o \ thr_wake.o \ kldunloadf.o \ audit.o \ auditon.o \ getauid.o \ setauid.o \ getaudit.o \ setaudit.o \ getaudit_addr.o \ setaudit_addr.o \ auditctl.o \ _umtx_op.o \ thr_new.o \ sigqueue.o \ kmq_open.o \ kmq_setattr.o \ kmq_timedreceive.o \ kmq_timedsend.o \ kmq_notify.o \ kmq_unlink.o \ abort2.o \ thr_set_name.o \ aio_fsync.o \ rtprio_thread.o \ sctp_peeloff.o \ sctp_generic_sendmsg.o \ sctp_generic_sendmsg_iov.o \ sctp_generic_recvmsg.o \ pread.o \ pwrite.o \ mmap.o \ lseek.o \ truncate.o \ ftruncate.o \ thr_kill2.o \ shm_open.o \ shm_unlink.o \ cpuset.o \ cpuset_setid.o \ cpuset_getid.o \ cpuset_getaffinity.o \ cpuset_setaffinity.o \ faccessat.o \ fchmodat.o \ fchownat.o \ fexecve.o \ freebsd11_fstatat.o \ futimesat.o \ linkat.o \ mkdirat.o \ mkfifoat.o \ freebsd11_mknodat.o \ openat.o \ readlinkat.o \ renameat.o \ symlinkat.o \ unlinkat.o \ posix_openpt.o \ gssd_syscall.o \ jail_get.o \ jail_set.o \ jail_remove.o \ closefrom.o \ __semctl.o \ msgctl.o \ shmctl.o \ lpathconf.o \ __cap_rights_get.o \ cap_enter.o \ cap_getmode.o \ pdfork.o \ pdkill.o \ pdgetpid.o \ pselect.o \ getloginclass.o \ setloginclass.o \ rctl_get_racct.o \ rctl_get_rules.o \ rctl_get_limits.o \ rctl_add_rule.o \ rctl_remove_rule.o \ posix_fallocate.o \ posix_fadvise.o \ wait6.o \ cap_rights_limit.o \ cap_ioctls_limit.o \ cap_ioctls_get.o \ cap_fcntls_limit.o \ cap_fcntls_get.o \ bindat.o \ connectat.o \ chflagsat.o \ accept4.o \ pipe2.o \ aio_mlock.o \ procctl.o \ ppoll.o \ futimens.o \ utimensat.o \ numa_getaffinity.o \ numa_setaffinity.o \ fdatasync.o \ fstat.o \ fstatat.o \ fhstat.o \ getdirentries.o \ statfs.o \ fstatfs.o \ getfsstat.o \ fhstatfs.o \ mknodat.o \ - kevent.o + kevent.o \ + cpuset_getdomain.o \ + cpuset_setdomain.o Index: user/jeff/numa/sys/sys/syscallsubr.h =================================================================== --- user/jeff/numa/sys/sys/syscallsubr.h (revision 326888) +++ user/jeff/numa/sys/sys/syscallsubr.h (revision 326889) @@ -1,302 +1,309 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002 Ian Dowse. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_SYSCALLSUBR_H_ #define _SYS_SYSCALLSUBR_H_ #include #include #include #include #include #include +#include struct file; struct filecaps; enum idtype; struct itimerval; struct image_args; struct jail; struct kevent; struct kevent_copyops; struct kld_file_stat; struct ksiginfo; struct mbuf; struct msghdr; struct msqid_ds; struct pollfd; struct ogetdirentries_args; struct rlimit; struct rusage; union semun; struct sockaddr; struct stat; struct thr_param; struct sched_param; struct __wrusage; int kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen, size_t path_max); int kern_accept(struct thread *td, int s, struct sockaddr **name, socklen_t *namelen, struct file **fp); int kern_accept4(struct thread *td, int s, struct sockaddr **name, socklen_t *namelen, int flags, struct file **fp); int kern_accessat(struct thread *td, int fd, char *path, enum uio_seg pathseg, int flags, int mode); int kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta); int kern_alternate_path(struct thread *td, const char *prefix, const char *path, enum uio_seg pathseg, char **pathbuf, int create, int dirfd); int kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa); int kern_cap_ioctls_limit(struct thread *td, int fd, u_long *cmds, size_t ncmds); int kern_cap_rights_limit(struct thread *td, int fd, cap_rights_t *rights); int kern_chdir(struct thread *td, char *path, enum uio_seg pathseg); int kern_clock_getcpuclockid2(struct thread *td, id_t id, int which, clockid_t *clk_id); int kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts); int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats); int kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); int kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats); int kern_close(struct thread *td, int fd); int kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa); int kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *maskp); int kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *maskp); +int kern_cpuset_getdomain(struct thread *td, cpulevel_t level, + cpuwhich_t which, id_t id, size_t domainsetsize, + domainset_t *maskp, int *policyp); +int kern_cpuset_setdomain(struct thread *td, cpulevel_t level, + cpuwhich_t which, id_t id, size_t domainsetsize, + const domainset_t *maskp, int policy); int kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); int kern_cpuset_setid(struct thread *td, 
cpuwhich_t which, id_t id, cpusetid_t setid); int kern_dup(struct thread *td, u_int mode, int flags, int old, int new); int kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p); int kern_fchmodat(struct thread *td, int fd, char *path, enum uio_seg pathseg, mode_t mode, int flag); int kern_fchownat(struct thread *td, int fd, char *path, enum uio_seg pathseg, int uid, int gid, int flag); int kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg); int kern_fcntl_freebsd(struct thread *td, int fd, int cmd, long arg); int kern_fhstat(struct thread *td, fhandle_t fh, struct stat *buf); int kern_fhstatfs(struct thread *td, fhandle_t fh, struct statfs *buf); int kern_fstat(struct thread *td, int fd, struct stat *sbp); int kern_fstatfs(struct thread *td, int fd, struct statfs *buf); int kern_fsync(struct thread *td, int fd, bool fullsync); int kern_ftruncate(struct thread *td, int fd, off_t length); int kern_futimes(struct thread *td, int fd, struct timeval *tptr, enum uio_seg tptrseg); int kern_futimens(struct thread *td, int fd, struct timespec *tptr, enum uio_seg tptrseg); int kern_getdirentries(struct thread *td, int fd, char *buf, size_t count, off_t *basep, ssize_t *residp, enum uio_seg bufseg); int kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize, size_t *countp, enum uio_seg bufseg, int mode); int kern_getitimer(struct thread *, u_int, struct itimerval *); int kern_getppid(struct thread *); int kern_getpeername(struct thread *td, int fd, struct sockaddr **sa, socklen_t *alen); int kern_getrusage(struct thread *td, int who, struct rusage *rup); int kern_getsockname(struct thread *td, int fd, struct sockaddr **sa, socklen_t *alen); int kern_getsockopt(struct thread *td, int s, int level, int name, void *optval, enum uio_seg valseg, socklen_t *valsize); int kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data); int kern_jail(struct thread *td, struct jail *j); int kern_jail_get(struct thread *td, struct uio *options, int flags); int kern_jail_set(struct thread *td, struct uio *options, int flags); int kern_kevent(struct thread *td, int fd, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout); int kern_kevent_anonymous(struct thread *td, int nevents, struct kevent_copyops *k_ops); int kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout); int kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps); int kern_kldload(struct thread *td, const char *file, int *fileid); int kern_kldstat(struct thread *td, int fileid, struct kld_file_stat *stat); int kern_kldunload(struct thread *td, int fileid, int flags); int kern_linkat(struct thread *td, int fd1, int fd2, char *path1, char *path2, enum uio_seg segflg, int follow); int kern_listen(struct thread *td, int s, int backlog); int kern_lseek(struct thread *td, int fd, off_t offset, int whence); int kern_lutimes(struct thread *td, char *path, enum uio_seg pathseg, struct timeval *tptr, enum uio_seg tptrseg); int kern_madvise(struct thread *td, uintptr_t addr, size_t len, int behav); int kern_mincore(struct thread *td, uintptr_t addr, size_t len, char *vec); int kern_mkdirat(struct thread *td, int fd, char *path, enum uio_seg segflg, int mode); int kern_mkfifoat(struct thread *td, int fd, char *path, enum uio_seg pathseg, int mode); int kern_mknodat(struct thread *td, int fd, char *path, enum uio_seg pathseg, int mode, dev_t dev); int kern_mlock(struct proc *proc, 
struct ucred *cred, uintptr_t addr, size_t len); int kern_mmap(struct thread *td, uintptr_t addr, size_t size, int prot, int flags, int fd, off_t pos); int kern_mprotect(struct thread *td, uintptr_t addr, size_t size, int prot); int kern_msgctl(struct thread *, int, int, struct msqid_ds *); int kern_msgrcv(struct thread *, int, void *, size_t, long, int, long *); int kern_msgsnd(struct thread *, int, const void *, size_t, int, long); int kern_msync(struct thread *td, uintptr_t addr, size_t size, int flags); int kern_munlock(struct thread *td, uintptr_t addr, size_t size); int kern_munmap(struct thread *td, uintptr_t addr, size_t size); int kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt); int kern_ogetdirentries(struct thread *td, struct ogetdirentries_args *uap, long *ploff); int kern_openat(struct thread *td, int fd, char *path, enum uio_seg pathseg, int flags, int mode); int kern_pathconf(struct thread *td, char *path, enum uio_seg pathseg, int name, u_long flags); int kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1, struct filecaps *fcaps2); int kern_poll(struct thread *td, struct pollfd *fds, u_int nfds, struct timespec *tsp, sigset_t *uset); int kern_posix_error(struct thread *td, int error); int kern_posix_fadvise(struct thread *td, int fd, off_t offset, off_t len, int advice); int kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len); int kern_procctl(struct thread *td, enum idtype idtype, id_t id, int com, void *data); int kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset); int kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset); int kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tvp, sigset_t *uset, int abi_nfdbits); int kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data); int kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte, off_t offset); int kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset); int kern_readlinkat(struct thread *td, int fd, char *path, enum uio_seg pathseg, char *buf, enum uio_seg bufseg, size_t count); int kern_readv(struct thread *td, int fd, struct uio *auio); int kern_recvit(struct thread *td, int s, struct msghdr *mp, enum uio_seg fromseg, struct mbuf **controlp); int kern_renameat(struct thread *td, int oldfd, char *old, int newfd, char *new, enum uio_seg pathseg); int kern_rmdirat(struct thread *td, int fd, char *path, enum uio_seg pathseg); int kern_sched_getparam(struct thread *td, struct thread *targettd, struct sched_param *param); int kern_sched_getscheduler(struct thread *td, struct thread *targettd, int *policy); int kern_sched_setparam(struct thread *td, struct thread *targettd, struct sched_param *param); int kern_sched_setscheduler(struct thread *td, struct thread *targettd, int policy, struct sched_param *param); int kern_sched_rr_get_interval(struct thread *td, pid_t pid, struct timespec *ts); int kern_sched_rr_get_interval_td(struct thread *td, struct thread *targettd, struct timespec *ts); int kern_semctl(struct thread *td, int semid, int semnum, int cmd, union semun *arg, register_t *rval); int kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou, fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits); int kern_sendit(struct thread *td, int s, struct msghdr *mp, int flags, struct mbuf *control, enum uio_seg segflg); int kern_setgroups(struct thread *td, u_int ngrp, gid_t *groups); int 
kern_setitimer(struct thread *, u_int, struct itimerval *, struct itimerval *); int kern_setrlimit(struct thread *, u_int, struct rlimit *); int kern_setsockopt(struct thread *td, int s, int level, int name, void *optval, enum uio_seg valseg, socklen_t valsize); int kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp); int kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode, struct filecaps *fcaps); int kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg); int kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz); int kern_shutdown(struct thread *td, int s, int how); int kern_sigaction(struct thread *td, int sig, const struct sigaction *act, struct sigaction *oact, int flags); int kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss); int kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset, int flags); int kern_sigsuspend(struct thread *td, sigset_t mask); int kern_sigtimedwait(struct thread *td, sigset_t waitset, struct ksiginfo *ksi, struct timespec *timeout); int kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value); int kern_socket(struct thread *td, int domain, int type, int protocol); int kern_statat(struct thread *td, int flag, int fd, char *path, enum uio_seg pathseg, struct stat *sbp, void (*hook)(struct vnode *vp, struct stat *sbp)); int kern_statfs(struct thread *td, char *path, enum uio_seg pathseg, struct statfs *buf); int kern_symlinkat(struct thread *td, char *path1, int fd, char *path2, enum uio_seg segflg); int kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp, int *timerid, int preset_id); int kern_ktimer_delete(struct thread *, int); int kern_ktimer_settime(struct thread *td, int timer_id, int flags, struct itimerspec *val, struct itimerspec *oval); int kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val); int kern_ktimer_getoverrun(struct thread *td, int timer_id); int kern_thr_alloc(struct proc *, int pages, struct thread **); int kern_thr_exit(struct thread *td); int kern_thr_new(struct thread *td, struct thr_param *param); int kern_thr_suspend(struct thread *td, struct timespec *tsp); int kern_truncate(struct thread *td, char *path, enum uio_seg pathseg, off_t length); int kern_unlinkat(struct thread *td, int fd, char *path, enum uio_seg pathseg, ino_t oldinum); int kern_utimesat(struct thread *td, int fd, char *path, enum uio_seg pathseg, struct timeval *tptr, enum uio_seg tptrseg); int kern_utimensat(struct thread *td, int fd, char *path, enum uio_seg pathseg, struct timespec *tptr, enum uio_seg tptrseg, int follow); int kern_wait(struct thread *td, pid_t pid, int *status, int options, struct rusage *rup); int kern_wait6(struct thread *td, enum idtype idtype, id_t id, int *status, int options, struct __wrusage *wrup, siginfo_t *sip); int kern_writev(struct thread *td, int fd, struct uio *auio); int kern_socketpair(struct thread *td, int domain, int type, int protocol, int *rsv); /* flags for kern_sigaction */ #define KSA_OSIGSET 0x0001 /* uses osigact_t */ #define KSA_FREEBSD4 0x0002 /* uses ucontext4 */ struct freebsd11_dirent; int freebsd11_kern_getdirentries(struct thread *td, int fd, char *ubuf, u_int count, long *basep, void (*func)(struct freebsd11_dirent *)); #endif /* !_SYS_SYSCALLSUBR_H_ */ Index: user/jeff/numa/sys/sys/sysproto.h =================================================================== --- user/jeff/numa/sys/sys/sysproto.h (revision 326888) +++ 
user/jeff/numa/sys/sys/sysproto.h (revision 326889) @@ -1,3041 +1,3062 @@ /* * System call prototypes. * * DO NOT EDIT-- this file is automatically generated. * $FreeBSD$ */ #ifndef _SYS_SYSPROTO_H_ #define _SYS_SYSPROTO_H_ #include <sys/signal.h> #include <sys/acl.h> #include <sys/cpuset.h> +#include <sys/domainset.h> #include <sys/_ffcounter.h> #include <sys/_semaphore.h> #include <sys/ucontext.h> #include <sys/wait.h> #include <bsm/audit_kevents.h> struct proc; struct thread; #define PAD_(t) (sizeof(register_t) <= sizeof(t) ? \ 0 : sizeof(register_t) - sizeof(t)) #if BYTE_ORDER == LITTLE_ENDIAN #define PADL_(t) 0 #define PADR_(t) PAD_(t) #else #define PADL_(t) PAD_(t) #define PADR_(t) 0 #endif struct nosys_args { register_t dummy; }; struct sys_exit_args { char rval_l_[PADL_(int)]; int rval; char rval_r_[PADR_(int)]; }; struct fork_args { register_t dummy; }; struct read_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(void *)]; void * buf; char buf_r_[PADR_(void *)]; char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)]; }; struct write_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(const void *)]; const void * buf; char buf_r_[PADR_(const void *)]; char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)]; }; struct open_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct close_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; }; struct wait4_args { char pid_l_[PADL_(int)]; int pid; char pid_r_[PADR_(int)]; char status_l_[PADL_(int *)]; int * status; char status_r_[PADR_(int *)]; char options_l_[PADL_(int)]; int options; char options_r_[PADR_(int)]; char rusage_l_[PADL_(struct rusage *)]; struct rusage * rusage; char rusage_r_[PADR_(struct rusage *)]; }; struct link_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char link_l_[PADL_(char *)]; char * link; char link_r_[PADR_(char *)]; }; struct unlink_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct chdir_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct fchdir_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; }; struct chmod_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct chown_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char uid_l_[PADL_(int)]; int uid; char uid_r_[PADR_(int)]; char gid_l_[PADL_(int)]; int gid; char gid_r_[PADR_(int)]; }; struct obreak_args { char nsize_l_[PADL_(char *)]; char * nsize; char nsize_r_[PADR_(char *)]; }; struct getpid_args { register_t dummy; }; struct mount_args { char type_l_[PADL_(char *)]; char * type; char type_r_[PADR_(char *)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char data_l_[PADL_(caddr_t)]; caddr_t data; char data_r_[PADR_(caddr_t)]; }; struct unmount_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct setuid_args { char uid_l_[PADL_(uid_t)]; uid_t uid; char uid_r_[PADR_(uid_t)]; }; struct getuid_args { register_t dummy; }; struct geteuid_args { register_t dummy; }; struct ptrace_args { char req_l_[PADL_(int)]; int req; char req_r_[PADR_(int)]; char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char
addr_l_[PADL_(caddr_t)]; caddr_t addr; char addr_r_[PADR_(caddr_t)]; char data_l_[PADL_(int)]; int data; char data_r_[PADR_(int)]; }; struct recvmsg_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char msg_l_[PADL_(struct msghdr *)]; struct msghdr * msg; char msg_r_[PADR_(struct msghdr *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct sendmsg_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char msg_l_[PADL_(struct msghdr *)]; struct msghdr * msg; char msg_r_[PADR_(struct msghdr *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct recvfrom_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char buf_l_[PADL_(caddr_t)]; caddr_t buf; char buf_r_[PADR_(caddr_t)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char from_l_[PADL_(struct sockaddr *__restrict)]; struct sockaddr *__restrict from; char from_r_[PADR_(struct sockaddr *__restrict)]; char fromlenaddr_l_[PADL_(__socklen_t *__restrict)]; __socklen_t *__restrict fromlenaddr; char fromlenaddr_r_[PADR_(__socklen_t *__restrict)]; }; struct accept_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char name_l_[PADL_(struct sockaddr *__restrict)]; struct sockaddr *__restrict name; char name_r_[PADR_(struct sockaddr *__restrict)]; char anamelen_l_[PADL_(__socklen_t *__restrict)]; __socklen_t *__restrict anamelen; char anamelen_r_[PADR_(__socklen_t *__restrict)]; }; struct getpeername_args { char fdes_l_[PADL_(int)]; int fdes; char fdes_r_[PADR_(int)]; char asa_l_[PADL_(struct sockaddr *__restrict)]; struct sockaddr *__restrict asa; char asa_r_[PADR_(struct sockaddr *__restrict)]; char alen_l_[PADL_(__socklen_t *__restrict)]; __socklen_t *__restrict alen; char alen_r_[PADR_(__socklen_t *__restrict)]; }; struct getsockname_args { char fdes_l_[PADL_(int)]; int fdes; char fdes_r_[PADR_(int)]; char asa_l_[PADL_(struct sockaddr *__restrict)]; struct sockaddr *__restrict asa; char asa_r_[PADR_(struct sockaddr *__restrict)]; char alen_l_[PADL_(__socklen_t *__restrict)]; __socklen_t *__restrict alen; char alen_r_[PADR_(__socklen_t *__restrict)]; }; struct access_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char amode_l_[PADL_(int)]; int amode; char amode_r_[PADR_(int)]; }; struct chflags_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char flags_l_[PADL_(u_long)]; u_long flags; char flags_r_[PADR_(u_long)]; }; struct fchflags_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char flags_l_[PADL_(u_long)]; u_long flags; char flags_r_[PADR_(u_long)]; }; struct sync_args { register_t dummy; }; struct kill_args { char pid_l_[PADL_(int)]; int pid; char pid_r_[PADR_(int)]; char signum_l_[PADL_(int)]; int signum; char signum_r_[PADR_(int)]; }; struct getppid_args { register_t dummy; }; struct dup_args { char fd_l_[PADL_(u_int)]; u_int fd; char fd_r_[PADR_(u_int)]; }; struct freebsd10_pipe_args { register_t dummy; }; struct getegid_args { register_t dummy; }; struct profil_args { char samples_l_[PADL_(caddr_t)]; caddr_t samples; char samples_r_[PADR_(caddr_t)]; char size_l_[PADL_(size_t)]; size_t size; char size_r_[PADR_(size_t)]; char offset_l_[PADL_(size_t)]; size_t offset; char offset_r_[PADR_(size_t)]; char scale_l_[PADL_(u_int)]; u_int scale; char scale_r_[PADR_(u_int)]; }; struct ktrace_args { char fname_l_[PADL_(const char *)]; const char * fname; char fname_r_[PADR_(const char *)]; 
char ops_l_[PADL_(int)]; int ops; char ops_r_[PADR_(int)]; char facs_l_[PADL_(int)]; int facs; char facs_r_[PADR_(int)]; char pid_l_[PADL_(int)]; int pid; char pid_r_[PADR_(int)]; }; struct getgid_args { register_t dummy; }; struct getlogin_args { char namebuf_l_[PADL_(char *)]; char * namebuf; char namebuf_r_[PADR_(char *)]; char namelen_l_[PADL_(u_int)]; u_int namelen; char namelen_r_[PADR_(u_int)]; }; struct setlogin_args { char namebuf_l_[PADL_(char *)]; char * namebuf; char namebuf_r_[PADR_(char *)]; }; struct acct_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct osigpending_args { register_t dummy; }; struct sigaltstack_args { char ss_l_[PADL_(stack_t *)]; stack_t * ss; char ss_r_[PADR_(stack_t *)]; char oss_l_[PADL_(stack_t *)]; stack_t * oss; char oss_r_[PADR_(stack_t *)]; }; struct ioctl_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char com_l_[PADL_(u_long)]; u_long com; char com_r_[PADR_(u_long)]; char data_l_[PADL_(caddr_t)]; caddr_t data; char data_r_[PADR_(caddr_t)]; }; struct reboot_args { char opt_l_[PADL_(int)]; int opt; char opt_r_[PADR_(int)]; }; struct revoke_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct symlink_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char link_l_[PADL_(char *)]; char * link; char link_r_[PADR_(char *)]; }; struct readlink_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char count_l_[PADL_(size_t)]; size_t count; char count_r_[PADR_(size_t)]; }; struct execve_args { char fname_l_[PADL_(char *)]; char * fname; char fname_r_[PADR_(char *)]; char argv_l_[PADL_(char **)]; char ** argv; char argv_r_[PADR_(char **)]; char envv_l_[PADL_(char **)]; char ** envv; char envv_r_[PADR_(char **)]; }; struct umask_args { char newmask_l_[PADL_(int)]; int newmask; char newmask_r_[PADR_(int)]; }; struct chroot_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct getpagesize_args { register_t dummy; }; struct msync_args { char addr_l_[PADL_(void *)]; void * addr; char addr_r_[PADR_(void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct vfork_args { register_t dummy; }; struct sbrk_args { char incr_l_[PADL_(int)]; int incr; char incr_r_[PADR_(int)]; }; struct sstk_args { char incr_l_[PADL_(int)]; int incr; char incr_r_[PADR_(int)]; }; struct ovadvise_args { char anom_l_[PADL_(int)]; int anom; char anom_r_[PADR_(int)]; }; struct munmap_args { char addr_l_[PADL_(void *)]; void * addr; char addr_r_[PADR_(void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; }; struct mprotect_args { char addr_l_[PADL_(void *)]; void * addr; char addr_r_[PADR_(void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char prot_l_[PADL_(int)]; int prot; char prot_r_[PADR_(int)]; }; struct madvise_args { char addr_l_[PADL_(void *)]; void * addr; char addr_r_[PADR_(void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char behav_l_[PADL_(int)]; int behav; char behav_r_[PADR_(int)]; }; struct mincore_args { char addr_l_[PADL_(const void *)]; const void * addr; char addr_r_[PADR_(const void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char vec_l_[PADL_(char *)]; char * vec; char vec_r_[PADR_(char *)]; }; struct getgroups_args { char 
gidsetsize_l_[PADL_(u_int)]; u_int gidsetsize; char gidsetsize_r_[PADR_(u_int)]; char gidset_l_[PADL_(gid_t *)]; gid_t * gidset; char gidset_r_[PADR_(gid_t *)]; }; struct setgroups_args { char gidsetsize_l_[PADL_(u_int)]; u_int gidsetsize; char gidsetsize_r_[PADR_(u_int)]; char gidset_l_[PADL_(gid_t *)]; gid_t * gidset; char gidset_r_[PADR_(gid_t *)]; }; struct getpgrp_args { register_t dummy; }; struct setpgid_args { char pid_l_[PADL_(int)]; int pid; char pid_r_[PADR_(int)]; char pgid_l_[PADL_(int)]; int pgid; char pgid_r_[PADR_(int)]; }; struct setitimer_args { char which_l_[PADL_(u_int)]; u_int which; char which_r_[PADR_(u_int)]; char itv_l_[PADL_(struct itimerval *)]; struct itimerval * itv; char itv_r_[PADR_(struct itimerval *)]; char oitv_l_[PADL_(struct itimerval *)]; struct itimerval * oitv; char oitv_r_[PADR_(struct itimerval *)]; }; struct owait_args { register_t dummy; }; struct swapon_args { char name_l_[PADL_(char *)]; char * name; char name_r_[PADR_(char *)]; }; struct getitimer_args { char which_l_[PADL_(u_int)]; u_int which; char which_r_[PADR_(u_int)]; char itv_l_[PADL_(struct itimerval *)]; struct itimerval * itv; char itv_r_[PADR_(struct itimerval *)]; }; struct getdtablesize_args { register_t dummy; }; struct dup2_args { char from_l_[PADL_(u_int)]; u_int from; char from_r_[PADR_(u_int)]; char to_l_[PADL_(u_int)]; u_int to; char to_r_[PADR_(u_int)]; }; struct fcntl_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char arg_l_[PADL_(long)]; long arg; char arg_r_[PADR_(long)]; }; struct select_args { char nd_l_[PADL_(int)]; int nd; char nd_r_[PADR_(int)]; char in_l_[PADL_(fd_set *)]; fd_set * in; char in_r_[PADR_(fd_set *)]; char ou_l_[PADL_(fd_set *)]; fd_set * ou; char ou_r_[PADR_(fd_set *)]; char ex_l_[PADL_(fd_set *)]; fd_set * ex; char ex_r_[PADR_(fd_set *)]; char tv_l_[PADL_(struct timeval *)]; struct timeval * tv; char tv_r_[PADR_(struct timeval *)]; }; struct fsync_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; }; struct setpriority_args { char which_l_[PADL_(int)]; int which; char which_r_[PADR_(int)]; char who_l_[PADL_(int)]; int who; char who_r_[PADR_(int)]; char prio_l_[PADL_(int)]; int prio; char prio_r_[PADR_(int)]; }; struct socket_args { char domain_l_[PADL_(int)]; int domain; char domain_r_[PADR_(int)]; char type_l_[PADL_(int)]; int type; char type_r_[PADR_(int)]; char protocol_l_[PADL_(int)]; int protocol; char protocol_r_[PADR_(int)]; }; struct connect_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char name_l_[PADL_(caddr_t)]; caddr_t name; char name_r_[PADR_(caddr_t)]; char namelen_l_[PADL_(int)]; int namelen; char namelen_r_[PADR_(int)]; }; struct getpriority_args { char which_l_[PADL_(int)]; int which; char which_r_[PADR_(int)]; char who_l_[PADL_(int)]; int who; char who_r_[PADR_(int)]; }; struct bind_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char name_l_[PADL_(caddr_t)]; caddr_t name; char name_r_[PADR_(caddr_t)]; char namelen_l_[PADL_(int)]; int namelen; char namelen_r_[PADR_(int)]; }; struct setsockopt_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char level_l_[PADL_(int)]; int level; char level_r_[PADR_(int)]; char name_l_[PADL_(int)]; int name; char name_r_[PADR_(int)]; char val_l_[PADL_(caddr_t)]; caddr_t val; char val_r_[PADR_(caddr_t)]; char valsize_l_[PADL_(int)]; int valsize; char valsize_r_[PADR_(int)]; }; struct listen_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char backlog_l_[PADL_(int)]; 
int backlog; char backlog_r_[PADR_(int)]; }; struct gettimeofday_args { char tp_l_[PADL_(struct timeval *)]; struct timeval * tp; char tp_r_[PADR_(struct timeval *)]; char tzp_l_[PADL_(struct timezone *)]; struct timezone * tzp; char tzp_r_[PADR_(struct timezone *)]; }; struct getrusage_args { char who_l_[PADL_(int)]; int who; char who_r_[PADR_(int)]; char rusage_l_[PADL_(struct rusage *)]; struct rusage * rusage; char rusage_r_[PADR_(struct rusage *)]; }; struct getsockopt_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char level_l_[PADL_(int)]; int level; char level_r_[PADR_(int)]; char name_l_[PADL_(int)]; int name; char name_r_[PADR_(int)]; char val_l_[PADL_(caddr_t)]; caddr_t val; char val_r_[PADR_(caddr_t)]; char avalsize_l_[PADL_(int *)]; int * avalsize; char avalsize_r_[PADR_(int *)]; }; struct readv_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(u_int)]; u_int iovcnt; char iovcnt_r_[PADR_(u_int)]; }; struct writev_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(u_int)]; u_int iovcnt; char iovcnt_r_[PADR_(u_int)]; }; struct settimeofday_args { char tv_l_[PADL_(struct timeval *)]; struct timeval * tv; char tv_r_[PADR_(struct timeval *)]; char tzp_l_[PADL_(struct timezone *)]; struct timezone * tzp; char tzp_r_[PADR_(struct timezone *)]; }; struct fchown_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char uid_l_[PADL_(int)]; int uid; char uid_r_[PADR_(int)]; char gid_l_[PADL_(int)]; int gid; char gid_r_[PADR_(int)]; }; struct fchmod_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct setreuid_args { char ruid_l_[PADL_(int)]; int ruid; char ruid_r_[PADR_(int)]; char euid_l_[PADL_(int)]; int euid; char euid_r_[PADR_(int)]; }; struct setregid_args { char rgid_l_[PADL_(int)]; int rgid; char rgid_r_[PADR_(int)]; char egid_l_[PADL_(int)]; int egid; char egid_r_[PADR_(int)]; }; struct rename_args { char from_l_[PADL_(char *)]; char * from; char from_r_[PADR_(char *)]; char to_l_[PADL_(char *)]; char * to; char to_r_[PADR_(char *)]; }; struct flock_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char how_l_[PADL_(int)]; int how; char how_r_[PADR_(int)]; }; struct mkfifo_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct sendto_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char buf_l_[PADL_(caddr_t)]; caddr_t buf; char buf_r_[PADR_(caddr_t)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char to_l_[PADL_(caddr_t)]; caddr_t to; char to_r_[PADR_(caddr_t)]; char tolen_l_[PADL_(int)]; int tolen; char tolen_r_[PADR_(int)]; }; struct shutdown_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char how_l_[PADL_(int)]; int how; char how_r_[PADR_(int)]; }; struct socketpair_args { char domain_l_[PADL_(int)]; int domain; char domain_r_[PADR_(int)]; char type_l_[PADL_(int)]; int type; char type_r_[PADR_(int)]; char protocol_l_[PADL_(int)]; int protocol; char protocol_r_[PADR_(int)]; char rsv_l_[PADL_(int *)]; int * rsv; char rsv_r_[PADR_(int *)]; }; struct mkdir_args { char path_l_[PADL_(char *)]; char * path; char 
path_r_[PADR_(char *)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct rmdir_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct utimes_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char tptr_l_[PADL_(struct timeval *)]; struct timeval * tptr; char tptr_r_[PADR_(struct timeval *)]; }; struct adjtime_args { char delta_l_[PADL_(struct timeval *)]; struct timeval * delta; char delta_r_[PADR_(struct timeval *)]; char olddelta_l_[PADL_(struct timeval *)]; struct timeval * olddelta; char olddelta_r_[PADR_(struct timeval *)]; }; struct ogethostid_args { register_t dummy; }; struct setsid_args { register_t dummy; }; struct quotactl_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char uid_l_[PADL_(int)]; int uid; char uid_r_[PADR_(int)]; char arg_l_[PADL_(caddr_t)]; caddr_t arg; char arg_r_[PADR_(caddr_t)]; }; struct oquota_args { register_t dummy; }; struct nlm_syscall_args { char debug_level_l_[PADL_(int)]; int debug_level; char debug_level_r_[PADR_(int)]; char grace_period_l_[PADL_(int)]; int grace_period; char grace_period_r_[PADR_(int)]; char addr_count_l_[PADL_(int)]; int addr_count; char addr_count_r_[PADR_(int)]; char addrs_l_[PADL_(char **)]; char ** addrs; char addrs_r_[PADR_(char **)]; }; struct nfssvc_args { char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; char argp_l_[PADL_(caddr_t)]; caddr_t argp; char argp_r_[PADR_(caddr_t)]; }; struct lgetfh_args { char fname_l_[PADL_(char *)]; char * fname; char fname_r_[PADR_(char *)]; char fhp_l_[PADL_(struct fhandle *)]; struct fhandle * fhp; char fhp_r_[PADR_(struct fhandle *)]; }; struct getfh_args { char fname_l_[PADL_(char *)]; char * fname; char fname_r_[PADR_(char *)]; char fhp_l_[PADL_(struct fhandle *)]; struct fhandle * fhp; char fhp_r_[PADR_(struct fhandle *)]; }; struct sysarch_args { char op_l_[PADL_(int)]; int op; char op_r_[PADR_(int)]; char parms_l_[PADL_(char *)]; char * parms; char parms_r_[PADR_(char *)]; }; struct rtprio_args { char function_l_[PADL_(int)]; int function; char function_r_[PADR_(int)]; char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char rtp_l_[PADL_(struct rtprio *)]; struct rtprio * rtp; char rtp_r_[PADR_(struct rtprio *)]; }; struct semsys_args { char which_l_[PADL_(int)]; int which; char which_r_[PADR_(int)]; char a2_l_[PADL_(int)]; int a2; char a2_r_[PADR_(int)]; char a3_l_[PADL_(int)]; int a3; char a3_r_[PADR_(int)]; char a4_l_[PADL_(int)]; int a4; char a4_r_[PADR_(int)]; char a5_l_[PADL_(int)]; int a5; char a5_r_[PADR_(int)]; }; struct msgsys_args { char which_l_[PADL_(int)]; int which; char which_r_[PADR_(int)]; char a2_l_[PADL_(int)]; int a2; char a2_r_[PADR_(int)]; char a3_l_[PADL_(int)]; int a3; char a3_r_[PADR_(int)]; char a4_l_[PADL_(int)]; int a4; char a4_r_[PADR_(int)]; char a5_l_[PADL_(int)]; int a5; char a5_r_[PADR_(int)]; char a6_l_[PADL_(int)]; int a6; char a6_r_[PADR_(int)]; }; struct shmsys_args { char which_l_[PADL_(int)]; int which; char which_r_[PADR_(int)]; char a2_l_[PADL_(int)]; int a2; char a2_r_[PADR_(int)]; char a3_l_[PADL_(int)]; int a3; char a3_r_[PADR_(int)]; char a4_l_[PADL_(int)]; int a4; char a4_r_[PADR_(int)]; }; struct setfib_args { char fibnum_l_[PADL_(int)]; int fibnum; char fibnum_r_[PADR_(int)]; }; struct ntp_adjtime_args { char tp_l_[PADL_(struct timex *)]; struct timex * tp; char tp_r_[PADR_(struct timex *)]; }; struct setgid_args { char gid_l_[PADL_(gid_t)]; 
gid_t gid; char gid_r_[PADR_(gid_t)]; }; struct setegid_args { char egid_l_[PADL_(gid_t)]; gid_t egid; char egid_r_[PADR_(gid_t)]; }; struct seteuid_args { char euid_l_[PADL_(uid_t)]; uid_t euid; char euid_r_[PADR_(uid_t)]; }; struct pathconf_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char name_l_[PADL_(int)]; int name; char name_r_[PADR_(int)]; }; struct fpathconf_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char name_l_[PADL_(int)]; int name; char name_r_[PADR_(int)]; }; struct __getrlimit_args { char which_l_[PADL_(u_int)]; u_int which; char which_r_[PADR_(u_int)]; char rlp_l_[PADL_(struct rlimit *)]; struct rlimit * rlp; char rlp_r_[PADR_(struct rlimit *)]; }; struct __setrlimit_args { char which_l_[PADL_(u_int)]; u_int which; char which_r_[PADR_(u_int)]; char rlp_l_[PADL_(struct rlimit *)]; struct rlimit * rlp; char rlp_r_[PADR_(struct rlimit *)]; }; struct sysctl_args { char name_l_[PADL_(int *)]; int * name; char name_r_[PADR_(int *)]; char namelen_l_[PADL_(u_int)]; u_int namelen; char namelen_r_[PADR_(u_int)]; char old_l_[PADL_(void *)]; void * old; char old_r_[PADR_(void *)]; char oldlenp_l_[PADL_(size_t *)]; size_t * oldlenp; char oldlenp_r_[PADR_(size_t *)]; char new_l_[PADL_(void *)]; void * new; char new_r_[PADR_(void *)]; char newlen_l_[PADL_(size_t)]; size_t newlen; char newlen_r_[PADR_(size_t)]; }; struct mlock_args { char addr_l_[PADL_(const void *)]; const void * addr; char addr_r_[PADR_(const void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; }; struct munlock_args { char addr_l_[PADL_(const void *)]; const void * addr; char addr_r_[PADR_(const void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; }; struct undelete_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct futimes_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char tptr_l_[PADL_(struct timeval *)]; struct timeval * tptr; char tptr_r_[PADR_(struct timeval *)]; }; struct getpgid_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; }; struct poll_args { char fds_l_[PADL_(struct pollfd *)]; struct pollfd * fds; char fds_r_[PADR_(struct pollfd *)]; char nfds_l_[PADL_(u_int)]; u_int nfds; char nfds_r_[PADR_(u_int)]; char timeout_l_[PADL_(int)]; int timeout; char timeout_r_[PADR_(int)]; }; struct semget_args { char key_l_[PADL_(key_t)]; key_t key; char key_r_[PADR_(key_t)]; char nsems_l_[PADL_(int)]; int nsems; char nsems_r_[PADR_(int)]; char semflg_l_[PADL_(int)]; int semflg; char semflg_r_[PADR_(int)]; }; struct semop_args { char semid_l_[PADL_(int)]; int semid; char semid_r_[PADR_(int)]; char sops_l_[PADL_(struct sembuf *)]; struct sembuf * sops; char sops_r_[PADR_(struct sembuf *)]; char nsops_l_[PADL_(size_t)]; size_t nsops; char nsops_r_[PADR_(size_t)]; }; struct msgget_args { char key_l_[PADL_(key_t)]; key_t key; char key_r_[PADR_(key_t)]; char msgflg_l_[PADL_(int)]; int msgflg; char msgflg_r_[PADR_(int)]; }; struct msgsnd_args { char msqid_l_[PADL_(int)]; int msqid; char msqid_r_[PADR_(int)]; char msgp_l_[PADL_(const void *)]; const void * msgp; char msgp_r_[PADR_(const void *)]; char msgsz_l_[PADL_(size_t)]; size_t msgsz; char msgsz_r_[PADR_(size_t)]; char msgflg_l_[PADL_(int)]; int msgflg; char msgflg_r_[PADR_(int)]; }; struct msgrcv_args { char msqid_l_[PADL_(int)]; int msqid; char msqid_r_[PADR_(int)]; char msgp_l_[PADL_(void *)]; void * msgp; char msgp_r_[PADR_(void *)]; char msgsz_l_[PADL_(size_t)]; size_t msgsz; char 
msgsz_r_[PADR_(size_t)]; char msgtyp_l_[PADL_(long)]; long msgtyp; char msgtyp_r_[PADR_(long)]; char msgflg_l_[PADL_(int)]; int msgflg; char msgflg_r_[PADR_(int)]; }; struct shmat_args { char shmid_l_[PADL_(int)]; int shmid; char shmid_r_[PADR_(int)]; char shmaddr_l_[PADL_(const void *)]; const void * shmaddr; char shmaddr_r_[PADR_(const void *)]; char shmflg_l_[PADL_(int)]; int shmflg; char shmflg_r_[PADR_(int)]; }; struct shmdt_args { char shmaddr_l_[PADL_(const void *)]; const void * shmaddr; char shmaddr_r_[PADR_(const void *)]; }; struct shmget_args { char key_l_[PADL_(key_t)]; key_t key; char key_r_[PADR_(key_t)]; char size_l_[PADL_(size_t)]; size_t size; char size_r_[PADR_(size_t)]; char shmflg_l_[PADL_(int)]; int shmflg; char shmflg_r_[PADR_(int)]; }; struct clock_gettime_args { char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)]; char tp_l_[PADL_(struct timespec *)]; struct timespec * tp; char tp_r_[PADR_(struct timespec *)]; }; struct clock_settime_args { char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)]; char tp_l_[PADL_(const struct timespec *)]; const struct timespec * tp; char tp_r_[PADR_(const struct timespec *)]; }; struct clock_getres_args { char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)]; char tp_l_[PADL_(struct timespec *)]; struct timespec * tp; char tp_r_[PADR_(struct timespec *)]; }; struct ktimer_create_args { char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)]; char evp_l_[PADL_(struct sigevent *)]; struct sigevent * evp; char evp_r_[PADR_(struct sigevent *)]; char timerid_l_[PADL_(int *)]; int * timerid; char timerid_r_[PADR_(int *)]; }; struct ktimer_delete_args { char timerid_l_[PADL_(int)]; int timerid; char timerid_r_[PADR_(int)]; }; struct ktimer_settime_args { char timerid_l_[PADL_(int)]; int timerid; char timerid_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char value_l_[PADL_(const struct itimerspec *)]; const struct itimerspec * value; char value_r_[PADR_(const struct itimerspec *)]; char ovalue_l_[PADL_(struct itimerspec *)]; struct itimerspec * ovalue; char ovalue_r_[PADR_(struct itimerspec *)]; }; struct ktimer_gettime_args { char timerid_l_[PADL_(int)]; int timerid; char timerid_r_[PADR_(int)]; char value_l_[PADL_(struct itimerspec *)]; struct itimerspec * value; char value_r_[PADR_(struct itimerspec *)]; }; struct ktimer_getoverrun_args { char timerid_l_[PADL_(int)]; int timerid; char timerid_r_[PADR_(int)]; }; struct nanosleep_args { char rqtp_l_[PADL_(const struct timespec *)]; const struct timespec * rqtp; char rqtp_r_[PADR_(const struct timespec *)]; char rmtp_l_[PADL_(struct timespec *)]; struct timespec * rmtp; char rmtp_r_[PADR_(struct timespec *)]; }; struct ffclock_getcounter_args { char ffcount_l_[PADL_(ffcounter *)]; ffcounter * ffcount; char ffcount_r_[PADR_(ffcounter *)]; }; struct ffclock_setestimate_args { char cest_l_[PADL_(struct ffclock_estimate *)]; struct ffclock_estimate * cest; char cest_r_[PADR_(struct ffclock_estimate *)]; }; struct ffclock_getestimate_args { char cest_l_[PADL_(struct ffclock_estimate *)]; struct ffclock_estimate * cest; char cest_r_[PADR_(struct ffclock_estimate *)]; }; struct clock_nanosleep_args { char clock_id_l_[PADL_(clockid_t)]; clockid_t clock_id; char clock_id_r_[PADR_(clockid_t)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char rqtp_l_[PADL_(const struct timespec *)]; const struct 
timespec * rqtp; char rqtp_r_[PADR_(const struct timespec *)]; char rmtp_l_[PADL_(struct timespec *)]; struct timespec * rmtp; char rmtp_r_[PADR_(struct timespec *)]; }; struct clock_getcpuclockid2_args { char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char which_l_[PADL_(int)]; int which; char which_r_[PADR_(int)]; char clock_id_l_[PADL_(clockid_t *)]; clockid_t * clock_id; char clock_id_r_[PADR_(clockid_t *)]; }; struct ntp_gettime_args { char ntvp_l_[PADL_(struct ntptimeval *)]; struct ntptimeval * ntvp; char ntvp_r_[PADR_(struct ntptimeval *)]; }; struct minherit_args { char addr_l_[PADL_(void *)]; void * addr; char addr_r_[PADR_(void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char inherit_l_[PADL_(int)]; int inherit; char inherit_r_[PADR_(int)]; }; struct rfork_args { char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct issetugid_args { register_t dummy; }; struct lchown_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char uid_l_[PADL_(int)]; int uid; char uid_r_[PADR_(int)]; char gid_l_[PADL_(int)]; int gid; char gid_r_[PADR_(int)]; }; struct aio_read_args { char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct aio_write_args { char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct lio_listio_args { char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; char acb_list_l_[PADL_(struct aiocb *const *)]; struct aiocb *const * acb_list; char acb_list_r_[PADR_(struct aiocb *const *)]; char nent_l_[PADL_(int)]; int nent; char nent_r_[PADR_(int)]; char sig_l_[PADL_(struct sigevent *)]; struct sigevent * sig; char sig_r_[PADR_(struct sigevent *)]; }; struct lchmod_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; }; struct lutimes_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char tptr_l_[PADL_(struct timeval *)]; struct timeval * tptr; char tptr_r_[PADR_(struct timeval *)]; }; struct preadv_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(u_int)]; u_int iovcnt; char iovcnt_r_[PADR_(u_int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; }; struct pwritev_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(u_int)]; u_int iovcnt; char iovcnt_r_[PADR_(u_int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; }; struct fhopen_args { char u_fhp_l_[PADL_(const struct fhandle *)]; const struct fhandle * u_fhp; char u_fhp_r_[PADR_(const struct fhandle *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct modnext_args { char modid_l_[PADL_(int)]; int modid; char modid_r_[PADR_(int)]; }; struct modstat_args { char modid_l_[PADL_(int)]; int modid; char modid_r_[PADR_(int)]; char stat_l_[PADL_(struct module_stat *)]; struct module_stat * stat; char stat_r_[PADR_(struct module_stat *)]; }; struct modfnext_args { char modid_l_[PADL_(int)]; int modid; char modid_r_[PADR_(int)]; }; struct modfind_args { char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)]; }; struct kldload_args { char file_l_[PADL_(const 
char *)]; const char * file; char file_r_[PADR_(const char *)]; }; struct kldunload_args { char fileid_l_[PADL_(int)]; int fileid; char fileid_r_[PADR_(int)]; }; struct kldfind_args { char file_l_[PADL_(const char *)]; const char * file; char file_r_[PADR_(const char *)]; }; struct kldnext_args { char fileid_l_[PADL_(int)]; int fileid; char fileid_r_[PADR_(int)]; }; struct kldstat_args { char fileid_l_[PADL_(int)]; int fileid; char fileid_r_[PADR_(int)]; char stat_l_[PADL_(struct kld_file_stat *)]; struct kld_file_stat * stat; char stat_r_[PADR_(struct kld_file_stat *)]; }; struct kldfirstmod_args { char fileid_l_[PADL_(int)]; int fileid; char fileid_r_[PADR_(int)]; }; struct getsid_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; }; struct setresuid_args { char ruid_l_[PADL_(uid_t)]; uid_t ruid; char ruid_r_[PADR_(uid_t)]; char euid_l_[PADL_(uid_t)]; uid_t euid; char euid_r_[PADR_(uid_t)]; char suid_l_[PADL_(uid_t)]; uid_t suid; char suid_r_[PADR_(uid_t)]; }; struct setresgid_args { char rgid_l_[PADL_(gid_t)]; gid_t rgid; char rgid_r_[PADR_(gid_t)]; char egid_l_[PADL_(gid_t)]; gid_t egid; char egid_r_[PADR_(gid_t)]; char sgid_l_[PADL_(gid_t)]; gid_t sgid; char sgid_r_[PADR_(gid_t)]; }; struct aio_return_args { char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct aio_suspend_args { char aiocbp_l_[PADL_(struct aiocb *const *)]; struct aiocb *const * aiocbp; char aiocbp_r_[PADR_(struct aiocb *const *)]; char nent_l_[PADL_(int)]; int nent; char nent_r_[PADR_(int)]; char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)]; }; struct aio_cancel_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct aio_error_args { char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct yield_args { register_t dummy; }; struct mlockall_args { char how_l_[PADL_(int)]; int how; char how_r_[PADR_(int)]; }; struct munlockall_args { register_t dummy; }; struct __getcwd_args { char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char buflen_l_[PADL_(size_t)]; size_t buflen; char buflen_r_[PADR_(size_t)]; }; struct sched_setparam_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char param_l_[PADL_(const struct sched_param *)]; const struct sched_param * param; char param_r_[PADR_(const struct sched_param *)]; }; struct sched_getparam_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char param_l_[PADL_(struct sched_param *)]; struct sched_param * param; char param_r_[PADR_(struct sched_param *)]; }; struct sched_setscheduler_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char policy_l_[PADL_(int)]; int policy; char policy_r_[PADR_(int)]; char param_l_[PADL_(const struct sched_param *)]; const struct sched_param * param; char param_r_[PADR_(const struct sched_param *)]; }; struct sched_getscheduler_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; }; struct sched_yield_args { register_t dummy; }; struct sched_get_priority_max_args { char policy_l_[PADL_(int)]; int policy; char policy_r_[PADR_(int)]; }; struct sched_get_priority_min_args { char policy_l_[PADL_(int)]; int policy; char policy_r_[PADR_(int)]; }; struct sched_rr_get_interval_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char 
pid_r_[PADR_(pid_t)]; char interval_l_[PADL_(struct timespec *)]; struct timespec * interval; char interval_r_[PADR_(struct timespec *)]; }; struct utrace_args { char addr_l_[PADL_(const void *)]; const void * addr; char addr_r_[PADR_(const void *)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; }; struct kldsym_args { char fileid_l_[PADL_(int)]; int fileid; char fileid_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; }; struct jail_args { char jail_l_[PADL_(struct jail *)]; struct jail * jail; char jail_r_[PADR_(struct jail *)]; }; struct nnpfs_syscall_args { char operation_l_[PADL_(int)]; int operation; char operation_r_[PADR_(int)]; char a_pathP_l_[PADL_(char *)]; char * a_pathP; char a_pathP_r_[PADR_(char *)]; char a_opcode_l_[PADL_(int)]; int a_opcode; char a_opcode_r_[PADR_(int)]; char a_paramsP_l_[PADL_(void *)]; void * a_paramsP; char a_paramsP_r_[PADR_(void *)]; char a_followSymlinks_l_[PADL_(int)]; int a_followSymlinks; char a_followSymlinks_r_[PADR_(int)]; }; struct sigprocmask_args { char how_l_[PADL_(int)]; int how; char how_r_[PADR_(int)]; char set_l_[PADL_(const sigset_t *)]; const sigset_t * set; char set_r_[PADR_(const sigset_t *)]; char oset_l_[PADL_(sigset_t *)]; sigset_t * oset; char oset_r_[PADR_(sigset_t *)]; }; struct sigsuspend_args { char sigmask_l_[PADL_(const sigset_t *)]; const sigset_t * sigmask; char sigmask_r_[PADR_(const sigset_t *)]; }; struct sigpending_args { char set_l_[PADL_(sigset_t *)]; sigset_t * set; char set_r_[PADR_(sigset_t *)]; }; struct sigtimedwait_args { char set_l_[PADL_(const sigset_t *)]; const sigset_t * set; char set_r_[PADR_(const sigset_t *)]; char info_l_[PADL_(siginfo_t *)]; siginfo_t * info; char info_r_[PADR_(siginfo_t *)]; char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)]; }; struct sigwaitinfo_args { char set_l_[PADL_(const sigset_t *)]; const sigset_t * set; char set_r_[PADR_(const sigset_t *)]; char info_l_[PADL_(siginfo_t *)]; siginfo_t * info; char info_r_[PADR_(siginfo_t *)]; }; struct __acl_get_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_set_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_get_fd_args { char filedes_l_[PADL_(int)]; int filedes; char filedes_r_[PADR_(int)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_set_fd_args { char filedes_l_[PADL_(int)]; int filedes; char filedes_r_[PADR_(int)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_delete_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; }; struct __acl_delete_fd_args { char 
filedes_l_[PADL_(int)]; int filedes; char filedes_r_[PADR_(int)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; }; struct __acl_aclcheck_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_aclcheck_fd_args { char filedes_l_[PADL_(int)]; int filedes; char filedes_r_[PADR_(int)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct extattrctl_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char filename_l_[PADL_(const char *)]; const char * filename; char filename_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; }; struct extattr_set_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_get_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_delete_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; }; struct aio_waitcomplete_args { char aiocbp_l_[PADL_(struct aiocb **)]; struct aiocb ** aiocbp; char aiocbp_r_[PADR_(struct aiocb **)]; char timeout_l_[PADL_(struct timespec *)]; struct timespec * timeout; char timeout_r_[PADR_(struct timespec *)]; }; struct getresuid_args { char ruid_l_[PADL_(uid_t *)]; uid_t * ruid; char ruid_r_[PADR_(uid_t *)]; char euid_l_[PADL_(uid_t *)]; uid_t * euid; char euid_r_[PADR_(uid_t *)]; char suid_l_[PADL_(uid_t *)]; uid_t * suid; char suid_r_[PADR_(uid_t *)]; }; struct getresgid_args { char rgid_l_[PADL_(gid_t *)]; gid_t * rgid; char rgid_r_[PADR_(gid_t *)]; char egid_l_[PADL_(gid_t *)]; gid_t * egid; char egid_r_[PADR_(gid_t *)]; char sgid_l_[PADL_(gid_t *)]; gid_t * sgid; char sgid_r_[PADR_(gid_t *)]; }; struct kqueue_args { register_t dummy; }; struct extattr_set_fd_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; char data_l_[PADL_(void *)]; void * data; char 
data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_get_fd_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_delete_fd_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; }; struct __setugid_args { char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct eaccess_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char amode_l_[PADL_(int)]; int amode; char amode_r_[PADR_(int)]; }; struct afs3_syscall_args { char syscall_l_[PADL_(long)]; long syscall; char syscall_r_[PADR_(long)]; char parm1_l_[PADL_(long)]; long parm1; char parm1_r_[PADR_(long)]; char parm2_l_[PADL_(long)]; long parm2; char parm2_r_[PADR_(long)]; char parm3_l_[PADL_(long)]; long parm3; char parm3_r_[PADR_(long)]; char parm4_l_[PADL_(long)]; long parm4; char parm4_r_[PADR_(long)]; char parm5_l_[PADL_(long)]; long parm5; char parm5_r_[PADR_(long)]; char parm6_l_[PADL_(long)]; long parm6; char parm6_r_[PADR_(long)]; }; struct nmount_args { char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(unsigned int)]; unsigned int iovcnt; char iovcnt_r_[PADR_(unsigned int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct __mac_get_proc_args { char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_set_proc_args { char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_get_fd_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_get_file_args { char path_p_l_[PADL_(const char *)]; const char * path_p; char path_p_r_[PADR_(const char *)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_set_fd_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_set_file_args { char path_p_l_[PADL_(const char *)]; const char * path_p; char path_p_r_[PADR_(const char *)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct kenv_args { char what_l_[PADL_(int)]; int what; char what_r_[PADR_(int)]; char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)]; char value_l_[PADL_(char *)]; char * value; char value_r_[PADR_(char *)]; char len_l_[PADL_(int)]; int len; char len_r_[PADR_(int)]; }; struct lchflags_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char flags_l_[PADL_(u_long)]; u_long flags; char flags_r_[PADR_(u_long)]; }; struct uuidgen_args { char store_l_[PADL_(struct uuid *)]; struct uuid * store; char store_r_[PADR_(struct uuid *)]; char count_l_[PADL_(int)]; int count; char 
count_r_[PADR_(int)]; }; struct sendfile_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; char hdtr_l_[PADL_(struct sf_hdtr *)]; struct sf_hdtr * hdtr; char hdtr_r_[PADR_(struct sf_hdtr *)]; char sbytes_l_[PADL_(off_t *)]; off_t * sbytes; char sbytes_r_[PADR_(off_t *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct mac_syscall_args { char policy_l_[PADL_(const char *)]; const char * policy; char policy_r_[PADR_(const char *)]; char call_l_[PADL_(int)]; int call; char call_r_[PADR_(int)]; char arg_l_[PADL_(void *)]; void * arg; char arg_r_[PADR_(void *)]; }; struct ksem_close_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; }; struct ksem_post_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; }; struct ksem_wait_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; }; struct ksem_trywait_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; }; struct ksem_init_args { char idp_l_[PADL_(semid_t *)]; semid_t * idp; char idp_r_[PADR_(semid_t *)]; char value_l_[PADL_(unsigned int)]; unsigned int value; char value_r_[PADR_(unsigned int)]; }; struct ksem_open_args { char idp_l_[PADL_(semid_t *)]; semid_t * idp; char idp_r_[PADR_(semid_t *)]; char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)]; char oflag_l_[PADL_(int)]; int oflag; char oflag_r_[PADR_(int)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; char value_l_[PADL_(unsigned int)]; unsigned int value; char value_r_[PADR_(unsigned int)]; }; struct ksem_unlink_args { char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)]; }; struct ksem_getvalue_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; char val_l_[PADL_(int *)]; int * val; char val_r_[PADR_(int *)]; }; struct ksem_destroy_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; }; struct __mac_get_pid_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_get_link_args { char path_p_l_[PADL_(const char *)]; const char * path_p; char path_p_r_[PADR_(const char *)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct __mac_set_link_args { char path_p_l_[PADL_(const char *)]; const char * path_p; char path_p_r_[PADR_(const char *)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct extattr_set_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_get_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char 
attrname_r_[PADR_(const char *)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_delete_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char attrname_l_[PADL_(const char *)]; const char * attrname; char attrname_r_[PADR_(const char *)]; }; struct __mac_execve_args { char fname_l_[PADL_(char *)]; char * fname; char fname_r_[PADR_(char *)]; char argv_l_[PADL_(char **)]; char ** argv; char argv_r_[PADR_(char **)]; char envv_l_[PADL_(char **)]; char ** envv; char envv_r_[PADR_(char **)]; char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)]; }; struct sigaction_args { char sig_l_[PADL_(int)]; int sig; char sig_r_[PADR_(int)]; char act_l_[PADL_(const struct sigaction *)]; const struct sigaction * act; char act_r_[PADR_(const struct sigaction *)]; char oact_l_[PADL_(struct sigaction *)]; struct sigaction * oact; char oact_r_[PADR_(struct sigaction *)]; }; struct sigreturn_args { char sigcntxp_l_[PADL_(const struct __ucontext *)]; const struct __ucontext * sigcntxp; char sigcntxp_r_[PADR_(const struct __ucontext *)]; }; struct getcontext_args { char ucp_l_[PADL_(struct __ucontext *)]; struct __ucontext * ucp; char ucp_r_[PADR_(struct __ucontext *)]; }; struct setcontext_args { char ucp_l_[PADL_(const struct __ucontext *)]; const struct __ucontext * ucp; char ucp_r_[PADR_(const struct __ucontext *)]; }; struct swapcontext_args { char oucp_l_[PADL_(struct __ucontext *)]; struct __ucontext * oucp; char oucp_r_[PADR_(struct __ucontext *)]; char ucp_l_[PADL_(const struct __ucontext *)]; const struct __ucontext * ucp; char ucp_r_[PADR_(const struct __ucontext *)]; }; struct swapoff_args { char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)]; }; struct __acl_get_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_set_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct __acl_delete_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; }; struct __acl_aclcheck_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char type_l_[PADL_(acl_type_t)]; acl_type_t type; char type_r_[PADR_(acl_type_t)]; char aclp_l_[PADL_(struct acl *)]; struct acl * aclp; char aclp_r_[PADR_(struct acl *)]; }; struct sigwait_args { char set_l_[PADL_(const sigset_t *)]; const sigset_t * set; char set_r_[PADR_(const sigset_t *)]; char sig_l_[PADL_(int *)]; int * sig; char sig_r_[PADR_(int *)]; }; struct thr_create_args { char ctx_l_[PADL_(ucontext_t *)]; ucontext_t * ctx; char ctx_r_[PADR_(ucontext_t *)]; char id_l_[PADL_(long *)]; long * id; char id_r_[PADR_(long *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct thr_exit_args { char 
state_l_[PADL_(long *)]; long * state; char state_r_[PADR_(long *)]; }; struct thr_self_args { char id_l_[PADL_(long *)]; long * id; char id_r_[PADR_(long *)]; }; struct thr_kill_args { char id_l_[PADL_(long)]; long id; char id_r_[PADR_(long)]; char sig_l_[PADL_(int)]; int sig; char sig_r_[PADR_(int)]; }; struct jail_attach_args { char jid_l_[PADL_(int)]; int jid; char jid_r_[PADR_(int)]; }; struct extattr_list_fd_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_list_file_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct extattr_list_link_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; }; struct ksem_timedwait_args { char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)]; char abstime_l_[PADL_(const struct timespec *)]; const struct timespec * abstime; char abstime_r_[PADR_(const struct timespec *)]; }; struct thr_suspend_args { char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)]; }; struct thr_wake_args { char id_l_[PADL_(long)]; long id; char id_r_[PADR_(long)]; }; struct kldunloadf_args { char fileid_l_[PADL_(int)]; int fileid; char fileid_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct audit_args { char record_l_[PADL_(const void *)]; const void * record; char record_r_[PADR_(const void *)]; char length_l_[PADL_(u_int)]; u_int length; char length_r_[PADR_(u_int)]; }; struct auditon_args { char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; char length_l_[PADL_(u_int)]; u_int length; char length_r_[PADR_(u_int)]; }; struct getauid_args { char auid_l_[PADL_(uid_t *)]; uid_t * auid; char auid_r_[PADR_(uid_t *)]; }; struct setauid_args { char auid_l_[PADL_(uid_t *)]; uid_t * auid; char auid_r_[PADR_(uid_t *)]; }; struct getaudit_args { char auditinfo_l_[PADL_(struct auditinfo *)]; struct auditinfo * auditinfo; char auditinfo_r_[PADR_(struct auditinfo *)]; }; struct setaudit_args { char auditinfo_l_[PADL_(struct auditinfo *)]; struct auditinfo * auditinfo; char auditinfo_r_[PADR_(struct auditinfo *)]; }; struct getaudit_addr_args { char auditinfo_addr_l_[PADL_(struct auditinfo_addr *)]; struct auditinfo_addr * auditinfo_addr; char auditinfo_addr_r_[PADR_(struct auditinfo_addr *)]; char length_l_[PADL_(u_int)]; u_int length; char length_r_[PADR_(u_int)]; }; struct setaudit_addr_args { char auditinfo_addr_l_[PADL_(struct auditinfo_addr *)]; struct auditinfo_addr * auditinfo_addr; char auditinfo_addr_r_[PADR_(struct auditinfo_addr *)]; char length_l_[PADL_(u_int)]; u_int length; char length_r_[PADR_(u_int)]; }; struct auditctl_args { char 
path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct _umtx_op_args { char obj_l_[PADL_(void *)]; void * obj; char obj_r_[PADR_(void *)]; char op_l_[PADL_(int)]; int op; char op_r_[PADR_(int)]; char val_l_[PADL_(u_long)]; u_long val; char val_r_[PADR_(u_long)]; char uaddr1_l_[PADL_(void *)]; void * uaddr1; char uaddr1_r_[PADR_(void *)]; char uaddr2_l_[PADL_(void *)]; void * uaddr2; char uaddr2_r_[PADR_(void *)]; }; struct thr_new_args { char param_l_[PADL_(struct thr_param *)]; struct thr_param * param; char param_r_[PADR_(struct thr_param *)]; char param_size_l_[PADL_(int)]; int param_size; char param_size_r_[PADR_(int)]; }; struct sigqueue_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char signum_l_[PADL_(int)]; int signum; char signum_r_[PADR_(int)]; char value_l_[PADL_(void *)]; void * value; char value_r_[PADR_(void *)]; }; struct kmq_open_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; char attr_l_[PADL_(const struct mq_attr *)]; const struct mq_attr * attr; char attr_r_[PADR_(const struct mq_attr *)]; }; struct kmq_setattr_args { char mqd_l_[PADL_(int)]; int mqd; char mqd_r_[PADR_(int)]; char attr_l_[PADL_(const struct mq_attr *)]; const struct mq_attr * attr; char attr_r_[PADR_(const struct mq_attr *)]; char oattr_l_[PADL_(struct mq_attr *)]; struct mq_attr * oattr; char oattr_r_[PADR_(struct mq_attr *)]; }; struct kmq_timedreceive_args { char mqd_l_[PADL_(int)]; int mqd; char mqd_r_[PADR_(int)]; char msg_ptr_l_[PADL_(char *)]; char * msg_ptr; char msg_ptr_r_[PADR_(char *)]; char msg_len_l_[PADL_(size_t)]; size_t msg_len; char msg_len_r_[PADR_(size_t)]; char msg_prio_l_[PADL_(unsigned *)]; unsigned * msg_prio; char msg_prio_r_[PADR_(unsigned *)]; char abs_timeout_l_[PADL_(const struct timespec *)]; const struct timespec * abs_timeout; char abs_timeout_r_[PADR_(const struct timespec *)]; }; struct kmq_timedsend_args { char mqd_l_[PADL_(int)]; int mqd; char mqd_r_[PADR_(int)]; char msg_ptr_l_[PADL_(const char *)]; const char * msg_ptr; char msg_ptr_r_[PADR_(const char *)]; char msg_len_l_[PADL_(size_t)]; size_t msg_len; char msg_len_r_[PADR_(size_t)]; char msg_prio_l_[PADL_(unsigned)]; unsigned msg_prio; char msg_prio_r_[PADR_(unsigned)]; char abs_timeout_l_[PADL_(const struct timespec *)]; const struct timespec * abs_timeout; char abs_timeout_r_[PADR_(const struct timespec *)]; }; struct kmq_notify_args { char mqd_l_[PADL_(int)]; int mqd; char mqd_r_[PADR_(int)]; char sigev_l_[PADL_(const struct sigevent *)]; const struct sigevent * sigev; char sigev_r_[PADR_(const struct sigevent *)]; }; struct kmq_unlink_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; }; struct abort2_args { char why_l_[PADL_(const char *)]; const char * why; char why_r_[PADR_(const char *)]; char nargs_l_[PADL_(int)]; int nargs; char nargs_r_[PADR_(int)]; char args_l_[PADL_(void **)]; void ** args; char args_r_[PADR_(void **)]; }; struct thr_set_name_args { char id_l_[PADL_(long)]; long id; char id_r_[PADR_(long)]; char name_l_[PADL_(const char *)]; const char * name; char name_r_[PADR_(const char *)]; }; struct aio_fsync_args { char op_l_[PADL_(int)]; int op; char op_r_[PADR_(int)]; char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct rtprio_thread_args { char 
function_l_[PADL_(int)]; int function; char function_r_[PADR_(int)]; char lwpid_l_[PADL_(lwpid_t)]; lwpid_t lwpid; char lwpid_r_[PADR_(lwpid_t)]; char rtp_l_[PADL_(struct rtprio *)]; struct rtprio * rtp; char rtp_r_[PADR_(struct rtprio *)]; }; struct sctp_peeloff_args { char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)]; char name_l_[PADL_(uint32_t)]; uint32_t name; char name_r_[PADR_(uint32_t)]; }; struct sctp_generic_sendmsg_args { char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)]; char msg_l_[PADL_(caddr_t)]; caddr_t msg; char msg_r_[PADR_(caddr_t)]; char mlen_l_[PADL_(int)]; int mlen; char mlen_r_[PADR_(int)]; char to_l_[PADL_(caddr_t)]; caddr_t to; char to_r_[PADR_(caddr_t)]; char tolen_l_[PADL_(__socklen_t)]; __socklen_t tolen; char tolen_r_[PADR_(__socklen_t)]; char sinfo_l_[PADL_(struct sctp_sndrcvinfo *)]; struct sctp_sndrcvinfo * sinfo; char sinfo_r_[PADR_(struct sctp_sndrcvinfo *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct sctp_generic_sendmsg_iov_args { char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)]; char iov_l_[PADL_(struct iovec *)]; struct iovec * iov; char iov_r_[PADR_(struct iovec *)]; char iovlen_l_[PADL_(int)]; int iovlen; char iovlen_r_[PADR_(int)]; char to_l_[PADL_(caddr_t)]; caddr_t to; char to_r_[PADR_(caddr_t)]; char tolen_l_[PADL_(__socklen_t)]; __socklen_t tolen; char tolen_r_[PADR_(__socklen_t)]; char sinfo_l_[PADL_(struct sctp_sndrcvinfo *)]; struct sctp_sndrcvinfo * sinfo; char sinfo_r_[PADR_(struct sctp_sndrcvinfo *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct sctp_generic_recvmsg_args { char sd_l_[PADL_(int)]; int sd; char sd_r_[PADR_(int)]; char iov_l_[PADL_(struct iovec *)]; struct iovec * iov; char iov_r_[PADR_(struct iovec *)]; char iovlen_l_[PADL_(int)]; int iovlen; char iovlen_r_[PADR_(int)]; char from_l_[PADL_(struct sockaddr *)]; struct sockaddr * from; char from_r_[PADR_(struct sockaddr *)]; char fromlenaddr_l_[PADL_(__socklen_t *)]; __socklen_t * fromlenaddr; char fromlenaddr_r_[PADR_(__socklen_t *)]; char sinfo_l_[PADL_(struct sctp_sndrcvinfo *)]; struct sctp_sndrcvinfo * sinfo; char sinfo_r_[PADR_(struct sctp_sndrcvinfo *)]; char msg_flags_l_[PADL_(int *)]; int * msg_flags; char msg_flags_r_[PADR_(int *)]; }; struct pread_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(void *)]; void * buf; char buf_r_[PADR_(void *)]; char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; }; struct pwrite_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(const void *)]; const void * buf; char buf_r_[PADR_(const void *)]; char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; }; struct mmap_args { char addr_l_[PADL_(caddr_t)]; caddr_t addr; char addr_r_[PADR_(caddr_t)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char prot_l_[PADL_(int)]; int prot; char prot_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char pos_l_[PADL_(off_t)]; off_t pos; char pos_r_[PADR_(off_t)]; }; struct lseek_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; char whence_l_[PADL_(int)]; int whence; char whence_r_[PADR_(int)]; }; struct truncate_args { char 
path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char length_l_[PADL_(off_t)]; off_t length; char length_r_[PADR_(off_t)]; }; struct ftruncate_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char length_l_[PADL_(off_t)]; off_t length; char length_r_[PADR_(off_t)]; }; struct thr_kill2_args { char pid_l_[PADL_(pid_t)]; pid_t pid; char pid_r_[PADR_(pid_t)]; char id_l_[PADL_(long)]; long id; char id_r_[PADR_(long)]; char sig_l_[PADL_(int)]; int sig; char sig_r_[PADR_(int)]; }; struct shm_open_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; }; struct shm_unlink_args { char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; }; struct cpuset_args { char setid_l_[PADL_(cpusetid_t *)]; cpusetid_t * setid; char setid_r_[PADR_(cpusetid_t *)]; }; struct cpuset_setid_args { char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char setid_l_[PADL_(cpusetid_t)]; cpusetid_t setid; char setid_r_[PADR_(cpusetid_t)]; }; struct cpuset_getid_args { char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)]; char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char setid_l_[PADL_(cpusetid_t *)]; cpusetid_t * setid; char setid_r_[PADR_(cpusetid_t *)]; }; struct cpuset_getaffinity_args { char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)]; char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char cpusetsize_l_[PADL_(size_t)]; size_t cpusetsize; char cpusetsize_r_[PADR_(size_t)]; char mask_l_[PADL_(cpuset_t *)]; cpuset_t * mask; char mask_r_[PADR_(cpuset_t *)]; }; struct cpuset_setaffinity_args { char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)]; char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char cpusetsize_l_[PADL_(size_t)]; size_t cpusetsize; char cpusetsize_r_[PADR_(size_t)]; char mask_l_[PADL_(const cpuset_t *)]; const cpuset_t * mask; char mask_r_[PADR_(const cpuset_t *)]; }; struct faccessat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char amode_l_[PADL_(int)]; int amode; char amode_r_[PADR_(int)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct fchmodat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct fchownat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char uid_l_[PADL_(uid_t)]; uid_t uid; char uid_r_[PADR_(uid_t)]; char gid_l_[PADL_(gid_t)]; gid_t gid; char gid_r_[PADR_(gid_t)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct fexecve_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char argv_l_[PADL_(char **)]; char ** argv; char 
argv_r_[PADR_(char **)]; char envv_l_[PADL_(char **)]; char ** envv; char envv_r_[PADR_(char **)]; }; struct futimesat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char times_l_[PADL_(struct timeval *)]; struct timeval * times; char times_r_[PADR_(struct timeval *)]; }; struct linkat_args { char fd1_l_[PADL_(int)]; int fd1; char fd1_r_[PADR_(int)]; char path1_l_[PADL_(char *)]; char * path1; char path1_r_[PADR_(char *)]; char fd2_l_[PADL_(int)]; int fd2; char fd2_r_[PADR_(int)]; char path2_l_[PADL_(char *)]; char * path2; char path2_r_[PADR_(char *)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct mkdirat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; }; struct mkfifoat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; }; struct openat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; }; struct readlinkat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char bufsize_l_[PADL_(size_t)]; size_t bufsize; char bufsize_r_[PADR_(size_t)]; }; struct renameat_args { char oldfd_l_[PADL_(int)]; int oldfd; char oldfd_r_[PADR_(int)]; char old_l_[PADL_(char *)]; char * old; char old_r_[PADR_(char *)]; char newfd_l_[PADL_(int)]; int newfd; char newfd_r_[PADR_(int)]; char new_l_[PADL_(char *)]; char * new; char new_r_[PADR_(char *)]; }; struct symlinkat_args { char path1_l_[PADL_(char *)]; char * path1; char path1_r_[PADR_(char *)]; char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path2_l_[PADL_(char *)]; char * path2; char path2_r_[PADR_(char *)]; }; struct unlinkat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct posix_openpt_args { char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct gssd_syscall_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; }; struct jail_get_args { char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(unsigned int)]; unsigned int iovcnt; char iovcnt_r_[PADR_(unsigned int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct jail_set_args { char iovp_l_[PADL_(struct iovec *)]; struct iovec * iovp; char iovp_r_[PADR_(struct iovec *)]; char iovcnt_l_[PADL_(unsigned int)]; unsigned int iovcnt; char iovcnt_r_[PADR_(unsigned int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct jail_remove_args { char jid_l_[PADL_(int)]; int jid; char jid_r_[PADR_(int)]; }; struct closefrom_args { char lowfd_l_[PADL_(int)]; int lowfd; char lowfd_r_[PADR_(int)]; }; struct __semctl_args { char semid_l_[PADL_(int)]; int semid; char semid_r_[PADR_(int)]; char semnum_l_[PADL_(int)]; int semnum; char 
semnum_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char arg_l_[PADL_(union semun *)]; union semun * arg; char arg_r_[PADR_(union semun *)]; }; struct msgctl_args { char msqid_l_[PADL_(int)]; int msqid; char msqid_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char buf_l_[PADL_(struct msqid_ds *)]; struct msqid_ds * buf; char buf_r_[PADR_(struct msqid_ds *)]; }; struct shmctl_args { char shmid_l_[PADL_(int)]; int shmid; char shmid_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char buf_l_[PADL_(struct shmid_ds *)]; struct shmid_ds * buf; char buf_r_[PADR_(struct shmid_ds *)]; }; struct lpathconf_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char name_l_[PADL_(int)]; int name; char name_r_[PADR_(int)]; }; struct __cap_rights_get_args { char version_l_[PADL_(int)]; int version; char version_r_[PADR_(int)]; char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char rightsp_l_[PADL_(cap_rights_t *)]; cap_rights_t * rightsp; char rightsp_r_[PADR_(cap_rights_t *)]; }; struct cap_enter_args { register_t dummy; }; struct cap_getmode_args { char modep_l_[PADL_(u_int *)]; u_int * modep; char modep_r_[PADR_(u_int *)]; }; struct pdfork_args { char fdp_l_[PADL_(int *)]; int * fdp; char fdp_r_[PADR_(int *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct pdkill_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char signum_l_[PADL_(int)]; int signum; char signum_r_[PADR_(int)]; }; struct pdgetpid_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char pidp_l_[PADL_(pid_t *)]; pid_t * pidp; char pidp_r_[PADR_(pid_t *)]; }; struct pselect_args { char nd_l_[PADL_(int)]; int nd; char nd_r_[PADR_(int)]; char in_l_[PADL_(fd_set *)]; fd_set * in; char in_r_[PADR_(fd_set *)]; char ou_l_[PADL_(fd_set *)]; fd_set * ou; char ou_r_[PADR_(fd_set *)]; char ex_l_[PADL_(fd_set *)]; fd_set * ex; char ex_r_[PADR_(fd_set *)]; char ts_l_[PADL_(const struct timespec *)]; const struct timespec * ts; char ts_r_[PADR_(const struct timespec *)]; char sm_l_[PADL_(const sigset_t *)]; const sigset_t * sm; char sm_r_[PADR_(const sigset_t *)]; }; struct getloginclass_args { char namebuf_l_[PADL_(char *)]; char * namebuf; char namebuf_r_[PADR_(char *)]; char namelen_l_[PADL_(size_t)]; size_t namelen; char namelen_r_[PADR_(size_t)]; }; struct setloginclass_args { char namebuf_l_[PADL_(const char *)]; const char * namebuf; char namebuf_r_[PADR_(const char *)]; }; struct rctl_get_racct_args { char inbufp_l_[PADL_(const void *)]; const void * inbufp; char inbufp_r_[PADR_(const void *)]; char inbuflen_l_[PADL_(size_t)]; size_t inbuflen; char inbuflen_r_[PADR_(size_t)]; char outbufp_l_[PADL_(void *)]; void * outbufp; char outbufp_r_[PADR_(void *)]; char outbuflen_l_[PADL_(size_t)]; size_t outbuflen; char outbuflen_r_[PADR_(size_t)]; }; struct rctl_get_rules_args { char inbufp_l_[PADL_(const void *)]; const void * inbufp; char inbufp_r_[PADR_(const void *)]; char inbuflen_l_[PADL_(size_t)]; size_t inbuflen; char inbuflen_r_[PADR_(size_t)]; char outbufp_l_[PADL_(void *)]; void * outbufp; char outbufp_r_[PADR_(void *)]; char outbuflen_l_[PADL_(size_t)]; size_t outbuflen; char outbuflen_r_[PADR_(size_t)]; }; struct rctl_get_limits_args { char inbufp_l_[PADL_(const void *)]; const void * inbufp; char inbufp_r_[PADR_(const void *)]; char inbuflen_l_[PADL_(size_t)]; size_t inbuflen; char inbuflen_r_[PADR_(size_t)]; char outbufp_l_[PADL_(void *)]; void * outbufp; char 
outbufp_r_[PADR_(void *)]; char outbuflen_l_[PADL_(size_t)]; size_t outbuflen; char outbuflen_r_[PADR_(size_t)]; }; struct rctl_add_rule_args { char inbufp_l_[PADL_(const void *)]; const void * inbufp; char inbufp_r_[PADR_(const void *)]; char inbuflen_l_[PADL_(size_t)]; size_t inbuflen; char inbuflen_r_[PADR_(size_t)]; char outbufp_l_[PADL_(void *)]; void * outbufp; char outbufp_r_[PADR_(void *)]; char outbuflen_l_[PADL_(size_t)]; size_t outbuflen; char outbuflen_r_[PADR_(size_t)]; }; struct rctl_remove_rule_args { char inbufp_l_[PADL_(const void *)]; const void * inbufp; char inbufp_r_[PADR_(const void *)]; char inbuflen_l_[PADL_(size_t)]; size_t inbuflen; char inbuflen_r_[PADR_(size_t)]; char outbufp_l_[PADL_(void *)]; void * outbufp; char outbufp_r_[PADR_(void *)]; char outbuflen_l_[PADL_(size_t)]; size_t outbuflen; char outbuflen_r_[PADR_(size_t)]; }; struct posix_fallocate_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; char len_l_[PADL_(off_t)]; off_t len; char len_r_[PADR_(off_t)]; }; struct posix_fadvise_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; char len_l_[PADL_(off_t)]; off_t len; char len_r_[PADR_(off_t)]; char advice_l_[PADL_(int)]; int advice; char advice_r_[PADR_(int)]; }; struct wait6_args { char idtype_l_[PADL_(idtype_t)]; idtype_t idtype; char idtype_r_[PADR_(idtype_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char status_l_[PADL_(int *)]; int * status; char status_r_[PADR_(int *)]; char options_l_[PADL_(int)]; int options; char options_r_[PADR_(int)]; char wrusage_l_[PADL_(struct __wrusage *)]; struct __wrusage * wrusage; char wrusage_r_[PADR_(struct __wrusage *)]; char info_l_[PADL_(siginfo_t *)]; siginfo_t * info; char info_r_[PADR_(siginfo_t *)]; }; struct cap_rights_limit_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char rightsp_l_[PADL_(cap_rights_t *)]; cap_rights_t * rightsp; char rightsp_r_[PADR_(cap_rights_t *)]; }; struct cap_ioctls_limit_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char cmds_l_[PADL_(const u_long *)]; const u_long * cmds; char cmds_r_[PADR_(const u_long *)]; char ncmds_l_[PADL_(size_t)]; size_t ncmds; char ncmds_r_[PADR_(size_t)]; }; struct cap_ioctls_get_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char cmds_l_[PADL_(u_long *)]; u_long * cmds; char cmds_r_[PADR_(u_long *)]; char maxcmds_l_[PADL_(size_t)]; size_t maxcmds; char maxcmds_r_[PADR_(size_t)]; }; struct cap_fcntls_limit_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char fcntlrights_l_[PADL_(uint32_t)]; uint32_t fcntlrights; char fcntlrights_r_[PADR_(uint32_t)]; }; struct cap_fcntls_get_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char fcntlrightsp_l_[PADL_(uint32_t *)]; uint32_t * fcntlrightsp; char fcntlrightsp_r_[PADR_(uint32_t *)]; }; struct bindat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char name_l_[PADL_(caddr_t)]; caddr_t name; char name_r_[PADR_(caddr_t)]; char namelen_l_[PADL_(int)]; int namelen; char namelen_r_[PADR_(int)]; }; struct connectat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char name_l_[PADL_(caddr_t)]; caddr_t name; char name_r_[PADR_(caddr_t)]; char namelen_l_[PADL_(int)]; int namelen; char namelen_r_[PADR_(int)]; }; struct 
chflagsat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(const char *)]; const char * path; char path_r_[PADR_(const char *)]; char flags_l_[PADL_(u_long)]; u_long flags; char flags_r_[PADR_(u_long)]; char atflag_l_[PADL_(int)]; int atflag; char atflag_r_[PADR_(int)]; }; struct accept4_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char name_l_[PADL_(struct sockaddr *__restrict)]; struct sockaddr *__restrict name; char name_r_[PADR_(struct sockaddr *__restrict)]; char anamelen_l_[PADL_(__socklen_t *__restrict)]; __socklen_t *__restrict anamelen; char anamelen_r_[PADR_(__socklen_t *__restrict)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct pipe2_args { char fildes_l_[PADL_(int *)]; int * fildes; char fildes_r_[PADR_(int *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct aio_mlock_args { char aiocbp_l_[PADL_(struct aiocb *)]; struct aiocb * aiocbp; char aiocbp_r_[PADR_(struct aiocb *)]; }; struct procctl_args { char idtype_l_[PADL_(idtype_t)]; idtype_t idtype; char idtype_r_[PADR_(idtype_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char com_l_[PADL_(int)]; int com; char com_r_[PADR_(int)]; char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)]; }; struct ppoll_args { char fds_l_[PADL_(struct pollfd *)]; struct pollfd * fds; char fds_r_[PADR_(struct pollfd *)]; char nfds_l_[PADL_(u_int)]; u_int nfds; char nfds_r_[PADR_(u_int)]; char ts_l_[PADL_(const struct timespec *)]; const struct timespec * ts; char ts_r_[PADR_(const struct timespec *)]; char set_l_[PADL_(const sigset_t *)]; const sigset_t * set; char set_r_[PADR_(const sigset_t *)]; }; struct futimens_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char times_l_[PADL_(struct timespec *)]; struct timespec * times; char times_r_[PADR_(struct timespec *)]; }; struct utimensat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char times_l_[PADL_(struct timespec *)]; struct timespec * times; char times_r_[PADR_(struct timespec *)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct numa_getaffinity_args { char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char policy_l_[PADL_(struct vm_domain_policy_entry *)]; struct vm_domain_policy_entry * policy; char policy_r_[PADR_(struct vm_domain_policy_entry *)]; }; struct numa_setaffinity_args { char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; char policy_l_[PADL_(const struct vm_domain_policy_entry *)]; const struct vm_domain_policy_entry * policy; char policy_r_[PADR_(const struct vm_domain_policy_entry *)]; }; struct fdatasync_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; }; struct fstat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char sb_l_[PADL_(struct stat *)]; struct stat * sb; char sb_r_[PADR_(struct stat *)]; }; struct fstatat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(struct stat *)]; struct stat * buf; char buf_r_[PADR_(struct stat *)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct fhstat_args { char u_fhp_l_[PADL_(const struct fhandle *)]; const struct fhandle * u_fhp; char 
u_fhp_r_[PADR_(const struct fhandle *)]; char sb_l_[PADL_(struct stat *)]; struct stat * sb; char sb_r_[PADR_(struct stat *)]; }; struct getdirentries_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char count_l_[PADL_(size_t)]; size_t count; char count_r_[PADR_(size_t)]; char basep_l_[PADL_(off_t *)]; off_t * basep; char basep_r_[PADR_(off_t *)]; }; struct statfs_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(struct statfs *)]; struct statfs * buf; char buf_r_[PADR_(struct statfs *)]; }; struct fstatfs_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(struct statfs *)]; struct statfs * buf; char buf_r_[PADR_(struct statfs *)]; }; struct getfsstat_args { char buf_l_[PADL_(struct statfs *)]; struct statfs * buf; char buf_r_[PADR_(struct statfs *)]; char bufsize_l_[PADL_(long)]; long bufsize; char bufsize_r_[PADR_(long)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct fhstatfs_args { char u_fhp_l_[PADL_(const struct fhandle *)]; const struct fhandle * u_fhp; char u_fhp_r_[PADR_(const struct fhandle *)]; char buf_l_[PADL_(struct statfs *)]; struct statfs * buf; char buf_r_[PADR_(struct statfs *)]; }; struct mknodat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; char dev_l_[PADL_(dev_t)]; dev_t dev; char dev_r_[PADR_(dev_t)]; }; struct kevent_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char changelist_l_[PADL_(struct kevent *)]; struct kevent * changelist; char changelist_r_[PADR_(struct kevent *)]; char nchanges_l_[PADL_(int)]; int nchanges; char nchanges_r_[PADR_(int)]; char eventlist_l_[PADL_(struct kevent *)]; struct kevent * eventlist; char eventlist_r_[PADR_(struct kevent *)]; char nevents_l_[PADL_(int)]; int nevents; char nevents_r_[PADR_(int)]; char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)]; }; +struct cpuset_getdomain_args { + char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)]; + char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; + char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; + char domainsetsize_l_[PADL_(size_t)]; size_t domainsetsize; char domainsetsize_r_[PADR_(size_t)]; + char mask_l_[PADL_(domainset_t *)]; domainset_t * mask; char mask_r_[PADR_(domainset_t *)]; + char policy_l_[PADL_(int *)]; int * policy; char policy_r_[PADR_(int *)]; +}; +struct cpuset_setdomain_args { + char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)]; + char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)]; + char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)]; + char domainsetsize_l_[PADL_(size_t)]; size_t domainsetsize; char domainsetsize_r_[PADR_(size_t)]; + char mask_l_[PADL_(domainset_t *)]; domainset_t * mask; char mask_r_[PADR_(domainset_t *)]; + char policy_l_[PADL_(int)]; int policy; char policy_r_[PADR_(int)]; +}; int nosys(struct thread *, struct nosys_args *); void sys_sys_exit(struct thread *, struct sys_exit_args *); int sys_fork(struct thread *, struct fork_args *); int sys_read(struct thread *, struct read_args *); int sys_write(struct thread *, struct write_args *); int sys_open(struct thread *, 
struct open_args *); int sys_close(struct thread *, struct close_args *); int sys_wait4(struct thread *, struct wait4_args *); int sys_link(struct thread *, struct link_args *); int sys_unlink(struct thread *, struct unlink_args *); int sys_chdir(struct thread *, struct chdir_args *); int sys_fchdir(struct thread *, struct fchdir_args *); int sys_chmod(struct thread *, struct chmod_args *); int sys_chown(struct thread *, struct chown_args *); int sys_obreak(struct thread *, struct obreak_args *); int sys_getpid(struct thread *, struct getpid_args *); int sys_mount(struct thread *, struct mount_args *); int sys_unmount(struct thread *, struct unmount_args *); int sys_setuid(struct thread *, struct setuid_args *); int sys_getuid(struct thread *, struct getuid_args *); int sys_geteuid(struct thread *, struct geteuid_args *); int sys_ptrace(struct thread *, struct ptrace_args *); int sys_recvmsg(struct thread *, struct recvmsg_args *); int sys_sendmsg(struct thread *, struct sendmsg_args *); int sys_recvfrom(struct thread *, struct recvfrom_args *); int sys_accept(struct thread *, struct accept_args *); int sys_getpeername(struct thread *, struct getpeername_args *); int sys_getsockname(struct thread *, struct getsockname_args *); int sys_access(struct thread *, struct access_args *); int sys_chflags(struct thread *, struct chflags_args *); int sys_fchflags(struct thread *, struct fchflags_args *); int sys_sync(struct thread *, struct sync_args *); int sys_kill(struct thread *, struct kill_args *); int sys_getppid(struct thread *, struct getppid_args *); int sys_dup(struct thread *, struct dup_args *); int sys_getegid(struct thread *, struct getegid_args *); int sys_profil(struct thread *, struct profil_args *); int sys_ktrace(struct thread *, struct ktrace_args *); int sys_getgid(struct thread *, struct getgid_args *); int sys_getlogin(struct thread *, struct getlogin_args *); int sys_setlogin(struct thread *, struct setlogin_args *); int sys_acct(struct thread *, struct acct_args *); int sys_sigaltstack(struct thread *, struct sigaltstack_args *); int sys_ioctl(struct thread *, struct ioctl_args *); int sys_reboot(struct thread *, struct reboot_args *); int sys_revoke(struct thread *, struct revoke_args *); int sys_symlink(struct thread *, struct symlink_args *); int sys_readlink(struct thread *, struct readlink_args *); int sys_execve(struct thread *, struct execve_args *); int sys_umask(struct thread *, struct umask_args *); int sys_chroot(struct thread *, struct chroot_args *); int sys_msync(struct thread *, struct msync_args *); int sys_vfork(struct thread *, struct vfork_args *); int sys_sbrk(struct thread *, struct sbrk_args *); int sys_sstk(struct thread *, struct sstk_args *); int sys_ovadvise(struct thread *, struct ovadvise_args *); int sys_munmap(struct thread *, struct munmap_args *); int sys_mprotect(struct thread *, struct mprotect_args *); int sys_madvise(struct thread *, struct madvise_args *); int sys_mincore(struct thread *, struct mincore_args *); int sys_getgroups(struct thread *, struct getgroups_args *); int sys_setgroups(struct thread *, struct setgroups_args *); int sys_getpgrp(struct thread *, struct getpgrp_args *); int sys_setpgid(struct thread *, struct setpgid_args *); int sys_setitimer(struct thread *, struct setitimer_args *); int sys_swapon(struct thread *, struct swapon_args *); int sys_getitimer(struct thread *, struct getitimer_args *); int sys_getdtablesize(struct thread *, struct getdtablesize_args *); int sys_dup2(struct thread *, struct dup2_args *); 
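The cpuset_getdomain_args and cpuset_setdomain_args structures added above mirror a userland interface of the form cpuset_getdomain(level, which, id, domainsetsize, mask, policy) and cpuset_setdomain(level, which, id, domainsetsize, mask, policy). A minimal sketch of how the new calls might be driven from a program follows; the <sys/domainset.h> header, the DOMAINSET_ZERO()/DOMAINSET_SET() macros, and the DOMAINSET_POLICY_ROUNDROBIN constant are assumptions about this branch's userland headers, not something this diff establishes.

/*
 * Sketch only: query, then restrict, the calling process's NUMA domain
 * policy through the new cpuset_getdomain()/cpuset_setdomain() system
 * calls whose argument structures and sys_*() prototypes are added in
 * this change.  Header names, DOMAINSET_* macros and the policy constant
 * are assumed, not taken from this diff.
 */
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>	/* assumed location of domainset_t */

#include <err.h>
#include <stdio.h>

int
main(void)
{
	domainset_t mask;
	int policy;

	/* Fetch the current domain mask and allocation policy (-1 = self). */
	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask, &policy) != 0)
		err(1, "cpuset_getdomain");
	printf("policy before: %d\n", policy);

	/* Confine allocations to domain 0 with a round-robin policy. */
	DOMAINSET_ZERO(&mask);
	DOMAINSET_SET(0, &mask);
	if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask, DOMAINSET_POLICY_ROUNDROBIN) != 0)
		err(1, "cpuset_setdomain");
	return (0);
}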
int sys_fcntl(struct thread *, struct fcntl_args *); int sys_select(struct thread *, struct select_args *); int sys_fsync(struct thread *, struct fsync_args *); int sys_setpriority(struct thread *, struct setpriority_args *); int sys_socket(struct thread *, struct socket_args *); int sys_connect(struct thread *, struct connect_args *); int sys_getpriority(struct thread *, struct getpriority_args *); int sys_bind(struct thread *, struct bind_args *); int sys_setsockopt(struct thread *, struct setsockopt_args *); int sys_listen(struct thread *, struct listen_args *); int sys_gettimeofday(struct thread *, struct gettimeofday_args *); int sys_getrusage(struct thread *, struct getrusage_args *); int sys_getsockopt(struct thread *, struct getsockopt_args *); int sys_readv(struct thread *, struct readv_args *); int sys_writev(struct thread *, struct writev_args *); int sys_settimeofday(struct thread *, struct settimeofday_args *); int sys_fchown(struct thread *, struct fchown_args *); int sys_fchmod(struct thread *, struct fchmod_args *); int sys_setreuid(struct thread *, struct setreuid_args *); int sys_setregid(struct thread *, struct setregid_args *); int sys_rename(struct thread *, struct rename_args *); int sys_flock(struct thread *, struct flock_args *); int sys_mkfifo(struct thread *, struct mkfifo_args *); int sys_sendto(struct thread *, struct sendto_args *); int sys_shutdown(struct thread *, struct shutdown_args *); int sys_socketpair(struct thread *, struct socketpair_args *); int sys_mkdir(struct thread *, struct mkdir_args *); int sys_rmdir(struct thread *, struct rmdir_args *); int sys_utimes(struct thread *, struct utimes_args *); int sys_adjtime(struct thread *, struct adjtime_args *); int sys_setsid(struct thread *, struct setsid_args *); int sys_quotactl(struct thread *, struct quotactl_args *); int sys_nlm_syscall(struct thread *, struct nlm_syscall_args *); int sys_nfssvc(struct thread *, struct nfssvc_args *); int sys_lgetfh(struct thread *, struct lgetfh_args *); int sys_getfh(struct thread *, struct getfh_args *); int sysarch(struct thread *, struct sysarch_args *); int sys_rtprio(struct thread *, struct rtprio_args *); int sys_semsys(struct thread *, struct semsys_args *); int sys_msgsys(struct thread *, struct msgsys_args *); int sys_shmsys(struct thread *, struct shmsys_args *); int sys_setfib(struct thread *, struct setfib_args *); int sys_ntp_adjtime(struct thread *, struct ntp_adjtime_args *); int sys_setgid(struct thread *, struct setgid_args *); int sys_setegid(struct thread *, struct setegid_args *); int sys_seteuid(struct thread *, struct seteuid_args *); int sys_pathconf(struct thread *, struct pathconf_args *); int sys_fpathconf(struct thread *, struct fpathconf_args *); int sys_getrlimit(struct thread *, struct __getrlimit_args *); int sys_setrlimit(struct thread *, struct __setrlimit_args *); int sys___sysctl(struct thread *, struct sysctl_args *); int sys_mlock(struct thread *, struct mlock_args *); int sys_munlock(struct thread *, struct munlock_args *); int sys_undelete(struct thread *, struct undelete_args *); int sys_futimes(struct thread *, struct futimes_args *); int sys_getpgid(struct thread *, struct getpgid_args *); int sys_poll(struct thread *, struct poll_args *); int sys_semget(struct thread *, struct semget_args *); int sys_semop(struct thread *, struct semop_args *); int sys_msgget(struct thread *, struct msgget_args *); int sys_msgsnd(struct thread *, struct msgsnd_args *); int sys_msgrcv(struct thread *, struct msgrcv_args *); int 
sys_shmat(struct thread *, struct shmat_args *); int sys_shmdt(struct thread *, struct shmdt_args *); int sys_shmget(struct thread *, struct shmget_args *); int sys_clock_gettime(struct thread *, struct clock_gettime_args *); int sys_clock_settime(struct thread *, struct clock_settime_args *); int sys_clock_getres(struct thread *, struct clock_getres_args *); int sys_ktimer_create(struct thread *, struct ktimer_create_args *); int sys_ktimer_delete(struct thread *, struct ktimer_delete_args *); int sys_ktimer_settime(struct thread *, struct ktimer_settime_args *); int sys_ktimer_gettime(struct thread *, struct ktimer_gettime_args *); int sys_ktimer_getoverrun(struct thread *, struct ktimer_getoverrun_args *); int sys_nanosleep(struct thread *, struct nanosleep_args *); int sys_ffclock_getcounter(struct thread *, struct ffclock_getcounter_args *); int sys_ffclock_setestimate(struct thread *, struct ffclock_setestimate_args *); int sys_ffclock_getestimate(struct thread *, struct ffclock_getestimate_args *); int sys_clock_nanosleep(struct thread *, struct clock_nanosleep_args *); int sys_clock_getcpuclockid2(struct thread *, struct clock_getcpuclockid2_args *); int sys_ntp_gettime(struct thread *, struct ntp_gettime_args *); int sys_minherit(struct thread *, struct minherit_args *); int sys_rfork(struct thread *, struct rfork_args *); int sys_issetugid(struct thread *, struct issetugid_args *); int sys_lchown(struct thread *, struct lchown_args *); int sys_aio_read(struct thread *, struct aio_read_args *); int sys_aio_write(struct thread *, struct aio_write_args *); int sys_lio_listio(struct thread *, struct lio_listio_args *); int sys_lchmod(struct thread *, struct lchmod_args *); int sys_lutimes(struct thread *, struct lutimes_args *); int sys_preadv(struct thread *, struct preadv_args *); int sys_pwritev(struct thread *, struct pwritev_args *); int sys_fhopen(struct thread *, struct fhopen_args *); int sys_modnext(struct thread *, struct modnext_args *); int sys_modstat(struct thread *, struct modstat_args *); int sys_modfnext(struct thread *, struct modfnext_args *); int sys_modfind(struct thread *, struct modfind_args *); int sys_kldload(struct thread *, struct kldload_args *); int sys_kldunload(struct thread *, struct kldunload_args *); int sys_kldfind(struct thread *, struct kldfind_args *); int sys_kldnext(struct thread *, struct kldnext_args *); int sys_kldstat(struct thread *, struct kldstat_args *); int sys_kldfirstmod(struct thread *, struct kldfirstmod_args *); int sys_getsid(struct thread *, struct getsid_args *); int sys_setresuid(struct thread *, struct setresuid_args *); int sys_setresgid(struct thread *, struct setresgid_args *); int sys_aio_return(struct thread *, struct aio_return_args *); int sys_aio_suspend(struct thread *, struct aio_suspend_args *); int sys_aio_cancel(struct thread *, struct aio_cancel_args *); int sys_aio_error(struct thread *, struct aio_error_args *); int sys_yield(struct thread *, struct yield_args *); int sys_mlockall(struct thread *, struct mlockall_args *); int sys_munlockall(struct thread *, struct munlockall_args *); int sys___getcwd(struct thread *, struct __getcwd_args *); int sys_sched_setparam(struct thread *, struct sched_setparam_args *); int sys_sched_getparam(struct thread *, struct sched_getparam_args *); int sys_sched_setscheduler(struct thread *, struct sched_setscheduler_args *); int sys_sched_getscheduler(struct thread *, struct sched_getscheduler_args *); int sys_sched_yield(struct thread *, struct sched_yield_args *); int 
sys_sched_get_priority_max(struct thread *, struct sched_get_priority_max_args *); int sys_sched_get_priority_min(struct thread *, struct sched_get_priority_min_args *); int sys_sched_rr_get_interval(struct thread *, struct sched_rr_get_interval_args *); int sys_utrace(struct thread *, struct utrace_args *); int sys_kldsym(struct thread *, struct kldsym_args *); int sys_jail(struct thread *, struct jail_args *); int sys_nnpfs_syscall(struct thread *, struct nnpfs_syscall_args *); int sys_sigprocmask(struct thread *, struct sigprocmask_args *); int sys_sigsuspend(struct thread *, struct sigsuspend_args *); int sys_sigpending(struct thread *, struct sigpending_args *); int sys_sigtimedwait(struct thread *, struct sigtimedwait_args *); int sys_sigwaitinfo(struct thread *, struct sigwaitinfo_args *); int sys___acl_get_file(struct thread *, struct __acl_get_file_args *); int sys___acl_set_file(struct thread *, struct __acl_set_file_args *); int sys___acl_get_fd(struct thread *, struct __acl_get_fd_args *); int sys___acl_set_fd(struct thread *, struct __acl_set_fd_args *); int sys___acl_delete_file(struct thread *, struct __acl_delete_file_args *); int sys___acl_delete_fd(struct thread *, struct __acl_delete_fd_args *); int sys___acl_aclcheck_file(struct thread *, struct __acl_aclcheck_file_args *); int sys___acl_aclcheck_fd(struct thread *, struct __acl_aclcheck_fd_args *); int sys_extattrctl(struct thread *, struct extattrctl_args *); int sys_extattr_set_file(struct thread *, struct extattr_set_file_args *); int sys_extattr_get_file(struct thread *, struct extattr_get_file_args *); int sys_extattr_delete_file(struct thread *, struct extattr_delete_file_args *); int sys_aio_waitcomplete(struct thread *, struct aio_waitcomplete_args *); int sys_getresuid(struct thread *, struct getresuid_args *); int sys_getresgid(struct thread *, struct getresgid_args *); int sys_kqueue(struct thread *, struct kqueue_args *); int sys_extattr_set_fd(struct thread *, struct extattr_set_fd_args *); int sys_extattr_get_fd(struct thread *, struct extattr_get_fd_args *); int sys_extattr_delete_fd(struct thread *, struct extattr_delete_fd_args *); int sys___setugid(struct thread *, struct __setugid_args *); int sys_eaccess(struct thread *, struct eaccess_args *); int sys_afs3_syscall(struct thread *, struct afs3_syscall_args *); int sys_nmount(struct thread *, struct nmount_args *); int sys___mac_get_proc(struct thread *, struct __mac_get_proc_args *); int sys___mac_set_proc(struct thread *, struct __mac_set_proc_args *); int sys___mac_get_fd(struct thread *, struct __mac_get_fd_args *); int sys___mac_get_file(struct thread *, struct __mac_get_file_args *); int sys___mac_set_fd(struct thread *, struct __mac_set_fd_args *); int sys___mac_set_file(struct thread *, struct __mac_set_file_args *); int sys_kenv(struct thread *, struct kenv_args *); int sys_lchflags(struct thread *, struct lchflags_args *); int sys_uuidgen(struct thread *, struct uuidgen_args *); int sys_sendfile(struct thread *, struct sendfile_args *); int sys_mac_syscall(struct thread *, struct mac_syscall_args *); int sys_ksem_close(struct thread *, struct ksem_close_args *); int sys_ksem_post(struct thread *, struct ksem_post_args *); int sys_ksem_wait(struct thread *, struct ksem_wait_args *); int sys_ksem_trywait(struct thread *, struct ksem_trywait_args *); int sys_ksem_init(struct thread *, struct ksem_init_args *); int sys_ksem_open(struct thread *, struct ksem_open_args *); int sys_ksem_unlink(struct thread *, struct ksem_unlink_args *); int 
sys_ksem_getvalue(struct thread *, struct ksem_getvalue_args *); int sys_ksem_destroy(struct thread *, struct ksem_destroy_args *); int sys___mac_get_pid(struct thread *, struct __mac_get_pid_args *); int sys___mac_get_link(struct thread *, struct __mac_get_link_args *); int sys___mac_set_link(struct thread *, struct __mac_set_link_args *); int sys_extattr_set_link(struct thread *, struct extattr_set_link_args *); int sys_extattr_get_link(struct thread *, struct extattr_get_link_args *); int sys_extattr_delete_link(struct thread *, struct extattr_delete_link_args *); int sys___mac_execve(struct thread *, struct __mac_execve_args *); int sys_sigaction(struct thread *, struct sigaction_args *); int sys_sigreturn(struct thread *, struct sigreturn_args *); int sys_getcontext(struct thread *, struct getcontext_args *); int sys_setcontext(struct thread *, struct setcontext_args *); int sys_swapcontext(struct thread *, struct swapcontext_args *); int sys_swapoff(struct thread *, struct swapoff_args *); int sys___acl_get_link(struct thread *, struct __acl_get_link_args *); int sys___acl_set_link(struct thread *, struct __acl_set_link_args *); int sys___acl_delete_link(struct thread *, struct __acl_delete_link_args *); int sys___acl_aclcheck_link(struct thread *, struct __acl_aclcheck_link_args *); int sys_sigwait(struct thread *, struct sigwait_args *); int sys_thr_create(struct thread *, struct thr_create_args *); int sys_thr_exit(struct thread *, struct thr_exit_args *); int sys_thr_self(struct thread *, struct thr_self_args *); int sys_thr_kill(struct thread *, struct thr_kill_args *); int sys_jail_attach(struct thread *, struct jail_attach_args *); int sys_extattr_list_fd(struct thread *, struct extattr_list_fd_args *); int sys_extattr_list_file(struct thread *, struct extattr_list_file_args *); int sys_extattr_list_link(struct thread *, struct extattr_list_link_args *); int sys_ksem_timedwait(struct thread *, struct ksem_timedwait_args *); int sys_thr_suspend(struct thread *, struct thr_suspend_args *); int sys_thr_wake(struct thread *, struct thr_wake_args *); int sys_kldunloadf(struct thread *, struct kldunloadf_args *); int sys_audit(struct thread *, struct audit_args *); int sys_auditon(struct thread *, struct auditon_args *); int sys_getauid(struct thread *, struct getauid_args *); int sys_setauid(struct thread *, struct setauid_args *); int sys_getaudit(struct thread *, struct getaudit_args *); int sys_setaudit(struct thread *, struct setaudit_args *); int sys_getaudit_addr(struct thread *, struct getaudit_addr_args *); int sys_setaudit_addr(struct thread *, struct setaudit_addr_args *); int sys_auditctl(struct thread *, struct auditctl_args *); int sys__umtx_op(struct thread *, struct _umtx_op_args *); int sys_thr_new(struct thread *, struct thr_new_args *); int sys_sigqueue(struct thread *, struct sigqueue_args *); int sys_kmq_open(struct thread *, struct kmq_open_args *); int sys_kmq_setattr(struct thread *, struct kmq_setattr_args *); int sys_kmq_timedreceive(struct thread *, struct kmq_timedreceive_args *); int sys_kmq_timedsend(struct thread *, struct kmq_timedsend_args *); int sys_kmq_notify(struct thread *, struct kmq_notify_args *); int sys_kmq_unlink(struct thread *, struct kmq_unlink_args *); int sys_abort2(struct thread *, struct abort2_args *); int sys_thr_set_name(struct thread *, struct thr_set_name_args *); int sys_aio_fsync(struct thread *, struct aio_fsync_args *); int sys_rtprio_thread(struct thread *, struct rtprio_thread_args *); int sys_sctp_peeloff(struct thread 
*, struct sctp_peeloff_args *); int sys_sctp_generic_sendmsg(struct thread *, struct sctp_generic_sendmsg_args *); int sys_sctp_generic_sendmsg_iov(struct thread *, struct sctp_generic_sendmsg_iov_args *); int sys_sctp_generic_recvmsg(struct thread *, struct sctp_generic_recvmsg_args *); int sys_pread(struct thread *, struct pread_args *); int sys_pwrite(struct thread *, struct pwrite_args *); int sys_mmap(struct thread *, struct mmap_args *); int sys_lseek(struct thread *, struct lseek_args *); int sys_truncate(struct thread *, struct truncate_args *); int sys_ftruncate(struct thread *, struct ftruncate_args *); int sys_thr_kill2(struct thread *, struct thr_kill2_args *); int sys_shm_open(struct thread *, struct shm_open_args *); int sys_shm_unlink(struct thread *, struct shm_unlink_args *); int sys_cpuset(struct thread *, struct cpuset_args *); int sys_cpuset_setid(struct thread *, struct cpuset_setid_args *); int sys_cpuset_getid(struct thread *, struct cpuset_getid_args *); int sys_cpuset_getaffinity(struct thread *, struct cpuset_getaffinity_args *); int sys_cpuset_setaffinity(struct thread *, struct cpuset_setaffinity_args *); int sys_faccessat(struct thread *, struct faccessat_args *); int sys_fchmodat(struct thread *, struct fchmodat_args *); int sys_fchownat(struct thread *, struct fchownat_args *); int sys_fexecve(struct thread *, struct fexecve_args *); int sys_futimesat(struct thread *, struct futimesat_args *); int sys_linkat(struct thread *, struct linkat_args *); int sys_mkdirat(struct thread *, struct mkdirat_args *); int sys_mkfifoat(struct thread *, struct mkfifoat_args *); int sys_openat(struct thread *, struct openat_args *); int sys_readlinkat(struct thread *, struct readlinkat_args *); int sys_renameat(struct thread *, struct renameat_args *); int sys_symlinkat(struct thread *, struct symlinkat_args *); int sys_unlinkat(struct thread *, struct unlinkat_args *); int sys_posix_openpt(struct thread *, struct posix_openpt_args *); int sys_gssd_syscall(struct thread *, struct gssd_syscall_args *); int sys_jail_get(struct thread *, struct jail_get_args *); int sys_jail_set(struct thread *, struct jail_set_args *); int sys_jail_remove(struct thread *, struct jail_remove_args *); int sys_closefrom(struct thread *, struct closefrom_args *); int sys___semctl(struct thread *, struct __semctl_args *); int sys_msgctl(struct thread *, struct msgctl_args *); int sys_shmctl(struct thread *, struct shmctl_args *); int sys_lpathconf(struct thread *, struct lpathconf_args *); int sys___cap_rights_get(struct thread *, struct __cap_rights_get_args *); int sys_cap_enter(struct thread *, struct cap_enter_args *); int sys_cap_getmode(struct thread *, struct cap_getmode_args *); int sys_pdfork(struct thread *, struct pdfork_args *); int sys_pdkill(struct thread *, struct pdkill_args *); int sys_pdgetpid(struct thread *, struct pdgetpid_args *); int sys_pselect(struct thread *, struct pselect_args *); int sys_getloginclass(struct thread *, struct getloginclass_args *); int sys_setloginclass(struct thread *, struct setloginclass_args *); int sys_rctl_get_racct(struct thread *, struct rctl_get_racct_args *); int sys_rctl_get_rules(struct thread *, struct rctl_get_rules_args *); int sys_rctl_get_limits(struct thread *, struct rctl_get_limits_args *); int sys_rctl_add_rule(struct thread *, struct rctl_add_rule_args *); int sys_rctl_remove_rule(struct thread *, struct rctl_remove_rule_args *); int sys_posix_fallocate(struct thread *, struct posix_fallocate_args *); int sys_posix_fadvise(struct 
thread *, struct posix_fadvise_args *); int sys_wait6(struct thread *, struct wait6_args *); int sys_cap_rights_limit(struct thread *, struct cap_rights_limit_args *); int sys_cap_ioctls_limit(struct thread *, struct cap_ioctls_limit_args *); int sys_cap_ioctls_get(struct thread *, struct cap_ioctls_get_args *); int sys_cap_fcntls_limit(struct thread *, struct cap_fcntls_limit_args *); int sys_cap_fcntls_get(struct thread *, struct cap_fcntls_get_args *); int sys_bindat(struct thread *, struct bindat_args *); int sys_connectat(struct thread *, struct connectat_args *); int sys_chflagsat(struct thread *, struct chflagsat_args *); int sys_accept4(struct thread *, struct accept4_args *); int sys_pipe2(struct thread *, struct pipe2_args *); int sys_aio_mlock(struct thread *, struct aio_mlock_args *); int sys_procctl(struct thread *, struct procctl_args *); int sys_ppoll(struct thread *, struct ppoll_args *); int sys_futimens(struct thread *, struct futimens_args *); int sys_utimensat(struct thread *, struct utimensat_args *); int sys_numa_getaffinity(struct thread *, struct numa_getaffinity_args *); int sys_numa_setaffinity(struct thread *, struct numa_setaffinity_args *); int sys_fdatasync(struct thread *, struct fdatasync_args *); int sys_fstat(struct thread *, struct fstat_args *); int sys_fstatat(struct thread *, struct fstatat_args *); int sys_fhstat(struct thread *, struct fhstat_args *); int sys_getdirentries(struct thread *, struct getdirentries_args *); int sys_statfs(struct thread *, struct statfs_args *); int sys_fstatfs(struct thread *, struct fstatfs_args *); int sys_getfsstat(struct thread *, struct getfsstat_args *); int sys_fhstatfs(struct thread *, struct fhstatfs_args *); int sys_mknodat(struct thread *, struct mknodat_args *); int sys_kevent(struct thread *, struct kevent_args *); +int sys_cpuset_getdomain(struct thread *, struct cpuset_getdomain_args *); +int sys_cpuset_setdomain(struct thread *, struct cpuset_setdomain_args *); #ifdef COMPAT_43 struct ocreat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct olseek_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char offset_l_[PADL_(long)]; long offset; char offset_r_[PADR_(long)]; char whence_l_[PADL_(int)]; int whence; char whence_r_[PADR_(int)]; }; struct ostat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char ub_l_[PADL_(struct ostat *)]; struct ostat * ub; char ub_r_[PADR_(struct ostat *)]; }; struct olstat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char ub_l_[PADL_(struct ostat *)]; struct ostat * ub; char ub_r_[PADR_(struct ostat *)]; }; struct osigaction_args { char signum_l_[PADL_(int)]; int signum; char signum_r_[PADR_(int)]; char nsa_l_[PADL_(struct osigaction *)]; struct osigaction * nsa; char nsa_r_[PADR_(struct osigaction *)]; char osa_l_[PADL_(struct osigaction *)]; struct osigaction * osa; char osa_r_[PADR_(struct osigaction *)]; }; struct osigprocmask_args { char how_l_[PADL_(int)]; int how; char how_r_[PADR_(int)]; char mask_l_[PADL_(osigset_t)]; osigset_t mask; char mask_r_[PADR_(osigset_t)]; }; struct ofstat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char sb_l_[PADL_(struct ostat *)]; struct ostat * sb; char sb_r_[PADR_(struct ostat *)]; }; struct getkerninfo_args { char op_l_[PADL_(int)]; int op; char op_r_[PADR_(int)]; char where_l_[PADL_(char *)]; char * where; char where_r_[PADR_(char *)]; char 
size_l_[PADL_(size_t *)]; size_t * size; char size_r_[PADR_(size_t *)]; char arg_l_[PADL_(int)]; int arg; char arg_r_[PADR_(int)]; }; struct ommap_args { char addr_l_[PADL_(void *)]; void * addr; char addr_r_[PADR_(void *)]; char len_l_[PADL_(int)]; int len; char len_r_[PADR_(int)]; char prot_l_[PADL_(int)]; int prot; char prot_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char pos_l_[PADL_(long)]; long pos; char pos_r_[PADR_(long)]; }; struct gethostname_args { char hostname_l_[PADL_(char *)]; char * hostname; char hostname_r_[PADR_(char *)]; char len_l_[PADL_(u_int)]; u_int len; char len_r_[PADR_(u_int)]; }; struct sethostname_args { char hostname_l_[PADL_(char *)]; char * hostname; char hostname_r_[PADR_(char *)]; char len_l_[PADL_(u_int)]; u_int len; char len_r_[PADR_(u_int)]; }; struct osend_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char buf_l_[PADL_(caddr_t)]; caddr_t buf; char buf_r_[PADR_(caddr_t)]; char len_l_[PADL_(int)]; int len; char len_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct orecv_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char buf_l_[PADL_(caddr_t)]; caddr_t buf; char buf_r_[PADR_(caddr_t)]; char len_l_[PADL_(int)]; int len; char len_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct osigreturn_args { char sigcntxp_l_[PADL_(struct osigcontext *)]; struct osigcontext * sigcntxp; char sigcntxp_r_[PADR_(struct osigcontext *)]; }; struct osigvec_args { char signum_l_[PADL_(int)]; int signum; char signum_r_[PADR_(int)]; char nsv_l_[PADL_(struct sigvec *)]; struct sigvec * nsv; char nsv_r_[PADR_(struct sigvec *)]; char osv_l_[PADL_(struct sigvec *)]; struct sigvec * osv; char osv_r_[PADR_(struct sigvec *)]; }; struct osigblock_args { char mask_l_[PADL_(int)]; int mask; char mask_r_[PADR_(int)]; }; struct osigsetmask_args { char mask_l_[PADL_(int)]; int mask; char mask_r_[PADR_(int)]; }; struct osigsuspend_args { char mask_l_[PADL_(osigset_t)]; osigset_t mask; char mask_r_[PADR_(osigset_t)]; }; struct osigstack_args { char nss_l_[PADL_(struct sigstack *)]; struct sigstack * nss; char nss_r_[PADR_(struct sigstack *)]; char oss_l_[PADL_(struct sigstack *)]; struct sigstack * oss; char oss_r_[PADR_(struct sigstack *)]; }; struct orecvmsg_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char msg_l_[PADL_(struct omsghdr *)]; struct omsghdr * msg; char msg_r_[PADR_(struct omsghdr *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct osendmsg_args { char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char msg_l_[PADL_(caddr_t)]; caddr_t msg; char msg_r_[PADR_(caddr_t)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct otruncate_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char length_l_[PADL_(long)]; long length; char length_r_[PADR_(long)]; }; struct oftruncate_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char length_l_[PADL_(long)]; long length; char length_r_[PADR_(long)]; }; struct ogetpeername_args { char fdes_l_[PADL_(int)]; int fdes; char fdes_r_[PADR_(int)]; char asa_l_[PADL_(caddr_t)]; caddr_t asa; char asa_r_[PADR_(caddr_t)]; char alen_l_[PADL_(int *)]; int * alen; char alen_r_[PADR_(int *)]; }; struct osethostid_args { char hostid_l_[PADL_(long)]; long hostid; char hostid_r_[PADR_(long)]; }; struct ogetrlimit_args { char which_l_[PADL_(u_int)]; u_int 
which; char which_r_[PADR_(u_int)]; char rlp_l_[PADL_(struct orlimit *)]; struct orlimit * rlp; char rlp_r_[PADR_(struct orlimit *)]; }; struct osetrlimit_args { char which_l_[PADL_(u_int)]; u_int which; char which_r_[PADR_(u_int)]; char rlp_l_[PADL_(struct orlimit *)]; struct orlimit * rlp; char rlp_r_[PADR_(struct orlimit *)]; }; struct okillpg_args { char pgid_l_[PADL_(int)]; int pgid; char pgid_r_[PADR_(int)]; char signum_l_[PADL_(int)]; int signum; char signum_r_[PADR_(int)]; }; struct ogetdirentries_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char count_l_[PADL_(u_int)]; u_int count; char count_r_[PADR_(u_int)]; char basep_l_[PADL_(long *)]; long * basep; char basep_r_[PADR_(long *)]; }; int ocreat(struct thread *, struct ocreat_args *); int olseek(struct thread *, struct olseek_args *); int ostat(struct thread *, struct ostat_args *); int olstat(struct thread *, struct olstat_args *); int osigaction(struct thread *, struct osigaction_args *); int osigprocmask(struct thread *, struct osigprocmask_args *); int osigpending(struct thread *, struct osigpending_args *); int ofstat(struct thread *, struct ofstat_args *); int ogetkerninfo(struct thread *, struct getkerninfo_args *); int ogetpagesize(struct thread *, struct getpagesize_args *); int ommap(struct thread *, struct ommap_args *); int owait(struct thread *, struct owait_args *); int ogethostname(struct thread *, struct gethostname_args *); int osethostname(struct thread *, struct sethostname_args *); int oaccept(struct thread *, struct accept_args *); int osend(struct thread *, struct osend_args *); int orecv(struct thread *, struct orecv_args *); int osigreturn(struct thread *, struct osigreturn_args *); int osigvec(struct thread *, struct osigvec_args *); int osigblock(struct thread *, struct osigblock_args *); int osigsetmask(struct thread *, struct osigsetmask_args *); int osigsuspend(struct thread *, struct osigsuspend_args *); int osigstack(struct thread *, struct osigstack_args *); int orecvmsg(struct thread *, struct orecvmsg_args *); int osendmsg(struct thread *, struct osendmsg_args *); int orecvfrom(struct thread *, struct recvfrom_args *); int otruncate(struct thread *, struct otruncate_args *); int oftruncate(struct thread *, struct oftruncate_args *); int ogetpeername(struct thread *, struct ogetpeername_args *); int ogethostid(struct thread *, struct ogethostid_args *); int osethostid(struct thread *, struct osethostid_args *); int ogetrlimit(struct thread *, struct ogetrlimit_args *); int osetrlimit(struct thread *, struct osetrlimit_args *); int okillpg(struct thread *, struct okillpg_args *); int oquota(struct thread *, struct oquota_args *); int ogetsockname(struct thread *, struct getsockname_args *); int ogetdirentries(struct thread *, struct ogetdirentries_args *); #endif /* COMPAT_43 */ #ifdef COMPAT_FREEBSD4 struct freebsd4_getfsstat_args { char buf_l_[PADL_(struct ostatfs *)]; struct ostatfs * buf; char buf_r_[PADR_(struct ostatfs *)]; char bufsize_l_[PADL_(long)]; long bufsize; char bufsize_r_[PADR_(long)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct freebsd4_statfs_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(struct ostatfs *)]; struct ostatfs * buf; char buf_r_[PADR_(struct ostatfs *)]; }; struct freebsd4_fstatfs_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(struct ostatfs *)]; struct ostatfs * buf; char 
buf_r_[PADR_(struct ostatfs *)]; }; struct freebsd4_getdomainname_args { char domainname_l_[PADL_(char *)]; char * domainname; char domainname_r_[PADR_(char *)]; char len_l_[PADL_(int)]; int len; char len_r_[PADR_(int)]; }; struct freebsd4_setdomainname_args { char domainname_l_[PADL_(char *)]; char * domainname; char domainname_r_[PADR_(char *)]; char len_l_[PADL_(int)]; int len; char len_r_[PADR_(int)]; }; struct freebsd4_uname_args { char name_l_[PADL_(struct utsname *)]; struct utsname * name; char name_r_[PADR_(struct utsname *)]; }; struct freebsd4_fhstatfs_args { char u_fhp_l_[PADL_(const struct fhandle *)]; const struct fhandle * u_fhp; char u_fhp_r_[PADR_(const struct fhandle *)]; char buf_l_[PADL_(struct ostatfs *)]; struct ostatfs * buf; char buf_r_[PADR_(struct ostatfs *)]; }; struct freebsd4_sendfile_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char s_l_[PADL_(int)]; int s; char s_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)]; char hdtr_l_[PADL_(struct sf_hdtr *)]; struct sf_hdtr * hdtr; char hdtr_r_[PADR_(struct sf_hdtr *)]; char sbytes_l_[PADL_(off_t *)]; off_t * sbytes; char sbytes_r_[PADR_(off_t *)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; }; struct freebsd4_sigaction_args { char sig_l_[PADL_(int)]; int sig; char sig_r_[PADR_(int)]; char act_l_[PADL_(const struct sigaction *)]; const struct sigaction * act; char act_r_[PADR_(const struct sigaction *)]; char oact_l_[PADL_(struct sigaction *)]; struct sigaction * oact; char oact_r_[PADR_(struct sigaction *)]; }; struct freebsd4_sigreturn_args { char sigcntxp_l_[PADL_(const struct ucontext4 *)]; const struct ucontext4 * sigcntxp; char sigcntxp_r_[PADR_(const struct ucontext4 *)]; }; int freebsd4_getfsstat(struct thread *, struct freebsd4_getfsstat_args *); int freebsd4_statfs(struct thread *, struct freebsd4_statfs_args *); int freebsd4_fstatfs(struct thread *, struct freebsd4_fstatfs_args *); int freebsd4_getdomainname(struct thread *, struct freebsd4_getdomainname_args *); int freebsd4_setdomainname(struct thread *, struct freebsd4_setdomainname_args *); int freebsd4_uname(struct thread *, struct freebsd4_uname_args *); int freebsd4_fhstatfs(struct thread *, struct freebsd4_fhstatfs_args *); int freebsd4_sendfile(struct thread *, struct freebsd4_sendfile_args *); int freebsd4_sigaction(struct thread *, struct freebsd4_sigaction_args *); int freebsd4_sigreturn(struct thread *, struct freebsd4_sigreturn_args *); #endif /* COMPAT_FREEBSD4 */ #ifdef COMPAT_FREEBSD6 struct freebsd6_pread_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(void *)]; void * buf; char buf_r_[PADR_(void *)]; char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)]; char pad_l_[PADL_(int)]; int pad; char pad_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; }; struct freebsd6_pwrite_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(const void *)]; const void * buf; char buf_r_[PADR_(const void *)]; char nbyte_l_[PADL_(size_t)]; size_t nbyte; char nbyte_r_[PADR_(size_t)]; char pad_l_[PADL_(int)]; int pad; char pad_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; }; struct freebsd6_mmap_args { char addr_l_[PADL_(caddr_t)]; caddr_t addr; char addr_r_[PADR_(caddr_t)]; char len_l_[PADL_(size_t)]; size_t len; char len_r_[PADR_(size_t)]; char 
prot_l_[PADL_(int)]; int prot; char prot_r_[PADR_(int)]; char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)]; char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char pad_l_[PADL_(int)]; int pad; char pad_r_[PADR_(int)]; char pos_l_[PADL_(off_t)]; off_t pos; char pos_r_[PADR_(off_t)]; }; struct freebsd6_lseek_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char pad_l_[PADL_(int)]; int pad; char pad_r_[PADR_(int)]; char offset_l_[PADL_(off_t)]; off_t offset; char offset_r_[PADR_(off_t)]; char whence_l_[PADL_(int)]; int whence; char whence_r_[PADR_(int)]; }; struct freebsd6_truncate_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char pad_l_[PADL_(int)]; int pad; char pad_r_[PADR_(int)]; char length_l_[PADL_(off_t)]; off_t length; char length_r_[PADR_(off_t)]; }; struct freebsd6_ftruncate_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char pad_l_[PADL_(int)]; int pad; char pad_r_[PADR_(int)]; char length_l_[PADL_(off_t)]; off_t length; char length_r_[PADR_(off_t)]; }; struct freebsd6_aio_read_args { char aiocbp_l_[PADL_(struct oaiocb *)]; struct oaiocb * aiocbp; char aiocbp_r_[PADR_(struct oaiocb *)]; }; struct freebsd6_aio_write_args { char aiocbp_l_[PADL_(struct oaiocb *)]; struct oaiocb * aiocbp; char aiocbp_r_[PADR_(struct oaiocb *)]; }; struct freebsd6_lio_listio_args { char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; char acb_list_l_[PADL_(struct oaiocb *const *)]; struct oaiocb *const * acb_list; char acb_list_r_[PADR_(struct oaiocb *const *)]; char nent_l_[PADL_(int)]; int nent; char nent_r_[PADR_(int)]; char sig_l_[PADL_(struct osigevent *)]; struct osigevent * sig; char sig_r_[PADR_(struct osigevent *)]; }; int freebsd6_pread(struct thread *, struct freebsd6_pread_args *); int freebsd6_pwrite(struct thread *, struct freebsd6_pwrite_args *); int freebsd6_mmap(struct thread *, struct freebsd6_mmap_args *); int freebsd6_lseek(struct thread *, struct freebsd6_lseek_args *); int freebsd6_truncate(struct thread *, struct freebsd6_truncate_args *); int freebsd6_ftruncate(struct thread *, struct freebsd6_ftruncate_args *); int freebsd6_aio_read(struct thread *, struct freebsd6_aio_read_args *); int freebsd6_aio_write(struct thread *, struct freebsd6_aio_write_args *); int freebsd6_lio_listio(struct thread *, struct freebsd6_lio_listio_args *); #endif /* COMPAT_FREEBSD6 */ #ifdef COMPAT_FREEBSD7 struct freebsd7___semctl_args { char semid_l_[PADL_(int)]; int semid; char semid_r_[PADR_(int)]; char semnum_l_[PADL_(int)]; int semnum; char semnum_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char arg_l_[PADL_(union semun_old *)]; union semun_old * arg; char arg_r_[PADR_(union semun_old *)]; }; struct freebsd7_msgctl_args { char msqid_l_[PADL_(int)]; int msqid; char msqid_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char buf_l_[PADL_(struct msqid_ds_old *)]; struct msqid_ds_old * buf; char buf_r_[PADR_(struct msqid_ds_old *)]; }; struct freebsd7_shmctl_args { char shmid_l_[PADL_(int)]; int shmid; char shmid_r_[PADR_(int)]; char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)]; char buf_l_[PADL_(struct shmid_ds_old *)]; struct shmid_ds_old * buf; char buf_r_[PADR_(struct shmid_ds_old *)]; }; int freebsd7___semctl(struct thread *, struct freebsd7___semctl_args *); int freebsd7_msgctl(struct thread *, struct freebsd7_msgctl_args *); int freebsd7_shmctl(struct thread *, struct freebsd7_shmctl_args *); #endif /* COMPAT_FREEBSD7 */ #ifdef 
COMPAT_FREEBSD10 int freebsd10_pipe(struct thread *, struct freebsd10_pipe_args *); #endif /* COMPAT_FREEBSD10 */ #ifdef COMPAT_FREEBSD11 struct freebsd11_mknod_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; char dev_l_[PADL_(int)]; int dev; char dev_r_[PADR_(int)]; }; struct freebsd11_stat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char ub_l_[PADL_(struct freebsd11_stat *)]; struct freebsd11_stat * ub; char ub_r_[PADR_(struct freebsd11_stat *)]; }; struct freebsd11_fstat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char sb_l_[PADL_(struct freebsd11_stat *)]; struct freebsd11_stat * sb; char sb_r_[PADR_(struct freebsd11_stat *)]; }; struct freebsd11_lstat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char ub_l_[PADL_(struct freebsd11_stat *)]; struct freebsd11_stat * ub; char ub_r_[PADR_(struct freebsd11_stat *)]; }; struct freebsd11_getdirentries_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char count_l_[PADL_(u_int)]; u_int count; char count_r_[PADR_(u_int)]; char basep_l_[PADL_(long *)]; long * basep; char basep_r_[PADR_(long *)]; }; struct freebsd11_getdents_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(char *)]; char * buf; char buf_r_[PADR_(char *)]; char count_l_[PADL_(size_t)]; size_t count; char count_r_[PADR_(size_t)]; }; struct freebsd11_nstat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char ub_l_[PADL_(struct nstat *)]; struct nstat * ub; char ub_r_[PADR_(struct nstat *)]; }; struct freebsd11_nfstat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char sb_l_[PADL_(struct nstat *)]; struct nstat * sb; char sb_r_[PADR_(struct nstat *)]; }; struct freebsd11_nlstat_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char ub_l_[PADL_(struct nstat *)]; struct nstat * ub; char ub_r_[PADR_(struct nstat *)]; }; struct freebsd11_fhstat_args { char u_fhp_l_[PADL_(const struct fhandle *)]; const struct fhandle * u_fhp; char u_fhp_r_[PADR_(const struct fhandle *)]; char sb_l_[PADL_(struct freebsd11_stat *)]; struct freebsd11_stat * sb; char sb_r_[PADR_(struct freebsd11_stat *)]; }; struct freebsd11_kevent_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char changelist_l_[PADL_(struct kevent_freebsd11 *)]; struct kevent_freebsd11 * changelist; char changelist_r_[PADR_(struct kevent_freebsd11 *)]; char nchanges_l_[PADL_(int)]; int nchanges; char nchanges_r_[PADR_(int)]; char eventlist_l_[PADL_(struct kevent_freebsd11 *)]; struct kevent_freebsd11 * eventlist; char eventlist_r_[PADR_(struct kevent_freebsd11 *)]; char nevents_l_[PADL_(int)]; int nevents; char nevents_r_[PADR_(int)]; char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)]; }; struct freebsd11_getfsstat_args { char buf_l_[PADL_(struct freebsd11_statfs *)]; struct freebsd11_statfs * buf; char buf_r_[PADR_(struct freebsd11_statfs *)]; char bufsize_l_[PADL_(long)]; long bufsize; char bufsize_r_[PADR_(long)]; char mode_l_[PADL_(int)]; int mode; char mode_r_[PADR_(int)]; }; struct freebsd11_statfs_args { char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(struct freebsd11_statfs *)]; struct freebsd11_statfs * buf; char buf_r_[PADR_(struct 
freebsd11_statfs *)]; }; struct freebsd11_fstatfs_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char buf_l_[PADL_(struct freebsd11_statfs *)]; struct freebsd11_statfs * buf; char buf_r_[PADR_(struct freebsd11_statfs *)]; }; struct freebsd11_fhstatfs_args { char u_fhp_l_[PADL_(const struct fhandle *)]; const struct fhandle * u_fhp; char u_fhp_r_[PADR_(const struct fhandle *)]; char buf_l_[PADL_(struct freebsd11_statfs *)]; struct freebsd11_statfs * buf; char buf_r_[PADR_(struct freebsd11_statfs *)]; }; struct freebsd11_fstatat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char buf_l_[PADL_(struct freebsd11_stat *)]; struct freebsd11_stat * buf; char buf_r_[PADR_(struct freebsd11_stat *)]; char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)]; }; struct freebsd11_mknodat_args { char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)]; char path_l_[PADL_(char *)]; char * path; char path_r_[PADR_(char *)]; char mode_l_[PADL_(mode_t)]; mode_t mode; char mode_r_[PADR_(mode_t)]; char dev_l_[PADL_(uint32_t)]; uint32_t dev; char dev_r_[PADR_(uint32_t)]; }; int freebsd11_mknod(struct thread *, struct freebsd11_mknod_args *); int freebsd11_stat(struct thread *, struct freebsd11_stat_args *); int freebsd11_fstat(struct thread *, struct freebsd11_fstat_args *); int freebsd11_lstat(struct thread *, struct freebsd11_lstat_args *); int freebsd11_getdirentries(struct thread *, struct freebsd11_getdirentries_args *); int freebsd11_getdents(struct thread *, struct freebsd11_getdents_args *); int freebsd11_nstat(struct thread *, struct freebsd11_nstat_args *); int freebsd11_nfstat(struct thread *, struct freebsd11_nfstat_args *); int freebsd11_nlstat(struct thread *, struct freebsd11_nlstat_args *); int freebsd11_fhstat(struct thread *, struct freebsd11_fhstat_args *); int freebsd11_kevent(struct thread *, struct freebsd11_kevent_args *); int freebsd11_getfsstat(struct thread *, struct freebsd11_getfsstat_args *); int freebsd11_statfs(struct thread *, struct freebsd11_statfs_args *); int freebsd11_fstatfs(struct thread *, struct freebsd11_fstatfs_args *); int freebsd11_fhstatfs(struct thread *, struct freebsd11_fhstatfs_args *); int freebsd11_fstatat(struct thread *, struct freebsd11_fstatat_args *); int freebsd11_mknodat(struct thread *, struct freebsd11_mknodat_args *); #endif /* COMPAT_FREEBSD11 */ #define SYS_AUE_syscall AUE_NULL #define SYS_AUE_exit AUE_EXIT #define SYS_AUE_fork AUE_FORK #define SYS_AUE_read AUE_READ #define SYS_AUE_write AUE_WRITE #define SYS_AUE_open AUE_OPEN_RWTC #define SYS_AUE_close AUE_CLOSE #define SYS_AUE_wait4 AUE_WAIT4 #define SYS_AUE_ocreat AUE_CREAT #define SYS_AUE_link AUE_LINK #define SYS_AUE_unlink AUE_UNLINK #define SYS_AUE_chdir AUE_CHDIR #define SYS_AUE_fchdir AUE_FCHDIR #define SYS_AUE_freebsd11_mknod AUE_MKNOD #define SYS_AUE_chmod AUE_CHMOD #define SYS_AUE_chown AUE_CHOWN #define SYS_AUE_break AUE_NULL #define SYS_AUE_freebsd4_getfsstat AUE_GETFSSTAT #define SYS_AUE_olseek AUE_LSEEK #define SYS_AUE_getpid AUE_GETPID #define SYS_AUE_mount AUE_MOUNT #define SYS_AUE_unmount AUE_UMOUNT #define SYS_AUE_setuid AUE_SETUID #define SYS_AUE_getuid AUE_GETUID #define SYS_AUE_geteuid AUE_GETEUID #define SYS_AUE_ptrace AUE_PTRACE #define SYS_AUE_recvmsg AUE_RECVMSG #define SYS_AUE_sendmsg AUE_SENDMSG #define SYS_AUE_recvfrom AUE_RECVFROM #define SYS_AUE_accept AUE_ACCEPT #define SYS_AUE_getpeername AUE_GETPEERNAME #define SYS_AUE_getsockname AUE_GETSOCKNAME #define 
SYS_AUE_access AUE_ACCESS #define SYS_AUE_chflags AUE_CHFLAGS #define SYS_AUE_fchflags AUE_FCHFLAGS #define SYS_AUE_sync AUE_SYNC #define SYS_AUE_kill AUE_KILL #define SYS_AUE_ostat AUE_STAT #define SYS_AUE_getppid AUE_GETPPID #define SYS_AUE_olstat AUE_LSTAT #define SYS_AUE_dup AUE_DUP #define SYS_AUE_freebsd10_pipe AUE_PIPE #define SYS_AUE_getegid AUE_GETEGID #define SYS_AUE_profil AUE_PROFILE #define SYS_AUE_ktrace AUE_KTRACE #define SYS_AUE_osigaction AUE_SIGACTION #define SYS_AUE_getgid AUE_GETGID #define SYS_AUE_osigprocmask AUE_SIGPROCMASK #define SYS_AUE_getlogin AUE_GETLOGIN #define SYS_AUE_setlogin AUE_SETLOGIN #define SYS_AUE_acct AUE_ACCT #define SYS_AUE_osigpending AUE_SIGPENDING #define SYS_AUE_sigaltstack AUE_SIGALTSTACK #define SYS_AUE_ioctl AUE_IOCTL #define SYS_AUE_reboot AUE_REBOOT #define SYS_AUE_revoke AUE_REVOKE #define SYS_AUE_symlink AUE_SYMLINK #define SYS_AUE_readlink AUE_READLINK #define SYS_AUE_execve AUE_EXECVE #define SYS_AUE_umask AUE_UMASK #define SYS_AUE_chroot AUE_CHROOT #define SYS_AUE_ofstat AUE_FSTAT #define SYS_AUE_ogetkerninfo AUE_NULL #define SYS_AUE_ogetpagesize AUE_NULL #define SYS_AUE_msync AUE_MSYNC #define SYS_AUE_vfork AUE_VFORK #define SYS_AUE_sbrk AUE_SBRK #define SYS_AUE_sstk AUE_SSTK #define SYS_AUE_ommap AUE_MMAP #define SYS_AUE_vadvise AUE_O_VADVISE #define SYS_AUE_munmap AUE_MUNMAP #define SYS_AUE_mprotect AUE_MPROTECT #define SYS_AUE_madvise AUE_MADVISE #define SYS_AUE_mincore AUE_MINCORE #define SYS_AUE_getgroups AUE_GETGROUPS #define SYS_AUE_setgroups AUE_SETGROUPS #define SYS_AUE_getpgrp AUE_GETPGRP #define SYS_AUE_setpgid AUE_SETPGRP #define SYS_AUE_setitimer AUE_SETITIMER #define SYS_AUE_owait AUE_WAIT4 #define SYS_AUE_swapon AUE_SWAPON #define SYS_AUE_getitimer AUE_GETITIMER #define SYS_AUE_ogethostname AUE_SYSCTL #define SYS_AUE_osethostname AUE_SYSCTL #define SYS_AUE_getdtablesize AUE_GETDTABLESIZE #define SYS_AUE_dup2 AUE_DUP2 #define SYS_AUE_fcntl AUE_FCNTL #define SYS_AUE_select AUE_SELECT #define SYS_AUE_fsync AUE_FSYNC #define SYS_AUE_setpriority AUE_SETPRIORITY #define SYS_AUE_socket AUE_SOCKET #define SYS_AUE_connect AUE_CONNECT #define SYS_AUE_oaccept AUE_ACCEPT #define SYS_AUE_getpriority AUE_GETPRIORITY #define SYS_AUE_osend AUE_SEND #define SYS_AUE_orecv AUE_RECV #define SYS_AUE_osigreturn AUE_SIGRETURN #define SYS_AUE_bind AUE_BIND #define SYS_AUE_setsockopt AUE_SETSOCKOPT #define SYS_AUE_listen AUE_LISTEN #define SYS_AUE_osigvec AUE_NULL #define SYS_AUE_osigblock AUE_NULL #define SYS_AUE_osigsetmask AUE_NULL #define SYS_AUE_osigsuspend AUE_NULL #define SYS_AUE_osigstack AUE_NULL #define SYS_AUE_orecvmsg AUE_RECVMSG #define SYS_AUE_osendmsg AUE_SENDMSG #define SYS_AUE_gettimeofday AUE_GETTIMEOFDAY #define SYS_AUE_getrusage AUE_GETRUSAGE #define SYS_AUE_getsockopt AUE_GETSOCKOPT #define SYS_AUE_readv AUE_READV #define SYS_AUE_writev AUE_WRITEV #define SYS_AUE_settimeofday AUE_SETTIMEOFDAY #define SYS_AUE_fchown AUE_FCHOWN #define SYS_AUE_fchmod AUE_FCHMOD #define SYS_AUE_orecvfrom AUE_RECVFROM #define SYS_AUE_setreuid AUE_SETREUID #define SYS_AUE_setregid AUE_SETREGID #define SYS_AUE_rename AUE_RENAME #define SYS_AUE_otruncate AUE_TRUNCATE #define SYS_AUE_oftruncate AUE_FTRUNCATE #define SYS_AUE_flock AUE_FLOCK #define SYS_AUE_mkfifo AUE_MKFIFO #define SYS_AUE_sendto AUE_SENDTO #define SYS_AUE_shutdown AUE_SHUTDOWN #define SYS_AUE_socketpair AUE_SOCKETPAIR #define SYS_AUE_mkdir AUE_MKDIR #define SYS_AUE_rmdir AUE_RMDIR #define SYS_AUE_utimes AUE_UTIMES #define SYS_AUE_adjtime AUE_ADJTIME #define SYS_AUE_ogetpeername 
AUE_GETPEERNAME #define SYS_AUE_ogethostid AUE_SYSCTL #define SYS_AUE_osethostid AUE_SYSCTL #define SYS_AUE_ogetrlimit AUE_GETRLIMIT #define SYS_AUE_osetrlimit AUE_SETRLIMIT #define SYS_AUE_okillpg AUE_KILLPG #define SYS_AUE_setsid AUE_SETSID #define SYS_AUE_quotactl AUE_QUOTACTL #define SYS_AUE_oquota AUE_O_QUOTA #define SYS_AUE_ogetsockname AUE_GETSOCKNAME #define SYS_AUE_nlm_syscall AUE_NULL #define SYS_AUE_nfssvc AUE_NFS_SVC #define SYS_AUE_ogetdirentries AUE_GETDIRENTRIES #define SYS_AUE_freebsd4_statfs AUE_STATFS #define SYS_AUE_freebsd4_fstatfs AUE_FSTATFS #define SYS_AUE_lgetfh AUE_LGETFH #define SYS_AUE_getfh AUE_NFS_GETFH #define SYS_AUE_freebsd4_getdomainname AUE_SYSCTL #define SYS_AUE_freebsd4_setdomainname AUE_SYSCTL #define SYS_AUE_freebsd4_uname AUE_NULL #define SYS_AUE_sysarch AUE_SYSARCH #define SYS_AUE_rtprio AUE_RTPRIO #define SYS_AUE_semsys AUE_SEMSYS #define SYS_AUE_msgsys AUE_MSGSYS #define SYS_AUE_shmsys AUE_SHMSYS #define SYS_AUE_freebsd6_pread AUE_PREAD #define SYS_AUE_freebsd6_pwrite AUE_PWRITE #define SYS_AUE_setfib AUE_SETFIB #define SYS_AUE_ntp_adjtime AUE_NTP_ADJTIME #define SYS_AUE_setgid AUE_SETGID #define SYS_AUE_setegid AUE_SETEGID #define SYS_AUE_seteuid AUE_SETEUID #define SYS_AUE_freebsd11_stat AUE_STAT #define SYS_AUE_freebsd11_fstat AUE_FSTAT #define SYS_AUE_freebsd11_lstat AUE_LSTAT #define SYS_AUE_pathconf AUE_PATHCONF #define SYS_AUE_fpathconf AUE_FPATHCONF #define SYS_AUE_getrlimit AUE_GETRLIMIT #define SYS_AUE_setrlimit AUE_SETRLIMIT #define SYS_AUE_freebsd11_getdirentries AUE_GETDIRENTRIES #define SYS_AUE_freebsd6_mmap AUE_MMAP #define SYS_AUE_freebsd6_lseek AUE_LSEEK #define SYS_AUE_freebsd6_truncate AUE_TRUNCATE #define SYS_AUE_freebsd6_ftruncate AUE_FTRUNCATE #define SYS_AUE___sysctl AUE_SYSCTL #define SYS_AUE_mlock AUE_MLOCK #define SYS_AUE_munlock AUE_MUNLOCK #define SYS_AUE_undelete AUE_UNDELETE #define SYS_AUE_futimes AUE_FUTIMES #define SYS_AUE_getpgid AUE_GETPGID #define SYS_AUE_poll AUE_POLL #define SYS_AUE_freebsd7___semctl AUE_SEMCTL #define SYS_AUE_semget AUE_SEMGET #define SYS_AUE_semop AUE_SEMOP #define SYS_AUE_freebsd7_msgctl AUE_MSGCTL #define SYS_AUE_msgget AUE_MSGGET #define SYS_AUE_msgsnd AUE_MSGSND #define SYS_AUE_msgrcv AUE_MSGRCV #define SYS_AUE_shmat AUE_SHMAT #define SYS_AUE_freebsd7_shmctl AUE_SHMCTL #define SYS_AUE_shmdt AUE_SHMDT #define SYS_AUE_shmget AUE_SHMGET #define SYS_AUE_clock_gettime AUE_NULL #define SYS_AUE_clock_settime AUE_CLOCK_SETTIME #define SYS_AUE_clock_getres AUE_NULL #define SYS_AUE_ktimer_create AUE_NULL #define SYS_AUE_ktimer_delete AUE_NULL #define SYS_AUE_ktimer_settime AUE_NULL #define SYS_AUE_ktimer_gettime AUE_NULL #define SYS_AUE_ktimer_getoverrun AUE_NULL #define SYS_AUE_nanosleep AUE_NULL #define SYS_AUE_ffclock_getcounter AUE_NULL #define SYS_AUE_ffclock_setestimate AUE_NULL #define SYS_AUE_ffclock_getestimate AUE_NULL #define SYS_AUE_clock_nanosleep AUE_NULL #define SYS_AUE_clock_getcpuclockid2 AUE_NULL #define SYS_AUE_ntp_gettime AUE_NULL #define SYS_AUE_minherit AUE_MINHERIT #define SYS_AUE_rfork AUE_RFORK #define SYS_AUE_issetugid AUE_ISSETUGID #define SYS_AUE_lchown AUE_LCHOWN #define SYS_AUE_aio_read AUE_AIO_READ #define SYS_AUE_aio_write AUE_AIO_WRITE #define SYS_AUE_lio_listio AUE_LIO_LISTIO #define SYS_AUE_freebsd11_getdents AUE_O_GETDENTS #define SYS_AUE_lchmod AUE_LCHMOD #define SYS_AUE_lutimes AUE_LUTIMES #define SYS_AUE_freebsd11_nstat AUE_STAT #define SYS_AUE_freebsd11_nfstat AUE_FSTAT #define SYS_AUE_freebsd11_nlstat AUE_LSTAT #define SYS_AUE_preadv AUE_PREADV #define 
SYS_AUE_pwritev AUE_PWRITEV #define SYS_AUE_freebsd4_fhstatfs AUE_FHSTATFS #define SYS_AUE_fhopen AUE_FHOPEN #define SYS_AUE_freebsd11_fhstat AUE_FHSTAT #define SYS_AUE_modnext AUE_NULL #define SYS_AUE_modstat AUE_NULL #define SYS_AUE_modfnext AUE_NULL #define SYS_AUE_modfind AUE_NULL #define SYS_AUE_kldload AUE_MODLOAD #define SYS_AUE_kldunload AUE_MODUNLOAD #define SYS_AUE_kldfind AUE_NULL #define SYS_AUE_kldnext AUE_NULL #define SYS_AUE_kldstat AUE_NULL #define SYS_AUE_kldfirstmod AUE_NULL #define SYS_AUE_getsid AUE_GETSID #define SYS_AUE_setresuid AUE_SETRESUID #define SYS_AUE_setresgid AUE_SETRESGID #define SYS_AUE_aio_return AUE_AIO_RETURN #define SYS_AUE_aio_suspend AUE_AIO_SUSPEND #define SYS_AUE_aio_cancel AUE_AIO_CANCEL #define SYS_AUE_aio_error AUE_AIO_ERROR #define SYS_AUE_freebsd6_aio_read AUE_AIO_READ #define SYS_AUE_freebsd6_aio_write AUE_AIO_WRITE #define SYS_AUE_freebsd6_lio_listio AUE_LIO_LISTIO #define SYS_AUE_yield AUE_NULL #define SYS_AUE_mlockall AUE_MLOCKALL #define SYS_AUE_munlockall AUE_MUNLOCKALL #define SYS_AUE___getcwd AUE_GETCWD #define SYS_AUE_sched_setparam AUE_NULL #define SYS_AUE_sched_getparam AUE_NULL #define SYS_AUE_sched_setscheduler AUE_NULL #define SYS_AUE_sched_getscheduler AUE_NULL #define SYS_AUE_sched_yield AUE_NULL #define SYS_AUE_sched_get_priority_max AUE_NULL #define SYS_AUE_sched_get_priority_min AUE_NULL #define SYS_AUE_sched_rr_get_interval AUE_NULL #define SYS_AUE_utrace AUE_NULL #define SYS_AUE_freebsd4_sendfile AUE_SENDFILE #define SYS_AUE_kldsym AUE_NULL #define SYS_AUE_jail AUE_JAIL #define SYS_AUE_nnpfs_syscall AUE_NULL #define SYS_AUE_sigprocmask AUE_SIGPROCMASK #define SYS_AUE_sigsuspend AUE_SIGSUSPEND #define SYS_AUE_freebsd4_sigaction AUE_SIGACTION #define SYS_AUE_sigpending AUE_SIGPENDING #define SYS_AUE_freebsd4_sigreturn AUE_SIGRETURN #define SYS_AUE_sigtimedwait AUE_SIGWAIT #define SYS_AUE_sigwaitinfo AUE_NULL #define SYS_AUE___acl_get_file AUE_ACL_GET_FILE #define SYS_AUE___acl_set_file AUE_ACL_SET_FILE #define SYS_AUE___acl_get_fd AUE_ACL_GET_FD #define SYS_AUE___acl_set_fd AUE_ACL_SET_FD #define SYS_AUE___acl_delete_file AUE_ACL_DELETE_FILE #define SYS_AUE___acl_delete_fd AUE_ACL_DELETE_FD #define SYS_AUE___acl_aclcheck_file AUE_ACL_CHECK_FILE #define SYS_AUE___acl_aclcheck_fd AUE_ACL_CHECK_FD #define SYS_AUE_extattrctl AUE_EXTATTRCTL #define SYS_AUE_extattr_set_file AUE_EXTATTR_SET_FILE #define SYS_AUE_extattr_get_file AUE_EXTATTR_GET_FILE #define SYS_AUE_extattr_delete_file AUE_EXTATTR_DELETE_FILE #define SYS_AUE_aio_waitcomplete AUE_AIO_WAITCOMPLETE #define SYS_AUE_getresuid AUE_GETRESUID #define SYS_AUE_getresgid AUE_GETRESGID #define SYS_AUE_kqueue AUE_KQUEUE #define SYS_AUE_freebsd11_kevent AUE_KEVENT #define SYS_AUE_extattr_set_fd AUE_EXTATTR_SET_FD #define SYS_AUE_extattr_get_fd AUE_EXTATTR_GET_FD #define SYS_AUE_extattr_delete_fd AUE_EXTATTR_DELETE_FD #define SYS_AUE___setugid AUE_SETUGID #define SYS_AUE_eaccess AUE_EACCESS #define SYS_AUE_afs3_syscall AUE_NULL #define SYS_AUE_nmount AUE_NMOUNT #define SYS_AUE___mac_get_proc AUE_NULL #define SYS_AUE___mac_set_proc AUE_NULL #define SYS_AUE___mac_get_fd AUE_NULL #define SYS_AUE___mac_get_file AUE_NULL #define SYS_AUE___mac_set_fd AUE_NULL #define SYS_AUE___mac_set_file AUE_NULL #define SYS_AUE_kenv AUE_NULL #define SYS_AUE_lchflags AUE_LCHFLAGS #define SYS_AUE_uuidgen AUE_NULL #define SYS_AUE_sendfile AUE_SENDFILE #define SYS_AUE_mac_syscall AUE_NULL #define SYS_AUE_freebsd11_getfsstat AUE_GETFSSTAT #define SYS_AUE_freebsd11_statfs AUE_STATFS #define 
SYS_AUE_freebsd11_fstatfs AUE_FSTATFS #define SYS_AUE_freebsd11_fhstatfs AUE_FHSTATFS #define SYS_AUE_ksem_close AUE_SEMCLOSE #define SYS_AUE_ksem_post AUE_SEMPOST #define SYS_AUE_ksem_wait AUE_SEMWAIT #define SYS_AUE_ksem_trywait AUE_SEMTRYWAIT #define SYS_AUE_ksem_init AUE_SEMINIT #define SYS_AUE_ksem_open AUE_SEMOPEN #define SYS_AUE_ksem_unlink AUE_SEMUNLINK #define SYS_AUE_ksem_getvalue AUE_SEMGETVALUE #define SYS_AUE_ksem_destroy AUE_SEMDESTROY #define SYS_AUE___mac_get_pid AUE_NULL #define SYS_AUE___mac_get_link AUE_NULL #define SYS_AUE___mac_set_link AUE_NULL #define SYS_AUE_extattr_set_link AUE_EXTATTR_SET_LINK #define SYS_AUE_extattr_get_link AUE_EXTATTR_GET_LINK #define SYS_AUE_extattr_delete_link AUE_EXTATTR_DELETE_LINK #define SYS_AUE___mac_execve AUE_NULL #define SYS_AUE_sigaction AUE_SIGACTION #define SYS_AUE_sigreturn AUE_SIGRETURN #define SYS_AUE_getcontext AUE_NULL #define SYS_AUE_setcontext AUE_NULL #define SYS_AUE_swapcontext AUE_NULL #define SYS_AUE_swapoff AUE_SWAPOFF #define SYS_AUE___acl_get_link AUE_ACL_GET_LINK #define SYS_AUE___acl_set_link AUE_ACL_SET_LINK #define SYS_AUE___acl_delete_link AUE_ACL_DELETE_LINK #define SYS_AUE___acl_aclcheck_link AUE_ACL_CHECK_LINK #define SYS_AUE_sigwait AUE_SIGWAIT #define SYS_AUE_thr_create AUE_THR_CREATE #define SYS_AUE_thr_exit AUE_THR_EXIT #define SYS_AUE_thr_self AUE_NULL #define SYS_AUE_thr_kill AUE_THR_KILL #define SYS_AUE_jail_attach AUE_JAIL_ATTACH #define SYS_AUE_extattr_list_fd AUE_EXTATTR_LIST_FD #define SYS_AUE_extattr_list_file AUE_EXTATTR_LIST_FILE #define SYS_AUE_extattr_list_link AUE_EXTATTR_LIST_LINK #define SYS_AUE_ksem_timedwait AUE_SEMWAIT #define SYS_AUE_thr_suspend AUE_NULL #define SYS_AUE_thr_wake AUE_NULL #define SYS_AUE_kldunloadf AUE_MODUNLOAD #define SYS_AUE_audit AUE_AUDIT #define SYS_AUE_auditon AUE_AUDITON #define SYS_AUE_getauid AUE_GETAUID #define SYS_AUE_setauid AUE_SETAUID #define SYS_AUE_getaudit AUE_GETAUDIT #define SYS_AUE_setaudit AUE_SETAUDIT #define SYS_AUE_getaudit_addr AUE_GETAUDIT_ADDR #define SYS_AUE_setaudit_addr AUE_SETAUDIT_ADDR #define SYS_AUE_auditctl AUE_AUDITCTL #define SYS_AUE__umtx_op AUE_NULL #define SYS_AUE_thr_new AUE_THR_NEW #define SYS_AUE_sigqueue AUE_NULL #define SYS_AUE_kmq_open AUE_MQ_OPEN #define SYS_AUE_kmq_setattr AUE_MQ_SETATTR #define SYS_AUE_kmq_timedreceive AUE_MQ_TIMEDRECEIVE #define SYS_AUE_kmq_timedsend AUE_MQ_TIMEDSEND #define SYS_AUE_kmq_notify AUE_MQ_NOTIFY #define SYS_AUE_kmq_unlink AUE_MQ_UNLINK #define SYS_AUE_abort2 AUE_NULL #define SYS_AUE_thr_set_name AUE_NULL #define SYS_AUE_aio_fsync AUE_AIO_FSYNC #define SYS_AUE_rtprio_thread AUE_RTPRIO #define SYS_AUE_sctp_peeloff AUE_SCTP_PEELOFF #define SYS_AUE_sctp_generic_sendmsg AUE_SCTP_GENERIC_SENDMSG #define SYS_AUE_sctp_generic_sendmsg_iov AUE_SCTP_GENERIC_SENDMSG_IOV #define SYS_AUE_sctp_generic_recvmsg AUE_SCTP_GENERIC_RECVMSG #define SYS_AUE_pread AUE_PREAD #define SYS_AUE_pwrite AUE_PWRITE #define SYS_AUE_mmap AUE_MMAP #define SYS_AUE_lseek AUE_LSEEK #define SYS_AUE_truncate AUE_TRUNCATE #define SYS_AUE_ftruncate AUE_FTRUNCATE #define SYS_AUE_thr_kill2 AUE_THR_KILL2 #define SYS_AUE_shm_open AUE_SHMOPEN #define SYS_AUE_shm_unlink AUE_SHMUNLINK #define SYS_AUE_cpuset AUE_NULL #define SYS_AUE_cpuset_setid AUE_NULL #define SYS_AUE_cpuset_getid AUE_NULL #define SYS_AUE_cpuset_getaffinity AUE_NULL #define SYS_AUE_cpuset_setaffinity AUE_NULL #define SYS_AUE_faccessat AUE_FACCESSAT #define SYS_AUE_fchmodat AUE_FCHMODAT #define SYS_AUE_fchownat AUE_FCHOWNAT #define SYS_AUE_fexecve AUE_FEXECVE #define 
SYS_AUE_freebsd11_fstatat AUE_FSTATAT #define SYS_AUE_futimesat AUE_FUTIMESAT #define SYS_AUE_linkat AUE_LINKAT #define SYS_AUE_mkdirat AUE_MKDIRAT #define SYS_AUE_mkfifoat AUE_MKFIFOAT #define SYS_AUE_freebsd11_mknodat AUE_MKNODAT #define SYS_AUE_openat AUE_OPENAT_RWTC #define SYS_AUE_readlinkat AUE_READLINKAT #define SYS_AUE_renameat AUE_RENAMEAT #define SYS_AUE_symlinkat AUE_SYMLINKAT #define SYS_AUE_unlinkat AUE_UNLINKAT #define SYS_AUE_posix_openpt AUE_POSIX_OPENPT #define SYS_AUE_gssd_syscall AUE_NULL #define SYS_AUE_jail_get AUE_JAIL_GET #define SYS_AUE_jail_set AUE_JAIL_SET #define SYS_AUE_jail_remove AUE_JAIL_REMOVE #define SYS_AUE_closefrom AUE_CLOSEFROM #define SYS_AUE___semctl AUE_SEMCTL #define SYS_AUE_msgctl AUE_MSGCTL #define SYS_AUE_shmctl AUE_SHMCTL #define SYS_AUE_lpathconf AUE_LPATHCONF #define SYS_AUE___cap_rights_get AUE_CAP_RIGHTS_GET #define SYS_AUE_cap_enter AUE_CAP_ENTER #define SYS_AUE_cap_getmode AUE_CAP_GETMODE #define SYS_AUE_pdfork AUE_PDFORK #define SYS_AUE_pdkill AUE_PDKILL #define SYS_AUE_pdgetpid AUE_PDGETPID #define SYS_AUE_pselect AUE_SELECT #define SYS_AUE_getloginclass AUE_GETLOGINCLASS #define SYS_AUE_setloginclass AUE_SETLOGINCLASS #define SYS_AUE_rctl_get_racct AUE_NULL #define SYS_AUE_rctl_get_rules AUE_NULL #define SYS_AUE_rctl_get_limits AUE_NULL #define SYS_AUE_rctl_add_rule AUE_NULL #define SYS_AUE_rctl_remove_rule AUE_NULL #define SYS_AUE_posix_fallocate AUE_POSIX_FALLOCATE #define SYS_AUE_posix_fadvise AUE_POSIX_FADVISE #define SYS_AUE_wait6 AUE_WAIT6 #define SYS_AUE_cap_rights_limit AUE_CAP_RIGHTS_LIMIT #define SYS_AUE_cap_ioctls_limit AUE_CAP_IOCTLS_LIMIT #define SYS_AUE_cap_ioctls_get AUE_CAP_IOCTLS_GET #define SYS_AUE_cap_fcntls_limit AUE_CAP_FCNTLS_LIMIT #define SYS_AUE_cap_fcntls_get AUE_CAP_FCNTLS_GET #define SYS_AUE_bindat AUE_BINDAT #define SYS_AUE_connectat AUE_CONNECTAT #define SYS_AUE_chflagsat AUE_CHFLAGSAT #define SYS_AUE_accept4 AUE_ACCEPT #define SYS_AUE_pipe2 AUE_PIPE #define SYS_AUE_aio_mlock AUE_AIO_MLOCK #define SYS_AUE_procctl AUE_PROCCTL #define SYS_AUE_ppoll AUE_POLL #define SYS_AUE_futimens AUE_FUTIMES #define SYS_AUE_utimensat AUE_FUTIMESAT #define SYS_AUE_numa_getaffinity AUE_NULL #define SYS_AUE_numa_setaffinity AUE_NULL #define SYS_AUE_fdatasync AUE_FSYNC #define SYS_AUE_fstat AUE_FSTAT #define SYS_AUE_fstatat AUE_FSTATAT #define SYS_AUE_fhstat AUE_FHSTAT #define SYS_AUE_getdirentries AUE_GETDIRENTRIES #define SYS_AUE_statfs AUE_STATFS #define SYS_AUE_fstatfs AUE_FSTATFS #define SYS_AUE_getfsstat AUE_GETFSSTAT #define SYS_AUE_fhstatfs AUE_FHSTATFS #define SYS_AUE_mknodat AUE_MKNODAT #define SYS_AUE_kevent AUE_KEVENT +#define SYS_AUE_cpuset_getdomain AUE_NULL +#define SYS_AUE_cpuset_setdomain AUE_NULL #undef PAD_ #undef PADL_ #undef PADR_ #endif /* !_SYS_SYSPROTO_H_ */ Index: user/jeff/numa/sys/vm/uma_core.c =================================================================== --- user/jeff/numa/sys/vm/uma_core.c (revision 326888) +++ user/jeff/numa/sys/vm/uma_core.c (revision 326889) @@ -1,3841 +1,3840 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson * Copyright (c) 2004, 2005 Bosko Milekic * Copyright (c) 2004-2006 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * uma_core.c Implementation of the Universal Memory allocator * * This allocator is intended to replace the multitude of similar object caches * in the standard FreeBSD kernel. The intent is to be flexible as well as * efficient. A primary design goal is to return unused memory to the rest of * the system. This will make the system as a whole more flexible due to the * ability to move memory to subsystems which most need it instead of leaving * pools of reserved memory unused. * * The basic ideas stem from similar slab/zone based allocators whose algorithms * are well known. * */ /* * TODO: * - Improve memory usage for large allocations * - Investigate cache size adjustments */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_param.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG_MEMGUARD #include #endif /* * This is the zone and keg from which all zones are spawned. */ static uma_zone_t kegs; static uma_zone_t zones; /* This is the zone from which all offpage uma_slab_ts are allocated. */ static uma_zone_t slabzone; /* * The initial hash tables come out of this zone so they can be allocated * prior to malloc coming up. */ static uma_zone_t hashzone; /* The boot-time adjusted value for cache line alignment. */ int uma_align_cache = 64 - 1; static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); /* * Are we allowed to allocate buckets? */ static int bucketdisable = 1; /* Linked list of all kegs in the system */ static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs); /* Linked list of all cache-only zones in the system */ static LIST_HEAD(,uma_zone) uma_cachezones = LIST_HEAD_INITIALIZER(uma_cachezones); /* This RW lock protects the keg list */ static struct rwlock_padalign __exclusive_cache_line uma_rwlock; /* * Pointer and counter to pool of pages, that is preallocated at * startup to bootstrap UMA. Early zones continue to use the pool * until it is depleted, so allocations may happen after boot, thus * we need a mutex to protect it. */ static char *bootmem; static int boot_pages; static struct mtx uma_boot_pages_mtx; static struct sx uma_drain_lock; /* kmem soft limit. */ static unsigned long uma_kmem_limit; static volatile unsigned long uma_kmem_total; /* Is the VM done starting up? 
*/ static int booted = 0; #define UMA_STARTUP 1 #define UMA_STARTUP2 2 /* * This is the handle used to schedule events that need to happen * outside of the allocation fast path. */ static struct callout uma_callout; #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ /* * This structure is passed as the zone ctor arg so that I don't have to create * a special allocation function just for zones. */ struct uma_zctor_args { const char *name; size_t size; uma_ctor ctor; uma_dtor dtor; uma_init uminit; uma_fini fini; uma_import import; uma_release release; void *arg; uma_keg_t keg; int align; uint32_t flags; }; struct uma_kctor_args { uma_zone_t zone; size_t size; uma_init uminit; uma_fini fini; int align; uint32_t flags; }; struct uma_bucket_zone { uma_zone_t ubz_zone; char *ubz_name; int ubz_entries; /* Number of items it can hold. */ int ubz_maxsize; /* Maximum allocation size per-item. */ }; /* * Compute the actual number of bucket entries to pack them in power * of two sizes for more efficient space utilization. */ #define BUCKET_SIZE(n) \ (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *)) #define BUCKET_MAX BUCKET_SIZE(256) struct uma_bucket_zone bucket_zones[] = { { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 }, { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 }, { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 }, { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 }, { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 }, { NULL, "32 Bucket", BUCKET_SIZE(32), 512 }, { NULL, "64 Bucket", BUCKET_SIZE(64), 256 }, { NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, { NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, { NULL, NULL, 0} }; /* * Flags and enumerations to be passed to internal functions. */ enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI }; /* Prototypes.. 
*/ static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); static void page_free(void *, vm_size_t, uint8_t); static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int); static void cache_drain(uma_zone_t); static void bucket_drain(uma_zone_t, uma_bucket_t); static void bucket_cache_drain(uma_zone_t zone); static int keg_ctor(void *, int, void *, int); static void keg_dtor(void *, int, void *); static int zone_ctor(void *, int, void *, int); static void zone_dtor(void *, int, void *); static int zero_init(void *, int, int); static void keg_small_init(uma_keg_t keg); static void keg_large_init(uma_keg_t keg); static void zone_foreach(void (*zfunc)(uma_zone_t)); static void zone_timeout(uma_zone_t zone); static int hash_alloc(struct uma_hash *); static int hash_expand(struct uma_hash *, struct uma_hash *); static void hash_free(struct uma_hash *hash); static void uma_timeout(void *); static void uma_startup3(void); static void *zone_alloc_item(uma_zone_t, void *, int, int); static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); static void bucket_enable(void); static void bucket_init(void); static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); static void bucket_zone_drain(void); static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int); static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int); static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int); static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item); static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, int align, uint32_t flags); static int zone_import(uma_zone_t, void **, int, int, int); static void zone_release(uma_zone_t, void **, int); static void uma_zero_item(void *, uma_zone_t); void uma_print_zone(uma_zone_t); void uma_print_stats(void); static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS); static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS); #ifdef INVARIANTS static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item); static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item); #endif SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT, 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones"); SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats"); static int zone_warnings = 1; SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0, "Warn when UMA zones becomes full"); /* Adjust bytes under management by UMA. */ static inline void uma_total_dec(unsigned long size) { atomic_subtract_long(&uma_kmem_total, size); } static inline void uma_total_inc(unsigned long size) { if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit) uma_reclaim_wakeup(); } /* * This routine checks to see whether or not it's safe to enable buckets. */ static void bucket_enable(void) { bucketdisable = vm_page_count_min(); } /* * Initialize bucket_zones, the array of zones of buckets of various sizes. * * For each zone, calculate the memory required for each bucket, consisting * of the header and an array of pointers. 
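The BUCKET_SIZE() packing above is easier to see with concrete numbers. The following stand-alone sketch mirrors the macro's arithmetic; the 8-byte pointer and the 24-byte bucket header are assumptions for illustration only, since the real values depend on the platform ABI and on the definition of struct uma_bucket elsewhere in UMA.

#include <stdio.h>

/* Illustrative sizes only; the real values come from the ABI and from
 * sizeof(struct uma_bucket). */
#define PTR_SIZE	8	/* sizeof(void *) on LP64 */
#define BUCKET_HDR	24	/* hypothetical bucket header size */

/* Same shape as the kernel's BUCKET_SIZE(n). */
#define BUCKET_SIZE(n)	(((PTR_SIZE * (n)) - BUCKET_HDR) / PTR_SIZE)

int
main(void)
{
	int n[] = { 4, 6, 8, 12, 16, 32, 64, 128, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(n) / sizeof(n[0]); i++)
		printf("BUCKET_SIZE(%3d) = %3d entries, item size %4d bytes\n",
		    n[i], BUCKET_SIZE(n[i]),
		    BUCKET_HDR + BUCKET_SIZE(n[i]) * PTR_SIZE);
	return (0);
}

With these assumptions each bucket zone's item size comes back out to exactly n pointers' worth of space (32, 48, 64, ... 2048 bytes), so the header consumes entry slots rather than adding slop on top of the allocation.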
*/ static void bucket_init(void) { struct uma_bucket_zone *ubz; int size; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) { size = roundup(sizeof(struct uma_bucket), sizeof(void *)); size += sizeof(void *) * ubz->ubz_entries; ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA); } } /* * Given a desired number of entries for a bucket, return the zone from which * to allocate the bucket. */ static struct uma_bucket_zone * bucket_zone_lookup(int entries) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) if (ubz->ubz_entries >= entries) return (ubz); ubz--; return (ubz); } static int bucket_select(int size) { struct uma_bucket_zone *ubz; ubz = &bucket_zones[0]; if (size > ubz->ubz_maxsize) return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1); for (; ubz->ubz_entries != 0; ubz++) if (ubz->ubz_maxsize < size) break; ubz--; return (ubz->ubz_entries); } static uma_bucket_t bucket_alloc(uma_zone_t zone, void *udata, int flags) { struct uma_bucket_zone *ubz; uma_bucket_t bucket; /* * This is to stop us from allocating per cpu buckets while we're * running out of vm.boot_pages. Otherwise, we would exhaust the * boot pages. This also prevents us from allocating buckets in * low memory situations. */ if (bucketdisable) return (NULL); /* * To limit bucket recursion we store the original zone flags * in a cookie passed via zalloc_arg/zfree_arg. This allows the * NOVM flag to persist even through deep recursions. We also * store ZFLAG_BUCKET once we have recursed attempting to allocate * a bucket for a bucket zone so we do not allow infinite bucket * recursion. This cookie will even persist to frees of unused * buckets via the allocation path or bucket allocations in the * free path. 
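The cookie trick described above can be reduced to a few lines. This is an illustrative stand-alone sketch, not kernel code: the flag value is made up (the real UMA_ZFLAG_BUCKET is defined elsewhere in UMA), and the nested uma_zalloc_arg() call is simulated by calling the function again with the marked cookie.

#include <stdint.h>
#include <stdio.h>

#define ZFLAG_BUCKET	0x10000000u	/* stand-in value for illustration */

static void *
bucket_alloc_sketch(void *udata)
{
	if ((uintptr_t)udata & ZFLAG_BUCKET) {
		/* A bucket allocation nested inside a bucket allocation:
		 * give up rather than recurse forever. */
		printf("nested bucket allocation refused\n");
		return (NULL);
	}
	udata = (void *)((uintptr_t)udata | ZFLAG_BUCKET);
	/* The real code would call uma_zalloc_arg(ubz->ubz_zone, udata,
	 * flags) here; that call may recurse back into bucket allocation,
	 * which is simulated by the direct call below. */
	return (bucket_alloc_sketch(udata));
}

int
main(void)
{
	(void)bucket_alloc_sketch((void *)(uintptr_t)0);
	return (0);
}

Because the flag rides along in the opaque udata pointer, it survives every nested allocation and free on the path without any extra argument plumbing, which is the design choice the comment above is describing.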
*/ if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) udata = (void *)(uintptr_t)zone->uz_flags; else { if ((uintptr_t)udata & UMA_ZFLAG_BUCKET) return (NULL); udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET); } if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY) flags |= M_NOVM; ubz = bucket_zone_lookup(zone->uz_count); if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0) ubz++; bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags); if (bucket) { #ifdef INVARIANTS bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); #endif bucket->ub_cnt = 0; bucket->ub_entries = ubz->ubz_entries; } return (bucket); } static void bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) { struct uma_bucket_zone *ubz; KASSERT(bucket->ub_cnt == 0, ("bucket_free: Freeing a non free bucket.")); if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) udata = (void *)(uintptr_t)zone->uz_flags; ubz = bucket_zone_lookup(bucket->ub_entries); uma_zfree_arg(ubz->ubz_zone, bucket, udata); } static void bucket_zone_drain(void) { struct uma_bucket_zone *ubz; for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) zone_drain(ubz->ubz_zone); } static void zone_log_warning(uma_zone_t zone) { static const struct timeval warninterval = { 300, 0 }; if (!zone_warnings || zone->uz_warning == NULL) return; if (ratecheck(&zone->uz_ratecheck, &warninterval)) printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); } static inline void zone_maxaction(uma_zone_t zone) { if (zone->uz_maxaction.ta_func != NULL) taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); } static void zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) { uma_klink_t klink; LIST_FOREACH(klink, &zone->uz_kegs, kl_link) kegfn(klink->kl_keg); } /* * Routine called by timeout which is used to fire off some time interval * based calculations. (stats, hash size, etc.) * * Arguments: * arg Unused * * Returns: * Nothing */ static void uma_timeout(void *unused) { bucket_enable(); zone_foreach(zone_timeout); /* Reschedule this event */ callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); } /* * Routine to perform timeout driven calculations. This expands the * hashes and does per cpu statistics aggregation. * * Returns nothing. */ static void keg_timeout(uma_keg_t keg) { KEG_LOCK(keg); /* * Expand the keg hash table. * * This is done if the number of slabs is larger than the hash size. * What I'm trying to do here is completely reduce collisions. This * may be a little aggressive. Should I allow for two collisions max? */ if (keg->uk_flags & UMA_ZONE_HASH && keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { struct uma_hash newhash; struct uma_hash oldhash; int ret; /* * This is so involved because allocating and freeing * while the keg lock is held will lead to deadlock. * I have to do everything in stages and check for * races. */ newhash = keg->uk_hash; KEG_UNLOCK(keg); ret = hash_alloc(&newhash); KEG_LOCK(keg); if (ret) { if (hash_expand(&keg->uk_hash, &newhash)) { oldhash = keg->uk_hash; keg->uk_hash = newhash; } else oldhash = newhash; KEG_UNLOCK(keg); hash_free(&oldhash); return; } } KEG_UNLOCK(keg); } static void zone_timeout(uma_zone_t zone) { zone_foreach_keg(zone, &keg_timeout); } /* * Allocate and zero fill the next sized hash table from the appropriate * backing store. * * Arguments: * hash A new hash structure with the old hash size in uh_hashsize * * Returns: * 1 on success and 0 on failure. 
*/ static int hash_alloc(struct uma_hash *hash) { int oldsize; int alloc; oldsize = hash->uh_hashsize; /* We're just going to go to a power of two greater */ if (oldsize) { hash->uh_hashsize = oldsize * 2; alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; hash->uh_slab_hash = (struct slabhead *)malloc(alloc, M_UMAHASH, M_NOWAIT); } else { alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; hash->uh_slab_hash = zone_alloc_item(hashzone, NULL, 0, M_WAITOK); hash->uh_hashsize = UMA_HASH_SIZE_INIT; } if (hash->uh_slab_hash) { bzero(hash->uh_slab_hash, alloc); hash->uh_hashmask = hash->uh_hashsize - 1; return (1); } return (0); } /* * Expands the hash table for HASH zones. This is done from zone_timeout * to reduce collisions. This must not be done in the regular allocation * path, otherwise, we can recurse on the vm while allocating pages. * * Arguments: * oldhash The hash you want to expand * newhash The hash structure for the new table * * Returns: * Nothing * * Discussion: */ static int hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) { uma_slab_t slab; int hval; int i; if (!newhash->uh_slab_hash) return (0); if (oldhash->uh_hashsize >= newhash->uh_hashsize) return (0); /* * I need to investigate hash algorithms for resizing without a * full rehash. */ for (i = 0; i < oldhash->uh_hashsize; i++) while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); hval = UMA_HASH(newhash, slab->us_data); SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], slab, us_hlink); } return (1); } /* * Free the hash bucket to the appropriate backing store. * * Arguments: * slab_hash The hash bucket we're freeing * hashsize The number of entries in that hash bucket * * Returns: * Nothing */ static void hash_free(struct uma_hash *hash) { if (hash->uh_slab_hash == NULL) return; if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE); else free(hash->uh_slab_hash, M_UMAHASH); } /* * Frees all outstanding items in a bucket * * Arguments: * zone The zone to free to, must be unlocked. * bucket The free/alloc bucket with items, cpu queue must be locked. * * Returns: * Nothing */ static void bucket_drain(uma_zone_t zone, uma_bucket_t bucket) { int i; if (bucket == NULL) return; if (zone->uz_fini) for (i = 0; i < bucket->ub_cnt; i++) zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); bucket->ub_cnt = 0; } /* * Drains the per cpu caches for a zone. * * NOTE: This may only be called while the zone is being torn down, and not * during normal operation. This is necessary in order that we do not have * to migrate CPUs to drain the per-CPU caches. * * Arguments: * zone The zone to drain, must be unlocked. * * Returns: * Nothing */ static void cache_drain(uma_zone_t zone) { uma_cache_t cache; int cpu; /* * XXX: It is safe to not lock the per-CPU caches, because we're * tearing down the zone anyway. I.e., there will be no further use * of the caches at this point. * * XXX: It would be good to be able to assert that the zone is being * torn down to prevent improper use of cache_drain(). * * XXX: We lock the zone before passing into bucket_cache_drain() as * it is used elsewhere. Should the tear-down path be made special * there in some form?
*/ CPU_FOREACH(cpu) { cache = &zone->uz_cpu[cpu]; bucket_drain(zone, cache->uc_allocbucket); bucket_drain(zone, cache->uc_freebucket); if (cache->uc_allocbucket != NULL) bucket_free(zone, cache->uc_allocbucket, NULL); if (cache->uc_freebucket != NULL) bucket_free(zone, cache->uc_freebucket, NULL); cache->uc_allocbucket = cache->uc_freebucket = NULL; } ZONE_LOCK(zone); bucket_cache_drain(zone); ZONE_UNLOCK(zone); } static void cache_shrink(uma_zone_t zone) { if (zone->uz_flags & UMA_ZFLAG_INTERNAL) return; ZONE_LOCK(zone); zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2; ZONE_UNLOCK(zone); } static void cache_drain_safe_cpu(uma_zone_t zone) { uma_cache_t cache; uma_bucket_t b1, b2; int domain; if (zone->uz_flags & UMA_ZFLAG_INTERNAL) return; b1 = b2 = NULL; ZONE_LOCK(zone); critical_enter(); if (zone->uz_flags & UMA_ZONE_NUMA) domain = PCPU_GET(domain); else domain = 0; cache = &zone->uz_cpu[curcpu]; if (cache->uc_allocbucket) { if (cache->uc_allocbucket->ub_cnt != 0) LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, cache->uc_allocbucket, ub_link); else b1 = cache->uc_allocbucket; cache->uc_allocbucket = NULL; } if (cache->uc_freebucket) { if (cache->uc_freebucket->ub_cnt != 0) LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, cache->uc_freebucket, ub_link); else b2 = cache->uc_freebucket; cache->uc_freebucket = NULL; } critical_exit(); ZONE_UNLOCK(zone); if (b1) bucket_free(zone, b1, NULL); if (b2) bucket_free(zone, b2, NULL); } /* * Safely drain per-CPU caches of a zone(s) to alloc bucket. * This is an expensive call because it needs to bind to all CPUs * one by one and enter a critical section on each of them in order * to safely access their cache buckets. * Zone lock must not be held when calling this function. */ static void cache_drain_safe(uma_zone_t zone) { int cpu; /* * Polite bucket sizes shrinking was not enough, shrink aggressively. */ if (zone) cache_shrink(zone); else zone_foreach(cache_shrink); CPU_FOREACH(cpu) { thread_lock(curthread); sched_bind(curthread, cpu); thread_unlock(curthread); if (zone) cache_drain_safe_cpu(zone); else zone_foreach(cache_drain_safe_cpu); } thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } /* * Drain the cached buckets from a zone. Expects a locked zone on entry. */ static void bucket_cache_drain(uma_zone_t zone) { uma_zone_domain_t zdom; uma_bucket_t bucket; int i; /* * Drain the bucket queues and free the buckets. */ for (i = 0; i < vm_ndomains; i++) { zdom = &zone->uz_domain[i]; while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { LIST_REMOVE(bucket, ub_link); ZONE_UNLOCK(zone); bucket_drain(zone, bucket); bucket_free(zone, bucket, NULL); ZONE_LOCK(zone); } } /* * Shrink further bucket sizes. Price of single zone lock collision * is probably lower than the price of a global cache drain. */ if (zone->uz_count > zone->uz_count_min) zone->uz_count--; } static void keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) { uint8_t *mem; int i; uint8_t flags; CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes", keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera); mem = slab->us_data; flags = slab->us_flags; i = start; if (keg->uk_fini != NULL) { for (i--; i > -1; i--) keg->uk_fini(slab->us_data + (keg->uk_rsize * i), keg->uk_size); } if (keg->uk_flags & UMA_ZONE_OFFPAGE) zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags); uma_total_dec(PAGE_SIZE * keg->uk_ppera); } /* * Frees pages from a keg back to the system.
This is done on demand from * the pageout daemon. * * Returns nothing. */ static void keg_drain(uma_keg_t keg) { struct slabhead freeslabs = { 0 }; uma_domain_t dom; uma_slab_t slab, tmp; int i; /* * We don't want to take pages from statically allocated kegs at this * time */ if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) return; CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u", keg->uk_name, keg, keg->uk_free); KEG_LOCK(keg); if (keg->uk_free == 0) goto finished; for (i = 0; i < vm_ndomains; i++) { dom = &keg->uk_domain[i]; LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) { /* We have nowhere to free these to. */ if (slab->us_flags & UMA_SLAB_BOOT) continue; LIST_REMOVE(slab, us_link); keg->uk_pages -= keg->uk_ppera; keg->uk_free -= keg->uk_ipers; if (keg->uk_flags & UMA_ZONE_HASH) UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); } } finished: KEG_UNLOCK(keg); while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); keg_free_slab(keg, slab, keg->uk_ipers); } } static void zone_drain_wait(uma_zone_t zone, int waitok) { /* * Set draining to interlock with zone_dtor() so we can release our * locks as we go. Only dtor() should do a WAITOK call since it * is the only call that knows the structure will still be available * when it wakes up. */ ZONE_LOCK(zone); while (zone->uz_flags & UMA_ZFLAG_DRAINING) { if (waitok == M_NOWAIT) goto out; msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); } zone->uz_flags |= UMA_ZFLAG_DRAINING; bucket_cache_drain(zone); ZONE_UNLOCK(zone); /* * The DRAINING flag protects us from being freed while * we're running. Normally the uma_rwlock would protect us but we * must be able to release and acquire the right lock for each keg. */ zone_foreach_keg(zone, &keg_drain); ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_DRAINING; wakeup(zone); out: ZONE_UNLOCK(zone); } void zone_drain(uma_zone_t zone) { zone_drain_wait(zone, M_NOWAIT); } /* * Allocate a new slab for a keg. This does not insert the slab onto a list. * * Arguments: * wait Shall we wait? * * Returns: * The slab that was allocated or NULL if there is no memory and the * caller specified M_NOWAIT. */ static uma_slab_t keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait) { uma_alloc allocf; uma_slab_t slab; unsigned long size; uint8_t *mem; uint8_t flags; int i; KASSERT(domain >= 0 && domain < vm_ndomains, ("keg_alloc_slab: domain %d out of range", domain)); mtx_assert(&keg->uk_lock, MA_OWNED); slab = NULL; mem = NULL; allocf = keg->uk_allocf; KEG_UNLOCK(keg); size = keg->uk_ppera * PAGE_SIZE; if (keg->uk_flags & UMA_ZONE_OFFPAGE) { slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait); if (slab == NULL) goto out; } /* * This reproduces the old vm_zone behavior of zero filling pages the * first time they are added to a zone. * * Malloced items are zeroed in uma_zalloc. */ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) wait |= M_ZERO; else wait &= ~M_ZERO; if (keg->uk_flags & UMA_ZONE_NODUMP) wait |= M_NODUMP; /* zone is passed for legacy reasons. 
*/ mem = allocf(zone, size, domain, &flags, wait); if (mem == NULL) { if (keg->uk_flags & UMA_ZONE_OFFPAGE) zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); slab = NULL; goto out; } uma_total_inc(size); /* Point the slab into the allocated memory */ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) slab = (uma_slab_t )(mem + keg->uk_pgoff); if (keg->uk_flags & UMA_ZONE_VTOSLAB) for (i = 0; i < keg->uk_ppera; i++) vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); slab->us_keg = keg; slab->us_data = mem; slab->us_freecount = keg->uk_ipers; slab->us_flags = flags; slab->us_domain = domain; BIT_FILL(SLAB_SETSIZE, &slab->us_free); #ifdef INVARIANTS BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree); #endif if (keg->uk_init != NULL) { for (i = 0; i < keg->uk_ipers; i++) if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), keg->uk_size, wait) != 0) break; if (i != keg->uk_ipers) { keg_free_slab(keg, slab, i); slab = NULL; goto out; } } out: KEG_LOCK(keg); CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)", slab, keg->uk_name, keg); if (slab != NULL) { if (keg->uk_flags & UMA_ZONE_HASH) UMA_HASH_INSERT(&keg->uk_hash, slab, mem); keg->uk_pages += keg->uk_ppera; keg->uk_free += keg->uk_ipers; } return (slab); } /* * This function is intended to be used early on in place of page_alloc() so * that we may use the boot time page cache to satisfy allocations before * the VM is ready. */ static void * startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { uma_keg_t keg; void *mem; int pages; keg = zone_first_keg(zone); pages = howmany(bytes, PAGE_SIZE); KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n")); /* * Check our small startup cache to see if it has pages remaining. */ mtx_lock(&uma_boot_pages_mtx); if (pages <= boot_pages) { mem = bootmem; boot_pages -= pages; bootmem += pages * PAGE_SIZE; mtx_unlock(&uma_boot_pages_mtx); *pflag = UMA_SLAB_BOOT; return (mem); } mtx_unlock(&uma_boot_pages_mtx); if (booted < UMA_STARTUP2) panic("UMA: Increase vm.boot_pages"); /* * Now that we've booted reset these users to their real allocator. */ #ifdef UMA_MD_SMALL_ALLOC keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc; #else keg->uk_allocf = page_alloc; #endif return keg->uk_allocf(zone, bytes, domain, pflag, wait); } /* * Allocates a number of pages from the system * * Arguments: * bytes The number of bytes requested * wait Shall we wait? * * Returns: * A pointer to the alloced memory or possibly * NULL if M_NOWAIT is set. */ static void * page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait) { void *p; /* Returned page */ *pflag = UMA_SLAB_KERNEL; p = (void *) kmem_malloc_domain(domain, bytes, wait); return (p); } /* * Allocates a number of pages from within an object * * Arguments: * bytes The number of bytes requested * wait Shall we wait? * * Returns: * A pointer to the alloced memory or possibly * NULL if M_NOWAIT is set. */ static void * noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, int wait) { TAILQ_HEAD(, vm_page) alloctail; u_long npages; vm_offset_t retkva, zkva; vm_page_t p, p_next; uma_keg_t keg; TAILQ_INIT(&alloctail); keg = zone_first_keg(zone); npages = howmany(bytes, PAGE_SIZE); while (npages > 0) { p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : VM_ALLOC_NOWAIT)); if (p != NULL) { /* * Since the page does not belong to an object, its * listq is unused. 
*/ TAILQ_INSERT_TAIL(&alloctail, p, listq); npages--; continue; } /* * Page allocation failed, free intermediate pages and * exit. */ TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { vm_page_unwire(p, PQ_NONE); vm_page_free(p); } return (NULL); } *flags = UMA_SLAB_PRIV; zkva = keg->uk_kva + atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); retkva = zkva; TAILQ_FOREACH(p, &alloctail, listq) { pmap_qenter(zkva, &p, 1); zkva += PAGE_SIZE; } return ((void *)retkva); } /* * Frees a number of pages to the system * * Arguments: * mem A pointer to the memory to be freed * size The size of the memory being freed * flags The original p->us_flags field * * Returns: * Nothing */ static void page_free(void *mem, vm_size_t size, uint8_t flags) { struct vmem *vmem; if (flags & UMA_SLAB_KERNEL) vmem = kernel_arena; else panic("UMA: page_free used with invalid flags %x", flags); kmem_free(vmem, (vm_offset_t)mem, size); } /* * Zero fill initializer * * Arguments/Returns follow uma_init specifications */ static int zero_init(void *mem, int size, int flags) { bzero(mem, size); return (0); } /* * Finish creating a small uma keg. This calculates ipers, and the keg size. * * Arguments * keg The zone we should initialize * * Returns * Nothing */ static void keg_small_init(uma_keg_t keg) { u_int rsize; u_int memused; u_int wastedspace; u_int shsize; u_int slabsize; if (keg->uk_flags & UMA_ZONE_PCPU) { u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU; slabsize = sizeof(struct pcpu); keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu), PAGE_SIZE); } else { slabsize = UMA_SLAB_SIZE; keg->uk_ppera = 1; } /* * Calculate the size of each allocation (rsize) according to * alignment. If the requested size is smaller than we have * allocation bits for we round it up. */ rsize = keg->uk_size; if (rsize < slabsize / SLAB_SETSIZE) rsize = slabsize / SLAB_SETSIZE; if (rsize & keg->uk_align) rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); keg->uk_rsize = rsize; KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || keg->uk_rsize < sizeof(struct pcpu), ("%s: size %u too large", __func__, keg->uk_rsize)); if (keg->uk_flags & UMA_ZONE_OFFPAGE) shsize = 0; else shsize = sizeof(struct uma_slab); keg->uk_ipers = (slabsize - shsize) / rsize; KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); memused = keg->uk_ipers * rsize + shsize; wastedspace = slabsize - memused; /* * We can't do OFFPAGE if we're internal or if we've been * asked to not go to the VM for buckets. If we do this we * may end up going to the VM for slabs which we do not * want to do if we're UMA_ZFLAG_CACHEONLY as a result * of UMA_ZONE_VM, which clearly forbids it. */ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) return; /* * See if using an OFFPAGE slab will limit our waste. Only do * this if it permits more items per-slab. * * XXX We could try growing slabsize to limit max waste as well. * Historically this was not done because the VM could not * efficiently handle contiguous allocations. 
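To make the trade-off above concrete, here is a worked example of the decision that follows. The numbers are assumptions chosen for illustration: a 4096-byte slab, a 96-byte in-page slab header and UMA_MAX_WASTE == 10; the real values come from UMA_SLAB_SIZE, sizeof(struct uma_slab) and the kernel's definition of UMA_MAX_WASTE.

#include <stdio.h>

/* Assumed values for illustration only. */
#define SLABSIZE	4096
#define SHSIZE		96
#define MAX_WASTE	10

int
main(void)
{
	int rsize = 1024;	/* hypothetical aligned item size */
	int ipers = (SLABSIZE - SHSIZE) / rsize;		/* 3 items in-page */
	int wasted = SLABSIZE - (ipers * rsize + SHSIZE);	/* 928 bytes */
	int ipers_offpage = SLABSIZE / rsize;			/* 4 items off-page */

	if (wasted >= SLABSIZE / MAX_WASTE && ipers_offpage > ipers)
		printf("OFFPAGE: %d -> %d items, waste was %d bytes\n",
		    ipers, ipers_offpage, wasted);
	else
		printf("keep the header in the page (waste %d bytes)\n",
		    wasted);
	return (0);
}

With these numbers the waste (928 bytes) exceeds the allowed slack (4096 / 10 = 409 bytes) and an off-page slab header recovers a fourth item per slab, so the keg would be switched to UMA_ZONE_OFFPAGE, which is exactly what the code below does.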
*/ if ((wastedspace >= slabsize / UMA_MAX_WASTE) && (keg->uk_ipers < (slabsize / keg->uk_rsize))) { keg->uk_ipers = slabsize / keg->uk_rsize; KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); CTR6(KTR_UMA, "UMA decided we need offpage slab headers for " "keg: %s(%p), calculated wastedspace = %d, " "maximum wasted space allowed = %d, " "calculated ipers = %d, " "new wasted space = %d\n", keg->uk_name, keg, wastedspace, slabsize / UMA_MAX_WASTE, keg->uk_ipers, slabsize - keg->uk_ipers * keg->uk_rsize); keg->uk_flags |= UMA_ZONE_OFFPAGE; } if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) keg->uk_flags |= UMA_ZONE_HASH; } /* * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be * more complicated. * * Arguments * keg The keg we should initialize * * Returns * Nothing */ static void keg_large_init(uma_keg_t keg) { u_int shsize; KASSERT(keg != NULL, ("Keg is null in keg_large_init")); KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); keg->uk_ipers = 1; keg->uk_rsize = keg->uk_size; /* Check whether we have enough space to not do OFFPAGE. */ if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { shsize = sizeof(struct uma_slab); if (shsize & UMA_ALIGN_PTR) shsize = (shsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1); if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) { /* * We can't do OFFPAGE if we're internal, in which case * we need an extra page per allocation to contain the * slab header. */ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0) keg->uk_flags |= UMA_ZONE_OFFPAGE; else keg->uk_ppera++; } } if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) keg->uk_flags |= UMA_ZONE_HASH; } static void keg_cachespread_init(uma_keg_t keg) { int alignsize; int trailer; int pages; int rsize; KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); alignsize = keg->uk_align + 1; rsize = keg->uk_size; /* * We want one item to start on every align boundary in a page. To * do this we will span pages. We will also extend the item by the * size of align if it is an even multiple of align. Otherwise, it * would fall on the same boundary every time. */ if (rsize & keg->uk_align) rsize = (rsize & ~keg->uk_align) + alignsize; if ((rsize & alignsize) == 0) rsize += alignsize; trailer = rsize - keg->uk_size; pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; pages = MIN(pages, (128 * 1024) / PAGE_SIZE); keg->uk_rsize = rsize; keg->uk_ppera = pages; keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; KASSERT(keg->uk_ipers <= SLAB_SETSIZE, ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, keg->uk_ipers)); } /* * Keg header ctor. This initializes all fields, locks, etc. And inserts * the keg onto the global keg list. 
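 * (Editor's worked example for keg_large_init() above, not part of this
 * change; PAGE_SIZE is assumed to be 4096: a keg with uk_size = 9000
 * gets uk_ppera = howmany(9000, 4096) = 3 pages, uk_ipers = 1 and
 * uk_rsize = 9000.  The slack of 3 * 4096 - 9000 = 3288 bytes easily
 * holds the pointer-aligned slab header, so OFFPAGE is not needed; only
 * when the slack is smaller than the header does the keg go OFFPAGE or,
 * for internal kegs, grow by one extra page per allocation.)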
* * Arguments/Returns follow uma_ctor specifications * udata Actually uma_kctor_args */ static int keg_ctor(void *mem, int size, void *udata, int flags) { struct uma_kctor_args *arg = udata; uma_keg_t keg = mem; uma_zone_t zone; bzero(keg, size); keg->uk_size = arg->size; keg->uk_init = arg->uminit; keg->uk_fini = arg->fini; keg->uk_align = arg->align; keg->uk_cursor = 0; keg->uk_free = 0; keg->uk_reserve = 0; keg->uk_pages = 0; keg->uk_flags = arg->flags; keg->uk_slabzone = NULL; /* * The master zone is passed to us at keg-creation time. */ zone = arg->zone; keg->uk_name = zone->uz_name; if (arg->flags & UMA_ZONE_VM) keg->uk_flags |= UMA_ZFLAG_CACHEONLY; if (arg->flags & UMA_ZONE_ZINIT) keg->uk_init = zero_init; if (arg->flags & UMA_ZONE_MALLOC) keg->uk_flags |= UMA_ZONE_VTOSLAB; if (arg->flags & UMA_ZONE_PCPU) #ifdef SMP keg->uk_flags |= UMA_ZONE_OFFPAGE; #else keg->uk_flags &= ~UMA_ZONE_PCPU; #endif if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { keg_cachespread_init(keg); } else { if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab))) keg_large_init(keg); else keg_small_init(keg); } if (keg->uk_flags & UMA_ZONE_OFFPAGE) keg->uk_slabzone = slabzone; /* * If we haven't booted yet we need allocations to go through the * startup cache until the vm is ready. */ if (booted < UMA_STARTUP2) keg->uk_allocf = startup_alloc; #ifdef UMA_MD_SMALL_ALLOC else if (keg->uk_ppera == 1) keg->uk_allocf = uma_small_alloc; #endif else keg->uk_allocf = page_alloc; #ifdef UMA_MD_SMALL_ALLOC if (keg->uk_ppera == 1) keg->uk_freef = uma_small_free; else #endif keg->uk_freef = page_free; /* * Initialize keg's lock */ KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); /* * If we're putting the slab header in the actual page we need to * figure out where in each page it goes. This calculates a right * justified offset into the memory on an ALIGN_PTR boundary. */ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { u_int totsize; /* Size of the slab struct and free list */ totsize = sizeof(struct uma_slab); if (totsize & UMA_ALIGN_PTR) totsize = (totsize & ~UMA_ALIGN_PTR) + (UMA_ALIGN_PTR + 1); keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; /* * The only way the following is possible is if with our * UMA_ALIGN_PTR adjustments we are now bigger than * UMA_SLAB_SIZE. I haven't checked whether this is * mathematically possible for all cases, so we make * sure here anyway. */ totsize = keg->uk_pgoff + sizeof(struct uma_slab); if (totsize > PAGE_SIZE * keg->uk_ppera) { printf("zone %s ipers %d rsize %d size %d\n", zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size); panic("UMA slab won't fit."); } } if (keg->uk_flags & UMA_ZONE_HASH) hash_alloc(&keg->uk_hash); CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", keg, zone->uz_name, zone, (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, keg->uk_free); LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); rw_wlock(&uma_rwlock); LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); rw_wunlock(&uma_rwlock); return (0); } /* * Zone header ctor. This initializes all fields, locks, etc. 
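 * (Editor's illustrative sketch, not part of this change: the arguments
 * consumed here are the ones assembled by uma_zcreate().  A minimal,
 * hypothetical caller of the public interface would be:
 *
 *        struct foo { int f_state; };        hypothetical consumer type
 *        uma_zone_t foo_zone;
 *
 *        foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *        ...
 *        p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *        uma_zfree(foo_zone, p);
 *
 * Passing NULL for all four hooks is legal; under INVARIANTS the trash
 * checkers are substituted for them, as uma_zcreate() notes below.)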
* * Arguments/Returns follow uma_ctor specifications * udata Actually uma_zctor_args */ static int zone_ctor(void *mem, int size, void *udata, int flags) { struct uma_zctor_args *arg = udata; uma_zone_t zone = mem; uma_zone_t z; uma_keg_t keg; bzero(zone, size); zone->uz_name = arg->name; zone->uz_ctor = arg->ctor; zone->uz_dtor = arg->dtor; zone->uz_slab = zone_fetch_slab; zone->uz_init = NULL; zone->uz_fini = NULL; zone->uz_allocs = 0; zone->uz_frees = 0; zone->uz_fails = 0; zone->uz_sleeps = 0; zone->uz_count = 0; zone->uz_count_min = 0; zone->uz_flags = 0; zone->uz_warning = NULL; /* The domain structures follow the cpu structures. */ zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; timevalclear(&zone->uz_ratecheck); keg = arg->keg; ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); /* * This is a pure cache zone, no kegs. */ if (arg->import) { if (arg->flags & UMA_ZONE_VM) arg->flags |= UMA_ZFLAG_CACHEONLY; zone->uz_flags = arg->flags; zone->uz_size = arg->size; zone->uz_import = arg->import; zone->uz_release = arg->release; zone->uz_arg = arg->arg; zone->uz_lockptr = &zone->uz_lock; rw_wlock(&uma_rwlock); LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); rw_wunlock(&uma_rwlock); goto out; } /* * Use the regular zone/keg/slab allocator. */ zone->uz_import = (uma_import)zone_import; zone->uz_release = (uma_release)zone_release; zone->uz_arg = zone; if (arg->flags & UMA_ZONE_SECONDARY) { KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); zone->uz_init = arg->uminit; zone->uz_fini = arg->fini; zone->uz_lockptr = &keg->uk_lock; zone->uz_flags |= UMA_ZONE_SECONDARY; rw_wlock(&uma_rwlock); ZONE_LOCK(zone); LIST_FOREACH(z, &keg->uk_zones, uz_link) { if (LIST_NEXT(z, uz_link) == NULL) { LIST_INSERT_AFTER(z, zone, uz_link); break; } } ZONE_UNLOCK(zone); rw_wunlock(&uma_rwlock); } else if (keg == NULL) { if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, arg->align, arg->flags)) == NULL) return (ENOMEM); } else { struct uma_kctor_args karg; int error; /* We should only be here from uma_startup() */ karg.size = arg->size; karg.uminit = arg->uminit; karg.fini = arg->fini; karg.align = arg->align; karg.flags = arg->flags; karg.zone = zone; error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, flags); if (error) return (error); } /* * Link in the first keg. */ zone->uz_klink.kl_keg = keg; LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); zone->uz_lockptr = &keg->uk_lock; zone->uz_size = keg->uk_size; zone->uz_flags |= (keg->uk_flags & (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); /* * Some internal zones don't have room allocated for the per cpu * caches. If we're internal, bail out here. */ if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, ("Secondary zone requested UMA_ZFLAG_INTERNAL")); return (0); } out: if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0) zone->uz_count = bucket_select(zone->uz_size); else zone->uz_count = BUCKET_MAX; zone->uz_count_min = zone->uz_count; return (0); } /* * Keg header dtor. This frees all data, destroys locks, frees the hash * table and removes the keg from the global list. * * Arguments/Returns follow uma_dtor specifications * udata unused */ static void keg_dtor(void *arg, int size, void *udata) { uma_keg_t keg; keg = (uma_keg_t)arg; KEG_LOCK(keg); if (keg->uk_free != 0) { printf("Freed UMA keg (%s) was not empty (%d items). " " Lost %d pages of memory.\n", keg->uk_name ? 
keg->uk_name : "", keg->uk_free, keg->uk_pages); } KEG_UNLOCK(keg); hash_free(&keg->uk_hash); KEG_LOCK_FINI(keg); } /* * Zone header dtor. * * Arguments/Returns follow uma_dtor specifications * udata unused */ static void zone_dtor(void *arg, int size, void *udata) { uma_klink_t klink; uma_zone_t zone; uma_keg_t keg; zone = (uma_zone_t)arg; keg = zone_first_keg(zone); if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) cache_drain(zone); rw_wlock(&uma_rwlock); LIST_REMOVE(zone, uz_link); rw_wunlock(&uma_rwlock); /* * XXX there are some races here where * the zone can be drained but zone lock * released and then refilled before we * remove it... we don't care for now. */ zone_drain_wait(zone, M_WAITOK); /* * Unlink all of our kegs. */ while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { klink->kl_keg = NULL; LIST_REMOVE(klink, kl_link); if (klink == &zone->uz_klink) continue; free(klink, M_TEMP); } /* * We only destroy kegs from non-secondary zones. */ if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { rw_wlock(&uma_rwlock); LIST_REMOVE(keg, uk_link); rw_wunlock(&uma_rwlock); zone_free_item(kegs, keg, NULL, SKIP_NONE); } ZONE_LOCK_FINI(zone); } /* * Traverses every zone in the system and calls a callback * * Arguments: * zfunc A pointer to a function which accepts a zone * as an argument. * * Returns: * Nothing */ static void zone_foreach(void (*zfunc)(uma_zone_t)) { uma_keg_t keg; uma_zone_t zone; rw_rlock(&uma_rwlock); LIST_FOREACH(keg, &uma_kegs, uk_link) { LIST_FOREACH(zone, &keg->uk_zones, uz_link) zfunc(zone); } rw_runlock(&uma_rwlock); } /* Public functions */ /* See uma.h */ void uma_startup(void *mem, int npages) { struct uma_zctor_args args; uma_keg_t masterkeg; uintptr_t m; int zsize; int ksize; rw_init(&uma_rwlock, "UMA lock"); ksize = sizeof(struct uma_keg) + (sizeof(struct uma_domain) * vm_ndomains); zsize = sizeof(struct uma_zone) + (sizeof(struct uma_cache) * mp_ncpus) + (sizeof(struct uma_zone_domain) * vm_ndomains); /* Use bootpages memory for the zone of zones and zone of kegs.
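 * (Editor's worked example, not part of this change; the numbers are
 * illustrative: with zsize = 4200, ksize = 1100, a 64-byte cache line and
 * 4096-byte pages, the carve-up below places "zones" at offset 0, "kegs"
 * at 4224 (zsize rounded up to the cache line), "masterkeg" at 8448, and
 * leaves the cursor at 9600, which rounds up to 12288.  Three of the
 * npages handed in are consumed by these bootstrap structures; the rest
 * become the boot_pages pool that startup_alloc() hands out.)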
*/ m = (uintptr_t)mem; zones = (uma_zone_t)m; m += roundup(zsize, CACHE_LINE_SIZE); kegs = (uma_zone_t)m; m += roundup(zsize, CACHE_LINE_SIZE); masterkeg = (uma_keg_t)m; m += roundup(ksize, CACHE_LINE_SIZE); m = roundup(m, PAGE_SIZE); npages -= (m - (uintptr_t)mem) / PAGE_SIZE; mem = (void *)m; /* "manually" create the initial zone */ memset(&args, 0, sizeof(args)); args.name = "UMA Kegs"; args.size = ksize; args.ctor = keg_ctor; args.dtor = keg_dtor; args.uminit = zero_init; args.fini = NULL; args.keg = masterkeg; args.align = 32 - 1; args.flags = UMA_ZFLAG_INTERNAL; zone_ctor(kegs, zsize, &args, M_WAITOK); mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF); bootmem = mem; boot_pages = npages; args.name = "UMA Zones"; args.size = sizeof(struct uma_zone) + (sizeof(struct uma_cache) * (mp_maxid + 1)) + (sizeof(struct uma_zone_domain) * vm_ndomains); args.ctor = zone_ctor; args.dtor = zone_dtor; args.uminit = zero_init; args.fini = NULL; args.keg = NULL; args.align = 32 - 1; args.flags = UMA_ZFLAG_INTERNAL; zone_ctor(zones, zsize, &args, M_WAITOK); /* Now make a zone for slab headers */ slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_slab), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); hashzone = uma_zcreate("UMA Hash", sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); bucket_init(); booted = UMA_STARTUP; } /* see uma.h */ void uma_startup2(void) { booted = UMA_STARTUP2; bucket_enable(); sx_init(&uma_drain_lock, "umadrain"); } /* * Initialize our callout handle * */ static void uma_startup3(void) { callout_init(&uma_callout, 1); callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); } static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, int align, uint32_t flags) { struct uma_kctor_args args; args.size = size; args.uminit = uminit; args.fini = fini; args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; args.flags = flags; args.zone = zone; return (zone_alloc_item(kegs, &args, 0, M_WAITOK)); } /* See uma.h */ void uma_set_align(int align) { if (align != UMA_ALIGN_CACHE) uma_align_cache = align; } /* See uma.h */ uma_zone_t uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, uma_init uminit, uma_fini fini, int align, uint32_t flags) { struct uma_zctor_args args; uma_zone_t res; bool locked; KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", align, name)); /* This stuff is essential for the zone ctor */ memset(&args, 0, sizeof(args)); args.name = name; args.size = size; args.ctor = ctor; args.dtor = dtor; args.uminit = uminit; args.fini = fini; #ifdef INVARIANTS /* * If a zone is being created with an empty constructor and * destructor, pass UMA constructor/destructor which checks for * memory use after free. 
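 * (Editor's note, not part of this change: the hypothetical foo_zone from
 * the sketch at zone_ctor() above passes NULL for all four hooks, so an
 * INVARIANTS kernel installs trash_ctor/trash_dtor/trash_init/trash_fini
 * here and a write to an item after uma_zfree() is reported the next time
 * that item is handed out.)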
*/ if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { args.ctor = trash_ctor; args.dtor = trash_dtor; args.uminit = trash_init; args.fini = trash_fini; } #endif args.align = align; args.flags = flags; args.keg = NULL; if (booted < UMA_STARTUP2) { locked = false; } else { sx_slock(&uma_drain_lock); locked = true; } res = zone_alloc_item(zones, &args, 0, M_WAITOK); if (locked) sx_sunlock(&uma_drain_lock); return (res); } /* See uma.h */ uma_zone_t uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, uma_init zinit, uma_fini zfini, uma_zone_t master) { struct uma_zctor_args args; uma_keg_t keg; uma_zone_t res; bool locked; keg = zone_first_keg(master); memset(&args, 0, sizeof(args)); args.name = name; args.size = keg->uk_size; args.ctor = ctor; args.dtor = dtor; args.uminit = zinit; args.fini = zfini; args.align = keg->uk_align; args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; args.keg = keg; if (booted < UMA_STARTUP2) { locked = false; } else { sx_slock(&uma_drain_lock); locked = true; } /* XXX Attaches only one keg of potentially many. */ res = zone_alloc_item(zones, &args, 0, M_WAITOK); if (locked) sx_sunlock(&uma_drain_lock); return (res); } /* See uma.h */ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease, void *arg, int flags) { struct uma_zctor_args args; memset(&args, 0, sizeof(args)); args.name = name; args.size = size; args.ctor = ctor; args.dtor = dtor; args.uminit = zinit; args.fini = zfini; args.import = zimport; args.release = zrelease; args.arg = arg; args.align = 0; args.flags = flags; return (zone_alloc_item(zones, &args, 0, M_WAITOK)); } static void zone_lock_pair(uma_zone_t a, uma_zone_t b) { if (a < b) { ZONE_LOCK(a); mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); } else { ZONE_LOCK(b); mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); } } static void zone_unlock_pair(uma_zone_t a, uma_zone_t b) { ZONE_UNLOCK(a); ZONE_UNLOCK(b); } int uma_zsecond_add(uma_zone_t zone, uma_zone_t master) { uma_klink_t klink; uma_klink_t kl; int error; error = 0; klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); zone_lock_pair(zone, master); /* * zone must use vtoslab() to resolve objects and must already be * a secondary. */ if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { error = EINVAL; goto out; } /* * The new master must also use vtoslab(). */ if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { error = EINVAL; goto out; } /* * The underlying object must be the same size. rsize * may be different. */ if (master->uz_size != zone->uz_size) { error = E2BIG; goto out; } /* * Put it at the end of the list. 
*/ klink->kl_keg = zone_first_keg(master); LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { if (LIST_NEXT(kl, kl_link) == NULL) { LIST_INSERT_AFTER(kl, klink, kl_link); break; } } klink = NULL; zone->uz_flags |= UMA_ZFLAG_MULTI; zone->uz_slab = zone_fetch_slab_multi; out: zone_unlock_pair(zone, master); if (klink != NULL) free(klink, M_TEMP); return (error); } /* See uma.h */ void uma_zdestroy(uma_zone_t zone) { sx_slock(&uma_drain_lock); zone_free_item(zones, zone, NULL, SKIP_NONE); sx_sunlock(&uma_drain_lock); } void uma_zwait(uma_zone_t zone) { void *item; item = uma_zalloc_arg(zone, NULL, M_WAITOK); uma_zfree(zone, item); } /* See uma.h */ void * uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) { uma_zone_domain_t zdom; uma_bucket_t bucket; uma_cache_t cache; void *item; int cpu, domain, lockfail; /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); /* This is the fast path allocation */ CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d", curthread, zone->uz_name, zone, flags); if (flags & M_WAITOK) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "uma_zalloc_arg: zone \"%s\"", zone->uz_name); } KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zalloc_arg: called with spinlock or critical section held")); #ifdef DEBUG_MEMGUARD if (memguard_cmp_zone(zone)) { item = memguard_alloc(zone->uz_size, flags); if (item != NULL) { if (zone->uz_init != NULL && zone->uz_init(item, zone->uz_size, flags) != 0) return (NULL); if (zone->uz_ctor != NULL && zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { zone->uz_fini(item, zone->uz_size); return (NULL); } return (item); } /* This is unfortunate but should not be fatal. */ } #endif /* * If possible, allocate from the per-CPU cache. There are two * requirements for safe access to the per-CPU cache: (1) the thread * accessing the cache must not be preempted or yield during access, * and (2) the thread must not migrate CPUs without switching which * cache it accesses. We rely on a critical section to prevent * preemption and migration. We release the critical section in * order to acquire the zone mutex if we are unable to allocate from * the current cache; when we re-acquire the critical section, we * must detect and handle migration if it has occurred. */ critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; zalloc_start: bucket = cache->uc_allocbucket; if (bucket != NULL && bucket->ub_cnt > 0) { bucket->ub_cnt--; item = bucket->ub_bucket[bucket->ub_cnt]; #ifdef INVARIANTS bucket->ub_bucket[bucket->ub_cnt] = NULL; #endif KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); cache->uc_allocs++; critical_exit(); if (zone->uz_ctor != NULL && zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { atomic_add_long(&zone->uz_fails, 1); zone_free_item(zone, item, udata, SKIP_DTOR); return (NULL); } #ifdef INVARIANTS uma_dbg_alloc(zone, NULL, item); #endif if (flags & M_ZERO) uma_zero_item(item, zone); return (item); } /* * We have run out of items in our alloc bucket. * See if we can switch with our free bucket. */ bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt > 0) { CTR2(KTR_UMA, "uma_zalloc: zone %s(%p) swapping empty with alloc", zone->uz_name, zone); cache->uc_freebucket = cache->uc_allocbucket; cache->uc_allocbucket = bucket; goto zalloc_start; } /* * Discard any empty allocation bucket while we hold no locks. 
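 * (Editor's illustrative sketch, not part of this change: the WITNESS
 * warning and td_critnest assertion above encode the caller contract.
 * A context that cannot sleep must use M_NOWAIT and tolerate failure:
 *
 *        p = uma_zalloc(foo_zone, M_NOWAIT);
 *        if (p == NULL)
 *                return (ENOMEM);
 *
 * M_WAITOK may sleep, so WITNESS warns when it is used where sleeping is
 * forbidden, and the td_critnest assertion catches any allocation made
 * from inside a critical section regardless of the flags.)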
*/ bucket = cache->uc_allocbucket; cache->uc_allocbucket = NULL; critical_exit(); if (bucket != NULL) bucket_free(zone, bucket, udata); if (zone->uz_flags & UMA_ZONE_NUMA) domain = PCPU_GET(domain); else domain = 0; /* Short-circuit for zones without buckets and low memory. */ if (zone->uz_count == 0 || bucketdisable) goto zalloc_item; /* * Attempt to retrieve the item from the per-CPU cache has failed, so * we must go back to the zone. This requires the zone lock, so we * must drop the critical section, then re-acquire it when we go back * to the cache. Since the critical section is released, we may be * preempted or migrate. As such, make sure not to maintain any * thread-local state specific to the cache from prior to releasing * the critical section. */ lockfail = 0; if (ZONE_TRYLOCK(zone) == 0) { /* Record contention to size the buckets. */ ZONE_LOCK(zone); lockfail = 1; } critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; /* * Since we have locked the zone we may as well send back our stats. */ atomic_add_long(&zone->uz_allocs, cache->uc_allocs); atomic_add_long(&zone->uz_frees, cache->uc_frees); cache->uc_allocs = 0; cache->uc_frees = 0; /* See if we lost the race to fill the cache. */ if (cache->uc_allocbucket != NULL) { ZONE_UNLOCK(zone); goto zalloc_start; } /* * Check the zone's cache of buckets. */ zdom = &zone->uz_domain[domain]; if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { KASSERT(bucket->ub_cnt != 0, ("uma_zalloc_arg: Returning an empty bucket.")); LIST_REMOVE(bucket, ub_link); cache->uc_allocbucket = bucket; ZONE_UNLOCK(zone); goto zalloc_start; } /* We are no longer associated with this CPU. */ critical_exit(); /* * We bump the uz count when the cache size is insufficient to * handle the working set. */ if (lockfail && zone->uz_count < BUCKET_MAX) zone->uz_count++; ZONE_UNLOCK(zone); /* * Now lets just fill a bucket and put it on the free list. If that * works we'll restart the allocation from the beginning and it * will use the just filled bucket. */ bucket = zone_alloc_bucket(zone, udata, domain, flags); CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", zone->uz_name, zone, bucket); if (bucket != NULL) { ZONE_LOCK(zone); critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; /* * See if we lost the race or were migrated. Cache the * initialized bucket to make this less likely or claim * the memory directly. */ if (cache->uc_allocbucket != NULL || (zone->uz_flags & UMA_ZONE_NUMA && domain != PCPU_GET(domain))) LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); else cache->uc_allocbucket = bucket; ZONE_UNLOCK(zone); goto zalloc_start; } /* * We may not be able to get a bucket so return an actual item. */ zalloc_item: item = zone_alloc_item(zone, udata, domain, flags); return (item); } /* * Find a slab with some space. Prefer slabs that are partially used over those * that are totally full. This helps to reduce fragmentation. * * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check * only 'domain'. 
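 * (Editor's worked example, not part of this change: with vm_ndomains = 4
 * and a starting domain of 2, an rr search visits the per-domain lists in
 * the order 2, 3, 0, 1, preferring a partial slab over a wholly free one
 * within each domain, and gives up only after the full cycle; with rr = 0
 * only domain 2 is examined.)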
*/ static uma_slab_t keg_first_slab(uma_keg_t keg, int domain, int rr) { uma_domain_t dom; uma_slab_t slab; int start; KASSERT(domain >= 0 && domain < vm_ndomains, ("keg_first_slab: domain %d out of range", domain)); slab = NULL; start = domain; do { dom = &keg->uk_domain[domain]; if (!LIST_EMPTY(&dom->ud_part_slab)) return (LIST_FIRST(&dom->ud_part_slab)); if (!LIST_EMPTY(&dom->ud_free_slab)) { slab = LIST_FIRST(&dom->ud_free_slab); LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); return (slab); } if (rr) domain = (domain + 1) % vm_ndomains; } while (domain != start); return (NULL); } static uma_slab_t keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags) { uma_domain_t dom; uma_slab_t slab; int allocflags, domain, reserve, rr, start; mtx_assert(&keg->uk_lock, MA_OWNED); slab = NULL; reserve = 0; allocflags = flags; if ((flags & M_USE_RESERVE) == 0) reserve = keg->uk_reserve; /* * Round-robin for non first-touch zones when there is more than one * domain. */ rr = (zone->uz_flags & UMA_ZONE_NUMA) == 0 && vm_ndomains != 1; if (rr) { keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains; domain = start = keg->uk_cursor; /* Only block on the second pass. */ if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK) allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT; } else domain = start = rdomain; again: do { if (keg->uk_free > reserve && (slab = keg_first_slab(keg, domain, rr)) != NULL) { MPASS(slab->us_keg == keg); return (slab); } /* * M_NOVM means don't ask at all! */ if (flags & M_NOVM) break; if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { keg->uk_flags |= UMA_ZFLAG_FULL; /* * If this is not a multi-zone, set the FULL bit. * Otherwise slab_multi() takes care of it. */ if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { zone->uz_flags |= UMA_ZFLAG_FULL; zone_log_warning(zone); zone_maxaction(zone); } if (flags & M_NOWAIT) return (NULL); zone->uz_sleeps++; msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); continue; } slab = keg_alloc_slab(keg, zone, domain, allocflags); /* * If we got a slab here it's safe to mark it partially used * and return. We assume that the caller is going to remove * at least one item. */ if (slab) { MPASS(slab->us_keg == keg); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); return (slab); } if (rr) { keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains; domain = keg->uk_cursor; } } while (domain != start); /* Retry domain scan with blocking. */ if (allocflags != flags) { allocflags = flags; goto again; } /* * We might not have been able to get a slab but another cpu * could have while we were unlocked. Check again before we * fail. */ if (keg->uk_free > reserve && (slab = keg_first_slab(keg, domain, rr)) != NULL) { MPASS(slab->us_keg == keg); return (slab); } return (NULL); } static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) { uma_slab_t slab; if (keg == NULL) { keg = zone_first_keg(zone); KEG_LOCK(keg); } for (;;) { slab = keg_fetch_slab(keg, zone, domain, flags); if (slab) return (slab); if (flags & (M_NOWAIT | M_NOVM)) break; } KEG_UNLOCK(keg); return (NULL); } /* * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns * with the keg locked. On NULL no lock is held. * * The last pointer is used to seed the search. It is not required. 
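 * (Editor's note, not part of this change: when keg_fetch_slab() above is
 * round-robining over several domains it demotes an M_WAITOK request to
 * M_NOWAIT for the first sweep, so a blocking caller still prefers any
 * domain that can be satisfied immediately; only after every domain has
 * failed are the original flags restored and the scan repeated, at which
 * point keg_alloc_slab() is allowed to sleep or the keg limit is waited
 * on.)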
*/ static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags) { uma_klink_t klink; uma_slab_t slab; uma_keg_t keg; int flags; int empty; int full; /* * Don't wait on the first pass. This will skip limit tests * as well. We don't want to block if we can find a provider * without blocking. */ flags = (rflags & ~M_WAITOK) | M_NOWAIT; /* * Use the last slab allocated as a hint for where to start * the search. */ if (last != NULL) { slab = keg_fetch_slab(last, zone, domain, flags); if (slab) return (slab); KEG_UNLOCK(last); } /* * Loop until we have a slab incase of transient failures * while M_WAITOK is specified. I'm not sure this is 100% * required but we've done it for so long now. */ for (;;) { empty = 0; full = 0; /* * Search the available kegs for slabs. Be careful to hold the * correct lock while calling into the keg layer. */ LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { keg = klink->kl_keg; KEG_LOCK(keg); if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { slab = keg_fetch_slab(keg, zone, domain, flags); if (slab) return (slab); } if (keg->uk_flags & UMA_ZFLAG_FULL) full++; else empty++; KEG_UNLOCK(keg); } if (rflags & (M_NOWAIT | M_NOVM)) break; flags = rflags; /* * All kegs are full. XXX We can't atomically check all kegs * and sleep so just sleep for a short period and retry. */ if (full && !empty) { ZONE_LOCK(zone); zone->uz_flags |= UMA_ZFLAG_FULL; zone->uz_sleeps++; zone_log_warning(zone); zone_maxaction(zone); msleep(zone, zone->uz_lockptr, PVM, "zonelimit", hz/100); zone->uz_flags &= ~UMA_ZFLAG_FULL; ZONE_UNLOCK(zone); continue; } } return (NULL); } static void * slab_alloc_item(uma_keg_t keg, uma_slab_t slab) { uma_domain_t dom; void *item; uint8_t freei; MPASS(keg == slab->us_keg); mtx_assert(&keg->uk_lock, MA_OWNED); freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); item = slab->us_data + (keg->uk_rsize * freei); slab->us_freecount--; keg->uk_free--; /* Move this slab to the full list */ if (slab->us_freecount == 0) { LIST_REMOVE(slab, us_link); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); } return (item); } static int zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags) { uma_slab_t slab; uma_keg_t keg; int stripe; int i; slab = NULL; keg = NULL; /* Try to keep the buckets totally full */ for (i = 0; i < max; ) { if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL) break; keg = slab->us_keg; stripe = howmany(max, vm_ndomains); while (slab->us_freecount && i < max) { bucket[i++] = slab_alloc_item(keg, slab); if (keg->uk_free <= keg->uk_reserve) break; #if MAXMEMDOM > 1 /* * If the zone is striped we pick a new slab for every * N allocations. Eliminating this conditional will * instead pick a new domain for each bucket rather * than stripe within each bucket. The current option * produces more fragmentation and requires more cpu * time but yields better distribution. */ if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 && vm_ndomains > 1 && --stripe == 0) break; #endif } /* Don't block if we allocated any successfully. */ flags &= ~M_WAITOK; flags |= M_NOWAIT; } if (slab != NULL) KEG_UNLOCK(keg); return i; } static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) { uma_bucket_t bucket; int max; /* Don't wait for buckets, preserve caller's NOVM setting. 
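 * (Editor's worked example, not part of this change: in zone_import()
 * above, filling a bucket of max = 128 items on a 4-domain machine gives
 * stripe = howmany(128, 4) = 32, so a round-robin zone switches to a
 * fresh slab, and typically a fresh domain, after every 32 items.  That
 * spreads a single bucket across domains at the cost of the extra slab
 * changes the comment above describes.)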
*/ bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); if (bucket == NULL) return (NULL); max = MIN(bucket->ub_entries, zone->uz_count); bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, max, domain, flags); /* * Initialize the memory if necessary. */ if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { int i; for (i = 0; i < bucket->ub_cnt; i++) if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, flags) != 0) break; /* * If we couldn't initialize the whole bucket, put the * rest back onto the freelist. */ if (i != bucket->ub_cnt) { zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], bucket->ub_cnt - i); #ifdef INVARIANTS bzero(&bucket->ub_bucket[i], sizeof(void *) * (bucket->ub_cnt - i)); #endif bucket->ub_cnt = i; } } if (bucket->ub_cnt == 0) { bucket_free(zone, bucket, udata); atomic_add_long(&zone->uz_fails, 1); return (NULL); } return (bucket); } /* * Allocates a single item from a zone. * * Arguments * zone The zone to alloc for. * udata The data to be passed to the constructor. * flags M_WAITOK, M_NOWAIT, M_ZERO. * * Returns * NULL if there is no memory and M_NOWAIT is set * An item if successful */ static void * zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) { void *item; item = NULL; if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) goto fail; atomic_add_long(&zone->uz_allocs, 1); /* * We have to call both the zone's init (not the keg's init) * and the zone's ctor. This is because the item is going from * a keg slab directly to the user, and the user is expecting it * to be both zone-init'd as well as zone-ctor'd. */ if (zone->uz_init != NULL) { if (zone->uz_init(item, zone->uz_size, flags) != 0) { zone_free_item(zone, item, udata, SKIP_FINI); goto fail; } } if (zone->uz_ctor != NULL) { if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { zone_free_item(zone, item, udata, SKIP_DTOR); goto fail; } } #ifdef INVARIANTS uma_dbg_alloc(zone, NULL, item); #endif if (flags & M_ZERO) uma_zero_item(item, zone); CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, zone->uz_name, zone); return (item); fail: CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", zone->uz_name, zone); atomic_add_long(&zone->uz_fails, 1); return (NULL); } /* See uma.h */ void uma_zfree_arg(uma_zone_t zone, void *item, void *udata) { uma_cache_t cache; uma_bucket_t bucket; uma_zone_domain_t zdom; int cpu, domain, lockfail; /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, zone->uz_name); KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), ("uma_zfree_arg: called with spinlock or critical section held")); /* uma_zfree(..., NULL) does nothing, to match free(9). */ if (item == NULL) return; #ifdef DEBUG_MEMGUARD if (is_memguard_addr(item)) { if (zone->uz_dtor != NULL) zone->uz_dtor(item, zone->uz_size, udata); if (zone->uz_fini != NULL) zone->uz_fini(item, zone->uz_size); memguard_free(item); return; } #endif #ifdef INVARIANTS if (zone->uz_flags & UMA_ZONE_MALLOC) uma_dbg_free(zone, udata, item); else uma_dbg_free(zone, NULL, item); #endif if (zone->uz_dtor != NULL) zone->uz_dtor(item, zone->uz_size, udata); /* * The race here is acceptable. If we miss it we'll just have to wait * a little longer for the limits to be reset. */ if (zone->uz_flags & UMA_ZFLAG_FULL) goto zfree_item; /* * If possible, free to the per-CPU cache. 
There are two * requirements for safe access to the per-CPU cache: (1) the thread * accessing the cache must not be preempted or yield during access, * and (2) the thread must not migrate CPUs without switching which * cache it accesses. We rely on a critical section to prevent * preemption and migration. We release the critical section in * order to acquire the zone mutex if we are unable to free to the * current cache; when we re-acquire the critical section, we must * detect and handle migration if it has occurred. */ zfree_restart: critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; zfree_start: /* * Try to free into the allocbucket first to give LIFO ordering * for cache-hot datastructures. Spill over into the freebucket * if necessary. Alloc will swap them if one runs dry. */ bucket = cache->uc_allocbucket; if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, ("uma_zfree: Freeing to non free bucket index.")); bucket->ub_bucket[bucket->ub_cnt] = item; bucket->ub_cnt++; cache->uc_frees++; critical_exit(); return; } /* * We must go back the zone, which requires acquiring the zone lock, * which in turn means we must release and re-acquire the critical * section. Since the critical section is released, we may be * preempted or migrate. As such, make sure not to maintain any * thread-local state specific to the cache from prior to releasing * the critical section. */ critical_exit(); if (zone->uz_count == 0 || bucketdisable) goto zfree_item; lockfail = 0; if (ZONE_TRYLOCK(zone) == 0) { /* Record contention to size the buckets. */ ZONE_LOCK(zone); lockfail = 1; } critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; /* * Since we have locked the zone we may as well send back our stats. */ atomic_add_long(&zone->uz_allocs, cache->uc_allocs); atomic_add_long(&zone->uz_frees, cache->uc_frees); cache->uc_allocs = 0; cache->uc_frees = 0; bucket = cache->uc_freebucket; if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { ZONE_UNLOCK(zone); goto zfree_start; } cache->uc_freebucket = NULL; /* We are no longer associated with this CPU. */ critical_exit(); if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) domain = PCPU_GET(domain); else domain = 0; zdom = &zone->uz_domain[0]; /* Can we throw this on the zone full list? */ if (bucket != NULL) { CTR3(KTR_UMA, "uma_zfree: zone %s(%p) putting bucket %p on free list", zone->uz_name, zone, bucket); /* ub_cnt is pointing to the last free item */ KASSERT(bucket->ub_cnt != 0, ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); } /* * We bump the uz count when the cache size is insufficient to * handle the working set. */ if (lockfail && zone->uz_count < BUCKET_MAX) zone->uz_count++; ZONE_UNLOCK(zone); bucket = bucket_alloc(zone, udata, M_NOWAIT); CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", zone->uz_name, zone, bucket); if (bucket) { critical_enter(); cpu = curcpu; cache = &zone->uz_cpu[cpu]; if (cache->uc_freebucket == NULL && ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || domain == PCPU_GET(domain))) { cache->uc_freebucket = bucket; goto zfree_start; } /* * We lost the race, start over. We have to drop our * critical section to free the bucket. */ critical_exit(); bucket_free(zone, bucket, udata); goto zfree_restart; } /* * If nothing else caught this, we'll just do an internal free. 
*/ zfree_item: zone_free_item(zone, item, udata, SKIP_DTOR); return; } static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) { uma_domain_t dom; uint8_t freei; mtx_assert(&keg->uk_lock, MA_OWNED); MPASS(keg == slab->us_keg); dom = &keg->uk_domain[slab->us_domain]; /* Do we need to remove from any lists? */ if (slab->us_freecount+1 == keg->uk_ipers) { LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); } else if (slab->us_freecount == 0) { LIST_REMOVE(slab, us_link); LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); } /* Slab management. */ freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); slab->us_freecount++; /* Keg statistics. */ keg->uk_free++; } static void zone_release(uma_zone_t zone, void **bucket, int cnt) { void *item; uma_slab_t slab; uma_keg_t keg; uint8_t *mem; int clearfull; int i; clearfull = 0; keg = zone_first_keg(zone); KEG_LOCK(keg); for (i = 0; i < cnt; i++) { item = bucket[i]; if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); if (zone->uz_flags & UMA_ZONE_HASH) { slab = hash_sfind(&keg->uk_hash, mem); } else { mem += keg->uk_pgoff; slab = (uma_slab_t)mem; } } else { slab = vtoslab((vm_offset_t)item); if (slab->us_keg != keg) { KEG_UNLOCK(keg); keg = slab->us_keg; KEG_LOCK(keg); } } slab_free_item(keg, slab, item); if (keg->uk_flags & UMA_ZFLAG_FULL) { if (keg->uk_pages < keg->uk_maxpages) { keg->uk_flags &= ~UMA_ZFLAG_FULL; clearfull = 1; } /* * We can handle one more allocation. Since we're * clearing ZFLAG_FULL, wake up all procs blocked * on pages. This should be uncommon, so keeping this * simple for now (rather than adding count of blocked * threads etc). */ wakeup(keg); } } KEG_UNLOCK(keg); if (clearfull) { ZONE_LOCK(zone); zone->uz_flags &= ~UMA_ZFLAG_FULL; wakeup(zone); ZONE_UNLOCK(zone); } } /* * Frees a single item to any zone. 
* * Arguments: * zone The zone to free to * item The item we're freeing * udata User supplied data for the dtor * skip Skip dtors and finis */ static void zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) { #ifdef INVARIANTS if (skip == SKIP_NONE) { if (zone->uz_flags & UMA_ZONE_MALLOC) uma_dbg_free(zone, udata, item); else uma_dbg_free(zone, NULL, item); } #endif if (skip < SKIP_DTOR && zone->uz_dtor) zone->uz_dtor(item, zone->uz_size, udata); if (skip < SKIP_FINI && zone->uz_fini) zone->uz_fini(item, zone->uz_size); atomic_add_long(&zone->uz_frees, 1); zone->uz_release(zone->uz_arg, &item, 1); } /* See uma.h */ int uma_zone_set_max(uma_zone_t zone, int nitems) { uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return (0); KEG_LOCK(keg); keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; if (keg->uk_maxpages * keg->uk_ipers < nitems) keg->uk_maxpages += keg->uk_ppera; nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; KEG_UNLOCK(keg); return (nitems); } /* See uma.h */ int uma_zone_get_max(uma_zone_t zone) { int nitems; uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return (0); KEG_LOCK(keg); nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; KEG_UNLOCK(keg); return (nitems); } /* See uma.h */ void uma_zone_set_warning(uma_zone_t zone, const char *warning) { ZONE_LOCK(zone); zone->uz_warning = warning; ZONE_UNLOCK(zone); } /* See uma.h */ void uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) { ZONE_LOCK(zone); TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); ZONE_UNLOCK(zone); } /* See uma.h */ int uma_zone_get_cur(uma_zone_t zone) { int64_t nitems; u_int i; ZONE_LOCK(zone); nitems = zone->uz_allocs - zone->uz_frees; CPU_FOREACH(i) { /* * See the comment in sysctl_vm_zone_stats() regarding the * safety of accessing the per-cpu caches. With the zone lock * held, it is safe, but can potentially result in stale data. */ nitems += zone->uz_cpu[i].uc_allocs - zone->uz_cpu[i].uc_frees; } ZONE_UNLOCK(zone); return (nitems < 0 ? 
0 : nitems); } /* See uma.h */ void uma_zone_set_init(uma_zone_t zone, uma_init uminit) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); KEG_LOCK(keg); KASSERT(keg->uk_pages == 0, ("uma_zone_set_init on non-empty keg")); keg->uk_init = uminit; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_set_fini(uma_zone_t zone, uma_fini fini) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); KEG_LOCK(keg); KASSERT(keg->uk_pages == 0, ("uma_zone_set_fini on non-empty keg")); keg->uk_fini = fini; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) { ZONE_LOCK(zone); KASSERT(zone_first_keg(zone)->uk_pages == 0, ("uma_zone_set_zinit on non-empty keg")); zone->uz_init = zinit; ZONE_UNLOCK(zone); } /* See uma.h */ void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) { ZONE_LOCK(zone); KASSERT(zone_first_keg(zone)->uk_pages == 0, ("uma_zone_set_zfini on non-empty keg")); zone->uz_fini = zfini; ZONE_UNLOCK(zone); } /* See uma.h */ /* XXX uk_freef is not actually used with the zone locked */ void uma_zone_set_freef(uma_zone_t zone, uma_free freef) { uma_keg_t keg; keg = zone_first_keg(zone); KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); KEG_LOCK(keg); keg->uk_freef = freef; KEG_UNLOCK(keg); } /* See uma.h */ /* XXX uk_allocf is not actually used with the zone locked */ void uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) { uma_keg_t keg; keg = zone_first_keg(zone); KEG_LOCK(keg); keg->uk_allocf = allocf; KEG_UNLOCK(keg); } /* See uma.h */ void uma_zone_reserve(uma_zone_t zone, int items) { uma_keg_t keg; keg = zone_first_keg(zone); if (keg == NULL) return; KEG_LOCK(keg); keg->uk_reserve = items; KEG_UNLOCK(keg); return; } /* See uma.h */ int uma_zone_reserve_kva(uma_zone_t zone, int count) { uma_keg_t keg; vm_offset_t kva; u_int pages; keg = zone_first_keg(zone); if (keg == NULL) return (0); pages = count / keg->uk_ipers; if (pages * keg->uk_ipers < count) pages++; pages *= keg->uk_ppera; #ifdef UMA_MD_SMALL_ALLOC if (keg->uk_ppera > 1) { #else if (1) { #endif kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); if (kva == 0) return (0); } else kva = 0; KEG_LOCK(keg); keg->uk_kva = kva; keg->uk_offset = 0; keg->uk_maxpages = pages; #ifdef UMA_MD_SMALL_ALLOC keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; #else keg->uk_allocf = noobj_alloc; #endif keg->uk_flags |= UMA_ZONE_NOFREE; KEG_UNLOCK(keg); return (1); } /* See uma.h */ void uma_prealloc(uma_zone_t zone, int items) { uma_domain_t dom; uma_slab_t slab; uma_keg_t keg; int domain, slabs; keg = zone_first_keg(zone); if (keg == NULL) return; KEG_LOCK(keg); slabs = items / keg->uk_ipers; domain = 0; if (slabs * keg->uk_ipers < items) slabs++; while (slabs > 0) { slab = keg_alloc_slab(keg, zone, domain, M_WAITOK); if (slab == NULL) break; MPASS(slab->us_keg == keg); dom = &keg->uk_domain[slab->us_domain]; LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); slabs--; domain = (domain + 1) % vm_ndomains; } KEG_UNLOCK(keg); } /* See uma.h */ static void uma_reclaim_locked(bool kmem_danger) { CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); sx_assert(&uma_drain_lock, SA_XLOCKED); bucket_enable(); zone_foreach(zone_drain); if (vm_page_count_min() || kmem_danger) { cache_drain_safe(NULL); zone_foreach(zone_drain); } /* * Some slabs may have been freed but this zone will be visited early * we visit again so that we can free pages that are empty once other * zones are drained. We have to do the same for buckets. */ zone_drain(slabzone); bucket_zone_drain(); } void uma_reclaim(void) { sx_xlock(&uma_drain_lock); uma_reclaim_locked(false); sx_xunlock(&uma_drain_lock); } static volatile int uma_reclaim_needed; void uma_reclaim_wakeup(void) { if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) wakeup(uma_reclaim); } void uma_reclaim_worker(void *arg __unused) { for (;;) { sx_xlock(&uma_drain_lock); while (uma_reclaim_needed == 0) sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", hz); sx_xunlock(&uma_drain_lock); EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); sx_xlock(&uma_drain_lock); uma_reclaim_locked(true); - atomic_set_int(&uma_reclaim_needed, 0); + uma_reclaim_needed = 0; sx_xunlock(&uma_drain_lock); /* Don't fire more than once per-second. 
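 * (Editor's note, not part of this change: uma_reclaim_wakeup() is the
 * producer side of this loop.  A hypothetical low-memory check such as
 *
 *        if (vm_page_count_min())
 *                uma_reclaim_wakeup();
 *
 * merely bumps uma_reclaim_needed and wakes the worker; however many such
 * wakeups arrive between passes, they collapse into one drain followed by
 * the one-second pause below.)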
*/ pause("umarclslp", hz); } } /* See uma.h */ int uma_zone_exhausted(uma_zone_t zone) { int full; ZONE_LOCK(zone); full = (zone->uz_flags & UMA_ZFLAG_FULL); ZONE_UNLOCK(zone); return (full); } int uma_zone_exhausted_nolock(uma_zone_t zone) { return (zone->uz_flags & UMA_ZFLAG_FULL); } void * uma_large_malloc(vm_size_t size, int wait) { vm_offset_t addr; uma_slab_t slab; slab = zone_alloc_item(slabzone, NULL, 0, wait); if (slab == NULL) return (NULL); addr = kmem_malloc(kernel_arena, size, wait); if (addr != 0) { vsetslab(addr, slab); slab->us_data = (void *)addr; slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC; slab->us_size = size; slab->us_domain = vm_phys_domidx(PHYS_TO_VM_PAGE( pmap_kextract(addr))); uma_total_inc(size); } else { zone_free_item(slabzone, slab, NULL, SKIP_NONE); } return ((void *)addr); } void uma_large_free(uma_slab_t slab) { KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0, ("uma_large_free: Memory not allocated with uma_large_malloc.")); kmem_free(kernel_arena, (vm_offset_t)slab->us_data, slab->us_size); uma_total_dec(slab->us_size); zone_free_item(slabzone, slab, NULL, SKIP_NONE); } static void uma_zero_item(void *item, uma_zone_t zone) { int i; if (zone->uz_flags & UMA_ZONE_PCPU) { CPU_FOREACH(i) bzero(zpcpu_get_cpu(item, i), zone->uz_size); } else bzero(item, zone->uz_size); } unsigned long uma_limit(void) { return (uma_kmem_limit); } void uma_set_limit(unsigned long limit) { uma_kmem_limit = limit; } unsigned long uma_size(void) { return uma_kmem_total; } void uma_print_stats(void) { zone_foreach(uma_print_zone); } static void slab_print(uma_slab_t slab) { printf("slab: keg %p, data %p, freecount %d\n", slab->us_keg, slab->us_data, slab->us_freecount); } static void cache_print(uma_cache_t cache) { printf("alloc: %p(%d), free: %p(%d)\n", cache->uc_allocbucket, cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, cache->uc_freebucket, cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); } static void uma_print_keg(uma_keg_t keg) { uma_domain_t dom; uma_slab_t slab; int i; printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " "out %d free %d limit %d\n", keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, keg->uk_ipers, keg->uk_ppera, (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); for (i = 0; i < vm_ndomains; i++) { dom = &keg->uk_domain[i]; printf("Part slabs:\n"); LIST_FOREACH(slab, &dom->ud_part_slab, us_link) slab_print(slab); printf("Free slabs:\n"); LIST_FOREACH(slab, &dom->ud_free_slab, us_link) slab_print(slab); printf("Full slabs:\n"); LIST_FOREACH(slab, &dom->ud_full_slab, us_link) slab_print(slab); } } void uma_print_zone(uma_zone_t zone) { uma_cache_t cache; uma_klink_t kl; int i; printf("zone: %s(%p) size %d flags %#x\n", zone->uz_name, zone, zone->uz_size, zone->uz_flags); LIST_FOREACH(kl, &zone->uz_kegs, kl_link) uma_print_keg(kl->kl_keg); CPU_FOREACH(i) { cache = &zone->uz_cpu[i]; printf("CPU %d Cache:\n", i); cache_print(cache); } } #ifdef DDB /* * Generate statistics across both the zone and its per-cpu cache's. Return * desired statistics if the pointer is non-NULL for that statistic. * * Note: does not update the zone statistics, as it can't safely clear the * per-CPU cache statistic. * * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't * safe from off-CPU; we should modify the caches to track this information * directly so that we don't have to. 
*/ static void uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, uint64_t *freesp, uint64_t *sleepsp) { uma_cache_t cache; uint64_t allocs, frees, sleeps; int cachefree, cpu; allocs = frees = sleeps = 0; cachefree = 0; CPU_FOREACH(cpu) { cache = &z->uz_cpu[cpu]; if (cache->uc_allocbucket != NULL) cachefree += cache->uc_allocbucket->ub_cnt; if (cache->uc_freebucket != NULL) cachefree += cache->uc_freebucket->ub_cnt; allocs += cache->uc_allocs; frees += cache->uc_frees; } allocs += z->uz_allocs; frees += z->uz_frees; sleeps += z->uz_sleeps; if (cachefreep != NULL) *cachefreep = cachefree; if (allocsp != NULL) *allocsp = allocs; if (freesp != NULL) *freesp = frees; if (sleepsp != NULL) *sleepsp = sleeps; } #endif /* DDB */ static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) { uma_keg_t kz; uma_zone_t z; int count; count = 0; rw_rlock(&uma_rwlock); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) count++; } rw_runlock(&uma_rwlock); return (sysctl_handle_int(oidp, &count, 0, req)); } static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) { struct uma_stream_header ush; struct uma_type_header uth; struct uma_percpu_stat ups; uma_bucket_t bucket; uma_zone_domain_t zdom; struct sbuf sbuf; uma_cache_t cache; uma_klink_t kl; uma_keg_t kz; uma_zone_t z; uma_keg_t k; int count, error, i; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); count = 0; rw_rlock(&uma_rwlock); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) count++; } /* * Insert stream header. */ bzero(&ush, sizeof(ush)); ush.ush_version = UMA_STREAM_VERSION; ush.ush_maxcpus = (mp_maxid + 1); ush.ush_count = count; (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) { bzero(&uth, sizeof(uth)); ZONE_LOCK(z); strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); uth.uth_align = kz->uk_align; uth.uth_size = kz->uk_size; uth.uth_rsize = kz->uk_rsize; LIST_FOREACH(kl, &z->uz_kegs, kl_link) { k = kl->kl_keg; uth.uth_maxpages += k->uk_maxpages; uth.uth_pages += k->uk_pages; uth.uth_keg_free += k->uk_free; uth.uth_limit = (k->uk_maxpages / k->uk_ppera) * k->uk_ipers; } /* * A zone is secondary is it is not the first entry * on the keg's zone list. */ if ((z->uz_flags & UMA_ZONE_SECONDARY) && (LIST_FIRST(&kz->uk_zones) != z)) uth.uth_zone_flags = UTH_ZONE_SECONDARY; for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) uth.uth_zone_free += bucket->ub_cnt; } uth.uth_allocs = z->uz_allocs; uth.uth_frees = z->uz_frees; uth.uth_fails = z->uz_fails; uth.uth_sleeps = z->uz_sleeps; (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); /* * While it is not normally safe to access the cache * bucket pointers while not on the CPU that owns the * cache, we only allow the pointers to be exchanged * without the zone lock held, not invalidated, so * accept the possible race associated with bucket * exchange during monitoring. 
*/ for (i = 0; i < (mp_maxid + 1); i++) { bzero(&ups, sizeof(ups)); if (kz->uk_flags & UMA_ZFLAG_INTERNAL) goto skip; if (CPU_ABSENT(i)) goto skip; cache = &z->uz_cpu[i]; if (cache->uc_allocbucket != NULL) ups.ups_cache_free += cache->uc_allocbucket->ub_cnt; if (cache->uc_freebucket != NULL) ups.ups_cache_free += cache->uc_freebucket->ub_cnt; ups.ups_allocs = cache->uc_allocs; ups.ups_frees = cache->uc_frees; skip: (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); } ZONE_UNLOCK(z); } } rw_runlock(&uma_rwlock); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } int sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) { uma_zone_t zone = *(uma_zone_t *)arg1; int error, max; max = uma_zone_get_max(zone); error = sysctl_handle_int(oidp, &max, 0, req); if (error || !req->newptr) return (error); uma_zone_set_max(zone, max); return (0); } int sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) { uma_zone_t zone = *(uma_zone_t *)arg1; int cur; cur = uma_zone_get_cur(zone); return (sysctl_handle_int(oidp, &cur, 0, req)); } #ifdef INVARIANTS static uma_slab_t uma_dbg_getslab(uma_zone_t zone, void *item) { uma_slab_t slab; uma_keg_t keg; uint8_t *mem; mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); if (zone->uz_flags & UMA_ZONE_VTOSLAB) { slab = vtoslab((vm_offset_t)mem); } else { /* * It is safe to return the slab here even though the * zone is unlocked because the item's allocation state * essentially holds a reference. */ ZONE_LOCK(zone); keg = LIST_FIRST(&zone->uz_kegs)->kl_keg; if (keg->uk_flags & UMA_ZONE_HASH) slab = hash_sfind(&keg->uk_hash, mem); else slab = (uma_slab_t)(mem + keg->uk_pgoff); ZONE_UNLOCK(zone); } return (slab); } /* * Set up the slab's freei data such that uma_dbg_free can function. * */ static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item) { uma_keg_t keg; int freei; if (zone_first_keg(zone) == NULL) return; if (slab == NULL) { slab = uma_dbg_getslab(zone, item); if (slab == NULL) panic("uma: item %p did not belong to zone %s\n", item, zone->uz_name); } keg = slab->us_keg; freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree)) panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree); return; } /* * Verifies freed addresses. Checks for alignment, valid slab membership * and duplicate frees. 
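 * (Editor's worked example, not part of this change: for a slab whose
 * us_data begins at address A and a keg with uk_rsize = 256, an item at
 * A + 1280 maps to freei = 5.  Freeing A + 1281 instead trips the
 * "Unaligned free" panic below; freeing the same item twice trips
 * "Duplicate free", since the us_debugfree bit set by uma_dbg_alloc()
 * above is cleared by the first free; and a freei at or beyond uk_ipers
 * trips "Invalid free".)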
* */ static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item) { uma_keg_t keg; int freei; if (zone_first_keg(zone) == NULL) return; if (slab == NULL) { slab = uma_dbg_getslab(zone, item); if (slab == NULL) panic("uma: Freed item %p did not belong to zone %s\n", item, zone->uz_name); } keg = slab->us_keg; freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; if (freei >= keg->uk_ipers) panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); if (((freei * keg->uk_rsize) + slab->us_data) != item) panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree)) panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n", item, zone, zone->uz_name, slab, freei); BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree); } #endif /* INVARIANTS */ #ifdef DDB DB_SHOW_COMMAND(uma, db_show_uma) { uma_bucket_t bucket; uma_keg_t kz; uma_zone_t z; uma_zone_domain_t zdom; uint64_t allocs, frees, sleeps; int cachefree, i; db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used", "Free", "Requests", "Sleeps", "Bucket"); LIST_FOREACH(kz, &uma_kegs, uk_link) { LIST_FOREACH(z, &kz->uk_zones, uz_link) { if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { allocs = z->uz_allocs; frees = z->uz_frees; sleeps = z->uz_sleeps; cachefree = 0; } else uma_zone_sumstat(z, &cachefree, &allocs, &frees, &sleeps); if (!((z->uz_flags & UMA_ZONE_SECONDARY) && (LIST_FIRST(&kz->uk_zones) != z))) cachefree += kz->uk_free; for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) cachefree += bucket->ub_cnt; } db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n", z->uz_name, (uintmax_t)kz->uk_size, (intmax_t)(allocs - frees), cachefree, (uintmax_t)allocs, sleeps, z->uz_count); if (db_pager_quit) return; } } } DB_SHOW_COMMAND(umacache, db_show_umacache) { uma_bucket_t bucket; uma_zone_t z; uma_zone_domain_t zdom; uint64_t allocs, frees; int cachefree, i; db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", "Requests", "Bucket"); LIST_FOREACH(z, &uma_cachezones, uz_link) { uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL); for (i = 0; i < vm_ndomains; i++) { zdom = &z->uz_domain[i]; LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link) cachefree += bucket->ub_cnt; } db_printf("%18s %8ju %8jd %8d %12ju %8u\n", z->uz_name, (uintmax_t)z->uz_size, (intmax_t)(allocs - frees), cachefree, (uintmax_t)allocs, z->uz_count); if (db_pager_quit) return; } } #endif /* DDB */ Index: user/jeff/numa/sys/vm/vm_domainset.c =================================================================== --- user/jeff/numa/sys/vm/vm_domainset.c (nonexistent) +++ user/jeff/numa/sys/vm/vm_domainset.c (revision 326889) @@ -0,0 +1,219 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2017, Jeffrey Roberson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_vm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * Iterators are written such that the first nowait pass has as short a + * codepath as possible to eliminate bloat from the allocator. It is + * assumed that most allocations are successful. + */ + +/* + * Determine which policy is to be used for this allocation. + */ +static void +vm_domainset_iter_domain(struct vm_domainset_iter *di, struct vm_object *obj) +{ + struct domainset *domain; + + /* + * object policy takes precedence over thread policy. The policies + * are immutable and unsychronized. Updates can race but pointer + * loads are assumed to be atomic. + */ + if (obj != NULL && (domain = obj->domain.dr_policy) != NULL) { + di->di_domain = domain; + di->di_iter = &obj->domain.dr_iterator; + } else { + di->di_domain = curthread->td_domain.dr_policy; + di->di_iter = &curthread->td_domain.dr_iterator; + } +} + +static void +vm_domainset_iter_rr(struct vm_domainset_iter *di, int *domain) +{ + int d; + + d = *di->di_iter; + do { + d = (d + 1) % di->di_domain->ds_max; + } while (!DOMAINSET_ISSET(d, &di->di_domain->ds_mask)); + *di->di_iter = *domain = d; +} + +static void +vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain) +{ + + switch (di->di_domain->ds_policy) { + case DOMAINSET_POLICY_FIRSTTOUCH: + /* + * To prevent impossible allocations we convert an invalid + * first-touch to round-robin. + */ + /* FALLTHROUGH */ + case DOMAINSET_POLICY_ROUNDROBIN: + vm_domainset_iter_rr(di, domain); + break; + default: + panic("vm_domainset_iter_first: Unknown policy %d", + di->di_domain->ds_policy); + } + KASSERT(*domain < vm_ndomains, + ("vm_domainset_iter_next: Invalid domain %d", *domain)); +} + +static void +vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain) +{ + + switch (di->di_domain->ds_policy) { + case DOMAINSET_POLICY_FIRSTTOUCH: + *domain = PCPU_GET(domain); + if (DOMAINSET_ISSET(*domain, &di->di_domain->ds_mask)) { + di->di_n = 1; + break; + } + /* + * To prevent impossible allocations we convert an invalid + * first-touch to round-robin. 
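[Editor's note] vm_domainset_iter_rr() above advances the policy's shared cursor round-robin and skips domains missing from the policy's mask. A minimal user-space sketch of the same selection loop, with a plain bitmask and NDOMAINS standing in for ds_mask/DOMAINSET_ISSET and ds_max:

#include <stdio.h>

#define NDOMAINS 4

static unsigned mask = 0x5;     /* domains 0 and 2 allowed */
static int cursor;              /* stands in for *di->di_iter */

static int
iter_rr(void)
{
        int d = cursor;

        do {
                d = (d + 1) % NDOMAINS;
        } while ((mask & (1u << d)) == 0);
        cursor = d;
        return (d);
}

int
main(void)
{
        for (int i = 0; i < 6; i++)
                printf("%d ", iter_rr());       /* prints 2 0 2 0 2 0 */
        printf("\n");
        return (0);
}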
+ */ + /* FALLTHROUGH */ + case DOMAINSET_POLICY_ROUNDROBIN: + di->di_n = di->di_domain->ds_cnt; + vm_domainset_iter_rr(di, domain); + break; + default: + panic("vm_domainset_iter_first: Unknown policy %d", + di->di_domain->ds_policy); + } + KASSERT(*domain < vm_ndomains, + ("vm_domainset_iter_first: Invalid domain %d", *domain)); +} + +void +vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj, + int *domain, int *req) +{ + + vm_domainset_iter_domain(di, obj); + di->di_flags = *req; + *req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) | + VM_ALLOC_NOWAIT; + vm_domainset_iter_first(di, domain); +} + +int +vm_domainset_iter_page(struct vm_domainset_iter *di, int *domain, int *req) +{ + + /* + * If we exhausted all options with NOWAIT and did a WAITFAIL it + * is time to return an error to the caller. + */ + if ((*req & VM_ALLOC_WAITFAIL) != 0) + return (ENOMEM); + + /* If there are more domains to visit we run the iterator. */ + if (--di->di_n != 0) { + vm_domainset_iter_next(di, domain); + return (0); + } + + /* If we visited all domains and this was a NOWAIT we return error. */ + if ((di->di_flags & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) == 0) + return (ENOMEM); + + /* + * We have visited all domains with non-blocking allocations, try + * from the beginning with a blocking allocation. + */ + vm_domainset_iter_first(di, domain); + *req = di->di_flags; + + return (0); +} + + +void +vm_domainset_iter_malloc_init(struct vm_domainset_iter *di, + struct vm_object *obj, int *domain, int *flags) +{ + + vm_domainset_iter_domain(di, obj); + di->di_flags = *flags; + *flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT; + vm_domainset_iter_first(di, domain); +} + +int +vm_domainset_iter_malloc(struct vm_domainset_iter *di, int *domain, int *flags) +{ + + /* If there are more domains to visit we run the iterator. */ + if (--di->di_n != 0) { + vm_domainset_iter_next(di, domain); + return (0); + } + + /* If we visited all domains and this was a NOWAIT we return error. */ + if ((di->di_flags & M_WAITOK) == 0) + return (ENOMEM); + + /* + * We have visited all domains with non-blocking allocations, try + * from the beginning with a blocking allocation. + */ + vm_domainset_iter_first(di, domain); + *flags = di->di_flags; + + return (0); +} Property changes on: user/jeff/numa/sys/vm/vm_domainset.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: user/jeff/numa/sys/vm/vm_domainset.h =================================================================== --- user/jeff/numa/sys/vm/vm_domainset.h (nonexistent) +++ user/jeff/numa/sys/vm/vm_domainset.h (revision 326889) @@ -0,0 +1,47 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2017, Jeffrey Roberson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
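[Editor's note] vm_domainset_iter_page_init() and vm_domainset_iter_page() above encode a two-pass policy: the first sweep over the domains is forced to VM_ALLOC_NOWAIT, WAITFAIL callers get ENOMEM once that sweep fails, and only WAITOK callers get a second, blocking attempt. A hedged sketch of the intended calling pattern; vm_page_alloc_domain() is an assumed name for the per-domain allocation step and is not defined in this diff.

static vm_page_t
alloc_page_with_policy(vm_object_t obj, vm_pindex_t pindex, int req)
{
        struct vm_domainset_iter di;
        vm_page_t m;
        int domain;

        vm_domainset_iter_page_init(&di, obj, &domain, &req);
        do {
                /* First sweep runs with VM_ALLOC_NOWAIT substituted. */
                m = vm_page_alloc_domain(obj, pindex, domain, req);
                if (m != NULL)
                        return (m);
                /* The iterator restores blocking flags after the last domain. */
        } while (vm_domainset_iter_page(&di, &domain, &req) == 0);
        return (NULL);
}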
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ +#ifndef __VM_DOMAINSET_H__ +#define __VM_DOMAINSET_H__ + +struct vm_domainset_iter { + struct domainset *di_domain; + int *di_iter; + int di_flags; + int di_n; +}; + +int vm_domainset_iter_page(struct vm_domainset_iter *, int *, int *); +void vm_domainset_iter_page_init(struct vm_domainset_iter *, + struct vm_object *, int *, int *); +int vm_domainset_iter_malloc(struct vm_domainset_iter *, int *, int *); +void vm_domainset_iter_malloc_init(struct vm_domainset_iter *, + struct vm_object *, int *, int *); + +#endif /* __VM_DOMAINSET_H__ */ Property changes on: user/jeff/numa/sys/vm/vm_domainset.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: user/jeff/numa/sys/vm/vm_fault.c =================================================================== --- user/jeff/numa/sys/vm/vm_fault.c (revision 326888) +++ user/jeff/numa/sys/vm/vm_fault.c (revision 326889) @@ -1,1752 +1,1753 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
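[Editor's note] The header above is the whole consumer-facing surface: initialize an iterator, receive a starting domain, and loop while the iterator keeps producing domains. The malloc-flavored loop below mirrors the kmem_alloc_attr() conversion later in this change; alloc_from_domain() is a hypothetical per-domain backend used only for illustration.

static void *
alloc_with_domainset(struct vm_object *obj, size_t size, int flags)
{
        struct vm_domainset_iter di;
        void *p;
        int domain;

        /* The init routine forces the first pass to M_NOWAIT. */
        vm_domainset_iter_malloc_init(&di, obj, &domain, &flags);
        do {
                p = alloc_from_domain(domain, size, flags);  /* assumed helper */
                if (p != NULL)
                        return (p);
        } while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
        return (NULL);
}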
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_fault.c 8.4 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Page fault handling module. */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #include #include #define PFBAK 4 #define PFFOR 4 #define VM_FAULT_READ_DEFAULT (1 + VM_FAULT_READ_AHEAD_INIT) #define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX) #define VM_FAULT_DONTNEED_MIN 1048576 struct faultstate { vm_page_t m; vm_object_t object; vm_pindex_t pindex; vm_page_t first_m; vm_object_t first_object; vm_pindex_t first_pindex; vm_map_t map; vm_map_entry_t entry; int map_generation; bool lookup_still_valid; struct vnode *vp; }; static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead); static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, int backward, int forward); static inline void release_page(struct faultstate *fs) { vm_page_xunbusy(fs->m); vm_page_lock(fs->m); vm_page_deactivate(fs->m); vm_page_unlock(fs->m); fs->m = NULL; } static inline void unlock_map(struct faultstate *fs) { if (fs->lookup_still_valid) { vm_map_lookup_done(fs->map, fs->entry); fs->lookup_still_valid = false; } } static void unlock_vp(struct faultstate *fs) { if (fs->vp != NULL) { vput(fs->vp); fs->vp = NULL; } } static void unlock_and_deallocate(struct faultstate *fs) { vm_object_pip_wakeup(fs->object); VM_OBJECT_WUNLOCK(fs->object); if (fs->object != fs->first_object) { VM_OBJECT_WLOCK(fs->first_object); vm_page_lock(fs->first_m); vm_page_free(fs->first_m); vm_page_unlock(fs->first_m); vm_object_pip_wakeup(fs->first_object); VM_OBJECT_WUNLOCK(fs->first_object); fs->first_m = NULL; } vm_object_deallocate(fs->first_object); unlock_map(fs); unlock_vp(fs); } static void vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot, vm_prot_t fault_type, int 
fault_flags, bool set_wd) { bool need_dirty; if (((prot & VM_PROT_WRITE) == 0 && (fault_flags & VM_FAULT_DIRTY) == 0) || (m->oflags & VPO_UNMANAGED) != 0) return; VM_OBJECT_ASSERT_LOCKED(m->object); need_dirty = ((fault_type & VM_PROT_WRITE) != 0 && (fault_flags & VM_FAULT_WIRE) == 0) || (fault_flags & VM_FAULT_DIRTY) != 0; if (set_wd) vm_object_set_writeable_dirty(m->object); else /* * If two callers of vm_fault_dirty() with set_wd == * FALSE, one for the map entry with MAP_ENTRY_NOSYNC * flag set, other with flag clear, race, it is * possible for the no-NOSYNC thread to see m->dirty * != 0 and not clear VPO_NOSYNC. Take vm_page lock * around manipulation of VPO_NOSYNC and * vm_page_dirty() call, to avoid the race and keep * m->oflags consistent. */ vm_page_lock(m); /* * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC * if the page is already dirty to prevent data written with * the expectation of being synced from not being synced. * Likewise if this entry does not request NOSYNC then make * sure the page isn't marked NOSYNC. Applications sharing * data should use the same flags to avoid ping ponging. */ if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) { if (m->dirty == 0) { m->oflags |= VPO_NOSYNC; } } else { m->oflags &= ~VPO_NOSYNC; } /* * If the fault is a write, we know that this page is being * written NOW so dirty it explicitly to save on * pmap_is_modified() calls later. * * Also, since the page is now dirty, we can possibly tell * the pager to release any swap backing the page. Calling * the pager requires a write lock on the object. */ if (need_dirty) vm_page_dirty(m); if (!set_wd) vm_page_unlock(m); else if (need_dirty) vm_pager_page_unswapped(m); } static void vm_fault_fill_hold(vm_page_t *m_hold, vm_page_t m) { if (m_hold != NULL) { *m_hold = m; vm_page_lock(m); vm_page_hold(m); vm_page_unlock(m); } } /* * Unlocks fs.first_object and fs.map on success. */ static int vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot, int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold) { vm_page_t m, m_map; #if defined(__amd64__) && VM_NRESERVLEVEL > 0 vm_page_t m_super; int flags; #endif int psind, rv; MPASS(fs->vp == NULL); m = vm_page_lookup(fs->first_object, fs->first_pindex); /* A busy page can be mapped for read|execute access. */ if (m == NULL || ((prot & VM_PROT_WRITE) != 0 && vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL) return (KERN_FAILURE); m_map = m; psind = 0; #if defined(__amd64__) && VM_NRESERVLEVEL > 0 if ((m->flags & PG_FICTITIOUS) == 0 && (m_super = vm_reserv_to_superpage(m)) != NULL && rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start && roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end && (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) & (pagesizes[m_super->psind] - 1)) && pmap_ps_enabled(fs->map->pmap)) { flags = PS_ALL_VALID; if ((prot & VM_PROT_WRITE) != 0) { /* * Create a superpage mapping allowing write access * only if none of the constituent pages are busy and * all of them are already dirty (except possibly for * the page that was faulted on). */ flags |= PS_NONE_BUSY; if ((fs->first_object->flags & OBJ_UNMANAGED) == 0) flags |= PS_ALL_DIRTY; } if (vm_page_ps_test(m_super, flags, m)) { m_map = m_super; psind = m_super->psind; vaddr = rounddown2(vaddr, pagesizes[psind]); /* Preset the modified bit for dirty superpages. 
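[Editor's note] The superpage test in vm_fault_soft_fast() above hinges on congruence: a superpage mapping is only attempted when the faulting virtual address and the page's physical address share the same offset within a superpage, and the whole superpage fits inside the map entry. A runnable check of that congruence condition, assuming the 2MB superpage size used on amd64:

#include <stdint.h>
#include <stdio.h>

#define SUPERPAGE_SIZE  (2UL * 1024 * 1024)     /* pagesizes[1] on amd64 */

static int
superpage_aligned(uint64_t vaddr, uint64_t paddr)
{
        /* Same offset within the superpage on both sides? */
        return ((vaddr & (SUPERPAGE_SIZE - 1)) ==
            (paddr & (SUPERPAGE_SIZE - 1)));
}

int
main(void)
{
        /* Congruent: both are 0x1000 bytes into their superpage. */
        printf("%d\n", superpage_aligned(0x7f0000201000, 0x40001000));
        /* Not congruent: a superpage mapping is impossible here. */
        printf("%d\n", superpage_aligned(0x7f0000201000, 0x40002000));
        return (0);
}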
*/ if ((flags & PS_ALL_DIRTY) != 0) fault_type |= VM_PROT_WRITE; } } #endif rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type | PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind); if (rv != KERN_SUCCESS) return (rv); vm_fault_fill_hold(m_hold, m); vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false); VM_OBJECT_RUNLOCK(fs->first_object); if (psind == 0 && !wired) vm_fault_prefault(fs, vaddr, PFBAK, PFFOR); vm_map_lookup_done(fs->map, fs->entry); curthread->td_ru.ru_minflt++; return (KERN_SUCCESS); } static void vm_fault_restore_map_lock(struct faultstate *fs) { VM_OBJECT_ASSERT_WLOCKED(fs->first_object); MPASS(fs->first_object->paging_in_progress > 0); if (!vm_map_trylock_read(fs->map)) { VM_OBJECT_WUNLOCK(fs->first_object); vm_map_lock_read(fs->map); VM_OBJECT_WLOCK(fs->first_object); } fs->lookup_still_valid = true; } static void vm_fault_populate_check_page(vm_page_t m) { /* * Check each page to ensure that the pager is obeying the * interface: the page must be installed in the object, fully * valid, and exclusively busied. */ MPASS(m != NULL); MPASS(m->valid == VM_PAGE_BITS_ALL); MPASS(vm_page_xbusied(m)); } static void vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first, vm_pindex_t last) { vm_page_t m; vm_pindex_t pidx; VM_OBJECT_ASSERT_WLOCKED(object); MPASS(first <= last); for (pidx = first, m = vm_page_lookup(object, pidx); pidx <= last; pidx++, m = vm_page_next(m)) { vm_fault_populate_check_page(m); vm_page_lock(m); vm_page_deactivate(m); vm_page_unlock(m); vm_page_xunbusy(m); } } static int vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot, int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold) { vm_page_t m; vm_pindex_t map_first, map_last, pager_first, pager_last, pidx; int rv; MPASS(fs->object == fs->first_object); VM_OBJECT_ASSERT_WLOCKED(fs->first_object); MPASS(fs->first_object->paging_in_progress > 0); MPASS(fs->first_object->backing_object == NULL); MPASS(fs->lookup_still_valid); pager_first = OFF_TO_IDX(fs->entry->offset); pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1; unlock_map(fs); unlock_vp(fs); /* * Call the pager (driver) populate() method. * * There is no guarantee that the method will be called again * if the current fault is for read, and a future fault is * for write. Report the entry's maximum allowed protection * to the driver. */ rv = vm_pager_populate(fs->first_object, fs->first_pindex, fault_type, fs->entry->max_protection, &pager_first, &pager_last); VM_OBJECT_ASSERT_WLOCKED(fs->first_object); if (rv == VM_PAGER_BAD) { /* * VM_PAGER_BAD is the backdoor for a pager to request * normal fault handling. */ vm_fault_restore_map_lock(fs); if (fs->map->timestamp != fs->map_generation) return (KERN_RESOURCE_SHORTAGE); /* RetryFault */ return (KERN_NOT_RECEIVER); } if (rv != VM_PAGER_OK) return (KERN_FAILURE); /* AKA SIGSEGV */ /* Ensure that the driver is obeying the interface. */ MPASS(pager_first <= pager_last); MPASS(fs->first_pindex <= pager_last); MPASS(fs->first_pindex >= pager_first); MPASS(pager_last < fs->first_object->size); vm_fault_restore_map_lock(fs); if (fs->map->timestamp != fs->map_generation) { vm_fault_populate_cleanup(fs->first_object, pager_first, pager_last); return (KERN_RESOURCE_SHORTAGE); /* RetryFault */ } /* * The map is unchanged after our last unlock. Process the fault. * * The range [pager_first, pager_last] that is given to the * pager is only a hint. 
The pager may populate any range * within the object that includes the requested page index. * In case the pager expanded the range, clip it to fit into * the map entry. */ map_first = OFF_TO_IDX(fs->entry->offset); if (map_first > pager_first) { vm_fault_populate_cleanup(fs->first_object, pager_first, map_first - 1); pager_first = map_first; } map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1; if (map_last < pager_last) { vm_fault_populate_cleanup(fs->first_object, map_last + 1, pager_last); pager_last = map_last; } for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx); pidx <= pager_last; pidx++, m = vm_page_next(m)) { vm_fault_populate_check_page(m); vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, true); VM_OBJECT_WUNLOCK(fs->first_object); pmap_enter(fs->map->pmap, fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset, m, prot, fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0); VM_OBJECT_WLOCK(fs->first_object); if (pidx == fs->first_pindex) vm_fault_fill_hold(m_hold, m); vm_page_lock(m); if ((fault_flags & VM_FAULT_WIRE) != 0) { KASSERT(wired, ("VM_FAULT_WIRE && !wired")); vm_page_wire(m); } else { vm_page_activate(m); } vm_page_unlock(m); vm_page_xunbusy(m); } curthread->td_ru.ru_majflt++; return (KERN_SUCCESS); } /* * vm_fault: * * Handle a page fault occurring at the given address, * requiring the given permissions, in the map specified. * If successful, the page is inserted into the * associated physical map. * * NOTE: the given address should be truncated to the * proper page address. * * KERN_SUCCESS is returned if the page fault is handled; otherwise, * a standard error specifying why the fault is fatal is returned. * * The map in question must be referenced, and remains so. * Caller may hold no locks. */ int vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) { struct thread *td; int result; td = curthread; if ((td->td_pflags & TDP_NOFAULTING) != 0) return (KERN_PROTECTION_FAILURE); #ifdef KTRACE if (map != kernel_map && KTRPOINT(td, KTR_FAULT)) ktrfault(vaddr, fault_type); #endif result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags, NULL); #ifdef KTRACE if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND)) ktrfaultend(result); #endif return (result); } int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags, vm_page_t *m_hold) { struct faultstate fs; struct vnode *vp; vm_object_t next_object, retry_object; vm_offset_t e_end, e_start; vm_pindex_t retry_pindex; vm_prot_t prot, retry_prot; int ahead, alloc_req, behind, cluster_offset, error, era, faultcount; int locked, nera, result, rv; u_char behavior; boolean_t wired; /* Passed by reference. */ bool dead, hardfault, is_first_object_locked; VM_CNT_INC(v_vm_faults); fs.vp = NULL; faultcount = 0; nera = -1; hardfault = false; RetryFault:; /* * Find the backing store object and offset into it to begin the * search. 
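[Editor's note] The clipping step in vm_fault_populate() above trims whatever range the pager reported back down to the pindexes the current map entry can actually map; pages the pager instantiated outside that window are deactivated and unbusied. A small runnable illustration of the interval arithmetic, with arbitrary pindex values:

#include <stdio.h>

struct range { long first, last; };     /* inclusive pindex range */

static struct range
clip_to_entry(struct range pager, struct range entry)
{
        /* Pages the pager produced outside the entry would be released. */
        if (pager.first < entry.first)
                pager.first = entry.first;
        if (pager.last > entry.last)
                pager.last = entry.last;
        return (pager);
}

int
main(void)
{
        struct range entry = { 16, 31 };        /* what the map entry covers */
        struct range pager = { 10, 40 };        /* what populate() reported */
        struct range r = clip_to_entry(pager, entry);

        printf("mapped pindex %ld..%ld\n", r.first, r.last);   /* 16..31 */
        return (0);
}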
*/ fs.map = map; result = vm_map_lookup(&fs.map, vaddr, fault_type | VM_PROT_FAULT_LOOKUP, &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired); if (result != KERN_SUCCESS) { unlock_vp(&fs); return (result); } fs.map_generation = fs.map->timestamp; if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { panic("%s: fault on nofault entry, addr: %#lx", __func__, (u_long)vaddr); } if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION && fs.entry->wiring_thread != curthread) { vm_map_unlock_read(fs.map); vm_map_lock(fs.map); if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) && (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) { unlock_vp(&fs); fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; vm_map_unlock_and_wait(fs.map, 0); } else vm_map_unlock(fs.map); goto RetryFault; } MPASS((fs.entry->eflags & MAP_ENTRY_GUARD) == 0); if (wired) fault_type = prot | (fault_type & VM_PROT_COPY); else KASSERT((fault_flags & VM_FAULT_WIRE) == 0, ("!wired && VM_FAULT_WIRE")); /* * Try to avoid lock contention on the top-level object through * special-case handling of some types of page faults, specifically, * those that are both (1) mapping an existing page from the top- * level object and (2) not having to mark that object as containing * dirty pages. Under these conditions, a read lock on the top-level * object suffices, allowing multiple page faults of a similar type to * run in parallel on the same top-level object. */ if (fs.vp == NULL /* avoid locked vnode leak */ && (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 && /* avoid calling vm_object_set_writeable_dirty() */ ((prot & VM_PROT_WRITE) == 0 || (fs.first_object->type != OBJT_VNODE && (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) || (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) { VM_OBJECT_RLOCK(fs.first_object); if ((prot & VM_PROT_WRITE) == 0 || (fs.first_object->type != OBJT_VNODE && (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) || (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) { rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type, fault_flags, wired, m_hold); if (rv == KERN_SUCCESS) return (rv); } if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) { VM_OBJECT_RUNLOCK(fs.first_object); VM_OBJECT_WLOCK(fs.first_object); } } else { VM_OBJECT_WLOCK(fs.first_object); } /* * Make a reference to this object to prevent its disposal while we * are messing with it. Once we have the reference, the map is free * to be diddled. Since objects reference their shadows (and copies), * they will stay around as well. * * Bump the paging-in-progress count to prevent size changes (e.g. * truncation operations) during I/O. */ vm_object_reference_locked(fs.first_object); vm_object_pip_add(fs.first_object, 1); fs.lookup_still_valid = true; fs.first_m = NULL; /* * Search for the page at object/offset. */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; while (TRUE) { /* * If the object is marked for imminent termination, * we retry here, since the collapse pass has raced * with us. Otherwise, if we see terminally dead * object, return fail. */ if ((fs.object->flags & OBJ_DEAD) != 0) { dead = fs.object->type == OBJT_DEAD; unlock_and_deallocate(&fs); if (dead) return (KERN_PROTECTION_FAILURE); pause("vmf_de", 1); goto RetryFault; } /* * See if page is resident */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (fs.m != NULL) { /* * Wait/Retry if the page is busy. 
We have to do this * if the page is either exclusive or shared busy * because the vm_pager may be using read busy for * pageouts (and even pageins if it is the vnode * pager), and we could end up trying to pagein and * pageout the same page simultaneously. * * We can theoretically allow the busy case on a read * fault if the page is marked valid, but since such * pages are typically already pmap'd, putting that * special case in might be more effort then it is * worth. We cannot under any circumstances mess * around with a shared busied page except, perhaps, * to pmap it. */ if (vm_page_busied(fs.m)) { /* * Reference the page before unlocking and * sleeping so that the page daemon is less * likely to reclaim it. */ vm_page_aflag_set(fs.m, PGA_REFERENCED); if (fs.object != fs.first_object) { if (!VM_OBJECT_TRYWLOCK( fs.first_object)) { VM_OBJECT_WUNLOCK(fs.object); VM_OBJECT_WLOCK(fs.first_object); VM_OBJECT_WLOCK(fs.object); } vm_page_lock(fs.first_m); vm_page_free(fs.first_m); vm_page_unlock(fs.first_m); vm_object_pip_wakeup(fs.first_object); VM_OBJECT_WUNLOCK(fs.first_object); fs.first_m = NULL; } unlock_map(&fs); if (fs.m == vm_page_lookup(fs.object, fs.pindex)) { vm_page_sleep_if_busy(fs.m, "vmpfw"); } vm_object_pip_wakeup(fs.object); VM_OBJECT_WUNLOCK(fs.object); VM_CNT_INC(v_intrans); vm_object_deallocate(fs.first_object); goto RetryFault; } vm_page_lock(fs.m); vm_page_remque(fs.m); vm_page_unlock(fs.m); /* * Mark page busy for other processes, and the * pagedaemon. If it still isn't completely valid * (readable), jump to readrest, else break-out ( we * found the page ). */ vm_page_xbusy(fs.m); if (fs.m->valid != VM_PAGE_BITS_ALL) goto readrest; break; } KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m)); /* * Page is not resident. If the pager might contain the page * or this is the beginning of the search, allocate a new * page. (Default objects are zero-fill, so there is no real * pager for them.) */ if (fs.object->type != OBJT_DEFAULT || fs.object == fs.first_object) { if (fs.pindex >= fs.object->size) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } if (fs.object == fs.first_object && (fs.first_object->flags & OBJ_POPULATE) != 0 && fs.first_object->shadow_count == 0) { rv = vm_fault_populate(&fs, vaddr, prot, fault_type, fault_flags, wired, m_hold); switch (rv) { case KERN_SUCCESS: case KERN_FAILURE: unlock_and_deallocate(&fs); return (rv); case KERN_RESOURCE_SHORTAGE: unlock_and_deallocate(&fs); goto RetryFault; case KERN_NOT_RECEIVER: /* * Pager's populate() method * returned VM_PAGER_BAD. */ break; default: panic("inconsistent return codes"); } } /* * Allocate a new page for this object/offset pair. * * Unlocked read of the p_flag is harmless. At * worst, the P_KILLED might be not observed * there, and allocation can fail, causing * restart and new reading of the p_flag. */ if (!vm_page_count_severe() || P_KILLED(curproc)) { #if VM_NRESERVLEVEL > 0 vm_object_color(fs.object, atop(vaddr) - fs.pindex); #endif alloc_req = P_KILLED(curproc) ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL; if (fs.object->type != OBJT_VNODE && fs.object->backing_object == NULL) alloc_req |= VM_ALLOC_ZERO; fs.m = vm_page_alloc(fs.object, fs.pindex, alloc_req); } if (fs.m == NULL) { unlock_and_deallocate(&fs); VM_WAITPFAULT; goto RetryFault; } } readrest: /* * At this point, we have either allocated a new page or found * an existing page that is only partially valid. * * We hold a reference on the current object and the page is * exclusive busied. 
*/ /* * If the pager for the current object might have the page, * then determine the number of additional pages to read and * potentially reprioritize previously read pages for earlier * reclamation. These operations should only be performed * once per page fault. Even if the current pager doesn't * have the page, the number of additional pages to read will * apply to subsequent objects in the shadow chain. */ if (fs.object->type != OBJT_DEFAULT && nera == -1 && !P_KILLED(curproc)) { KASSERT(fs.lookup_still_valid, ("map unlocked")); era = fs.entry->read_ahead; behavior = vm_map_entry_behavior(fs.entry); if (behavior == MAP_ENTRY_BEHAV_RANDOM) { nera = 0; } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) { nera = VM_FAULT_READ_AHEAD_MAX; if (vaddr == fs.entry->next_read) vm_fault_dontneed(&fs, vaddr, nera); } else if (vaddr == fs.entry->next_read) { /* * This is a sequential fault. Arithmetically * increase the requested number of pages in * the read-ahead window. The requested * number of pages is "# of sequential faults * x (read ahead min + 1) + read ahead min" */ nera = VM_FAULT_READ_AHEAD_MIN; if (era > 0) { nera += era + 1; if (nera > VM_FAULT_READ_AHEAD_MAX) nera = VM_FAULT_READ_AHEAD_MAX; } if (era == VM_FAULT_READ_AHEAD_MAX) vm_fault_dontneed(&fs, vaddr, nera); } else { /* * This is a non-sequential fault. */ nera = 0; } if (era != nera) { /* * A read lock on the map suffices to update * the read ahead count safely. */ fs.entry->read_ahead = nera; } /* * Prepare for unlocking the map. Save the map * entry's start and end addresses, which are used to * optimize the size of the pager operation below. * Even if the map entry's addresses change after * unlocking the map, using the saved addresses is * safe. */ e_start = fs.entry->start; e_end = fs.entry->end; } /* * Call the pager to retrieve the page if there is a chance * that the pager has it, and potentially retrieve additional * pages at the same time. */ if (fs.object->type != OBJT_DEFAULT) { /* * Release the map lock before locking the vnode or * sleeping in the pager. (If the current object has * a shadow, then an earlier iteration of this loop * may have already unlocked the map.) */ unlock_map(&fs); if (fs.object->type == OBJT_VNODE && (vp = fs.object->handle) != fs.vp) { /* * Perform an unlock in case the desired vnode * changed while the map was unlocked during a * retry. */ unlock_vp(&fs); locked = VOP_ISLOCKED(vp); if (locked != LK_EXCLUSIVE) locked = LK_SHARED; /* * We must not sleep acquiring the vnode lock * while we have the page exclusive busied or * the object's paging-in-progress count * incremented. Otherwise, we could deadlock. */ error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread); if (error != 0) { vhold(vp); release_page(&fs); unlock_and_deallocate(&fs); error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread); vdrop(vp); fs.vp = vp; KASSERT(error == 0, ("vm_fault: vget failed")); goto RetryFault; } fs.vp = vp; } KASSERT(fs.vp == NULL || !fs.map->system_map, ("vm_fault: vnode-backed object mapped by system map")); /* * Page in the requested page and hint the pager, * that it may bring up surrounding pages. */ if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM || P_KILLED(curproc)) { behind = 0; ahead = 0; } else { /* Is this a sequential fault? */ if (nera > 0) { behind = 0; ahead = nera; } else { /* * Request a cluster of pages that is * aligned to a VM_FAULT_READ_DEFAULT * page offset boundary within the * object. 
Alignment to a page offset * boundary is more likely to coincide * with the underlying file system * block than alignment to a virtual * address boundary. */ cluster_offset = fs.pindex % VM_FAULT_READ_DEFAULT; behind = ulmin(cluster_offset, atop(vaddr - e_start)); ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset; } ahead = ulmin(ahead, atop(e_end - vaddr) - 1); } rv = vm_pager_get_pages(fs.object, &fs.m, 1, &behind, &ahead); if (rv == VM_PAGER_OK) { faultcount = behind + 1 + ahead; hardfault = true; break; /* break to PAGE HAS BEEN FOUND */ } if (rv == VM_PAGER_ERROR) printf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm); /* * If an I/O error occurred or the requested page was * outside the range of the pager, clean up and return * an error. */ if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) { vm_page_lock(fs.m); if (fs.m->wire_count == 0) vm_page_free(fs.m); else vm_page_xunbusy_maybelocked(fs.m); vm_page_unlock(fs.m); fs.m = NULL; unlock_and_deallocate(&fs); return (rv == VM_PAGER_ERROR ? KERN_FAILURE : KERN_PROTECTION_FAILURE); } /* * The requested page does not exist at this object/ * offset. Remove the invalid page from the object, * waking up anyone waiting for it, and continue on to * the next object. However, if this is the top-level * object, we must leave the busy page in place to * prevent another process from rushing past us, and * inserting the page in that object at the same time * that we are. */ if (fs.object != fs.first_object) { vm_page_lock(fs.m); if (fs.m->wire_count == 0) vm_page_free(fs.m); else vm_page_xunbusy_maybelocked(fs.m); vm_page_unlock(fs.m); fs.m = NULL; } } /* * We get here if the object has default pager (or unwiring) * or the pager doesn't have the page. */ if (fs.object == fs.first_object) fs.first_m = fs.m; /* * Move on to the next object. Lock the next object before * unlocking the current one. */ next_object = fs.object->backing_object; if (next_object == NULL) { /* * If there's no object left, fill the page in the top * object with zeros. */ if (fs.object != fs.first_object) { vm_object_pip_wakeup(fs.object); VM_OBJECT_WUNLOCK(fs.object); fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; VM_OBJECT_WLOCK(fs.object); } fs.first_m = NULL; /* * Zero the page if necessary and mark it valid. */ if ((fs.m->flags & PG_ZERO) == 0) { pmap_zero_page(fs.m); } else { VM_CNT_INC(v_ozfod); } VM_CNT_INC(v_zfod); fs.m->valid = VM_PAGE_BITS_ALL; /* Don't try to prefault neighboring pages. */ faultcount = 1; break; /* break to PAGE HAS BEEN FOUND */ } else { KASSERT(fs.object != next_object, ("object loop %p", next_object)); VM_OBJECT_WLOCK(next_object); vm_object_pip_add(next_object, 1); if (fs.object != fs.first_object) vm_object_pip_wakeup(fs.object); fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset); VM_OBJECT_WUNLOCK(fs.object); fs.object = next_object; } } vm_page_assert_xbusied(fs.m); /* * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock * is held.] */ /* * If the page is being written, but isn't already owned by the * top-level object, we have to copy it into a new page owned by the * top-level object. */ if (fs.object != fs.first_object) { /* * We only really need to copy if we want to write it. */ if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { /* * This allows pages to be virtually copied from a * backing_object into the first_object, where the * backing object has no other refs to it, and cannot * gain any more refs. 
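[Editor's note] The read-ahead sizing above grows the window arithmetically with each consecutive sequential fault, capped at VM_FAULT_READ_AHEAD_MAX, while non-sequential faults fall back to a cluster aligned to VM_FAULT_READ_DEFAULT. The runnable sketch below traces that growth, matching the "# of sequential faults x (read ahead min + 1) + read ahead min" formula quoted in the comment; the MIN/MAX constants here are illustrative values, not the definitions from vm_map.h.

#include <stdio.h>

#define READ_AHEAD_MIN  7       /* illustrative; see VM_FAULT_READ_AHEAD_MIN */
#define READ_AHEAD_MAX  120     /* illustrative; see VM_FAULT_READ_AHEAD_MAX */

int
main(void)
{
        int era = 0, nera;

        /* Each consecutive sequential fault widens the window. */
        for (int fault = 1; fault <= 5; fault++) {
                nera = READ_AHEAD_MIN;
                if (era > 0) {
                        nera += era + 1;
                        if (nera > READ_AHEAD_MAX)
                                nera = READ_AHEAD_MAX;
                }
                printf("fault %d: read ahead %d pages\n", fault, nera);
                era = nera;     /* persisted in fs.entry->read_ahead */
        }
        return (0);
}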
Instead of a bcopy, we just * move the page from the backing object to the * first object. Note that we must mark the page * dirty in the first object so that it will go out * to swap when needed. */ is_first_object_locked = false; if ( /* * Only one shadow object */ (fs.object->shadow_count == 1) && /* * No COW refs, except us */ (fs.object->ref_count == 1) && /* * No one else can look this object up */ (fs.object->handle == NULL) && /* * No other ways to look the object up */ ((fs.object->type == OBJT_DEFAULT) || (fs.object->type == OBJT_SWAP)) && (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) && /* * We don't chase down the shadow chain */ fs.object == fs.first_object->backing_object) { vm_page_lock(fs.m); vm_page_remove(fs.m); vm_page_unlock(fs.m); vm_page_lock(fs.first_m); vm_page_replace_checked(fs.m, fs.first_object, fs.first_pindex, fs.first_m); vm_page_free(fs.first_m); vm_page_unlock(fs.first_m); vm_page_dirty(fs.m); #if VM_NRESERVLEVEL > 0 /* * Rename the reservation. */ vm_reserv_rename(fs.m, fs.first_object, fs.object, OFF_TO_IDX( fs.first_object->backing_object_offset)); #endif /* * Removing the page from the backing object * unbusied it. */ vm_page_xbusy(fs.m); fs.first_m = fs.m; fs.m = NULL; VM_CNT_INC(v_cow_optim); } else { /* * Oh, well, lets copy it. */ pmap_copy_page(fs.m, fs.first_m); fs.first_m->valid = VM_PAGE_BITS_ALL; if (wired && (fault_flags & VM_FAULT_WIRE) == 0) { vm_page_lock(fs.first_m); vm_page_wire(fs.first_m); vm_page_unlock(fs.first_m); vm_page_lock(fs.m); vm_page_unwire(fs.m, PQ_INACTIVE); vm_page_unlock(fs.m); } /* * We no longer need the old page or object. */ release_page(&fs); } /* * fs.object != fs.first_object due to above * conditional */ vm_object_pip_wakeup(fs.object); VM_OBJECT_WUNLOCK(fs.object); /* * Only use the new page below... */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; if (!is_first_object_locked) VM_OBJECT_WLOCK(fs.object); VM_CNT_INC(v_cow_faults); curthread->td_cow++; } else { prot &= ~VM_PROT_WRITE; } } /* * We must verify that the maps have not changed since our last * lookup. */ if (!fs.lookup_still_valid) { if (!vm_map_trylock_read(fs.map)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } fs.lookup_still_valid = true; if (fs.map->timestamp != fs.map_generation) { result = vm_map_lookup_locked(&fs.map, vaddr, fault_type, &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired); /* * If we don't need the page any longer, put it on the inactive * list (the easiest thing to do here). If no one needs it, * pageout will grab it eventually. */ if (result != KERN_SUCCESS) { release_page(&fs); unlock_and_deallocate(&fs); /* * If retry of map lookup would have blocked then * retry fault from start. */ if (result == KERN_FAILURE) goto RetryFault; return (result); } if ((retry_object != fs.first_object) || (retry_pindex != fs.first_pindex)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } /* * Check whether the protection has changed or the object has * been copied while we left the map unlocked. Changing from * read to write permission is OK - we leave the page * write-protected, and catch the write fault. Changing from * write to read permission means that we can't mark the page * write-enabled after all. */ prot &= retry_prot; } } /* * If the page was filled by a pager, save the virtual address that * should be faulted on next under a sequential access pattern to the * map entry. A read lock on the map suffices to update this address * safely. 
*/ if (hardfault) fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, true); vm_page_assert_xbusied(fs.m); /* * Page must be completely valid or it is not fit to * map into user space. vm_pager_get_pages() ensures this. */ KASSERT(fs.m->valid == VM_PAGE_BITS_ALL, ("vm_fault: page %p partially invalid", fs.m)); VM_OBJECT_WUNLOCK(fs.object); /* * Put this page into the physical map. We had to do the unlock above * because pmap_enter() may sleep. We don't put the page * back on the active queue until later so that the pageout daemon * won't find it (yet). */ pmap_enter(fs.map->pmap, vaddr, fs.m, prot, fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0); if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 && wired == 0) vm_fault_prefault(&fs, vaddr, faultcount > 0 ? behind : PFBAK, faultcount > 0 ? ahead : PFFOR); VM_OBJECT_WLOCK(fs.object); vm_page_lock(fs.m); /* * If the page is not wired down, then put it where the pageout daemon * can find it. */ if ((fault_flags & VM_FAULT_WIRE) != 0) { KASSERT(wired, ("VM_FAULT_WIRE && !wired")); vm_page_wire(fs.m); } else vm_page_activate(fs.m); if (m_hold != NULL) { *m_hold = fs.m; vm_page_hold(fs.m); } vm_page_unlock(fs.m); vm_page_xunbusy(fs.m); /* * Unlock everything, and return */ unlock_and_deallocate(&fs); if (hardfault) { VM_CNT_INC(v_io_faults); curthread->td_ru.ru_majflt++; #ifdef RACCT if (racct_enable && fs.object->type == OBJT_VNODE) { PROC_LOCK(curproc); if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { racct_add_force(curproc, RACCT_WRITEBPS, PAGE_SIZE + behind * PAGE_SIZE); racct_add_force(curproc, RACCT_WRITEIOPS, 1); } else { racct_add_force(curproc, RACCT_READBPS, PAGE_SIZE + ahead * PAGE_SIZE); racct_add_force(curproc, RACCT_READIOPS, 1); } PROC_UNLOCK(curproc); } #endif } else curthread->td_ru.ru_minflt++; return (KERN_SUCCESS); } /* * Speed up the reclamation of pages that precede the faulting pindex within * the first object of the shadow chain. Essentially, perform the equivalent * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes * the faulting pindex by the cluster size when the pages read by vm_fault() * cross a cluster-size boundary. The cluster size is the greater of the * smallest superpage size and VM_FAULT_DONTNEED_MIN. * * When "fs->first_object" is a shadow object, the pages in the backing object * that precede the faulting pindex are deactivated by vm_fault(). So, this * function must only be concerned with pages in the first object. */ static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) { vm_map_entry_t entry; vm_object_t first_object, object; vm_offset_t end, start; vm_page_t m, m_next; vm_pindex_t pend, pstart; vm_size_t size; object = fs->object; VM_OBJECT_ASSERT_WLOCKED(object); first_object = fs->first_object; if (first_object != object) { if (!VM_OBJECT_TRYWLOCK(first_object)) { VM_OBJECT_WUNLOCK(object); VM_OBJECT_WLOCK(first_object); VM_OBJECT_WLOCK(object); } } /* Neither fictitious nor unmanaged pages can be reclaimed. 
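[Editor's note] The vm_fault_dontneed() helper (doc comment above, body continuing below) applies MADV_DONTNEED-style advice to the cluster that precedes the faulting address once a sequential read reaches a cluster boundary, where the cluster size is the larger of the smallest superpage and VM_FAULT_DONTNEED_MIN. A simplified, runnable version of that boundary arithmetic, ignoring read-ahead and assuming a 2MB superpage:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define DONTNEED_MIN    1048576UL               /* VM_FAULT_DONTNEED_MIN */
#define SUPERPAGE_SIZE  (2UL * 1024 * 1024)     /* pagesizes[1], assumed amd64 */

int
main(void)
{
        unsigned long vaddr = 0x7f0000600000UL - PAGE_SIZE;
        unsigned long size, end;

        /* Cluster size: the larger of the smallest superpage and 1MB. */
        size = DONTNEED_MIN;
        if (SUPERPAGE_SIZE > size)
                size = SUPERPAGE_SIZE;

        end = vaddr & ~(size - 1);              /* rounddown2(vaddr, size) */
        printf("cluster [%#lx, %#lx), fault at %#lx\n", end, end + size, vaddr);
        /*
         * The fault sits at the end of its cluster, so the pages that
         * precede "end" become candidates for deactivation.
         */
        printf("advise previous cluster: %s\n",
            vaddr - end >= size - PAGE_SIZE ? "yes" : "no");
        return (0);
}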
*/ if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) { size = VM_FAULT_DONTNEED_MIN; if (MAXPAGESIZES > 1 && size < pagesizes[1]) size = pagesizes[1]; end = rounddown2(vaddr, size); if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) && (entry = fs->entry)->start < end) { if (end - entry->start < size) start = entry->start; else start = end - size; pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED); pstart = OFF_TO_IDX(entry->offset) + atop(start - entry->start); m_next = vm_page_find_least(first_object, pstart); pend = OFF_TO_IDX(entry->offset) + atop(end - entry->start); while ((m = m_next) != NULL && m->pindex < pend) { m_next = TAILQ_NEXT(m, listq); if (m->valid != VM_PAGE_BITS_ALL || vm_page_busied(m)) continue; /* * Don't clear PGA_REFERENCED, since it would * likely represent a reference by a different * process. * * Typically, at this point, prefetched pages * are still in the inactive queue. Only * pages that triggered page faults are in the * active queue. */ vm_page_lock(m); vm_page_deactivate(m); vm_page_unlock(m); } } } if (first_object != object) VM_OBJECT_WUNLOCK(first_object); } /* * vm_fault_prefault provides a quick way of clustering * pagefaults into a processes address space. It is a "cousin" * of vm_map_pmap_enter, except it runs at page fault time instead * of mmap time. */ static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, int backward, int forward) { pmap_t pmap; vm_map_entry_t entry; vm_object_t backing_object, lobject; vm_offset_t addr, starta; vm_pindex_t pindex; vm_page_t m; int i; pmap = fs->map->pmap; if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) return; entry = fs->entry; if (addra < backward * PAGE_SIZE) { starta = entry->start; } else { starta = addra - backward * PAGE_SIZE; if (starta < entry->start) starta = entry->start; } /* * Generate the sequence of virtual addresses that are candidates for * prefaulting in an outward spiral from the faulting virtual address, * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ... * If the candidate address doesn't have a backing physical page, then * the loop immediately terminates. */ for (i = 0; i < 2 * imax(backward, forward); i++) { addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE : PAGE_SIZE); if (addr > addra + forward * PAGE_SIZE) addr = 0; if (addr < starta || addr >= entry->end) continue; if (!pmap_is_prefaultable(pmap, addr)) continue; pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; lobject = entry->object.vm_object; VM_OBJECT_RLOCK(lobject); while ((m = vm_page_lookup(lobject, pindex)) == NULL && lobject->type == OBJT_DEFAULT && (backing_object = lobject->backing_object) != NULL) { KASSERT((lobject->backing_object_offset & PAGE_MASK) == 0, ("vm_fault_prefault: unaligned object offset")); pindex += lobject->backing_object_offset >> PAGE_SHIFT; VM_OBJECT_RLOCK(backing_object); VM_OBJECT_RUNLOCK(lobject); lobject = backing_object; } if (m == NULL) { VM_OBJECT_RUNLOCK(lobject); break; } if (m->valid == VM_PAGE_BITS_ALL && (m->flags & PG_FICTITIOUS) == 0) pmap_enter_quick(pmap, addr, m, entry->protection); VM_OBJECT_RUNLOCK(lobject); } } /* * Hold each of the physical pages that are mapped by the specified range of * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid * and allow the specified types of access, "prot". 
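[Editor's note] vm_fault_prefault() above visits candidate addresses in an outward spiral from the faulting address so the nearest neighbors are tried first; the index arithmetic is terse, so a runnable expansion of the generated offsets may help:

#include <stdio.h>

#define PAGE_SIZE 4096L

int
main(void)
{
        long addra = 0x200000;          /* faulting address (illustrative) */
        int backward = 3, forward = 3;

        for (int i = 0; i < 2 * (backward > forward ? backward : forward); i++) {
                long addr = addra + ((i >> 1) + 1) *
                    ((i & 1) == 0 ? -PAGE_SIZE : PAGE_SIZE);
                printf("%+ld ", (addr - addra) / PAGE_SIZE);
        }
        printf("\n");   /* prints: -1 +1 -2 +2 -3 +3 */
        return (0);
}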
If all of the implied * pages are successfully held, then the number of held pages is returned * together with pointers to those pages in the array "ma". However, if any * of the pages cannot be held, -1 is returned. */ int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, vm_prot_t prot, vm_page_t *ma, int max_count) { vm_offset_t end, va; vm_page_t *mp; int count; boolean_t pmap_failed; if (len == 0) return (0); end = round_page(addr + len); addr = trunc_page(addr); /* * Check for illegal addresses. */ if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map)) return (-1); if (atop(end - addr) > max_count) panic("vm_fault_quick_hold_pages: count > max_count"); count = atop(end - addr); /* * Most likely, the physical pages are resident in the pmap, so it is * faster to try pmap_extract_and_hold() first. */ pmap_failed = FALSE; for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { *mp = pmap_extract_and_hold(map->pmap, va, prot); if (*mp == NULL) pmap_failed = TRUE; else if ((prot & VM_PROT_WRITE) != 0 && (*mp)->dirty != VM_PAGE_BITS_ALL) { /* * Explicitly dirty the physical page. Otherwise, the * caller's changes may go unnoticed because they are * performed through an unmanaged mapping or by a DMA * operation. * * The object lock is not held here. * See vm_page_clear_dirty_mask(). */ vm_page_dirty(*mp); } } if (pmap_failed) { /* * One or more pages could not be held by the pmap. Either no * page was mapped at the specified virtual address or that * mapping had insufficient permissions. Attempt to fault in * and hold these pages. */ for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) if (*mp == NULL && vm_fault_hold(map, va, prot, VM_FAULT_NORMAL, mp) != KERN_SUCCESS) goto error; } return (count); error: for (mp = ma; mp < ma + count; mp++) if (*mp != NULL) { vm_page_lock(*mp); vm_page_unhold(*mp); vm_page_unlock(*mp); } return (-1); } /* * Routine: * vm_fault_copy_entry * Function: * Create new shadow object backing dst_entry with private copy of * all underlying pages. When src_entry is equal to dst_entry, * function implements COW for wired-down map entry. Otherwise, * it forks wired entry into dst_map. * * In/out conditions: * The source and destination maps must be locked for write. * The source map entry must be wired down (or be a sharing map * entry corresponding to a main map entry that is wired down). */ void vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, vm_map_entry_t dst_entry, vm_map_entry_t src_entry, vm_ooffset_t *fork_charge) { vm_object_t backing_object, dst_object, object, src_object; vm_pindex_t dst_pindex, pindex, src_pindex; vm_prot_t access, prot; vm_offset_t vaddr; vm_page_t dst_m; vm_page_t src_m; boolean_t upgrade; #ifdef lint src_map++; #endif /* lint */ upgrade = src_entry == dst_entry; access = prot = dst_entry->protection; src_object = src_entry->object.vm_object; src_pindex = OFF_TO_IDX(src_entry->offset); if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { dst_object = src_object; vm_object_reference(dst_object); } else { /* * Create the top-level object for the destination entry. (Doesn't * actually shadow anything - we copy the pages directly.) 
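[Editor's note] A typical consumer of vm_fault_quick_hold_pages() above pins the pages backing a user buffer for the duration of an I/O and then drops the holds. The sketch below follows that shape using only interfaces visible in this file; the helper name, the fixed-size page array, and the trimmed error handling are illustrative choices, not part of this change.

static int
hold_user_buffer(vm_map_t map, vm_offset_t uaddr, vm_size_t len)
{
        vm_page_t ma[8];
        int i, n;

        n = vm_fault_quick_hold_pages(map, uaddr, len, VM_PROT_WRITE, ma,
            nitems(ma));
        if (n == -1)
                return (EFAULT);

        /* ... the held pages may now be used for DMA or copy operations ... */

        for (i = 0; i < n; i++) {
                vm_page_lock(ma[i]);
                vm_page_unhold(ma[i]);
                vm_page_unlock(ma[i]);
        }
        return (0);
}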
*/ dst_object = vm_object_allocate(OBJT_DEFAULT, atop(dst_entry->end - dst_entry->start)); #if VM_NRESERVLEVEL > 0 dst_object->flags |= OBJ_COLORED; dst_object->pg_color = atop(dst_entry->start); #endif } VM_OBJECT_WLOCK(dst_object); KASSERT(upgrade || dst_entry->object.vm_object == NULL, ("vm_fault_copy_entry: vm_object not NULL")); if (src_object != dst_object) { + dst_object->domain = src_object->domain; dst_entry->object.vm_object = dst_object; dst_entry->offset = 0; dst_object->charge = dst_entry->end - dst_entry->start; } if (fork_charge != NULL) { KASSERT(dst_entry->cred == NULL, ("vm_fault_copy_entry: leaked swp charge")); dst_object->cred = curthread->td_ucred; crhold(dst_object->cred); *fork_charge += dst_object->charge; } else if (dst_object->cred == NULL) { KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", dst_entry)); dst_object->cred = dst_entry->cred; dst_entry->cred = NULL; } /* * If not an upgrade, then enter the mappings in the pmap as * read and/or execute accesses. Otherwise, enter them as * write accesses. * * A writeable large page mapping is only created if all of * the constituent small page mappings are modified. Marking * PTEs as modified on inception allows promotion to happen * without taking potentially large number of soft faults. */ if (!upgrade) access &= ~VM_PROT_WRITE; /* * Loop through all of the virtual pages within the entry's * range, copying each page from the source object to the * destination object. Since the source is wired, those pages * must exist. In contrast, the destination is pageable. * Since the destination object does share any backing storage * with the source object, all of its pages must be dirtied, * regardless of whether they can be written. */ for (vaddr = dst_entry->start, dst_pindex = 0; vaddr < dst_entry->end; vaddr += PAGE_SIZE, dst_pindex++) { again: /* * Find the page in the source object, and copy it in. * Because the source is wired down, the page will be * in memory. */ if (src_object != dst_object) VM_OBJECT_RLOCK(src_object); object = src_object; pindex = src_pindex + dst_pindex; while ((src_m = vm_page_lookup(object, pindex)) == NULL && (backing_object = object->backing_object) != NULL) { /* * Unless the source mapping is read-only or * it is presently being upgraded from * read-only, the first object in the shadow * chain should provide all of the pages. In * other words, this loop body should never be * executed when the source mapping is already * read/write. */ KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 || upgrade, ("vm_fault_copy_entry: main object missing page")); VM_OBJECT_RLOCK(backing_object); pindex += OFF_TO_IDX(object->backing_object_offset); if (object != dst_object) VM_OBJECT_RUNLOCK(object); object = backing_object; } KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing")); if (object != dst_object) { /* * Allocate a page in the destination object. */ dst_m = vm_page_alloc(dst_object, (src_object == dst_object ? src_pindex : 0) + dst_pindex, VM_ALLOC_NORMAL); if (dst_m == NULL) { VM_OBJECT_WUNLOCK(dst_object); VM_OBJECT_RUNLOCK(object); VM_WAIT; VM_OBJECT_WLOCK(dst_object); goto again; } pmap_copy_page(src_m, dst_m); VM_OBJECT_RUNLOCK(object); dst_m->valid = VM_PAGE_BITS_ALL; dst_m->dirty = VM_PAGE_BITS_ALL; } else { dst_m = src_m; if (vm_page_sleep_if_busy(dst_m, "fltupg")) goto again; vm_page_xbusy(dst_m); KASSERT(dst_m->valid == VM_PAGE_BITS_ALL, ("invalid dst page %p", dst_m)); } VM_OBJECT_WUNLOCK(dst_object); /* * Enter it in the pmap. 
If a wired, copy-on-write * mapping is being replaced by a write-enabled * mapping, then wire that new mapping. */ pmap_enter(dst_map->pmap, vaddr, dst_m, prot, access | (upgrade ? PMAP_ENTER_WIRED : 0), 0); /* * Mark it no longer busy, and put it on the active list. */ VM_OBJECT_WLOCK(dst_object); if (upgrade) { if (src_m != dst_m) { vm_page_lock(src_m); vm_page_unwire(src_m, PQ_INACTIVE); vm_page_unlock(src_m); vm_page_lock(dst_m); vm_page_wire(dst_m); vm_page_unlock(dst_m); } else { KASSERT(dst_m->wire_count > 0, ("dst_m %p is not wired", dst_m)); } } else { vm_page_lock(dst_m); vm_page_activate(dst_m); vm_page_unlock(dst_m); } vm_page_xunbusy(dst_m); } VM_OBJECT_WUNLOCK(dst_object); if (upgrade) { dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); vm_object_deallocate(src_object); } } /* * Block entry into the machine-independent layer's page fault handler by * the calling thread. Subsequent calls to vm_fault() by that thread will * return KERN_PROTECTION_FAILURE. Enable machine-dependent handling of * spurious page faults. */ int vm_fault_disable_pagefaults(void) { return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR)); } void vm_fault_enable_pagefaults(int save) { curthread_pflags_restore(save); } Index: user/jeff/numa/sys/vm/vm_kern.c =================================================================== --- user/jeff/numa/sys/vm/vm_kern.c (revision 326888) +++ user/jeff/numa/sys/vm/vm_kern.c (revision 326889) @@ -1,717 +1,687 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. 
* * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Kernel memory management. */ #include __FBSDID("$FreeBSD$"); #include "opt_vm.h" #include #include #include /* for ticks and hz */ +#include #include #include #include #include #include #include #include #include #include -#include #include +#include #include #include #include #include #include #include #include #include #include #include vm_map_t kernel_map; vm_map_t exec_map; vm_map_t pipe_map; const void *zero_region; CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0); /* NB: Used by kernel debuggers. */ const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS; u_int exec_map_entry_size; u_int exec_map_entries; SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD, SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address"); SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD, #if defined(__arm__) || defined(__sparc64__) &vm_max_kernel_address, 0, #else SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS, #endif "Max kernel address"); /* * kva_alloc: * * Allocate a virtual address range with no underlying object and * no initial mapping to physical memory. Any mapping from this * range to physical memory must be explicitly created prior to * its use, typically with pmap_qenter(). Any attempt to create * a mapping on demand through vm_fault() will result in a panic. */ vm_offset_t kva_alloc(vm_size_t size) { vm_offset_t addr; size = round_page(size); if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr)) return (0); return (addr); } /* * kva_free: * * Release a region of kernel virtual memory allocated * with kva_alloc, and return the physical pages * associated with that region. * * This routine may not block on kernel maps. */ void kva_free(vm_offset_t addr, vm_size_t size) { size = round_page(size); vmem_free(kernel_arena, addr, size); } /* * Allocates a region from the kernel address map and physical pages * within the specified address range to the kernel object. Creates a * wired mapping from this region to these pages, and returns the * region's starting virtual address. The allocated pages are not * necessarily physically contiguous. If M_ZERO is specified through the * given flags, then the pages are zeroed before they are mapped. 
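 *
 * A minimal usage sketch (illustrative only: the size, physical address
 * bounds and flags are arbitrary example values, and kernel_arena is the
 * only vmem accepted):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(kernel_arena, 64 * PAGE_SIZE,
 *	    M_WAITOK | M_ZERO, 0, ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
 *	if (va == 0)
 *		return (ENOMEM);
 *
 * kmem_alloc_attr() walks the NUMA domains supplied by
 * vm_domainset_iter_malloc() and calls kmem_alloc_attr_domain() for each
 * candidate domain until one of them satisfies the request.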
*/ vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) { vmem_t *vmem; vm_object_t object = kernel_object; vm_offset_t addr, i, offset; vm_page_t m; int pflags, tries; size = round_page(size); vmem = vm_dom[domain].vmd_kernel_arena; if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr)) return (0); offset = addr - VM_MIN_KERNEL_ADDRESS; pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); pflags |= VM_ALLOC_NOWAIT; VM_OBJECT_WLOCK(object); for (i = 0; i < size; i += PAGE_SIZE) { tries = 0; retry: m = vm_page_alloc_contig_domain(object, atop(offset + i), domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr); if (m == NULL) { VM_OBJECT_WUNLOCK(object); if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) { if (!vm_page_reclaim_contig_domain(domain, pflags, 1, low, high, PAGE_SIZE, 0) && (flags & M_WAITOK) != 0) VM_WAIT; VM_OBJECT_WLOCK(object); tries++; goto retry; } kmem_unback(object, addr, i); vmem_free(vmem, addr, size); return (0); } KASSERT(vm_phys_domidx(m) == domain, ("kmem_alloc_attr_domain: Domain mismatch %d != %d", vm_phys_domidx(m), domain)); if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, VM_PROT_ALL | PMAP_ENTER_WIRED, 0); } VM_OBJECT_WUNLOCK(object); return (addr); } vm_offset_t kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; vm_offset_t addr; - int domain, wait; + int domain; KASSERT(vmem == kernel_arena, ("kmem_alloc_attr: Only kernel_arena is supported.")); - addr = 0; - vm_policy_iterator_init(&vi); - wait = flags & M_WAITOK; - flags &= ~M_WAITOK; - flags |= M_NOWAIT; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi) && wait) { - flags |= wait; - flags &= ~M_NOWAIT; - } + + vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags); + do { addr = kmem_alloc_attr_domain(domain, size, flags, low, high, memattr); if (addr != 0) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0); return (addr); } /* * Allocates a region from the kernel address map and physically * contiguous pages within the specified address range to the kernel * object. Creates a wired mapping from this region to these pages, and * returns the region's starting virtual address. If M_ZERO is specified * through the given flags, then the pages are zeroed before they are * mapped. 
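 *
 * An illustrative sketch (the below-4GB bound, alignment and boundary are
 * hypothetical values chosen for the example; only kernel_arena is
 * accepted as the vmem argument):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_contig(kernel_arena, 16 * PAGE_SIZE,
 *	    M_NOWAIT | M_ZERO, 0, (vm_paddr_t)0xffffffff, PAGE_SIZE, 0,
 *	    VM_MEMATTR_DEFAULT);
 *	if (va == 0)
 *		return (ENOMEM);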
*/ vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { vmem_t *vmem; vm_object_t object = kernel_object; vm_offset_t addr, offset, tmp; vm_page_t end_m, m; u_long npages; int pflags, tries; size = round_page(size); vmem = vm_dom[domain].vmd_kernel_arena; if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr)) return (0); offset = addr - VM_MIN_KERNEL_ADDRESS; pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); pflags |= VM_ALLOC_NOWAIT; npages = atop(size); VM_OBJECT_WLOCK(object); tries = 0; retry: m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags, npages, low, high, alignment, boundary, memattr); if (m == NULL) { VM_OBJECT_WUNLOCK(object); if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) { if (!vm_page_reclaim_contig_domain(domain, pflags, npages, low, high, alignment, boundary) && (flags & M_WAITOK) != 0) VM_WAIT; VM_OBJECT_WLOCK(object); tries++; goto retry; } vmem_free(vmem, addr, size); return (0); } KASSERT(vm_phys_domidx(m) == domain, ("kmem_alloc_contig_domain: Domain mismatch %d != %d", vm_phys_domidx(m), domain)); end_m = m + npages; tmp = addr; for (; m < end_m; m++) { if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL, VM_PROT_ALL | PMAP_ENTER_WIRED, 0); tmp += PAGE_SIZE; } VM_OBJECT_WUNLOCK(object); return (addr); } vm_offset_t kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; vm_offset_t addr; - int domain, wait; + int domain; KASSERT(vmem == kernel_arena, ("kmem_alloc_contig: Only kernel_arena is supported.")); - addr = 0; - vm_policy_iterator_init(&vi); - wait = flags & M_WAITOK; - flags &= ~M_WAITOK; - flags |= M_NOWAIT; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi) && wait) { - flags |= wait; - flags &= ~M_NOWAIT; - } + + vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags); + do { addr = kmem_alloc_contig_domain(domain, size, flags, low, high, alignment, boundary, memattr); if (addr != 0) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0); return (addr); } /* * kmem_suballoc: * * Allocates a map to manage a subrange * of the kernel virtual address space. * * Arguments are as follows: * * parent Map to take range from * min, max Returned endpoints of map * size Size of range to find * superpage_align Request that min is superpage aligned */ vm_map_t kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max, vm_size_t size, boolean_t superpage_align) { int ret; vm_map_t result; size = round_page(size); *min = vm_map_min(parent); ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ? 
VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE); if (ret != KERN_SUCCESS) panic("kmem_suballoc: bad status return of %d", ret); *max = *min + size; result = vm_map_create(vm_map_pmap(parent), *min, *max); if (result == NULL) panic("kmem_suballoc: cannot create submap"); if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS) panic("kmem_suballoc: unable to change range to submap"); return (result); } /* * kmem_malloc: * * Allocate wired-down pages in the kernel's address space. */ vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags) { vmem_t *vmem; vm_offset_t addr; int rv; vmem = vm_dom[domain].vmd_kernel_arena; size = round_page(size); if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr)) return (0); rv = kmem_back_domain(domain, kernel_object, addr, size, flags); if (rv != KERN_SUCCESS) { vmem_free(vmem, addr, size); return (0); } return (addr); } vm_offset_t kmem_malloc(struct vmem *vmem, vm_size_t size, int flags) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; vm_offset_t addr; - int domain, wait; + int domain; KASSERT(vmem == kernel_arena, ("kmem_malloc: Only kernel_arena is supported.")); - addr = 0; - vm_policy_iterator_init(&vi); - wait = flags & M_WAITOK; - flags &= ~M_WAITOK; - flags |= M_NOWAIT; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi) && wait) { - flags |= wait; - flags &= ~M_NOWAIT; - } + + vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags); + do { addr = kmem_malloc_domain(domain, size, flags); if (addr != 0) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0); return (addr); } /* * kmem_back: * * Allocate physical pages for the specified virtual address range. */ int kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr, vm_size_t size, int flags) { vm_offset_t offset, i; vm_page_t m, mpred; int pflags; KASSERT(object == kernel_object, ("kmem_back_domain: only supports kernel object.")); offset = addr - VM_MIN_KERNEL_ADDRESS; pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED; pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); if (flags & M_WAITOK) pflags |= VM_ALLOC_WAITFAIL; i = 0; VM_OBJECT_WLOCK(object); retry: mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i)); for (; i < size; i += PAGE_SIZE, mpred = m) { m = vm_page_alloc_domain_after(object, atop(offset + i), domain, pflags, mpred); /* * Ran out of space, free everything up and return. Don't need * to lock page queues here as we know that the pages we got * aren't on any queues. 
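 *
 * When the caller passed M_WAITOK, VM_ALLOC_WAITFAIL was set above, so a
 * failed vm_page_alloc_domain_after() sleeps for more memory before
 * returning NULL; the retry below then re-looks up mpred and resumes at
 * the current offset rather than busy-waiting.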
*/ if (m == NULL) { if ((flags & M_NOWAIT) == 0) goto retry; VM_OBJECT_WUNLOCK(object); kmem_unback(object, addr, i); return (KERN_NO_SPACE); } KASSERT(vm_phys_domidx(m) == domain, ("kmem_back_domain: Domain mismatch %d != %d", vm_phys_domidx(m), domain)); if (flags & M_ZERO && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("kmem_malloc: page %p is managed", m)); m->valid = VM_PAGE_BITS_ALL; pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, VM_PROT_ALL | PMAP_ENTER_WIRED, 0); } VM_OBJECT_WUNLOCK(object); return (KERN_SUCCESS); } int kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags) { - struct vm_domain_iterator vi; - int domain, wait, ret; + struct vm_domainset_iter di; + int domain; + int ret; KASSERT(object == kernel_object, ("kmem_back: only supports kernel object.")); - ret = 0; - vm_policy_iterator_init(&vi); - wait = flags & M_WAITOK; - flags &= ~M_WAITOK; - flags |= M_NOWAIT; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi) && wait) { - flags |= wait; - flags &= ~M_NOWAIT; - } + + vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags); + do { ret = kmem_back_domain(domain, object, addr, size, flags); if (ret == KERN_SUCCESS) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0); return (ret); } /* * kmem_unback: * * Unmap and free the physical pages underlying the specified virtual * address range. * * A physical page must exist within the specified object at each index * that is being unmapped. */ static int _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size) { vm_page_t m, next; vm_offset_t end, offset; int domain; KASSERT(object == kernel_object, ("kmem_unback: only supports kernel object.")); if (size == 0) return (0); pmap_remove(kernel_pmap, addr, addr + size); offset = addr - VM_MIN_KERNEL_ADDRESS; end = offset + size; VM_OBJECT_WLOCK(object); m = vm_page_lookup(object, atop(offset)); domain = vm_phys_domidx(m); for (; offset < end; offset += PAGE_SIZE, m = next) { next = vm_page_next(m); vm_page_unwire(m, PQ_NONE); vm_page_free(m); } VM_OBJECT_WUNLOCK(object); return (domain); } void kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size) { _kmem_unback(object, addr, size); } /* * kmem_free: * * Free memory allocated with kmem_malloc. The size must match the * original allocation. */ void kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size) { int domain; KASSERT(vmem == kernel_arena, ("kmem_free: Only kernel_arena is supported.")); size = round_page(size); domain = _kmem_unback(kernel_object, addr, size); vmem_free(vm_dom[domain].vmd_kernel_arena, addr, size); } /* * kmap_alloc_wait: * * Allocates pageable memory from a sub-map of the kernel. If the submap * has no room, the caller sleeps waiting for more memory in the submap. * * This routine may block. */ vm_offset_t kmap_alloc_wait(vm_map_t map, vm_size_t size) { vm_offset_t addr; size = round_page(size); if (!swap_reserve(size)) return (0); for (;;) { /* * To make this work for more than one map, use the map's lock * to lock out sleepers/wakers. 
*/ vm_map_lock(map); if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0) break; /* no space now; see if we can ever get space */ if (vm_map_max(map) - vm_map_min(map) < size) { vm_map_unlock(map); swap_release(size); return (0); } map->needs_wakeup = TRUE; vm_map_unlock_and_wait(map, 0); } vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_CHARGED); vm_map_unlock(map); return (addr); } /* * kmap_free_wakeup: * * Returns memory to a submap of the kernel, and wakes up any processes * waiting for memory in that map. */ void kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size) { vm_map_lock(map); (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size)); if (map->needs_wakeup) { map->needs_wakeup = FALSE; vm_map_wakeup(map); } vm_map_unlock(map); } void kmem_init_zero_region(void) { vm_offset_t addr, i; vm_page_t m; /* * Map a single physical page of zeros to a larger virtual range. * This requires less looping in places that want large amounts of * zeros, while not using much more physical resources. */ addr = kva_alloc(ZERO_REGION_SIZE); m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE) pmap_qenter(addr + i, &m, 1); pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ); zero_region = (const void *)addr; } /* * kmem_init: * * Create the kernel map; insert a mapping covering kernel text, * data, bss, and all space allocated thus far (`boostrap' data). The * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and * `start' as allocated, and the range between `start' and `end' as free. */ void kmem_init(vm_offset_t start, vm_offset_t end) { vm_map_t m; m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end); m->system_map = 1; vm_map_lock(m); /* N.B.: cannot use kgdb to debug, starting with this assignment ... */ kernel_map = m; (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0, #ifdef __amd64__ KERNBASE, #else VM_MIN_KERNEL_ADDRESS, #endif start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); /* ... and ending with the completion of the above `insert' */ vm_map_unlock(m); } #ifdef DIAGNOSTIC /* * Allow userspace to directly trigger the VM drain routine for testing * purposes. */ static int debug_vm_lowmem(SYSCTL_HANDLER_ARGS) { int error, i; i = 0; error = sysctl_handle_int(oidp, &i, 0, req); if (error) return (error); if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0) return (EINVAL); if (i != 0) EVENTHANDLER_INVOKE(vm_lowmem, i); return (0); } SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0, debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags"); #endif Index: user/jeff/numa/sys/vm/vm_object.c =================================================================== --- user/jeff/numa/sys/vm/vm_object.c (revision 326888) +++ user/jeff/numa/sys/vm/vm_object.c (revision 326889) @@ -1,2713 +1,2716 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Virtual memory object module. */ #include __FBSDID("$FreeBSD$"); #include "opt_vm.h" #include #include +#include #include #include #include #include #include #include #include #include /* for curproc, pageproc */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int old_msync; SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0, "Use old (insecure) msync behavior"); static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, int flags, boolean_t *clearobjflags, boolean_t *eio); static boolean_t vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags); static void vm_object_qcollapse(vm_object_t object); static void vm_object_vndeallocate(vm_object_t object); /* * Virtual memory objects maintain the actual data * associated with allocated virtual memory. A given * page of memory exists within exactly one object. * * An object is only deallocated when all "references" * are given up. Only one "reference" to a given * region of an object should be writeable. 
* * Associated with each object is a list of all resident * memory pages belonging to that object; this list is * maintained by the "vm_page" module, and locked by the object's * lock. * * Each object also records a "pager" routine which is * used to retrieve (and store) pages to the proper backing * storage. In addition, objects may be backed by other * objects from which they were virtual-copied. * * The only items within the object structure which are * modified after time of creation are: * reference count locked by object's lock * pager routine locked by object's lock * */ struct object_q vm_object_list; struct mtx vm_object_list_mtx; /* lock for object list and count */ struct vm_object kernel_object_store; static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats"); static long object_collapses; SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD, &object_collapses, 0, "VM object collapses"); static long object_bypasses; SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD, &object_bypasses, 0, "VM object bypasses"); static uma_zone_t obj_zone; static int vm_object_zinit(void *mem, int size, int flags); #ifdef INVARIANTS static void vm_object_zdtor(void *mem, int size, void *arg); static void vm_object_zdtor(void *mem, int size, void *arg) { vm_object_t object; object = (vm_object_t)mem; KASSERT(object->ref_count == 0, ("object %p ref_count = %d", object, object->ref_count)); KASSERT(TAILQ_EMPTY(&object->memq), ("object %p has resident pages in its memq", object)); KASSERT(vm_radix_is_empty(&object->rtree), ("object %p has resident pages in its trie", object)); #if VM_NRESERVLEVEL > 0 KASSERT(LIST_EMPTY(&object->rvq), ("object %p has reservations", object)); #endif KASSERT(object->paging_in_progress == 0, ("object %p paging_in_progress = %d", object, object->paging_in_progress)); KASSERT(object->resident_page_count == 0, ("object %p resident_page_count = %d", object, object->resident_page_count)); KASSERT(object->shadow_count == 0, ("object %p shadow_count = %d", object, object->shadow_count)); KASSERT(object->type == OBJT_DEAD, ("object %p has non-dead type %d", object, object->type)); } #endif static int vm_object_zinit(void *mem, int size, int flags) { vm_object_t object; object = (vm_object_t)mem; rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW); /* These are true for any object that has been freed */ object->type = OBJT_DEAD; object->ref_count = 0; vm_radix_init(&object->rtree); object->paging_in_progress = 0; object->resident_page_count = 0; object->shadow_count = 0; object->flags = OBJ_DEAD; mtx_lock(&vm_object_list_mtx); TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); mtx_unlock(&vm_object_list_mtx); return (0); } static void _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) { TAILQ_INIT(&object->memq); LIST_INIT(&object->shadow_head); object->type = type; if (type == OBJT_SWAP) pctrie_init(&object->un_pager.swp.swp_blks); /* * Ensure that swap_pager_swapoff() iteration over object_list * sees up to date type and pctrie head if it observed * non-dead object. 
*/ atomic_thread_fence_rel(); switch (type) { case OBJT_DEAD: panic("_vm_object_allocate: can't create OBJT_DEAD"); case OBJT_DEFAULT: case OBJT_SWAP: object->flags = OBJ_ONEMAPPING; break; case OBJT_DEVICE: case OBJT_SG: object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED; break; case OBJT_MGTDEVICE: object->flags = OBJ_FICTITIOUS; break; case OBJT_PHYS: object->flags = OBJ_UNMANAGED; break; case OBJT_VNODE: object->flags = 0; break; default: panic("_vm_object_allocate: type %d is undefined", type); } object->size = size; object->generation = 1; object->ref_count = 1; object->memattr = VM_MEMATTR_DEFAULT; object->cred = NULL; object->charge = 0; object->handle = NULL; object->backing_object = NULL; object->backing_object_offset = (vm_ooffset_t) 0; #if VM_NRESERVLEVEL > 0 LIST_INIT(&object->rvq); #endif umtx_shm_object_init(object); } /* * vm_object_init: * * Initialize the VM objects module. */ void vm_object_init(void) { TAILQ_INIT(&vm_object_list); mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); rw_init(&kernel_object->lock, "kernel vm object"); _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), kernel_object); #if VM_NRESERVLEVEL > 0 kernel_object->flags |= OBJ_COLORED; kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); #endif /* * The lock portion of struct vm_object must be type stable due * to vm_pageout_fallback_object_lock locking a vm object * without holding any references to it. */ obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, #ifdef INVARIANTS vm_object_zdtor, #else NULL, #endif vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); vm_radix_zinit(); } void vm_object_clear_flag(vm_object_t object, u_short bits) { VM_OBJECT_ASSERT_WLOCKED(object); object->flags &= ~bits; } /* * Sets the default memory attribute for the specified object. Pages * that are allocated to this object are by default assigned this memory * attribute. * * Presently, this function must be called before any pages are allocated * to the object. In the future, this requirement may be relaxed for * "default" and "swap" objects. 
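 *
 * A usage sketch (VM_MEMATTR_UNCACHEABLE and "rv" are illustrative; the
 * attribute names are machine-dependent and the object lock must be
 * held across the call):
 *
 *	VM_OBJECT_WLOCK(object);
 *	rv = vm_object_set_memattr(object, VM_MEMATTR_UNCACHEABLE);
 *	VM_OBJECT_WUNLOCK(object);
 *	if (rv != KERN_SUCCESS)
 *		...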
*/ int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) { VM_OBJECT_ASSERT_WLOCKED(object); switch (object->type) { case OBJT_DEFAULT: case OBJT_DEVICE: case OBJT_MGTDEVICE: case OBJT_PHYS: case OBJT_SG: case OBJT_SWAP: case OBJT_VNODE: if (!TAILQ_EMPTY(&object->memq)) return (KERN_FAILURE); break; case OBJT_DEAD: return (KERN_INVALID_ARGUMENT); default: panic("vm_object_set_memattr: object %p is of undefined type", object); } object->memattr = memattr; return (KERN_SUCCESS); } void vm_object_pip_add(vm_object_t object, short i) { VM_OBJECT_ASSERT_WLOCKED(object); object->paging_in_progress += i; } void vm_object_pip_subtract(vm_object_t object, short i) { VM_OBJECT_ASSERT_WLOCKED(object); object->paging_in_progress -= i; } void vm_object_pip_wakeup(vm_object_t object) { VM_OBJECT_ASSERT_WLOCKED(object); object->paging_in_progress--; if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { vm_object_clear_flag(object, OBJ_PIPWNT); wakeup(object); } } void vm_object_pip_wakeupn(vm_object_t object, short i) { VM_OBJECT_ASSERT_WLOCKED(object); if (i) object->paging_in_progress -= i; if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { vm_object_clear_flag(object, OBJ_PIPWNT); wakeup(object); } } void vm_object_pip_wait(vm_object_t object, char *waitid) { VM_OBJECT_ASSERT_WLOCKED(object); while (object->paging_in_progress) { object->flags |= OBJ_PIPWNT; VM_OBJECT_SLEEP(object, object, PVM, waitid, 0); } } /* * vm_object_allocate: * * Returns a new object with the given size. */ vm_object_t vm_object_allocate(objtype_t type, vm_pindex_t size) { vm_object_t object; object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); _vm_object_allocate(type, size, object); return (object); } /* * vm_object_reference: * * Gets another reference to the given object. Note: OBJ_DEAD * objects can be referenced during final cleaning. */ void vm_object_reference(vm_object_t object) { if (object == NULL) return; VM_OBJECT_WLOCK(object); vm_object_reference_locked(object); VM_OBJECT_WUNLOCK(object); } /* * vm_object_reference_locked: * * Gets another reference to the given object. * * The object must be locked. */ void vm_object_reference_locked(vm_object_t object) { struct vnode *vp; VM_OBJECT_ASSERT_WLOCKED(object); object->ref_count++; if (object->type == OBJT_VNODE) { vp = object->handle; vref(vp); } } /* * Handle deallocating an object of type OBJT_VNODE. */ static void vm_object_vndeallocate(vm_object_t object) { struct vnode *vp = (struct vnode *) object->handle; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_VNODE, ("vm_object_vndeallocate: not a vnode object")); KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp")); #ifdef INVARIANTS if (object->ref_count == 0) { vn_printf(vp, "vm_object_vndeallocate "); panic("vm_object_vndeallocate: bad object reference count"); } #endif if (!umtx_shm_vnobj_persistent && object->ref_count == 1) umtx_shm_object_terminated(object); /* * The test for text of vp vnode does not need a bypass to * reach right VV_TEXT there, since it is obtained from * object->handle. */ if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) { object->ref_count--; VM_OBJECT_WUNLOCK(object); /* vrele may need the vnode lock. 
*/ vrele(vp); } else { vhold(vp); VM_OBJECT_WUNLOCK(object); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vdrop(vp); VM_OBJECT_WLOCK(object); object->ref_count--; if (object->type == OBJT_DEAD) { VM_OBJECT_WUNLOCK(object); VOP_UNLOCK(vp, 0); } else { if (object->ref_count == 0) VOP_UNSET_TEXT(vp); VM_OBJECT_WUNLOCK(object); vput(vp); } } } /* * vm_object_deallocate: * * Release a reference to the specified object, * gained either through a vm_object_allocate * or a vm_object_reference call. When all references * are gone, storage associated with this object * may be relinquished. * * No object may be locked. */ void vm_object_deallocate(vm_object_t object) { vm_object_t temp; struct vnode *vp; while (object != NULL) { VM_OBJECT_WLOCK(object); if (object->type == OBJT_VNODE) { vm_object_vndeallocate(object); return; } KASSERT(object->ref_count != 0, ("vm_object_deallocate: object deallocated too many times: %d", object->type)); /* * If the reference count goes to 0 we start calling * vm_object_terminate() on the object chain. * A ref count of 1 may be a special case depending on the * shadow count being 0 or 1. */ object->ref_count--; if (object->ref_count > 1) { VM_OBJECT_WUNLOCK(object); return; } else if (object->ref_count == 1) { if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0) { vp = object->un_pager.swp.swp_tmpfs; vhold(vp); VM_OBJECT_WUNLOCK(object); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); VM_OBJECT_WLOCK(object); if (object->type == OBJT_DEAD || object->ref_count != 1) { VM_OBJECT_WUNLOCK(object); VOP_UNLOCK(vp, 0); vdrop(vp); return; } if ((object->flags & OBJ_TMPFS) != 0) VOP_UNSET_TEXT(vp); VOP_UNLOCK(vp, 0); vdrop(vp); } if (object->shadow_count == 0 && object->handle == NULL && (object->type == OBJT_DEFAULT || (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS_NODE) == 0))) { vm_object_set_flag(object, OBJ_ONEMAPPING); } else if ((object->shadow_count == 1) && (object->handle == NULL) && (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { vm_object_t robject; robject = LIST_FIRST(&object->shadow_head); KASSERT(robject != NULL, ("vm_object_deallocate: ref_count: %d, shadow_count: %d", object->ref_count, object->shadow_count)); KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0, ("shadowed tmpfs v_object %p", object)); if (!VM_OBJECT_TRYWLOCK(robject)) { /* * Avoid a potential deadlock. */ object->ref_count++; VM_OBJECT_WUNLOCK(object); /* * More likely than not the thread * holding robject's lock has lower * priority than the current thread. * Let the lower priority thread run. */ pause("vmo_de", 1); continue; } /* * Collapse object into its shadow unless its * shadow is dead. In that case, object will * be deallocated by the thread that is * deallocating its shadow. 
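 *
 * The reference acquired on robject below keeps the shadow from being
 * freed while this thread drops and reacquires the object locks to wait
 * for pending paging activity to drain.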
*/ if ((robject->flags & OBJ_DEAD) == 0 && (robject->handle == NULL) && (robject->type == OBJT_DEFAULT || robject->type == OBJT_SWAP)) { robject->ref_count++; retry: if (robject->paging_in_progress) { VM_OBJECT_WUNLOCK(object); vm_object_pip_wait(robject, "objde1"); temp = robject->backing_object; if (object == temp) { VM_OBJECT_WLOCK(object); goto retry; } } else if (object->paging_in_progress) { VM_OBJECT_WUNLOCK(robject); object->flags |= OBJ_PIPWNT; VM_OBJECT_SLEEP(object, object, PDROP | PVM, "objde2", 0); VM_OBJECT_WLOCK(robject); temp = robject->backing_object; if (object == temp) { VM_OBJECT_WLOCK(object); goto retry; } } else VM_OBJECT_WUNLOCK(object); if (robject->ref_count == 1) { robject->ref_count--; object = robject; goto doterm; } object = robject; vm_object_collapse(object); VM_OBJECT_WUNLOCK(object); continue; } VM_OBJECT_WUNLOCK(robject); } VM_OBJECT_WUNLOCK(object); return; } doterm: umtx_shm_object_terminated(object); temp = object->backing_object; if (temp != NULL) { KASSERT((object->flags & OBJ_TMPFS_NODE) == 0, ("shadowed tmpfs v_object 2 %p", object)); VM_OBJECT_WLOCK(temp); LIST_REMOVE(object, shadow_list); temp->shadow_count--; VM_OBJECT_WUNLOCK(temp); object->backing_object = NULL; } /* * Don't double-terminate, we could be in a termination * recursion due to the terminate having to sync data * to disk. */ if ((object->flags & OBJ_DEAD) == 0) vm_object_terminate(object); else VM_OBJECT_WUNLOCK(object); object = temp; } } /* * vm_object_destroy removes the object from the global object list * and frees the space for the object. */ void vm_object_destroy(vm_object_t object) { /* * Release the allocation charge. */ if (object->cred != NULL) { swap_release_by_cred(object->charge, object->cred); object->charge = 0; crfree(object->cred); object->cred = NULL; } /* * Free the space for the object. */ uma_zfree(obj_zone, object); } /* * vm_object_terminate_pages removes any remaining pageable pages * from the object and resets the object to an empty state. */ static void vm_object_terminate_pages(vm_object_t object) { vm_page_t p, p_next; struct mtx *mtx, *mtx1; struct vm_pagequeue *pq, *pq1; int dequeued; VM_OBJECT_ASSERT_WLOCKED(object); mtx = NULL; pq = NULL; /* * Free any remaining pageable pages. This also removes them from the * paging queues. However, don't free wired pages, just remove them * from the object. Rather than incrementally removing each page from * the object, the page and object are reset to any empty state. */ TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) { vm_page_assert_unbusied(p); if ((object->flags & OBJ_UNMANAGED) == 0) { /* * vm_page_free_prep() only needs the page * lock for managed pages. 
*/ mtx1 = vm_page_lockptr(p); if (mtx1 != mtx) { if (mtx != NULL) mtx_unlock(mtx); if (pq != NULL) { vm_pagequeue_cnt_add(pq, dequeued); vm_pagequeue_unlock(pq); pq = NULL; } mtx = mtx1; mtx_lock(mtx); } } p->object = NULL; if (p->wire_count != 0) goto unlist; VM_CNT_INC(v_pfree); p->flags &= ~PG_ZERO; if (p->queue != PQ_NONE) { KASSERT(p->queue < PQ_COUNT, ("vm_object_terminate: " "page %p is not queued", p)); pq1 = vm_page_pagequeue(p); if (pq != pq1) { if (pq != NULL) { vm_pagequeue_cnt_add(pq, dequeued); vm_pagequeue_unlock(pq); } pq = pq1; vm_pagequeue_lock(pq); dequeued = 0; } p->queue = PQ_NONE; TAILQ_REMOVE(&pq->pq_pl, p, plinks.q); dequeued--; } if (vm_page_free_prep(p, true)) continue; unlist: TAILQ_REMOVE(&object->memq, p, listq); } if (pq != NULL) { vm_pagequeue_cnt_add(pq, dequeued); vm_pagequeue_unlock(pq); } if (mtx != NULL) mtx_unlock(mtx); vm_page_free_phys_pglist(&object->memq); /* * If the object contained any pages, then reset it to an empty state. * None of the object's fields, including "resident_page_count", were * modified by the preceding loop. */ if (object->resident_page_count != 0) { vm_radix_reclaim_allnodes(&object->rtree); TAILQ_INIT(&object->memq); object->resident_page_count = 0; if (object->type == OBJT_VNODE) vdrop(object->handle); } } /* * vm_object_terminate actually destroys the specified object, freeing * up all previously used resources. * * The object must be locked. * This routine may block. */ void vm_object_terminate(vm_object_t object) { VM_OBJECT_ASSERT_WLOCKED(object); /* * Make sure no one uses us. */ vm_object_set_flag(object, OBJ_DEAD); /* * wait for the pageout daemon to be done with the object */ vm_object_pip_wait(object, "objtrm"); KASSERT(!object->paging_in_progress, ("vm_object_terminate: pageout in progress")); /* * Clean and free the pages, as appropriate. All references to the * object are gone, so we don't need to lock it. */ if (object->type == OBJT_VNODE) { struct vnode *vp = (struct vnode *)object->handle; /* * Clean pages and flush buffers. */ vm_object_page_clean(object, 0, 0, OBJPC_SYNC); VM_OBJECT_WUNLOCK(object); vinvalbuf(vp, V_SAVE, 0, 0); BO_LOCK(&vp->v_bufobj); vp->v_bufobj.bo_flag |= BO_DEAD; BO_UNLOCK(&vp->v_bufobj); VM_OBJECT_WLOCK(object); } KASSERT(object->ref_count == 0, ("vm_object_terminate: object with references, ref_count=%d", object->ref_count)); if ((object->flags & OBJ_PG_DTOR) == 0) vm_object_terminate_pages(object); #if VM_NRESERVLEVEL > 0 if (__predict_false(!LIST_EMPTY(&object->rvq))) vm_reserv_break_all(object); #endif KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT || object->type == OBJT_SWAP, ("%s: non-swap obj %p has cred", __func__, object)); /* * Let the pager know object is dead. */ vm_pager_deallocate(object); VM_OBJECT_WUNLOCK(object); vm_object_destroy(object); } /* * Make the page read-only so that we can clear the object flags. However, if * this is a nosync mmap then the object is likely to stay dirty so do not * mess with the page and do not clear the object flags. Returns TRUE if the * page should be flushed, and FALSE otherwise. */ static boolean_t vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags) { /* * If we have been asked to skip nosync pages and this is a * nosync page, skip it. Note that the object flags were not * cleared in this case so we do not have to set them. 
*/ if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) { *clearobjflags = FALSE; return (FALSE); } else { pmap_remove_write(p); return (p->dirty != 0); } } /* * vm_object_page_clean * * Clean all dirty pages in the specified range of object. Leaves page * on whatever queue it is currently on. If NOSYNC is set then do not * write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC), * leaving the object dirty. * * When stuffing pages asynchronously, allow clustering. XXX we need a * synchronous clustering mode implementation. * * Odd semantics: if start == end, we clean everything. * * The object must be locked. * * Returns FALSE if some page from the range was not written, as * reported by the pager, and TRUE otherwise. */ boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, int flags) { vm_page_t np, p; vm_pindex_t pi, tend, tstart; int curgeneration, n, pagerflags; boolean_t clearobjflags, eio, res; VM_OBJECT_ASSERT_WLOCKED(object); /* * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE * objects. The check below prevents the function from * operating on non-vnode objects. */ if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 || object->resident_page_count == 0) return (TRUE); pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0; tstart = OFF_TO_IDX(start); tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); clearobjflags = tstart == 0 && tend >= object->size; res = TRUE; rescan: curgeneration = object->generation; for (p = vm_page_find_least(object, tstart); p != NULL; p = np) { pi = p->pindex; if (pi >= tend) break; np = TAILQ_NEXT(p, listq); if (p->valid == 0) continue; if (vm_page_sleep_if_busy(p, "vpcwai")) { if (object->generation != curgeneration) { if ((flags & OBJPC_SYNC) != 0) goto rescan; else clearobjflags = FALSE; } np = vm_page_find_least(object, pi); continue; } if (!vm_object_page_remove_write(p, flags, &clearobjflags)) continue; n = vm_object_page_collect_flush(object, p, pagerflags, flags, &clearobjflags, &eio); if (eio) { res = FALSE; clearobjflags = FALSE; } if (object->generation != curgeneration) { if ((flags & OBJPC_SYNC) != 0) goto rescan; else clearobjflags = FALSE; } /* * If the VOP_PUTPAGES() did a truncated write, so * that even the first page of the run is not fully * written, vm_pageout_flush() returns 0 as the run * length. Since the condition that caused truncated * write may be permanent, e.g. exhausted free space, * accepting n == 0 would cause an infinite loop. * * Forwarding the iterator leaves the unwritten page * behind, but there is not much we can do there if * filesystem refuses to write it. */ if (n == 0) { n = 1; clearobjflags = FALSE; } np = vm_page_find_least(object, pi + n); } #if 0 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? 
MNT_WAIT : 0); #endif if (clearobjflags) vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY); return (res); } static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, int flags, boolean_t *clearobjflags, boolean_t *eio) { vm_page_t ma[vm_pageout_page_count], p_first, tp; int count, i, mreq, runlen; vm_page_lock_assert(p, MA_NOTOWNED); VM_OBJECT_ASSERT_WLOCKED(object); count = 1; mreq = 0; for (tp = p; count < vm_pageout_page_count; count++) { tp = vm_page_next(tp); if (tp == NULL || vm_page_busied(tp)) break; if (!vm_object_page_remove_write(tp, flags, clearobjflags)) break; } for (p_first = p; count < vm_pageout_page_count; count++) { tp = vm_page_prev(p_first); if (tp == NULL || vm_page_busied(tp)) break; if (!vm_object_page_remove_write(tp, flags, clearobjflags)) break; p_first = tp; mreq++; } for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++) ma[i] = tp; vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio); return (runlen); } /* * Note that there is absolutely no sense in writing out * anonymous objects, so we track down the vnode object * to write out. * We invalidate (remove) all pages from the address space * for semantic correctness. * * If the backing object is a device object with unmanaged pages, then any * mappings to the specified range of pages must be removed before this * function is called. * * Note: certain anonymous maps, such as MAP_NOSYNC maps, * may start out with a NULL object. */ boolean_t vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, boolean_t syncio, boolean_t invalidate) { vm_object_t backing_object; struct vnode *vp; struct mount *mp; int error, flags, fsync_after; boolean_t res; if (object == NULL) return (TRUE); res = TRUE; error = 0; VM_OBJECT_WLOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_WLOCK(backing_object); offset += object->backing_object_offset; VM_OBJECT_WUNLOCK(object); object = backing_object; if (object->size < OFF_TO_IDX(offset + size)) size = IDX_TO_OFF(object->size) - offset; } /* * Flush pages if writing is allowed, invalidate them * if invalidation requested. Pages undergoing I/O * will be ignored by vm_object_page_remove(). * * We cannot lock the vnode and then wait for paging * to complete without deadlocking against vm_fault. * Instead we simply call vm_object_page_remove() and * allow it to block internally on a page-by-page * basis when it encounters pages undergoing async * I/O. */ if (object->type == OBJT_VNODE && (object->flags & OBJ_MIGHTBEDIRTY) != 0 && ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { VM_OBJECT_WUNLOCK(object); (void) vn_start_write(vp, &mp, V_WAIT); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (syncio && !invalidate && offset == 0 && atop(size) == object->size) { /* * If syncing the whole mapping of the file, * it is faster to schedule all the writes in * async mode, also allowing the clustering, * and then wait for i/o to complete. */ flags = 0; fsync_after = TRUE; } else { flags = (syncio || invalidate) ? OBJPC_SYNC : 0; flags |= invalidate ? 
(OBJPC_SYNC | OBJPC_INVAL) : 0; fsync_after = FALSE; } VM_OBJECT_WLOCK(object); res = vm_object_page_clean(object, offset, offset + size, flags); VM_OBJECT_WUNLOCK(object); if (fsync_after) error = VOP_FSYNC(vp, MNT_WAIT, curthread); VOP_UNLOCK(vp, 0); vn_finished_write(mp); if (error != 0) res = FALSE; VM_OBJECT_WLOCK(object); } if ((object->type == OBJT_VNODE || object->type == OBJT_DEVICE) && invalidate) { if (object->type == OBJT_DEVICE) /* * The option OBJPR_NOTMAPPED must be passed here * because vm_object_page_remove() cannot remove * unmanaged mappings. */ flags = OBJPR_NOTMAPPED; else if (old_msync) flags = 0; else flags = OBJPR_CLEANONLY; vm_object_page_remove(object, OFF_TO_IDX(offset), OFF_TO_IDX(offset + size + PAGE_MASK), flags); } VM_OBJECT_WUNLOCK(object); return (res); } /* * Determine whether the given advice can be applied to the object. Advice is * not applied to unmanaged pages since they never belong to page queues, and * since MADV_FREE is destructive, it can apply only to anonymous pages that * have been mapped at most once. */ static bool vm_object_advice_applies(vm_object_t object, int advice) { if ((object->flags & OBJ_UNMANAGED) != 0) return (false); if (advice != MADV_FREE) return (true); return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) && (object->flags & OBJ_ONEMAPPING) != 0); } static void vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, vm_size_t size) { if (advice == MADV_FREE && object->type == OBJT_SWAP) swap_pager_freespace(object, pindex, size); } /* * vm_object_madvise: * * Implements the madvise function at the object/page level. * * MADV_WILLNEED (any object) * * Activate the specified pages if they are resident. * * MADV_DONTNEED (any object) * * Deactivate the specified pages if they are resident. * * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, * OBJ_ONEMAPPING only) * * Deactivate and clean the specified pages if they are * resident. This permits the process to reuse the pages * without faulting or the kernel to reclaim the pages * without I/O. */ void vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, int advice) { vm_pindex_t tpindex; vm_object_t backing_object, tobject; vm_page_t m, tm; if (object == NULL) return; relookup: VM_OBJECT_WLOCK(object); if (!vm_object_advice_applies(object, advice)) { VM_OBJECT_WUNLOCK(object); return; } for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { tobject = object; /* * If the next page isn't resident in the top-level object, we * need to search the shadow chain. When applying MADV_FREE, we * take care to release any swap space used to store * non-resident pages. */ if (m == NULL || pindex < m->pindex) { /* * Optimize a common case: if the top-level object has * no backing object, we can skip over the non-resident * range in constant time. */ if (object->backing_object == NULL) { tpindex = (m != NULL && m->pindex < end) ? m->pindex : end; vm_object_madvise_freespace(object, advice, pindex, tpindex - pindex); if ((pindex = tpindex) == end) break; goto next_page; } tpindex = pindex; do { vm_object_madvise_freespace(tobject, advice, tpindex, 1); /* * Prepare to search the next object in the * chain. 
*/ backing_object = tobject->backing_object; if (backing_object == NULL) goto next_pindex; VM_OBJECT_WLOCK(backing_object); tpindex += OFF_TO_IDX(tobject->backing_object_offset); if (tobject != object) VM_OBJECT_WUNLOCK(tobject); tobject = backing_object; if (!vm_object_advice_applies(tobject, advice)) goto next_pindex; } while ((tm = vm_page_lookup(tobject, tpindex)) == NULL); } else { next_page: tm = m; m = TAILQ_NEXT(m, listq); } /* * If the page is not in a normal state, skip it. */ if (tm->valid != VM_PAGE_BITS_ALL) goto next_pindex; vm_page_lock(tm); if (tm->hold_count != 0 || tm->wire_count != 0) { vm_page_unlock(tm); goto next_pindex; } KASSERT((tm->flags & PG_FICTITIOUS) == 0, ("vm_object_madvise: page %p is fictitious", tm)); KASSERT((tm->oflags & VPO_UNMANAGED) == 0, ("vm_object_madvise: page %p is not managed", tm)); if (vm_page_busied(tm)) { if (object != tobject) VM_OBJECT_WUNLOCK(tobject); VM_OBJECT_WUNLOCK(object); if (advice == MADV_WILLNEED) { /* * Reference the page before unlocking and * sleeping so that the page daemon is less * likely to reclaim it. */ vm_page_aflag_set(tm, PGA_REFERENCED); } vm_page_busy_sleep(tm, "madvpo", false); goto relookup; } vm_page_advise(tm, advice); vm_page_unlock(tm); vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); next_pindex: if (tobject != object) VM_OBJECT_WUNLOCK(tobject); } VM_OBJECT_WUNLOCK(object); } /* * vm_object_shadow: * * Create a new object which is backed by the * specified existing object range. The source * object reference is deallocated. * * The new object and offset into that object * are returned in the source parameters. */ void vm_object_shadow( vm_object_t *object, /* IN/OUT */ vm_ooffset_t *offset, /* IN/OUT */ vm_size_t length) { vm_object_t source; vm_object_t result; source = *object; /* * Don't create the new object if the old object isn't shared. */ if (source != NULL) { VM_OBJECT_WLOCK(source); if (source->ref_count == 1 && source->handle == NULL && (source->type == OBJT_DEFAULT || source->type == OBJT_SWAP)) { VM_OBJECT_WUNLOCK(source); return; } VM_OBJECT_WUNLOCK(source); } /* * Allocate a new object with the given length. */ result = vm_object_allocate(OBJT_DEFAULT, atop(length)); /* * The new object shadows the source object, adding a reference to it. * Our caller changes his reference to point to the new object, * removing a reference to the source object. Net result: no change * of reference count. * * Try to optimize the result object's page color when shadowing * in order to maintain page coloring consistency in the combined * shadowed object. */ result->backing_object = source; /* * Store the offset into the source object, and fix up the offset into * the new object. */ result->backing_object_offset = *offset; if (source != NULL) { VM_OBJECT_WLOCK(source); + result->domain = source->domain; LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); source->shadow_count++; #if VM_NRESERVLEVEL > 0 result->flags |= source->flags & OBJ_COLORED; result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - 1)) - 1); #endif VM_OBJECT_WUNLOCK(source); } /* * Return the new things */ *offset = 0; *object = result; } /* * vm_object_split: * * Split the pages in a map entry into a new object. This affords * easier removal of unused pages, and keeps object inheritance from * being a negative impact on memory usage. 
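 *
 * The replacement object created here inherits the original object's
 * NUMA allocation policy (note the assignment of new_object->domain
 * below) along with its swap accounting, so splitting an entry does not
 * change where future pages for that range will be allocated.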
*/ void vm_object_split(vm_map_entry_t entry) { vm_page_t m, m_next; vm_object_t orig_object, new_object, source; vm_pindex_t idx, offidxstart; vm_size_t size; orig_object = entry->object.vm_object; if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) return; if (orig_object->ref_count <= 1) return; VM_OBJECT_WUNLOCK(orig_object); offidxstart = OFF_TO_IDX(entry->offset); size = atop(entry->end - entry->start); /* * If swap_pager_copy() is later called, it will convert new_object * into a swap object. */ new_object = vm_object_allocate(OBJT_DEFAULT, size); /* * At this point, the new object is still private, so the order in * which the original and new objects are locked does not matter. */ VM_OBJECT_WLOCK(new_object); VM_OBJECT_WLOCK(orig_object); + new_object->domain = orig_object->domain; source = orig_object->backing_object; if (source != NULL) { VM_OBJECT_WLOCK(source); if ((source->flags & OBJ_DEAD) != 0) { VM_OBJECT_WUNLOCK(source); VM_OBJECT_WUNLOCK(orig_object); VM_OBJECT_WUNLOCK(new_object); vm_object_deallocate(new_object); VM_OBJECT_WLOCK(orig_object); return; } LIST_INSERT_HEAD(&source->shadow_head, new_object, shadow_list); source->shadow_count++; vm_object_reference_locked(source); /* for new_object */ vm_object_clear_flag(source, OBJ_ONEMAPPING); VM_OBJECT_WUNLOCK(source); new_object->backing_object_offset = orig_object->backing_object_offset + entry->offset; new_object->backing_object = source; } if (orig_object->cred != NULL) { new_object->cred = orig_object->cred; crhold(orig_object->cred); new_object->charge = ptoa(size); KASSERT(orig_object->charge >= ptoa(size), ("orig_object->charge < 0")); orig_object->charge -= ptoa(size); } retry: m = vm_page_find_least(orig_object, offidxstart); for (; m != NULL && (idx = m->pindex - offidxstart) < size; m = m_next) { m_next = TAILQ_NEXT(m, listq); /* * We must wait for pending I/O to complete before we can * rename the page. * * We do not have to VM_PROT_NONE the page as mappings should * not be changed by this operation. */ if (vm_page_busied(m)) { VM_OBJECT_WUNLOCK(new_object); vm_page_lock(m); VM_OBJECT_WUNLOCK(orig_object); vm_page_busy_sleep(m, "spltwt", false); VM_OBJECT_WLOCK(orig_object); VM_OBJECT_WLOCK(new_object); goto retry; } /* vm_page_rename() will dirty the page. */ if (vm_page_rename(m, new_object, idx)) { VM_OBJECT_WUNLOCK(new_object); VM_OBJECT_WUNLOCK(orig_object); vm_radix_wait(); VM_OBJECT_WLOCK(orig_object); VM_OBJECT_WLOCK(new_object); goto retry; } #if VM_NRESERVLEVEL > 0 /* * If some of the reservation's allocated pages remain with * the original object, then transferring the reservation to * the new object is neither particularly beneficial nor * particularly harmful as compared to leaving the reservation * with the original object. If, however, all of the * reservation's allocated pages are transferred to the new * object, then transferring the reservation is typically * beneficial. Determining which of these two cases applies * would be more costly than unconditionally renaming the * reservation. */ vm_reserv_rename(m, new_object, orig_object, offidxstart); #endif if (orig_object->type == OBJT_SWAP) vm_page_xbusy(m); } if (orig_object->type == OBJT_SWAP) { /* * swap_pager_copy() can sleep, in which case the orig_object's * and new_object's locks are released and reacquired. 
*/ swap_pager_copy(orig_object, new_object, offidxstart, 0); TAILQ_FOREACH(m, &new_object->memq, listq) vm_page_xunbusy(m); } VM_OBJECT_WUNLOCK(orig_object); VM_OBJECT_WUNLOCK(new_object); entry->object.vm_object = new_object; entry->offset = 0LL; vm_object_deallocate(orig_object); VM_OBJECT_WLOCK(new_object); } #define OBSC_COLLAPSE_NOWAIT 0x0002 #define OBSC_COLLAPSE_WAIT 0x0004 static vm_page_t vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next, int op) { vm_object_t backing_object; VM_OBJECT_ASSERT_WLOCKED(object); backing_object = object->backing_object; VM_OBJECT_ASSERT_WLOCKED(backing_object); KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p)); KASSERT(p == NULL || p->object == object || p->object == backing_object, ("invalid ownership %p %p %p", p, object, backing_object)); if ((op & OBSC_COLLAPSE_NOWAIT) != 0) return (next); if (p != NULL) vm_page_lock(p); VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(backing_object); /* The page is only NULL when rename fails. */ if (p == NULL) vm_radix_wait(); else vm_page_busy_sleep(p, "vmocol", false); VM_OBJECT_WLOCK(object); VM_OBJECT_WLOCK(backing_object); return (TAILQ_FIRST(&backing_object->memq)); } static bool vm_object_scan_all_shadowed(vm_object_t object) { vm_object_t backing_object; vm_page_t p, pp; vm_pindex_t backing_offset_index, new_pindex, pi, ps; VM_OBJECT_ASSERT_WLOCKED(object); VM_OBJECT_ASSERT_WLOCKED(object->backing_object); backing_object = object->backing_object; if (backing_object->type != OBJT_DEFAULT && backing_object->type != OBJT_SWAP) return (false); pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); p = vm_page_find_least(backing_object, pi); ps = swap_pager_find_least(backing_object, pi); /* * Only check pages inside the parent object's range and * inside the parent object's mapping of the backing object. */ for (;; pi++) { if (p != NULL && p->pindex < pi) p = TAILQ_NEXT(p, listq); if (ps < pi) ps = swap_pager_find_least(backing_object, pi); if (p == NULL && ps >= backing_object->size) break; else if (p == NULL) pi = ps; else pi = MIN(p->pindex, ps); new_pindex = pi - backing_offset_index; if (new_pindex >= object->size) break; /* * See if the parent has the page or if the parent's object * pager has the page. If the parent has the page but the page * is not valid, the parent's object pager must have the page. * * If this fails, the parent does not completely shadow the * object and we might as well give up now. 
*/ pp = vm_page_lookup(object, new_pindex); if ((pp == NULL || pp->valid == 0) && !vm_pager_has_page(object, new_pindex, NULL, NULL)) return (false); } return (true); } static bool vm_object_collapse_scan(vm_object_t object, int op) { vm_object_t backing_object; vm_page_t next, p, pp; vm_pindex_t backing_offset_index, new_pindex; VM_OBJECT_ASSERT_WLOCKED(object); VM_OBJECT_ASSERT_WLOCKED(object->backing_object); backing_object = object->backing_object; backing_offset_index = OFF_TO_IDX(object->backing_object_offset); /* * Initial conditions */ if ((op & OBSC_COLLAPSE_WAIT) != 0) vm_object_set_flag(backing_object, OBJ_DEAD); /* * Our scan */ for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { next = TAILQ_NEXT(p, listq); new_pindex = p->pindex - backing_offset_index; /* * Check for busy page */ if (vm_page_busied(p)) { next = vm_object_collapse_scan_wait(object, p, next, op); continue; } KASSERT(p->object == backing_object, ("vm_object_collapse_scan: object mismatch")); if (p->pindex < backing_offset_index || new_pindex >= object->size) { if (backing_object->type == OBJT_SWAP) swap_pager_freespace(backing_object, p->pindex, 1); /* * Page is out of the parent object's range, we can * simply destroy it. */ vm_page_lock(p); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); if (p->wire_count == 0) vm_page_free(p); else vm_page_remove(p); vm_page_unlock(p); continue; } pp = vm_page_lookup(object, new_pindex); if (pp != NULL && vm_page_busied(pp)) { /* * The page in the parent is busy and possibly not * (yet) valid. Until its state is finalized by the * busy bit owner, we can't tell whether it shadows the * original page. Therefore, we must either skip it * and the original (backing_object) page or wait for * its state to be finalized. * * This is due to a race with vm_fault() where we must * unbusy the original (backing_obj) page before we can * (re)lock the parent. Hence we can get here. */ next = vm_object_collapse_scan_wait(object, pp, next, op); continue; } KASSERT(pp == NULL || pp->valid != 0, ("unbusy invalid page %p", pp)); if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, NULL)) { /* * The page already exists in the parent OR swap exists * for this location in the parent. Leave the parent's * page alone. Destroy the original page from the * backing object. */ if (backing_object->type == OBJT_SWAP) swap_pager_freespace(backing_object, p->pindex, 1); vm_page_lock(p); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); if (p->wire_count == 0) vm_page_free(p); else vm_page_remove(p); vm_page_unlock(p); continue; } /* * Page does not exist in parent, rename the page from the * backing object to the main object. * * If the page was mapped to a process, it can remain mapped * through the rename. vm_page_rename() will dirty the page. */ if (vm_page_rename(p, object, new_pindex)) { next = vm_object_collapse_scan_wait(object, NULL, next, op); continue; } /* Use the old pindex to free the right page. */ if (backing_object->type == OBJT_SWAP) swap_pager_freespace(backing_object, new_pindex + backing_offset_index, 1); #if VM_NRESERVLEVEL > 0 /* * Rename the reservation. */ vm_reserv_rename(p, object, backing_object, backing_offset_index); #endif } return (true); } /* * this version of collapse allows the operation to occur earlier and * when paging_in_progress is true for an object... This is not a complete * operation, but should plug 99.9% of the rest of the leaks. 
*/ static void vm_object_qcollapse(vm_object_t object) { vm_object_t backing_object = object->backing_object; VM_OBJECT_ASSERT_WLOCKED(object); VM_OBJECT_ASSERT_WLOCKED(backing_object); if (backing_object->ref_count != 1) return; vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT); } /* * vm_object_collapse: * * Collapse an object with the object backing it. * Pages in the backing object are moved into the * parent, and the backing object is deallocated. */ void vm_object_collapse(vm_object_t object) { vm_object_t backing_object, new_backing_object; VM_OBJECT_ASSERT_WLOCKED(object); while (TRUE) { /* * Verify that the conditions are right for collapse: * * The object exists and the backing object exists. */ if ((backing_object = object->backing_object) == NULL) break; /* * we check the backing object first, because it is most likely * not collapsable. */ VM_OBJECT_WLOCK(backing_object); if (backing_object->handle != NULL || (backing_object->type != OBJT_DEFAULT && backing_object->type != OBJT_SWAP) || (backing_object->flags & OBJ_DEAD) || object->handle != NULL || (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) || (object->flags & OBJ_DEAD)) { VM_OBJECT_WUNLOCK(backing_object); break; } if (object->paging_in_progress != 0 || backing_object->paging_in_progress != 0) { vm_object_qcollapse(object); VM_OBJECT_WUNLOCK(backing_object); break; } /* * We know that we can either collapse the backing object (if * the parent is the only reference to it) or (perhaps) have * the parent bypass the object if the parent happens to shadow * all the resident pages in the entire backing object. * * This is ignoring pager-backed pages such as swap pages. * vm_object_collapse_scan fails the shadowing test in this * case. */ if (backing_object->ref_count == 1) { vm_object_pip_add(object, 1); vm_object_pip_add(backing_object, 1); /* * If there is exactly one reference to the backing * object, we can collapse it into the parent. */ vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT); #if VM_NRESERVLEVEL > 0 /* * Break any reservations from backing_object. */ if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) vm_reserv_break_all(backing_object); #endif /* * Move the pager from backing_object to object. */ if (backing_object->type == OBJT_SWAP) { /* * swap_pager_copy() can sleep, in which case * the backing_object's and object's locks are * released and reacquired. * Since swap_pager_copy() is being asked to * destroy the source, it will change the * backing_object's type to OBJT_DEFAULT. */ swap_pager_copy( backing_object, object, OFF_TO_IDX(object->backing_object_offset), TRUE); } /* * Object now shadows whatever backing_object did. * Note that the reference to * backing_object->backing_object moves from within * backing_object to within object. */ LIST_REMOVE(object, shadow_list); backing_object->shadow_count--; if (backing_object->backing_object) { VM_OBJECT_WLOCK(backing_object->backing_object); LIST_REMOVE(backing_object, shadow_list); LIST_INSERT_HEAD( &backing_object->backing_object->shadow_head, object, shadow_list); /* * The shadow_count has not changed. */ VM_OBJECT_WUNLOCK(backing_object->backing_object); } object->backing_object = backing_object->backing_object; object->backing_object_offset += backing_object->backing_object_offset; /* * Discard backing_object. * * Since the backing object has no pages, no pager left, * and no object references within it, all that is * necessary is to dispose of it. 
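 *
 * Illustrative aside (not part of this change): when the backing object is
 * removed from the chain, the parent's offset into the next object is the
 * sum of the two offsets.  For example, if object maps backing_object at
 * byte offset 0x3000 and backing_object maps its own backing object at
 * offset 0x5000, then after the collapse (or bypass) the object sits at
 * offset 0x8000, which is what the "backing_object_offset +=" updates
 * above and below compute.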
*/ KASSERT(backing_object->ref_count == 1, ( "backing_object %p was somehow re-referenced during collapse!", backing_object)); vm_object_pip_wakeup(backing_object); backing_object->type = OBJT_DEAD; backing_object->ref_count = 0; VM_OBJECT_WUNLOCK(backing_object); vm_object_destroy(backing_object); vm_object_pip_wakeup(object); object_collapses++; } else { /* * If we do not entirely shadow the backing object, * there is nothing we can do so we give up. */ if (object->resident_page_count != object->size && !vm_object_scan_all_shadowed(object)) { VM_OBJECT_WUNLOCK(backing_object); break; } /* * Make the parent shadow the next object in the * chain. Deallocating backing_object will not remove * it, since its reference count is at least 2. */ LIST_REMOVE(object, shadow_list); backing_object->shadow_count--; new_backing_object = backing_object->backing_object; if ((object->backing_object = new_backing_object) != NULL) { VM_OBJECT_WLOCK(new_backing_object); LIST_INSERT_HEAD( &new_backing_object->shadow_head, object, shadow_list ); new_backing_object->shadow_count++; vm_object_reference_locked(new_backing_object); VM_OBJECT_WUNLOCK(new_backing_object); object->backing_object_offset += backing_object->backing_object_offset; } /* * Drop the reference count on backing_object. Since * its ref_count was at least 2, it will not vanish. */ backing_object->ref_count--; VM_OBJECT_WUNLOCK(backing_object); object_bypasses++; } /* * Try again with this object's new backing object. */ } } /* * vm_object_page_remove: * * For the given object, either frees or invalidates each of the * specified pages. In general, a page is freed. However, if a page is * wired for any reason other than the existence of a managed, wired * mapping, then it may be invalidated but not removed from the object. * Pages are specified by the given range ["start", "end") and the option * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range * extends from "start" to the end of the object. If the option * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the * specified range are affected. If the option OBJPR_NOTMAPPED is * specified, then the pages within the specified range must have no * mappings. Otherwise, if this option is not specified, any mappings to * the specified pages are removed before the pages are freed or * invalidated. * * In general, this operation should only be performed on objects that * contain managed pages. There are, however, two exceptions. First, it * is performed on the kernel and kmem objects by vm_map_entry_delete(). * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- * backed pages. In both of these cases, the option OBJPR_CLEANONLY must * not be specified and the option OBJPR_NOTMAPPED must be specified. * * The object must be locked. */ void vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int options) { vm_page_t p, next; struct mtx *mtx; struct pglist pgl; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((object->flags & OBJ_UNMANAGED) == 0 || (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, ("vm_object_page_remove: illegal options for object %p", object)); if (object->resident_page_count == 0) return; vm_object_pip_add(object, 1); TAILQ_INIT(&pgl); again: p = vm_page_find_least(object, start); mtx = NULL; /* * Here, the variable "p" is either (1) the page with the least pindex * greater than or equal to the parameter "start" or (2) NULL. 
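 *
 * Illustrative aside (not part of this change): the loop below saves
 * TAILQ_NEXT(p) before possibly freeing "p", the usual pattern for
 * removing elements while walking a tail queue.  A minimal userland
 * illustration with <sys/queue.h>, using hypothetical names:
 *
 *   #include <sys/queue.h>
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *
 *   struct item {
 *       int v;
 *       TAILQ_ENTRY(item) link;
 *   };
 *   TAILQ_HEAD(itemq, item);
 *
 *   int
 *   main(void)
 *   {
 *       struct itemq q = TAILQ_HEAD_INITIALIZER(q);
 *       struct item *it, *next;
 *
 *       for (int i = 0; i < 5; i++) {
 *           it = malloc(sizeof(*it));
 *           it->v = i;
 *           TAILQ_INSERT_TAIL(&q, it, link);
 *       }
 *       // Remove the even elements; fetch "next" first so that freeing
 *       // "it" cannot invalidate the iterator.
 *       for (it = TAILQ_FIRST(&q); it != NULL; it = next) {
 *           next = TAILQ_NEXT(it, link);
 *           if ((it->v & 1) == 0) {
 *               TAILQ_REMOVE(&q, it, link);
 *               free(it);
 *           }
 *       }
 *       TAILQ_FOREACH(it, &q, link)
 *           printf("%d\n", it->v);
 *       return (0);
 *   }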
*/ for (; p != NULL && (p->pindex < end || end == 0); p = next) { next = TAILQ_NEXT(p, listq); /* * If the page is wired for any reason besides the existence * of managed, wired mappings, then it cannot be freed. For * example, fictitious pages, which represent device memory, * are inherently wired and cannot be freed. They can, * however, be invalidated if the option OBJPR_CLEANONLY is * not specified. */ vm_page_change_lock(p, &mtx); if (vm_page_xbusied(p)) { VM_OBJECT_WUNLOCK(object); vm_page_busy_sleep(p, "vmopax", true); VM_OBJECT_WLOCK(object); goto again; } if (p->wire_count != 0) { if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) pmap_remove_all(p); if ((options & OBJPR_CLEANONLY) == 0) { p->valid = 0; vm_page_undirty(p); } continue; } if (vm_page_busied(p)) { VM_OBJECT_WUNLOCK(object); vm_page_busy_sleep(p, "vmopar", false); VM_OBJECT_WLOCK(object); goto again; } KASSERT((p->flags & PG_FICTITIOUS) == 0, ("vm_object_page_remove: page %p is fictitious", p)); if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) pmap_remove_write(p); if (p->dirty != 0) continue; } if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) pmap_remove_all(p); p->flags &= ~PG_ZERO; if (vm_page_free_prep(p, false)) TAILQ_INSERT_TAIL(&pgl, p, listq); } if (mtx != NULL) mtx_unlock(mtx); vm_page_free_phys_pglist(&pgl); vm_object_pip_wakeup(object); } /* * vm_object_page_noreuse: * * For the given object, attempt to move the specified pages to * the head of the inactive queue. This bypasses regular LRU * operation and allows the pages to be reused quickly under memory * pressure. If a page is wired for any reason, then it will not * be queued. Pages are specified by the range ["start", "end"). * As a special case, if "end" is zero, then the range extends from * "start" to the end of the object. * * This operation should only be performed on objects that * contain non-fictitious, managed pages. * * The object must be locked. */ void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) { struct mtx *mtx; vm_page_t p, next; VM_OBJECT_ASSERT_LOCKED(object); KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, ("vm_object_page_noreuse: illegal object %p", object)); if (object->resident_page_count == 0) return; p = vm_page_find_least(object, start); /* * Here, the variable "p" is either (1) the page with the least pindex * greater than or equal to the parameter "start" or (2) NULL. */ mtx = NULL; for (; p != NULL && (p->pindex < end || end == 0); p = next) { next = TAILQ_NEXT(p, listq); vm_page_change_lock(p, &mtx); vm_page_deactivate_noreuse(p); } if (mtx != NULL) mtx_unlock(mtx); } /* * Populate the specified range of the object with valid pages. Returns * TRUE if the range is successfully populated and FALSE otherwise. * * Note: This function should be optimized to pass a larger array of * pages to vm_pager_get_pages() before it is applied to a non- * OBJT_DEVICE object. * * The object must be locked. 
*/ boolean_t vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) { vm_page_t m; vm_pindex_t pindex; int rv; VM_OBJECT_ASSERT_WLOCKED(object); for (pindex = start; pindex < end; pindex++) { m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); if (m->valid != VM_PAGE_BITS_ALL) { rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); if (rv != VM_PAGER_OK) { vm_page_lock(m); vm_page_free(m); vm_page_unlock(m); break; } } /* * Keep "m" busy because a subsequent iteration may unlock * the object. */ } if (pindex > start) { m = vm_page_lookup(object, start); while (m != NULL && m->pindex < pindex) { vm_page_xunbusy(m); m = TAILQ_NEXT(m, listq); } } return (pindex == end); } /* * Routine: vm_object_coalesce * Function: Coalesces two objects backing up adjoining * regions of memory into a single object. * * returns TRUE if objects were combined. * * NOTE: Only works at the moment if the second object is NULL - * if it's not, which object do we lock first? * * Parameters: * prev_object First object to coalesce * prev_offset Offset into prev_object * prev_size Size of reference to prev_object * next_size Size of reference to the second object * reserved Indicator that extension region has * swap accounted for * * Conditions: * The object must *not* be locked. */ boolean_t vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) { vm_pindex_t next_pindex; if (prev_object == NULL) return (TRUE); VM_OBJECT_WLOCK(prev_object); if ((prev_object->type != OBJT_DEFAULT && prev_object->type != OBJT_SWAP) || (prev_object->flags & OBJ_TMPFS_NODE) != 0) { VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } /* * Try to collapse the object first */ vm_object_collapse(prev_object); /* * Can't coalesce if: . more than one reference . paged out . shadows * another object . has a copy elsewhere (any of which mean that the * pages not mapped to prev_entry may be in use anyway) */ if (prev_object->backing_object != NULL) { VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } prev_size >>= PAGE_SHIFT; next_size >>= PAGE_SHIFT; next_pindex = OFF_TO_IDX(prev_offset) + prev_size; if ((prev_object->ref_count > 1) && (prev_object->size != next_pindex)) { VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } /* * Account for the charge. */ if (prev_object->cred != NULL) { /* * If prev_object was charged, then this mapping, * although not charged now, may become writable * later. Non-NULL cred in the object would prevent * swap reservation during enabling of the write * access, so reserve swap now. Failed reservation * cause allocation of the separate object for the map * entry, and swap reservation for this entry is * managed in appropriate time. */ if (!reserved && !swap_reserve_by_cred(ptoa(next_size), prev_object->cred)) { VM_OBJECT_WUNLOCK(prev_object); return (FALSE); } prev_object->charge += ptoa(next_size); } /* * Remove any pages that may still be in the object from a previous * deallocation. */ if (next_pindex < prev_object->size) { vm_object_page_remove(prev_object, next_pindex, next_pindex + next_size, 0); if (prev_object->type == OBJT_SWAP) swap_pager_freespace(prev_object, next_pindex, next_size); #if 0 if (prev_object->cred != NULL) { KASSERT(prev_object->charge >= ptoa(prev_object->size - next_pindex), ("object %p overcharged 1 %jx %jx", prev_object, (uintmax_t)next_pindex, (uintmax_t)next_size)); prev_object->charge -= ptoa(prev_object->size - next_pindex); } #endif } /* * Extend the object if necessary. 
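 *
 * Illustrative aside (not part of this change): a worked example of the
 * coalesce arithmetic above, assuming 4 KiB pages.  A previous mapping of
 * 8 pages at object offset 0x2000 followed by a new 4-page region gives
 * next_pindex = 2 + 8 = 10, so the object must grow to at least 14 pages.
 *
 *   #include <stdio.h>
 *   #include <stdint.h>
 *
 *   #define PAGE_SHIFT 12                        // assumption: 4 KiB pages
 *
 *   int
 *   main(void)
 *   {
 *       uint64_t prev_offset = 0x2000;               // bytes into the object
 *       uint64_t prev_size = 8ULL << PAGE_SHIFT;     // bytes
 *       uint64_t next_size = 4ULL << PAGE_SHIFT;     // bytes
 *       uint64_t object_size = 12;                   // pages
 *
 *       prev_size >>= PAGE_SHIFT;
 *       next_size >>= PAGE_SHIFT;
 *       uint64_t next_pindex = (prev_offset >> PAGE_SHIFT) + prev_size;
 *       if (next_pindex + next_size > object_size)
 *           object_size = next_pindex + next_size;
 *       printf("next_pindex %ju, new size %ju pages\n",
 *           (uintmax_t)next_pindex, (uintmax_t)object_size);
 *       return (0);
 *   }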
*/ if (next_pindex + next_size > prev_object->size) prev_object->size = next_pindex + next_size; VM_OBJECT_WUNLOCK(prev_object); return (TRUE); } void vm_object_set_writeable_dirty(vm_object_t object) { VM_OBJECT_ASSERT_WLOCKED(object); if (object->type != OBJT_VNODE) { if ((object->flags & OBJ_TMPFS_NODE) != 0) { KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); vm_object_set_flag(object, OBJ_TMPFS_DIRTY); } return; } object->generation++; if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) return; vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); } /* * vm_object_unwire: * * For each page offset within the specified range of the given object, * find the highest-level page in the shadow chain and unwire it. A page * must exist at every page offset, and the highest-level page must be * wired. */ void vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, uint8_t queue) { vm_object_t tobject; vm_page_t m, tm; vm_pindex_t end_pindex, pindex, tpindex; int depth, locked_depth; KASSERT((offset & PAGE_MASK) == 0, ("vm_object_unwire: offset is not page aligned")); KASSERT((length & PAGE_MASK) == 0, ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); /* The wired count of a fictitious page never changes. */ if ((object->flags & OBJ_FICTITIOUS) != 0) return; pindex = OFF_TO_IDX(offset); end_pindex = pindex + atop(length); locked_depth = 1; VM_OBJECT_RLOCK(object); m = vm_page_find_least(object, pindex); while (pindex < end_pindex) { if (m == NULL || pindex < m->pindex) { /* * The first object in the shadow chain doesn't * contain a page at the current index. Therefore, * the page must exist in a backing object. */ tobject = object; tpindex = pindex; depth = 0; do { tpindex += OFF_TO_IDX(tobject->backing_object_offset); tobject = tobject->backing_object; KASSERT(tobject != NULL, ("vm_object_unwire: missing page")); if ((tobject->flags & OBJ_FICTITIOUS) != 0) goto next_page; depth++; if (depth == locked_depth) { locked_depth++; VM_OBJECT_RLOCK(tobject); } } while ((tm = vm_page_lookup(tobject, tpindex)) == NULL); } else { tm = m; m = TAILQ_NEXT(m, listq); } vm_page_lock(tm); vm_page_unwire(tm, queue); vm_page_unlock(tm); next_page: pindex++; } /* Release the accumulated object locks. */ for (depth = 0; depth < locked_depth; depth++) { tobject = object->backing_object; VM_OBJECT_RUNLOCK(object); object = tobject; } } struct vnode * vm_object_vnode(vm_object_t object) { VM_OBJECT_ASSERT_LOCKED(object); if (object->type == OBJT_VNODE) return (object->handle); if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0) return (object->un_pager.swp.swp_tmpfs); return (NULL); } static int sysctl_vm_object_list(SYSCTL_HANDLER_ARGS) { struct kinfo_vmobject *kvo; char *fullpath, *freepath; struct vnode *vp; struct vattr va; vm_object_t obj; vm_page_t m; int count, error; if (req->oldptr == NULL) { /* * If an old buffer has not been provided, generate an * estimate of the space needed for a subsequent call. */ mtx_lock(&vm_object_list_mtx); count = 0; TAILQ_FOREACH(obj, &vm_object_list, object_list) { if (obj->type == OBJT_DEAD) continue; count++; } mtx_unlock(&vm_object_list_mtx); return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * count * 11 / 10)); } kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK); error = 0; /* * VM objects are type stable and are never removed from the * list once added. This allows us to safely read obj->object_list * after reacquiring the VM object lock. 
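 *
 * Illustrative aside (not part of this change): the loop below relies on
 * the type stability noted above, taking the object lock before dropping
 * the list lock and re-taking the list lock before stepping to the next
 * element.  A stripped-down userland model of that lock dance, with
 * hypothetical names (compile with -pthread):
 *
 *   #include <sys/queue.h>
 *   #include <pthread.h>
 *   #include <stdio.h>
 *
 *   struct obj {
 *       pthread_mutex_t lock;
 *       int data;
 *       TAILQ_ENTRY(obj) list;
 *   };
 *   static TAILQ_HEAD(, obj) all = TAILQ_HEAD_INITIALIZER(all);
 *   static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *   // Walk "all" while doing potentially slow work per element.  Because
 *   // elements are never removed, TAILQ_NEXT(o) is still safe to read
 *   // once the list lock has been reacquired.
 *   static void
 *   walk(void)
 *   {
 *       struct obj *o;
 *
 *       pthread_mutex_lock(&all_lock);
 *       TAILQ_FOREACH(o, &all, list) {
 *           pthread_mutex_lock(&o->lock);
 *           pthread_mutex_unlock(&all_lock);
 *           printf("%d\n", o->data);           // the "slow" work
 *           pthread_mutex_unlock(&o->lock);
 *           pthread_mutex_lock(&all_lock);
 *       }
 *       pthread_mutex_unlock(&all_lock);
 *   }
 *
 *   int
 *   main(void)
 *   {
 *       static struct obj a = { PTHREAD_MUTEX_INITIALIZER, 1 };
 *       static struct obj b = { PTHREAD_MUTEX_INITIALIZER, 2 };
 *
 *       TAILQ_INSERT_TAIL(&all, &a, list);
 *       TAILQ_INSERT_TAIL(&all, &b, list);
 *       walk();
 *       return (0);
 *   }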
*/ mtx_lock(&vm_object_list_mtx); TAILQ_FOREACH(obj, &vm_object_list, object_list) { if (obj->type == OBJT_DEAD) continue; VM_OBJECT_RLOCK(obj); if (obj->type == OBJT_DEAD) { VM_OBJECT_RUNLOCK(obj); continue; } mtx_unlock(&vm_object_list_mtx); kvo->kvo_size = ptoa(obj->size); kvo->kvo_resident = obj->resident_page_count; kvo->kvo_ref_count = obj->ref_count; kvo->kvo_shadow_count = obj->shadow_count; kvo->kvo_memattr = obj->memattr; kvo->kvo_active = 0; kvo->kvo_inactive = 0; TAILQ_FOREACH(m, &obj->memq, listq) { /* * A page may belong to the object but be * dequeued and set to PQ_NONE while the * object lock is not held. This makes the * reads of m->queue below racy, and we do not * count pages set to PQ_NONE. However, this * sysctl is only meant to give an * approximation of the system anyway. */ if (vm_page_active(m)) kvo->kvo_active++; else if (vm_page_inactive(m)) kvo->kvo_inactive++; } kvo->kvo_vn_fileid = 0; kvo->kvo_vn_fsid = 0; kvo->kvo_vn_fsid_freebsd11 = 0; freepath = NULL; fullpath = ""; vp = NULL; switch (obj->type) { case OBJT_DEFAULT: kvo->kvo_type = KVME_TYPE_DEFAULT; break; case OBJT_VNODE: kvo->kvo_type = KVME_TYPE_VNODE; vp = obj->handle; vref(vp); break; case OBJT_SWAP: kvo->kvo_type = KVME_TYPE_SWAP; break; case OBJT_DEVICE: kvo->kvo_type = KVME_TYPE_DEVICE; break; case OBJT_PHYS: kvo->kvo_type = KVME_TYPE_PHYS; break; case OBJT_DEAD: kvo->kvo_type = KVME_TYPE_DEAD; break; case OBJT_SG: kvo->kvo_type = KVME_TYPE_SG; break; case OBJT_MGTDEVICE: kvo->kvo_type = KVME_TYPE_MGTDEVICE; break; default: kvo->kvo_type = KVME_TYPE_UNKNOWN; break; } VM_OBJECT_RUNLOCK(obj); if (vp != NULL) { vn_fullpath(curthread, vp, &fullpath, &freepath); vn_lock(vp, LK_SHARED | LK_RETRY); if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { kvo->kvo_vn_fileid = va.va_fileid; kvo->kvo_vn_fsid = va.va_fsid; kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; /* truncate */ } vput(vp); } strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); if (freepath != NULL) free(freepath, M_TEMP); /* Pack record size down */ kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) + strlen(kvo->kvo_path) + 1; kvo->kvo_structsize = roundup(kvo->kvo_structsize, sizeof(uint64_t)); error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); mtx_lock(&vm_object_list_mtx); if (error) break; } mtx_unlock(&vm_object_list_mtx); free(kvo, M_TEMP); return (error); } SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject", "List of VM objects"); #include "opt_ddb.h" #ifdef DDB #include #include #include static int _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) { vm_map_t tmpm; vm_map_entry_t tmpe; vm_object_t obj; int entcount; if (map == 0) return 0; if (entry == 0) { tmpe = map->header.next; entcount = map->nentries; while (entcount-- && (tmpe != &map->header)) { if (_vm_object_in_map(map, object, tmpe)) { return 1; } tmpe = tmpe->next; } } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { tmpm = entry->object.sub_map; tmpe = tmpm->header.next; entcount = tmpm->nentries; while (entcount-- && tmpe != &tmpm->header) { if (_vm_object_in_map(tmpm, object, tmpe)) { return 1; } tmpe = tmpe->next; } } else if ((obj = entry->object.vm_object) != NULL) { for (; obj; obj = obj->backing_object) if (obj == object) { return 1; } } return 0; } static int vm_object_in_map(vm_object_t object) { struct proc *p; /* sx_slock(&allproc_lock); */ FOREACH_PROC_IN_SYSTEM(p) { if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 
continue; if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { /* sx_sunlock(&allproc_lock); */ return 1; } } /* sx_sunlock(&allproc_lock); */ if (_vm_object_in_map(kernel_map, object, 0)) return 1; return 0; } DB_SHOW_COMMAND(vmochk, vm_object_check) { vm_object_t object; /* * make sure that internal objs are in a map somewhere * and none have zero ref counts. */ TAILQ_FOREACH(object, &vm_object_list, object_list) { if (object->handle == NULL && (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { if (object->ref_count == 0) { db_printf("vmochk: internal obj has zero ref count: %ld\n", (long)object->size); } if (!vm_object_in_map(object)) { db_printf( "vmochk: internal obj is not in a map: " "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", object->ref_count, (u_long)object->size, (u_long)object->size, (void *)object->backing_object); } } } } /* * vm_object_print: [ debug ] */ DB_SHOW_COMMAND(object, vm_object_print_static) { /* XXX convert args. */ vm_object_t object = (vm_object_t)addr; boolean_t full = have_addr; vm_page_t p; /* XXX count is an (unused) arg. Avoid shadowing it. */ #define count was_count int count; if (object == NULL) return; db_iprintf( "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", object, (int)object->type, (uintmax_t)object->size, object->resident_page_count, object->ref_count, object->flags, object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", object->shadow_count, object->backing_object ? object->backing_object->ref_count : 0, object->backing_object, (uintmax_t)object->backing_object_offset); if (!full) return; db_indent += 2; count = 0; TAILQ_FOREACH(p, &object->memq, listq) { if (count == 0) db_iprintf("memory:="); else if (count == 6) { db_printf("\n"); db_iprintf(" ..."); count = 0; } else db_printf(","); count++; db_printf("(off=0x%jx,page=0x%jx)", (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); } if (count != 0) db_printf("\n"); db_indent -= 2; } /* XXX. */ #undef count /* XXX need this non-static entry for calling from vm_map_print. 
*/ void vm_object_print( /* db_expr_t */ long addr, boolean_t have_addr, /* db_expr_t */ long count, char *modif) { vm_object_print_static(addr, have_addr, count, modif); } DB_SHOW_COMMAND(vmopag, vm_object_print_pages) { vm_object_t object; vm_pindex_t fidx; vm_paddr_t pa; vm_page_t m, prev_m; int rcount, nl, c; nl = 0; TAILQ_FOREACH(object, &vm_object_list, object_list) { db_printf("new object: %p\n", (void *)object); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; rcount = 0; fidx = 0; pa = -1; TAILQ_FOREACH(m, &object->memq, listq) { if (m->pindex > 128) break; if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && prev_m->pindex + 1 != m->pindex) { if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; rcount = 0; } } if (rcount && (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { ++rcount; continue; } if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; } fidx = m->pindex; pa = VM_PAGE_TO_PHYS(m); rcount = 1; } if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; } } } #endif /* DDB */ Index: user/jeff/numa/sys/vm/vm_object.h =================================================================== --- user/jeff/numa/sys/vm/vm_object.h (revision 326888) +++ user/jeff/numa/sys/vm/vm_object.h (revision 326889) @@ -1,339 +1,341 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_object.h 8.3 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. 
* * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * * $FreeBSD$ */ /* * Virtual memory object module definitions. */ #ifndef _VM_OBJECT_ #define _VM_OBJECT_ #include #include #include #include #include +#include #include /* * Types defined: * * vm_object_t Virtual memory object. * * List of locks * (c) const until freed * (o) per-object lock * (f) free pages queue mutex * */ #ifndef VM_PAGE_HAVE_PGLIST TAILQ_HEAD(pglist, vm_page); #define VM_PAGE_HAVE_PGLIST #endif struct vm_object { struct rwlock lock; TAILQ_ENTRY(vm_object) object_list; /* list of all objects */ LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */ LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */ struct pglist memq; /* list of resident pages */ struct vm_radix rtree; /* root of the resident page radix trie*/ vm_pindex_t size; /* Object size */ + struct domainset_ref domain; /* NUMA policy. */ int generation; /* generation ID */ int ref_count; /* How many refs?? */ int shadow_count; /* how many objects that this is a shadow for */ vm_memattr_t memattr; /* default memory attribute for pages */ objtype_t type; /* type of pager */ u_short flags; /* see below */ u_short pg_color; /* (c) color of first page in obj */ u_int paging_in_progress; /* Paging (in or out) so don't collapse or destroy */ int resident_page_count; /* number of resident pages */ struct vm_object *backing_object; /* object that I'm a shadow of */ vm_ooffset_t backing_object_offset;/* Offset in backing object */ TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */ LIST_HEAD(, vm_reserv) rvq; /* list of reservations */ void *handle; union { /* * VNode pager * * vnp_size - current size of file */ struct { off_t vnp_size; vm_ooffset_t writemappings; } vnp; /* * Device pager * * devp_pglist - list of allocated pages */ struct { TAILQ_HEAD(, vm_page) devp_pglist; struct cdev_pager_ops *ops; struct cdev *dev; } devp; /* * SG pager * * sgp_pglist - list of allocated pages */ struct { TAILQ_HEAD(, vm_page) sgp_pglist; } sgp; /* * Swap pager * * swp_tmpfs - back-pointer to the tmpfs vnode, * if any, which uses the vm object * as backing store. The handle * cannot be reused for linking, * because the vnode can be * reclaimed and recreated, making * the handle changed and hash-chain * invalid. * * swp_blks - pc-trie of the allocated swap blocks. 
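 *
 * Illustrative aside (not part of this change): the "domain" member added
 * to struct vm_object above attaches a NUMA allocation policy to the
 * object.  The sketch below is a hypothetical userland model of one such
 * policy, round-robin over a mask of allowed domains; it is not the
 * kernel's domainset implementation.
 *
 *   #include <stdio.h>
 *
 *   struct policy {
 *       unsigned mask;                // bit i set => domain i allowed
 *       unsigned iter;                // round-robin cursor
 *   };
 *
 *   static int
 *   next_domain(struct policy *p, int ndomains)
 *   {
 *       for (int tries = 0; tries < ndomains; tries++) {
 *           int d = p->iter++ % ndomains;
 *           if (p->mask & (1u << d))
 *               return (d);
 *       }
 *       return (-1);                  // empty mask
 *   }
 *
 *   int
 *   main(void)
 *   {
 *       struct policy p = { 0x5, 0 };     // domains 0 and 2 allowed
 *
 *       for (int i = 0; i < 4; i++)
 *           printf("%d\n", next_domain(&p, 4));
 *       return (0);
 *   }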
* */ struct { void *swp_tmpfs; struct pctrie swp_blks; } swp; } un_pager; struct ucred *cred; vm_ooffset_t charge; void *umtx_data; }; /* * Flags */ #define OBJ_FICTITIOUS 0x0001 /* (c) contains fictitious pages */ #define OBJ_UNMANAGED 0x0002 /* (c) contains unmanaged pages */ #define OBJ_POPULATE 0x0004 /* pager implements populate() */ #define OBJ_DEAD 0x0008 /* dead objects (during rundown) */ #define OBJ_NOSPLIT 0x0010 /* dont split this object */ #define OBJ_UMTXDEAD 0x0020 /* umtx pshared was terminated */ #define OBJ_PIPWNT 0x0040 /* paging in progress wanted */ #define OBJ_PG_DTOR 0x0080 /* dont reset object, leave that for dtor */ #define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty, only for vnode */ #define OBJ_TMPFS_NODE 0x0200 /* object belongs to tmpfs VREG node */ #define OBJ_TMPFS_DIRTY 0x0400 /* dirty tmpfs obj */ #define OBJ_COLORED 0x1000 /* pg_color is defined */ #define OBJ_ONEMAPPING 0x2000 /* One USE (a single, non-forked) mapping flag */ #define OBJ_DISCONNECTWNT 0x4000 /* disconnect from vnode wanted */ #define OBJ_TMPFS 0x8000 /* has tmpfs vnode allocated */ /* * Helpers to perform conversion between vm_object page indexes and offsets. * IDX_TO_OFF() converts an index into an offset. * OFF_TO_IDX() converts an offset into an index. Since offsets are signed * by default, the sign propagation in OFF_TO_IDX(), when applied to * negative offsets, is intentional and returns a vm_object page index * that cannot be created by a userspace mapping. * UOFF_TO_IDX() treats the offset as an unsigned value and converts it * into an index accordingly. Use it only when the full range of offset * values are allowed. Currently, this only applies to device mappings. * OBJ_MAX_SIZE specifies the maximum page index corresponding to the * maximum unsigned offset. */ #define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT) #define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT)) #define UOFF_TO_IDX(off) (((vm_pindex_t)(off)) >> PAGE_SHIFT) #define OBJ_MAX_SIZE (UOFF_TO_IDX(UINT64_MAX) + 1) #ifdef _KERNEL #define OBJPC_SYNC 0x1 /* sync I/O */ #define OBJPC_INVAL 0x2 /* invalidate */ #define OBJPC_NOSYNC 0x4 /* skip if VPO_NOSYNC */ /* * The following options are supported by vm_object_page_remove(). */ #define OBJPR_CLEANONLY 0x1 /* Don't remove dirty pages. */ #define OBJPR_NOTMAPPED 0x2 /* Don't unmap pages. */ TAILQ_HEAD(object_q, vm_object); extern struct object_q vm_object_list; /* list of allocated objects */ extern struct mtx vm_object_list_mtx; /* lock for object list and count */ extern struct vm_object kernel_object_store; /* kernel and kmem are aliased for backwards KPI compat. 
*/ #define kernel_object (&kernel_object_store) #define kmem_object (&kernel_object_store) #define VM_OBJECT_ASSERT_LOCKED(object) \ rw_assert(&(object)->lock, RA_LOCKED) #define VM_OBJECT_ASSERT_RLOCKED(object) \ rw_assert(&(object)->lock, RA_RLOCKED) #define VM_OBJECT_ASSERT_WLOCKED(object) \ rw_assert(&(object)->lock, RA_WLOCKED) #define VM_OBJECT_ASSERT_UNLOCKED(object) \ rw_assert(&(object)->lock, RA_UNLOCKED) #define VM_OBJECT_LOCK_DOWNGRADE(object) \ rw_downgrade(&(object)->lock) #define VM_OBJECT_RLOCK(object) \ rw_rlock(&(object)->lock) #define VM_OBJECT_RUNLOCK(object) \ rw_runlock(&(object)->lock) #define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \ rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo)) #define VM_OBJECT_TRYRLOCK(object) \ rw_try_rlock(&(object)->lock) #define VM_OBJECT_TRYWLOCK(object) \ rw_try_wlock(&(object)->lock) #define VM_OBJECT_TRYUPGRADE(object) \ rw_try_upgrade(&(object)->lock) #define VM_OBJECT_WLOCK(object) \ rw_wlock(&(object)->lock) #define VM_OBJECT_WOWNED(object) \ rw_wowned(&(object)->lock) #define VM_OBJECT_WUNLOCK(object) \ rw_wunlock(&(object)->lock) /* * The object must be locked or thread private. */ static __inline void vm_object_set_flag(vm_object_t object, u_short bits) { object->flags |= bits; } /* * Conditionally set the object's color, which (1) enables the allocation * of physical memory reservations for anonymous objects and larger-than- * superpage-sized named objects and (2) determines the first page offset * within the object at which a reservation may be allocated. In other * words, the color determines the alignment of the object with respect * to the largest superpage boundary. When mapping named objects, like * files or POSIX shared memory objects, the color should be set to zero * before a virtual address is selected for the mapping. In contrast, * for anonymous objects, the color may be set after the virtual address * is selected. * * The object must be locked. 
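 *
 * Illustrative aside (not part of this change): a small worked example of
 * the color arithmetic, assuming 4 KiB base pages and 2 MiB superpages,
 * i.e. 512 pages per reservation.  A color of 7 means object page index p
 * prefers the physical slot (7 + p) mod 512 within a superpage-sized run,
 * so page index 505 would land on a superpage boundary.
 *
 *   #include <stdio.h>
 *
 *   #define NPAGES_SUPER 512          // assumption: 2 MiB / 4 KiB
 *
 *   int
 *   main(void)
 *   {
 *       unsigned color = 7;
 *
 *       for (unsigned pindex = 0; pindex < 3; pindex++)
 *           printf("pindex %u -> slot %u\n", pindex,
 *               (color + pindex) % NPAGES_SUPER);
 *       printf("boundary at pindex %u\n",
 *           (NPAGES_SUPER - color) % NPAGES_SUPER);
 *       return (0);
 *   }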
*/ static __inline void vm_object_color(vm_object_t object, u_short color) { if ((object->flags & OBJ_COLORED) == 0) { object->pg_color = color; object->flags |= OBJ_COLORED; } } void vm_object_clear_flag(vm_object_t object, u_short bits); void vm_object_pip_add(vm_object_t object, short i); void vm_object_pip_subtract(vm_object_t object, short i); void vm_object_pip_wakeup(vm_object_t object); void vm_object_pip_wakeupn(vm_object_t object, short i); void vm_object_pip_wait(vm_object_t object, char *waitid); void umtx_shm_object_init(vm_object_t object); void umtx_shm_object_terminated(vm_object_t object); extern int umtx_shm_vnobj_persistent; vm_object_t vm_object_allocate (objtype_t, vm_pindex_t); boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t, boolean_t); void vm_object_collapse (vm_object_t); void vm_object_deallocate (vm_object_t); void vm_object_destroy (vm_object_t); void vm_object_terminate (vm_object_t); void vm_object_set_writeable_dirty (vm_object_t); void vm_object_init (void); void vm_object_madvise(vm_object_t, vm_pindex_t, vm_pindex_t, int); boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, int flags); void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end); void vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int options); boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t); void vm_object_print(long addr, boolean_t have_addr, long count, char *modif); void vm_object_reference (vm_object_t); void vm_object_reference_locked(vm_object_t); int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr); void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t); void vm_object_split(vm_map_entry_t); boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t, boolean_t); void vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, uint8_t queue); struct vnode *vm_object_vnode(vm_object_t object); #endif /* _KERNEL */ #endif /* _VM_OBJECT_ */ Index: user/jeff/numa/sys/vm/vm_page.c =================================================================== --- user/jeff/numa/sys/vm/vm_page.c (revision 326888) +++ user/jeff/numa/sys/vm/vm_page.c (revision 326889) @@ -1,4034 +1,4021 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 */ /*- * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * GENERAL RULES ON VM_PAGE MANIPULATION * * - A page queue lock is required when adding or removing a page from a * page queue regardless of other locks or the busy state of a page. * * * In general, no thread besides the page daemon can acquire or * hold more than one page queue lock at a time. * * * The page daemon can acquire and hold any pair of page queue * locks in any order. * * - The object lock is required when inserting or removing * pages from an object (vm_page_insert() or vm_page_remove()). * */ /* * Resident memory management module. */ #include __FBSDID("$FreeBSD$"); #include "opt_vm.h" #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include #include #include #include #include #include #include #include #include #include #include #include /* * Associated with page of user-allocatable memory is a * page structure. */ struct vm_domain vm_dom[MAXMEMDOM]; struct mtx_padalign __exclusive_cache_line vm_page_queue_free_mtx; struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT]; /* * bogus page -- for I/O to/from partially complete buffers, * or for paging into sparsely invalid regions. 
*/ vm_page_t bogus_page; vm_page_t vm_page_array; long vm_page_array_size; long first_page; static int boot_pages = UMA_BOOT_PAGES; SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &boot_pages, 0, "number of pages allocated for bootstrapping the VM system"); static int pa_tryrelock_restart; SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD, &pa_tryrelock_restart, 0, "Number of tryrelock restarts"); static TAILQ_HEAD(, vm_page) blacklist_head; static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages"); /* Is the page daemon waiting for free pages? */ static int vm_pageout_pages_needed; static uma_zone_t fakepg_zone; static void vm_page_alloc_check(vm_page_t m); static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits); static void vm_page_enqueue(uint8_t queue, vm_page_t m); static void vm_page_free_phys(vm_page_t m); static void vm_page_free_wakeup(void); static void vm_page_init(void *dummy); static int vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex, vm_page_t mpred); static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred); static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run, vm_paddr_t high); static int vm_page_alloc_fail(vm_object_t object, int req); SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL); static void vm_page_init(void *dummy) { fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM); bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED); } /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */ #if PAGE_SIZE == 32768 #ifdef CTASSERT CTASSERT(sizeof(u_long) >= 8); #endif #endif /* * Try to acquire a physical address lock while a pmap is locked. If we * fail to trylock we unlock and lock the pmap directly and cache the * locked pa in *locked. The caller should then restart their loop in case * the virtual to physical mapping has changed. */ int vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) { vm_paddr_t lockpa; lockpa = *locked; *locked = pa; if (lockpa) { PA_LOCK_ASSERT(lockpa, MA_OWNED); if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) return (0); PA_UNLOCK(lockpa); } if (PA_TRYLOCK(pa)) return (0); PMAP_UNLOCK(pmap); atomic_add_int(&pa_tryrelock_restart, 1); PA_LOCK(pa); PMAP_LOCK(pmap); return (EAGAIN); } /* * vm_set_page_size: * * Sets the page size, perhaps based upon the memory * size. Must be called before any use of page-size * dependent functions. */ void vm_set_page_size(void) { if (vm_cnt.v_page_size == 0) vm_cnt.v_page_size = PAGE_SIZE; if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0) panic("vm_set_page_size: page size not a power of two"); } /* * vm_page_blacklist_next: * * Find the next entry in the provided string of blacklist * addresses. Entries are separated by space, comma, or newline. * If an invalid integer is encountered then the rest of the * string is skipped. Updates the list pointer to the next * character, or NULL if the string is exhausted or invalid. 
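 *
 * Illustrative aside (not part of this change): a minimal userland model
 * of this parser.  It accepts the same separators and, like the code
 * below, truncates each address to a page boundary; error handling is
 * simplified and the page mask is an assumption (4 KiB pages).
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include <string.h>
 *
 *   #define PAGE_MASK 0xfffULL            // assumption: 4 KiB pages
 *
 *   int
 *   main(void)
 *   {
 *       char buf[] = "0x12345678, 0x2000\n4096";
 *       char *pos = buf, *cp;
 *
 *       while (*pos != '\0') {
 *           unsigned long long bad = strtoull(pos, &cp, 0);
 *           if (cp == pos) {
 *               fprintf(stderr, "garbage in blacklist\n");
 *               break;
 *           }
 *           printf("0x%llx\n", bad & ~PAGE_MASK);
 *           // Skip separators: space, comma, or newline.
 *           pos = cp + strspn(cp, " ,\n");
 *       }
 *       return (0);
 *   }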
*/ static vm_paddr_t vm_page_blacklist_next(char **list, char *end) { vm_paddr_t bad; char *cp, *pos; if (list == NULL || *list == NULL) return (0); if (**list =='\0') { *list = NULL; return (0); } /* * If there's no end pointer then the buffer is coming from * the kenv and we know it's null-terminated. */ if (end == NULL) end = *list + strlen(*list); /* Ensure that strtoq() won't walk off the end */ if (*end != '\0') { if (*end == '\n' || *end == ' ' || *end == ',') *end = '\0'; else { printf("Blacklist not terminated, skipping\n"); *list = NULL; return (0); } } for (pos = *list; *pos != '\0'; pos = cp) { bad = strtoq(pos, &cp, 0); if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') { if (bad == 0) { if (++cp < end) continue; else break; } } else break; if (*cp == '\0' || ++cp >= end) *list = NULL; else *list = cp; return (trunc_page(bad)); } printf("Garbage in RAM blacklist, skipping\n"); *list = NULL; return (0); } /* * vm_page_blacklist_check: * * Iterate through the provided string of blacklist addresses, pulling * each entry out of the physical allocator free list and putting it * onto a list for reporting via the vm.page_blacklist sysctl. */ static void vm_page_blacklist_check(char *list, char *end) { vm_paddr_t pa; vm_page_t m; char *next; int ret; next = list; while (next != NULL) { if ((pa = vm_page_blacklist_next(&next, end)) == 0) continue; m = vm_phys_paddr_to_vm_page(pa); if (m == NULL) continue; mtx_lock(&vm_page_queue_free_mtx); ret = vm_phys_unfree_page(m); mtx_unlock(&vm_page_queue_free_mtx); if (ret == TRUE) { TAILQ_INSERT_TAIL(&blacklist_head, m, listq); if (bootverbose) printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa); } } } /* * vm_page_blacklist_load: * * Search for a special module named "ram_blacklist". It'll be a * plain text file provided by the user via the loader directive * of the same name. */ static void vm_page_blacklist_load(char **list, char **end) { void *mod; u_char *ptr; u_int len; mod = NULL; ptr = NULL; mod = preload_search_by_type("ram_blacklist"); if (mod != NULL) { ptr = preload_fetch_addr(mod); len = preload_fetch_size(mod); } *list = ptr; if (ptr != NULL) *end = ptr + len; else *end = NULL; return; } static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS) { vm_page_t m; struct sbuf sbuf; int error, first; first = 1; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); TAILQ_FOREACH(m, &blacklist_head, listq) { sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",", (uintmax_t)m->phys_addr); first = 0; } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } static void vm_page_domain_init(struct vm_domain *vmd) { struct vm_pagequeue *pq; int i; *__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) = "vm inactive pagequeue"; *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) = &vm_cnt.v_inactive_count; *__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) = "vm active pagequeue"; *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) = &vm_cnt.v_active_count; *__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) = "vm laundry pagequeue"; *__DECONST(int **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_vcnt) = &vm_cnt.v_laundry_count; *__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) = "vm unswappable pagequeue"; /* Unswappable dirty pages are counted as being in the laundry. 
*/ *__DECONST(int **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_vcnt) = &vm_cnt.v_laundry_count; vmd->vmd_page_count = 0; vmd->vmd_free_count = 0; vmd->vmd_segs = 0; vmd->vmd_oom = FALSE; for (i = 0; i < PQ_COUNT; i++) { pq = &vmd->vmd_pagequeues[i]; TAILQ_INIT(&pq->pq_pl); mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue", MTX_DEF | MTX_DUPOK); } } /* * Initialize a physical page in preparation for adding it to the free * lists. */ static void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind) { m->object = NULL; m->wire_count = 0; m->busy_lock = VPB_UNBUSIED; m->hold_count = 0; m->flags = 0; m->phys_addr = pa; m->queue = PQ_NONE; m->psind = 0; m->segind = segind; m->order = VM_NFREEORDER; m->pool = VM_FREEPOOL_DEFAULT; m->valid = m->dirty = 0; pmap_page_init(m); } /* * vm_page_startup: * * Initializes the resident memory module. Allocates physical memory for * bootstrapping UMA and some data structures that are used to manage * physical pages. Initializes these structures, and populates the free * page queues. */ vm_offset_t vm_page_startup(vm_offset_t vaddr) { struct vm_domain *vmd; struct vm_phys_seg *seg; vm_page_t m; char *list, *listend; vm_offset_t mapped; vm_paddr_t end, high_avail, low_avail, new_end, page_range, size; vm_paddr_t biggestsize, last_pa, pa; u_long pagecount; int biggestone, i, pages_per_zone, segind; biggestsize = 0; biggestone = 0; vaddr = round_page(vaddr); for (i = 0; phys_avail[i + 1]; i += 2) { phys_avail[i] = round_page(phys_avail[i]); phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); } for (i = 0; phys_avail[i + 1]; i += 2) { size = phys_avail[i + 1] - phys_avail[i]; if (size > biggestsize) { biggestone = i; biggestsize = size; } } end = phys_avail[biggestone+1]; /* * Initialize the page and queue locks. */ mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF); for (i = 0; i < PA_LOCK_COUNT; i++) mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF); for (i = 0; i < vm_ndomains; i++) vm_page_domain_init(&vm_dom[i]); /* * Almost all of the pages needed for bootstrapping UMA are used * for zone structures, so if the number of CPUs results in those * structures taking more than one page each, we set aside more pages * in proportion to the zone structure size. */ pages_per_zone = howmany(sizeof(struct uma_zone) + sizeof(struct uma_cache) * (mp_maxid + 1) + roundup2(sizeof(struct uma_slab), sizeof(void *)), UMA_SLAB_SIZE); if (pages_per_zone > 1) { /* Reserve more pages so that we don't run out. */ boot_pages = UMA_BOOT_PAGES_ZONES * pages_per_zone; } /* * Allocate memory for use when boot strapping the kernel memory * allocator. * * CTFLAG_RDTUN doesn't work during the early boot process, so we must * manually fetch the value. */ TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages); new_end = end - (boot_pages * UMA_SLAB_SIZE); new_end = trunc_page(new_end); mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); bzero((void *)mapped, end - new_end); uma_startup((void *)mapped, boot_pages); #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ defined(__i386__) || defined(__mips__) /* * Allocate a bitmap to indicate that a random physical page * needs to be included in a minidump. * * The amd64 port needs this to indicate which direct map pages * need to be dumped, via calls to dump_add_page()/dump_drop_page(). * * However, i386 still needs this workspace internally within the * minidump code. In theory, they are not needed on i386, but are * included should the sf_buf code decide to use them. 
*/ last_pa = 0; for (i = 0; dump_avail[i + 1] != 0; i += 2) if (dump_avail[i + 1] > last_pa) last_pa = dump_avail[i + 1]; page_range = last_pa / PAGE_SIZE; vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); new_end -= vm_page_dump_size; vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); bzero((void *)vm_page_dump, vm_page_dump_size); #else (void)last_pa; #endif #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) /* * Include the UMA bootstrap pages and vm_page_dump in a crash dump. * When pmap_map() uses the direct map, they are not automatically * included. */ for (pa = new_end; pa < end; pa += PAGE_SIZE) dump_add_page(pa); #endif phys_avail[biggestone + 1] = new_end; #ifdef __amd64__ /* * Request that the physical pages underlying the message buffer be * included in a crash dump. Since the message buffer is accessed * through the direct map, they are not automatically included. */ pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr); last_pa = pa + round_page(msgbufsize); while (pa < last_pa) { dump_add_page(pa); pa += PAGE_SIZE; } #endif /* * Compute the number of pages of memory that will be available for * use, taking into account the overhead of a page structure per page. * In other words, solve * "available physical memory" - round_page(page_range * * sizeof(struct vm_page)) = page_range * PAGE_SIZE * for page_range. */ low_avail = phys_avail[0]; high_avail = phys_avail[1]; for (i = 0; i < vm_phys_nsegs; i++) { if (vm_phys_segs[i].start < low_avail) low_avail = vm_phys_segs[i].start; if (vm_phys_segs[i].end > high_avail) high_avail = vm_phys_segs[i].end; } /* Skip the first chunk. It is already accounted for. */ for (i = 2; phys_avail[i + 1] != 0; i += 2) { if (phys_avail[i] < low_avail) low_avail = phys_avail[i]; if (phys_avail[i + 1] > high_avail) high_avail = phys_avail[i + 1]; } first_page = low_avail / PAGE_SIZE; #ifdef VM_PHYSSEG_SPARSE size = 0; for (i = 0; i < vm_phys_nsegs; i++) size += vm_phys_segs[i].end - vm_phys_segs[i].start; for (i = 0; phys_avail[i + 1] != 0; i += 2) size += phys_avail[i + 1] - phys_avail[i]; #elif defined(VM_PHYSSEG_DENSE) size = high_avail - low_avail; #else #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." #endif #ifdef VM_PHYSSEG_DENSE /* * In the VM_PHYSSEG_DENSE case, the number of pages can account for * the overhead of a page structure per page only if vm_page_array is * allocated from the last physical memory chunk. Otherwise, we must * allocate page structures representing the physical memory * underlying vm_page_array, even though they will not be used. */ if (new_end != high_avail) page_range = size / PAGE_SIZE; else #endif { page_range = size / (PAGE_SIZE + sizeof(struct vm_page)); /* * If the partial bytes remaining are large enough for * a page (PAGE_SIZE) without a corresponding * 'struct vm_page', then new_end will contain an * extra page after subtracting the length of the VM * page array. Compensate by subtracting an extra * page from new_end. */ if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) { if (new_end == high_avail) high_avail -= PAGE_SIZE; new_end -= PAGE_SIZE; } } end = new_end; /* * Reserve an unmapped guard page to trap access to vm_page_array[-1]. * However, because this page is allocated from KVM, out-of-bounds * accesses using the direct map will not be trapped. */ vaddr += PAGE_SIZE; /* * Allocate physical memory for the page structures, and map it. 
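 *
 * Illustrative aside (not part of this change): a worked instance of the
 * page_range computation above.  Assuming 4 KiB pages and a 104-byte
 * struct vm_page (both assumptions), one gigabyte of managed memory
 * yields 1 GiB / (4096 + 104) = 255652 usable pages, with the remainder
 * consumed by the vm_page array itself.
 *
 *   #include <stdio.h>
 *   #include <stdint.h>
 *
 *   #define PAGE_SIZE  4096               // assumption
 *   #define VM_PAGE_SZ 104                // assumption: sizeof(struct vm_page)
 *
 *   int
 *   main(void)
 *   {
 *       uint64_t size = 1ULL << 30;       // bytes of managed memory
 *       uint64_t page_range = size / (PAGE_SIZE + VM_PAGE_SZ);
 *       uint64_t array_bytes = page_range * VM_PAGE_SZ;
 *
 *       printf("%ju pages, %ju bytes of vm_page structures\n",
 *           (uintmax_t)page_range, (uintmax_t)array_bytes);
 *       return (0);
 *   }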
*/ new_end = trunc_page(end - page_range * sizeof(struct vm_page)); mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); vm_page_array = (vm_page_t)mapped; vm_page_array_size = page_range; #if VM_NRESERVLEVEL > 0 /* * Allocate physical memory for the reservation management system's * data structures, and map it. */ if (high_avail == end) high_avail = new_end; new_end = vm_reserv_startup(&vaddr, new_end, high_avail); #endif #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) /* * Include vm_page_array and vm_reserv_array in a crash dump. */ for (pa = new_end; pa < end; pa += PAGE_SIZE) dump_add_page(pa); #endif phys_avail[biggestone + 1] = new_end; /* * Add physical memory segments corresponding to the available * physical pages. */ for (i = 0; phys_avail[i + 1] != 0; i += 2) vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]); /* * Initialize the physical memory allocator. */ vm_phys_init(); /* * Initialize the page structures and add every available page to the * physical memory allocator's free lists. */ vm_cnt.v_page_count = 0; vm_cnt.v_free_count = 0; for (segind = 0; segind < vm_phys_nsegs; segind++) { seg = &vm_phys_segs[segind]; for (m = seg->first_page, pa = seg->start; pa < seg->end; m++, pa += PAGE_SIZE) vm_page_init_page(m, pa, segind); /* * Add the segment to the free lists only if it is covered by * one of the ranges in phys_avail. Because we've added the * ranges to the vm_phys_segs array, we can assume that each * segment is either entirely contained in one of the ranges, * or doesn't overlap any of them. */ for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (seg->start < phys_avail[i] || seg->end > phys_avail[i + 1]) continue; m = seg->first_page; pagecount = (u_long)atop(seg->end - seg->start); mtx_lock(&vm_page_queue_free_mtx); vm_phys_free_contig(m, pagecount); vm_phys_freecnt_adj(m, (int)pagecount); mtx_unlock(&vm_page_queue_free_mtx); vm_cnt.v_page_count += (u_int)pagecount; vmd = &vm_dom[seg->domain]; vmd->vmd_page_count += (u_int)pagecount; vmd->vmd_segs |= 1UL << m->segind; break; } } /* * Remove blacklisted pages from the physical memory allocator. */ TAILQ_INIT(&blacklist_head); vm_page_blacklist_load(&list, &listend); vm_page_blacklist_check(list, listend); list = kern_getenv("vm.blacklist"); vm_page_blacklist_check(list, NULL); freeenv(list); #if VM_NRESERVLEVEL > 0 /* * Initialize the reservation management system. */ vm_reserv_init(); #endif + /* + * Set an initial domain policy for thread0 so that allocations + * can work. + */ + domainset_zero(); + return (vaddr); } void vm_page_reference(vm_page_t m) { vm_page_aflag_set(m, PGA_REFERENCED); } /* * vm_page_busy_downgrade: * * Downgrade an exclusive busy page into a single shared busy page. */ void vm_page_busy_downgrade(vm_page_t m) { u_int x; bool locked; vm_page_assert_xbusied(m); locked = mtx_owned(vm_page_lockptr(m)); for (;;) { x = m->busy_lock; x &= VPB_BIT_WAITERS; if (x != 0 && !locked) vm_page_lock(m); if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1))) break; if (x != 0 && !locked) vm_page_unlock(m); } if (x != 0) { wakeup(m); if (!locked) vm_page_unlock(m); } } /* * vm_page_sbusied: * * Return a positive value if the page is shared busied, 0 otherwise. */ int vm_page_sbusied(vm_page_t m) { u_int x; x = m->busy_lock; return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED); } /* * vm_page_sunbusy: * * Shared unbusy a page. 
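 *
 * A minimal sketch of the shared-busy pairing ("m" is a placeholder
 * page that the caller has already looked up):
 *
 *	if (vm_page_trysbusy(m) != 0) {
 *		... read the page contents ...
 *		vm_page_sunbusy(m);
 *	}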
*/ void vm_page_sunbusy(vm_page_t m) { u_int x; vm_page_lock_assert(m, MA_NOTOWNED); vm_page_assert_sbusied(m); for (;;) { x = m->busy_lock; if (VPB_SHARERS(x) > 1) { if (atomic_cmpset_int(&m->busy_lock, x, x - VPB_ONE_SHARER)) break; continue; } if ((x & VPB_BIT_WAITERS) == 0) { KASSERT(x == VPB_SHARERS_WORD(1), ("vm_page_sunbusy: invalid lock state")); if (atomic_cmpset_int(&m->busy_lock, VPB_SHARERS_WORD(1), VPB_UNBUSIED)) break; continue; } KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS), ("vm_page_sunbusy: invalid lock state for waiters")); vm_page_lock(m); if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) { vm_page_unlock(m); continue; } wakeup(m); vm_page_unlock(m); break; } } /* * vm_page_busy_sleep: * * Sleep and release the page lock, using the page pointer as wchan. * This is used to implement the hard-path of busying mechanism. * * The given page must be locked. * * If nonshared is true, sleep only if the page is xbusy. */ void vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared) { u_int x; vm_page_assert_locked(m); x = m->busy_lock; if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) || ((x & VPB_BIT_WAITERS) == 0 && !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) { vm_page_unlock(m); return; } msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0); } /* * vm_page_trysbusy: * * Try to shared busy a page. * If the operation succeeds 1 is returned otherwise 0. * The operation never sleeps. */ int vm_page_trysbusy(vm_page_t m) { u_int x; for (;;) { x = m->busy_lock; if ((x & VPB_BIT_SHARED) == 0) return (0); if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER)) return (1); } } static void vm_page_xunbusy_locked(vm_page_t m) { vm_page_assert_xbusied(m); vm_page_assert_locked(m); atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED); /* There is a waiter, do wakeup() instead of vm_page_flash(). */ wakeup(m); } void vm_page_xunbusy_maybelocked(vm_page_t m) { bool lockacq; vm_page_assert_xbusied(m); /* * Fast path for unbusy. If it succeeds, we know that there * are no waiters, so we do not need a wakeup. */ if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED)) return; lockacq = !mtx_owned(vm_page_lockptr(m)); if (lockacq) vm_page_lock(m); vm_page_xunbusy_locked(m); if (lockacq) vm_page_unlock(m); } /* * vm_page_xunbusy_hard: * * Called after the first try the exclusive unbusy of a page failed. * It is assumed that the waiters bit is on. */ void vm_page_xunbusy_hard(vm_page_t m) { vm_page_assert_xbusied(m); vm_page_lock(m); vm_page_xunbusy_locked(m); vm_page_unlock(m); } /* * vm_page_flash: * * Wakeup anyone waiting for the page. * The ownership bits do not change. * * The given page must be locked. */ void vm_page_flash(vm_page_t m) { u_int x; vm_page_lock_assert(m, MA_OWNED); for (;;) { x = m->busy_lock; if ((x & VPB_BIT_WAITERS) == 0) return; if (atomic_cmpset_int(&m->busy_lock, x, x & (~VPB_BIT_WAITERS))) break; } wakeup(m); } /* * Avoid releasing and reacquiring the same page lock. */ void vm_page_change_lock(vm_page_t m, struct mtx **mtx) { struct mtx *mtx1; mtx1 = vm_page_lockptr(m); if (*mtx == mtx1) return; if (*mtx != NULL) mtx_unlock(*mtx); *mtx = mtx1; mtx_lock(mtx1); } /* * Keep page from being freed by the page daemon * much of the same effect as wiring, except much lower * overhead and should be used only for *very* temporary * holding ("wiring"). 
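 *
 * A minimal usage sketch ("m" is a placeholder); both routines require
 * the page lock:
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	... short-lived access that must not race with freeing ...
 *	vm_page_lock(m);
 *	vm_page_unhold(m);
 *	vm_page_unlock(m);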
*/ void vm_page_hold(vm_page_t mem) { vm_page_lock_assert(mem, MA_OWNED); mem->hold_count++; } void vm_page_unhold(vm_page_t mem) { vm_page_lock_assert(mem, MA_OWNED); KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!")); --mem->hold_count; if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0) vm_page_free_toq(mem); } /* * vm_page_unhold_pages: * * Unhold each of the pages that is referenced by the given array. */ void vm_page_unhold_pages(vm_page_t *ma, int count) { struct mtx *mtx; mtx = NULL; for (; count != 0; count--) { vm_page_change_lock(*ma, &mtx); vm_page_unhold(*ma); ma++; } if (mtx != NULL) mtx_unlock(mtx); } vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa) { vm_page_t m; #ifdef VM_PHYSSEG_SPARSE m = vm_phys_paddr_to_vm_page(pa); if (m == NULL) m = vm_phys_fictitious_to_vm_page(pa); return (m); #elif defined(VM_PHYSSEG_DENSE) long pi; pi = atop(pa); if (pi >= first_page && (pi - first_page) < vm_page_array_size) { m = &vm_page_array[pi - first_page]; return (m); } return (vm_phys_fictitious_to_vm_page(pa)); #else #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." #endif } /* * vm_page_getfake: * * Create a fictitious page with the specified physical address and * memory attribute. The memory attribute is the only the machine- * dependent aspect of a fictitious page that must be initialized. */ vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr) { vm_page_t m; m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO); vm_page_initfake(m, paddr, memattr); return (m); } void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) { if ((m->flags & PG_FICTITIOUS) != 0) { /* * The page's memattr might have changed since the * previous initialization. Update the pmap to the * new memattr. */ goto memattr; } m->phys_addr = paddr; m->queue = PQ_NONE; /* Fictitious pages don't use "segind". */ m->flags = PG_FICTITIOUS; /* Fictitious pages don't use "order" or "pool". */ m->oflags = VPO_UNMANAGED; m->busy_lock = VPB_SINGLE_EXCLUSIVER; m->wire_count = 1; pmap_page_init(m); memattr: pmap_page_set_memattr(m, memattr); } /* * vm_page_putfake: * * Release a fictitious page. */ void vm_page_putfake(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m)); KASSERT((m->flags & PG_FICTITIOUS) != 0, ("vm_page_putfake: bad page %p", m)); uma_zfree(fakepg_zone, m); } /* * vm_page_updatefake: * * Update the given fictitious page to the specified physical address and * memory attribute. */ void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr) { KASSERT((m->flags & PG_FICTITIOUS) != 0, ("vm_page_updatefake: bad page %p", m)); m->phys_addr = paddr; pmap_page_set_memattr(m, memattr); } /* * vm_page_free: * * Free a page. */ void vm_page_free(vm_page_t m) { m->flags &= ~PG_ZERO; vm_page_free_toq(m); } /* * vm_page_free_zero: * * Free a page to the zerod-pages queue */ void vm_page_free_zero(vm_page_t m) { m->flags |= PG_ZERO; vm_page_free_toq(m); } /* * Unbusy and handle the page queueing for a page from a getpages request that * was optionally read ahead or behind. */ void vm_page_readahead_finish(vm_page_t m) { /* We shouldn't put invalid pages on queues. */ KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m)); /* * Since the page is not the actually needed one, whether it should * be activated or deactivated is not obvious. Empirical results * have shown that deactivating the page is usually the best choice, * unless the page is wanted by another thread. 
*/ vm_page_lock(m); if ((m->busy_lock & VPB_BIT_WAITERS) != 0) vm_page_activate(m); else vm_page_deactivate(m); vm_page_unlock(m); vm_page_xunbusy(m); } /* * vm_page_sleep_if_busy: * * Sleep and release the page queues lock if the page is busied. * Returns TRUE if the thread slept. * * The given page must be unlocked and object containing it must * be locked. */ int vm_page_sleep_if_busy(vm_page_t m, const char *msg) { vm_object_t obj; vm_page_lock_assert(m, MA_NOTOWNED); VM_OBJECT_ASSERT_WLOCKED(m->object); if (vm_page_busied(m)) { /* * The page-specific object must be cached because page * identity can change during the sleep, causing the * re-lock of a different object. * It is assumed that a reference to the object is already * held by the callers. */ obj = m->object; vm_page_lock(m); VM_OBJECT_WUNLOCK(obj); vm_page_busy_sleep(m, msg, false); VM_OBJECT_WLOCK(obj); return (TRUE); } return (FALSE); } /* * vm_page_dirty_KBI: [ internal use only ] * * Set all bits in the page's dirty field. * * The object containing the specified page must be locked if the * call is made from the machine-independent layer. * * See vm_page_clear_dirty_mask(). * * This function should only be called by vm_page_dirty(). */ void vm_page_dirty_KBI(vm_page_t m) { /* Refer to this operation by its public name. */ KASSERT(m->valid == VM_PAGE_BITS_ALL, ("vm_page_dirty: page is invalid!")); m->dirty = VM_PAGE_BITS_ALL; } /* * vm_page_insert: [ internal use only ] * * Inserts the given mem entry into the object and object list. * * The object must be locked. */ int vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) { vm_page_t mpred; VM_OBJECT_ASSERT_WLOCKED(object); mpred = vm_radix_lookup_le(&object->rtree, pindex); return (vm_page_insert_after(m, object, pindex, mpred)); } /* * vm_page_insert_after: * * Inserts the page "m" into the specified object at offset "pindex". * * The page "mpred" must immediately precede the offset "pindex" within * the specified object. * * The object must be locked. */ static int vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex, vm_page_t mpred) { vm_page_t msucc; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(m->object == NULL, ("vm_page_insert_after: page already inserted")); if (mpred != NULL) { KASSERT(mpred->object == object, ("vm_page_insert_after: object doesn't contain mpred")); KASSERT(mpred->pindex < pindex, ("vm_page_insert_after: mpred doesn't precede pindex")); msucc = TAILQ_NEXT(mpred, listq); } else msucc = TAILQ_FIRST(&object->memq); if (msucc != NULL) KASSERT(msucc->pindex > pindex, ("vm_page_insert_after: msucc doesn't succeed pindex")); /* * Record the object/offset pair in this page */ m->object = object; m->pindex = pindex; /* * Now link into the object's ordered list of backed pages. */ if (vm_radix_insert(&object->rtree, m)) { m->object = NULL; m->pindex = 0; return (1); } vm_page_insert_radixdone(m, object, mpred); return (0); } /* * vm_page_insert_radixdone: * * Complete page "m" insertion into the specified object after the * radix trie hooking. * * The page "mpred" must precede the offset "m->pindex" within the * specified object. * * The object must be locked. 
*/ static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred) { VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object != NULL && m->object == object, ("vm_page_insert_radixdone: page %p has inconsistent object", m)); if (mpred != NULL) { KASSERT(mpred->object == object, ("vm_page_insert_after: object doesn't contain mpred")); KASSERT(mpred->pindex < m->pindex, ("vm_page_insert_after: mpred doesn't precede pindex")); } if (mpred != NULL) TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq); else TAILQ_INSERT_HEAD(&object->memq, m, listq); /* * Show that the object has one more resident page. */ object->resident_page_count++; /* * Hold the vnode until the last page is released. */ if (object->resident_page_count == 1 && object->type == OBJT_VNODE) vhold(object->handle); /* * Since we are inserting a new and possibly dirty page, * update the object's OBJ_MIGHTBEDIRTY flag. */ if (pmap_page_is_write_mapped(m)) vm_object_set_writeable_dirty(object); } /* * vm_page_remove: * * Removes the specified page from its containing object, but does not * invalidate any backing storage. * * The object must be locked. The page must be locked if it is managed. */ void vm_page_remove(vm_page_t m) { vm_object_t object; vm_page_t mrem; if ((m->oflags & VPO_UNMANAGED) == 0) vm_page_assert_locked(m); if ((object = m->object) == NULL) return; VM_OBJECT_ASSERT_WLOCKED(object); if (vm_page_xbusied(m)) vm_page_xunbusy_maybelocked(m); mrem = vm_radix_remove(&object->rtree, m->pindex); KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m)); /* * Now remove from the object's list of backed pages. */ TAILQ_REMOVE(&object->memq, m, listq); /* * And show that the object has one fewer resident page. */ object->resident_page_count--; /* * The vnode may now be recycled. */ if (object->resident_page_count == 0 && object->type == OBJT_VNODE) vdrop(object->handle); m->object = NULL; } /* * vm_page_lookup: * * Returns the page associated with the object/offset * pair specified; if none is found, NULL is returned. * * The object must be locked. */ vm_page_t vm_page_lookup(vm_object_t object, vm_pindex_t pindex) { VM_OBJECT_ASSERT_LOCKED(object); return (vm_radix_lookup(&object->rtree, pindex)); } /* * vm_page_find_least: * * Returns the page associated with the object with least pindex * greater than or equal to the parameter pindex, or NULL. * * The object must be locked. */ vm_page_t vm_page_find_least(vm_object_t object, vm_pindex_t pindex) { vm_page_t m; VM_OBJECT_ASSERT_LOCKED(object); if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex) m = vm_radix_lookup_ge(&object->rtree, pindex); return (m); } /* * Returns the given page's successor (by pindex) within the object if it is * resident; if none is found, NULL is returned. * * The object must be locked. */ vm_page_t vm_page_next(vm_page_t m) { vm_page_t next; VM_OBJECT_ASSERT_LOCKED(m->object); if ((next = TAILQ_NEXT(m, listq)) != NULL) { MPASS(next->object == m->object); if (next->pindex != m->pindex + 1) next = NULL; } return (next); } /* * Returns the given page's predecessor (by pindex) within the object if it is * resident; if none is found, NULL is returned. * * The object must be locked. 
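 *
 * For illustration, with the object locked, a caller can walk a
 * contiguous run of resident pages backwards from "pindex"
 * (placeholder names):
 *
 *	for (m = vm_page_lookup(object, pindex); m != NULL;
 *	    m = vm_page_prev(m))
 *		... visit m ...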
*/ vm_page_t vm_page_prev(vm_page_t m) { vm_page_t prev; VM_OBJECT_ASSERT_LOCKED(m->object); if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) { MPASS(prev->object == m->object); if (prev->pindex != m->pindex - 1) prev = NULL; } return (prev); } /* * Uses the page mnew as a replacement for an existing page at index * pindex which must be already present in the object. * * The existing page must not be on a paging queue. */ vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex) { vm_page_t mold; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(mnew->object == NULL, ("vm_page_replace: page already in object")); /* * This function mostly follows vm_page_insert() and * vm_page_remove() without the radix, object count and vnode * dance. Double check such functions for more comments. */ mnew->object = object; mnew->pindex = pindex; mold = vm_radix_replace(&object->rtree, mnew); KASSERT(mold->queue == PQ_NONE, ("vm_page_replace: mold is on a paging queue")); /* Keep the resident page list in sorted order. */ TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq); TAILQ_REMOVE(&object->memq, mold, listq); mold->object = NULL; vm_page_xunbusy_maybelocked(mold); /* * The object's resident_page_count does not change because we have * swapped one page for another, but OBJ_MIGHTBEDIRTY. */ if (pmap_page_is_write_mapped(mnew)) vm_object_set_writeable_dirty(object); return (mold); } /* * vm_page_rename: * * Move the given memory entry from its * current object to the specified target object/offset. * * Note: swap associated with the page must be invalidated by the move. We * have to do this for several reasons: (1) we aren't freeing the * page, (2) we are dirtying the page, (3) the VM system is probably * moving the page from object A to B, and will then later move * the backing store from A to B and we can't have a conflict. * * Note: we *always* dirty the page. It is necessary both for the * fact that we moved it, and because we may be invalidating * swap. * * The objects must be locked. */ int vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) { vm_page_t mpred; vm_pindex_t opidx; VM_OBJECT_ASSERT_WLOCKED(new_object); mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex); KASSERT(mpred == NULL || mpred->pindex != new_pindex, ("vm_page_rename: pindex already renamed")); /* * Create a custom version of vm_page_insert() which does not depend * by m_prev and can cheat on the implementation aspects of the * function. */ opidx = m->pindex; m->pindex = new_pindex; if (vm_radix_insert(&new_object->rtree, m)) { m->pindex = opidx; return (1); } /* * The operation cannot fail anymore. The removal must happen before * the listq iterator is tainted. */ m->pindex = opidx; vm_page_lock(m); vm_page_remove(m); /* Return back to the new pindex to complete vm_page_insert(). */ m->pindex = new_pindex; m->object = new_object; vm_page_unlock(m); vm_page_insert_radixdone(m, new_object, mpred); vm_page_dirty(m); return (0); } /* * vm_page_alloc: * * Allocate and return a page that is associated with the specified * object and offset pair. By default, this page is exclusive busied. * * The caller must always specify an allocation class. 
* * allocation classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs a page * VM_ALLOC_INTERRUPT interrupt time request * * optional allocation flags: * VM_ALLOC_COUNT(number) the number of additional pages that the caller * intends to allocate * VM_ALLOC_NOBUSY do not exclusive busy the page * VM_ALLOC_NODUMP do not include the page in a kernel core dump * VM_ALLOC_NOOBJ page is not associated with an object and * should not be exclusive busy * VM_ALLOC_SBUSY shared busy the allocated page * VM_ALLOC_WIRED wire the allocated page * VM_ALLOC_ZERO prefer a zeroed page * * This routine may not sleep. */ vm_page_t vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) { return (vm_page_alloc_after(object, pindex, req, object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) : NULL)); } vm_page_t vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain, int req) { return (vm_page_alloc_domain_after(object, pindex, domain, req, object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) : NULL)); } /* * Allocate a page in the specified object with the given page index. To * optimize insertion of the page into the object, the caller must also specifiy * the resident page in the object with largest index smaller than the given * page index, or NULL if no such page exists. */ vm_page_t vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req, vm_page_t mpred) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; vm_page_t m; - int domain, wait; + int domain; - m = NULL; - vm_policy_iterator_init(&vi); - wait = req & (VM_ALLOC_WAITFAIL | VM_ALLOC_WAITOK); - req &= ~wait; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi)) - req |= wait; + vm_domainset_iter_page_init(&di, object, &domain, &req); + do { m = vm_page_alloc_domain_after(object, pindex, domain, req, mpred); if (m != NULL) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_page(&di, &domain, &req) == 0); return (m); } vm_page_t vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, int req, vm_page_t mpred) { vm_page_t m; int flags, req_class; u_int free_count; KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), ("inconsistent object(%p)/req(%x)", object, req)); KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, ("Can't sleep and retry object insertion.")); KASSERT(mpred == NULL || mpred->pindex < pindex, ("mpred %p doesn't precede pindex 0x%jx", mpred, (uintmax_t)pindex)); if (object != NULL) VM_OBJECT_ASSERT_WLOCKED(object); req_class = req & VM_ALLOC_CLASS_MASK; /* * The page daemon is allowed to dig deeper into the free page list. */ if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) req_class = VM_ALLOC_SYSTEM; /* * Allocate a page if the number of free pages exceeds the minimum * for the request class. */ again: m = NULL; mtx_lock(&vm_page_queue_free_mtx); if (vm_cnt.v_free_count > vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && vm_cnt.v_free_count > 0)) { /* * Can we allocate the page from a reservation? 
*/ #if VM_NRESERVLEVEL > 0 if (object == NULL || (object->flags & (OBJ_COLORED | OBJ_FICTITIOUS)) != OBJ_COLORED || (m = vm_reserv_alloc_page(object, pindex, domain, mpred)) == NULL) #endif { /* * If not, allocate it from the free page queues. */ m = vm_phys_alloc_pages(domain, object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); #if VM_NRESERVLEVEL > 0 if (m == NULL && vm_reserv_reclaim_inactive(domain)) { m = vm_phys_alloc_pages(domain, object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); } #endif } } if (m == NULL) { /* * Not allocatable, give up. */ if (vm_page_alloc_fail(object, req)) goto again; return (NULL); } /* * At this point we had better have found a good page. */ KASSERT(m != NULL, ("missing page")); free_count = vm_phys_freecnt_adj(m, -1); mtx_unlock(&vm_page_queue_free_mtx); vm_page_alloc_check(m); /* * Initialize the page. Only the PG_ZERO flag is inherited. */ flags = 0; if ((req & VM_ALLOC_ZERO) != 0) flags = PG_ZERO; flags &= m->flags; if ((req & VM_ALLOC_NODUMP) != 0) flags |= PG_NODUMP; m->flags = flags; m->aflags = 0; m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; m->busy_lock = VPB_UNBUSIED; if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) m->busy_lock = VPB_SINGLE_EXCLUSIVER; if ((req & VM_ALLOC_SBUSY) != 0) m->busy_lock = VPB_SHARERS_WORD(1); if (req & VM_ALLOC_WIRED) { /* * The page lock is not required for wiring a page until that * page is inserted into the object. */ atomic_add_int(&vm_cnt.v_wire_count, 1); m->wire_count = 1; } m->act_count = 0; if (object != NULL) { if (vm_page_insert_after(m, object, pindex, mpred)) { pagedaemon_wakeup(); if (req & VM_ALLOC_WIRED) { atomic_subtract_int(&vm_cnt.v_wire_count, 1); m->wire_count = 0; } KASSERT(m->object == NULL, ("page %p has object", m)); m->oflags = VPO_UNMANAGED; m->busy_lock = VPB_UNBUSIED; /* Don't change PG_ZERO. */ vm_page_free_toq(m); if (req & VM_ALLOC_WAITFAIL) { VM_OBJECT_WUNLOCK(object); vm_radix_wait(); VM_OBJECT_WLOCK(object); } return (NULL); } /* Ignore device objects; the pager sets "memattr" for them. */ if (object->memattr != VM_MEMATTR_DEFAULT && (object->flags & OBJ_FICTITIOUS) == 0) pmap_page_set_memattr(m, object->memattr); } else m->pindex = pindex; /* * Don't wakeup too often - wakeup the pageout daemon when * we would be nearly out of memory. */ if (vm_paging_needed(free_count)) pagedaemon_wakeup(); return (m); } /* * vm_page_alloc_contig: * * Allocate a contiguous set of physical pages of the given size "npages" * from the free lists. All of the physical pages must be at or above * the given physical address "low" and below the given physical address * "high". The given value "alignment" determines the alignment of the * first physical page in the set. If the given value "boundary" is * non-zero, then the set of physical pages cannot cross any physical * address boundary that is a multiple of that value. Both "alignment" * and "boundary" must be a power of two. * * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT, * then the memory attribute setting for the physical pages is configured * to the object's memory attribute setting. Otherwise, the memory * attribute setting for the physical pages is configured to "memattr", * overriding the object's memory attribute setting. However, if the * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the * memory attribute setting for the physical pages cannot be configured * to VM_MEMATTR_DEFAULT. 
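 *
 * For illustration, a caller that wants "npages" wired, physically
 * contiguous pages anywhere in the physical address space, with no
 * boundary restriction and the default memory attribute, might request
 * (argument values here are only an example):
 *
 *	m = vm_page_alloc_contig(object, pindex, VM_ALLOC_NORMAL |
 *	    VM_ALLOC_WIRED, npages, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0,
 *	    VM_MEMATTR_DEFAULT);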
* * The specified object may not contain fictitious pages. * * The caller must always specify an allocation class. * * allocation classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs a page * VM_ALLOC_INTERRUPT interrupt time request * * optional allocation flags: * VM_ALLOC_NOBUSY do not exclusive busy the page * VM_ALLOC_NODUMP do not include the page in a kernel core dump * VM_ALLOC_NOOBJ page is not associated with an object and * should not be exclusive busy * VM_ALLOC_SBUSY shared busy the allocated page * VM_ALLOC_WIRED wire the allocated page * VM_ALLOC_ZERO prefer a zeroed page * * This routine may not sleep. */ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; vm_page_t m; - int domain, wait; + int domain; - m = NULL; - vm_policy_iterator_init(&vi); - wait = req & (VM_ALLOC_WAITFAIL | VM_ALLOC_WAITOK); - req &= ~wait; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi)) - req |= wait; + vm_domainset_iter_page_init(&di, object, &domain, &req); + do { m = vm_page_alloc_contig_domain(object, pindex, domain, req, npages, low, high, alignment, boundary, memattr); if (m != NULL) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_page(&di, &domain, &req) == 0); return (m); } vm_page_t vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr) { vm_page_t m, m_ret, mpred; u_int busy_lock, flags, oflags; int req_class; mpred = NULL; /* XXX: pacify gcc */ KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object, req)); KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0, ("Can't sleep and retry object insertion.")); if (object != NULL) { VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((object->flags & OBJ_FICTITIOUS) == 0, ("vm_page_alloc_contig: object %p has fictitious pages", object)); } KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero")); req_class = req & VM_ALLOC_CLASS_MASK; /* * The page daemon is allowed to dig deeper into the free page list. */ if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) req_class = VM_ALLOC_SYSTEM; if (object != NULL) { mpred = vm_radix_lookup_le(&object->rtree, pindex); KASSERT(mpred == NULL || mpred->pindex != pindex, ("vm_page_alloc_contig: pindex already allocated")); } /* * Can we allocate the pages without the number of free pages falling * below the lower bound for the allocation class? */ again: m_ret = NULL; mtx_lock(&vm_page_queue_free_mtx); if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && vm_cnt.v_free_count >= npages)) { /* * Can we allocate the pages from a reservation? */ #if VM_NRESERVLEVEL > 0 retry: if (object == NULL || (object->flags & OBJ_COLORED) == 0 || (m_ret = vm_reserv_alloc_contig(object, pindex, domain, npages, low, high, alignment, boundary, mpred)) == NULL) #endif /* * If not, allocate them from the free page queues. 
*/ m_ret = vm_phys_alloc_contig(domain, npages, low, high, alignment, boundary); #if VM_NRESERVLEVEL > 0 if (m_ret == NULL && vm_reserv_reclaim_contig( domain, npages, low, high, alignment, boundary)) goto retry; #endif } if (m_ret == NULL) { if (vm_page_alloc_fail(object, req)) goto again; return (NULL); } vm_phys_freecnt_adj(m_ret, -npages); mtx_unlock(&vm_page_queue_free_mtx); for (m = m_ret; m < &m_ret[npages]; m++) vm_page_alloc_check(m); /* * Initialize the pages. Only the PG_ZERO flag is inherited. */ flags = 0; if ((req & VM_ALLOC_ZERO) != 0) flags = PG_ZERO; if ((req & VM_ALLOC_NODUMP) != 0) flags |= PG_NODUMP; oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ? VPO_UNMANAGED : 0; busy_lock = VPB_UNBUSIED; if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) busy_lock = VPB_SINGLE_EXCLUSIVER; if ((req & VM_ALLOC_SBUSY) != 0) busy_lock = VPB_SHARERS_WORD(1); if ((req & VM_ALLOC_WIRED) != 0) atomic_add_int(&vm_cnt.v_wire_count, npages); if (object != NULL) { if (object->memattr != VM_MEMATTR_DEFAULT && memattr == VM_MEMATTR_DEFAULT) memattr = object->memattr; } for (m = m_ret; m < &m_ret[npages]; m++) { m->aflags = 0; m->flags = (m->flags | PG_NODUMP) & flags; m->busy_lock = busy_lock; if ((req & VM_ALLOC_WIRED) != 0) m->wire_count = 1; m->act_count = 0; m->oflags = oflags; if (object != NULL) { if (vm_page_insert_after(m, object, pindex, mpred)) { pagedaemon_wakeup(); if ((req & VM_ALLOC_WIRED) != 0) atomic_subtract_int( &vm_cnt.v_wire_count, npages); KASSERT(m->object == NULL, ("page %p has object", m)); mpred = m; for (m = m_ret; m < &m_ret[npages]; m++) { if (m <= mpred && (req & VM_ALLOC_WIRED) != 0) m->wire_count = 0; m->oflags = VPO_UNMANAGED; m->busy_lock = VPB_UNBUSIED; /* Don't change PG_ZERO. */ vm_page_free_toq(m); } if (req & VM_ALLOC_WAITFAIL) { VM_OBJECT_WUNLOCK(object); vm_radix_wait(); VM_OBJECT_WLOCK(object); } return (NULL); } mpred = m; } else m->pindex = pindex; if (memattr != VM_MEMATTR_DEFAULT) pmap_page_set_memattr(m, memattr); pindex++; } if (vm_paging_needed(vm_cnt.v_free_count)) pagedaemon_wakeup(); return (m_ret); } /* * Check a page that has been freshly dequeued from a freelist. */ static void vm_page_alloc_check(vm_page_t m) { KASSERT(m->object == NULL, ("page %p has object", m)); KASSERT(m->queue == PQ_NONE, ("page %p has unexpected queue %d", m, m->queue)); KASSERT(m->wire_count == 0, ("page %p is wired", m)); KASSERT(m->hold_count == 0, ("page %p is held", m)); KASSERT(!vm_page_busied(m), ("page %p is busy", m)); KASSERT(m->dirty == 0, ("page %p is dirty", m)); KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, ("page %p has unexpected memattr %d", m, pmap_page_get_memattr(m))); KASSERT(m->valid == 0, ("free page %p is valid", m)); } /* * vm_page_alloc_freelist: * * Allocate a physical page from the specified free page list. * * The caller must always specify an allocation class. * * allocation classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs a page * VM_ALLOC_INTERRUPT interrupt time request * * optional allocation flags: * VM_ALLOC_COUNT(number) the number of additional pages that the caller * intends to allocate * VM_ALLOC_WIRED wire the allocated page * VM_ALLOC_ZERO prefer a zeroed page * * This routine may not sleep. 
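 *
 * For illustration, a machine-dependent caller wanting a wired,
 * zero-filled page from freelist "flind" (a placeholder index) might
 * request:
 *
 *	m = vm_page_alloc_freelist(flind,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);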
*/ vm_page_t vm_page_alloc_freelist(int flind, int req) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; vm_page_t m; - int domain, wait; + int domain; - m = NULL; - vm_policy_iterator_init(&vi); - wait = req & (VM_ALLOC_WAITFAIL | VM_ALLOC_WAITOK); - req &= ~wait; - while (vm_domain_iterator_run(&vi, &domain) == 0) { - if (vm_domain_iterator_isdone(&vi)) - req |= wait; + vm_domainset_iter_page_init(&di, kernel_object, &domain, &req); + do { m = vm_page_alloc_freelist_domain(domain, flind, req); if (m != NULL) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_page(&di, &domain, &req) == 0); return (m); } vm_page_t vm_page_alloc_freelist_domain(int domain, int flind, int req) { vm_page_t m; u_int flags, free_count; int req_class; req_class = req & VM_ALLOC_CLASS_MASK; /* * The page daemon is allowed to dig deeper into the free page list. */ if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) req_class = VM_ALLOC_SYSTEM; /* * Do not allocate reserved pages unless the req has asked for it. */ again: mtx_lock(&vm_page_queue_free_mtx); if (vm_cnt.v_free_count > vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && vm_cnt.v_free_count > 0)) m = vm_phys_alloc_freelist_pages(domain, flind, VM_FREEPOOL_DIRECT, 0); if (m == NULL) { if (vm_page_alloc_fail(NULL, req)) goto again; return (NULL); } free_count = vm_phys_freecnt_adj(m, -1); mtx_unlock(&vm_page_queue_free_mtx); vm_page_alloc_check(m); /* * Initialize the page. Only the PG_ZERO flag is inherited. */ m->aflags = 0; flags = 0; if ((req & VM_ALLOC_ZERO) != 0) flags = PG_ZERO; m->flags &= flags; if ((req & VM_ALLOC_WIRED) != 0) { /* * The page lock is not required for wiring a page that does * not belong to an object. */ atomic_add_int(&vm_cnt.v_wire_count, 1); m->wire_count = 1; } /* Unmanaged pages don't use "act_count". */ m->oflags = VPO_UNMANAGED; if (vm_paging_needed(free_count)) pagedaemon_wakeup(); return (m); } #define VPSC_ANY 0 /* No restrictions. */ #define VPSC_NORESERV 1 /* Skip reservations; implies VPSC_NOSUPER. */ #define VPSC_NOSUPER 2 /* Skip superpages. */ /* * vm_page_scan_contig: * * Scan vm_page_array[] between the specified entries "m_start" and * "m_end" for a run of contiguous physical pages that satisfy the * specified conditions, and return the lowest page in the run. The * specified "alignment" determines the alignment of the lowest physical * page in the run. If the specified "boundary" is non-zero, then the * run of physical pages cannot span a physical address that is a * multiple of "boundary". * * "m_end" is never dereferenced, so it need not point to a vm_page * structure within vm_page_array[]. * * "npages" must be greater than zero. "m_start" and "m_end" must not * span a hole (or discontiguity) in the physical address space. Both * "alignment" and "boundary" must be a power of two. 
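 *
 * For example, alignment = 2 MB with boundary = 0 asks for a run whose
 * first page is 2 MB-aligned, while additionally passing boundary =
 * 4 MB rejects any run that would cross a 4 MB-aligned physical
 * address.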
*/ vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options) { struct mtx *m_mtx; vm_object_t object; vm_paddr_t pa; vm_page_t m, m_run; #if VM_NRESERVLEVEL > 0 int level; #endif int m_inc, order, run_ext, run_len; KASSERT(npages > 0, ("npages is 0")); KASSERT(powerof2(alignment), ("alignment is not a power of 2")); KASSERT(powerof2(boundary), ("boundary is not a power of 2")); m_run = NULL; run_len = 0; m_mtx = NULL; for (m = m_start; m < m_end && run_len < npages; m += m_inc) { KASSERT((m->flags & PG_MARKER) == 0, ("page %p is PG_MARKER", m)); KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1, ("fictitious page %p has invalid wire count", m)); /* * If the current page would be the start of a run, check its * physical address against the end, alignment, and boundary * conditions. If it doesn't satisfy these conditions, either * terminate the scan or advance to the next page that * satisfies the failed condition. */ if (run_len == 0) { KASSERT(m_run == NULL, ("m_run != NULL")); if (m + npages > m_end) break; pa = VM_PAGE_TO_PHYS(m); if ((pa & (alignment - 1)) != 0) { m_inc = atop(roundup2(pa, alignment) - pa); continue; } if (rounddown2(pa ^ (pa + ptoa(npages) - 1), boundary) != 0) { m_inc = atop(roundup2(pa, boundary) - pa); continue; } } else KASSERT(m_run != NULL, ("m_run == NULL")); vm_page_change_lock(m, &m_mtx); m_inc = 1; retry: if (m->wire_count != 0 || m->hold_count != 0) run_ext = 0; #if VM_NRESERVLEVEL > 0 else if ((level = vm_reserv_level(m)) >= 0 && (options & VPSC_NORESERV) != 0) { run_ext = 0; /* Advance to the end of the reservation. */ pa = VM_PAGE_TO_PHYS(m); m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - pa); } #endif else if ((object = m->object) != NULL) { /* * The page is considered eligible for relocation if * and only if it could be laundered or reclaimed by * the page daemon. */ if (!VM_OBJECT_TRYRLOCK(object)) { mtx_unlock(m_mtx); VM_OBJECT_RLOCK(object); mtx_lock(m_mtx); if (m->object != object) { /* * The page may have been freed. */ VM_OBJECT_RUNLOCK(object); goto retry; } else if (m->wire_count != 0 || m->hold_count != 0) { run_ext = 0; goto unlock; } } KASSERT((m->flags & PG_UNHOLDFREE) == 0, ("page %p is PG_UNHOLDFREE", m)); /* Don't care: PG_NODUMP, PG_ZERO. */ if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP && object->type != OBJT_VNODE) { run_ext = 0; #if VM_NRESERVLEVEL > 0 } else if ((options & VPSC_NOSUPER) != 0 && (level = vm_reserv_level_iffullpop(m)) >= 0) { run_ext = 0; /* Advance to the end of the superpage. */ pa = VM_PAGE_TO_PHYS(m); m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) - pa); #endif } else if (object->memattr == VM_MEMATTR_DEFAULT && m->queue != PQ_NONE && !vm_page_busied(m)) { /* * The page is allocated but eligible for * relocation. Extend the current run by one * page. */ KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, ("page %p has an unexpected memattr", m)); KASSERT((m->oflags & (VPO_SWAPINPROG | VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, ("page %p has unexpected oflags", m)); /* Don't care: VPO_NOSYNC. */ run_ext = 1; } else run_ext = 0; unlock: VM_OBJECT_RUNLOCK(object); #if VM_NRESERVLEVEL > 0 } else if (level >= 0) { /* * The page is reserved but not yet allocated. In * other words, it is still free. Extend the current * run by one page. */ run_ext = 1; #endif } else if ((order = m->order) < VM_NFREEORDER) { /* * The page is enqueued in the physical memory * allocator's free page queues. 
Moreover, it is the * first page in a power-of-two-sized run of * contiguous free pages. Add these pages to the end * of the current run, and jump ahead. */ run_ext = 1 << order; m_inc = 1 << order; } else { /* * Skip the page for one of the following reasons: (1) * It is enqueued in the physical memory allocator's * free page queues. However, it is not the first * page in a run of contiguous free pages. (This case * rarely occurs because the scan is performed in * ascending order.) (2) It is not reserved, and it is * transitioning from free to allocated. (Conversely, * the transition from allocated to free for managed * pages is blocked by the page lock.) (3) It is * allocated but not contained by an object and not * wired, e.g., allocated by Xen's balloon driver. */ run_ext = 0; } /* * Extend or reset the current run of pages. */ if (run_ext > 0) { if (run_len == 0) m_run = m; run_len += run_ext; } else { if (run_len > 0) { m_run = NULL; run_len = 0; } } } if (m_mtx != NULL) mtx_unlock(m_mtx); if (run_len >= npages) return (m_run); return (NULL); } /* * vm_page_reclaim_run: * * Try to relocate each of the allocated virtual pages within the * specified run of physical pages to a new physical address. Free the * physical pages underlying the relocated virtual pages. A virtual page * is relocatable if and only if it could be laundered or reclaimed by * the page daemon. Whenever possible, a virtual page is relocated to a * physical address above "high". * * Returns 0 if every physical page within the run was already free or * just freed by a successful relocation. Otherwise, returns a non-zero * value indicating why the last attempt to relocate a virtual page was * unsuccessful. * * "req_class" must be an allocation class. */ static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run, vm_paddr_t high) { struct mtx *m_mtx; struct spglist free; vm_object_t object; vm_paddr_t pa; vm_page_t m, m_end, m_new; int error, order, req; KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class, ("req_class is not an allocation class")); SLIST_INIT(&free); error = 0; m = m_run; m_end = m_run + npages; m_mtx = NULL; for (; error == 0 && m < m_end; m++) { KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0, ("page %p is PG_FICTITIOUS or PG_MARKER", m)); /* * Avoid releasing and reacquiring the same page lock. */ vm_page_change_lock(m, &m_mtx); retry: if (m->wire_count != 0 || m->hold_count != 0) error = EBUSY; else if ((object = m->object) != NULL) { /* * The page is relocated if and only if it could be * laundered or reclaimed by the page daemon. */ if (!VM_OBJECT_TRYWLOCK(object)) { mtx_unlock(m_mtx); VM_OBJECT_WLOCK(object); mtx_lock(m_mtx); if (m->object != object) { /* * The page may have been freed. */ VM_OBJECT_WUNLOCK(object); goto retry; } else if (m->wire_count != 0 || m->hold_count != 0) { error = EBUSY; goto unlock; } } KASSERT((m->flags & PG_UNHOLDFREE) == 0, ("page %p is PG_UNHOLDFREE", m)); /* Don't care: PG_NODUMP, PG_ZERO. */ if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP && object->type != OBJT_VNODE) error = EINVAL; else if (object->memattr != VM_MEMATTR_DEFAULT) error = EINVAL; else if (m->queue != PQ_NONE && !vm_page_busied(m)) { KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT, ("page %p has an unexpected memattr", m)); KASSERT((m->oflags & (VPO_SWAPINPROG | VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, ("page %p has unexpected oflags", m)); /* Don't care: VPO_NOSYNC. */ if (m->valid != 0) { /* * First, try to allocate a new page * that is above "high". 
Failing * that, try to allocate a new page * that is below "m_run". Allocate * the new page between the end of * "m_run" and "high" only as a last * resort. */ req = req_class | VM_ALLOC_NOOBJ; if ((m->flags & PG_NODUMP) != 0) req |= VM_ALLOC_NODUMP; if (trunc_page(high) != ~(vm_paddr_t)PAGE_MASK) { m_new = vm_page_alloc_contig( NULL, 0, req, 1, round_page(high), ~(vm_paddr_t)0, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); } else m_new = NULL; if (m_new == NULL) { pa = VM_PAGE_TO_PHYS(m_run); m_new = vm_page_alloc_contig( NULL, 0, req, 1, 0, pa - 1, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); } if (m_new == NULL) { pa += ptoa(npages); m_new = vm_page_alloc_contig( NULL, 0, req, 1, pa, high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT); } if (m_new == NULL) { error = ENOMEM; goto unlock; } KASSERT(m_new->wire_count == 0, ("page %p is wired", m)); /* * Replace "m" with the new page. For * vm_page_replace(), "m" must be busy * and dequeued. Finally, change "m" * as if vm_page_free() was called. */ if (object->ref_count != 0) pmap_remove_all(m); m_new->aflags = m->aflags; KASSERT(m_new->oflags == VPO_UNMANAGED, ("page %p is managed", m)); m_new->oflags = m->oflags & VPO_NOSYNC; pmap_copy_page(m, m_new); m_new->valid = m->valid; m_new->dirty = m->dirty; m->flags &= ~PG_ZERO; vm_page_xbusy(m); vm_page_remque(m); vm_page_replace_checked(m_new, object, m->pindex, m); m->valid = 0; vm_page_undirty(m); /* * The new page must be deactivated * before the object is unlocked. */ vm_page_change_lock(m_new, &m_mtx); vm_page_deactivate(m_new); } else { m->flags &= ~PG_ZERO; vm_page_remque(m); vm_page_remove(m); KASSERT(m->dirty == 0, ("page %p is dirty", m)); } SLIST_INSERT_HEAD(&free, m, plinks.s.ss); } else error = EBUSY; unlock: VM_OBJECT_WUNLOCK(object); } else { mtx_lock(&vm_page_queue_free_mtx); order = m->order; if (order < VM_NFREEORDER) { /* * The page is enqueued in the physical memory * allocator's free page queues. Moreover, it * is the first page in a power-of-two-sized * run of contiguous free pages. Jump ahead * to the last page within that run, and * continue from there. */ m += (1 << order) - 1; } #if VM_NRESERVLEVEL > 0 else if (vm_reserv_is_page_free(m)) order = 0; #endif mtx_unlock(&vm_page_queue_free_mtx); if (order == VM_NFREEORDER) error = EINVAL; } } if (m_mtx != NULL) mtx_unlock(m_mtx); if ((m = SLIST_FIRST(&free)) != NULL) { mtx_lock(&vm_page_queue_free_mtx); do { SLIST_REMOVE_HEAD(&free, plinks.s.ss); vm_page_free_phys(m); } while ((m = SLIST_FIRST(&free)) != NULL); vm_page_free_wakeup(); mtx_unlock(&vm_page_queue_free_mtx); } return (error); } #define NRUNS 16 CTASSERT(powerof2(NRUNS)); #define RUN_INDEX(count) ((count) & (NRUNS - 1)) #define MIN_RECLAIM 8 /* * vm_page_reclaim_contig: * * Reclaim allocated, contiguous physical memory satisfying the specified * conditions by relocating the virtual pages using that physical memory. * Returns true if reclamation is successful and false otherwise. Since * relocation requires the allocation of physical pages, reclamation may * fail due to a shortage of free pages. When reclamation fails, callers * are expected to perform VM_WAIT before retrying a failed allocation * operation, e.g., vm_page_alloc_contig(). * * The caller must always specify an allocation class through "req". * * allocation classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs a page * VM_ALLOC_INTERRUPT interrupt time request * * The optional allocation flags are ignored. * * "npages" must be greater than zero. 
Both "alignment" and "boundary" * must be a power of two. */ bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) { vm_paddr_t curr_low; vm_page_t m_run, m_runs[NRUNS]; u_long count, reclaimed; int error, i, options, req_class; KASSERT(npages > 0, ("npages is 0")); KASSERT(powerof2(alignment), ("alignment is not a power of 2")); KASSERT(powerof2(boundary), ("boundary is not a power of 2")); req_class = req & VM_ALLOC_CLASS_MASK; /* * The page daemon is allowed to dig deeper into the free page list. */ if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) req_class = VM_ALLOC_SYSTEM; /* * Return if the number of free pages cannot satisfy the requested * allocation. */ count = vm_cnt.v_free_count; if (count < npages + vm_cnt.v_free_reserved || (count < npages + vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) || (count < npages && req_class == VM_ALLOC_INTERRUPT)) return (false); /* * Scan up to three times, relaxing the restrictions ("options") on * the reclamation of reservations and superpages each time. */ for (options = VPSC_NORESERV;;) { /* * Find the highest runs that satisfy the given constraints * and restrictions, and record them in "m_runs". */ curr_low = low; count = 0; for (;;) { m_run = vm_phys_scan_contig(domain, npages, curr_low, high, alignment, boundary, options); if (m_run == NULL) break; curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages); m_runs[RUN_INDEX(count)] = m_run; count++; } /* * Reclaim the highest runs in LIFO (descending) order until * the number of reclaimed pages, "reclaimed", is at least * MIN_RECLAIM. Reset "reclaimed" each time because each * reclamation is idempotent, and runs will (likely) recur * from one scan to the next as restrictions are relaxed. */ reclaimed = 0; for (i = 0; count > 0 && i < NRUNS; i++) { count--; m_run = m_runs[RUN_INDEX(count)]; error = vm_page_reclaim_run(req_class, npages, m_run, high); if (error == 0) { reclaimed += npages; if (reclaimed >= MIN_RECLAIM) return (true); } } /* * Either relax the restrictions on the next scan or return if * the last scan had no restrictions. */ if (options == VPSC_NORESERV) options = VPSC_NOSUPER; else if (options == VPSC_NOSUPER) options = VPSC_ANY; else if (options == VPSC_ANY) return (reclaimed != 0); } } bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) { - struct vm_domain_iterator vi; + struct vm_domainset_iter di; int domain; bool ret; - ret = false; - vm_policy_iterator_init(&vi); - while ((vm_domain_iterator_run(&vi, &domain)) == 0) { + vm_domainset_iter_page_init(&di, kernel_object, &domain, &req); + do { ret = vm_page_reclaim_contig_domain(domain, req, npages, low, high, alignment, boundary); if (ret) break; - } - vm_policy_iterator_finish(&vi); + } while (vm_domainset_iter_page(&di, &domain, &req) == 0); return (ret); } /* * vm_wait: (also see VM_WAIT macro) * * Sleep until free pages are available for allocation. * - Called in various places before memory allocations. 
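 *
 * A sketch of the traditional retry idiom around a failed allocation
 * ("object" and "pindex" are placeholders; the object lock must be
 * dropped before sleeping):
 *
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL)) == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_WLOCK(object);
 *	}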
*/ static void _vm_wait(void) { mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); if (curproc == pageproc) { vm_pageout_pages_needed = 1; msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx, PDROP | PSWP, "VMWait", 0); } else { if (__predict_false(pageproc == NULL)) panic("vm_wait in early boot"); if (!vm_pageout_wanted) { vm_pageout_wanted = true; wakeup(&vm_pageout_wanted); } vm_pages_needed = true; msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, "vmwait", 0); } } void vm_wait(void) { mtx_lock(&vm_page_queue_free_mtx); _vm_wait(); } /* * vm_page_alloc_fail: * * Called when a page allocation function fails. Informs the * pagedaemon and performs the requested wait. Requires the * page_queue_free and object lock on entry. Returns with the * object lock held and free lock released. Returns an error when * retry is necessary. * */ static int vm_page_alloc_fail(vm_object_t object, int req) { mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); atomic_add_int(&vm_pageout_deficit, max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); pagedaemon_wakeup(); if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) { if (object != NULL) VM_OBJECT_WUNLOCK(object); _vm_wait(); if (object != NULL) VM_OBJECT_WLOCK(object); if (req & VM_ALLOC_WAITOK) return (EAGAIN); } else mtx_unlock(&vm_page_queue_free_mtx); return (0); } /* * vm_waitpfault: (also see VM_WAITPFAULT macro) * * Sleep until free pages are available for allocation. * - Called only in vm_fault so that processes page faulting * can be easily tracked. * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing * processes will be able to grab memory first. Do not change * this balance without careful testing first. */ void vm_waitpfault(void) { mtx_lock(&vm_page_queue_free_mtx); if (!vm_pageout_wanted) { vm_pageout_wanted = true; wakeup(&vm_pageout_wanted); } vm_pages_needed = true; msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, "pfault", 0); } struct vm_pagequeue * vm_page_pagequeue(vm_page_t m) { if (vm_page_in_laundry(m)) return (&vm_dom[0].vmd_pagequeues[m->queue]); else return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]); } /* * vm_page_dequeue: * * Remove the given page from its current page queue. * * The page must be locked. */ void vm_page_dequeue(vm_page_t m) { struct vm_pagequeue *pq; vm_page_assert_locked(m); KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued", m)); pq = vm_page_pagequeue(m); vm_pagequeue_lock(pq); m->queue = PQ_NONE; TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); vm_pagequeue_cnt_dec(pq); vm_pagequeue_unlock(pq); } /* * vm_page_dequeue_locked: * * Remove the given page from its current page queue. * * The page and page queue must be locked. */ void vm_page_dequeue_locked(vm_page_t m) { struct vm_pagequeue *pq; vm_page_lock_assert(m, MA_OWNED); pq = vm_page_pagequeue(m); vm_pagequeue_assert_locked(pq); m->queue = PQ_NONE; TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); vm_pagequeue_cnt_dec(pq); } /* * vm_page_enqueue: * * Add the given page to the specified page queue. * * The page must be locked. 
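 *
 * The queue argument names one of the paging queues used in this file,
 * e.g. PQ_ACTIVE, PQ_INACTIVE, PQ_LAUNDRY or PQ_UNSWAPPABLE.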
*/ static void vm_page_enqueue(uint8_t queue, vm_page_t m) { struct vm_pagequeue *pq; vm_page_lock_assert(m, MA_OWNED); KASSERT(queue < PQ_COUNT, ("vm_page_enqueue: invalid queue %u request for page %p", queue, m)); if (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE) pq = &vm_dom[0].vmd_pagequeues[queue]; else pq = &vm_phys_domain(m)->vmd_pagequeues[queue]; vm_pagequeue_lock(pq); m->queue = queue; TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); vm_pagequeue_cnt_inc(pq); vm_pagequeue_unlock(pq); } /* * vm_page_requeue: * * Move the given page to the tail of its current page queue. * * The page must be locked. */ void vm_page_requeue(vm_page_t m) { struct vm_pagequeue *pq; vm_page_lock_assert(m, MA_OWNED); KASSERT(m->queue != PQ_NONE, ("vm_page_requeue: page %p is not queued", m)); pq = vm_page_pagequeue(m); vm_pagequeue_lock(pq); TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); vm_pagequeue_unlock(pq); } /* * vm_page_requeue_locked: * * Move the given page to the tail of its current page queue. * * The page queue must be locked. */ void vm_page_requeue_locked(vm_page_t m) { struct vm_pagequeue *pq; KASSERT(m->queue != PQ_NONE, ("vm_page_requeue_locked: page %p is not queued", m)); pq = vm_page_pagequeue(m); vm_pagequeue_assert_locked(pq); TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); } /* * vm_page_activate: * * Put the specified page on the active list (if appropriate). * Ensure that act_count is at least ACT_INIT but do not otherwise * mess with it. * * The page must be locked. */ void vm_page_activate(vm_page_t m) { int queue; vm_page_lock_assert(m, MA_OWNED); if ((queue = m->queue) != PQ_ACTIVE) { if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; if (queue != PQ_NONE) vm_page_dequeue(m); vm_page_enqueue(PQ_ACTIVE, m); } else KASSERT(queue == PQ_NONE, ("vm_page_activate: wired page %p is queued", m)); } else { if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; } } /* * vm_page_free_wakeup: * * Helper routine for vm_page_free_toq(). This routine is called * when a page is added to the free queues. * * The page queues must be locked. */ static void vm_page_free_wakeup(void) { mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); /* * if pageout daemon needs pages, then tell it that there are * some free. */ if (vm_pageout_pages_needed && vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) { wakeup(&vm_pageout_pages_needed); vm_pageout_pages_needed = 0; } /* * wakeup processes that are waiting on memory if we hit a * high water mark. And wakeup scheduler process if we have * lots of memory. this process will swapin processes. */ if (vm_pages_needed && !vm_page_count_min()) { vm_pages_needed = false; wakeup(&vm_cnt.v_free_count); } } /* * vm_page_free_prep: * * Prepares the given page to be put on the free list, * disassociating it from any VM object. The caller may return * the page to the free list only if this function returns true. * * The object must be locked. The page must be locked if it is * managed. For a queued managed page, the pagequeue_locked * argument specifies whether the page queue is already locked. 
*/ bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked) { #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP) if ((m->flags & PG_ZERO) != 0) { uint64_t *p; int i; p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++) KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx", m, i, (uintmax_t)*p)); } #endif if ((m->oflags & VPO_UNMANAGED) == 0) { vm_page_lock_assert(m, MA_OWNED); KASSERT(!pmap_page_is_mapped(m), ("vm_page_free_toq: freeing mapped page %p", m)); } else KASSERT(m->queue == PQ_NONE, ("vm_page_free_toq: unmanaged page %p is queued", m)); VM_CNT_INC(v_tfree); if (vm_page_sbusied(m)) panic("vm_page_free: freeing busy page %p", m); vm_page_remove(m); /* * If fictitious remove object association and * return. */ if ((m->flags & PG_FICTITIOUS) != 0) { KASSERT(m->wire_count == 1, ("fictitious page %p is not wired", m)); KASSERT(m->queue == PQ_NONE, ("fictitious page %p is queued", m)); return (false); } if (m->queue != PQ_NONE) { if (pagequeue_locked) vm_page_dequeue_locked(m); else vm_page_dequeue(m); } m->valid = 0; vm_page_undirty(m); if (m->wire_count != 0) panic("vm_page_free: freeing wired page %p", m); if (m->hold_count != 0) { m->flags &= ~PG_ZERO; KASSERT((m->flags & PG_UNHOLDFREE) == 0, ("vm_page_free: freeing PG_UNHOLDFREE page %p", m)); m->flags |= PG_UNHOLDFREE; return (false); } /* * Restore the default memory attribute to the page. */ if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT) pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT); return (true); } /* * Insert the page into the physical memory allocator's free page * queues. This is the last step to free a page. */ static void vm_page_free_phys(vm_page_t m) { mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); vm_phys_freecnt_adj(m, 1); #if VM_NRESERVLEVEL > 0 if (!vm_reserv_free_page(m)) #endif vm_phys_free_pages(m, 0); } void vm_page_free_phys_pglist(struct pglist *tq) { vm_page_t m; if (TAILQ_EMPTY(tq)) return; mtx_lock(&vm_page_queue_free_mtx); TAILQ_FOREACH(m, tq, listq) vm_page_free_phys(m); vm_page_free_wakeup(); mtx_unlock(&vm_page_queue_free_mtx); } /* * vm_page_free_toq: * * Returns the given page to the free list, disassociating it * from any VM object. * * The object must be locked. The page must be locked if it is * managed. */ void vm_page_free_toq(vm_page_t m) { if (!vm_page_free_prep(m, false)) return; mtx_lock(&vm_page_queue_free_mtx); vm_page_free_phys(m); vm_page_free_wakeup(); mtx_unlock(&vm_page_queue_free_mtx); } /* * vm_page_wire: * * Mark this page as wired down by yet * another map, removing it from paging queues * as necessary. * * If the page is fictitious, then its wire count must remain one. * * The page must be locked. */ void vm_page_wire(vm_page_t m) { /* * Only bump the wire statistics if the page is not already wired, * and only unqueue the page if it is on some queue (if it is unmanaged * it is already off the queues). */ vm_page_lock_assert(m, MA_OWNED); if ((m->flags & PG_FICTITIOUS) != 0) { KASSERT(m->wire_count == 1, ("vm_page_wire: fictitious page %p's wire count isn't one", m)); return; } if (m->wire_count == 0) { KASSERT((m->oflags & VPO_UNMANAGED) == 0 || m->queue == PQ_NONE, ("vm_page_wire: unmanaged page %p is queued", m)); vm_page_remque(m); atomic_add_int(&vm_cnt.v_wire_count, 1); } m->wire_count++; KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); } /* * vm_page_unwire: * * Release one wiring of the specified page, potentially allowing it to be * paged out. 
Returns TRUE if the number of wirings transitions to zero and * FALSE otherwise. * * Only managed pages belonging to an object can be paged out. If the number * of wirings transitions to zero and the page is eligible for page out, then * the page is added to the specified paging queue (unless PQ_NONE is * specified). * * If a page is fictitious, then its wire count must always be one. * * A managed page must be locked. */ boolean_t vm_page_unwire(vm_page_t m, uint8_t queue) { KASSERT(queue < PQ_COUNT || queue == PQ_NONE, ("vm_page_unwire: invalid queue %u request for page %p", queue, m)); if ((m->oflags & VPO_UNMANAGED) == 0) vm_page_assert_locked(m); if ((m->flags & PG_FICTITIOUS) != 0) { KASSERT(m->wire_count == 1, ("vm_page_unwire: fictitious page %p's wire count isn't one", m)); return (FALSE); } if (m->wire_count > 0) { m->wire_count--; if (m->wire_count == 0) { atomic_subtract_int(&vm_cnt.v_wire_count, 1); if ((m->oflags & VPO_UNMANAGED) == 0 && m->object != NULL && queue != PQ_NONE) vm_page_enqueue(queue, m); return (TRUE); } else return (FALSE); } else panic("vm_page_unwire: page %p's wire count is zero", m); } /* * Move the specified page to the inactive queue. * * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive * queue. However, setting "noreuse" to TRUE will accelerate the specified * page's reclamation, but it will not unmap the page from any address space. * This is implemented by inserting the page near the head of the inactive * queue, using a marker page to guide FIFO insertion ordering. * * The page must be locked. */ static inline void _vm_page_deactivate(vm_page_t m, boolean_t noreuse) { struct vm_pagequeue *pq; int queue; vm_page_assert_locked(m); /* * Ignore if the page is already inactive, unless it is unlikely to be * reactivated. */ if ((queue = m->queue) == PQ_INACTIVE && !noreuse) return; if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE]; /* Avoid multiple acquisitions of the inactive queue lock. */ if (queue == PQ_INACTIVE) { vm_pagequeue_lock(pq); vm_page_dequeue_locked(m); } else { if (queue != PQ_NONE) vm_page_dequeue(m); vm_pagequeue_lock(pq); } m->queue = PQ_INACTIVE; if (noreuse) TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead, m, plinks.q); else TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); vm_pagequeue_cnt_inc(pq); vm_pagequeue_unlock(pq); } } /* * Move the specified page to the inactive queue. * * The page must be locked. */ void vm_page_deactivate(vm_page_t m) { _vm_page_deactivate(m, FALSE); } /* * Move the specified page to the inactive queue with the expectation * that it is unlikely to be reused. * * The page must be locked. */ void vm_page_deactivate_noreuse(vm_page_t m) { _vm_page_deactivate(m, TRUE); } /* * vm_page_launder * * Put a page in the laundry. */ void vm_page_launder(vm_page_t m) { int queue; vm_page_assert_locked(m); if ((queue = m->queue) != PQ_LAUNDRY) { if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { if (queue != PQ_NONE) vm_page_dequeue(m); vm_page_enqueue(PQ_LAUNDRY, m); } else KASSERT(queue == PQ_NONE, ("wired page %p is queued", m)); } } /* * vm_page_unswappable * * Put a page in the PQ_UNSWAPPABLE holding queue. */ void vm_page_unswappable(vm_page_t m) { vm_page_assert_locked(m); KASSERT(m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0, ("page %p already unswappable", m)); if (m->queue != PQ_NONE) vm_page_dequeue(m); vm_page_enqueue(PQ_UNSWAPPABLE, m); } /* * Attempt to free the page. 
If it cannot be freed, do nothing. Returns true * if the page is freed and false otherwise. * * The page must be managed. The page and its containing object must be * locked. */ bool vm_page_try_to_free(vm_page_t m) { vm_page_assert_locked(m); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m)); if (m->dirty != 0 || m->hold_count != 0 || m->wire_count != 0 || vm_page_busied(m)) return (false); if (m->object->ref_count != 0) { pmap_remove_all(m); if (m->dirty != 0) return (false); } vm_page_free(m); return (true); } /* * vm_page_advise * * Apply the specified advice to the given page. * * The object and page must be locked. */ void vm_page_advise(vm_page_t m, int advice) { vm_page_assert_locked(m); VM_OBJECT_ASSERT_WLOCKED(m->object); if (advice == MADV_FREE) /* * Mark the page clean. This will allow the page to be freed * without first paging it out. MADV_FREE pages are often * quickly reused by malloc(3), so we do not do anything that * would result in a page fault on a later access. */ vm_page_undirty(m); else if (advice != MADV_DONTNEED) { if (advice == MADV_WILLNEED) vm_page_activate(m); return; } /* * Clear any references to the page. Otherwise, the page daemon will * immediately reactivate the page. */ vm_page_aflag_clear(m, PGA_REFERENCED); if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m)) vm_page_dirty(m); /* * Place clean pages near the head of the inactive queue rather than * the tail, thus defeating the queue's LRU operation and ensuring that * the page will be reused quickly. Dirty pages not already in the * laundry are moved there. */ if (m->dirty == 0) vm_page_deactivate_noreuse(m); else vm_page_launder(m); } /* * Grab a page, waiting until we are waken up due to the page * changing state. We keep on waiting, if the page continues * to be in the object. If the page doesn't exist, first allocate it * and then conditionally zero it. * * This routine may sleep. * * The object must be locked on entry. The lock will, however, be released * and reacquired if the routine sleeps. */ vm_page_t vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) { vm_page_t m; int sleep; int pflags; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || (allocflags & VM_ALLOC_IGN_SBUSY) != 0, ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch")); pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL); if ((allocflags & VM_ALLOC_NOWAIT) == 0) pflags |= VM_ALLOC_WAITFAIL; retrylookup: if ((m = vm_page_lookup(object, pindex)) != NULL) { sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ? vm_page_xbusied(m) : vm_page_busied(m); if (sleep) { if ((allocflags & VM_ALLOC_NOWAIT) != 0) return (NULL); /* * Reference the page before unlocking and * sleeping so that the page daemon is less * likely to reclaim it. 
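 * (Editorial note: with PGA_REFERENCED set, the page daemon will
 * reactivate or requeue the page rather than reclaim it should it scan
 * the page while this thread sleeps, so the page is normally still
 * resident when the lookup is retried.)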
*/ vm_page_aflag_set(m, PGA_REFERENCED); vm_page_lock(m); VM_OBJECT_WUNLOCK(object); vm_page_busy_sleep(m, "pgrbwt", (allocflags & VM_ALLOC_IGN_SBUSY) != 0); VM_OBJECT_WLOCK(object); goto retrylookup; } else { if ((allocflags & VM_ALLOC_WIRED) != 0) { vm_page_lock(m); vm_page_wire(m); vm_page_unlock(m); } if ((allocflags & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) vm_page_xbusy(m); if ((allocflags & VM_ALLOC_SBUSY) != 0) vm_page_sbusy(m); return (m); } } m = vm_page_alloc(object, pindex, pflags); if (m == NULL) { if ((allocflags & VM_ALLOC_NOWAIT) != 0) return (NULL); goto retrylookup; } if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); return (m); } /* * Return the specified range of pages from the given object. For each * page offset within the range, if a page already exists within the object * at that offset and it is busy, then wait for it to change state. If, * instead, the page doesn't exist, then allocate it. * * The caller must always specify an allocation class. * * allocation classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs the pages * * The caller must always specify that the pages are to be busied and/or * wired. * * optional allocation flags: * VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages * VM_ALLOC_NOBUSY do not exclusive busy the page * VM_ALLOC_NOWAIT do not sleep * VM_ALLOC_SBUSY set page to sbusy state * VM_ALLOC_WIRED wire the pages * VM_ALLOC_ZERO zero and validate any invalid pages * * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it * may return a partial prefix of the requested range. */ int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags, vm_page_t *ma, int count) { vm_page_t m, mpred; int pflags; int i; bool sleep; VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0, ("vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed")); KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 || (allocflags & VM_ALLOC_WIRED) != 0, ("vm_page_grab_pages: the pages must be busied or wired")); KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 || (allocflags & VM_ALLOC_IGN_SBUSY) != 0, ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch")); if (count == 0) return (0); pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY); if ((allocflags & VM_ALLOC_NOWAIT) == 0) pflags |= VM_ALLOC_WAITFAIL; i = 0; retrylookup: m = vm_radix_lookup_le(&object->rtree, pindex + i); if (m == NULL || m->pindex != pindex + i) { mpred = m; m = NULL; } else mpred = TAILQ_PREV(m, pglist, listq); for (; i < count; i++) { if (m != NULL) { sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ? vm_page_xbusied(m) : vm_page_busied(m); if (sleep) { if ((allocflags & VM_ALLOC_NOWAIT) != 0) break; /* * Reference the page before unlocking and * sleeping so that the page daemon is less * likely to reclaim it. 
*/ vm_page_aflag_set(m, PGA_REFERENCED); vm_page_lock(m); VM_OBJECT_WUNLOCK(object); vm_page_busy_sleep(m, "grbmaw", (allocflags & VM_ALLOC_IGN_SBUSY) != 0); VM_OBJECT_WLOCK(object); goto retrylookup; } if ((allocflags & VM_ALLOC_WIRED) != 0) { vm_page_lock(m); vm_page_wire(m); vm_page_unlock(m); } if ((allocflags & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) vm_page_xbusy(m); if ((allocflags & VM_ALLOC_SBUSY) != 0) vm_page_sbusy(m); } else { m = vm_page_alloc_after(object, pindex + i, pflags | VM_ALLOC_COUNT(count - i), mpred); if (m == NULL) { if ((allocflags & VM_ALLOC_NOWAIT) != 0) break; goto retrylookup; } } if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) { if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); m->valid = VM_PAGE_BITS_ALL; } ma[i] = mpred = m; m = vm_page_next(m); } return (i); } /* * Mapping function for valid or dirty bits in a page. * * Inputs are required to range within a page. */ vm_page_bits_t vm_page_bits(int base, int size) { int first_bit; int last_bit; KASSERT( base + size <= PAGE_SIZE, ("vm_page_bits: illegal base/size %d/%d", base, size) ); if (size == 0) /* handle degenerate case */ return (0); first_bit = base >> DEV_BSHIFT; last_bit = (base + size - 1) >> DEV_BSHIFT; return (((vm_page_bits_t)2 << last_bit) - ((vm_page_bits_t)1 << first_bit)); } /* * vm_page_set_valid_range: * * Sets portions of a page valid. The arguments are expected * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive * of any partial chunks touched by the range. The invalid portion of * such chunks will be zeroed. * * (base + size) must be less then or equal to PAGE_SIZE. */ void vm_page_set_valid_range(vm_page_t m, int base, int size) { int endoff, frag; VM_OBJECT_ASSERT_WLOCKED(m->object); if (size == 0) /* handle degenerate case */ return; /* * If the base is not DEV_BSIZE aligned and the valid * bit is clear, we have to zero out a portion of the * first block. */ if ((frag = rounddown2(base, DEV_BSIZE)) != base && (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) pmap_zero_page_area(m, frag, base - frag); /* * If the ending offset is not DEV_BSIZE aligned and the * valid bit is clear, we have to zero out a portion of * the last block. */ endoff = base + size; if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) pmap_zero_page_area(m, endoff, DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); /* * Assert that no previously invalid block that is now being validated * is already dirty. */ KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, ("vm_page_set_valid_range: page %p is dirty", m)); /* * Set valid bits inclusive of any overlap. */ m->valid |= vm_page_bits(base, size); } /* * Clear the given bits from the specified page's dirty field. */ static __inline void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) { uintptr_t addr; #if PAGE_SIZE < 16384 int shift; #endif /* * If the object is locked and the page is neither exclusive busy nor * write mapped, then the page's dirty field cannot possibly be * set by a concurrent pmap operation. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) m->dirty &= ~pagebits; else { /* * The pmap layer can call vm_page_dirty() without * holding a distinguished lock. The combination of * the object's lock and an atomic operation suffice * to guarantee consistency of the page dirty field. * * For PAGE_SIZE == 32768 case, compiler already * properly aligns the dirty field, so no forcible * alignment is needed. 
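 *
 * (Editorial example of the sub-word trick used below for the smaller
 * page sizes, assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512: the
 * dirty field is then a single byte.  If that byte lies at offset 3
 * within its naturally aligned 32-bit word, the little-endian shift is
 * 3 * NBBY == 24, and atomic_clear_32() on the containing word with
 * "pagebits << 24" clears only the dirty bits, leaving the neighbouring
 * bytes untouched.)
 *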
Only require existence of * atomic_clear_64 when page size is 32768. */ addr = (uintptr_t)&m->dirty; #if PAGE_SIZE == 32768 atomic_clear_64((uint64_t *)addr, pagebits); #elif PAGE_SIZE == 16384 atomic_clear_32((uint32_t *)addr, pagebits); #else /* PAGE_SIZE <= 8192 */ /* * Use a trick to perform a 32-bit atomic on the * containing aligned word, to not depend on the existence * of atomic_clear_{8, 16}. */ shift = addr & (sizeof(uint32_t) - 1); #if BYTE_ORDER == BIG_ENDIAN shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; #else shift *= NBBY; #endif addr &= ~(sizeof(uint32_t) - 1); atomic_clear_32((uint32_t *)addr, pagebits << shift); #endif /* PAGE_SIZE */ } } /* * vm_page_set_validclean: * * Sets portions of a page valid and clean. The arguments are expected * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive * of any partial chunks touched by the range. The invalid portion of * such chunks will be zero'd. * * (base + size) must be less then or equal to PAGE_SIZE. */ void vm_page_set_validclean(vm_page_t m, int base, int size) { vm_page_bits_t oldvalid, pagebits; int endoff, frag; VM_OBJECT_ASSERT_WLOCKED(m->object); if (size == 0) /* handle degenerate case */ return; /* * If the base is not DEV_BSIZE aligned and the valid * bit is clear, we have to zero out a portion of the * first block. */ if ((frag = rounddown2(base, DEV_BSIZE)) != base && (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0) pmap_zero_page_area(m, frag, base - frag); /* * If the ending offset is not DEV_BSIZE aligned and the * valid bit is clear, we have to zero out a portion of * the last block. */ endoff = base + size; if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff && (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0) pmap_zero_page_area(m, endoff, DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); /* * Set valid, clear dirty bits. If validating the entire * page we can safely clear the pmap modify bit. We also * use this opportunity to clear the VPO_NOSYNC flag. If a process * takes a write fault on a MAP_NOSYNC memory area the flag will * be set again. * * We set valid bits inclusive of any overlap, but we can only * clear dirty bits for DEV_BSIZE chunks that are fully within * the range. */ oldvalid = m->valid; pagebits = vm_page_bits(base, size); m->valid |= pagebits; #if 0 /* NOT YET */ if ((frag = base & (DEV_BSIZE - 1)) != 0) { frag = DEV_BSIZE - frag; base += frag; size -= frag; if (size < 0) size = 0; } pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); #endif if (base == 0 && size == PAGE_SIZE) { /* * The page can only be modified within the pmap if it is * mapped, and it can only be mapped if it was previously * fully valid. */ if (oldvalid == VM_PAGE_BITS_ALL) /* * Perform the pmap_clear_modify() first. Otherwise, * a concurrent pmap operation, such as * pmap_protect(), could clear a modification in the * pmap and set the dirty field on the page before * pmap_clear_modify() had begun and after the dirty * field was cleared here. */ pmap_clear_modify(m); m->dirty = 0; m->oflags &= ~VPO_NOSYNC; } else if (oldvalid != VM_PAGE_BITS_ALL) m->dirty &= ~pagebits; else vm_page_clear_dirty_mask(m, pagebits); } void vm_page_clear_dirty(vm_page_t m, int base, int size) { vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); } /* * vm_page_set_invalid: * * Invalidates DEV_BSIZE'd chunks within a page. Both the * valid and dirty bits for the effected areas are cleared. 
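 *
 * (Editorial example of the chunk-to-bit mapping used here, assuming
 * DEV_BSIZE == 512: vm_page_bits(0, 1024) covers the first two chunks
 * and yields the mask 0x3, while vm_page_bits(512, 512) covers only the
 * second chunk and yields 0x2.)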
*/ void vm_page_set_invalid(vm_page_t m, int base, int size) { vm_page_bits_t bits; vm_object_t object; object = m->object; VM_OBJECT_ASSERT_WLOCKED(object); if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + size >= object->un_pager.vnp.vnp_size) bits = VM_PAGE_BITS_ALL; else bits = vm_page_bits(base, size); if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL && bits != 0) pmap_remove_all(m); KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) || !pmap_page_is_mapped(m), ("vm_page_set_invalid: page %p is mapped", m)); m->valid &= ~bits; m->dirty &= ~bits; } /* * vm_page_zero_invalid() * * The kernel assumes that the invalid portions of a page contain * garbage, but such pages can be mapped into memory by user code. * When this occurs, we must zero out the non-valid portions of the * page so user code sees what it expects. * * Pages are most often semi-valid when the end of a file is mapped * into memory and the file's size is not page aligned. */ void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) { int b; int i; VM_OBJECT_ASSERT_WLOCKED(m->object); /* * Scan the valid bits looking for invalid sections that * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the * valid bit may be set ) have already been zeroed by * vm_page_set_validclean(). */ for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { if (i == (PAGE_SIZE / DEV_BSIZE) || (m->valid & ((vm_page_bits_t)1 << i))) { if (i > b) { pmap_zero_page_area(m, b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); } b = i + 1; } } /* * setvalid is TRUE when we can safely set the zero'd areas * as being valid. We can do this if there are no cache consistancy * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. */ if (setvalid) m->valid = VM_PAGE_BITS_ALL; } /* * vm_page_is_valid: * * Is (partial) page valid? Note that the case where size == 0 * will return FALSE in the degenerate case where the page is * entirely invalid, and TRUE otherwise. */ int vm_page_is_valid(vm_page_t m, int base, int size) { vm_page_bits_t bits; VM_OBJECT_ASSERT_LOCKED(m->object); bits = vm_page_bits(base, size); return (m->valid != 0 && (m->valid & bits) == bits); } /* * Returns true if all of the specified predicates are true for the entire * (super)page and false otherwise. */ bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m) { vm_object_t object; int i, npages; object = m->object; VM_OBJECT_ASSERT_LOCKED(object); npages = atop(pagesizes[m->psind]); /* * The physically contiguous pages that make up a superpage, i.e., a * page with a page size index ("psind") greater than zero, will * occupy adjacent entries in vm_page_array[]. */ for (i = 0; i < npages; i++) { /* Always test object consistency, including "skip_m". */ if (m[i].object != object) return (false); if (&m[i] == skip_m) continue; if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i])) return (false); if ((flags & PS_ALL_DIRTY) != 0) { /* * Calling vm_page_test_dirty() or pmap_is_modified() * might stop this case from spuriously returning * "false". However, that would require a write lock * on the object containing "m[i]". */ if (m[i].dirty != VM_PAGE_BITS_ALL) return (false); } if ((flags & PS_ALL_VALID) != 0 && m[i].valid != VM_PAGE_BITS_ALL) return (false); } return (true); } /* * Set the page's dirty bits if the page is modified. 
*/ void vm_page_test_dirty(vm_page_t m) { VM_OBJECT_ASSERT_WLOCKED(m->object); if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) vm_page_dirty(m); } void vm_page_lock_KBI(vm_page_t m, const char *file, int line) { mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); } void vm_page_unlock_KBI(vm_page_t m, const char *file, int line) { mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line); } int vm_page_trylock_KBI(vm_page_t m, const char *file, int line) { return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line)); } #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line) { vm_page_lock_assert_KBI(m, MA_OWNED, file, line); } void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line) { mtx_assert_(vm_page_lockptr(m), a, file, line); } #endif #ifdef INVARIANTS void vm_page_object_lock_assert(vm_page_t m) { /* * Certain of the page's fields may only be modified by the * holder of the containing object's lock or the exclusive busy. * holder. Unfortunately, the holder of the write busy is * not recorded, and thus cannot be checked here. */ if (m->object != NULL && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_WLOCKED(m->object); } void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits) { if ((bits & PGA_WRITEABLE) == 0) return; /* * The PGA_WRITEABLE flag can only be set if the page is * managed, is exclusively busied or the object is locked. * Currently, this flag is only set by pmap_enter(). */ KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("PGA_WRITEABLE on unmanaged page")); if (!vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); } #endif #include "opt_ddb.h" #ifdef DDB #include #include DB_SHOW_COMMAND(page, vm_page_print_page_info) { db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count); db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count); db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count); db_printf("vm_cnt.v_laundry_count: %d\n", vm_cnt.v_laundry_count); db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count); db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved); db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min); db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target); db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target); } DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) { int dom; db_printf("pq_free %d\n", vm_cnt.v_free_count); for (dom = 0; dom < vm_ndomains; dom++) { db_printf( "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n", dom, vm_dom[dom].vmd_page_count, vm_dom[dom].vmd_free_count, vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt, vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt, vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt, vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt); } } DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) { vm_page_t m; boolean_t phys; if (!have_addr) { db_printf("show pginfo addr\n"); return; } phys = strchr(modif, 'p') != NULL; if (phys) m = PHYS_TO_VM_PAGE(addr); else m = (vm_page_t)addr; db_printf( "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n" " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags, m->flags, m->act_count, m->busy_lock, m->valid, m->dirty); } #endif /* DDB */ Index: user/jeff/numa/sys/vm/vm_phys.c =================================================================== --- user/jeff/numa/sys/vm/vm_phys.c 
(revision 326888) +++ user/jeff/numa/sys/vm/vm_phys.c (revision 326889) @@ -1,1261 +1,1259 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2006 Rice University * Copyright (c) 2007 Alan L. Cox * All rights reserved. * * This software was developed for the FreeBSD Project by Alan L. Cox, * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Physical memory system implementation * * Any external functions defined by this module are only to be used by the * virtual memory system. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include - _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX, "Too many physsegs."); #ifdef VM_NUMA_ALLOC struct mem_affinity *mem_affinity; int *mem_locality; #endif int vm_ndomains = 1; struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX]; int vm_phys_nsegs; struct vm_phys_fictitious_seg; static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *, struct vm_phys_fictitious_seg *); RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree = RB_INITIALIZER(_vm_phys_fictitious_tree); struct vm_phys_fictitious_seg { RB_ENTRY(vm_phys_fictitious_seg) node; /* Memory region data */ vm_paddr_t start; vm_paddr_t end; vm_page_t first_page; }; RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node, vm_phys_fictitious_cmp); static struct rwlock vm_phys_fictitious_reg_lock; MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages"); static struct vm_freelist vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER]; static int vm_nfreelists; /* * Provides the mapping from VM_FREELIST_* to free list indices (flind). */ static int vm_freelist_to_flind[VM_NFREELIST]; CTASSERT(VM_FREELIST_DEFAULT == 0); #ifdef VM_FREELIST_ISADMA #define VM_ISADMA_BOUNDARY 16777216 #endif #ifdef VM_FREELIST_DMA32 #define VM_DMA32_BOUNDARY ((vm_paddr_t)1 << 32) #endif /* * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about * the ordering of the free list boundaries. 
*/ #if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY) CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY); #endif #if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY) CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY); #endif static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS); SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info"); static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS); SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info"); #ifdef VM_NUMA_ALLOC static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS); SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info"); #endif SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD, &vm_ndomains, 0, "Number of physical memory domains available."); static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary); static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain); static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end); static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order); /* * Red-black tree helpers for vm fictitious range management. */ static inline int vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p, struct vm_phys_fictitious_seg *range) { KASSERT(range->start != 0 && range->end != 0, ("Invalid range passed on search for vm_fictitious page")); if (p->start >= range->end) return (1); if (p->start < range->start) return (-1); return (0); } static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1, struct vm_phys_fictitious_seg *p2) { /* Check if this is a search for a page */ if (p1->end == 0) return (vm_phys_fictitious_in_range(p1, p2)); KASSERT(p2->end != 0, ("Invalid range passed as second parameter to vm fictitious comparison")); /* Searching to add a new range */ if (p1->end <= p2->start) return (-1); if (p1->start >= p2->end) return (1); panic("Trying to add overlapping vm fictitious ranges:\n" "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start, (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end); } boolean_t vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high) { struct vm_phys_seg *s; int idx; while ((idx = ffsl(mask)) != 0) { idx--; /* ffsl counts from 1 */ mask &= ~(1UL << idx); s = &vm_phys_segs[idx]; if (low < s->end && high > s->start) return (TRUE); } return (FALSE); } /* * Outputs the state of the physical memory allocator, specifically, * the amount of physical memory in each free list. 
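 *
 * (Editorial note: this handler is attached to the "vm.phys_free" OID
 * declared above, so the tables it formats can be inspected from
 * userland with, for example, "sysctl vm.phys_free".)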
*/ static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; struct vm_freelist *fl; int dom, error, flind, oind, pind; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req); for (dom = 0; dom < vm_ndomains; dom++) { sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom); for (flind = 0; flind < vm_nfreelists; flind++) { sbuf_printf(&sbuf, "\nFREE LIST %d:\n" "\n ORDER (SIZE) | NUMBER" "\n ", flind); for (pind = 0; pind < VM_NFREEPOOL; pind++) sbuf_printf(&sbuf, " | POOL %d", pind); sbuf_printf(&sbuf, "\n-- "); for (pind = 0; pind < VM_NFREEPOOL; pind++) sbuf_printf(&sbuf, "-- -- "); sbuf_printf(&sbuf, "--\n"); for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { sbuf_printf(&sbuf, " %2d (%6dK)", oind, 1 << (PAGE_SHIFT - 10 + oind)); for (pind = 0; pind < VM_NFREEPOOL; pind++) { fl = vm_phys_free_queues[dom][flind][pind]; sbuf_printf(&sbuf, " | %6d", fl[oind].lcnt); } sbuf_printf(&sbuf, "\n"); } } } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } /* * Outputs the set of physical memory segments. */ static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; struct vm_phys_seg *seg; int error, segind; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); for (segind = 0; segind < vm_phys_nsegs; segind++) { sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind); seg = &vm_phys_segs[segind]; sbuf_printf(&sbuf, "start: %#jx\n", (uintmax_t)seg->start); sbuf_printf(&sbuf, "end: %#jx\n", (uintmax_t)seg->end); sbuf_printf(&sbuf, "domain: %d\n", seg->domain); sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues); } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } /* * Return affinity, or -1 if there's no affinity information. */ int vm_phys_mem_affinity(int f, int t) { #ifdef VM_NUMA_ALLOC if (mem_locality == NULL) return (-1); if (f >= vm_ndomains || t >= vm_ndomains) return (-1); return (mem_locality[f * vm_ndomains + t]); #else return (-1); #endif } #ifdef VM_NUMA_ALLOC /* * Outputs the VM locality table. */ static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; int error, i, j; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_printf(&sbuf, "\n"); for (i = 0; i < vm_ndomains; i++) { sbuf_printf(&sbuf, "%d: ", i); for (j = 0; j < vm_ndomains; j++) { sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j)); } sbuf_printf(&sbuf, "\n"); } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } #endif static void vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail) { m->order = order; if (tail) TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q); else TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q); fl[order].lcnt++; } static void vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order) { TAILQ_REMOVE(&fl[order].pl, m, plinks.q); fl[order].lcnt--; m->order = VM_NFREEORDER; } /* * Create a physical memory segment. 
*/ static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain) { struct vm_phys_seg *seg; KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX, ("vm_phys_create_seg: increase VM_PHYSSEG_MAX")); KASSERT(domain >= 0 && domain < vm_ndomains, ("vm_phys_create_seg: invalid domain provided")); seg = &vm_phys_segs[vm_phys_nsegs++]; while (seg > vm_phys_segs && (seg - 1)->start >= end) { *seg = *(seg - 1); seg--; } seg->start = start; seg->end = end; seg->domain = domain; } static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end) { #ifdef VM_NUMA_ALLOC int i; if (mem_affinity == NULL) { _vm_phys_create_seg(start, end, 0); return; } for (i = 0;; i++) { if (mem_affinity[i].end == 0) panic("Reached end of affinity info"); if (mem_affinity[i].end <= start) continue; if (mem_affinity[i].start > start) panic("No affinity info for start %jx", (uintmax_t)start); if (mem_affinity[i].end >= end) { _vm_phys_create_seg(start, end, mem_affinity[i].domain); break; } _vm_phys_create_seg(start, mem_affinity[i].end, mem_affinity[i].domain); start = mem_affinity[i].end; } #else _vm_phys_create_seg(start, end, 0); #endif } /* * Add a physical memory segment. */ void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end) { vm_paddr_t paddr; KASSERT((start & PAGE_MASK) == 0, ("vm_phys_define_seg: start is not page aligned")); KASSERT((end & PAGE_MASK) == 0, ("vm_phys_define_seg: end is not page aligned")); /* * Split the physical memory segment if it spans two or more free * list boundaries. */ paddr = start; #ifdef VM_FREELIST_ISADMA if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) { vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY); paddr = VM_ISADMA_BOUNDARY; } #endif #ifdef VM_FREELIST_LOWMEM if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) { vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY); paddr = VM_LOWMEM_BOUNDARY; } #endif #ifdef VM_FREELIST_DMA32 if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) { vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY); paddr = VM_DMA32_BOUNDARY; } #endif vm_phys_create_seg(paddr, end); } /* * Initialize the physical memory allocator. * * Requires that vm_page_array is initialized! */ void vm_phys_init(void) { struct vm_freelist *fl; struct vm_phys_seg *seg; u_long npages; int dom, flind, freelist, oind, pind, segind; /* * Compute the number of free lists, and generate the mapping from the * manifest constants VM_FREELIST_* to the free list indices. * * Initially, the entries of vm_freelist_to_flind[] are set to either * 0 or 1 to indicate which free lists should be created. */ npages = 0; for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) { seg = &vm_phys_segs[segind]; #ifdef VM_FREELIST_ISADMA if (seg->end <= VM_ISADMA_BOUNDARY) vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1; else #endif #ifdef VM_FREELIST_LOWMEM if (seg->end <= VM_LOWMEM_BOUNDARY) vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1; else #endif #ifdef VM_FREELIST_DMA32 if ( #ifdef VM_DMA32_NPAGES_THRESHOLD /* * Create the DMA32 free list only if the amount of * physical memory above physical address 4G exceeds the * given threshold. */ npages > VM_DMA32_NPAGES_THRESHOLD && #endif seg->end <= VM_DMA32_BOUNDARY) vm_freelist_to_flind[VM_FREELIST_DMA32] = 1; else #endif { npages += atop(seg->end - seg->start); vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1; } } /* Change each entry into a running total of the free lists. 
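 * (Editorial example with hypothetical creation flags { 1, 0, 1 }: the
 * running totals become { 1, 1, 2 }, so vm_nfreelists is 2, and the
 * decrement below maps them to the contiguous indices { 0, 0, 1 }.)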
*/ for (freelist = 1; freelist < VM_NFREELIST; freelist++) { vm_freelist_to_flind[freelist] += vm_freelist_to_flind[freelist - 1]; } vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1]; KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists")); /* Change each entry into a free list index. */ for (freelist = 0; freelist < VM_NFREELIST; freelist++) vm_freelist_to_flind[freelist]--; /* * Initialize the first_page and free_queues fields of each physical * memory segment. */ #ifdef VM_PHYSSEG_SPARSE npages = 0; #endif for (segind = 0; segind < vm_phys_nsegs; segind++) { seg = &vm_phys_segs[segind]; #ifdef VM_PHYSSEG_SPARSE seg->first_page = &vm_page_array[npages]; npages += atop(seg->end - seg->start); #else seg->first_page = PHYS_TO_VM_PAGE(seg->start); #endif #ifdef VM_FREELIST_ISADMA if (seg->end <= VM_ISADMA_BOUNDARY) { flind = vm_freelist_to_flind[VM_FREELIST_ISADMA]; KASSERT(flind >= 0, ("vm_phys_init: ISADMA flind < 0")); } else #endif #ifdef VM_FREELIST_LOWMEM if (seg->end <= VM_LOWMEM_BOUNDARY) { flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM]; KASSERT(flind >= 0, ("vm_phys_init: LOWMEM flind < 0")); } else #endif #ifdef VM_FREELIST_DMA32 if (seg->end <= VM_DMA32_BOUNDARY) { flind = vm_freelist_to_flind[VM_FREELIST_DMA32]; KASSERT(flind >= 0, ("vm_phys_init: DMA32 flind < 0")); } else #endif { flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT]; KASSERT(flind >= 0, ("vm_phys_init: DEFAULT flind < 0")); } seg->free_queues = &vm_phys_free_queues[seg->domain][flind]; } /* * Initialize the free queues. */ for (dom = 0; dom < vm_ndomains; dom++) { for (flind = 0; flind < vm_nfreelists; flind++) { for (pind = 0; pind < VM_NFREEPOOL; pind++) { fl = vm_phys_free_queues[dom][flind][pind]; for (oind = 0; oind < VM_NFREEORDER; oind++) TAILQ_INIT(&fl[oind].pl); } } } rw_init(&vm_phys_fictitious_reg_lock, "vmfctr"); } /* * Split a contiguous, power of two-sized set of physical pages. */ static __inline void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order) { vm_page_t m_buddy; while (oind > order) { oind--; m_buddy = &m[1 << oind]; KASSERT(m_buddy->order == VM_NFREEORDER, ("vm_phys_split_pages: page %p has unexpected order %d", m_buddy, m_buddy->order)); vm_freelist_add(fl, m_buddy, oind, 0); } } /* * Allocate a contiguous, power of two-sized set of physical pages * from the free lists. * * The free page queues must be locked. */ vm_page_t vm_phys_alloc_pages(int domain, int pool, int order) { vm_page_t m; int flind; for (flind = 0; flind < vm_nfreelists; flind++) { m = vm_phys_alloc_freelist_pages(domain, flind, pool, order); if (m != NULL) return (m); } return (NULL); } /* * Allocate a contiguous, power of two-sized set of physical pages from the * specified free list. The free list must be specified using one of the * manifest constants VM_FREELIST_*. * * The free page queues must be locked. 
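 *
 * [Editorial sketch, illustration only, not part of this revision: a
 *  hypothetical caller of the simpler vm_phys_alloc_pages() wrapper
 *  above, honoring the lock requirement.]
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT, 0);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 *	if (m == NULL)
 *		... no order-0 page is available in this domain ...
 *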
*/ vm_page_t vm_phys_alloc_freelist_pages(int domain, int flind, int pool, int order) { struct vm_freelist *alt, *fl; vm_page_t m; int oind, pind; KASSERT(domain >= 0 && domain < vm_ndomains, ("vm_phys_alloc_freelist_pages: domain %d is out of range", domain)); KASSERT(flind < VM_NFREELIST, ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind)); KASSERT(pool < VM_NFREEPOOL, ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool)); KASSERT(order < VM_NFREEORDER, ("vm_phys_alloc_freelist_pages: order %d is out of range", order)); mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); fl = &vm_phys_free_queues[domain][flind][pool][0]; for (oind = order; oind < VM_NFREEORDER; oind++) { m = TAILQ_FIRST(&fl[oind].pl); if (m != NULL) { vm_freelist_rem(fl, m, oind); vm_phys_split_pages(m, oind, fl, order); return (m); } } /* * The given pool was empty. Find the largest * contiguous, power-of-two-sized set of pages in any * pool. Transfer these pages to the given pool, and * use them to satisfy the allocation. */ for (oind = VM_NFREEORDER - 1; oind >= order; oind--) { for (pind = 0; pind < VM_NFREEPOOL; pind++) { alt = &vm_phys_free_queues[domain][flind][pind][0]; m = TAILQ_FIRST(&alt[oind].pl); if (m != NULL) { vm_freelist_rem(alt, m, oind); vm_phys_set_pool(pool, m, oind); vm_phys_split_pages(m, oind, fl, order); return (m); } } } return (NULL); } /* * Find the vm_page corresponding to the given physical address. */ vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa) { struct vm_phys_seg *seg; int segind; for (segind = 0; segind < vm_phys_nsegs; segind++) { seg = &vm_phys_segs[segind]; if (pa >= seg->start && pa < seg->end) return (&seg->first_page[atop(pa - seg->start)]); } return (NULL); } vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa) { struct vm_phys_fictitious_seg tmp, *seg; vm_page_t m; m = NULL; tmp.start = pa; tmp.end = 0; rw_rlock(&vm_phys_fictitious_reg_lock); seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp); rw_runlock(&vm_phys_fictitious_reg_lock); if (seg == NULL) return (NULL); m = &seg->first_page[atop(pa - seg->start)]; KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m)); return (m); } static inline void vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start, long page_count, vm_memattr_t memattr) { long i; bzero(range, page_count * sizeof(*range)); for (i = 0; i < page_count; i++) { vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr); range[i].oflags &= ~VPO_UNMANAGED; range[i].busy_lock = VPB_UNBUSIED; } } int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end, vm_memattr_t memattr) { struct vm_phys_fictitious_seg *seg; vm_page_t fp; long page_count; #ifdef VM_PHYSSEG_DENSE long pi, pe; long dpage_count; #endif KASSERT(start < end, ("Start of segment isn't less than end (start: %jx end: %jx)", (uintmax_t)start, (uintmax_t)end)); page_count = (end - start) / PAGE_SIZE; #ifdef VM_PHYSSEG_DENSE pi = atop(start); pe = atop(end); if (pi >= first_page && (pi - first_page) < vm_page_array_size) { fp = &vm_page_array[pi - first_page]; if ((pe - first_page) > vm_page_array_size) { /* * We have a segment that starts inside * of vm_page_array, but ends outside of it. * * Use vm_page_array pages for those that are * inside of the vm_page_array range, and * allocate the remaining ones. 
*/ dpage_count = vm_page_array_size - (pi - first_page); vm_phys_fictitious_init_range(fp, start, dpage_count, memattr); page_count -= dpage_count; start += ptoa(dpage_count); goto alloc; } /* * We can allocate the full range from vm_page_array, * so there's no need to register the range in the tree. */ vm_phys_fictitious_init_range(fp, start, page_count, memattr); return (0); } else if (pe > first_page && (pe - first_page) < vm_page_array_size) { /* * We have a segment that ends inside of vm_page_array, * but starts outside of it. */ fp = &vm_page_array[0]; dpage_count = pe - first_page; vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count, memattr); end -= ptoa(dpage_count); page_count -= dpage_count; goto alloc; } else if (pi < first_page && pe > (first_page + vm_page_array_size)) { /* * Trying to register a fictitious range that expands before * and after vm_page_array. */ return (EINVAL); } else { alloc: #endif fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES, M_WAITOK); #ifdef VM_PHYSSEG_DENSE } #endif vm_phys_fictitious_init_range(fp, start, page_count, memattr); seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO); seg->start = start; seg->end = end; seg->first_page = fp; rw_wlock(&vm_phys_fictitious_reg_lock); RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg); rw_wunlock(&vm_phys_fictitious_reg_lock); return (0); } void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end) { struct vm_phys_fictitious_seg *seg, tmp; #ifdef VM_PHYSSEG_DENSE long pi, pe; #endif KASSERT(start < end, ("Start of segment isn't less than end (start: %jx end: %jx)", (uintmax_t)start, (uintmax_t)end)); #ifdef VM_PHYSSEG_DENSE pi = atop(start); pe = atop(end); if (pi >= first_page && (pi - first_page) < vm_page_array_size) { if ((pe - first_page) <= vm_page_array_size) { /* * This segment was allocated using vm_page_array * only, there's nothing to do since those pages * were never added to the tree. */ return; } /* * We have a segment that starts inside * of vm_page_array, but ends outside of it. * * Calculate how many pages were added to the * tree and free them. */ start = ptoa(first_page + vm_page_array_size); } else if (pe > first_page && (pe - first_page) < vm_page_array_size) { /* * We have a segment that ends inside of vm_page_array, * but starts outside of it. */ end = ptoa(first_page); } else if (pi < first_page && pe > (first_page + vm_page_array_size)) { /* Since it's not possible to register such a range, panic. */ panic( "Unregistering not registered fictitious range [%#jx:%#jx]", (uintmax_t)start, (uintmax_t)end); } #endif tmp.start = start; tmp.end = 0; rw_wlock(&vm_phys_fictitious_reg_lock); seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp); if (seg->start != start || seg->end != end) { rw_wunlock(&vm_phys_fictitious_reg_lock); panic( "Unregistering not registered fictitious range [%#jx:%#jx]", (uintmax_t)start, (uintmax_t)end); } RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg); rw_wunlock(&vm_phys_fictitious_reg_lock); free(seg->first_page, M_FICT_PAGES); free(seg, M_FICT_PAGES); } /* * Free a contiguous, power of two-sized set of physical pages. * * The free page queues must be locked. 
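 *
 * (Editorial illustration of the coalescing loop below: freeing an
 * order-0 page whose order-0 buddy is already free merges the pair into
 * an order-1 block; if that block's order-1 buddy is free as well the
 * merge repeats, and so on, until the buddy lies outside the segment,
 * is not free at the matching order, or the maximum order is reached.
 * Only then is the resulting block inserted into its free list.)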
*/ void vm_phys_free_pages(vm_page_t m, int order) { struct vm_freelist *fl; struct vm_phys_seg *seg; vm_paddr_t pa; vm_page_t m_buddy; KASSERT(m->order == VM_NFREEORDER, ("vm_phys_free_pages: page %p has unexpected order %d", m, m->order)); KASSERT(m->pool < VM_NFREEPOOL, ("vm_phys_free_pages: page %p has unexpected pool %d", m, m->pool)); KASSERT(order < VM_NFREEORDER, ("vm_phys_free_pages: order %d is out of range", order)); mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); seg = &vm_phys_segs[m->segind]; if (order < VM_NFREEORDER - 1) { pa = VM_PAGE_TO_PHYS(m); do { pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order)); if (pa < seg->start || pa >= seg->end) break; m_buddy = &seg->first_page[atop(pa - seg->start)]; if (m_buddy->order != order) break; fl = (*seg->free_queues)[m_buddy->pool]; vm_freelist_rem(fl, m_buddy, order); if (m_buddy->pool != m->pool) vm_phys_set_pool(m->pool, m_buddy, order); order++; pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1); m = &seg->first_page[atop(pa - seg->start)]; } while (order < VM_NFREEORDER - 1); } fl = (*seg->free_queues)[m->pool]; vm_freelist_add(fl, m, order, 1); } /* * Free a contiguous, arbitrarily sized set of physical pages. * * The free page queues must be locked. */ void vm_phys_free_contig(vm_page_t m, u_long npages) { u_int n; int order; /* * Avoid unnecessary coalescing by freeing the pages in the largest * possible power-of-two-sized subsets. */ mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); for (;; npages -= n) { /* * Unsigned "min" is used here so that "order" is assigned * "VM_NFREEORDER - 1" when "m"'s physical address is zero * or the low-order bits of its physical address are zero * because the size of a physical address exceeds the size of * a long. */ order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1, VM_NFREEORDER - 1); n = 1 << order; if (npages < n) break; vm_phys_free_pages(m, order); m += n; } /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */ for (; npages > 0; npages -= n) { order = flsl(npages) - 1; n = 1 << order; vm_phys_free_pages(m, order); m += n; } } /* * Scan physical memory between the specified addresses "low" and "high" for a * run of contiguous physical pages that satisfy the specified conditions, and * return the lowest page in the run. The specified "alignment" determines * the alignment of the lowest physical page in the run. If the specified * "boundary" is non-zero, then the run of physical pages cannot span a * physical address that is a multiple of "boundary". * * "npages" must be greater than zero. Both "alignment" and "boundary" must * be a power of two. 
*/ vm_page_t vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary, int options) { vm_paddr_t pa_end; vm_page_t m_end, m_run, m_start; struct vm_phys_seg *seg; int segind; KASSERT(npages > 0, ("npages is 0")); KASSERT(powerof2(alignment), ("alignment is not a power of 2")); KASSERT(powerof2(boundary), ("boundary is not a power of 2")); if (low >= high) return (NULL); for (segind = 0; segind < vm_phys_nsegs; segind++) { seg = &vm_phys_segs[segind]; if (seg->domain != domain) continue; if (seg->start >= high) break; if (low >= seg->end) continue; if (low <= seg->start) m_start = seg->first_page; else m_start = &seg->first_page[atop(low - seg->start)]; if (high < seg->end) pa_end = high; else pa_end = seg->end; if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages)) continue; m_end = &seg->first_page[atop(pa_end - seg->start)]; m_run = vm_page_scan_contig(npages, m_start, m_end, alignment, boundary, options); if (m_run != NULL) return (m_run); } return (NULL); } /* * Set the pool for a contiguous, power of two-sized set of physical pages. */ void vm_phys_set_pool(int pool, vm_page_t m, int order) { vm_page_t m_tmp; for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++) m_tmp->pool = pool; } /* * Search for the given physical page "m" in the free lists. If the search * succeeds, remove "m" from the free lists and return TRUE. Otherwise, return * FALSE, indicating that "m" is not in the free lists. * * The free page queues must be locked. */ boolean_t vm_phys_unfree_page(vm_page_t m) { struct vm_freelist *fl; struct vm_phys_seg *seg; vm_paddr_t pa, pa_half; vm_page_t m_set, m_tmp; int order; mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); /* * First, find the contiguous, power of two-sized set of free * physical pages containing the given physical page "m" and * assign it to "m_set". */ seg = &vm_phys_segs[m->segind]; for (m_set = m, order = 0; m_set->order == VM_NFREEORDER && order < VM_NFREEORDER - 1; ) { order++; pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order)); if (pa >= seg->start) m_set = &seg->first_page[atop(pa - seg->start)]; else return (FALSE); } if (m_set->order < order) return (FALSE); if (m_set->order == VM_NFREEORDER) return (FALSE); KASSERT(m_set->order < VM_NFREEORDER, ("vm_phys_unfree_page: page %p has unexpected order %d", m_set, m_set->order)); /* * Next, remove "m_set" from the free lists. Finally, extract * "m" from "m_set" using an iterative algorithm: While "m_set" * is larger than a page, shrink "m_set" by returning the half * of "m_set" that does not contain "m" to the free lists. */ fl = (*seg->free_queues)[m_set->pool]; order = m_set->order; vm_freelist_rem(fl, m_set, order); while (order > 0) { order--; pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order)); if (m->phys_addr < pa_half) m_tmp = &seg->first_page[atop(pa_half - seg->start)]; else { m_tmp = m_set; m_set = &seg->first_page[atop(pa_half - seg->start)]; } vm_freelist_add(fl, m_tmp, order, 0); } KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency")); return (TRUE); } /* * Allocate a contiguous set of physical pages of the given size * "npages" from the free lists. All of the physical pages must be at * or above the given physical address "low" and below the given * physical address "high". The given value "alignment" determines the * alignment of the first physical page in the set. 
If the given value * "boundary" is non-zero, then the set of physical pages cannot cross * any physical address boundary that is a multiple of that value. Both * "alignment" and "boundary" must be a power of two. */ vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) { vm_paddr_t pa_end, pa_start; vm_page_t m_run; struct vm_phys_seg *seg; int segind; KASSERT(npages > 0, ("npages is 0")); KASSERT(powerof2(alignment), ("alignment is not a power of 2")); KASSERT(powerof2(boundary), ("boundary is not a power of 2")); mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); if (low >= high) return (NULL); m_run = NULL; for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) { seg = &vm_phys_segs[segind]; if (seg->start >= high || seg->domain != domain) continue; if (low >= seg->end) break; if (low <= seg->start) pa_start = seg->start; else pa_start = low; if (high < seg->end) pa_end = high; else pa_end = seg->end; if (pa_end - pa_start < ptoa(npages)) continue; m_run = vm_phys_alloc_seg_contig(seg, npages, low, high, alignment, boundary); if (m_run != NULL) break; } return (m_run); } /* * Allocate a run of contiguous physical pages from the free list for the * specified segment. */ static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary) { struct vm_freelist *fl; vm_paddr_t pa, pa_end, size; vm_page_t m, m_ret; u_long npages_end; int oind, order, pind; KASSERT(npages > 0, ("npages is 0")); KASSERT(powerof2(alignment), ("alignment is not a power of 2")); KASSERT(powerof2(boundary), ("boundary is not a power of 2")); mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); /* Compute the queue that is the best fit for npages. */ for (order = 0; (1 << order) < npages; order++); /* Search for a run satisfying the specified conditions. */ size = npages << PAGE_SHIFT; for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) { for (pind = 0; pind < VM_NFREEPOOL; pind++) { fl = (*seg->free_queues)[pind]; TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) { /* * Is the size of this allocation request * larger than the largest block size? */ if (order >= VM_NFREEORDER) { /* * Determine if a sufficient number of * subsequent blocks to satisfy the * allocation request are free. */ pa = VM_PAGE_TO_PHYS(m_ret); pa_end = pa + size; for (;;) { pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1); if (pa >= pa_end || pa < seg->start || pa >= seg->end) break; m = &seg->first_page[atop(pa - seg->start)]; if (m->order != VM_NFREEORDER - 1) break; } /* If not, go to the next block. */ if (pa < pa_end) continue; } /* * Determine if the blocks are within the * given range, satisfy the given alignment, * and do not cross the given boundary. */ pa = VM_PAGE_TO_PHYS(m_ret); pa_end = pa + size; if (pa >= low && pa_end <= high && (pa & (alignment - 1)) == 0 && rounddown2(pa ^ (pa_end - 1), boundary) == 0) goto done; } } } return (NULL); done: for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) { fl = (*seg->free_queues)[m->pool]; vm_freelist_rem(fl, m, m->order); } if (m_ret->pool != VM_FREEPOOL_DEFAULT) vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind); fl = (*seg->free_queues)[m_ret->pool]; vm_phys_split_pages(m_ret, oind, fl, order); /* Return excess pages to the free lists. 
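 * (Editorial example: for npages == 3 the best-fit order computed above
 * is 2, so if an order-2 block satisfies the request, npages_end below
 * is roundup2(3, 4) == 4 and the single trailing page is handed back
 * via vm_phys_free_contig().)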
*/ npages_end = roundup2(npages, 1 << imin(oind, order)); if (npages < npages_end) vm_phys_free_contig(&m_ret[npages], npages_end - npages); return (m_ret); } #ifdef DDB /* * Show the number of physical pages in each of the free lists. */ DB_SHOW_COMMAND(freepages, db_show_freepages) { struct vm_freelist *fl; int flind, oind, pind, dom; for (dom = 0; dom < vm_ndomains; dom++) { db_printf("DOMAIN: %d\n", dom); for (flind = 0; flind < vm_nfreelists; flind++) { db_printf("FREE LIST %d:\n" "\n ORDER (SIZE) | NUMBER" "\n ", flind); for (pind = 0; pind < VM_NFREEPOOL; pind++) db_printf(" | POOL %d", pind); db_printf("\n-- "); for (pind = 0; pind < VM_NFREEPOOL; pind++) db_printf("-- -- "); db_printf("--\n"); for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { db_printf(" %2.2d (%6.6dK)", oind, 1 << (PAGE_SHIFT - 10 + oind)); for (pind = 0; pind < VM_NFREEPOOL; pind++) { fl = vm_phys_free_queues[dom][flind][pind]; db_printf(" | %6.6d", fl[oind].lcnt); } db_printf("\n"); } db_printf("\n"); } db_printf("\n"); } } #endif Index: user/jeff/numa/usr.bin/cpuset/cpuset.c =================================================================== --- user/jeff/numa/usr.bin/cpuset/cpuset.c (revision 326888) +++ user/jeff/numa/usr.bin/cpuset/cpuset.c (revision 326889) @@ -1,367 +1,465 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007, 2008 Jeffrey Roberson * All rights reserved. * * Copyright (c) 2008 Nokia Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include static int Cflag; static int cflag; static int dflag; static int gflag; static int iflag; static int jflag; static int lflag; +static int nflag; static int pflag; static int rflag; static int sflag; static int tflag; static int xflag; static id_t id; static cpulevel_t level; static cpuwhich_t which; static void usage(void); -static void printset(cpuset_t *mask); +struct numa_policy { + const char *name; + int policy; +}; +static struct numa_policy policies[] = { + { "round-robin", DOMAINSET_POLICY_ROUNDROBIN }, + { "rr", DOMAINSET_POLICY_ROUNDROBIN }, + { "first-touch", DOMAINSET_POLICY_FIRSTTOUCH }, + { "ft", DOMAINSET_POLICY_FIRSTTOUCH }, + { NULL, DOMAINSET_POLICY_INVALID } +}; + +BITSET_DEFINE(bitset, 1); +static void printset(struct bitset *mask, int size); + static void -parselist(char *list, cpuset_t *mask) +parselist(char *list, struct bitset *mask, int size) { enum { NONE, NUM, DASH } state; int lastnum; int curnum; char *l; - if (strcasecmp(list, "all") == 0) { - if (cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1, - sizeof(*mask), mask) != 0) - err(EXIT_FAILURE, "getaffinity"); - return; - } state = NONE; curnum = lastnum = 0; for (l = list; *l != '\0';) { if (isdigit(*l)) { curnum = atoi(l); - if (curnum > CPU_SETSIZE) + if (curnum > size) errx(EXIT_FAILURE, - "Only %d cpus supported", CPU_SETSIZE); + "List entry %d exceeds maximum of %d", + curnum, size); while (isdigit(*l)) l++; switch (state) { case NONE: lastnum = curnum; state = NUM; break; case DASH: for (; lastnum <= curnum; lastnum++) - CPU_SET(lastnum, mask); + BIT_SET(size, lastnum, mask); state = NONE; break; case NUM: default: goto parserr; } continue; } switch (*l) { case ',': switch (state) { case NONE: break; case NUM: - CPU_SET(curnum, mask); + BIT_SET(size, curnum, mask); state = NONE; break; case DASH: goto parserr; break; } break; case '-': if (state != NUM) goto parserr; state = DASH; break; default: goto parserr; } l++; } switch (state) { case NONE: break; case NUM: - CPU_SET(curnum, mask); + BIT_SET(size, curnum, mask); break; case DASH: goto parserr; } return; parserr: - errx(EXIT_FAILURE, "Malformed cpu-list %s", list); + errx(EXIT_FAILURE, "Malformed list %s", list); } static void -printset(cpuset_t *mask) +parsecpulist(char *list, cpuset_t *mask) { + + if (strcasecmp(list, "all") == 0) { + if (cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1, + sizeof(*mask), mask) != 0) + err(EXIT_FAILURE, "getaffinity"); + return; + } + parselist(list, (struct bitset *)mask, CPU_SETSIZE); +} + +/* + * permissively parse policy:domain list + * allow: + * round-robin:0-4 explicit + * round-robin:all explicit root domains + * 0-4 implicit root policy + * round-robin implicit root domains + * all explicit root domains and implicit policy + */ +static void +parsedomainlist(char *list, domainset_t *mask, int *policyp) +{ + domainset_t rootmask; + struct numa_policy *policy; + char *l; + int p; + + /* + * Use the rootset's policy as the default for unspecified policies. 
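The reworked parselist() above fills a generic struct bitset rather than a cpuset_t, with the set size supplied by the caller, so one state machine can populate both CPU lists and memory-domain lists. A minimal sketch of that pattern follows; it assumes the FreeBSD <sys/bitset.h>, <sys/cpuset.h> and <sys/domainset.h> headers, and the mark_range() helper is hypothetical.

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <stdio.h>

BITSET_DEFINE(bitset, 1);       /* one-word shim, as declared in the diff */

/* Set bits first..last in a set of "size" bits, as parselist() does for a-b. */
static void
mark_range(struct bitset *set, int size, int first, int last)
{
        for (; first <= last && first < size; first++)
                BIT_SET(size, first, set);
}

int
main(void)
{
        cpuset_t cpus;
        domainset_t domains;

        CPU_ZERO(&cpus);
        DOMAINSET_ZERO(&domains);
        /* One helper serves both set types; only the declared size differs. */
        mark_range((struct bitset *)&cpus, CPU_SETSIZE, 0, 3);
        mark_range((struct bitset *)&domains, DOMAINSET_SETSIZE, 1, 1);
        printf("cpu 2: %d, domain 1: %d\n",
            CPU_ISSET(2, &cpus), DOMAINSET_ISSET(1, &domains));
        return (0);
}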
+ */ + if (cpuset_getdomain(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1, + sizeof(rootmask), &rootmask, &p) != 0) + err(EXIT_FAILURE, "getdomain"); + + l = list; + for (policy = &policies[0]; policy->name != NULL; policy++) { + if (strncasecmp(l, policy->name, strlen(policy->name)) == 0) { + p = policy->policy; + l += strlen(policy->name); + if (*l != ':' && *l != '\0') + errx(EXIT_FAILURE, "Malformed list %s", list); + if (*l == ':') + l++; + break; + } + } + *policyp = p; + if (strcasecmp(l, "all") == 0 || *l == '\0') { + DOMAINSET_COPY(&rootmask, mask); + return; + } + parselist(l, (struct bitset *)mask, DOMAINSET_SETSIZE); +} + +static void +printset(struct bitset *mask, int size) +{ int once; - int cpu; + int bit; - for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) { - if (CPU_ISSET(cpu, mask)) { + for (once = 0, bit = 0; bit < size; bit++) { + if (BIT_ISSET(size, bit, mask)) { if (once == 0) { - printf("%d", cpu); + printf("%d", bit); once = 1; } else - printf(", %d", cpu); + printf(", %d", bit); } } printf("\n"); } static const char *whichnames[] = { NULL, "tid", "pid", "cpuset", "irq", "jail", "domain" }; static const char *levelnames[] = { NULL, " root", " cpuset", "" }; +static const char *policynames[] = { "invalid", "round-robin", "first-touch" }; static void printaffinity(void) { + domainset_t domain; cpuset_t mask; + int policy; if (cpuset_getaffinity(level, which, id, sizeof(mask), &mask) != 0) err(EXIT_FAILURE, "getaffinity"); printf("%s %jd%s mask: ", whichnames[which], (intmax_t)id, levelnames[level]); - printset(&mask); + if (dflag) + goto out; + if (cpuset_getdomain(level, which, id, sizeof(domain), &domain, + &policy) != 0) + err(EXIT_FAILURE, "getdomain"); + printset((struct bitset *)&mask, CPU_SETSIZE); + printf("%s %jd%s domain policy: %s mask: ", whichnames[which], + (intmax_t)id, levelnames[level], policynames[policy]); + printset((struct bitset *)&domain, DOMAINSET_SETSIZE); +out: exit(EXIT_SUCCESS); } static void printsetid(void) { cpusetid_t setid; /* * Only LEVEL_WHICH && WHICH_CPUSET has a numbered id. 
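printaffinity() above now reports a memory-domain mask and policy alongside the CPU mask, using the new cpuset_getdomain() call with the same level/which/id triple. A minimal sketch that queries the calling process directly, assuming a kernel and libc that provide cpuset_getdomain():

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        domainset_t mask;
        int domain, policy;

        /* Query the memory-domain mask and policy of the current process. */
        if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
            sizeof(mask), &mask, &policy) != 0)
                err(EXIT_FAILURE, "cpuset_getdomain");
        printf("policy %d, domains:", policy);
        for (domain = 0; domain < DOMAINSET_SETSIZE; domain++)
                if (DOMAINSET_ISSET(domain, &mask))
                        printf(" %d", domain);
        printf("\n");
        return (0);
}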
*/ if (level == CPU_LEVEL_WHICH && !sflag) level = CPU_LEVEL_CPUSET; if (cpuset_getid(level, which, id, &setid)) err(errno, "getid"); printf("%s %jd%s id: %d\n", whichnames[which], (intmax_t)id, levelnames[level], setid); } int main(int argc, char *argv[]) { + domainset_t domains; cpusetid_t setid; cpuset_t mask; + int policy; lwpid_t tid; pid_t pid; int ch; CPU_ZERO(&mask); + DOMAINSET_ZERO(&domains); level = CPU_LEVEL_WHICH; which = CPU_WHICH_PID; id = pid = tid = setid = -1; - while ((ch = getopt(argc, argv, "Ccd:gij:l:p:rs:t:x:")) != -1) { + while ((ch = getopt(argc, argv, "Ccd:gij:l:n:p:rs:t:x:")) != -1) { switch (ch) { case 'C': Cflag = 1; break; case 'c': cflag = 1; level = CPU_LEVEL_CPUSET; break; case 'd': dflag = 1; which = CPU_WHICH_DOMAIN; id = atoi(optarg); break; case 'g': gflag = 1; break; case 'i': iflag = 1; break; case 'j': jflag = 1; which = CPU_WHICH_JAIL; id = atoi(optarg); break; case 'l': lflag = 1; - parselist(optarg, &mask); + parsecpulist(optarg, &mask); break; + case 'n': + nflag = 1; + parsedomainlist(optarg, &domains, &policy); + break; case 'p': pflag = 1; which = CPU_WHICH_PID; id = pid = atoi(optarg); break; case 'r': level = CPU_LEVEL_ROOT; rflag = 1; break; case 's': sflag = 1; which = CPU_WHICH_CPUSET; id = setid = atoi(optarg); break; case 't': tflag = 1; which = CPU_WHICH_TID; id = tid = atoi(optarg); break; case 'x': xflag = 1; which = CPU_WHICH_IRQ; id = atoi(optarg); break; default: usage(); } } argc -= optind; argv += optind; if (gflag) { - if (argc || Cflag || lflag) + if (argc || Cflag || lflag || nflag) usage(); /* Only one identity specifier. */ if (dflag + jflag + xflag + sflag + pflag + tflag > 1) usage(); if (iflag) printsetid(); else printaffinity(); exit(EXIT_SUCCESS); } + if (dflag || iflag || rflag) usage(); /* * The user wants to run a command with a set and possibly cpumask. */ if (argc) { if (Cflag || pflag || tflag || xflag || jflag) usage(); if (sflag) { if (cpuset_setid(CPU_WHICH_PID, -1, setid)) err(argc, "setid"); } else { if (cpuset(&setid)) err(argc, "newid"); } if (lflag) { if (cpuset_setaffinity(level, CPU_WHICH_PID, -1, sizeof(mask), &mask) != 0) err(EXIT_FAILURE, "setaffinity"); } + if (nflag) { + if (cpuset_setdomain(level, CPU_WHICH_PID, + -1, sizeof(domains), &domains, policy) != 0) + err(EXIT_FAILURE, "setdomain"); + } errno = 0; execvp(*argv, argv); err(errno == ENOENT ? 127 : 126, "%s", *argv); } /* * We're modifying something that presently exists. */ if (Cflag && (jflag || !pflag || sflag || tflag || xflag)) usage(); - if (!lflag && cflag) + if ((!lflag && !nflag) && cflag) usage(); - if (!lflag && !(Cflag || sflag)) + if ((!lflag && !nflag) && !(Cflag || sflag)) usage(); /* You can only set a mask on a thread. */ if (tflag && (sflag | pflag | xflag | jflag)) usage(); /* You can only set a mask on an irq. */ if (xflag && (jflag | pflag | sflag | tflag)) usage(); if (Cflag) { /* * Create a new cpuset and move the specified process * into the set. */ if (cpuset(&setid) < 0) err(EXIT_FAILURE, "newid"); sflag = 1; } if (pflag && sflag) { if (cpuset_setid(CPU_WHICH_PID, pid, setid)) err(EXIT_FAILURE, "setid"); /* * If the user specifies a set and a list we want the mask * to effect the pid and not the set. 
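The exec path above installs the parsed domain mask and policy with cpuset_setdomain() before execvp(). A process can apply the same restriction to itself; the sketch below is a hypothetical wrapper, with illustrative domain numbers that must exist in the root set, and it assumes cpuset_setdomain() and the DOMAINSET_*() macros from <sys/domainset.h>.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
        domainset_t mask;

        if (argc < 2)
                errx(EXIT_FAILURE, "usage: %s cmd ...", argv[0]);
        /* Allocate round-robin across domains 0 and 1 only. */
        DOMAINSET_ZERO(&mask);
        DOMAINSET_SET(0, &mask);
        DOMAINSET_SET(1, &mask);
        if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
            sizeof(mask), &mask, DOMAINSET_POLICY_ROUNDROBIN) != 0)
                err(EXIT_FAILURE, "cpuset_setdomain");
        execvp(argv[1], argv + 1);
        err(errno == ENOENT ? 127 : 126, "%s", argv[1]);
}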
*/ which = CPU_WHICH_PID; id = pid; } if (lflag) { if (cpuset_setaffinity(level, which, id, sizeof(mask), &mask) != 0) err(EXIT_FAILURE, "setaffinity"); + } + if (nflag) { + if (cpuset_setdomain(level, which, id, sizeof(domains), + &domains, policy) != 0) + err(EXIT_FAILURE, "setdomain"); } exit(EXIT_SUCCESS); } static void usage(void) { fprintf(stderr, "usage: cpuset [-l cpu-list] [-s setid] cmd ...\n"); fprintf(stderr, " cpuset [-l cpu-list] [-s setid] -p pid\n"); fprintf(stderr, " cpuset [-c] [-l cpu-list] -C -p pid\n"); fprintf(stderr, " cpuset [-c] [-l cpu-list] [-j jailid | -p pid | -t tid | -s setid | -x irq]\n"); fprintf(stderr, " cpuset -g [-cir] [-d domain | -j jailid | -p pid | -t tid | -s setid |\n" " -x irq]\n"); exit(1); }